Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 996k |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | string | length 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
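The avg_line_length, max_line_length and alphanum_fraction columns are statistics derived from `content`. The dump does not state their exact formulas, so the following is only a plausible sketch of how such per-file statistics might be computed (the `file_stats` helper is hypothetical):

def file_stats(content: str) -> dict:
    """Line-length and alphanumeric-fraction statistics for one file (assumed formulas)."""
    lines = content.splitlines() or [""]  # treat an empty file as one empty line
    lengths = [len(line) for line in lines]
    alnum = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
        "alphanum_fraction": alnum / len(content) if content else 0.0,
    }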

hexsha: 790e79b85d67c39c7121ab447093d21ae2acf789
size: 29,763
ext: py
lang: Python
max_stars_repo_path: analysis_module/analyzer/AnalyzerDatabaseManager_tmp.py
max_stars_repo_name: ria-ee/monitor
max_stars_repo_head_hexsha: d5cb9384abf38394b35e760729649136cbbc7548
max_stars_repo_licenses: ["MIT"]
max_stars_count: 10
max_stars_repo_stars_event_min_datetime: 2017-12-01T11:59:54.000Z
max_stars_repo_stars_event_max_datetime: 2021-11-08T10:30:35.000Z
max_issues_repo_path: analysis_module/analyzer/AnalyzerDatabaseManager_tmp.py
max_issues_repo_name: ria-ee/monitor
max_issues_repo_head_hexsha: d5cb9384abf38394b35e760729649136cbbc7548
max_issues_repo_licenses: ["MIT"]
max_issues_count: 16
max_issues_repo_issues_event_min_datetime: 2019-11-15T08:45:33.000Z
max_issues_repo_issues_event_max_datetime: 2021-06-10T18:06:03.000Z
max_forks_repo_path: analysis_module/analyzer/AnalyzerDatabaseManager_tmp.py
max_forks_repo_name: ria-ee/monitor
max_forks_repo_head_hexsha: d5cb9384abf38394b35e760729649136cbbc7548
max_forks_repo_licenses: ["MIT"]
max_forks_count: 13
max_forks_repo_forks_event_min_datetime: 2017-11-22T08:46:57.000Z
max_forks_repo_forks_event_max_datetime: 2021-12-16T06:51:07.000Z
content:
# _tmp_
import datetime
from copy import deepcopy
# _tmp_
from pymongo import MongoClient
import pymongo
import pandas as pd
import numpy as np
import sys
pd.options.mode.chained_assignment = None
class AnalyzerDatabaseManager(object):
def __init__(self, db_config, config):
self._db_config = db_config
self._config = config
def aggregate_data(self, model_type, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[],
metric=None, threshold=None):
if model_type == "failed_request_ratio":
return self._aggregate_data_for_failed_request_ratio_model(agg_minutes=agg_minutes, start_time=start_time,
end_time=end_time, ids_to_exclude=ids_to_exclude)
elif model_type == "duplicate_message_ids":
return self._aggregate_data_for_duplicate_message_id_model(agg_minutes=agg_minutes, start_time=start_time,
end_time=end_time, ids_to_exclude=ids_to_exclude)
elif model_type == "time_sync_errors":
return self._aggregate_data_for_time_sync_model(relevant_metric=metric, threshold=threshold,
agg_minutes=agg_minutes, start_time=start_time, end_time=end_time,
ids_to_exclude=ids_to_exclude)
else:
return None
def aggregate_data_for_historic_averages_model(self, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[], service_calls=None):
# create connection
clean_data = self._get_clean_data_collection()
        # nested fields need to be projected (select field from client if it exists, else from producer)
project_dict = self._get_clean_data_projection_dict()
# conditions to filter the data before processing
filter_dict_elems = [{'succeeded': True, 'correctorStatus': 'done'}]
if len(ids_to_exclude) > 0:
id_exclude_query = {'_id': {'$nin': ids_to_exclude}}
filter_dict_elems.append(id_exclude_query)
if start_time is not None:
start_time_query = {self._config.timestamp_field: {"$gte": start_time}}
filter_dict_elems.append(start_time_query)
if end_time is not None:
end_time_query = {self._config.timestamp_field: {"$lt": end_time}}
filter_dict_elems.append(end_time_query)
if service_calls is not None and len(service_calls) > 0:
for col in self._config.service_call_fields:
service_calls.loc[service_calls[col] == "-", col] = None
service_call_query = {"$or": service_calls.to_dict(orient="records")}
filter_dict_elems.append(service_call_query)
if len(filter_dict_elems) == 1:
filter_dict = filter_dict_elems[0]
elif len(filter_dict_elems) > 1:
filter_dict = {"$and": filter_dict_elems}
# set up elements to group by (service call fields and temporal aggregation window)
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
group_dict[self._config.timestamp_field] = {
"$subtract": [
"$%s" % self._config.timestamp_field,
{"$mod": ["$%s" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}
]}
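        # $subtract/$mod floors each timestamp to the start of its aggregation
        # window: with agg_minutes=60 the window is 1000 * 60 * 60 = 3,600,000 ms wide,
        # so e.g. 1609459965000 maps to 1609459965000 - (1609459965000 % 3600000) = 1609459200000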
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {
"_id": group_dict,
"request_count": {"$sum": 1},
"mean_request_size": {"$avg": "$requestSize"},
"mean_response_size": {"$avg": "$responseSize"},
"mean_client_duration": {"$avg": "$totalDuration"},
"mean_producer_duration": {"$avg": "$producerDurationProducerView"},
"request_ids": {"$push": "$_id"}}}],
allowDiskUse=True, maxTimeMS=14400000)
        # _tmp_
        print(datetime.datetime.now().strftime('%H:%M:%S') + " aggregate_data_for_historic_averages_model_start ")
        results = []
        for item_tmp in res:
            # print(datetime.datetime.now().strftime('%H:%M:%S') + " aggregate_data_for_historic_averages_model " + str(item_tmp))
            results.append(item_tmp)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " aggregate_data_for_historic_averages_model_end ")
        # _tmp_
        # return self._generate_dataframe(list(res))
        return self._generate_dataframe(results)
def add_first_request_timestamps_from_clean_data(self, data=None):
# create connection
clean_data = self._get_clean_data_collection()
        # nested fields need to be projected (select field from client if it exists, else from producer)
project_dict = self._get_clean_data_projection_dict()
# conditions to filter the data before processing
filter_dict = {'correctorStatus': 'done'}
if data is not None:
for col in self._config.service_call_fields:
data.loc[data[col] == "-", col] = None
filter_dict["$or"] = data.to_dict(orient="records")
# set up elements to group by (service call fields and temporal aggregation window)
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {
"_id": group_dict,
self._config.timestamp_field: {"$min": "$%s" % self._config.timestamp_field}}}],
allowDiskUse=True, maxTimeMS=14400000)
        # _tmp_
        results = []
        print(datetime.datetime.now().strftime('%H:%M:%S') + " add_first_request_timestamps_from_clean_data_start ")
        for item_tmp in res:
            # print(datetime.datetime.now().strftime('%H:%M:%S') + " add_first_request_timestamps_from_clean_data " + str(item_tmp))
            results.append(item_tmp)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " add_first_request_timestamps_from_clean_data_end ")
        # _tmp_
        # res = list(res)
        res = deepcopy(results)
if len(res) == 0:
return
# res = self._generate_dataframe(list(res))
res = self._generate_dataframe(res)
res = res.sort_values(self._config.timestamp_field, ascending=True).drop_duplicates(self._config.service_call_fields)
# exclude service calls that already exist in the first timestamps table
existing_first_timestamps = self.get_first_timestamps_for_service_calls()
if len(existing_first_timestamps) > 0:
res = res.merge(existing_first_timestamps[self._config.service_call_fields + ["first_request_timestamp"]],
on=self._config.service_call_fields, how="left")
res = res[pd.isnull(res.first_request_timestamp)].drop("first_request_timestamp", axis=1)
res = res.rename(columns={self._config.timestamp_field: "first_request_timestamp"})
res.first_request_timestamp = pd.to_datetime(res.first_request_timestamp, unit='ms')
res = res.assign(first_incident_timestamp=None)
res = res.assign(first_model_retrain_timestamp=None)
res = res.assign(first_model_train_timestamp=None)
# add new service calls
scft = self._get_service_call_first_timestamps_collection()
if len(res) > 0:
scft.insert_many(res.to_dict('records'))
def update_first_timestamps(self, field, value, service_calls=None):
scft = self._get_service_call_first_timestamps_collection()
        # Collection.update() was removed in PyMongo 4; update_many() is the equivalent call
        scft.update_many({"$or": service_calls.to_dict(orient="records")}, {"$set": {field: value}}, upsert=False)
def update_first_train_retrain_timestamps(self, sc_first_model, sc_second_model, current_time):
if len(sc_first_model) > 0:
self.update_first_timestamps(field="first_model_train_timestamp",
value=current_time,
service_calls=sc_first_model[self._config.service_call_fields])
if len(sc_second_model) > 0:
self.update_first_timestamps(field="first_model_retrain_timestamp",
value=current_time,
service_calls=sc_second_model[self._config.service_call_fields])
def _aggregate_data_for_failed_request_ratio_model(self, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[]):
# create connection
clean_data = self._get_clean_data_collection()
        # nested fields need to be projected (select field from client if it exists, else from producer)
project_dict = self._get_clean_data_projection_dict()
filter_dict_elems = [{'correctorStatus': 'done'}]
# conditions to filter the data before processing
if len(ids_to_exclude) > 0:
id_exclude_query = {'_id': {'$nin': ids_to_exclude}}
filter_dict_elems.append(id_exclude_query)
if start_time is not None:
start_time_query = {self._config.timestamp_field: {"$gte": start_time}}
filter_dict_elems.append(start_time_query)
if end_time is not None:
end_time_query = {self._config.timestamp_field: {"$lt": end_time}}
filter_dict_elems.append(end_time_query)
if len(filter_dict_elems) == 1:
filter_dict = filter_dict_elems[0]
elif len(filter_dict_elems) > 1:
filter_dict = {"$and": filter_dict_elems}
else:
filter_dict = {}
# set up elements to group by (service call fields and temporal aggregation window)
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
group_dict[self._config.timestamp_field] = {
"$subtract": [
"$%s" % self._config.timestamp_field,
{"$mod": ["$%s" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}
]}
group_dict['succeeded'] = '$succeeded'
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {
"_id": group_dict,
'count': {'$sum': 1},
"request_ids": {"$push": "$_id"}}}],
allowDiskUse=True, maxTimeMS=14400000)
        # _tmp_
        print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_failed_request_ratio_model_start ")
        results = []
        for item_tmp in res:
            # print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_failed_request_ratio_model " + str(item_tmp))
            results.append(item_tmp)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_failed_request_ratio_model_end ")
        # _tmp_
        # return self._generate_dataframe(list(res))
        return self._generate_dataframe(results)
def _aggregate_data_for_duplicate_message_id_model(self, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[]):
# create connection
clean_data = self._get_clean_data_collection()
        # nested fields need to be projected (select field from client if it exists, else from producer)
project_dict = self._get_clean_data_projection_dict()
# conditions to filter the data before processing
filter_dict_elems = [{'succeeded': True, 'correctorStatus': 'done'}]
if len(ids_to_exclude) > 0:
id_exclude_query = {'_id': {'$nin': ids_to_exclude}}
filter_dict_elems.append(id_exclude_query)
if start_time is not None:
start_time_query = {self._config.timestamp_field: {"$gte": start_time}}
filter_dict_elems.append(start_time_query)
if end_time is not None:
end_time_query = {self._config.timestamp_field: {"$lt": end_time}}
filter_dict_elems.append(end_time_query)
if len(filter_dict_elems) == 1:
filter_dict = filter_dict_elems[0]
elif len(filter_dict_elems) > 1:
filter_dict = {"$and": filter_dict_elems}
# set up elements to group by (service call fields and temporal aggregation window)
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
group_dict[self._config.timestamp_field] = {
"$subtract": [
"$%s" % self._config.timestamp_field,
{"$mod": ["$%s" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}
]}
group_dict['messageId'] = '$messageId'
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {"_id": group_dict,
'message_id_count': {'$sum': 1},
"request_ids": {"$push": "$_id"}}},
{'$match': {'message_id_count': {"$gt": 1}}}],
allowDiskUse=True, maxTimeMS=14400000)
        # _tmp_
        print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_duplicate_message_id_model_start ")
        results = []
        for item_tmp in res:
            # print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_duplicate_message_id_model " + str(item_tmp))
            results.append(item_tmp)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_duplicate_message_id_model_end ")
        # _tmp_
        # return self._generate_dataframe(list(res))
        return self._generate_dataframe(results)
def _aggregate_data_for_time_sync_model(self, relevant_metric, threshold, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[]):
# create connection
clean_data = self._get_clean_data_collection()
        # nested fields need to be projected (select field from client if it exists, else from producer)
project_dict = self._get_clean_data_projection_dict()
# conditions to filter the data before processing
filter_dict_elems = [{'succeeded': True, 'correctorStatus': 'done'}]
if len(ids_to_exclude) > 0:
id_exclude_query = {'_id': {'$nin': ids_to_exclude}}
filter_dict_elems.append(id_exclude_query)
if start_time is not None:
start_time_query = {self._config.timestamp_field: {"$gte": start_time}}
filter_dict_elems.append(start_time_query)
if end_time is not None:
end_time_query = {self._config.timestamp_field: {"$lt": end_time}}
filter_dict_elems.append(end_time_query)
if len(filter_dict_elems) == 1:
filter_dict = filter_dict_elems[0]
elif len(filter_dict_elems) > 1:
filter_dict = {"$and": filter_dict_elems}
# set up elements to group by (service call fields and temporal aggregation window)
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
group_dict[self._config.timestamp_field] = {
"$subtract": [
"$%s" % self._config.timestamp_field,
{"$mod": ["$%s" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}
]}
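        # The pipeline below buckets requests into time windows, unwinds the
        # per-request metric values, keeps only those below the threshold, and
        # re-groups to count erroneous requests per window next to the original request_count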
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {"_id": group_dict,
'request_count': {'$sum': 1},
"docs": {"$push":
{relevant_metric: "$%s" % relevant_metric,
"id": "$_id"}}}},
{"$unwind": "$docs"},
{'$match': {'docs.%s' % relevant_metric: {"$lt": threshold}}},
{'$group': {"_id": "$_id",
'erroneous_count': {'$sum': 1},
'avg_erroneous_diff': {'$avg': '$docs.%s' % relevant_metric},
"request_count": {"$first": "$request_count"},
"request_ids": {"$push": "$docs.id"}}}
], allowDiskUse=True, maxTimeMS=14400000)
        # _tmp_
        print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_time_sync_model_start ")
        results = []
        for item_tmp in res:
            # print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_time_sync_model " + str(item_tmp))
            results.append(item_tmp)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_time_sync_model_end ")
        # _tmp_
        # return self._generate_dataframe(list(res))
        return self._generate_dataframe(results)
def get_request_ids_from_incidents(self, incident_status=["new", "showed", "normal", "incident", "viewed"],
relevant_anomalous_metrics=None, max_incident_creation_timestamp=None):
filter_dict = {"incident_status": {"$in": incident_status}}
if relevant_anomalous_metrics is not None:
filter_dict["anomalous_metric"] = {"$in": relevant_anomalous_metrics}
if max_incident_creation_timestamp is not None:
filter_dict["incident_creation_timestamp"] = {"$lte": max_incident_creation_timestamp}
incident_collection = self._get_incident_collection()
# request_ids = incident_collection.distinct("request_ids", filter_dict)
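        # Grouping via aggregate() presumably sidesteps the 16MB BSON size limit
        # that applies to distinct() results (an assumption; the reason for
        # replacing the distinct() call above is not stated)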
request_ids = [doc['_id'] for doc in incident_collection.aggregate([{'$match': filter_dict}, {'$group': {'_id': '$request_ids'}}], allowDiskUse=True)]
return request_ids
def delete_incidents(self, field=None, value=None):
incident_collection = self._get_incident_collection()
if field is None or value is None:
incident_collection.delete_many({})
else:
incident_collection.delete_many({field: value})
def insert_incidents(self, dt_incidents):
incident_collection = self._get_incident_collection()
incident_collection.insert_many(dt_incidents.to_dict('records'))
def get_timestamp(self, ts_type, model_type):
ts_collection = self._get_incident_timestamp_collection()
ts = ts_collection.find_one({"type": ts_type, "model": model_type})
if ts:
return ts["timestamp"]
return ts
def load_model(self, model_name, version=None):
incident_model_collection = self._get_incident_model_collection()
filter_dict = {"model_name": model_name}
if version is not None:
filter_dict["version"] = version
result = incident_model_collection.find(filter_dict)
        # _tmp_
        print(datetime.datetime.now().strftime('%H:%M:%S') + " load_model_start ")
        results = []
        for item_tmp in result:
            # print(datetime.datetime.now().strftime('%H:%M:%S') + " load_model " + str(item_tmp))
            results.append(item_tmp)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " load_model_end ")
        # _tmp_
        # return pd.DataFrame(list(result)).drop("_id", axis=1)
        return pd.DataFrame(results).drop("_id", axis=1)
def save_model(self, df, delete_old_version=True):
incident_model_collection = self._get_incident_model_collection()
df = df.to_dict('records')
if delete_old_version and len(df) > 0:
model_name = df[0]["model_name"]
incident_model_collection.delete_many({"model_name": model_name})
incident_model_collection.insert_many(df)
def set_timestamp(self, ts_type, model_type, value):
ts_collection = self._get_incident_timestamp_collection()
        # Collection.update() with a full replacement document maps to replace_one() in PyMongo 3+
        ts_collection.replace_one({"type": ts_type, "model": model_type},
                                  {"type": ts_type, "model": model_type, "timestamp": value},
                                  upsert=True)
def get_first_timestamps_for_service_calls(self):
scft = self._get_service_call_first_timestamps_collection()
        # results = list(scft.find())
        # _tmp_
        print(datetime.datetime.now().strftime('%H:%M:%S') + " get_first_timestamps_for_service_calls_start1 ")
        results = []
        results_tmp = scft.find()
        print(datetime.datetime.now().strftime('%H:%M:%S') + " get_first_timestamps_for_service_calls_start2 ")
        for item_tmp in results_tmp:
            # print(datetime.datetime.now().strftime('%H:%M:%S') + " get_first_timestamps_for_service_calls " + str(item_tmp))
            results.append(item_tmp)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " get_first_timestamps_for_service_calls_end ")
        # _tmp_
if len(results) == 0:
return pd.DataFrame()
data = pd.DataFrame(results).drop("_id", axis=1)
for col in ["first_request_timestamp", "first_model_train_timestamp", "first_incident_timestamp",
"first_model_retrain_timestamp"]:
data.loc[:, col] = pd.to_datetime(data.loc[:, col])
return data
def get_service_calls_for_train_stages(self, time_first_model, time_second_model):
first_timestamps = self.get_first_timestamps_for_service_calls()
if len(first_timestamps) == 0:
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
first_model_to_be_trained = first_timestamps[(pd.isnull(first_timestamps.first_model_train_timestamp)) &
(first_timestamps.first_request_timestamp <= time_first_model)]
model_to_be_retrained = first_timestamps[(pd.isnull(first_timestamps.first_model_retrain_timestamp)) &
(first_timestamps.first_incident_timestamp <= time_second_model)]
first_timestamps = first_timestamps[~pd.isnull(first_timestamps.first_model_retrain_timestamp)]
return first_timestamps, first_model_to_be_trained, model_to_be_retrained
def get_service_calls_for_transform_stages(self):
first_timestamps = self.get_first_timestamps_for_service_calls()
first_incidents_to_be_reported = first_timestamps[(pd.isnull(first_timestamps.first_incident_timestamp)) &
(~pd.isnull(first_timestamps.first_model_train_timestamp))]
regular_service_calls = first_timestamps[~pd.isnull(first_timestamps.first_incident_timestamp)]
return regular_service_calls, first_incidents_to_be_reported
def get_data_for_train_stages(self, sc_regular, sc_first_model, sc_second_model, relevant_anomalous_metrics,
max_incident_creation_timestamp, last_fit_timestamp, agg_minutes, max_request_timestamp):
# exclude requests that are part of a "true" incident
ids_to_exclude = self.get_request_ids_from_incidents(
incident_status=["incident"],
relevant_anomalous_metrics=relevant_anomalous_metrics,
max_incident_creation_timestamp=max_incident_creation_timestamp)
# make the timestamps correspond to the millisecond format
if max_request_timestamp is not None:
max_request_timestamp = max_request_timestamp.timestamp() * 1000
if last_fit_timestamp is not None:
last_fit_timestamp = last_fit_timestamp.timestamp() * 1000
data_regular = pd.DataFrame()
data_first_train = pd.DataFrame()
data_first_retrain = pd.DataFrame()
# for the first-time training, don't exclude anything
if len(sc_first_model) > 0:
if len(sc_first_model) > 100:
data_first_train = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,
end_time=max_request_timestamp)
if len(data_first_train) > 0:
data_first_train = data_first_train.merge(sc_first_model[self._config.service_call_fields])
else:
data_first_train = self.aggregate_data_for_historic_averages_model(
agg_minutes=agg_minutes,
end_time=max_request_timestamp,
service_calls=sc_first_model[self._config.service_call_fields])
# for the second model, exclude queries that were marked as "incident" after the first training,
# but don't limit the start time
if len(sc_second_model) > 0:
if len(sc_second_model) > 100:
data_first_retrain = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,
end_time=max_request_timestamp,
ids_to_exclude=ids_to_exclude)
if len(data_first_retrain) > 0:
data_first_retrain = data_first_retrain.merge(sc_second_model[self._config.service_call_fields])
else:
data_first_retrain = self.aggregate_data_for_historic_averages_model(
agg_minutes=agg_minutes,
service_calls=sc_second_model[self._config.service_call_fields],
end_time=max_request_timestamp,
ids_to_exclude=ids_to_exclude)
# for regular training, exclude the incidents and limit the start time
if len(sc_regular) > 0:
data_regular = self.aggregate_data_for_historic_averages_model(
agg_minutes=agg_minutes,
start_time=last_fit_timestamp,
end_time=max_request_timestamp,
ids_to_exclude=ids_to_exclude)
if len(data_regular) > 0:
data_regular = data_regular.merge(sc_regular[self._config.service_call_fields])
return data_regular, data_first_train, data_first_retrain
def get_data_for_transform_stages(self, agg_minutes, last_transform_timestamp, current_transform_timestamp,
sc_regular, sc_first_incidents):
data_regular = pd.DataFrame()
data_first_incidents = pd.DataFrame()
# retrieve all data that have appeared after the last transform time
data = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,
start_time=last_transform_timestamp,
end_time=current_transform_timestamp)
if len(data) > 0:
# exclude service calls that are not past the training period
data_regular = data.merge(sc_regular[self._config.service_call_fields])
if len(sc_first_incidents) > 100:
            # for first-time incident reporting, retrieve all data for these service calls
data_first_incidents = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,
end_time=current_transform_timestamp)
if len(data_first_incidents) > 0:
data_first_incidents = data_first_incidents.merge(sc_first_incidents[self._config.service_call_fields])
elif len(sc_first_incidents) > 0:
data_first_incidents = self.aggregate_data_for_historic_averages_model(
agg_minutes=agg_minutes,
end_time=current_transform_timestamp,
service_calls=sc_first_incidents[self._config.service_call_fields])
return pd.concat([data_regular, data_first_incidents])
def _get_incident_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_AD]
return db.incident
def _get_incident_model_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_AD]
return db.incident_model
def _get_incident_timestamp_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_AD]
return db.incident_timestamps
def _get_service_call_first_timestamps_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_AD]
return db.service_call_first_timestamps
def _get_clean_data_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_QD]
return db.clean_data
def _get_clean_data_projection_dict(self):
project_dict = {col: {"$ifNull": ["$client.%s" % col, "$producer.%s" % col]}
for col in self._config.relevant_cols_nested}
for col, field1, field2 in self._config.relevant_cols_general_alternative:
project_dict[col] = {"$ifNull": ["$%s" % field1, "$%s" % field2]}
for col in self._config.relevant_cols_general:
project_dict[col] = "$%s" % col
return project_dict
def _generate_dataframe(self, result):
data = pd.DataFrame(result)
if len(data) > 0:
data = pd.concat([data, pd.DataFrame(list(data["_id"]))], axis=1)
data = data.drop(["_id"], axis=1)
data.loc[:, self._config.timestamp_field] = pd.to_datetime(data.loc[:, self._config.timestamp_field], unit='ms')
for col in self._config.service_call_fields:
data.loc[:, col] = data.loc[:, col].fillna("-")
return data
avg_line_length: 50.876923
max_line_length: 158
alphanum_fraction: 0.629977
content_no_comment:
import datetime
from copy import deepcopy
from pymongo import MongoClient
import pymongo
import pandas as pd
import numpy as np
import sys
pd.options.mode.chained_assignment = None
class AnalyzerDatabaseManager(object):
def __init__(self, db_config, config):
self._db_config = db_config
self._config = config
def aggregate_data(self, model_type, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[],
metric=None, threshold=None):
if model_type == "failed_request_ratio":
return self._aggregate_data_for_failed_request_ratio_model(agg_minutes=agg_minutes, start_time=start_time,
end_time=end_time, ids_to_exclude=ids_to_exclude)
elif model_type == "duplicate_message_ids":
return self._aggregate_data_for_duplicate_message_id_model(agg_minutes=agg_minutes, start_time=start_time,
end_time=end_time, ids_to_exclude=ids_to_exclude)
elif model_type == "time_sync_errors":
return self._aggregate_data_for_time_sync_model(relevant_metric=metric, threshold=threshold,
agg_minutes=agg_minutes, start_time=start_time, end_time=end_time,
ids_to_exclude=ids_to_exclude)
else:
return None
def aggregate_data_for_historic_averages_model(self, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[], service_calls=None):
clean_data = self._get_clean_data_collection()
project_dict = self._get_clean_data_projection_dict()
filter_dict_elems = [{'succeeded': True, 'correctorStatus': 'done'}]
if len(ids_to_exclude) > 0:
id_exclude_query = {'_id': {'$nin': ids_to_exclude}}
filter_dict_elems.append(id_exclude_query)
if start_time is not None:
start_time_query = {self._config.timestamp_field: {"$gte": start_time}}
filter_dict_elems.append(start_time_query)
if end_time is not None:
end_time_query = {self._config.timestamp_field: {"$lt": end_time}}
filter_dict_elems.append(end_time_query)
if service_calls is not None and len(service_calls) > 0:
for col in self._config.service_call_fields:
service_calls.loc[service_calls[col] == "-", col] = None
service_call_query = {"$or": service_calls.to_dict(orient="records")}
filter_dict_elems.append(service_call_query)
if len(filter_dict_elems) == 1:
filter_dict = filter_dict_elems[0]
elif len(filter_dict_elems) > 1:
filter_dict = {"$and": filter_dict_elems}
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
group_dict[self._config.timestamp_field] = {
"$subtract": [
"$%s" % self._config.timestamp_field,
{"$mod": ["$%s" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}
]}
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {
"_id": group_dict,
"request_count": {"$sum": 1},
"mean_request_size": {"$avg": "$requestSize"},
"mean_response_size": {"$avg": "$responseSize"},
"mean_client_duration": {"$avg": "$totalDuration"},
"mean_producer_duration": {"$avg": "$producerDurationProducerView"},
"request_ids": {"$push": "$_id"}}}],
allowDiskUse=True, maxTimeMS=14400000)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " aggregate_data_for_historic_averages_model_start ")
        results = []
        for item_tmp in res:
            results.append(item_tmp)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " aggregate_data_for_historic_averages_model_end ")
return self._generate_dataframe(results)
def add_first_request_timestamps_from_clean_data(self, data=None):
clean_data = self._get_clean_data_collection()
project_dict = self._get_clean_data_projection_dict()
filter_dict = {'correctorStatus': 'done'}
if data is not None:
for col in self._config.service_call_fields:
data.loc[data[col] == "-", col] = None
filter_dict["$or"] = data.to_dict(orient="records")
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {
"_id": group_dict,
self._config.timestamp_field: {"$min": "$%s" % self._config.timestamp_field}}}],
allowDiskUse=True, maxTimeMS=14400000)
        results = []
        print(datetime.datetime.now().strftime('%H:%M:%S') + " add_first_request_timestamps_from_clean_data_start ")
        for item_tmp in res:
            results.append(item_tmp)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " add_first_request_timestamps_from_clean_data_end ")
res = deepcopy(results)
if len(res) == 0:
return
res = self._generate_dataframe(res)
res = res.sort_values(self._config.timestamp_field, ascending=True).drop_duplicates(self._config.service_call_fields)
existing_first_timestamps = self.get_first_timestamps_for_service_calls()
if len(existing_first_timestamps) > 0:
res = res.merge(existing_first_timestamps[self._config.service_call_fields + ["first_request_timestamp"]],
on=self._config.service_call_fields, how="left")
res = res[pd.isnull(res.first_request_timestamp)].drop("first_request_timestamp", axis=1)
res = res.rename(columns={self._config.timestamp_field: "first_request_timestamp"})
res.first_request_timestamp = pd.to_datetime(res.first_request_timestamp, unit='ms')
res = res.assign(first_incident_timestamp=None)
res = res.assign(first_model_retrain_timestamp=None)
res = res.assign(first_model_train_timestamp=None)
scft = self._get_service_call_first_timestamps_collection()
if len(res) > 0:
scft.insert_many(res.to_dict('records'))
def update_first_timestamps(self, field, value, service_calls=None):
scft = self._get_service_call_first_timestamps_collection()
        scft.update_many({"$or": service_calls.to_dict(orient="records")}, {"$set": {field: value}}, upsert=False)
def update_first_train_retrain_timestamps(self, sc_first_model, sc_second_model, current_time):
if len(sc_first_model) > 0:
self.update_first_timestamps(field="first_model_train_timestamp",
value=current_time,
service_calls=sc_first_model[self._config.service_call_fields])
if len(sc_second_model) > 0:
self.update_first_timestamps(field="first_model_retrain_timestamp",
value=current_time,
service_calls=sc_second_model[self._config.service_call_fields])
def _aggregate_data_for_failed_request_ratio_model(self, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[]):
clean_data = self._get_clean_data_collection()
project_dict = self._get_clean_data_projection_dict()
filter_dict_elems = [{'correctorStatus': 'done'}]
if len(ids_to_exclude) > 0:
id_exclude_query = {'_id': {'$nin': ids_to_exclude}}
filter_dict_elems.append(id_exclude_query)
if start_time is not None:
start_time_query = {self._config.timestamp_field: {"$gte": start_time}}
filter_dict_elems.append(start_time_query)
if end_time is not None:
end_time_query = {self._config.timestamp_field: {"$lt": end_time}}
filter_dict_elems.append(end_time_query)
if len(filter_dict_elems) == 1:
filter_dict = filter_dict_elems[0]
elif len(filter_dict_elems) > 1:
filter_dict = {"$and": filter_dict_elems}
else:
filter_dict = {}
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
group_dict[self._config.timestamp_field] = {
"$subtract": [
"$%s" % self._config.timestamp_field,
{"$mod": ["$%s" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}
]}
group_dict['succeeded'] = '$succeeded'
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {
"_id": group_dict,
'count': {'$sum': 1},
"request_ids": {"$push": "$_id"}}}],
allowDiskUse=True, maxTimeMS=14400000)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_failed_request_ratio_model_start ")
        results = []
        for item_tmp in res:
            results.append(item_tmp)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_failed_request_ratio_model_end ")
return self._generate_dataframe(results)
def _aggregate_data_for_duplicate_message_id_model(self, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[]):
clean_data = self._get_clean_data_collection()
project_dict = self._get_clean_data_projection_dict()
filter_dict_elems = [{'succeeded': True, 'correctorStatus': 'done'}]
if len(ids_to_exclude) > 0:
id_exclude_query = {'_id': {'$nin': ids_to_exclude}}
filter_dict_elems.append(id_exclude_query)
if start_time is not None:
start_time_query = {self._config.timestamp_field: {"$gte": start_time}}
filter_dict_elems.append(start_time_query)
if end_time is not None:
end_time_query = {self._config.timestamp_field: {"$lt": end_time}}
filter_dict_elems.append(end_time_query)
if len(filter_dict_elems) == 1:
filter_dict = filter_dict_elems[0]
elif len(filter_dict_elems) > 1:
filter_dict = {"$and": filter_dict_elems}
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
group_dict[self._config.timestamp_field] = {
"$subtract": [
"$%s" % self._config.timestamp_field,
{"$mod": ["$%s" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}
]}
group_dict['messageId'] = '$messageId'
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {"_id": group_dict,
'message_id_count': {'$sum': 1},
"request_ids": {"$push": "$_id"}}},
{'$match': {'message_id_count': {"$gt": 1}}}],
allowDiskUse=True, maxTimeMS=14400000)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_duplicate_message_id_model_start ")
        results = []
        for item_tmp in res:
            results.append(item_tmp)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_duplicate_message_id_model_end ")
return self._generate_dataframe(results)
def _aggregate_data_for_time_sync_model(self, relevant_metric, threshold, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[]):
clean_data = self._get_clean_data_collection()
project_dict = self._get_clean_data_projection_dict()
filter_dict_elems = [{'succeeded': True, 'correctorStatus': 'done'}]
if len(ids_to_exclude) > 0:
id_exclude_query = {'_id': {'$nin': ids_to_exclude}}
filter_dict_elems.append(id_exclude_query)
if start_time is not None:
start_time_query = {self._config.timestamp_field: {"$gte": start_time}}
filter_dict_elems.append(start_time_query)
if end_time is not None:
end_time_query = {self._config.timestamp_field: {"$lt": end_time}}
filter_dict_elems.append(end_time_query)
if len(filter_dict_elems) == 1:
filter_dict = filter_dict_elems[0]
elif len(filter_dict_elems) > 1:
filter_dict = {"$and": filter_dict_elems}
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
group_dict[self._config.timestamp_field] = {
"$subtract": [
"$%s" % self._config.timestamp_field,
{"$mod": ["$%s" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}
]}
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {"_id": group_dict,
'request_count': {'$sum': 1},
"docs": {"$push":
{relevant_metric: "$%s" % relevant_metric,
"id": "$_id"}}}},
{"$unwind": "$docs"},
{'$match': {'docs.%s' % relevant_metric: {"$lt": threshold}}},
{'$group': {"_id": "$_id",
'erroneous_count': {'$sum': 1},
'avg_erroneous_diff': {'$avg': '$docs.%s' % relevant_metric},
"request_count": {"$first": "$request_count"},
"request_ids": {"$push": "$docs.id"}}}
], allowDiskUse=True, maxTimeMS=14400000)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_time_sync_model_start ")
        results = []
        for item_tmp in res:
            results.append(item_tmp)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " _aggregate_data_for_time_sync_model_end ")
return self._generate_dataframe(results)
def get_request_ids_from_incidents(self, incident_status=["new", "showed", "normal", "incident", "viewed"],
relevant_anomalous_metrics=None, max_incident_creation_timestamp=None):
filter_dict = {"incident_status": {"$in": incident_status}}
if relevant_anomalous_metrics is not None:
filter_dict["anomalous_metric"] = {"$in": relevant_anomalous_metrics}
if max_incident_creation_timestamp is not None:
filter_dict["incident_creation_timestamp"] = {"$lte": max_incident_creation_timestamp}
incident_collection = self._get_incident_collection()
request_ids = [doc['_id'] for doc in incident_collection.aggregate([{'$match': filter_dict}, {'$group': {'_id': '$request_ids'}}], allowDiskUse=True)]
return request_ids
def delete_incidents(self, field=None, value=None):
incident_collection = self._get_incident_collection()
if field is None or value is None:
incident_collection.delete_many({})
else:
incident_collection.delete_many({field: value})
def insert_incidents(self, dt_incidents):
incident_collection = self._get_incident_collection()
incident_collection.insert_many(dt_incidents.to_dict('records'))
def get_timestamp(self, ts_type, model_type):
ts_collection = self._get_incident_timestamp_collection()
ts = ts_collection.find_one({"type": ts_type, "model": model_type})
if ts:
return ts["timestamp"]
return ts
def load_model(self, model_name, version=None):
incident_model_collection = self._get_incident_model_collection()
filter_dict = {"model_name": model_name}
if version is not None:
filter_dict["version"] = version
result = incident_model_collection.find(filter_dict)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " load_model_start ")
        results = []
        for item_tmp in result:
            results.append(item_tmp)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " load_model_end ")
return pd.DataFrame(results).drop("_id", axis=1)
def save_model(self, df, delete_old_version=True):
incident_model_collection = self._get_incident_model_collection()
df = df.to_dict('records')
if delete_old_version and len(df) > 0:
model_name = df[0]["model_name"]
incident_model_collection.delete_many({"model_name": model_name})
incident_model_collection.insert_many(df)
def set_timestamp(self, ts_type, model_type, value):
ts_collection = self._get_incident_timestamp_collection()
        ts_collection.replace_one({"type": ts_type, "model": model_type},
                                  {"type": ts_type, "model": model_type, "timestamp": value},
                                  upsert=True)
def get_first_timestamps_for_service_calls(self):
scft = self._get_service_call_first_timestamps_collection()
        print(datetime.datetime.now().strftime('%H:%M:%S') + " get_first_timestamps_for_service_calls_start1 ")
        results = []
        results_tmp = scft.find()
        print(datetime.datetime.now().strftime('%H:%M:%S') + " get_first_timestamps_for_service_calls_start2 ")
        for item_tmp in results_tmp:
            results.append(item_tmp)
        print(datetime.datetime.now().strftime('%H:%M:%S') + " get_first_timestamps_for_service_calls_end ")
if len(results) == 0:
return pd.DataFrame()
data = pd.DataFrame(results).drop("_id", axis=1)
for col in ["first_request_timestamp", "first_model_train_timestamp", "first_incident_timestamp",
"first_model_retrain_timestamp"]:
data.loc[:, col] = pd.to_datetime(data.loc[:, col])
return data
def get_service_calls_for_train_stages(self, time_first_model, time_second_model):
first_timestamps = self.get_first_timestamps_for_service_calls()
if len(first_timestamps) == 0:
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
first_model_to_be_trained = first_timestamps[(pd.isnull(first_timestamps.first_model_train_timestamp)) &
(first_timestamps.first_request_timestamp <= time_first_model)]
model_to_be_retrained = first_timestamps[(pd.isnull(first_timestamps.first_model_retrain_timestamp)) &
(first_timestamps.first_incident_timestamp <= time_second_model)]
first_timestamps = first_timestamps[~pd.isnull(first_timestamps.first_model_retrain_timestamp)]
return first_timestamps, first_model_to_be_trained, model_to_be_retrained
def get_service_calls_for_transform_stages(self):
first_timestamps = self.get_first_timestamps_for_service_calls()
first_incidents_to_be_reported = first_timestamps[(pd.isnull(first_timestamps.first_incident_timestamp)) &
(~pd.isnull(first_timestamps.first_model_train_timestamp))]
regular_service_calls = first_timestamps[~pd.isnull(first_timestamps.first_incident_timestamp)]
return regular_service_calls, first_incidents_to_be_reported
def get_data_for_train_stages(self, sc_regular, sc_first_model, sc_second_model, relevant_anomalous_metrics,
max_incident_creation_timestamp, last_fit_timestamp, agg_minutes, max_request_timestamp):
ids_to_exclude = self.get_request_ids_from_incidents(
incident_status=["incident"],
relevant_anomalous_metrics=relevant_anomalous_metrics,
max_incident_creation_timestamp=max_incident_creation_timestamp)
if max_request_timestamp is not None:
max_request_timestamp = max_request_timestamp.timestamp() * 1000
if last_fit_timestamp is not None:
last_fit_timestamp = last_fit_timestamp.timestamp() * 1000
data_regular = pd.DataFrame()
data_first_train = pd.DataFrame()
data_first_retrain = pd.DataFrame()
if len(sc_first_model) > 0:
if len(sc_first_model) > 100:
data_first_train = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,
end_time=max_request_timestamp)
if len(data_first_train) > 0:
data_first_train = data_first_train.merge(sc_first_model[self._config.service_call_fields])
else:
data_first_train = self.aggregate_data_for_historic_averages_model(
agg_minutes=agg_minutes,
end_time=max_request_timestamp,
service_calls=sc_first_model[self._config.service_call_fields])
# for the second model, exclude queries that were marked as "incident" after the first training,
# but don't limit the start time
if len(sc_second_model) > 0:
if len(sc_second_model) > 100:
data_first_retrain = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,
end_time=max_request_timestamp,
ids_to_exclude=ids_to_exclude)
if len(data_first_retrain) > 0:
data_first_retrain = data_first_retrain.merge(sc_second_model[self._config.service_call_fields])
else:
data_first_retrain = self.aggregate_data_for_historic_averages_model(
agg_minutes=agg_minutes,
service_calls=sc_second_model[self._config.service_call_fields],
end_time=max_request_timestamp,
ids_to_exclude=ids_to_exclude)
if len(sc_regular) > 0:
data_regular = self.aggregate_data_for_historic_averages_model(
agg_minutes=agg_minutes,
start_time=last_fit_timestamp,
end_time=max_request_timestamp,
ids_to_exclude=ids_to_exclude)
if len(data_regular) > 0:
data_regular = data_regular.merge(sc_regular[self._config.service_call_fields])
return data_regular, data_first_train, data_first_retrain
def get_data_for_transform_stages(self, agg_minutes, last_transform_timestamp, current_transform_timestamp,
sc_regular, sc_first_incidents):
data_regular = pd.DataFrame()
data_first_incidents = pd.DataFrame()
data = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,
start_time=last_transform_timestamp,
end_time=current_transform_timestamp)
if len(data) > 0:
data_regular = data.merge(sc_regular[self._config.service_call_fields])
if len(sc_first_incidents) > 100:
data_first_incidents = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,
end_time=current_transform_timestamp)
if len(data_first_incidents) > 0:
data_first_incidents = data_first_incidents.merge(sc_first_incidents[self._config.service_call_fields])
elif len(sc_first_incidents) > 0:
data_first_incidents = self.aggregate_data_for_historic_averages_model(
agg_minutes=agg_minutes,
end_time=current_transform_timestamp,
service_calls=sc_first_incidents[self._config.service_call_fields])
return pd.concat([data_regular, data_first_incidents])
def _get_incident_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_AD]
return db.incident
def _get_incident_model_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_AD]
return db.incident_model
def _get_incident_timestamp_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_AD]
return db.incident_timestamps
def _get_service_call_first_timestamps_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_AD]
return db.service_call_first_timestamps
def _get_clean_data_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_QD]
return db.clean_data
def _get_clean_data_projection_dict(self):
project_dict = {col: {"$ifNull": ["$client.%s" % col, "$producer.%s" % col]}
for col in self._config.relevant_cols_nested}
for col, field1, field2 in self._config.relevant_cols_general_alternative:
project_dict[col] = {"$ifNull": ["$%s" % field1, "$%s" % field2]}
for col in self._config.relevant_cols_general:
project_dict[col] = "$%s" % col
return project_dict
def _generate_dataframe(self, result):
data = pd.DataFrame(result)
if len(data) > 0:
data = pd.concat([data, pd.DataFrame(list(data["_id"]))], axis=1)
data = data.drop(["_id"], axis=1)
data.loc[:, self._config.timestamp_field] = pd.to_datetime(data.loc[:, self._config.timestamp_field], unit='ms')
for col in self._config.service_call_fields:
data.loc[:, col] = data.loc[:, col].fillna("-")
return data
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: 790e7d287528a1269e09470762940c56aefc0e6c
size: 3,674
ext: py
lang: Python
max_stars_repo_path: monai/handlers/checkpoint_loader.py
max_stars_repo_name: BRAINSia/MONAI
max_stars_repo_head_hexsha: 04e1c345fc840f5a1b6504ee5857d5a9feb27d84
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: monai/handlers/checkpoint_loader.py
max_issues_repo_name: BRAINSia/MONAI
max_issues_repo_head_hexsha: 04e1c345fc840f5a1b6504ee5857d5a9feb27d84
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: monai/handlers/checkpoint_loader.py
max_forks_repo_name: BRAINSia/MONAI
max_forks_repo_head_hexsha: 04e1c345fc840f5a1b6504ee5857d5a9feb27d84
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, Optional
import torch
from monai.utils import exact_version, optional_import
Events, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Events")
Checkpoint, _ = optional_import("ignite.handlers", "0.3.0", exact_version, "Checkpoint")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine")
class CheckpointLoader:
"""
CheckpointLoader acts as an Ignite handler to load checkpoint data from file.
It can load variables for network, optimizer, lr_scheduler, etc.
    If a checkpoint was saved from a model wrapped in `torch.nn.DataParallel`, save
    `model.module` instead, as PyTorch recommends, and then use this loader to load the model.
Args:
load_path: the file path of checkpoint, it should be a PyTorch `pth` file.
        load_dict: target objects to load the checkpoint into. Example::
{'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler}
name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.
        map_location: when loading the module for distributed training/evaluation,
            provide an appropriate map_location argument to prevent a process
            from stepping into other processes' devices. If map_location is missing,
            torch.load will first load the module to CPU and then copy each
            parameter to where it was saved, which would result in all processes
            on the same machine using the same set of devices.
"""
def __init__(
self, load_path: str, load_dict: Dict, name: Optional[str] = None, map_location: Optional[Dict] = None,
) -> None:
assert load_path is not None, "must provide clear path to load checkpoint."
self.load_path = load_path
assert load_dict is not None and len(load_dict) > 0, "must provide target objects to load."
self.logger = logging.getLogger(name)
for k, v in load_dict.items():
if hasattr(v, "module"):
load_dict[k] = v.module
self.load_dict = load_dict
self._name = name
self.map_location = map_location
def attach(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
if self._name is None:
self.logger = engine.logger
engine.add_event_handler(Events.STARTED, self)
def __call__(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
checkpoint = torch.load(self.load_path, map_location=self.map_location)
if len(self.load_dict) == 1:
key = list(self.load_dict.keys())[0]
            if key not in checkpoint:
checkpoint = {key: checkpoint}
Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint)
self.logger.info(f"Restored all variables from {self.load_path}")
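# A minimal usage sketch (illustrative only, not part of the original file):
# the model, no-op engine and checkpoint path below are placeholders.
if __name__ == "__main__":
    import torch.nn as nn

    net = nn.Linear(10, 2)
    trainer = Engine(lambda engine, batch: None)  # no-op process function for the demo
    loader = CheckpointLoader(load_path="./model.pth", load_dict={"network": net})
    loader.attach(trainer)  # the checkpoint is restored when Events.STARTED fires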
avg_line_length: 42.72093
max_line_length: 111
alphanum_fraction: 0.679096
content_no_comment:
import logging
from typing import TYPE_CHECKING, Dict, Optional
import torch
from monai.utils import exact_version, optional_import
Events, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Events")
Checkpoint, _ = optional_import("ignite.handlers", "0.3.0", exact_version, "Checkpoint")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine")
class CheckpointLoader:
def __init__(
self, load_path: str, load_dict: Dict, name: Optional[str] = None, map_location: Optional[Dict] = None,
) -> None:
assert load_path is not None, "must provide clear path to load checkpoint."
self.load_path = load_path
assert load_dict is not None and len(load_dict) > 0, "must provide target objects to load."
self.logger = logging.getLogger(name)
for k, v in load_dict.items():
if hasattr(v, "module"):
load_dict[k] = v.module
self.load_dict = load_dict
self._name = name
self.map_location = map_location
def attach(self, engine: Engine) -> None:
if self._name is None:
self.logger = engine.logger
engine.add_event_handler(Events.STARTED, self)
def __call__(self, engine: Engine) -> None:
checkpoint = torch.load(self.load_path, map_location=self.map_location)
if len(self.load_dict) == 1:
key = list(self.load_dict.keys())[0]
            if key not in checkpoint:
checkpoint = {key: checkpoint}
Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint)
self.logger.info(f"Restored all variables from {self.load_path}")
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: 790e7d48fc74b078b698d22908efa30d7a798044
size: 994
ext: py
lang: Python
max_stars_repo_path: tethys/bin/cli.py
max_stars_repo_name: tethys-platform/tethys
max_stars_repo_head_hexsha: c27daf5a832b05f9d771b04355001c331bc08766
max_stars_repo_licenses: ["ECL-2.0", "Apache-2.0"]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2020-05-20T19:03:14.000Z
max_stars_repo_stars_event_max_datetime: 2020-06-03T20:43:34.000Z
max_issues_repo_path: tethys/bin/cli.py
max_issues_repo_name: tethys-platform/tethys
max_issues_repo_head_hexsha: c27daf5a832b05f9d771b04355001c331bc08766
max_issues_repo_licenses: ["ECL-2.0", "Apache-2.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tethys/bin/cli.py
max_forks_repo_name: tethys-platform/tethys
max_forks_repo_head_hexsha: c27daf5a832b05f9d771b04355001c331bc08766
max_forks_repo_licenses: ["ECL-2.0", "Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# Copyright 2020 Konstruktor, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from tethys.apps.base import AppBase
@click.group()
def cli():
"""
The tethys CLI for managing your environment.
"""
@cli.group(name="apps")
def apps_entry():
"""
Tethys apps manager
"""
for app_cls in AppBase.get_apps():
add_click_entry = getattr(app_cls, "add_click_entry", None)
if add_click_entry:
add_click_entry(apps_entry)
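# A hypothetical app class (not part of tethys) illustrating the
# add_click_entry hook that the loop above relies on:
class EchoApp:
    @classmethod
    def add_click_entry(cls, group):
        @group.command(name="echo")
        @click.argument("message")
        def echo(message):
            """Print MESSAGE back to the terminal."""
            click.echo(message)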
avg_line_length: 26.864865
max_line_length: 74
alphanum_fraction: 0.72334
content_no_comment:
import click
from tethys.apps.base import AppBase
@click.group()
def cli():
    pass
@cli.group(name="apps")
def apps_entry():
    pass
for app_cls in AppBase.get_apps():
add_click_entry = getattr(app_cls, "add_click_entry", None)
if add_click_entry:
add_click_entry(apps_entry)
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: 790e7d851357a5a65fe4fce0b51959cc8cde1937
size: 54,066
ext: py
lang: Python
max_stars_repo_path: external/vulkancts/scripts/gen_framework.py
max_stars_repo_name: jingpad-bsp/android_external_deqp
max_stars_repo_head_hexsha: 50f948294cb12f5384633efc9327c571feb0fa21
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: external/vulkancts/scripts/gen_framework.py
max_issues_repo_name: jingpad-bsp/android_external_deqp
max_issues_repo_head_hexsha: 50f948294cb12f5384633efc9327c571feb0fa21
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: external/vulkancts/scripts/gen_framework.py
max_forks_repo_name: jingpad-bsp/android_external_deqp
max_forks_repo_head_hexsha: 50f948294cb12f5384633efc9327c571feb0fa21
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# Vulkan CTS
# ----------
#
# Copyright (c) 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import re
import sys
import copy
from itertools import chain
from collections import OrderedDict
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "..", "scripts"))
from build.common import DEQP_DIR
from khr_util.format import indentLines, writeInlFile
VULKAN_H = os.path.join(os.path.dirname(__file__), "src", "vulkan.h.in")
VULKAN_DIR = os.path.join(os.path.dirname(__file__), "..", "framework", "vulkan")
INL_HEADER = """\
/* WARNING: This is auto-generated file. Do not modify, since changes will
* be lost! Modify the generating script instead.
*/\
"""
DEFINITIONS = [
("VK_API_VERSION_1_0", "deUint32"),
("VK_API_VERSION_1_1", "deUint32"),
("VK_MAX_PHYSICAL_DEVICE_NAME_SIZE", "size_t"),
("VK_MAX_EXTENSION_NAME_SIZE", "size_t"),
("VK_MAX_DRIVER_NAME_SIZE_KHR", "size_t"),
("VK_MAX_DRIVER_INFO_SIZE_KHR", "size_t"),
("VK_UUID_SIZE", "size_t"),
("VK_LUID_SIZE", "size_t"),
("VK_MAX_MEMORY_TYPES", "size_t"),
("VK_MAX_MEMORY_HEAPS", "size_t"),
("VK_MAX_DESCRIPTION_SIZE", "size_t"),
("VK_MAX_DEVICE_GROUP_SIZE", "size_t"),
("VK_ATTACHMENT_UNUSED", "deUint32"),
("VK_SUBPASS_EXTERNAL", "deUint32"),
("VK_QUEUE_FAMILY_IGNORED", "deUint32"),
("VK_QUEUE_FAMILY_EXTERNAL", "deUint32"),
("VK_REMAINING_MIP_LEVELS", "deUint32"),
("VK_REMAINING_ARRAY_LAYERS", "deUint32"),
("VK_WHOLE_SIZE", "vk::VkDeviceSize"),
("VK_TRUE", "vk::VkBool32"),
("VK_FALSE", "vk::VkBool32"),
]
PLATFORM_TYPES = [
# VK_KHR_xlib_surface
(["Display","*"], ["XlibDisplayPtr"], "void*"),
(["Window"], ["XlibWindow"], "deUintptr",),
(["VisualID"], ["XlibVisualID"], "deUint32"),
# VK_KHR_xcb_surface
(["xcb_connection_t", "*"], ["XcbConnectionPtr"], "void*"),
(["xcb_window_t"], ["XcbWindow"], "deUintptr"),
(["xcb_visualid_t"], ["XcbVisualid"], "deUint32"),
# VK_KHR_wayland_surface
(["struct", "wl_display","*"], ["WaylandDisplayPtr"], "void*"),
(["struct", "wl_surface", "*"], ["WaylandSurfacePtr"], "void*"),
# VK_KHR_mir_surface
(["MirConnection", "*"], ["MirConnectionPtr"], "void*"),
(["MirSurface", "*"], ["MirSurfacePtr"], "void*"),
# VK_KHR_android_surface
(["ANativeWindow", "*"], ["AndroidNativeWindowPtr"], "void*"),
# VK_KHR_win32_surface
(["HINSTANCE"], ["Win32InstanceHandle"], "void*"),
(["HWND"], ["Win32WindowHandle"], "void*"),
(["HANDLE"], ["Win32Handle"], "void*"),
(["const", "SECURITY_ATTRIBUTES", "*"], ["Win32SecurityAttributesPtr"], "const void*"),
(["AHardwareBuffer", "*"], ["AndroidHardwareBufferPtr"], "void*"),
# VK_EXT_acquire_xlib_display
(["RROutput"], ["RROutput"], "void*")
]
PLATFORM_TYPE_NAMESPACE = "pt"
TYPE_SUBSTITUTIONS = [
("uint8_t", "deUint8"),
("uint16_t", "deUint16"),
("uint32_t", "deUint32"),
("uint64_t", "deUint64"),
("int8_t", "deInt8"),
("int16_t", "deInt16"),
("int32_t", "deInt32"),
("int64_t", "deInt64"),
("bool32_t", "deUint32"),
("size_t", "deUintptr"),
# Platform-specific
("DWORD", "deUint32"),
("HANDLE*", PLATFORM_TYPE_NAMESPACE + "::" + "Win32Handle*"),
("LPCWSTR", "char*"),
]
EXTENSION_POSTFIXES = ["KHR", "EXT", "NV", "NVX", "KHX", "NN", "MVK"]
EXTENSION_POSTFIXES_STANDARD = ["KHR"]
def prefixName (prefix, name):
name = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', name[2:])
name = re.sub(r'([a-zA-Z])([0-9])', r'\1_\2', name)
name = name.upper()
name = name.replace("YCB_CR_", "YCBCR_")
name = name.replace("WIN_32_", "WIN32_")
name = name.replace("8_BIT_", "8BIT_")
name = name.replace("16_BIT_", "16BIT_")
name = name.replace("INT_64_", "INT64_")
name = name.replace("D_3_D_12_", "D3D12_")
name = name.replace("IOSSURFACE_", "IOS_SURFACE_")
name = name.replace("MAC_OS", "MACOS_")
name = name.replace("TEXTURE_LOD", "TEXTURE_LOD_")
name = name.replace("VIEWPORT_W", "VIEWPORT_W_")
name = name.replace("_IDPROPERTIES", "_ID_PROPERTIES")
name = name.replace("PHYSICAL_DEVICE_FLOAT_16_INT_8_FEATURES", "PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES")
return prefix + name
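# Example (illustrative): with the rules above, prefixName("VK_STRUCTURE_TYPE_",
# "VkImageCreateInfo") would produce "VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO".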
class Version:
def __init__ (self, versionTuple):
self.major = versionTuple[0]
self.minor = versionTuple[1]
self.patch = versionTuple[2]
def getInHex (self):
if self.major == 1 and self.minor == 0 and self.patch == 0:
return "VK_API_VERSION_1_0"
elif self.major == 1 and self.minor == 1 and self.patch == 0:
return "VK_API_VERSION_1_1"
else:
			hexNum = (self.major << 22) | (self.minor << 12) | self.patch
			return '0x%Xu' % (hexNum)
def isStandardVersion (self):
if self.patch != 0:
return False
if self.major != 1:
return False
if self.minor != 1 and self.minor != 0:
return False
return True
def getBestRepresentation (self):
if self.isStandardVersion():
return self.getInHex()
return self.getDefineName()
def getDefineName (self):
return 'VERSION_%d_%d_%d' % (self.major, self.minor, self.patch)
def __hash__ (self):
return (self.major << 22) | (self.minor << 12) | self.patch
def __eq__ (self, other):
return self.major == other.major and self.minor == other.minor and self.patch == other.patch
def __str__ (self):
return self.getBestRepresentation()
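# Example (illustrative): str(Version((1, 1, 0))) gives "VK_API_VERSION_1_1";
# a hypothetical Version((1, 2, 3)) is not a standard version, so str() falls
# back to the define name "VERSION_1_2_3", while getInHex() would pack it as
# (1 << 22) | (2 << 12) | 3, i.e. '0x402003u'.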
class Handle:
TYPE_DISP = 0
TYPE_NONDISP = 1
def __init__ (self, type, name):
self.type = type
self.name = name
self.alias = None
self.isAlias = False
def getHandleType (self):
return prefixName("HANDLE_TYPE_", self.name)
def checkAliasValidity (self):
pass
def __repr__ (self):
return '%s (%s, %s)' % (self.name, self.alias, self.isAlias)
class Definition:
def __init__ (self, type, name, value):
self.type = type
self.name = name
self.value = value
self.alias = None
self.isAlias = False
def __repr__ (self):
return '%s = %s (%s)' % (self.name, self.value, self.type)
class Enum:
def __init__ (self, name, values):
self.name = name
self.values = values
self.alias = None
self.isAlias = False
def checkAliasValidity (self):
if self.alias != None:
if len(self.values) != len(self.alias.values):
raise Exception("%s has different number of flags than its alias %s." % (self.name, self.alias.name))
for index, value in enumerate(self.values):
aliasVal = self.alias.values[index]
if value[1] != aliasVal[1] or not (value[0].startswith(aliasVal[0]) or aliasVal[0].startswith(value[0])):
raise Exception("Flag %s of %s has different value than %s of %s." % (self.alias.values[index], self.alias.name, value, self.name))
def __repr__ (self):
return '%s (%s) %s' % (self.name, self.alias, self.values)
class Bitfield:
def __init__ (self, name, values):
self.name = name
self.values = values
self.alias = None
self.isAlias = False
def checkAliasValidity (self):
if self.alias != None:
if len(self.values) != len(self.alias.values):
raise Exception("%s has different number of flags than its alias %s." % (self.name, self.alias.name))
for index, value in enumerate(self.values):
aliasVal = self.alias.values[index]
if value[1] != aliasVal[1] or not (value[0].startswith(aliasVal[0]) or aliasVal[0].startswith(value[0])):
raise Exception("Flag %s of %s has different value than %s of %s." % (self.alias.values[index], self.alias.name, value, self.name))
def __repr__ (self):
return '%s (%s)' % (self.name, self.alias)
class Variable:
def __init__ (self, type, name, arraySize):
type = type.replace('*',' *').replace('&',' &')
for src, dst in TYPE_SUBSTITUTIONS:
type = type.replace(src, dst)
self.type = type.split(' ')
for platformType, substitute, compat in PLATFORM_TYPES:
range = self.contains(self.type, platformType)
if range != None:
self.type = self.type[:range[0]]+[PLATFORM_TYPE_NAMESPACE + '::' + substitute[0]] + substitute[1:] + self.type[range[1]:]
break
self.name = name
self.arraySize = arraySize
def contains(self, big, small):
for i in xrange(len(big)-len(small)+1):
for j in xrange(len(small)):
if big[i+j] != small[j]:
break
else:
return i, i+len(small)
return None
def getType (self):
return ' '.join(self.type).replace(' *','*').replace(' &','&')
def getAsString (self, separator):
return '%s%s%s%s' % (self.getType(), separator, self.name, self.arraySize)
def __repr__ (self):
return '<%s> <%s> <%s>' % (self.type, self.name, self.arraySize)
def __eq__ (self, other):
if len(self.type) != len(other.type):
return False
for index, type in enumerate(self.type):
if "*" == type or "&" == type or "const" == type or "volatile" == type:
if type != other.type[index]:
return False
elif type != other.type[index] and \
type not in map(lambda ext: other.type[index] + ext, EXTENSION_POSTFIXES_STANDARD) and \
other.type[index] not in map(lambda ext: type + ext, EXTENSION_POSTFIXES_STANDARD):
return False
return self.arraySize == other.arraySize
def __ne__ (self, other):
return not self == other
class CompositeType:
CLASS_STRUCT = 0
CLASS_UNION = 1
def __init__ (self, typeClass, name, members):
self.typeClass = typeClass
self.name = name
self.members = members
self.alias = None
self.isAlias = False
def getClassName (self):
names = {CompositeType.CLASS_STRUCT: 'struct', CompositeType.CLASS_UNION: 'union'}
return names[self.typeClass]
def checkAliasValidity (self):
if self.alias != None:
if len(self.members) != len(self.alias.members):
raise Exception("%s has different number of members than its alias %s." % (self.name, self.alias.name))
for index, member in enumerate(self.members ):
break
#if member != self.alias.members[index]:
#raise Exception("Member %s of %s is different than core member %s in %s." % (self.alias.members[index], self.alias.name, member, self.name))
#raise Exception("Member ",str(self.alias.members[index])," of ", str(self.alias.name)," is different than core member ", str(member)," in ", str(self.name),".")
def __repr__ (self):
return '%s (%s)' % (self.name, self.alias)
class Function:
TYPE_PLATFORM = 0 # Not bound to anything
TYPE_INSTANCE = 1 # Bound to VkInstance
TYPE_DEVICE = 2 # Bound to VkDevice
def __init__ (self, name, returnType, arguments, apiVersion = None):
self.name = name
self.returnType = returnType
self.arguments = arguments
self.alias = None
self.isAlias = False
self.apiVersion = apiVersion
def getType (self):
# Special functions
if self.name == "vkGetInstanceProcAddr":
return Function.TYPE_PLATFORM
assert len(self.arguments) > 0
firstArgType = self.arguments[0].getType()
if firstArgType in ["VkInstance", "VkPhysicalDevice"]:
return Function.TYPE_INSTANCE
elif firstArgType in ["VkDevice", "VkCommandBuffer", "VkQueue"]:
return Function.TYPE_DEVICE
else:
return Function.TYPE_PLATFORM
def checkAliasValidity (self):
if self.alias != None:
if len(self.arguments) != len(self.alias.arguments):
raise Exception("%s has different number of arguments than its alias %s." % (self.name, self.alias.name))
if self.returnType != self.alias.returnType or not (self.returnType.startswith(self.alias.returnType) or self.alias.returnType.startswith(self.returnType)):
raise Exception("%s has different return value's type than its alias %s." % (self.name, self.alias.name))
for index, argument in enumerate(self.arguments):
if argument != self.alias.arguments[index]:
raise Exception("argument %s: \"%s\" of %s is different than \"%s\" of %s." % (index, self.alias.arguments[index].getAsString(' '), self.alias.name, argument.getAsString(' '), self.name))
def __repr__ (self):
return '%s (%s)' % (self.name, self.alias)
class Extension:
def __init__ (self, name, handles, enums, bitfields, compositeTypes, functions, definitions, additionalDefinitions, versionInCore):
self.name = name
self.definitions = definitions
self.additionalDefs = additionalDefinitions
self.handles = handles
self.enums = enums
self.bitfields = bitfields
self.compositeTypes = compositeTypes
self.functions = functions
self.versionInCore = versionInCore
def __repr__ (self):
return 'EXT:\n%s ->\nENUMS:\n%s\nCOMPOS:\n%s\nFUNCS:\n%s\nBITF:\n%s\nHAND:\n%s\nDEFS:\n%s\n' % (self.name, self.enums, self.compositeTypes, self.functions, self.bitfields, self.handles, self.definitions, self.versionInCore)
class API:
def __init__ (self, definitions, handles, enums, bitfields, compositeTypes, functions, extensions):
self.definitions = definitions
self.handles = handles
self.enums = enums
self.bitfields = bitfields
self.compositeTypes = compositeTypes
self.functions = functions # \note contains extension functions as well
self.extensions = extensions
def readFile (filename):
with open(filename, 'rb') as f:
return f.read()
IDENT_PTRN = r'[a-zA-Z_][a-zA-Z0-9_]*'
TYPE_PTRN = r'[a-zA-Z_][a-zA-Z0-9_ \t*&]*'
def fixupEnumValues (values):
fixed = []
for name, value in values:
if "_BEGIN_RANGE" in name or "_END_RANGE" in name:
continue
fixed.append((name, value))
return fixed
def getInterfaceName (function):
assert function.name[:2] == "vk"
return function.name[2].lower() + function.name[3:]
def getFunctionTypeName (function):
assert function.name[:2] == "vk"
return function.name[2:] + "Func"
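# Example (illustrative): for a Function named "vkCreateDevice",
# getInterfaceName() returns "createDevice" and getFunctionTypeName()
# returns "CreateDeviceFunc".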
def endsWith (str, postfix):
return str[-len(postfix):] == postfix
def splitNameExtPostfix (name):
knownExtPostfixes = EXTENSION_POSTFIXES
for postfix in knownExtPostfixes:
if endsWith(name, postfix):
return (name[:-len(postfix)], postfix)
return (name, "")
def getBitEnumNameForBitfield (bitfieldName):
bitfieldName, postfix = splitNameExtPostfix(bitfieldName)
assert bitfieldName[-1] == "s"
return bitfieldName[:-1] + "Bits" + postfix
def getBitfieldNameForBitEnum (bitEnumName):
bitEnumName, postfix = splitNameExtPostfix(bitEnumName)
assert bitEnumName[-4:] == "Bits"
return bitEnumName[:-4] + "s" + postfix
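# Example (illustrative round trip): getBitEnumNameForBitfield("VkQueueFlags")
# yields "VkQueueFlagBits" and getBitfieldNameForBitEnum("VkQueueFlagBits")
# yields "VkQueueFlags"; an extension postfix such as "KHR" survives both
# conversions.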
def parsePreprocDefinedValue (src, name):
value = parsePreprocDefinedValueOptional(src, name)
if value is None:
raise Exception("No such definition: %s" % name)
return value
def parsePreprocDefinedValueOptional (src, name):
definition = re.search(r'#\s*define\s+' + name + r'\s+([^\n]+)\n', src)
if definition is None:
return None
value = definition.group(1).strip()
if value == "UINT32_MAX":
value = "(~0u)"
return value
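# Example (illustrative): on a newline-terminated source line
# '#define VK_UUID_SIZE 16', parsePreprocDefinedValueOptional(src, "VK_UUID_SIZE")
# would return "16"; a raw value of UINT32_MAX is rewritten to "(~0u)".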
def parseEnum (name, src):
keyValuePtrn = '(' + IDENT_PTRN + r')\s*=\s*([^\s,}]+)\s*[,}]'
matches = re.findall(keyValuePtrn, src)
return Enum(name, fixupEnumValues(matches))
# \note Parses raw enums, some are mapped to bitfields later
def parseEnums (src):
matches = re.findall(r'typedef enum(\s*' + IDENT_PTRN + r')?\s*{([^}]*)}\s*(' + IDENT_PTRN + r')\s*;', src)
enums = []
for enumname, contents, typename in matches:
enums.append(parseEnum(typename, contents))
return enums
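# Example (illustrative): for the fragment
#   typedef enum { VK_SAMPLE_COUNT_1_BIT = 0x00000001, VK_SAMPLE_COUNT_2_BIT = 0x00000002, } VkSampleCountFlagBits;
# parseEnums() would yield one Enum named "VkSampleCountFlagBits" whose values are
# [('VK_SAMPLE_COUNT_1_BIT', '0x00000001'), ('VK_SAMPLE_COUNT_2_BIT', '0x00000002')].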
def parseCompositeType (type, name, src):
typeNamePtrn = r'(' + TYPE_PTRN + r')(\s+' + IDENT_PTRN + r')((\[[^\]]+\])*)\s*;'
matches = re.findall(typeNamePtrn, src)
members = [Variable(t.strip(), n.strip(), a.strip()) for t, n, a, _ in matches]
return CompositeType(type, name, members)
def parseCompositeTypes (src):
typeMap = { 'struct': CompositeType.CLASS_STRUCT, 'union': CompositeType.CLASS_UNION }
matches = re.findall(r'typedef (struct|union)(\s*' + IDENT_PTRN + r')?\s*{([^}]*)}\s*(' + IDENT_PTRN + r')\s*;', src)
types = []
for type, structname, contents, typename in matches:
types.append(parseCompositeType(typeMap[type], typename, contents))
return types
def parseHandles (src):
matches = re.findall(r'VK_DEFINE(_NON_DISPATCHABLE|)_HANDLE\((' + IDENT_PTRN + r')\)[ \t]*[\n\r]', src)
handles = []
typeMap = {'': Handle.TYPE_DISP, '_NON_DISPATCHABLE': Handle.TYPE_NONDISP}
for type, name in matches:
handle = Handle(typeMap[type], name)
handles.append(handle)
return handles
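# Example (illustrative): a newline-terminated 'VK_DEFINE_HANDLE(VkInstance)'
# parses to a TYPE_DISP Handle named "VkInstance", while
# 'VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer)' gives a TYPE_NONDISP handle.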
def parseArgList (src):
typeNamePtrn = r'(' + TYPE_PTRN + r')(\s+' + IDENT_PTRN + r')((\[[^\]]+\])*)\s*'
args = []
for rawArg in src.split(','):
m = re.search(typeNamePtrn, rawArg)
args.append(Variable(m.group(1).strip(), m.group(2).strip(), m.group(3)))
return args
def removeTypeExtPostfix (name):
for extPostfix in EXTENSION_POSTFIXES_STANDARD:
if endsWith(name, extPostfix):
return name[0:-len(extPostfix)]
return None
def populateAliases (objects):
objectsByName = {}
for object in objects:
objectsByName[object.name] = object
for object in objects:
withoutPostfix = removeTypeExtPostfix(object.name)
if withoutPostfix != None and withoutPostfix in objectsByName:
objectsByName[withoutPostfix].alias = object
object.isAlias = True
for object in objects:
object.checkAliasValidity()
def populateAliasesWithTypedefs (objects, src):
objectsByName = {}
for object in objects:
objectsByName[object.name] = object
ptrn = r'\s*typedef\s+' + object.name + r'\s+([^;]+)'
stash = re.findall(ptrn, src)
if len(stash) == 1:
objExt = copy.deepcopy(object)
objExt.name = stash[0]
object.alias = objExt
objExt.isAlias = True
objects.append(objExt)
def removeAliasedValues (enum):
valueByName = {}
for name, value in enum.values:
valueByName[name] = value
def removeDefExtPostfix (name):
for extPostfix in EXTENSION_POSTFIXES:
if endsWith(name, "_" + extPostfix):
return name[0:-(len(extPostfix)+1)]
return None
newValues = []
for name, value in enum.values:
withoutPostfix = removeDefExtPostfix(name)
if withoutPostfix != None and withoutPostfix in valueByName and valueByName[withoutPostfix] == value:
continue
newValues.append((name, value))
enum.values = newValues
def parseFunctions (src):
	ptrn = r'VKAPI_ATTR\s+(' + TYPE_PTRN + r')\s+VKAPI_CALL\s+(' + IDENT_PTRN + r')\s*\(([^)]*)\)\s*;'
matches = re.findall(ptrn, src)
functions = []
for returnType, name, argList in matches:
functions.append(Function(name.strip(), returnType.strip(), parseArgList(argList)))
return functions
def parseFunctionsByVersion (src):
ptrnVer10 = 'VK_VERSION_1_0 1'
ptrnVer11 = 'VK_VERSION_1_1 1'
matchVer10 = re.search(ptrnVer10, src)
matchVer11 = re.search(ptrnVer11, src)
	ptrn = r'VKAPI_ATTR\s+(' + TYPE_PTRN + r')\s+VKAPI_CALL\s+(' + IDENT_PTRN + r')\s*\(([^)]*)\)\s*;'
regPtrn = re.compile(ptrn)
matches = regPtrn.findall(src, matchVer10.start(), matchVer11.start())
functions = []
for returnType, name, argList in matches:
functions.append(Function(name.strip(), returnType.strip(), parseArgList(argList), 'VK_VERSION_1_0'))
matches = regPtrn.findall(src, matchVer11.start())
for returnType, name, argList in matches:
functions.append(Function(name.strip(), returnType.strip(), parseArgList(argList), 'VK_VERSION_1_1'))
return functions
def splitByExtension (src):
ptrn = r'#define\s+[A-Z0-9_]+_EXTENSION_NAME\s+"([^"]+)"'
match = "#define\s+("
for part in re.finditer(ptrn, src):
match += part.group(1)+"|"
	match = match[:-1] + r")\s+1"
parts = re.split(match, src)
# First part is core
byExtension = [(None, parts[0])]
for ndx in range(1, len(parts), 2):
byExtension.append((parts[ndx], parts[ndx+1]))
return byExtension
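# Example (illustrative): for a header where the VK_KHR_surface block starts
# with '#define VK_KHR_surface 1' and later defines
# VK_KHR_SURFACE_EXTENSION_NAME as "VK_KHR_surface", splitByExtension() would
# return [(None, <core text>), ("VK_KHR_surface", <extension text>)].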
def parseDefinitions (extensionName, src):
def skipDefinition (extensionName, definition):
if extensionName == None:
return True
# SPEC_VERSION enums
if definition[0].startswith(extensionName.upper()) and definition[1].isdigit():
return False
if definition[0].startswith(extensionName.upper()):
return True
if definition[1].isdigit():
return True
return False
ptrn = r'#define\s+([^\s]+)\s+([^\r\n]+)'
matches = re.findall(ptrn, src)
return [Definition(None, match[0], match[1]) for match in matches if not skipDefinition(extensionName, match)]
def parseExtensions (src, allFunctions, allCompositeTypes, allEnums, allBitfields, allHandles, allDefinitions):
def getCoreVersion (extensionTuple):
if not extensionTuple[0]:
return None
ptrn = r'\/\/\s*' + extensionTuple[0] + r'\s+(DEVICE|INSTANCE)\s+([0-9_]+)'
coreVersion = re.search(ptrn, extensionTuple[1], re.I)
if coreVersion != None:
return [coreVersion.group(1)] + [int(number) for number in coreVersion.group(2).split('_')[:3]]
return None
splitSrc = splitByExtension(src)
extensions = []
functionsByName = {function.name: function for function in allFunctions}
compositeTypesByName = {compType.name: compType for compType in allCompositeTypes}
enumsByName = {enum.name: enum for enum in allEnums}
bitfieldsByName = {bitfield.name: bitfield for bitfield in allBitfields}
handlesByName = {handle.name: handle for handle in allHandles}
definitionsByName = {definition.name: definition for definition in allDefinitions}
for extensionName, extensionSrc in splitSrc:
definitions = [Definition(type, name, parsePreprocDefinedValueOptional(extensionSrc, name)) for name, type in DEFINITIONS]
definitions = [definition for definition in definitions if definition.value != None]
additionalDefinitions = parseDefinitions(extensionName, extensionSrc)
handles = parseHandles(extensionSrc)
functions = parseFunctions(extensionSrc)
compositeTypes = parseCompositeTypes(extensionSrc)
rawEnums = parseEnums(extensionSrc)
bitfieldNames = parseBitfieldNames(extensionSrc)
enumBitfieldNames = [getBitEnumNameForBitfield(name) for name in bitfieldNames]
enums = [enum for enum in rawEnums if enum.name not in enumBitfieldNames]
extCoreVersion = getCoreVersion((extensionName, extensionSrc))
extFunctions = [functionsByName[function.name] for function in functions]
extCompositeTypes = [compositeTypesByName[compositeType.name] for compositeType in compositeTypes]
extEnums = [enumsByName[enum.name] for enum in enums]
extBitfields = [bitfieldsByName[bitfieldName] for bitfieldName in bitfieldNames]
extHandles = [handlesByName[handle.name] for handle in handles]
extDefinitions = [definitionsByName[definition.name] for definition in definitions]
extensions.append(Extension(extensionName, extHandles, extEnums, extBitfields, extCompositeTypes, extFunctions, extDefinitions, additionalDefinitions, extCoreVersion))
return extensions
def parseBitfieldNames (src):
ptrn = r'typedef\s+VkFlags\s(' + IDENT_PTRN + r')\s*;'
matches = re.findall(ptrn, src)
return matches
def parseAPI (src):
definitions = [Definition(type, name, parsePreprocDefinedValue(src, name)) for name, type in DEFINITIONS]
handles = parseHandles(src)
rawEnums = parseEnums(src)
bitfieldNames = parseBitfieldNames(src)
enums = []
bitfields = []
bitfieldEnums = set([getBitEnumNameForBitfield(n) for n in bitfieldNames if getBitEnumNameForBitfield(n) in [enum.name for enum in rawEnums]])
compositeTypes = parseCompositeTypes(src)
allFunctions = parseFunctionsByVersion(src)
for enum in rawEnums:
if enum.name in bitfieldEnums:
bitfields.append(Bitfield(getBitfieldNameForBitEnum(enum.name), enum.values))
else:
enums.append(enum)
for bitfieldName in bitfieldNames:
if not bitfieldName in [bitfield.name for bitfield in bitfields]:
# Add empty bitfield
bitfields.append(Bitfield(bitfieldName, []))
# Populate alias fields
populateAliasesWithTypedefs(compositeTypes, src)
populateAliasesWithTypedefs(enums, src)
populateAliasesWithTypedefs(bitfields, src)
populateAliases(allFunctions)
populateAliases(handles)
populateAliases(enums)
populateAliases(bitfields)
populateAliases(compositeTypes)
for enum in enums:
removeAliasedValues(enum)
extensions = parseExtensions(src, allFunctions, compositeTypes, enums, bitfields, handles, definitions)
return API(
definitions = definitions,
handles = handles,
enums = enums,
bitfields = bitfields,
compositeTypes = compositeTypes,
functions = allFunctions,
extensions = extensions)
def splitUniqueAndDuplicatedEntries (handles):
listOfUniqueHandles = []
duplicates = OrderedDict()
for handle in handles:
if handle.alias != None:
duplicates[handle.alias] = handle
if not handle.isAlias:
listOfUniqueHandles.append(handle)
return listOfUniqueHandles, duplicates
def writeHandleType (api, filename):
	uniqueHandles, duplicatedHandles = splitUniqueAndDuplicatedEntries(api.handles)
	def genHandles ():
		yield "\t%s\t= 0," % uniqueHandles[0].getHandleType()
		for handle in uniqueHandles[1:]:
			yield "\t%s," % handle.getHandleType()
		for duplicate in duplicatedHandles:
			yield "\t%s\t= %s," % (duplicate.getHandleType(), duplicatedHandles[duplicate].getHandleType())
		yield "\tHANDLE_TYPE_LAST\t= %s + 1" % (uniqueHandles[-1].getHandleType())
def genHandlesBlock ():
yield "enum HandleType"
yield "{"
for line in indentLines(genHandles()):
yield line
yield "};"
yield ""
writeInlFile(filename, INL_HEADER, genHandlesBlock())
def getEnumValuePrefix (enum):
prefix = enum.name[0]
for i in range(1, len(enum.name)):
if enum.name[i].isupper() and not enum.name[i-1].isupper():
prefix += "_"
prefix += enum.name[i].upper()
return prefix
def parseInt (value):
if value[:2] == "0x":
return int(value, 16)
else:
return int(value, 10)
def areEnumValuesLinear (enum):
curIndex = 0
for name, value in enum.values:
if parseInt(value) != curIndex:
return False
curIndex += 1
return True
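# Example (illustrative): values [("VK_A", "0"), ("VK_B", "1")] are linear, so
# genEnumSrc() below appends a synthetic "<PREFIX>_LAST" entry; a sparse enum
# such as [("VK_A", "0x1000")] is not, and gets no such entry.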
def genEnumSrc (enum):
yield "enum %s" % enum.name
yield "{"
for line in indentLines(["\t%s\t= %s," % v for v in enum.values]):
yield line
if areEnumValuesLinear(enum):
yield ""
yield "\t%s_LAST" % getEnumValuePrefix(enum)
yield "};"
def genBitfieldSrc (bitfield):
if len(bitfield.values) > 0:
yield "enum %s" % getBitEnumNameForBitfield(bitfield.name)
yield "{"
for line in indentLines(["\t%s\t= %s," % v for v in bitfield.values]):
yield line
yield "};"
yield "typedef deUint32 %s;" % bitfield.name
def genCompositeTypeSrc (type):
yield "%s %s" % (type.getClassName(), type.name)
yield "{"
for line in indentLines(['\t'+m.getAsString('\t')+';' for m in type.members]):
yield line
yield "};"
def genHandlesSrc (handles):
	uniqueHandles, duplicatedHandles = splitUniqueAndDuplicatedEntries(handles)
	def genLines (handles):
		for handle in uniqueHandles:
if handle.type == Handle.TYPE_DISP:
yield "VK_DEFINE_HANDLE\t(%s,\t%s);" % (handle.name, handle.getHandleType())
elif handle.type == Handle.TYPE_NONDISP:
yield "VK_DEFINE_NON_DISPATCHABLE_HANDLE\t(%s,\t%s);" % (handle.name, handle.getHandleType())
for duplicate in duplicatedHandles:
if duplicate.type == Handle.TYPE_DISP:
yield "VK_DEFINE_HANDLE\t(%s,\t%s);" % (duplicate.name, duplicatedHandles[duplicate].getHandleType())
elif duplicate.type == Handle.TYPE_NONDISP:
yield "VK_DEFINE_NON_DISPATCHABLE_HANDLE\t(%s,\t%s);" % (duplicate.name, duplicatedHandles[duplicate].getHandleType())
for line in indentLines(genLines(handles)):
yield line
def genDefinitionsSrc (definitions):
for line in ["#define %s\t(static_cast<%s>\t(%s))" % (definition.name, definition.type, definition.value) for definition in definitions]:
yield line
def genDefinitionsAliasSrc (definitions):
	for definition in definitions:
		if definition.value != definitions[definition].value and definition.value != definitions[definition].name:
			raise Exception("Value of %s (%s) is different than core definition value %s (%s)." % (definition.name, definition.value, definitions[definition].name, definitions[definition].value))
		yield "#define %s\t%s" % (definition.name, definitions[definition].name)
def writeBasicTypes (api, filename):
def gen ():
definitionsCore, definitionDuplicates = splitUniqueAndDuplicatedEntries(api.definitions)
for line in indentLines(chain(genDefinitionsSrc(definitionsCore), genDefinitionsAliasSrc(definitionDuplicates))):
yield line
yield ""
for line in genHandlesSrc(api.handles):
yield line
yield ""
for enum in api.enums:
if not enum.isAlias:
for line in genEnumSrc(enum):
yield line
yield ""
for bitfield in api.bitfields:
if not bitfield.isAlias:
for line in genBitfieldSrc(bitfield):
yield line
yield ""
for line in indentLines(["VK_DEFINE_PLATFORM_TYPE(%s,\t%s);" % (s[0], c) for n, s, c in PLATFORM_TYPES]):
yield line
for ext in api.extensions:
if ext.additionalDefs != None:
for definition in ext.additionalDefs:
yield "#define " + definition.name + " " + definition.value
writeInlFile(filename, INL_HEADER, gen())
def writeCompositeTypes (api, filename):
def gen ():
for type in api.compositeTypes:
type.checkAliasValidity()
if not type.isAlias:
for line in genCompositeTypeSrc(type):
yield line
yield ""
writeInlFile(filename, INL_HEADER, gen())
def argListToStr (args):
return ", ".join(v.getAsString(' ') for v in args)
def writeInterfaceDecl (api, filename, functionTypes, concrete):
def genProtos ():
postfix = "" if concrete else " = 0"
for function in api.functions:
if not function.getType() in functionTypes:
continue
if not function.isAlias:
yield "virtual %s\t%s\t(%s) const%s;" % (function.returnType, getInterfaceName(function), argListToStr(function.arguments), postfix)
writeInlFile(filename, INL_HEADER, indentLines(genProtos()))
def writeFunctionPtrTypes (api, filename):
def genTypes ():
for function in api.functions:
yield "typedef VKAPI_ATTR %s\t(VKAPI_CALL* %s)\t(%s);" % (function.returnType, getFunctionTypeName(function), argListToStr(function.arguments))
writeInlFile(filename, INL_HEADER, indentLines(genTypes()))
def writeFunctionPointers (api, filename, functionTypes):
def FunctionsYielder ():
for function in api.functions:
if function.getType() in functionTypes:
if function.isAlias:
if function.getType() == Function.TYPE_INSTANCE and function.arguments[0].getType() == "VkPhysicalDevice":
yield "%s\t%s;" % (getFunctionTypeName(function), getInterfaceName(function))
else:
yield "%s\t%s;" % (getFunctionTypeName(function), getInterfaceName(function))
writeInlFile(filename, INL_HEADER, indentLines(FunctionsYielder()))
def writeInitFunctionPointers (api, filename, functionTypes, cond = None):
def makeInitFunctionPointers ():
for function in api.functions:
if function.getType() in functionTypes and (cond == None or cond(function)):
interfaceName = getInterfaceName(function)
if function.isAlias:
if function.getType() == Function.TYPE_INSTANCE and function.arguments[0].getType() == "VkPhysicalDevice":
yield "m_vk.%s\t= (%s)\tGET_PROC_ADDR(\"%s\");" % (getInterfaceName(function), getFunctionTypeName(function), function.name)
else:
yield "m_vk.%s\t= (%s)\tGET_PROC_ADDR(\"%s\");" % (getInterfaceName(function), getFunctionTypeName(function), function.name)
if function.alias != None:
yield "if (!m_vk.%s)" % (getInterfaceName(function))
yield " m_vk.%s\t= (%s)\tGET_PROC_ADDR(\"%s\");" % (getInterfaceName(function), getFunctionTypeName(function), function.alias.name)
lines = [line.replace(' ', '\t') for line in indentLines(makeInitFunctionPointers())]
writeInlFile(filename, INL_HEADER, lines)
def writeFuncPtrInterfaceImpl (api, filename, functionTypes, className):
def makeFuncPtrInterfaceImpl ():
for function in api.functions:
if function.getType() in functionTypes and not function.isAlias:
yield ""
yield "%s %s::%s (%s) const" % (function.returnType, className, getInterfaceName(function), argListToStr(function.arguments))
yield "{"
if function.name == "vkEnumerateInstanceVersion":
yield " if (m_vk.enumerateInstanceVersion)"
yield " return m_vk.enumerateInstanceVersion(pApiVersion);"
yield ""
yield " *pApiVersion = VK_API_VERSION_1_0;"
yield " return VK_SUCCESS;"
elif function.getType() == Function.TYPE_INSTANCE and function.arguments[0].getType() == "VkPhysicalDevice" and function.alias != None:
yield " vk::VkPhysicalDeviceProperties props;"
yield " m_vk.getPhysicalDeviceProperties(physicalDevice, &props);"
yield " if (props.apiVersion >= VK_API_VERSION_1_1)"
yield " %sm_vk.%s(%s);" % ("return " if function.returnType != "void" else "", getInterfaceName(function), ", ".join(a.name for a in function.arguments))
yield " else"
yield " %sm_vk.%s(%s);" % ("return " if function.returnType != "void" else "", getInterfaceName(function.alias), ", ".join(a.name for a in function.arguments))
else:
yield " %sm_vk.%s(%s);" % ("return " if function.returnType != "void" else "", getInterfaceName(function), ", ".join(a.name for a in function.arguments))
yield "}"
writeInlFile(filename, INL_HEADER, makeFuncPtrInterfaceImpl())
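# Example (illustrative): for an instance-level function whose first argument
# is a VkPhysicalDevice and which has a core alias (e.g. a KHR entry point
# promoted to Vulkan 1.1), the generated body queries
# getPhysicalDeviceProperties and calls the core entry point when
# props.apiVersion >= VK_API_VERSION_1_1, falling back to the aliased
# entry point otherwise.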
def writeStrUtilProto (api, filename):
def makeStrUtilProto ():
for line in indentLines(["const char*\tget%sName\t(%s value);" % (enum.name[2:], enum.name) for enum in api.enums if not enum.isAlias]):
yield line
yield ""
for line in indentLines(["inline tcu::Format::Enum<%s>\tget%sStr\t(%s value)\t{ return tcu::Format::Enum<%s>(get%sName, value);\t}" % (e.name, e.name[2:], e.name, e.name, e.name[2:]) for e in api.enums if not e.isAlias]):
yield line
yield ""
for line in indentLines(["inline std::ostream&\toperator<<\t(std::ostream& s, %s value)\t{ return s << get%sStr(value);\t}" % (e.name, e.name[2:]) for e in api.enums if not e.isAlias]):
yield line
yield ""
for line in indentLines(["tcu::Format::Bitfield<32>\tget%sStr\t(%s value);" % (bitfield.name[2:], bitfield.name) for bitfield in api.bitfields if not bitfield.isAlias]):
yield line
yield ""
for line in indentLines(["std::ostream&\toperator<<\t(std::ostream& s, const %s& value);" % (s.name) for s in api.compositeTypes if not s.isAlias]):
yield line
writeInlFile(filename, INL_HEADER, makeStrUtilProto())
def writeStrUtilImpl (api, filename):
def makeStrUtilImpl ():
for line in indentLines(["template<> const char*\tgetTypeName<%s>\t(void) { return \"%s\";\t}" % (handle.name, handle.name) for handle in api.handles if not handle.isAlias]):
yield line
yield ""
yield "namespace %s" % PLATFORM_TYPE_NAMESPACE
yield "{"
for line in indentLines("std::ostream& operator<< (std::ostream& s, %s\tv) { return s << tcu::toHex(v.internal); }" % ''.join(s) for n, s, c in PLATFORM_TYPES):
yield line
yield "}"
for enum in api.enums:
if enum.isAlias:
continue
yield ""
yield "const char* get%sName (%s value)" % (enum.name[2:], enum.name)
yield "{"
yield "\tswitch (value)"
yield "\t{"
for line in indentLines(["\t\tcase %s:\treturn \"%s\";" % (n, n) for n, v in enum.values] + ["\t\tdefault:\treturn DE_NULL;"]):
yield line
yield "\t}"
yield "}"
for bitfield in api.bitfields:
if bitfield.isAlias:
continue
yield ""
yield "tcu::Format::Bitfield<32> get%sStr (%s value)" % (bitfield.name[2:], bitfield.name)
yield "{"
if len(bitfield.values) > 0:
yield "\tstatic const tcu::Format::BitDesc s_desc[] ="
yield "\t{"
for line in indentLines(["\t\ttcu::Format::BitDesc(%s,\t\"%s\")," % (n, n) for n, v in bitfield.values]):
yield line
yield "\t};"
yield "\treturn tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));"
else:
yield "\treturn tcu::Format::Bitfield<32>(value, DE_NULL, DE_NULL);"
yield "}"
bitfieldTypeNames = set([bitfield.name for bitfield in api.bitfields])
for type in api.compositeTypes:
if not type.isAlias:
yield ""
yield "std::ostream& operator<< (std::ostream& s, const %s& value)" % type.name
yield "{"
yield "\ts << \"%s = {\\n\";" % type.name
for member in type.members:
memberName = member.name
valFmt = None
newLine = ""
if member.getType() in bitfieldTypeNames:
valFmt = "get%sStr(value.%s)" % (member.getType()[2:], member.name)
elif member.getType() == "const char*" or member.getType() == "char*":
valFmt = "getCharPtrStr(value.%s)" % member.name
elif member.arraySize != '':
if member.name in ["extensionName", "deviceName", "layerName", "description"]:
valFmt = "(const char*)value.%s" % member.name
elif member.getType() == 'char' or member.getType() == 'deUint8':
newLine = "'\\n' << "
valFmt = "tcu::formatArray(tcu::Format::HexIterator<%s>(DE_ARRAY_BEGIN(value.%s)), tcu::Format::HexIterator<%s>(DE_ARRAY_END(value.%s)))" % (member.getType(), member.name, member.getType(), member.name)
else:
if member.name == "memoryTypes" or member.name == "memoryHeaps":
endIter = "DE_ARRAY_BEGIN(value.%s) + value.%sCount" % (member.name, member.name[:-1])
else:
endIter = "DE_ARRAY_END(value.%s)" % member.name
newLine = "'\\n' << "
valFmt = "tcu::formatArray(DE_ARRAY_BEGIN(value.%s), %s)" % (member.name, endIter)
memberName = member.name
else:
valFmt = "value.%s" % member.name
yield ("\ts << \"\\t%s = \" << " % memberName) + newLine + valFmt + " << '\\n';"
yield "\ts << '}';"
yield "\treturn s;"
yield "}"
writeInlFile(filename, INL_HEADER, makeStrUtilImpl())
class ConstructorFunction:
def __init__ (self, type, name, objectType, ifaceArgs, arguments):
self.type = type
self.name = name
self.objectType = objectType
self.ifaceArgs = ifaceArgs
self.arguments = arguments
def getConstructorFunctions (api):
funcs = []
ifacesDict = {
Function.TYPE_PLATFORM: [Variable("const PlatformInterface&", "vk", "")],
Function.TYPE_INSTANCE: [Variable("const InstanceInterface&", "vk", "")],
Function.TYPE_DEVICE: [Variable("const DeviceInterface&", "vk", "")]
}
for function in api.functions:
if function.isAlias:
continue
if (function.name[:8] == "vkCreate" or function.name == "vkAllocateMemory") and not "createInfoCount" in [a.name for a in function.arguments]:
if function.name == "vkCreateDisplayModeKHR":
continue # No way to delete display modes (bug?)
# \todo [pyry] Rather hacky
ifaceArgs = ifacesDict[function.getType()]
if function.name == "vkCreateDevice":
ifaceArgs = [Variable("const PlatformInterface&", "vkp", ""), Variable("VkInstance", "instance", "")] + ifaceArgs
assert (function.arguments[-2].type == ["const", "VkAllocationCallbacks", "*"])
objectType = function.arguments[-1].type[0] #not getType() but type[0] on purpose
arguments = function.arguments[:-1]
funcs.append(ConstructorFunction(function.getType(), getInterfaceName(function), objectType, ifaceArgs, arguments))
return funcs
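# Example (illustrative): vkCreateBuffer maps to a ConstructorFunction that
# generates roughly
#   Move<VkBuffer> createBuffer (const DeviceInterface& vk, VkDevice device,
#                                const VkBufferCreateInfo* pCreateInfo,
#                                const VkAllocationCallbacks* pAllocator = DE_NULL);
# where the returned Move<> later destroys the object through vkDestroyBuffer.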
def addVersionDefines(versionSpectrum):
output = ["#define " + ver.getDefineName() + " " + ver.getInHex() for ver in versionSpectrum if not ver.isStandardVersion()]
return output
def removeVersionDefines(versionSpectrum):
output = ["#undef " + ver.getDefineName() for ver in versionSpectrum if not ver.isStandardVersion()]
return output
def writeRefUtilProto (api, filename):
functions = getConstructorFunctions(api)
def makeRefUtilProto ():
for line in indentLines(["Move<%s>\t%s\t(%s = DE_NULL);" % (function.objectType, function.name, argListToStr(function.ifaceArgs + function.arguments)) for function in functions]):
yield line
writeInlFile(filename, INL_HEADER, makeRefUtilProto())
def writeRefUtilImpl (api, filename):
functions = getConstructorFunctions(api)
def makeRefUtilImpl ():
yield "namespace refdetails"
yield "{"
yield ""
for function in api.functions:
if function.getType() == Function.TYPE_DEVICE \
and (function.name[:9] == "vkDestroy" or function.name == "vkFreeMemory") \
and not function.name == "vkDestroyDevice" \
and not function.isAlias:
objectType = function.arguments[-2].getType()
yield "template<>"
yield "void Deleter<%s>::operator() (%s obj) const" % (objectType, objectType)
yield "{"
yield "\tm_deviceIface->%s(m_device, obj, m_allocator);" % (getInterfaceName(function))
yield "}"
yield ""
yield "} // refdetails"
yield ""
dtorDict = {
Function.TYPE_PLATFORM: "object",
Function.TYPE_INSTANCE: "instance",
Function.TYPE_DEVICE: "device"
}
for function in functions:
deleterArgsString = ''
if function.name == "createDevice":
# createDevice requires two additional parameters to setup VkDevice deleter
deleterArgsString = "vkp, instance, object, " + function.arguments[-1].name
else:
deleterArgsString = "vk, %s, %s" % (dtorDict[function.type], function.arguments[-1].name)
yield "Move<%s> %s (%s)" % (function.objectType, function.name, argListToStr(function.ifaceArgs + function.arguments))
yield "{"
yield "\t%s object = 0;" % function.objectType
yield "\tVK_CHECK(vk.%s(%s));" % (function.name, ", ".join([a.name for a in function.arguments] + ["&object"]))
yield "\treturn Move<%s>(check<%s>(object), Deleter<%s>(%s));" % (function.objectType, function.objectType, function.objectType, deleterArgsString)
yield "}"
yield ""
writeInlFile(filename, INL_HEADER, makeRefUtilImpl())
def writeStructTraitsImpl (api, filename):
def gen ():
for type in api.compositeTypes:
if type.getClassName() == "struct" and type.members[0].name == "sType" and not type.isAlias:
yield "template<> VkStructureType getStructureType<%s> (void)" % type.name
yield "{"
yield "\treturn %s;" % prefixName("VK_STRUCTURE_TYPE_", type.name)
yield "}"
yield ""
writeInlFile(filename, INL_HEADER, gen())
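# Example (illustrative): for VkImageCreateInfo the generator would emit
#   template<> VkStructureType getStructureType<VkImageCreateInfo> (void)
#   { return VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; }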
def writeNullDriverImpl (api, filename):
def genNullDriverImpl ():
specialFuncNames = [
"vkCreateGraphicsPipelines",
"vkCreateComputePipelines",
"vkGetInstanceProcAddr",
"vkGetDeviceProcAddr",
"vkEnumeratePhysicalDevices",
"vkEnumerateInstanceExtensionProperties",
"vkEnumerateDeviceExtensionProperties",
"vkGetPhysicalDeviceFeatures",
"vkGetPhysicalDeviceFeatures2KHR",
"vkGetPhysicalDeviceProperties",
"vkGetPhysicalDeviceProperties2KHR",
"vkGetPhysicalDeviceQueueFamilyProperties",
"vkGetPhysicalDeviceMemoryProperties",
"vkGetPhysicalDeviceFormatProperties",
"vkGetPhysicalDeviceImageFormatProperties",
"vkGetDeviceQueue",
"vkGetBufferMemoryRequirements",
"vkGetBufferMemoryRequirements2KHR",
"vkGetImageMemoryRequirements",
"vkGetImageMemoryRequirements2KHR",
"vkAllocateMemory",
"vkMapMemory",
"vkUnmapMemory",
"vkAllocateDescriptorSets",
"vkFreeDescriptorSets",
"vkResetDescriptorPool",
"vkAllocateCommandBuffers",
"vkFreeCommandBuffers",
"vkCreateDisplayModeKHR",
"vkCreateSharedSwapchainsKHR",
"vkGetPhysicalDeviceExternalBufferPropertiesKHR",
"vkGetPhysicalDeviceImageFormatProperties2KHR",
"vkGetMemoryAndroidHardwareBufferANDROID",
]
coreFunctions = [f for f in api.functions if not f.isAlias]
specialFuncs = [f for f in coreFunctions if f.name in specialFuncNames]
createFuncs = [f for f in coreFunctions if (f.name[:8] == "vkCreate" or f.name == "vkAllocateMemory") and not f in specialFuncs]
destroyFuncs = [f for f in coreFunctions if (f.name[:9] == "vkDestroy" or f.name == "vkFreeMemory") and not f in specialFuncs]
dummyFuncs = [f for f in coreFunctions if f not in specialFuncs + createFuncs + destroyFuncs]
def getHandle (name):
for handle in api.handles:
if handle.name == name[0]:
return handle
raise Exception("No such handle: %s" % name)
for function in createFuncs:
objectType = function.arguments[-1].type[:-1]
argsStr = ", ".join([a.name for a in function.arguments[:-1]])
yield "VKAPI_ATTR %s VKAPI_CALL %s (%s)" % (function.returnType, getInterfaceName(function), argListToStr(function.arguments))
yield "{"
yield "\tDE_UNREF(%s);" % function.arguments[-2].name
if getHandle(objectType).type == Handle.TYPE_NONDISP:
yield "\tVK_NULL_RETURN((*%s = allocateNonDispHandle<%s, %s>(%s)));" % (function.arguments[-1].name, objectType[0][2:], objectType[0], argsStr)
else:
yield "\tVK_NULL_RETURN((*%s = allocateHandle<%s, %s>(%s)));" % (function.arguments[-1].name, objectType[0][2:], objectType[0], argsStr)
yield "}"
yield ""
for function in destroyFuncs:
objectArg = function.arguments[-2]
yield "VKAPI_ATTR %s VKAPI_CALL %s (%s)" % (function.returnType, getInterfaceName(function), argListToStr(function.arguments))
yield "{"
for arg in function.arguments[:-2]:
yield "\tDE_UNREF(%s);" % arg.name
if getHandle(objectArg.type).type == Handle.TYPE_NONDISP:
yield "\tfreeNonDispHandle<%s, %s>(%s, %s);" % (objectArg.getType()[2:], objectArg.getType(), objectArg.name, function.arguments[-1].name)
else:
yield "\tfreeHandle<%s, %s>(%s, %s);" % (objectArg.getType()[2:], objectArg.getType(), objectArg.name, function.arguments[-1].name)
yield "}"
yield ""
for function in dummyFuncs:
yield "VKAPI_ATTR %s VKAPI_CALL %s (%s)" % (function.returnType, getInterfaceName(function), argListToStr(function.arguments))
yield "{"
for arg in function.arguments:
yield "\tDE_UNREF(%s);" % arg.name
if function.returnType != "void":
yield "\treturn VK_SUCCESS;"
yield "}"
yield ""
def genFuncEntryTable (type, name):
funcs = [f for f in api.functions if f.getType() == type]
refFuncs = {}
for f in api.functions:
if f.alias != None:
refFuncs[f.alias] = f
yield "static const tcu::StaticFunctionLibrary::Entry %s[] =" % name
yield "{"
for line in indentLines(["\tVK_NULL_FUNC_ENTRY(%s,\t%s)," % (function.name, getInterfaceName(function if not function.isAlias else refFuncs[function])) for function in funcs]):
yield line
yield "};"
yield ""
# Func tables
for line in genFuncEntryTable(Function.TYPE_PLATFORM, "s_platformFunctions"):
yield line
for line in genFuncEntryTable(Function.TYPE_INSTANCE, "s_instanceFunctions"):
yield line
for line in genFuncEntryTable(Function.TYPE_DEVICE, "s_deviceFunctions"):
yield line
writeInlFile(filename, INL_HEADER, genNullDriverImpl())
def writeTypeUtil (api, filename):
# Structs filled by API queries are not often used in test code
QUERY_RESULT_TYPES = set([
"VkPhysicalDeviceFeatures",
"VkPhysicalDeviceLimits",
"VkFormatProperties",
"VkImageFormatProperties",
"VkPhysicalDeviceSparseProperties",
"VkQueueFamilyProperties",
"VkMemoryType",
"VkMemoryHeap",
])
COMPOSITE_TYPES = set([t.name for t in api.compositeTypes if not t.isAlias])
def isSimpleStruct (type):
def hasArrayMember (type):
for member in type.members:
if member.arraySize != '':
return True
return False
def hasCompositeMember (type):
for member in type.members:
if member.getType() in COMPOSITE_TYPES:
return True
return False
return type.typeClass == CompositeType.CLASS_STRUCT and \
type.members[0].getType() != "VkStructureType" and \
not type.name in QUERY_RESULT_TYPES and \
not hasArrayMember(type) and \
not hasCompositeMember(type)
def gen ():
for type in api.compositeTypes:
if not isSimpleStruct(type) or type.isAlias:
continue
yield ""
yield "inline %s make%s (%s)" % (type.name, type.name[2:], argListToStr(type.members))
yield "{"
yield "\t%s res;" % type.name
for line in indentLines(["\tres.%s\t= %s;" % (m.name, m.name) for m in type.members]):
yield line
yield "\treturn res;"
yield "}"
writeInlFile(filename, INL_HEADER, gen())
def writeSupportedExtensions(api, filename):
def writeExtensionsForVersions(map):
result = []
for version in map:
result.append(" if (coreVersion >= " + str(version) + ")")
result.append(" {")
for extension in map[version]:
result.append(' dst.push_back("' + extension.name + '");')
result.append(" }")
return result
instanceMap = {}
deviceMap = {}
versionSet = set()
for ext in api.extensions:
if ext.versionInCore != None:
if ext.versionInCore[0] == 'INSTANCE':
				extList = instanceMap.get(Version(ext.versionInCore[1:]))
				instanceMap[Version(ext.versionInCore[1:])] = extList + [ext] if extList else [ext]
			else:
				extList = deviceMap.get(Version(ext.versionInCore[1:]))
				deviceMap[Version(ext.versionInCore[1:])] = extList + [ext] if extList else [ext]
versionSet.add(Version(ext.versionInCore[1:]))
lines = addVersionDefines(versionSet) + [
"",
"void getCoreDeviceExtensionsImpl (deUint32 coreVersion, ::std::vector<const char*>&%s)" % (" dst" if len(deviceMap) != 0 else ""),
"{"] + writeExtensionsForVersions(deviceMap) + [
"}",
"",
"void getCoreInstanceExtensionsImpl (deUint32 coreVersion, ::std::vector<const char*>&%s)" % (" dst" if len(instanceMap) != 0 else ""),
"{"] + writeExtensionsForVersions(instanceMap) + [
"}",
""] + removeVersionDefines(versionSet)
writeInlFile(filename, INL_HEADER, lines)
def writeCoreFunctionalities(api, filename):
functionOriginValues = ["FUNCTIONORIGIN_PLATFORM", "FUNCTIONORIGIN_INSTANCE", "FUNCTIONORIGIN_DEVICE"]
lines = addVersionDefines([Version((1, 0, 0)), Version((1, 1, 0))]) + [
"",
'enum FunctionOrigin', '{'] + [line for line in indentLines([
'\t' + functionOriginValues[0] + '\t= 0,',
'\t' + functionOriginValues[1] + ',',
'\t' + functionOriginValues[2]])] + [
"};",
"",
"typedef ::std::pair<const char*, FunctionOrigin> FunctionInfo;",
"typedef ::std::vector<FunctionInfo> FunctionInfosList;",
"typedef ::std::map<deUint32, FunctionInfosList> ApisMap;",
"",
"void initApisMap (ApisMap& apis)",
"{",
" apis.clear();",
" apis.insert(::std::pair<deUint32, FunctionInfosList>(" + str(Version((1, 0, 0))) + ", FunctionInfosList()));",
" apis.insert(::std::pair<deUint32, FunctionInfosList>(" + str(Version((1, 1, 0))) + ", FunctionInfosList()));",
""]
def list10Funcs ():
for fun in api.functions:
if fun.apiVersion == 'VK_VERSION_1_0':
insert = ' apis[' + str(Version((1, 0, 0))) + '].push_back(FunctionInfo("' + fun.name + '",\t' + functionOriginValues[fun.getType()] + '));'
yield insert
def listAllFuncs ():
for fun in api.extensions[0].functions:
insert = ' apis[' + str(Version((1, 1, 0))) + '].push_back(FunctionInfo("' + fun.name + '",\t' + functionOriginValues[fun.getType()] + '));'
yield insert
lines = lines + [line for line in indentLines(list10Funcs())]
lines.append("")
lines = lines + [line for line in indentLines(listAllFuncs())]
lines.append("}")
lines.append("")
lines = lines + removeVersionDefines([Version((1, 0, 0)), Version((1, 1, 0))])
writeInlFile(filename, INL_HEADER, lines)
if __name__ == "__main__":
src = readFile(VULKAN_H)
api = parseAPI(src)
platformFuncs = [Function.TYPE_PLATFORM]
instanceFuncs = [Function.TYPE_INSTANCE]
deviceFuncs = [Function.TYPE_DEVICE]
writeHandleType (api, os.path.join(VULKAN_DIR, "vkHandleType.inl"))
writeBasicTypes (api, os.path.join(VULKAN_DIR, "vkBasicTypes.inl"))
writeCompositeTypes (api, os.path.join(VULKAN_DIR, "vkStructTypes.inl"))
writeInterfaceDecl (api, os.path.join(VULKAN_DIR, "vkVirtualPlatformInterface.inl"), platformFuncs, False)
writeInterfaceDecl (api, os.path.join(VULKAN_DIR, "vkVirtualInstanceInterface.inl"), instanceFuncs, False)
writeInterfaceDecl (api, os.path.join(VULKAN_DIR, "vkVirtualDeviceInterface.inl"), deviceFuncs, False)
writeInterfaceDecl (api, os.path.join(VULKAN_DIR, "vkConcretePlatformInterface.inl"), platformFuncs, True)
writeInterfaceDecl (api, os.path.join(VULKAN_DIR, "vkConcreteInstanceInterface.inl"), instanceFuncs, True)
writeInterfaceDecl (api, os.path.join(VULKAN_DIR, "vkConcreteDeviceInterface.inl"), deviceFuncs, True)
writeFunctionPtrTypes (api, os.path.join(VULKAN_DIR, "vkFunctionPointerTypes.inl"))
writeFunctionPointers (api, os.path.join(VULKAN_DIR, "vkPlatformFunctionPointers.inl"), platformFuncs)
writeFunctionPointers (api, os.path.join(VULKAN_DIR, "vkInstanceFunctionPointers.inl"), instanceFuncs)
writeFunctionPointers (api, os.path.join(VULKAN_DIR, "vkDeviceFunctionPointers.inl"), deviceFuncs)
writeInitFunctionPointers (api, os.path.join(VULKAN_DIR, "vkInitPlatformFunctionPointers.inl"), platformFuncs, lambda f: f.name != "vkGetInstanceProcAddr")
writeInitFunctionPointers (api, os.path.join(VULKAN_DIR, "vkInitInstanceFunctionPointers.inl"), instanceFuncs)
writeInitFunctionPointers (api, os.path.join(VULKAN_DIR, "vkInitDeviceFunctionPointers.inl"), deviceFuncs)
writeFuncPtrInterfaceImpl (api, os.path.join(VULKAN_DIR, "vkPlatformDriverImpl.inl"), platformFuncs, "PlatformDriver")
writeFuncPtrInterfaceImpl (api, os.path.join(VULKAN_DIR, "vkInstanceDriverImpl.inl"), instanceFuncs, "InstanceDriver")
writeFuncPtrInterfaceImpl (api, os.path.join(VULKAN_DIR, "vkDeviceDriverImpl.inl"), deviceFuncs, "DeviceDriver")
writeStrUtilProto (api, os.path.join(VULKAN_DIR, "vkStrUtil.inl"))
writeStrUtilImpl (api, os.path.join(VULKAN_DIR, "vkStrUtilImpl.inl"))
writeRefUtilProto (api, os.path.join(VULKAN_DIR, "vkRefUtil.inl"))
writeRefUtilImpl (api, os.path.join(VULKAN_DIR, "vkRefUtilImpl.inl"))
writeStructTraitsImpl (api, os.path.join(VULKAN_DIR, "vkGetStructureTypeImpl.inl"))
writeNullDriverImpl (api, os.path.join(VULKAN_DIR, "vkNullDriverImpl.inl"))
writeTypeUtil (api, os.path.join(VULKAN_DIR, "vkTypeUtil.inl"))
	writeSupportedExtensions (api, os.path.join(VULKAN_DIR, "vkSupportedExtensions.inl"))
writeCoreFunctionalities (api, os.path.join(VULKAN_DIR, "vkCoreFunctionalities.inl"))
| 37.729239
| 225
| 0.694281
|
import os
import re
import sys
import copy
from itertools import chain
from collections import OrderedDict
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "..", "scripts"))
from build.common import DEQP_DIR
from khr_util.format import indentLines, writeInlFile
VULKAN_H = os.path.join(os.path.dirname(__file__), "src", "vulkan.h.in")
VULKAN_DIR = os.path.join(os.path.dirname(__file__), "..", "framework", "vulkan")
INL_HEADER = """\
/* WARNING: This is auto-generated file. Do not modify, since changes will
* be lost! Modify the generating script instead.
*/\
"""
DEFINITIONS = [
("VK_API_VERSION_1_0", "deUint32"),
("VK_API_VERSION_1_1", "deUint32"),
("VK_MAX_PHYSICAL_DEVICE_NAME_SIZE", "size_t"),
("VK_MAX_EXTENSION_NAME_SIZE", "size_t"),
("VK_MAX_DRIVER_NAME_SIZE_KHR", "size_t"),
("VK_MAX_DRIVER_INFO_SIZE_KHR", "size_t"),
("VK_UUID_SIZE", "size_t"),
("VK_LUID_SIZE", "size_t"),
("VK_MAX_MEMORY_TYPES", "size_t"),
("VK_MAX_MEMORY_HEAPS", "size_t"),
("VK_MAX_DESCRIPTION_SIZE", "size_t"),
("VK_MAX_DEVICE_GROUP_SIZE", "size_t"),
("VK_ATTACHMENT_UNUSED", "deUint32"),
("VK_SUBPASS_EXTERNAL", "deUint32"),
("VK_QUEUE_FAMILY_IGNORED", "deUint32"),
("VK_QUEUE_FAMILY_EXTERNAL", "deUint32"),
("VK_REMAINING_MIP_LEVELS", "deUint32"),
("VK_REMAINING_ARRAY_LAYERS", "deUint32"),
("VK_WHOLE_SIZE", "vk::VkDeviceSize"),
("VK_TRUE", "vk::VkBool32"),
("VK_FALSE", "vk::VkBool32"),
]
PLATFORM_TYPES = [
(["Display","*"], ["XlibDisplayPtr"], "void*"),
(["Window"], ["XlibWindow"], "deUintptr",),
(["VisualID"], ["XlibVisualID"], "deUint32"),
(["xcb_connection_t", "*"], ["XcbConnectionPtr"], "void*"),
(["xcb_window_t"], ["XcbWindow"], "deUintptr"),
(["xcb_visualid_t"], ["XcbVisualid"], "deUint32"),
(["struct", "wl_display","*"], ["WaylandDisplayPtr"], "void*"),
(["struct", "wl_surface", "*"], ["WaylandSurfacePtr"], "void*"),
(["MirConnection", "*"], ["MirConnectionPtr"], "void*"),
(["MirSurface", "*"], ["MirSurfacePtr"], "void*"),
(["ANativeWindow", "*"], ["AndroidNativeWindowPtr"], "void*"),
(["HINSTANCE"], ["Win32InstanceHandle"], "void*"),
(["HWND"], ["Win32WindowHandle"], "void*"),
(["HANDLE"], ["Win32Handle"], "void*"),
(["const", "SECURITY_ATTRIBUTES", "*"], ["Win32SecurityAttributesPtr"], "const void*"),
(["AHardwareBuffer", "*"], ["AndroidHardwareBufferPtr"], "void*"),
(["RROutput"], ["RROutput"], "void*")
]
PLATFORM_TYPE_NAMESPACE = "pt"
TYPE_SUBSTITUTIONS = [
("uint8_t", "deUint8"),
("uint16_t", "deUint16"),
("uint32_t", "deUint32"),
("uint64_t", "deUint64"),
("int8_t", "deInt8"),
("int16_t", "deInt16"),
("int32_t", "deInt32"),
("int64_t", "deInt64"),
("bool32_t", "deUint32"),
("size_t", "deUintptr"),
("DWORD", "deUint32"),
("HANDLE*", PLATFORM_TYPE_NAMESPACE + "::" + "Win32Handle*"),
("LPCWSTR", "char*"),
]
EXTENSION_POSTFIXES = ["KHR", "EXT", "NV", "NVX", "KHX", "NN", "MVK"]
EXTENSION_POSTFIXES_STANDARD = ["KHR"]
def prefixName (prefix, name):
name = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', name[2:])
name = re.sub(r'([a-zA-Z])([0-9])', r'\1_\2', name)
name = name.upper()
name = name.replace("YCB_CR_", "YCBCR_")
name = name.replace("WIN_32_", "WIN32_")
name = name.replace("8_BIT_", "8BIT_")
name = name.replace("16_BIT_", "16BIT_")
name = name.replace("INT_64_", "INT64_")
name = name.replace("D_3_D_12_", "D3D12_")
name = name.replace("IOSSURFACE_", "IOS_SURFACE_")
name = name.replace("MAC_OS", "MACOS_")
name = name.replace("TEXTURE_LOD", "TEXTURE_LOD_")
name = name.replace("VIEWPORT_W", "VIEWPORT_W_")
name = name.replace("_IDPROPERTIES", "_ID_PROPERTIES")
name = name.replace("PHYSICAL_DEVICE_FLOAT_16_INT_8_FEATURES", "PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES")
return prefix + name
class Version:
def __init__ (self, versionTuple):
self.major = versionTuple[0]
self.minor = versionTuple[1]
self.patch = versionTuple[2]
def getInHex (self):
if self.major == 1 and self.minor == 0 and self.patch == 0:
return "VK_API_VERSION_1_0"
elif self.major == 1 and self.minor == 1 and self.patch == 0:
return "VK_API_VERSION_1_1"
else:
hex = (self.major << 22) | (self.minor << 12) | self.patch
return '0x%Xu' % (hex)
def isStandardVersion (self):
if self.patch != 0:
return False
if self.major != 1:
return False
if self.minor != 1 and self.minor != 0:
return False
return True
def getBestRepresentation (self):
if self.isStandardVersion():
return self.getInHex()
return self.getDefineName()
def getDefineName (self):
return 'VERSION_%d_%d_%d' % (self.major, self.minor, self.patch)
def __hash__ (self):
return (self.major << 22) | (self.minor << 12) | self.patch
def __eq__ (self, other):
return self.major == other.major and self.minor == other.minor and self.patch == other.patch
def __str__ (self):
return self.getBestRepresentation()
class Handle:
TYPE_DISP = 0
TYPE_NONDISP = 1
def __init__ (self, type, name):
self.type = type
self.name = name
self.alias = None
self.isAlias = False
def getHandleType (self):
return prefixName("HANDLE_TYPE_", self.name)
def checkAliasValidity (self):
pass
def __repr__ (self):
return '%s (%s, %s)' % (self.name, self.alias, self.isAlias)
class Definition:
def __init__ (self, type, name, value):
self.type = type
self.name = name
self.value = value
self.alias = None
self.isAlias = False
def __repr__ (self):
return '%s = %s (%s)' % (self.name, self.value, self.type)
class Enum:
def __init__ (self, name, values):
self.name = name
self.values = values
self.alias = None
self.isAlias = False
def checkAliasValidity (self):
if self.alias != None:
if len(self.values) != len(self.alias.values):
raise Exception("%s has different number of flags than its alias %s." % (self.name, self.alias.name))
for index, value in enumerate(self.values):
aliasVal = self.alias.values[index]
if value[1] != aliasVal[1] or not (value[0].startswith(aliasVal[0]) or aliasVal[0].startswith(value[0])):
raise Exception("Flag %s of %s has different value than %s of %s." % (self.alias.values[index], self.alias.name, value, self.name))
def __repr__ (self):
return '%s (%s) %s' % (self.name, self.alias, self.values)
class Bitfield:
def __init__ (self, name, values):
self.name = name
self.values = values
self.alias = None
self.isAlias = False
def checkAliasValidity (self):
if self.alias != None:
if len(self.values) != len(self.alias.values):
raise Exception("%s has different number of flags than its alias %s." % (self.name, self.alias.name))
for index, value in enumerate(self.values):
aliasVal = self.alias.values[index]
if value[1] != aliasVal[1] or not (value[0].startswith(aliasVal[0]) or aliasVal[0].startswith(value[0])):
raise Exception("Flag %s of %s has different value than %s of %s." % (self.alias.values[index], self.alias.name, value, self.name))
def __repr__ (self):
return '%s (%s)' % (self.name, self.alias)
class Variable:
def __init__ (self, type, name, arraySize):
type = type.replace('*',' *').replace('&',' &')
for src, dst in TYPE_SUBSTITUTIONS:
type = type.replace(src, dst)
self.type = type.split(' ')
for platformType, substitute, compat in PLATFORM_TYPES:
range = self.contains(self.type, platformType)
if range != None:
self.type = self.type[:range[0]]+[PLATFORM_TYPE_NAMESPACE + '::' + substitute[0]] + substitute[1:] + self.type[range[1]:]
break
self.name = name
self.arraySize = arraySize
def contains(self, big, small):
for i in xrange(len(big)-len(small)+1):
for j in xrange(len(small)):
if big[i+j] != small[j]:
break
else:
return i, i+len(small)
return None
def getType (self):
return ' '.join(self.type).replace(' *','*').replace(' &','&')
def getAsString (self, separator):
return '%s%s%s%s' % (self.getType(), separator, self.name, self.arraySize)
def __repr__ (self):
return '<%s> <%s> <%s>' % (self.type, self.name, self.arraySize)
def __eq__ (self, other):
if len(self.type) != len(other.type):
return False
for index, type in enumerate(self.type):
if "*" == type or "&" == type or "const" == type or "volatile" == type:
if type != other.type[index]:
return False
elif type != other.type[index] and \
type not in map(lambda ext: other.type[index] + ext, EXTENSION_POSTFIXES_STANDARD) and \
other.type[index] not in map(lambda ext: type + ext, EXTENSION_POSTFIXES_STANDARD):
return False
return self.arraySize == other.arraySize
def __ne__ (self, other):
return not self == other
class CompositeType:
CLASS_STRUCT = 0
CLASS_UNION = 1
def __init__ (self, typeClass, name, members):
self.typeClass = typeClass
self.name = name
self.members = members
self.alias = None
self.isAlias = False
def getClassName (self):
names = {CompositeType.CLASS_STRUCT: 'struct', CompositeType.CLASS_UNION: 'union'}
return names[self.typeClass]
def checkAliasValidity (self):
if self.alias != None:
if len(self.members) != len(self.alias.members):
raise Exception("%s has different number of members than its alias %s." % (self.name, self.alias.name))
            for index, member in enumerate(self.members):
                # Per-member comparison is skipped here; once the member counts match,
                # aliased structs are assumed layout-compatible.
                break
def __repr__ (self):
return '%s (%s)' % (self.name, self.alias)
class Function:
TYPE_PLATFORM = 0
TYPE_INSTANCE = 1
TYPE_DEVICE = 2
def __init__ (self, name, returnType, arguments, apiVersion = None):
self.name = name
self.returnType = returnType
self.arguments = arguments
self.alias = None
self.isAlias = False
self.apiVersion = apiVersion
def getType (self):
if self.name == "vkGetInstanceProcAddr":
return Function.TYPE_PLATFORM
assert len(self.arguments) > 0
firstArgType = self.arguments[0].getType()
if firstArgType in ["VkInstance", "VkPhysicalDevice"]:
return Function.TYPE_INSTANCE
elif firstArgType in ["VkDevice", "VkCommandBuffer", "VkQueue"]:
return Function.TYPE_DEVICE
else:
return Function.TYPE_PLATFORM
def checkAliasValidity (self):
if self.alias != None:
if len(self.arguments) != len(self.alias.arguments):
raise Exception("%s has different number of arguments than its alias %s." % (self.name, self.alias.name))
if self.returnType != self.alias.returnType or not (self.returnType.startswith(self.alias.returnType) or self.alias.returnType.startswith(self.returnType)):
raise Exception("%s has different return value's type than its alias %s." % (self.name, self.alias.name))
for index, argument in enumerate(self.arguments):
if argument != self.alias.arguments[index]:
raise Exception("argument %s: \"%s\" of %s is different than \"%s\" of %s." % (index, self.alias.arguments[index].getAsString(' '), self.alias.name, argument.getAsString(' '), self.name))
def __repr__ (self):
return '%s (%s)' % (self.name, self.alias)
class Extension:
def __init__ (self, name, handles, enums, bitfields, compositeTypes, functions, definitions, additionalDefinitions, versionInCore):
self.name = name
self.definitions = definitions
self.additionalDefs = additionalDefinitions
self.handles = handles
self.enums = enums
self.bitfields = bitfields
self.compositeTypes = compositeTypes
self.functions = functions
self.versionInCore = versionInCore
def __repr__ (self):
return 'EXT:\n%s ->\nENUMS:\n%s\nCOMPOS:\n%s\nFUNCS:\n%s\nBITF:\n%s\nHAND:\n%s\nDEFS:\n%s\n' % (self.name, self.enums, self.compositeTypes, self.functions, self.bitfields, self.handles, self.definitions, self.versionInCore)
class API:
def __init__ (self, definitions, handles, enums, bitfields, compositeTypes, functions, extensions):
self.definitions = definitions
self.handles = handles
self.enums = enums
self.bitfields = bitfields
self.compositeTypes = compositeTypes
self.functions = functions # \note contains extension functions as well
self.extensions = extensions
def readFile (filename):
with open(filename, 'rb') as f:
return f.read()
IDENT_PTRN = r'[a-zA-Z_][a-zA-Z0-9_]*'
TYPE_PTRN = r'[a-zA-Z_][a-zA-Z0-9_ \t*&]*'
def fixupEnumValues (values):
fixed = []
for name, value in values:
if "_BEGIN_RANGE" in name or "_END_RANGE" in name:
continue
fixed.append((name, value))
return fixed
def getInterfaceName (function):
assert function.name[:2] == "vk"
return function.name[2].lower() + function.name[3:]
def getFunctionTypeName (function):
assert function.name[:2] == "vk"
return function.name[2:] + "Func"
def endsWith (str, postfix):
return str[-len(postfix):] == postfix
def splitNameExtPostfix (name):
knownExtPostfixes = EXTENSION_POSTFIXES
for postfix in knownExtPostfixes:
if endsWith(name, postfix):
return (name[:-len(postfix)], postfix)
return (name, "")
def getBitEnumNameForBitfield (bitfieldName):
bitfieldName, postfix = splitNameExtPostfix(bitfieldName)
assert bitfieldName[-1] == "s"
return bitfieldName[:-1] + "Bits" + postfix
def getBitfieldNameForBitEnum (bitEnumName):
bitEnumName, postfix = splitNameExtPostfix(bitEnumName)
assert bitEnumName[-4:] == "Bits"
return bitEnumName[:-4] + "s" + postfix
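# Example (illustrative): the two helpers invert each other, e.g.
# getBitEnumNameForBitfield("VkQueueFlags") -> "VkQueueFlagBits" and
# getBitfieldNameForBitEnum("VkQueueFlagBits") -> "VkQueueFlags".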
def parsePreprocDefinedValue (src, name):
value = parsePreprocDefinedValueOptional(src, name)
if value is None:
raise Exception("No such definition: %s" % name)
return value
def parsePreprocDefinedValueOptional (src, name):
    definition = re.search(r'#\s*define\s+' + name + r'\s+([^\n]+)\n', src)  # reconstructed pattern: match a '#define <name> <value>' line
if definition is None:
return None
value = definition.group(1).strip()
if value == "UINT32_MAX":
value = "(~0u)"
return value
def parseEnum (name, src):
keyValuePtrn = '(' + IDENT_PTRN + r')\s*=\s*([^\s,}]+)\s*[,}]'
matches = re.findall(keyValuePtrn, src)
return Enum(name, fixupEnumValues(matches))
# \note Parses raw enums, some are mapped to bitfields later
def parseEnums (src):
matches = re.findall(r'typedef enum(\s*' + IDENT_PTRN + r')?\s*{([^}]*)}\s*(' + IDENT_PTRN + r')\s*;', src)
enums = []
for enumname, contents, typename in matches:
enums.append(parseEnum(typename, contents))
return enums
def parseCompositeType (type, name, src):
typeNamePtrn = r'(' + TYPE_PTRN + r')(\s+' + IDENT_PTRN + r')((\[[^\]]+\])*)\s*;'
matches = re.findall(typeNamePtrn, src)
members = [Variable(t.strip(), n.strip(), a.strip()) for t, n, a, _ in matches]
return CompositeType(type, name, members)
def parseCompositeTypes (src):
typeMap = { 'struct': CompositeType.CLASS_STRUCT, 'union': CompositeType.CLASS_UNION }
matches = re.findall(r'typedef (struct|union)(\s*' + IDENT_PTRN + r')?\s*{([^}]*)}\s*(' + IDENT_PTRN + r')\s*;', src)
types = []
for type, structname, contents, typename in matches:
types.append(parseCompositeType(typeMap[type], typename, contents))
return types
def parseHandles (src):
matches = re.findall(r'VK_DEFINE(_NON_DISPATCHABLE|)_HANDLE\((' + IDENT_PTRN + r')\)[ \t]*[\n\r]', src)
handles = []
typeMap = {'': Handle.TYPE_DISP, '_NON_DISPATCHABLE': Handle.TYPE_NONDISP}
for type, name in matches:
handle = Handle(typeMap[type], name)
handles.append(handle)
return handles
def parseArgList (src):
typeNamePtrn = r'(' + TYPE_PTRN + r')(\s+' + IDENT_PTRN + r')((\[[^\]]+\])*)\s*'
args = []
for rawArg in src.split(','):
m = re.search(typeNamePtrn, rawArg)
args.append(Variable(m.group(1).strip(), m.group(2).strip(), m.group(3)))
return args
def removeTypeExtPostfix (name):
for extPostfix in EXTENSION_POSTFIXES_STANDARD:
if endsWith(name, extPostfix):
return name[0:-len(extPostfix)]
return None
def populateAliases (objects):
objectsByName = {}
for object in objects:
objectsByName[object.name] = object
for object in objects:
withoutPostfix = removeTypeExtPostfix(object.name)
if withoutPostfix != None and withoutPostfix in objectsByName:
objectsByName[withoutPostfix].alias = object
object.isAlias = True
for object in objects:
object.checkAliasValidity()
def populateAliasesWithTypedefs (objects, src):
objectsByName = {}
for object in objects:
objectsByName[object.name] = object
ptrn = r'\s*typedef\s+' + object.name + r'\s+([^;]+)'
stash = re.findall(ptrn, src)
if len(stash) == 1:
objExt = copy.deepcopy(object)
objExt.name = stash[0]
object.alias = objExt
objExt.isAlias = True
objects.append(objExt)
def removeAliasedValues (enum):
valueByName = {}
for name, value in enum.values:
valueByName[name] = value
def removeDefExtPostfix (name):
for extPostfix in EXTENSION_POSTFIXES:
if endsWith(name, "_" + extPostfix):
return name[0:-(len(extPostfix)+1)]
return None
newValues = []
for name, value in enum.values:
withoutPostfix = removeDefExtPostfix(name)
if withoutPostfix != None and withoutPostfix in valueByName and valueByName[withoutPostfix] == value:
continue
newValues.append((name, value))
enum.values = newValues
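# Example (illustrative): after promotion to core, an enum may carry both
# FOO_BIT = 0x1 and FOO_BIT_KHR = 0x1; removeAliasedValues drops the
# postfixed duplicate so only FOO_BIT remains.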
def parseFunctions (src):
ptrn = r'VKAPI_ATTR\s+(' + TYPE_PTRN + ')\s+VKAPI_CALL\s+(' + IDENT_PTRN + r')\s*\(([^)]*)\)\s*;'
matches = re.findall(ptrn, src)
functions = []
for returnType, name, argList in matches:
functions.append(Function(name.strip(), returnType.strip(), parseArgList(argList)))
return functions
def parseFunctionsByVersion (src):
ptrnVer10 = 'VK_VERSION_1_0 1'
ptrnVer11 = 'VK_VERSION_1_1 1'
matchVer10 = re.search(ptrnVer10, src)
matchVer11 = re.search(ptrnVer11, src)
ptrn = r'VKAPI_ATTR\s+(' + TYPE_PTRN + ')\s+VKAPI_CALL\s+(' + IDENT_PTRN + r')\s*\(([^)]*)\)\s*;'
regPtrn = re.compile(ptrn)
matches = regPtrn.findall(src, matchVer10.start(), matchVer11.start())
functions = []
for returnType, name, argList in matches:
functions.append(Function(name.strip(), returnType.strip(), parseArgList(argList), 'VK_VERSION_1_0'))
matches = regPtrn.findall(src, matchVer11.start())
for returnType, name, argList in matches:
functions.append(Function(name.strip(), returnType.strip(), parseArgList(argList), 'VK_VERSION_1_1'))
return functions
def splitByExtension (src):
    ptrn = r'#define\s+[A-Z0-9_]+_EXTENSION_NAME\s+"([^"]+)"'  # reconstructed: capture each extension's name string
match = "#define\s+("
for part in re.finditer(ptrn, src):
match += part.group(1)+"|"
match = match[:-1] + ")\s+1"
parts = re.split(match, src)
# First part is core
byExtension = [(None, parts[0])]
for ndx in range(1, len(parts), 2):
byExtension.append((parts[ndx], parts[ndx+1]))
return byExtension
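# Shape of the result (illustrative): [(None, coreSrc), ("VK_KHR_surface", srcAfterGuard), ...]
# -- the first chunk is the core header, each later chunk follows an extension's '#define <name> 1' guard.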
def parseDefinitions (extensionName, src):
def skipDefinition (extensionName, definition):
if extensionName == None:
return True
# SPEC_VERSION enums
if definition[0].startswith(extensionName.upper()) and definition[1].isdigit():
return False
if definition[0].startswith(extensionName.upper()):
return True
if definition[1].isdigit():
return True
return False
ptrn = r'#define\s+([^\s]+)\s+([^\r\n]+)'
matches = re.findall(ptrn, src)
return [Definition(None, match[0], match[1]) for match in matches if not skipDefinition(extensionName, match)]
def parseExtensions (src, allFunctions, allCompositeTypes, allEnums, allBitfields, allHandles, allDefinitions):
def getCoreVersion (extensionTuple):
if not extensionTuple[0]:
return None
ptrn = r'\/\/\s*' + extensionTuple[0] + r'\s+(DEVICE|INSTANCE)\s+([0-9_]+)'
coreVersion = re.search(ptrn, extensionTuple[1], re.I)
if coreVersion != None:
return [coreVersion.group(1)] + [int(number) for number in coreVersion.group(2).split('_')[:3]]
return None
splitSrc = splitByExtension(src)
extensions = []
functionsByName = {function.name: function for function in allFunctions}
compositeTypesByName = {compType.name: compType for compType in allCompositeTypes}
enumsByName = {enum.name: enum for enum in allEnums}
bitfieldsByName = {bitfield.name: bitfield for bitfield in allBitfields}
handlesByName = {handle.name: handle for handle in allHandles}
definitionsByName = {definition.name: definition for definition in allDefinitions}
for extensionName, extensionSrc in splitSrc:
definitions = [Definition(type, name, parsePreprocDefinedValueOptional(extensionSrc, name)) for name, type in DEFINITIONS]
definitions = [definition for definition in definitions if definition.value != None]
additionalDefinitions = parseDefinitions(extensionName, extensionSrc)
handles = parseHandles(extensionSrc)
functions = parseFunctions(extensionSrc)
compositeTypes = parseCompositeTypes(extensionSrc)
rawEnums = parseEnums(extensionSrc)
bitfieldNames = parseBitfieldNames(extensionSrc)
enumBitfieldNames = [getBitEnumNameForBitfield(name) for name in bitfieldNames]
enums = [enum for enum in rawEnums if enum.name not in enumBitfieldNames]
extCoreVersion = getCoreVersion((extensionName, extensionSrc))
extFunctions = [functionsByName[function.name] for function in functions]
extCompositeTypes = [compositeTypesByName[compositeType.name] for compositeType in compositeTypes]
extEnums = [enumsByName[enum.name] for enum in enums]
extBitfields = [bitfieldsByName[bitfieldName] for bitfieldName in bitfieldNames]
extHandles = [handlesByName[handle.name] for handle in handles]
extDefinitions = [definitionsByName[definition.name] for definition in definitions]
extensions.append(Extension(extensionName, extHandles, extEnums, extBitfields, extCompositeTypes, extFunctions, extDefinitions, additionalDefinitions, extCoreVersion))
return extensions
def parseBitfieldNames (src):
ptrn = r'typedef\s+VkFlags\s(' + IDENT_PTRN + r')\s*;'
matches = re.findall(ptrn, src)
return matches
def parseAPI (src):
definitions = [Definition(type, name, parsePreprocDefinedValue(src, name)) for name, type in DEFINITIONS]
handles = parseHandles(src)
rawEnums = parseEnums(src)
bitfieldNames = parseBitfieldNames(src)
enums = []
bitfields = []
bitfieldEnums = set([getBitEnumNameForBitfield(n) for n in bitfieldNames if getBitEnumNameForBitfield(n) in [enum.name for enum in rawEnums]])
compositeTypes = parseCompositeTypes(src)
allFunctions = parseFunctionsByVersion(src)
for enum in rawEnums:
if enum.name in bitfieldEnums:
bitfields.append(Bitfield(getBitfieldNameForBitEnum(enum.name), enum.values))
else:
enums.append(enum)
for bitfieldName in bitfieldNames:
if not bitfieldName in [bitfield.name for bitfield in bitfields]:
# Add empty bitfield
bitfields.append(Bitfield(bitfieldName, []))
# Populate alias fields
populateAliasesWithTypedefs(compositeTypes, src)
populateAliasesWithTypedefs(enums, src)
populateAliasesWithTypedefs(bitfields, src)
populateAliases(allFunctions)
populateAliases(handles)
populateAliases(enums)
populateAliases(bitfields)
populateAliases(compositeTypes)
for enum in enums:
removeAliasedValues(enum)
extensions = parseExtensions(src, allFunctions, compositeTypes, enums, bitfields, handles, definitions)
return API(
definitions = definitions,
handles = handles,
enums = enums,
bitfields = bitfields,
compositeTypes = compositeTypes,
functions = allFunctions,
extensions = extensions)
def splitUniqueAndDuplicatedEntries (handles):
listOfUniqueHandles = []
duplicates = OrderedDict()
for handle in handles:
if handle.alias != None:
duplicates[handle.alias] = handle
if not handle.isAlias:
listOfUniqueHandles.append(handle)
return listOfUniqueHandles, duplicates
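# Note: 'duplicates' maps each promoted alias (e.g. a KHR-suffixed handle) to its
# core counterpart, letting generators emit the alias entry as '= <core value>'.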
def writeHandleType (api, filename):
    uniqueHandles, duplicatedHandles = splitUniqueAndDuplicatedEntries(api.handles)
    def genHandles ():
        yield "\t%s\t= 0," % uniqueHandles[0].getHandleType()
        for handle in uniqueHandles[1:]:
            yield "\t%s," % handle.getHandleType()
        for duplicate in duplicatedHandles:
            yield "\t%s\t= %s," % (duplicate.getHandleType(), duplicatedHandles[duplicate].getHandleType())
        yield "\tHANDLE_TYPE_LAST\t= %s + 1" % (uniqueHandles[-1].getHandleType())
def genHandlesBlock ():
yield "enum HandleType"
yield "{"
for line in indentLines(genHandles()):
yield line
yield "};"
yield ""
writeInlFile(filename, INL_HEADER, genHandlesBlock())
def getEnumValuePrefix (enum):
prefix = enum.name[0]
for i in range(1, len(enum.name)):
if enum.name[i].isupper() and not enum.name[i-1].isupper():
prefix += "_"
prefix += enum.name[i].upper()
return prefix
def parseInt (value):
if value[:2] == "0x":
return int(value, 16)
else:
return int(value, 10)
def areEnumValuesLinear (enum):
curIndex = 0
for name, value in enum.values:
if parseInt(value) != curIndex:
return False
curIndex += 1
return True
def genEnumSrc (enum):
yield "enum %s" % enum.name
yield "{"
for line in indentLines(["\t%s\t= %s," % v for v in enum.values]):
yield line
if areEnumValuesLinear(enum):
yield ""
yield "\t%s_LAST" % getEnumValuePrefix(enum)
yield "};"
def genBitfieldSrc (bitfield):
if len(bitfield.values) > 0:
yield "enum %s" % getBitEnumNameForBitfield(bitfield.name)
yield "{"
for line in indentLines(["\t%s\t= %s," % v for v in bitfield.values]):
yield line
yield "};"
yield "typedef deUint32 %s;" % bitfield.name
def genCompositeTypeSrc (type):
yield "%s %s" % (type.getClassName(), type.name)
yield "{"
for line in indentLines(['\t'+m.getAsString('\t')+';' for m in type.members]):
yield line
yield "};"
def genHandlesSrc (handles):
    uniqueHandles, duplicatedHandles = splitUniqueAndDuplicatedEntries(handles)
    def genLines (handles):
        for handle in uniqueHandles:
if handle.type == Handle.TYPE_DISP:
yield "VK_DEFINE_HANDLE\t(%s,\t%s);" % (handle.name, handle.getHandleType())
elif handle.type == Handle.TYPE_NONDISP:
yield "VK_DEFINE_NON_DISPATCHABLE_HANDLE\t(%s,\t%s);" % (handle.name, handle.getHandleType())
for duplicate in duplicatedHandles:
if duplicate.type == Handle.TYPE_DISP:
yield "VK_DEFINE_HANDLE\t(%s,\t%s);" % (duplicate.name, duplicatedHandles[duplicate].getHandleType())
elif duplicate.type == Handle.TYPE_NONDISP:
yield "VK_DEFINE_NON_DISPATCHABLE_HANDLE\t(%s,\t%s);" % (duplicate.name, duplicatedHandles[duplicate].getHandleType())
for line in indentLines(genLines(handles)):
yield line
def genDefinitionsSrc (definitions):
for line in ["#define %s\t(static_cast<%s>\t(%s))" % (definition.name, definition.type, definition.value) for definition in definitions]:
yield line
def genDefinitionsAliasSrc (definitions):
    # Iterate directly instead of relying on the leaked comprehension variable,
    # so every alias definition is validated (the old form only checked the last one).
    for definition in definitions:
        if definition.value != definitions[definition].value and definition.value != definitions[definition].name:
            raise Exception("Value of %s (%s) is different than core definition value %s (%s)." % (definition.name, definition.value, definitions[definition].name, definitions[definition].value))
        yield "#define %s\t%s" % (definition.name, definitions[definition].name)
def writeBasicTypes (api, filename):
def gen ():
definitionsCore, definitionDuplicates = splitUniqueAndDuplicatedEntries(api.definitions)
for line in indentLines(chain(genDefinitionsSrc(definitionsCore), genDefinitionsAliasSrc(definitionDuplicates))):
yield line
yield ""
for line in genHandlesSrc(api.handles):
yield line
yield ""
for enum in api.enums:
if not enum.isAlias:
for line in genEnumSrc(enum):
yield line
yield ""
for bitfield in api.bitfields:
if not bitfield.isAlias:
for line in genBitfieldSrc(bitfield):
yield line
yield ""
for line in indentLines(["VK_DEFINE_PLATFORM_TYPE(%s,\t%s);" % (s[0], c) for n, s, c in PLATFORM_TYPES]):
yield line
for ext in api.extensions:
if ext.additionalDefs != None:
for definition in ext.additionalDefs:
yield "#define " + definition.name + " " + definition.value
writeInlFile(filename, INL_HEADER, gen())
def writeCompositeTypes (api, filename):
def gen ():
for type in api.compositeTypes:
type.checkAliasValidity()
if not type.isAlias:
for line in genCompositeTypeSrc(type):
yield line
yield ""
writeInlFile(filename, INL_HEADER, gen())
def argListToStr (args):
return ", ".join(v.getAsString(' ') for v in args)
def writeInterfaceDecl (api, filename, functionTypes, concrete):
def genProtos ():
postfix = "" if concrete else " = 0"
for function in api.functions:
if not function.getType() in functionTypes:
continue
if not function.isAlias:
yield "virtual %s\t%s\t(%s) const%s;" % (function.returnType, getInterfaceName(function), argListToStr(function.arguments), postfix)
writeInlFile(filename, INL_HEADER, indentLines(genProtos()))
def writeFunctionPtrTypes (api, filename):
def genTypes ():
for function in api.functions:
yield "typedef VKAPI_ATTR %s\t(VKAPI_CALL* %s)\t(%s);" % (function.returnType, getFunctionTypeName(function), argListToStr(function.arguments))
writeInlFile(filename, INL_HEADER, indentLines(genTypes()))
def writeFunctionPointers (api, filename, functionTypes):
def FunctionsYielder ():
for function in api.functions:
if function.getType() in functionTypes:
                if function.isAlias:
                    # Aliased (extension-suffixed) functions only get their own pointer slot
                    # when they are instance-level functions taking a VkPhysicalDevice.
                    if function.getType() == Function.TYPE_INSTANCE and function.arguments[0].getType() == "VkPhysicalDevice":
                        yield "%s\t%s;" % (getFunctionTypeName(function), getInterfaceName(function))
                else:
                    yield "%s\t%s;" % (getFunctionTypeName(function), getInterfaceName(function))
writeInlFile(filename, INL_HEADER, indentLines(FunctionsYielder()))
def writeInitFunctionPointers (api, filename, functionTypes, cond = None):
def makeInitFunctionPointers ():
for function in api.functions:
if function.getType() in functionTypes and (cond == None or cond(function)):
interfaceName = getInterfaceName(function)
if function.isAlias:
if function.getType() == Function.TYPE_INSTANCE and function.arguments[0].getType() == "VkPhysicalDevice":
yield "m_vk.%s\t= (%s)\tGET_PROC_ADDR(\"%s\");" % (getInterfaceName(function), getFunctionTypeName(function), function.name)
else:
yield "m_vk.%s\t= (%s)\tGET_PROC_ADDR(\"%s\");" % (getInterfaceName(function), getFunctionTypeName(function), function.name)
if function.alias != None:
yield "if (!m_vk.%s)" % (getInterfaceName(function))
yield " m_vk.%s\t= (%s)\tGET_PROC_ADDR(\"%s\");" % (getInterfaceName(function), getFunctionTypeName(function), function.alias.name)
lines = [line.replace(' ', '\t') for line in indentLines(makeInitFunctionPointers())]
writeInlFile(filename, INL_HEADER, lines)
def writeFuncPtrInterfaceImpl (api, filename, functionTypes, className):
def makeFuncPtrInterfaceImpl ():
for function in api.functions:
if function.getType() in functionTypes and not function.isAlias:
yield ""
yield "%s %s::%s (%s) const" % (function.returnType, className, getInterfaceName(function), argListToStr(function.arguments))
yield "{"
if function.name == "vkEnumerateInstanceVersion":
yield " if (m_vk.enumerateInstanceVersion)"
yield " return m_vk.enumerateInstanceVersion(pApiVersion);"
yield ""
yield " *pApiVersion = VK_API_VERSION_1_0;"
yield " return VK_SUCCESS;"
elif function.getType() == Function.TYPE_INSTANCE and function.arguments[0].getType() == "VkPhysicalDevice" and function.alias != None:
yield " vk::VkPhysicalDeviceProperties props;"
yield " m_vk.getPhysicalDeviceProperties(physicalDevice, &props);"
yield " if (props.apiVersion >= VK_API_VERSION_1_1)"
yield " %sm_vk.%s(%s);" % ("return " if function.returnType != "void" else "", getInterfaceName(function), ", ".join(a.name for a in function.arguments))
yield " else"
yield " %sm_vk.%s(%s);" % ("return " if function.returnType != "void" else "", getInterfaceName(function.alias), ", ".join(a.name for a in function.arguments))
else:
yield " %sm_vk.%s(%s);" % ("return " if function.returnType != "void" else "", getInterfaceName(function), ", ".join(a.name for a in function.arguments))
yield "}"
writeInlFile(filename, INL_HEADER, makeFuncPtrInterfaceImpl())
def writeStrUtilProto (api, filename):
def makeStrUtilProto ():
for line in indentLines(["const char*\tget%sName\t(%s value);" % (enum.name[2:], enum.name) for enum in api.enums if not enum.isAlias]):
yield line
yield ""
for line in indentLines(["inline tcu::Format::Enum<%s>\tget%sStr\t(%s value)\t{ return tcu::Format::Enum<%s>(get%sName, value);\t}" % (e.name, e.name[2:], e.name, e.name, e.name[2:]) for e in api.enums if not e.isAlias]):
yield line
yield ""
for line in indentLines(["inline std::ostream&\toperator<<\t(std::ostream& s, %s value)\t{ return s << get%sStr(value);\t}" % (e.name, e.name[2:]) for e in api.enums if not e.isAlias]):
yield line
yield ""
for line in indentLines(["tcu::Format::Bitfield<32>\tget%sStr\t(%s value);" % (bitfield.name[2:], bitfield.name) for bitfield in api.bitfields if not bitfield.isAlias]):
yield line
yield ""
for line in indentLines(["std::ostream&\toperator<<\t(std::ostream& s, const %s& value);" % (s.name) for s in api.compositeTypes if not s.isAlias]):
yield line
writeInlFile(filename, INL_HEADER, makeStrUtilProto())
def writeStrUtilImpl (api, filename):
def makeStrUtilImpl ():
for line in indentLines(["template<> const char*\tgetTypeName<%s>\t(void) { return \"%s\";\t}" % (handle.name, handle.name) for handle in api.handles if not handle.isAlias]):
yield line
yield ""
yield "namespace %s" % PLATFORM_TYPE_NAMESPACE
yield "{"
for line in indentLines("std::ostream& operator<< (std::ostream& s, %s\tv) { return s << tcu::toHex(v.internal); }" % ''.join(s) for n, s, c in PLATFORM_TYPES):
yield line
yield "}"
for enum in api.enums:
if enum.isAlias:
continue
yield ""
yield "const char* get%sName (%s value)" % (enum.name[2:], enum.name)
yield "{"
yield "\tswitch (value)"
yield "\t{"
for line in indentLines(["\t\tcase %s:\treturn \"%s\";" % (n, n) for n, v in enum.values] + ["\t\tdefault:\treturn DE_NULL;"]):
yield line
yield "\t}"
yield "}"
for bitfield in api.bitfields:
if bitfield.isAlias:
continue
yield ""
yield "tcu::Format::Bitfield<32> get%sStr (%s value)" % (bitfield.name[2:], bitfield.name)
yield "{"
if len(bitfield.values) > 0:
yield "\tstatic const tcu::Format::BitDesc s_desc[] ="
yield "\t{"
for line in indentLines(["\t\ttcu::Format::BitDesc(%s,\t\"%s\")," % (n, n) for n, v in bitfield.values]):
yield line
yield "\t};"
yield "\treturn tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));"
else:
yield "\treturn tcu::Format::Bitfield<32>(value, DE_NULL, DE_NULL);"
yield "}"
bitfieldTypeNames = set([bitfield.name for bitfield in api.bitfields])
for type in api.compositeTypes:
if not type.isAlias:
yield ""
yield "std::ostream& operator<< (std::ostream& s, const %s& value)" % type.name
yield "{"
yield "\ts << \"%s = {\\n\";" % type.name
for member in type.members:
memberName = member.name
valFmt = None
newLine = ""
if member.getType() in bitfieldTypeNames:
valFmt = "get%sStr(value.%s)" % (member.getType()[2:], member.name)
elif member.getType() == "const char*" or member.getType() == "char*":
valFmt = "getCharPtrStr(value.%s)" % member.name
elif member.arraySize != '':
if member.name in ["extensionName", "deviceName", "layerName", "description"]:
valFmt = "(const char*)value.%s" % member.name
elif member.getType() == 'char' or member.getType() == 'deUint8':
newLine = "'\\n' << "
valFmt = "tcu::formatArray(tcu::Format::HexIterator<%s>(DE_ARRAY_BEGIN(value.%s)), tcu::Format::HexIterator<%s>(DE_ARRAY_END(value.%s)))" % (member.getType(), member.name, member.getType(), member.name)
else:
if member.name == "memoryTypes" or member.name == "memoryHeaps":
endIter = "DE_ARRAY_BEGIN(value.%s) + value.%sCount" % (member.name, member.name[:-1])
else:
endIter = "DE_ARRAY_END(value.%s)" % member.name
newLine = "'\\n' << "
valFmt = "tcu::formatArray(DE_ARRAY_BEGIN(value.%s), %s)" % (member.name, endIter)
memberName = member.name
else:
valFmt = "value.%s" % member.name
yield ("\ts << \"\\t%s = \" << " % memberName) + newLine + valFmt + " << '\\n';"
yield "\ts << '}';"
yield "\treturn s;"
yield "}"
writeInlFile(filename, INL_HEADER, makeStrUtilImpl())
class ConstructorFunction:
def __init__ (self, type, name, objectType, ifaceArgs, arguments):
self.type = type
self.name = name
self.objectType = objectType
self.ifaceArgs = ifaceArgs
self.arguments = arguments
def getConstructorFunctions (api):
funcs = []
ifacesDict = {
Function.TYPE_PLATFORM: [Variable("const PlatformInterface&", "vk", "")],
Function.TYPE_INSTANCE: [Variable("const InstanceInterface&", "vk", "")],
Function.TYPE_DEVICE: [Variable("const DeviceInterface&", "vk", "")]
}
for function in api.functions:
if function.isAlias:
continue
if (function.name[:8] == "vkCreate" or function.name == "vkAllocateMemory") and not "createInfoCount" in [a.name for a in function.arguments]:
if function.name == "vkCreateDisplayModeKHR":
continue # No way to delete display modes (bug?)
# \todo [pyry] Rather hacky
ifaceArgs = ifacesDict[function.getType()]
if function.name == "vkCreateDevice":
ifaceArgs = [Variable("const PlatformInterface&", "vkp", ""), Variable("VkInstance", "instance", "")] + ifaceArgs
assert (function.arguments[-2].type == ["const", "VkAllocationCallbacks", "*"])
objectType = function.arguments[-1].type[0] #not getType() but type[0] on purpose
arguments = function.arguments[:-1]
funcs.append(ConstructorFunction(function.getType(), getInterfaceName(function), objectType, ifaceArgs, arguments))
return funcs
def addVersionDefines(versionSpectrum):
output = ["#define " + ver.getDefineName() + " " + ver.getInHex() for ver in versionSpectrum if not ver.isStandardVersion()]
return output
def removeVersionDefines(versionSpectrum):
output = ["#undef " + ver.getDefineName() for ver in versionSpectrum if not ver.isStandardVersion()]
return output
def writeRefUtilProto (api, filename):
functions = getConstructorFunctions(api)
def makeRefUtilProto ():
for line in indentLines(["Move<%s>\t%s\t(%s = DE_NULL);" % (function.objectType, function.name, argListToStr(function.ifaceArgs + function.arguments)) for function in functions]):
yield line
writeInlFile(filename, INL_HEADER, makeRefUtilProto())
def writeRefUtilImpl (api, filename):
functions = getConstructorFunctions(api)
def makeRefUtilImpl ():
yield "namespace refdetails"
yield "{"
yield ""
for function in api.functions:
if function.getType() == Function.TYPE_DEVICE \
and (function.name[:9] == "vkDestroy" or function.name == "vkFreeMemory") \
and not function.name == "vkDestroyDevice" \
and not function.isAlias:
objectType = function.arguments[-2].getType()
yield "template<>"
yield "void Deleter<%s>::operator() (%s obj) const" % (objectType, objectType)
yield "{"
yield "\tm_deviceIface->%s(m_device, obj, m_allocator);" % (getInterfaceName(function))
yield "}"
yield ""
yield "} // refdetails"
yield ""
dtorDict = {
Function.TYPE_PLATFORM: "object",
Function.TYPE_INSTANCE: "instance",
Function.TYPE_DEVICE: "device"
}
for function in functions:
deleterArgsString = ''
if function.name == "createDevice":
# createDevice requires two additional parameters to setup VkDevice deleter
deleterArgsString = "vkp, instance, object, " + function.arguments[-1].name
else:
deleterArgsString = "vk, %s, %s" % (dtorDict[function.type], function.arguments[-1].name)
yield "Move<%s> %s (%s)" % (function.objectType, function.name, argListToStr(function.ifaceArgs + function.arguments))
yield "{"
yield "\t%s object = 0;" % function.objectType
yield "\tVK_CHECK(vk.%s(%s));" % (function.name, ", ".join([a.name for a in function.arguments] + ["&object"]))
yield "\treturn Move<%s>(check<%s>(object), Deleter<%s>(%s));" % (function.objectType, function.objectType, function.objectType, deleterArgsString)
yield "}"
yield ""
writeInlFile(filename, INL_HEADER, makeRefUtilImpl())
def writeStructTraitsImpl (api, filename):
def gen ():
for type in api.compositeTypes:
if type.getClassName() == "struct" and type.members[0].name == "sType" and not type.isAlias:
yield "template<> VkStructureType getStructureType<%s> (void)" % type.name
yield "{"
yield "\treturn %s;" % prefixName("VK_STRUCTURE_TYPE_", type.name)
yield "}"
yield ""
writeInlFile(filename, INL_HEADER, gen())
def writeNullDriverImpl (api, filename):
def genNullDriverImpl ():
specialFuncNames = [
"vkCreateGraphicsPipelines",
"vkCreateComputePipelines",
"vkGetInstanceProcAddr",
"vkGetDeviceProcAddr",
"vkEnumeratePhysicalDevices",
"vkEnumerateInstanceExtensionProperties",
"vkEnumerateDeviceExtensionProperties",
"vkGetPhysicalDeviceFeatures",
"vkGetPhysicalDeviceFeatures2KHR",
"vkGetPhysicalDeviceProperties",
"vkGetPhysicalDeviceProperties2KHR",
"vkGetPhysicalDeviceQueueFamilyProperties",
"vkGetPhysicalDeviceMemoryProperties",
"vkGetPhysicalDeviceFormatProperties",
"vkGetPhysicalDeviceImageFormatProperties",
"vkGetDeviceQueue",
"vkGetBufferMemoryRequirements",
"vkGetBufferMemoryRequirements2KHR",
"vkGetImageMemoryRequirements",
"vkGetImageMemoryRequirements2KHR",
"vkAllocateMemory",
"vkMapMemory",
"vkUnmapMemory",
"vkAllocateDescriptorSets",
"vkFreeDescriptorSets",
"vkResetDescriptorPool",
"vkAllocateCommandBuffers",
"vkFreeCommandBuffers",
"vkCreateDisplayModeKHR",
"vkCreateSharedSwapchainsKHR",
"vkGetPhysicalDeviceExternalBufferPropertiesKHR",
"vkGetPhysicalDeviceImageFormatProperties2KHR",
"vkGetMemoryAndroidHardwareBufferANDROID",
]
coreFunctions = [f for f in api.functions if not f.isAlias]
specialFuncs = [f for f in coreFunctions if f.name in specialFuncNames]
createFuncs = [f for f in coreFunctions if (f.name[:8] == "vkCreate" or f.name == "vkAllocateMemory") and not f in specialFuncs]
destroyFuncs = [f for f in coreFunctions if (f.name[:9] == "vkDestroy" or f.name == "vkFreeMemory") and not f in specialFuncs]
dummyFuncs = [f for f in coreFunctions if f not in specialFuncs + createFuncs + destroyFuncs]
def getHandle (name):
for handle in api.handles:
if handle.name == name[0]:
return handle
raise Exception("No such handle: %s" % name)
for function in createFuncs:
objectType = function.arguments[-1].type[:-1]
argsStr = ", ".join([a.name for a in function.arguments[:-1]])
yield "VKAPI_ATTR %s VKAPI_CALL %s (%s)" % (function.returnType, getInterfaceName(function), argListToStr(function.arguments))
yield "{"
yield "\tDE_UNREF(%s);" % function.arguments[-2].name
if getHandle(objectType).type == Handle.TYPE_NONDISP:
yield "\tVK_NULL_RETURN((*%s = allocateNonDispHandle<%s, %s>(%s)));" % (function.arguments[-1].name, objectType[0][2:], objectType[0], argsStr)
else:
yield "\tVK_NULL_RETURN((*%s = allocateHandle<%s, %s>(%s)));" % (function.arguments[-1].name, objectType[0][2:], objectType[0], argsStr)
yield "}"
yield ""
for function in destroyFuncs:
objectArg = function.arguments[-2]
yield "VKAPI_ATTR %s VKAPI_CALL %s (%s)" % (function.returnType, getInterfaceName(function), argListToStr(function.arguments))
yield "{"
for arg in function.arguments[:-2]:
yield "\tDE_UNREF(%s);" % arg.name
if getHandle(objectArg.type).type == Handle.TYPE_NONDISP:
yield "\tfreeNonDispHandle<%s, %s>(%s, %s);" % (objectArg.getType()[2:], objectArg.getType(), objectArg.name, function.arguments[-1].name)
else:
yield "\tfreeHandle<%s, %s>(%s, %s);" % (objectArg.getType()[2:], objectArg.getType(), objectArg.name, function.arguments[-1].name)
yield "}"
yield ""
for function in dummyFuncs:
yield "VKAPI_ATTR %s VKAPI_CALL %s (%s)" % (function.returnType, getInterfaceName(function), argListToStr(function.arguments))
yield "{"
for arg in function.arguments:
yield "\tDE_UNREF(%s);" % arg.name
if function.returnType != "void":
yield "\treturn VK_SUCCESS;"
yield "}"
yield ""
def genFuncEntryTable (type, name):
funcs = [f for f in api.functions if f.getType() == type]
refFuncs = {}
for f in api.functions:
if f.alias != None:
refFuncs[f.alias] = f
yield "static const tcu::StaticFunctionLibrary::Entry %s[] =" % name
yield "{"
for line in indentLines(["\tVK_NULL_FUNC_ENTRY(%s,\t%s)," % (function.name, getInterfaceName(function if not function.isAlias else refFuncs[function])) for function in funcs]):
yield line
yield "};"
yield ""
# Func tables
for line in genFuncEntryTable(Function.TYPE_PLATFORM, "s_platformFunctions"):
yield line
for line in genFuncEntryTable(Function.TYPE_INSTANCE, "s_instanceFunctions"):
yield line
for line in genFuncEntryTable(Function.TYPE_DEVICE, "s_deviceFunctions"):
yield line
writeInlFile(filename, INL_HEADER, genNullDriverImpl())
def writeTypeUtil (api, filename):
# Structs filled by API queries are not often used in test code
QUERY_RESULT_TYPES = set([
"VkPhysicalDeviceFeatures",
"VkPhysicalDeviceLimits",
"VkFormatProperties",
"VkImageFormatProperties",
"VkPhysicalDeviceSparseProperties",
"VkQueueFamilyProperties",
"VkMemoryType",
"VkMemoryHeap",
])
COMPOSITE_TYPES = set([t.name for t in api.compositeTypes if not t.isAlias])
def isSimpleStruct (type):
def hasArrayMember (type):
for member in type.members:
if member.arraySize != '':
return True
return False
def hasCompositeMember (type):
for member in type.members:
if member.getType() in COMPOSITE_TYPES:
return True
return False
return type.typeClass == CompositeType.CLASS_STRUCT and \
type.members[0].getType() != "VkStructureType" and \
not type.name in QUERY_RESULT_TYPES and \
not hasArrayMember(type) and \
not hasCompositeMember(type)
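    # Example (illustrative): VkExtent2D (two scalar members) qualifies as simple;
    # anything starting with an sType member, holding arrays, or nesting other
    # composite types is skipped and gets no make*() helper.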
def gen ():
for type in api.compositeTypes:
if not isSimpleStruct(type) or type.isAlias:
continue
yield ""
yield "inline %s make%s (%s)" % (type.name, type.name[2:], argListToStr(type.members))
yield "{"
yield "\t%s res;" % type.name
for line in indentLines(["\tres.%s\t= %s;" % (m.name, m.name) for m in type.members]):
yield line
yield "\treturn res;"
yield "}"
writeInlFile(filename, INL_HEADER, gen())
def writeSupportedExtensions(api, filename):
def writeExtensionsForVersions(map):
result = []
for version in map:
result.append(" if (coreVersion >= " + str(version) + ")")
result.append(" {")
for extension in map[version]:
result.append(' dst.push_back("' + extension.name + '");')
result.append(" }")
return result
instanceMap = {}
deviceMap = {}
versionSet = set()
for ext in api.extensions:
if ext.versionInCore != None:
            if ext.versionInCore[0] == 'INSTANCE':
                extList = instanceMap.get(Version(ext.versionInCore[1:]))
                instanceMap[Version(ext.versionInCore[1:])] = extList + [ext] if extList else [ext]
            else:
                extList = deviceMap.get(Version(ext.versionInCore[1:]))
                deviceMap[Version(ext.versionInCore[1:])] = extList + [ext] if extList else [ext]
            versionSet.add(Version(ext.versionInCore[1:]))
lines = addVersionDefines(versionSet) + [
"",
"void getCoreDeviceExtensionsImpl (deUint32 coreVersion, ::std::vector<const char*>&%s)" % (" dst" if len(deviceMap) != 0 else ""),
"{"] + writeExtensionsForVersions(deviceMap) + [
"}",
"",
"void getCoreInstanceExtensionsImpl (deUint32 coreVersion, ::std::vector<const char*>&%s)" % (" dst" if len(instanceMap) != 0 else ""),
"{"] + writeExtensionsForVersions(instanceMap) + [
"}",
""] + removeVersionDefines(versionSet)
writeInlFile(filename, INL_HEADER, lines)
def writeCoreFunctionalities(api, filename):
functionOriginValues = ["FUNCTIONORIGIN_PLATFORM", "FUNCTIONORIGIN_INSTANCE", "FUNCTIONORIGIN_DEVICE"]
lines = addVersionDefines([Version((1, 0, 0)), Version((1, 1, 0))]) + [
"",
'enum FunctionOrigin', '{'] + [line for line in indentLines([
'\t' + functionOriginValues[0] + '\t= 0,',
'\t' + functionOriginValues[1] + ',',
'\t' + functionOriginValues[2]])] + [
"};",
"",
"typedef ::std::pair<const char*, FunctionOrigin> FunctionInfo;",
"typedef ::std::vector<FunctionInfo> FunctionInfosList;",
"typedef ::std::map<deUint32, FunctionInfosList> ApisMap;",
"",
"void initApisMap (ApisMap& apis)",
"{",
" apis.clear();",
" apis.insert(::std::pair<deUint32, FunctionInfosList>(" + str(Version((1, 0, 0))) + ", FunctionInfosList()));",
" apis.insert(::std::pair<deUint32, FunctionInfosList>(" + str(Version((1, 1, 0))) + ", FunctionInfosList()));",
""]
def list10Funcs ():
for fun in api.functions:
if fun.apiVersion == 'VK_VERSION_1_0':
insert = ' apis[' + str(Version((1, 0, 0))) + '].push_back(FunctionInfo("' + fun.name + '",\t' + functionOriginValues[fun.getType()] + '));'
yield insert
def listAllFuncs ():
for fun in api.extensions[0].functions:
insert = ' apis[' + str(Version((1, 1, 0))) + '].push_back(FunctionInfo("' + fun.name + '",\t' + functionOriginValues[fun.getType()] + '));'
yield insert
lines = lines + [line for line in indentLines(list10Funcs())]
lines.append("")
lines = lines + [line for line in indentLines(listAllFuncs())]
lines.append("}")
lines.append("")
lines = lines + removeVersionDefines([Version((1, 0, 0)), Version((1, 1, 0))])
writeInlFile(filename, INL_HEADER, lines)
if __name__ == "__main__":
src = readFile(VULKAN_H)
api = parseAPI(src)
platformFuncs = [Function.TYPE_PLATFORM]
instanceFuncs = [Function.TYPE_INSTANCE]
deviceFuncs = [Function.TYPE_DEVICE]
writeHandleType (api, os.path.join(VULKAN_DIR, "vkHandleType.inl"))
writeBasicTypes (api, os.path.join(VULKAN_DIR, "vkBasicTypes.inl"))
writeCompositeTypes (api, os.path.join(VULKAN_DIR, "vkStructTypes.inl"))
writeInterfaceDecl (api, os.path.join(VULKAN_DIR, "vkVirtualPlatformInterface.inl"), platformFuncs, False)
writeInterfaceDecl (api, os.path.join(VULKAN_DIR, "vkVirtualInstanceInterface.inl"), instanceFuncs, False)
writeInterfaceDecl (api, os.path.join(VULKAN_DIR, "vkVirtualDeviceInterface.inl"), deviceFuncs, False)
writeInterfaceDecl (api, os.path.join(VULKAN_DIR, "vkConcretePlatformInterface.inl"), platformFuncs, True)
writeInterfaceDecl (api, os.path.join(VULKAN_DIR, "vkConcreteInstanceInterface.inl"), instanceFuncs, True)
writeInterfaceDecl (api, os.path.join(VULKAN_DIR, "vkConcreteDeviceInterface.inl"), deviceFuncs, True)
writeFunctionPtrTypes (api, os.path.join(VULKAN_DIR, "vkFunctionPointerTypes.inl"))
writeFunctionPointers (api, os.path.join(VULKAN_DIR, "vkPlatformFunctionPointers.inl"), platformFuncs)
writeFunctionPointers (api, os.path.join(VULKAN_DIR, "vkInstanceFunctionPointers.inl"), instanceFuncs)
writeFunctionPointers (api, os.path.join(VULKAN_DIR, "vkDeviceFunctionPointers.inl"), deviceFuncs)
writeInitFunctionPointers (api, os.path.join(VULKAN_DIR, "vkInitPlatformFunctionPointers.inl"), platformFuncs, lambda f: f.name != "vkGetInstanceProcAddr")
writeInitFunctionPointers (api, os.path.join(VULKAN_DIR, "vkInitInstanceFunctionPointers.inl"), instanceFuncs)
writeInitFunctionPointers (api, os.path.join(VULKAN_DIR, "vkInitDeviceFunctionPointers.inl"), deviceFuncs)
writeFuncPtrInterfaceImpl (api, os.path.join(VULKAN_DIR, "vkPlatformDriverImpl.inl"), platformFuncs, "PlatformDriver")
writeFuncPtrInterfaceImpl (api, os.path.join(VULKAN_DIR, "vkInstanceDriverImpl.inl"), instanceFuncs, "InstanceDriver")
writeFuncPtrInterfaceImpl (api, os.path.join(VULKAN_DIR, "vkDeviceDriverImpl.inl"), deviceFuncs, "DeviceDriver")
writeStrUtilProto (api, os.path.join(VULKAN_DIR, "vkStrUtil.inl"))
writeStrUtilImpl (api, os.path.join(VULKAN_DIR, "vkStrUtilImpl.inl"))
writeRefUtilProto (api, os.path.join(VULKAN_DIR, "vkRefUtil.inl"))
writeRefUtilImpl (api, os.path.join(VULKAN_DIR, "vkRefUtilImpl.inl"))
writeStructTraitsImpl (api, os.path.join(VULKAN_DIR, "vkGetStructureTypeImpl.inl"))
writeNullDriverImpl (api, os.path.join(VULKAN_DIR, "vkNullDriverImpl.inl"))
writeTypeUtil (api, os.path.join(VULKAN_DIR, "vkTypeUtil.inl"))
    writeSupportedExtensions (api, os.path.join(VULKAN_DIR, "vkSupportedExtensions.inl"))
writeCoreFunctionalities (api, os.path.join(VULKAN_DIR, "vkCoreFunctionalities.inl"))
"""
========================================================
Statistic the SV Stat after AGE Process
========================================================
Author: Shujia Huang & Siyang Liu
Date : 2014-03-07 0idx:54:15
"""
import sys
import re
import os
import string
import numpy as np
import matplotlib.pyplot as plt
def DrawFig(figureFile, distance, properDepth, imProperDepth, nr, aa, bb, mscore, misprob, aveIden, inbCoe):
fig = plt.figure(num=None, figsize=(16, 30), facecolor='w', edgecolor='k')
title = ['Distance distribution', 'NRatio', 'Perfect Depth', 'Imperfect depth', '', '', '']
    ylabel = ['The position of breakpoint', 'N Ratio of variants', \
'Perfect Depth', 'Both ImPerfect Depth', 'InbreedCoefficient', \
'Map score', 'Mismapping Probability' , 'Average Identity', \
'ProperReadDepth', 'ImProperReadDepth']
al = 0.5
for i, data in enumerate ([distance, nr, aa, bb, inbCoe, mscore, misprob, aveIden, properDepth, imProperDepth ]):
plt.subplot(10,2,2 * i + 1)
#plt.title(title[i], fontsize=16)
P = data[:,0] == 1; N = data[:,0] == 2; X = data[:,0] == 3
plt.scatter(data[:,1][N], data[:,2][N], marker='o', c = 'r', alpha=al, linewidths = 0.1, label = 'Negative(%d)'%len(data[:,1][N])) # Negative
plt.scatter(data[:,1][P], data[:,2][P], marker='o', c = 'g', alpha=al, linewidths = 0.1, label = 'Positive(%d)'%len(data[:,1][P])) # Positive
plt.scatter(data[:,1][X], data[:,2][X], marker='*', c = 'Y', alpha=al, linewidths = 0.1, label = 'Positive->Negative(%d)' % len(data[:,1][X])) # Positive->Negative
plt.legend(loc='upper right')
plt.xlim(-10, 50)
if i == 9: plt.xlabel('Score', fontsize=16)
plt.ylabel(ylabel[i], fontsize=16)
plt.subplot(10, 2, 2*i + 2)
NEW = data[:,0] == 0
good = data[:,1][NEW] >= VQ_CUTOFF
bad = data[:,1][NEW] < VQ_CUTOFF
plt.scatter(data[:,1][NEW][bad], data[:,2][NEW][bad], marker='o', c = 'm', alpha=al, linewidths = 0.1, label = 'bad(%d)' % len(data[:,1][NEW][bad])) # bad
plt.scatter(data[:,1][NEW][good], data[:,2][NEW][good], marker='o', c = 'b', alpha=al, linewidths = 0.1, label = 'good(%d)' % len(data[:,1][NEW][good])) # good
plt.xlim(-3, 30)
plt.legend(loc='upper right')
if i == 9: plt.xlabel('Score', fontsize=16)
fig.savefig(figureFile + '.png')
#fig.savefig(figureFile + '.pdf')
def DrawPhredScale (figureFile, phredScal):
fig = plt.figure()
ylabel = ['Phred Scale']
for i, data in enumerate ([phredScal ]):
plt.subplot(2, 1, 2 * i + 1)
P = data[:,0] == 1; N = data[:,0] == 2; X = data[:,0] == 3
plt.scatter(data[:,1][N], data[:,2][N], marker='o', c = 'r', alpha=0.5, linewidths = 0, label = 'Negative(%d)'%len(data[:,1][N])) # Negative
plt.scatter(data[:,1][P], data[:,2][P], marker='o', c = 'g', alpha=0.5, linewidths = 0, label = 'Positive(%d)'%len(data[:,1][P])) # Positive
plt.scatter(data[:,1][X], data[:,2][X], marker='o', c = 'Y', alpha=0.5, linewidths = 0, label = 'Positive->Negative(%d)' % len(data[:,1][X])) # Positive->Negative
plt.legend(loc='upper left')
plt.ylabel(ylabel[i], fontsize=16)
plt.subplot(2, 1, 2*i + 2)
NEW = data[:,0] == 0
good = data[:,1][NEW] >= VQ_CUTOFF
bad = data[:,1][NEW] < VQ_CUTOFF
plt.scatter(data[:,1][NEW][bad] , data[:,2][NEW][bad] , marker='o', c = 'm', alpha=0.5, linewidths = 0, label = 'bad(%d)' % len(data[:,1][NEW][bad])) # bad
plt.scatter(data[:,1][NEW][good], data[:,2][NEW][good], marker='o', c = 'b', alpha=0.5, linewidths = 0, label = 'good(%d)' % len(data[:,1][NEW][good])) # good
plt.legend(loc='upper left')
plt.xlabel('Score' , fontsize=16)
plt.ylabel(ylabel[i], fontsize=16)
fig.savefig(figureFile + '.png')
#fig.savefig(figureFile + '.pdf')
def Accum (data, isBig = False):
    tmpD = data  # note: this aliases (and mutates) the caller's dict
k = sorted(tmpD.keys(), key = lambda d: float(d))
dat = []
for i in range(len(k)):
if isBig:
for j in range(i,len(k)): tmpD[k[i]][1] += tmpD[k[j]][0]
else:
for j in range(i+1): tmpD[k[i]][1] += tmpD[k[j]][0]
dat.append([float(k[i]), float(tmpD[k[i]][0]), float(tmpD[k[i]][1]) ])
return dat
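# Example (illustrative): Accum({'1': [2, 0], '2': [3, 0]}) returns
# [[1.0, 2.0, 2.0], [2.0, 3.0, 5.0]] -- per-key counts plus a running total,
# accumulated in place from the smallest key upward.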
def SampleFaLen (faLenFile):
if faLenFile[-3:] == '.gz': I = os.popen('gzip -dc %s' % faLenFile)
else : I = open(faLenFile)
data = {}
while 1:
lines = I.readlines (100000)
if not lines: break
for line in lines:
col = line.strip('\n').split()
data[col[0]] = string.atoi(col[1])
I.close()
return data
def LoadFaLen (faLenLstFile):
data = {}
I = open (faLenLstFile)
for line in I.readlines():
        if len(line.strip('\n').split()) != 2: raise ValueError('[ERROR] The format of the Fa length list may be wrong. Each line should be "sample FaLengthFile", but found: %s' % line)
sampleId, fileName = line.strip('\n').split()
if sampleId not in data: data[sampleId] = {}
data[sampleId] = SampleFaLen(fileName)
I.close()
return data
def main (argv):
qFaLen = LoadFaLen(argv[1])
figPrefix = 'test'
if len(argv) > 2: figPrefix = argv[2]
if argv[0][-3:] == '.gz':
I = os.popen('gzip -dc %s' % argv[0])
else:
I = open (argv[0])
s, annotations, mark = set(), [], []
print '#Chr\tPosition\tDistance\tLeftIden\tRightIden\tAveIden\tN-Ratio\tAA'
while 1: # VCF format
lines = I.readlines(100000)
if not lines: break
for line in lines:
col = line.strip('\n').split()
if re.search(r'^#CHROM', line): col2sam = { i+9:sam for i,sam in enumerate(col[9:]) }
if re.search(r'^#', line): continue
key = col[0] + ':' + col[1]
if key in s: continue
s.add(key)
#if re.search(r'^PASS', col[6]): continue
#if not re.search(r'_TRAIN_SITE', col[7]): continue
#if not re.search(r'^PASS', col[6]): continue
isbad = False
for i, sample in enumerate (col[9:]):
if re.search(r'NULL', sample): isbad = True
if isbad: continue
fmat = { k:i for i,k in enumerate(col[8].split(':')) }
if 'VS' not in fmat or 'QR' not in fmat: continue
if 'AGE' not in fmat: continue
if len(annotations) == 0: annotations = [[] for _ in col[9:] ]
vcfinfo = { d.split('=')[0]: d.split('=')[1] for d in col[7].split(';') if len(d.split('=')) == 2 }
vq = string.atof(vcfinfo['VQ'])
inb = string.atof(vcfinfo['InbCoeff'])
if ('POSITIVE_TRAIN_SITE' in col[7]) and ('NEGATIVE_TRAIN_SITE' in col[7]):
mark.append([3, vq, inb])
elif 'POSITIVE_TRAIN_SITE' in col[7]:
mark.append([1, vq, inb])
elif 'NEGATIVE_TRAIN_SITE' in col[7]:
mark.append([2, vq, inb])
else:
mark.append([0, vq, inb])
# GT:AA:AE:FN:MIP:MS:QR:RR:VS:VT
for i, sample in enumerate (col[9:]):
sampleId = col2sam[9+i]
field = sample.split(':')
if sample == './.' or len(field) < fmat['QR'] + 1 or field[fmat['QR']].split(',')[-1] == '.' or field[fmat['AS']] == '.':
annotations[i].append([0, 0, 0, 0, 0, 0, 0, 0, 0])
continue
qr = field[fmat['QR']].split(',')[-1]
qregion = np.array(qr.split('-'))
if len(qregion) > 3: qId = qregion[0] + '-' + qregion[1]
else : qId = qregion[0]
qSta = string.atoi(qregion[-2])
qEnd = string.atoi(qregion[-1])
                if sampleId not in qFaLen:
                    raise ValueError('[ERROR] The sample name %s (in vcf) is not in the Fa length list.' % sampleId)
                if qId not in qFaLen[sampleId]:
                    raise ValueError('[ERROR] %s was not found in the Fa length file of sample %s.' % (qId, sampleId))
                qSta = int(qSta * 100 / qFaLen[sampleId][qId] + 0.5)
                qEnd = int(qEnd * 100 / qFaLen[sampleId][qId] + 0.5)
if qSta > 100 or qEnd > 100:
raise ValueError ('[ERROR] Query size Overflow! sample: %s; scaffold: %s' % (sampleId, qId))
leg = qSta
if 100 - qEnd < qSta: leg = qEnd
nn = string.atof(sample.split(':')[fmat['NR']])
n = round(1000 * nn) / 10.0 # N ratio
alt = string.atoi(sample.split(':')[fmat['AA']].split(',')[1]) # Alternate perfect
bot = string.atoi(sample.split(':')[fmat['AA']].split(',')[3]) # Both imperfect
pro, ipr = [0,0]
ms = string.atoi(sample.split(':')[fmat['AS']]) # Mapping score
mip = string.atof(sample.split(':')[fmat['MS']]) # Mismapping probability
if sample.split(':')[fmat['AGE']] != '.':
aveI = string.atoi(sample.split(':')[fmat['AGE']].split(',')[3]) # ave_iden in AGE
else:
aveI = 0
annotations[i].append([leg, n, alt, bot, pro, ipr, ms, mip, aveI])
I.close()
print >> sys.stderr, '# Number of Positions: %d' % len(mark)
    if len(mark) != len(annotations[0]):
        raise ValueError('[ERROR] Sizes do not match: mark=%d, annotations=%d!' % (len(mark), len(annotations[0])))
    annotations = np.array(annotations)
sampleNum = len(annotations)
data, distance, properDepth, imProperDepth, nr, aa, bb, mscore, misprob, aveIden = [],[],[],[],[],[],[],[],[],[]
inbreedCoe, phredScal = [], []
for i in range(len(annotations[0])):
anno = np.array([annotations[s][i] for s in range(sampleNum) if len(annotations[s][i][annotations[s][i]!=0]) > 0 ]) # each person in the same position
score = np.array([annotations[s][i][-3] for s in range(sampleNum) if annotations[s][i][-3] > 0 ])
msprob = np.array([annotations[s][i][-2] for s in range(sampleNum) if annotations[s][i][-3] > 0 ])
phred = -10 * np.log10(1.0 - score.sum() / np.sum(score/(1.0 - msprob))) # Phred scale
if len(anno) == 0: continue
leg, n, alt, bot, pro,ipr, ms, mip, aveI = np.median(anno, axis=0)
distance.append ([mark[i][0], mark[i][1], leg ])
properDepth.append ([mark[i][0], mark[i][1], pro ])
imProperDepth.append ([mark[i][0], mark[i][1], ipr ])
nr.append ([mark[i][0], mark[i][1], n ])
aa.append ([mark[i][0], mark[i][1], alt ])
bb.append ([mark[i][0], mark[i][1], bot ])
mscore.append ([mark[i][0], mark[i][1], ms ])
misprob.append ([mark[i][0], mark[i][1], mip ])
aveIden.append ([mark[i][0], mark[i][1], aveI])
phredScal.append ([mark[i][0], mark[i][1], phred])
inbreedCoe.append ([mark[i][0], mark[i][1], mark[i][2]])
data.append([leg, alt, pro, ipr, n, bot])
print mark[i][0], mark[i][1], mark[i][2], '\t', leg, '\t', pro, '\t', ipr,'\t', n, '\t', alt, '\t', bot
data = np.array(data)
print >> sys.stderr, '\nPosition\tALTernatePerfect\tLeftIdentity\tRightIdentity\tAveIden\tNRatio\tBothImperfect'
print >> sys.stderr, 'Means: ', data.mean(axis=0), '\nstd : ', data.std(axis=0), '\nMedian: ', np.median(data, axis=0)
print >> sys.stderr, '25 Percentile:', np.percentile(data, 25,axis=0), '\n50 Percentile:', np.percentile(data, 50,axis=0), '\n75 Percentile:', np.percentile(data, 75,axis=0)
DrawFig(figPrefix, \
np.array (distance ), \
np.array (properDepth ), \
np.array (imProperDepth), \
np.array (nr ), \
np.array (aa ), \
np.array (bb ), \
np.array (mscore ), \
np.array (misprob ), \
np.array (aveIden ), \
np.array (inbreedCoe ) )
DrawPhredScale (figPrefix + '.phred', np.array(phredScal))
if __name__ == '__main__':
VQ_CUTOFF = 3.0
main(sys.argv[1:])
| 46.207407
| 177
| 0.511783
|
"""
========================================================
Statistic the SV Stat after AGE Process
========================================================
Author: Shujia Huang & Siyang Liu
Date : 2014-03-07 0idx:54:15
"""
import sys
import re
import os
import string
import numpy as np
import matplotlib.pyplot as plt
def DrawFig(figureFile, distance, properDepth, imProperDepth, nr, aa, bb, mscore, misprob, aveIden, inbCoe):
fig = plt.figure(num=None, figsize=(16, 30), facecolor='w', edgecolor='k')
title = ['Distance distribution', 'NRatio', 'Perfect Depth', 'Imperfect depth', '', '', '']
ylabel = ['The position of breakpoint', 'N Ratio of varints', \
'Perfect Depth', 'Both ImPerfect Depth', 'InbreedCoefficient', \
'Map score', 'Mismapping Probability' , 'Average Identity', \
'ProperReadDepth', 'ImProperReadDepth']
al = 0.5
for i, data in enumerate ([distance, nr, aa, bb, inbCoe, mscore, misprob, aveIden, properDepth, imProperDepth ]):
plt.subplot(10,2,2 * i + 1)
P = data[:,0] == 1; N = data[:,0] == 2; X = data[:,0] == 3
plt.scatter(data[:,1][N], data[:,2][N], marker='o', c = 'r', alpha=al, linewidths = 0.1, label = 'Negative(%d)'%len(data[:,1][N]))
plt.scatter(data[:,1][P], data[:,2][P], marker='o', c = 'g', alpha=al, linewidths = 0.1, label = 'Positive(%d)'%len(data[:,1][P]))
plt.scatter(data[:,1][X], data[:,2][X], marker='*', c = 'Y', alpha=al, linewidths = 0.1, label = 'Positive->Negative(%d)' % len(data[:,1][X]))
plt.legend(loc='upper right')
plt.xlim(-10, 50)
if i == 9: plt.xlabel('Score', fontsize=16)
plt.ylabel(ylabel[i], fontsize=16)
plt.subplot(10, 2, 2*i + 2)
NEW = data[:,0] == 0
good = data[:,1][NEW] >= VQ_CUTOFF
bad = data[:,1][NEW] < VQ_CUTOFF
plt.scatter(data[:,1][NEW][bad], data[:,2][NEW][bad], marker='o', c = 'm', alpha=al, linewidths = 0.1, label = 'bad(%d)' % len(data[:,1][NEW][bad]))
plt.scatter(data[:,1][NEW][good], data[:,2][NEW][good], marker='o', c = 'b', alpha=al, linewidths = 0.1, label = 'good(%d)' % len(data[:,1][NEW][good]))
plt.xlim(-3, 30)
plt.legend(loc='upper right')
if i == 9: plt.xlabel('Score', fontsize=16)
fig.savefig(figureFile + '.png')
def DrawPhredScale (figureFile, phredScal):
fig = plt.figure()
ylabel = ['Phred Scale']
for i, data in enumerate ([phredScal ]):
plt.subplot(2, 1, 2 * i + 1)
P = data[:,0] == 1; N = data[:,0] == 2; X = data[:,0] == 3
plt.scatter(data[:,1][N], data[:,2][N], marker='o', c = 'r', alpha=0.5, linewidths = 0, label = 'Negative(%d)'%len(data[:,1][N]))
plt.scatter(data[:,1][P], data[:,2][P], marker='o', c = 'g', alpha=0.5, linewidths = 0, label = 'Positive(%d)'%len(data[:,1][P]))
plt.scatter(data[:,1][X], data[:,2][X], marker='o', c = 'Y', alpha=0.5, linewidths = 0, label = 'Positive->Negative(%d)' % len(data[:,1][X]))
plt.legend(loc='upper left')
plt.ylabel(ylabel[i], fontsize=16)
plt.subplot(2, 1, 2*i + 2)
NEW = data[:,0] == 0
good = data[:,1][NEW] >= VQ_CUTOFF
bad = data[:,1][NEW] < VQ_CUTOFF
plt.scatter(data[:,1][NEW][bad] , data[:,2][NEW][bad] , marker='o', c = 'm', alpha=0.5, linewidths = 0, label = 'bad(%d)' % len(data[:,1][NEW][bad]))
plt.scatter(data[:,1][NEW][good], data[:,2][NEW][good], marker='o', c = 'b', alpha=0.5, linewidths = 0, label = 'good(%d)' % len(data[:,1][NEW][good]))
plt.legend(loc='upper left')
plt.xlabel('Score' , fontsize=16)
plt.ylabel(ylabel[i], fontsize=16)
fig.savefig(figureFile + '.png')
def Accum (data, isBig = False):
tmpD= data
k = sorted(tmpD.keys(), key = lambda d: float(d))
dat = []
for i in range(len(k)):
if isBig:
for j in range(i,len(k)): tmpD[k[i]][1] += tmpD[k[j]][0]
else:
for j in range(i+1): tmpD[k[i]][1] += tmpD[k[j]][0]
dat.append([float(k[i]), float(tmpD[k[i]][0]), float(tmpD[k[i]][1]) ])
return dat
def SampleFaLen (faLenFile):
if faLenFile[-3:] == '.gz': I = os.popen('gzip -dc %s' % faLenFile)
else : I = open(faLenFile)
data = {}
while 1:
lines = I.readlines (100000)
if not lines: break
for line in lines:
col = line.strip('\n').split()
data[col[0]] = string.atoi(col[1])
I.close()
return data
def LoadFaLen (faLenLstFile):
data = {}
I = open (faLenLstFile)
for line in I.readlines():
if len(line.strip('\n').split()) != 2: raise ValueError('[ERROR] The format of Fa length list maybe not right. It could just be: "sample FalenghtFile", but found',line)
sampleId, fileName = line.strip('\n').split()
if sampleId not in data: data[sampleId] = {}
data[sampleId] = SampleFaLen(fileName)
I.close()
return data
def main (argv):
qFaLen = LoadFaLen(argv[1])
figPrefix = 'test'
if len(argv) > 2: figPrefix = argv[2]
if argv[0][-3:] == '.gz':
I = os.popen('gzip -dc %s' % argv[0])
else:
I = open (argv[0])
s, annotations, mark = set(), [], []
print '#Chr\tPosition\tDistance\tLeftIden\tRightIden\tAveIden\tN-Ratio\tAA'
while 1:
lines = I.readlines(100000)
if not lines: break
for line in lines:
col = line.strip('\n').split()
if re.search(r'^#CHROM', line): col2sam = { i+9:sam for i,sam in enumerate(col[9:]) }
if re.search(r'^#', line): continue
key = col[0] + ':' + col[1]
if key in s: continue
s.add(key)
isbad = False
for i, sample in enumerate (col[9:]):
if re.search(r'NULL', sample): isbad = True
if isbad: continue
fmat = { k:i for i,k in enumerate(col[8].split(':')) }
if 'VS' not in fmat or 'QR' not in fmat: continue
if 'AGE' not in fmat: continue
if len(annotations) == 0: annotations = [[] for _ in col[9:] ]
vcfinfo = { d.split('=')[0]: d.split('=')[1] for d in col[7].split(';') if len(d.split('=')) == 2 }
vq = string.atof(vcfinfo['VQ'])
inb = string.atof(vcfinfo['InbCoeff'])
if ('POSITIVE_TRAIN_SITE' in col[7]) and ('NEGATIVE_TRAIN_SITE' in col[7]):
mark.append([3, vq, inb])
elif 'POSITIVE_TRAIN_SITE' in col[7]:
mark.append([1, vq, inb])
elif 'NEGATIVE_TRAIN_SITE' in col[7]:
mark.append([2, vq, inb])
else:
mark.append([0, vq, inb])
for i, sample in enumerate (col[9:]):
sampleId = col2sam[9+i]
field = sample.split(':')
if sample == './.' or len(field) < fmat['QR'] + 1 or field[fmat['QR']].split(',')[-1] == '.' or field[fmat['AS']] == '.':
annotations[i].append([0, 0, 0, 0, 0, 0, 0, 0, 0])
continue
qr = field[fmat['QR']].split(',')[-1]
qregion = np.array(qr.split('-'))
if len(qregion) > 3: qId = qregion[0] + '-' + qregion[1]
else : qId = qregion[0]
qSta = string.atoi(qregion[-2])
qEnd = string.atoi(qregion[-1])
if sampleId not in qFaLen:
                    raise ValueError ('[ERROR] The sample name %s (in vcf) is not in the Fa length list.' % sampleId)
if qId not in qFaLen[sampleId]:
                    raise ValueError ('[ERROR] %s was not found in the Fa length file for sample %s.' % (qId, sampleId))
qSta= int(qSta * 100 / qFaLen[sampleId][qId] + 0.5)
qEnd= int(qEnd * 100 / qFaLen[sampleId][qId] + 0.5)
if qSta > 100 or qEnd > 100:
raise ValueError ('[ERROR] Query size Overflow! sample: %s; scaffold: %s' % (sampleId, qId))
leg = qSta
if 100 - qEnd < qSta: leg = qEnd
nn = string.atof(sample.split(':')[fmat['NR']])
n = round(1000 * nn) / 10.0
alt = string.atoi(sample.split(':')[fmat['AA']].split(',')[1])
bot = string.atoi(sample.split(':')[fmat['AA']].split(',')[3])
pro, ipr = [0,0]
ms = string.atoi(sample.split(':')[fmat['AS']])
mip = string.atof(sample.split(':')[fmat['MS']])
if sample.split(':')[fmat['AGE']] != '.':
aveI = string.atoi(sample.split(':')[fmat['AGE']].split(',')[3])
else:
aveI = 0
annotations[i].append([leg, n, alt, bot, pro, ipr, ms, mip, aveI])
I.close()
print >> sys.stderr, '# Number of Positions: %d' % len(mark)
    if len(mark) != len(annotations[0]):
        raise ValueError ('[ERROR] Sizes do not match: mark=%d, annotations=%d!' % (len(mark), len(annotations[0])))
    annotations = np.array(annotations)
sampleNum = len(annotations)
data, distance, properDepth, imProperDepth, nr, aa, bb, mscore, misprob, aveIden = [],[],[],[],[],[],[],[],[],[]
inbreedCoe, phredScal = [], []
for i in range(len(annotations[0])):
        anno = np.array([annotations[s][i] for s in range(sampleNum) if len(annotations[s][i][annotations[s][i]!=0]) > 0 ])
        if len(anno) == 0: continue  # skip positions with no informative samples before computing phred
        score = np.array([annotations[s][i][-3] for s in range(sampleNum) if annotations[s][i][-3] > 0 ])
        msprob = np.array([annotations[s][i][-2] for s in range(sampleNum) if annotations[s][i][-3] > 0 ])
        # Phred-scaled confidence derived from alignment scores and mis-map probabilities
        phred = -10 * np.log10(1.0 - score.sum() / np.sum(score/(1.0 - msprob)))
        leg, n, alt, bot, pro,ipr, ms, mip, aveI = np.median(anno, axis=0)
distance.append ([mark[i][0], mark[i][1], leg ])
properDepth.append ([mark[i][0], mark[i][1], pro ])
imProperDepth.append ([mark[i][0], mark[i][1], ipr ])
nr.append ([mark[i][0], mark[i][1], n ])
aa.append ([mark[i][0], mark[i][1], alt ])
bb.append ([mark[i][0], mark[i][1], bot ])
mscore.append ([mark[i][0], mark[i][1], ms ])
misprob.append ([mark[i][0], mark[i][1], mip ])
aveIden.append ([mark[i][0], mark[i][1], aveI])
phredScal.append ([mark[i][0], mark[i][1], phred])
inbreedCoe.append ([mark[i][0], mark[i][1], mark[i][2]])
data.append([leg, alt, pro, ipr, n, bot])
print mark[i][0], mark[i][1], mark[i][2], '\t', leg, '\t', pro, '\t', ipr,'\t', n, '\t', alt, '\t', bot
data = np.array(data)
print >> sys.stderr, '\nPosition\tALTernatePerfect\tLeftIdentity\tRightIdentity\tAveIden\tNRatio\tBothImperfect'
print >> sys.stderr, 'Means: ', data.mean(axis=0), '\nstd : ', data.std(axis=0), '\nMedian: ', np.median(data, axis=0)
print >> sys.stderr, '25 Percentile:', np.percentile(data, 25,axis=0), '\n50 Percentile:', np.percentile(data, 50,axis=0), '\n75 Percentile:', np.percentile(data, 75,axis=0)
DrawFig(figPrefix, \
np.array (distance ), \
np.array (properDepth ), \
np.array (imProperDepth), \
np.array (nr ), \
np.array (aa ), \
np.array (bb ), \
np.array (mscore ), \
np.array (misprob ), \
np.array (aveIden ), \
np.array (inbreedCoe ) )
DrawPhredScale (figPrefix + '.phred', np.array(phredScal))
if __name__ == '__main__':
VQ_CUTOFF = 3.0
main(sys.argv[1:])
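# Usage sketch (added): argv[0] is the VCF (optionally .gz), argv[1] the Fa
# length list consumed by LoadFaLen, and argv[2] an optional figure prefix, e.g.
#   python this_script.py variants.vcf.gz falen.list myPrefix
# ('this_script.py' and the file names are illustrative.)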
| false
| true
|
790e7e3cbbb549ed654db291347be7a238b3d730
| 374
|
py
|
Python
|
pkgs/spyder-2.3.8-py27_1/lib/python2.7/site-packages/spyderplugins/widgets/__init__.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pkgs/spyder-2.3.8-py27_1/lib/python2.7/site-packages/spyderplugins/widgets/__init__.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pkgs/spyder-2.3.8-py27_1/lib/python2.7/site-packages/spyderplugins/widgets/__init__.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2021-08-04T08:13:34.000Z
|
2021-08-04T08:13:34.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""
spyderlib.widgets
=================
Widgets defined in this module may be used in any other Qt-based application.
They are also used in Spyder through the Plugin interface
(see spyderlib.plugins)
"""
| 23.375
| 77
| 0.679144
| true
| true
|
|
790e7e9cf0a7ae176e306f42356a271451e13a17
| 680
|
py
|
Python
|
ampel/test/test_T3SimpleDataLoader.py
|
mafn/Ampel-core
|
744acbf36f0a2ceae7230ceab1350236c1501b57
|
[
"BSD-3-Clause"
] | null | null | null |
ampel/test/test_T3SimpleDataLoader.py
|
mafn/Ampel-core
|
744acbf36f0a2ceae7230ceab1350236c1501b57
|
[
"BSD-3-Clause"
] | null | null | null |
ampel/test/test_T3SimpleDataLoader.py
|
mafn/Ampel-core
|
744acbf36f0a2ceae7230ceab1350236c1501b57
|
[
"BSD-3-Clause"
] | null | null | null |
from ampel.t3.supply.load.T3SimpleDataLoader import T3SimpleDataLoader
from ampel.core.AmpelContext import AmpelContext
def test_instantiate(core_config, patch_mongo, ampel_logger):
"""
AbsT3Loader understands all the aliases in the ampel-core config
"""
ctx = AmpelContext.load(core_config)
aliases = ctx.config.get("alias.t3", dict)
assert len(
directives := T3SimpleDataLoader(
context=ctx,
logger=ampel_logger,
directives=[k[1:] for k in aliases.keys()]
).directives
) == len(aliases)
for d, value in zip(directives, aliases.values()):
assert d.dict(exclude_defaults=True) == value
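# Note (added): the walrus assignment inside the assert binds `directives` once,
# so the count check and the per-directive comparison loop reuse one construction.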
| 34
| 70
| 0.677941
|
from ampel.t3.supply.load.T3SimpleDataLoader import T3SimpleDataLoader
from ampel.core.AmpelContext import AmpelContext
def test_instantiate(core_config, patch_mongo, ampel_logger):
ctx = AmpelContext.load(core_config)
aliases = ctx.config.get("alias.t3", dict)
assert len(
directives := T3SimpleDataLoader(
context=ctx,
logger=ampel_logger,
directives=[k[1:] for k in aliases.keys()]
).directives
) == len(aliases)
for d, value in zip(directives, aliases.values()):
assert d.dict(exclude_defaults=True) == value
| true
| true
|
790e7ec6ba85fc5230215d834a4fe8ea2e783dfe
| 8,135
|
py
|
Python
|
info/utils/captcha/captcha.py
|
rymmx/My_information
|
70e7b8b294c19328c10d1335cacca8832d86931f
|
[
"MIT"
] | 1
|
2019-02-03T03:53:30.000Z
|
2019-02-03T03:53:30.000Z
|
info/utils/captcha/captcha.py
|
rymmx/My_information
|
70e7b8b294c19328c10d1335cacca8832d86931f
|
[
"MIT"
] | null | null | null |
info/utils/captcha/captcha.py
|
rymmx/My_information
|
70e7b8b294c19328c10d1335cacca8832d86931f
|
[
"MIT"
] | 1
|
2019-02-03T03:55:10.000Z
|
2019-02-03T03:55:10.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# refer to `https://bitbucket.org/akorn/wheezy.captcha`
import random
import string
import os.path
from io import BytesIO
from PIL import Image
from PIL import ImageFilter
from PIL.ImageDraw import Draw
from PIL.ImageFont import truetype
class Bezier:
def __init__(self):
self.tsequence = tuple([t / 20.0 for t in range(21)])
self.beziers = {}
def pascal_row(self, n):
""" Returns n-th row of Pascal's triangle
"""
result = [1]
x, numerator = 1, n
for denominator in range(1, n // 2 + 1):
x *= numerator
x /= denominator
result.append(x)
numerator -= 1
if n & 1 == 0:
result.extend(reversed(result[:-1]))
else:
result.extend(reversed(result))
return result
def make_bezier(self, n):
""" Bezier curves:
http://en.wikipedia.org/wiki/B%C3%A9zier_curve#Generalization
"""
try:
return self.beziers[n]
except KeyError:
combinations = self.pascal_row(n - 1)
result = []
for t in self.tsequence:
tpowers = (t ** i for i in range(n))
upowers = ((1 - t) ** i for i in range(n - 1, -1, -1))
coefs = [c * a * b for c, a, b in zip(combinations,
tpowers, upowers)]
result.append(coefs)
self.beziers[n] = result
return result
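    # Note (added): each coefficient row above is the Bernstein basis
    # C(n-1, i) * t**i * (1 - t)**(n-1-i) evaluated at one t in tsequence;
    # a Bezier point is the coefficient-weighted sum of the control points.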
class Captcha(object):
def __init__(self):
self._bezier = Bezier()
self._dir = os.path.dirname(__file__)
# self._captcha_path = os.path.join(self._dir, '..', 'static', 'captcha')
@staticmethod
def instance():
if not hasattr(Captcha, "_instance"):
Captcha._instance = Captcha()
return Captcha._instance
def initialize(self, width=200, height=75, color=None, text=None, fonts=None):
# self.image = Image.new('RGB', (width, height), (255, 255, 255))
        # 4-character random verification code
self._text = text if text else random.sample(string.ascii_uppercase + string.ascii_uppercase + '3456789', 4)
# print(self._text)
self.fonts = fonts if fonts else \
[os.path.join(self._dir, 'fonts', font) for font in ['Arial.ttf', 'Georgia.ttf', 'actionj.ttf']]
self.width = width
self.height = height
self._color = color if color else self.random_color(0, 200, random.randint(220, 255))
@staticmethod
def random_color(start, end, opacity=None):
red = random.randint(start, end)
green = random.randint(start, end)
blue = random.randint(start, end)
if opacity is None:
return red, green, blue
return red, green, blue, opacity
# draw image
def background(self, image):
Draw(image).rectangle([(0, 0), image.size], fill=self.random_color(238, 255))
return image
@staticmethod
def smooth(image):
return image.filter(ImageFilter.SMOOTH)
def curve(self, image, width=4, number=6, color=None):
dx, height = image.size
dx /= number
path = [(dx * i, random.randint(0, height))
for i in range(1, number)]
bcoefs = self._bezier.make_bezier(number - 1)
points = []
for coefs in bcoefs:
points.append(tuple(sum([coef * p for coef, p in zip(coefs, ps)])
for ps in zip(*path)))
Draw(image).line(points, fill=color if color else self._color, width=width)
return image
def noise(self, image, number=50, level=2, color=None):
width, height = image.size
dx = width / 10
width -= dx
dy = height / 10
height -= dy
draw = Draw(image)
for i in range(number):
x = int(random.uniform(dx, width))
y = int(random.uniform(dy, height))
draw.line(((x, y), (x + level, y)), fill=color if color else self._color, width=level)
return image
def text(self, image, fonts, font_sizes=None, drawings=None, squeeze_factor=0.75, color=None):
color = color if color else self._color
fonts = tuple([truetype(name, size)
for name in fonts
for size in font_sizes or (65, 70, 75)])
draw = Draw(image)
char_images = []
for c in self._text:
font = random.choice(fonts)
c_width, c_height = draw.textsize(c, font=font)
char_image = Image.new('RGB', (c_width, c_height), (0, 0, 0))
char_draw = Draw(char_image)
char_draw.text((0, 0), c, font=font, fill=color)
char_image = char_image.crop(char_image.getbbox())
for drawing in drawings:
d = getattr(self, drawing)
char_image = d(char_image)
char_images.append(char_image)
width, height = image.size
offset = int((width - sum(int(i.size[0] * squeeze_factor)
for i in char_images[:-1]) -
char_images[-1].size[0]) / 2)
for char_image in char_images:
c_width, c_height = char_image.size
mask = char_image.convert('L').point(lambda i: i * 1.97)
image.paste(char_image,
(offset, int((height - c_height) / 2)),
mask)
offset += int(c_width * squeeze_factor)
return image
# draw text
@staticmethod
def warp(image, dx_factor=0.27, dy_factor=0.21):
width, height = image.size
dx = width * dx_factor
dy = height * dy_factor
x1 = int(random.uniform(-dx, dx))
y1 = int(random.uniform(-dy, dy))
x2 = int(random.uniform(-dx, dx))
y2 = int(random.uniform(-dy, dy))
image2 = Image.new('RGB',
(width + abs(x1) + abs(x2),
height + abs(y1) + abs(y2)))
image2.paste(image, (abs(x1), abs(y1)))
width2, height2 = image2.size
return image2.transform(
(width, height), Image.QUAD,
(x1, y1,
-x1, height2 - y2,
width2 + x2, height2 + y2,
width2 - x2, -y1))
@staticmethod
def offset(image, dx_factor=0.1, dy_factor=0.2):
width, height = image.size
dx = int(random.random() * width * dx_factor)
dy = int(random.random() * height * dy_factor)
image2 = Image.new('RGB', (width + dx, height + dy))
image2.paste(image, (dx, dy))
return image2
@staticmethod
def rotate(image, angle=25):
return image.rotate(
random.uniform(-angle, angle), Image.BILINEAR, expand=1)
def captcha(self, path=None, fmt='JPEG'):
"""Create a captcha.
Args:
path: save path, default None.
fmt: image format, PNG / JPEG.
Returns:
            A tuple, (name, text, BytesIO value).
For example:
('fXZJN4AFxHGoU5mIlcsdOypa', 'JGW9', '\x89PNG\r\n\x1a\n\x00\x00\x00\r...')
"""
image = Image.new('RGB', (self.width, self.height), (255, 255, 255))
image = self.background(image)
image = self.text(image, self.fonts, drawings=['warp', 'rotate', 'offset'])
image = self.curve(image)
image = self.noise(image)
image = self.smooth(image)
name = "".join(random.sample(string.ascii_lowercase + string.ascii_uppercase + '3456789', 24))
text = "".join(self._text)
out = BytesIO()
image.save(out, format=fmt)
if path:
image.save(os.path.join(path, name), fmt)
return name, text, out.getvalue()
def generate_captcha(self):
self.initialize()
return self.captcha("")
# Global instance; just call captcha.generate_captcha() to use it.
captcha = Captcha.instance()
if __name__ == '__main__':
x = captcha.generate_captcha()
y = "%s.jpg" % x[1]
print(x)
with open(y,"wb") as f:
f.write(x[2])
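# Hedged usage sketch (added; not part of the original module): serving the
# captcha from a web view. Flask and make_response are assumptions here.
#
#     from flask import make_response
#     name, text, image_bytes = captcha.generate_captcha()
#     response = make_response(image_bytes)
#     response.headers['Content-Type'] = 'image/jpeg'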
| 34.764957
| 116
| 0.547019
|
import random
import string
import os.path
from io import BytesIO
from PIL import Image
from PIL import ImageFilter
from PIL.ImageDraw import Draw
from PIL.ImageFont import truetype
class Bezier:
def __init__(self):
self.tsequence = tuple([t / 20.0 for t in range(21)])
self.beziers = {}
def pascal_row(self, n):
result = [1]
x, numerator = 1, n
for denominator in range(1, n // 2 + 1):
x *= numerator
x /= denominator
result.append(x)
numerator -= 1
if n & 1 == 0:
result.extend(reversed(result[:-1]))
else:
result.extend(reversed(result))
return result
def make_bezier(self, n):
try:
return self.beziers[n]
except KeyError:
combinations = self.pascal_row(n - 1)
result = []
for t in self.tsequence:
tpowers = (t ** i for i in range(n))
upowers = ((1 - t) ** i for i in range(n - 1, -1, -1))
coefs = [c * a * b for c, a, b in zip(combinations,
tpowers, upowers)]
result.append(coefs)
self.beziers[n] = result
return result
class Captcha(object):
def __init__(self):
self._bezier = Bezier()
self._dir = os.path.dirname(__file__)
@staticmethod
def instance():
if not hasattr(Captcha, "_instance"):
Captcha._instance = Captcha()
return Captcha._instance
def initialize(self, width=200, height=75, color=None, text=None, fonts=None):
self._text = text if text else random.sample(string.ascii_uppercase + string.ascii_uppercase + '3456789', 4)
self.fonts = fonts if fonts else \
[os.path.join(self._dir, 'fonts', font) for font in ['Arial.ttf', 'Georgia.ttf', 'actionj.ttf']]
self.width = width
self.height = height
self._color = color if color else self.random_color(0, 200, random.randint(220, 255))
@staticmethod
def random_color(start, end, opacity=None):
red = random.randint(start, end)
green = random.randint(start, end)
blue = random.randint(start, end)
if opacity is None:
return red, green, blue
return red, green, blue, opacity
def background(self, image):
Draw(image).rectangle([(0, 0), image.size], fill=self.random_color(238, 255))
return image
@staticmethod
def smooth(image):
return image.filter(ImageFilter.SMOOTH)
def curve(self, image, width=4, number=6, color=None):
dx, height = image.size
dx /= number
path = [(dx * i, random.randint(0, height))
for i in range(1, number)]
bcoefs = self._bezier.make_bezier(number - 1)
points = []
for coefs in bcoefs:
points.append(tuple(sum([coef * p for coef, p in zip(coefs, ps)])
for ps in zip(*path)))
Draw(image).line(points, fill=color if color else self._color, width=width)
return image
def noise(self, image, number=50, level=2, color=None):
width, height = image.size
dx = width / 10
width -= dx
dy = height / 10
height -= dy
draw = Draw(image)
for i in range(number):
x = int(random.uniform(dx, width))
y = int(random.uniform(dy, height))
draw.line(((x, y), (x + level, y)), fill=color if color else self._color, width=level)
return image
def text(self, image, fonts, font_sizes=None, drawings=None, squeeze_factor=0.75, color=None):
color = color if color else self._color
fonts = tuple([truetype(name, size)
for name in fonts
for size in font_sizes or (65, 70, 75)])
draw = Draw(image)
char_images = []
for c in self._text:
font = random.choice(fonts)
c_width, c_height = draw.textsize(c, font=font)
char_image = Image.new('RGB', (c_width, c_height), (0, 0, 0))
char_draw = Draw(char_image)
char_draw.text((0, 0), c, font=font, fill=color)
char_image = char_image.crop(char_image.getbbox())
for drawing in drawings:
d = getattr(self, drawing)
char_image = d(char_image)
char_images.append(char_image)
width, height = image.size
offset = int((width - sum(int(i.size[0] * squeeze_factor)
for i in char_images[:-1]) -
char_images[-1].size[0]) / 2)
for char_image in char_images:
c_width, c_height = char_image.size
mask = char_image.convert('L').point(lambda i: i * 1.97)
image.paste(char_image,
(offset, int((height - c_height) / 2)),
mask)
offset += int(c_width * squeeze_factor)
return image
@staticmethod
def warp(image, dx_factor=0.27, dy_factor=0.21):
width, height = image.size
dx = width * dx_factor
dy = height * dy_factor
x1 = int(random.uniform(-dx, dx))
y1 = int(random.uniform(-dy, dy))
x2 = int(random.uniform(-dx, dx))
y2 = int(random.uniform(-dy, dy))
image2 = Image.new('RGB',
(width + abs(x1) + abs(x2),
height + abs(y1) + abs(y2)))
image2.paste(image, (abs(x1), abs(y1)))
width2, height2 = image2.size
return image2.transform(
(width, height), Image.QUAD,
(x1, y1,
-x1, height2 - y2,
width2 + x2, height2 + y2,
width2 - x2, -y1))
@staticmethod
def offset(image, dx_factor=0.1, dy_factor=0.2):
width, height = image.size
dx = int(random.random() * width * dx_factor)
dy = int(random.random() * height * dy_factor)
image2 = Image.new('RGB', (width + dx, height + dy))
image2.paste(image, (dx, dy))
return image2
@staticmethod
def rotate(image, angle=25):
return image.rotate(
random.uniform(-angle, angle), Image.BILINEAR, expand=1)
def captcha(self, path=None, fmt='JPEG'):
image = Image.new('RGB', (self.width, self.height), (255, 255, 255))
image = self.background(image)
image = self.text(image, self.fonts, drawings=['warp', 'rotate', 'offset'])
image = self.curve(image)
image = self.noise(image)
image = self.smooth(image)
name = "".join(random.sample(string.ascii_lowercase + string.ascii_uppercase + '3456789', 24))
text = "".join(self._text)
out = BytesIO()
image.save(out, format=fmt)
if path:
image.save(os.path.join(path, name), fmt)
return name, text, out.getvalue()
def generate_captcha(self):
self.initialize()
return self.captcha("")
captcha = Captcha.instance()
if __name__ == '__main__':
x = captcha.generate_captcha()
y = "%s.jpg" % x[1]
print(x)
with open(y,"wb") as f:
f.write(x[2])
| true
| true
|
790e7f140f004009c0d7b94b4a17b6bb1d019f53
| 4,382
|
py
|
Python
|
factern_client/com/factern/model/create_entity_response.py
|
Factern/factern-client-python
|
2453dbf0d683417142fe98514ef6de2742f14f92
|
[
"MIT"
] | null | null | null |
factern_client/com/factern/model/create_entity_response.py
|
Factern/factern-client-python
|
2453dbf0d683417142fe98514ef6de2742f14f92
|
[
"MIT"
] | null | null | null |
factern_client/com/factern/model/create_entity_response.py
|
Factern/factern-client-python
|
2453dbf0d683417142fe98514ef6de2742f14f92
|
[
"MIT"
] | 2
|
2018-07-20T15:02:06.000Z
|
2018-08-01T20:38:38.000Z
|
# coding: utf-8
"""
Factern API
"""
import pprint
import re # noqa: F401
import six
import importlib
parent_name = "BaseResponse"
def get_parent():
# Lazy importing of parent means that loading the classes happens
# in the correct order.
if get_parent.cache is None:
parent_fname = "factern_client.com.factern.model.%s" % re.sub("([a-z])([A-Z])", "\\1_\\2", "BaseResponse").lower()
parent = importlib.import_module(parent_fname).BaseResponse
get_parent.cache = parent
return get_parent.cache
get_parent.cache = None
class CreateEntityResponse(get_parent()):
@staticmethod
def get_parent():
return get_parent()
@staticmethod
def compute_parent_updates():
pass
get_parent().compute_parent_updates()
CreateEntityResponse.swagger_types.update(get_parent().swagger_types)
CreateEntityResponse.attribute_map.update(get_parent().attribute_map)
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'description': 'str',
'name': 'str'
}
attribute_map = {
'description': 'description',
'name': 'name'
}
def __init__(self, **kwargs): # noqa: E501
"""CreateEntityResponse - a model defined in Swagger""" # noqa: E501
self.compute_parent_updates()
for k in kwargs:
if k not in self.swagger_types:
raise ValueError("CreateEntityResponse got unexpected argument '%s'" % k)
get_parent().__init__(self, **kwargs)
self._description = None
self._name = None
if "description" in kwargs:
self.description = kwargs["description"]
if "name" in kwargs:
self.name = kwargs["name"]
@property
def description(self):
"""Gets the description of this CreateEntityResponse. # noqa: E501
:return: The description of this CreateEntityResponse. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this CreateEntityResponse.
:param description: The description of this CreateEntityResponse. # noqa: E501
:type: str
"""
self._description = description
@property
def name(self):
"""Gets the name of this CreateEntityResponse. # noqa: E501
:return: The name of this CreateEntityResponse. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CreateEntityResponse.
:param name: The name of this CreateEntityResponse. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
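    # Note (added): to_dict() recurses into any value exposing its own
    # to_dict(), so nested Swagger models inside lists and dicts serialize too.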
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateEntityResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.883436
| 122
| 0.581698
|
import pprint
import re
import six
import importlib
parent_name = "BaseResponse"
def get_parent():
if get_parent.cache is None:
parent_fname = "factern_client.com.factern.model.%s" % re.sub("([a-z])([A-Z])", "\\1_\\2", "BaseResponse").lower()
parent = importlib.import_module(parent_fname).BaseResponse
get_parent.cache = parent
return get_parent.cache
get_parent.cache = None
class CreateEntityResponse(get_parent()):
@staticmethod
def get_parent():
return get_parent()
@staticmethod
def compute_parent_updates():
pass
get_parent().compute_parent_updates()
CreateEntityResponse.swagger_types.update(get_parent().swagger_types)
CreateEntityResponse.attribute_map.update(get_parent().attribute_map)
swagger_types = {
'description': 'str',
'name': 'str'
}
attribute_map = {
'description': 'description',
'name': 'name'
}
def __init__(self, **kwargs):
self.compute_parent_updates()
for k in kwargs:
if k not in self.swagger_types:
raise ValueError("CreateEntityResponse got unexpected argument '%s'" % k)
get_parent().__init__(self, **kwargs)
self._description = None
self._name = None
if "description" in kwargs:
self.description = kwargs["description"]
if "name" in kwargs:
self.name = kwargs["name"]
@property
def description(self):
return self._description
@description.setter
def description(self, description):
self._description = description
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, CreateEntityResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
790e7f4d6d28c11ac979b72d02dfc112ce6de065
| 39,647
|
py
|
Python
|
main.py
|
DeniseMak/ner-neuron
|
d7ca8a2b1f5652b42892b4bda9b07a2e4edd09db
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
DeniseMak/ner-neuron
|
d7ca8a2b1f5652b42892b4bda9b07a2e4edd09db
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
DeniseMak/ner-neuron
|
d7ca8a2b1f5652b42892b4bda9b07a2e4edd09db
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author: Jie
# @Date: 2017-06-15 14:11:08
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2019-02-13 12:41:44
from __future__ import print_function
import time
import sys
import argparse
import random
import torch
import gc
import torch.nn as nn
import torch.optim as optim
import numpy as np
from utils.metric import get_ner_fmeasure
from model.seqlabel import SeqLabel
from model.sentclassifier import SentClassifier
from utils.data import Data
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
try:
import cPickle as pickle
except ImportError:
import pickle
DEFAULT_TRAINED_FILE = 'test_data/lstmtestglove50.9.model'
seed_num = 46
random.seed(seed_num)
torch.manual_seed(seed_num)
np.random.seed(seed_num)
def importance_matrix(sensitivities, data,
print_imp=True, show_table=True, tag_to_ablate=None):
'''
Builds a matrix of tag sensitivities
:param sensitivities: This is a matrix of [num_tags, num_neurons],
which is [10 x 50] in our experimental configuration.
:return:
'''
important_lists = []
important_nps = np.zeros(50, dtype=int)
sensitivities = sensitivities[1:] # omit padding tag
for i in range(len(sensitivities)):
important_list = []
important_np = np.zeros(50, dtype=int)
tag_sensitivity_row = sensitivities[i]
for j in range(len(tag_sensitivity_row)):
most_important = np.argmax(tag_sensitivity_row)
important_list.append(most_important)
important_np[j] = most_important
index = [most_important]
tag_sensitivity_row[most_important] = np.NINF
important_lists.append(important_list)
important_nps = np.vstack((important_nps, important_np))
important_nps = np.delete(important_nps, 0, axis=0) # delete padding tag
np.save("imps.npy",important_nps) # save importance rows for other scripts to use
important_nps = np.transpose(important_nps)
if show_table:
sns.set()
# Smaller than normal fonts
sns.set(font_scale=0.5)
x_tick = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]
del(x_tick[0])
ax = sns.heatmap(important_nps, annot=True, xticklabels=x_tick,
cmap=ListedColormap(['white']), cbar=False, yticklabels=False,
linecolor='gray', linewidths=0.4)
title = "Importance rankings of neurons per tag"
plt.title(title, fontsize=18)
ttl = ax.title
ttl.set_position([0.5, 1.05])
plt.show()
def trim_model_dir(model_dir):
model_dir = model_dir.replace('/','-')
return model_dir
ax.figure.savefig("ImportanceRankings-{}.png".format(trim_model_dir(data.model_dir)))
if print_imp:
imp_file = open("Importance-{}.txt".format(trim_model_dir(data.model_dir)), "w+")
print('Neuron importance ranking for each NER tag:')
for i, l in enumerate(important_lists):
tags = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]
del(tags[0]) # remove PAD tag
print ("\t{}\t{}".format(tags[i], l))
imp_file.write("{}\t{}\n".format(tags[i], l))
imp_file.write("\n")
np.savetxt("Importance-{}.tsv".format(trim_model_dir(data.model_dir)),
important_nps, fmt='%2.0d', delimiter='\t')
return important_nps
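# Note (added): the repeated argmax-with-NINF loop above is equivalent to
# np.argsort(-row) per tag; each tag's row in the saved imps.npy lists neuron
# indices from most to least sensitive (the matrix is transposed for display).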
def heatmap_sensitivity(sensitivities,
modelname=DEFAULT_TRAINED_FILE,
testname="",
show_pad=False,
show_vals=True,
disable=False):
'''
Shows a heatmap for the sensitivity values, saves the heatmap to a PNG file,
and also saves the sensitivity matrix to an .npy file,
which we use for calculating correlations between models later.
:param sensitivities: This is a matrix of [num_tags, num_neurons],
which is [10 x 50] in our experimental configuration.
:param disable: disable is just to turn off for debugging
:return:
'''
# transpose to match chart in Figure 7. of paper
sensitivities = np.transpose(sensitivities)
# column 0 is the padding tag
start = 1
if show_pad:
start = 0
sensitivities = sensitivities[0:50, start:10]
sns.set()
# Smaller than normal fonts
sns.set(font_scale=0.5)
x_tick = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]
if show_pad: x_tick[0] = 'PAD'
else: del(x_tick[0])
# change tags' order to use in downstream correlation diagrams
sensitivities_temp = np.zeros((50, 9))
x_tick_output = ['B-PER', 'I-PER', 'B-LOC', 'I-LOC', 'B-ORG', 'I-ORG', 'B-MISC', 'I-MISC', 'O']
for i in range(len(x_tick_output)):
sensitivities_temp[:, i] = sensitivities[:, x_tick.index(x_tick_output[i])]
np.save(modelname+'_sensitivities.npy', sensitivities_temp)
# put sensititivites in heat map
if not disable:
ax = sns.heatmap(sensitivities, xticklabels=x_tick, annot=show_vals, fmt=".2g")
title = "({}): ".format(testname) + modelname
plt.title(title, fontsize=18)
ttl = ax.title
ttl.set_position([0.5, 1.05])
plt.show()
ax.figure.savefig(modelname+"_heatmap.png")
def get_sensitivity_matrix(label, debug=True):
'''
Given a tag like 4: (B-PER), return the sensitivity matrix
:param label:
:return:
'''
avg_for_label = data.tag_contributions[label]/data.tag_counts[label]
sum_other_counts = 0
# data.tag_contributions[0] is for the padding label and can be ignored
sum_other_contributions = np.zeros((10, 50))
for l in data.tag_counts:
if l != label and l != 0: # if l != label: (to consider the padding label which is 0)
sum_other_counts += data.tag_counts[l]
sum_other_contributions += data.tag_contributions[l]
avg_for_others = sum_other_contributions/sum_other_counts
s_ij = avg_for_label - avg_for_others
s_ij_label = s_ij[label]
return s_ij_label # was return s_ij
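# Note (added): this implements
#   s_ij = mean(contribution | tag) - mean(contribution | all other tags),
# so a large positive entry marks a neuron whose activation is distinctively
# high for positions associated with that tag.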
def data_initialization(data):
data.initial_feature_alphabets()
data.build_alphabet(data.train_dir)
data.build_alphabet(data.dev_dir)
data.build_alphabet(data.test_dir)
data.fix_alphabet()
def predict_check(pred_variable, gold_variable, mask_variable, sentence_classification=False):
"""
input:
pred_variable (batch_size, sent_len): pred tag result, in numpy format
gold_variable (batch_size, sent_len): gold result variable
mask_variable (batch_size, sent_len): mask variable
"""
pred = pred_variable.cpu().data.numpy()
gold = gold_variable.cpu().data.numpy()
mask = mask_variable.cpu().data.numpy()
overlaped = (pred == gold)
if sentence_classification:
# print(overlaped)
# print(overlaped*pred)
right_token = np.sum(overlaped)
total_token = overlaped.shape[0] ## =batch_size
else:
right_token = np.sum(overlaped * mask)
total_token = mask.sum()
# print("right: %s, total: %s"%(right_token, total_token))
return right_token, total_token
def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet, word_recover, sentence_classification=False):
"""
input:
pred_variable (batch_size, sent_len): pred tag result
gold_variable (batch_size, sent_len): gold result variable
mask_variable (batch_size, sent_len): mask variable
"""
pred_variable = pred_variable[word_recover]
# print("reordered labels: {}".format(pred_variable))
gold_variable = gold_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = gold_variable.size(0)
if sentence_classification:
pred_tag = pred_variable.cpu().data.numpy().tolist()
gold_tag = gold_variable.cpu().data.numpy().tolist()
pred_label = [label_alphabet.get_instance(pred) for pred in pred_tag]
gold_label = [label_alphabet.get_instance(gold) for gold in gold_tag]
else:
seq_len = gold_variable.size(1)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
gold_tag = gold_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
gold_label = []
for idx in range(batch_size):
pred = [label_alphabet.get_instance(pred_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
gold = [label_alphabet.get_instance(gold_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
assert(len(pred)==len(gold))
pred_label.append(pred)
gold_label.append(gold)
return pred_label, gold_label
def recover_nbest_label(pred_variable, mask_variable, label_alphabet, word_recover):
"""
input:
pred_variable (batch_size, sent_len, nbest): pred tag result
mask_variable (batch_size, sent_len): mask variable
word_recover (batch_size)
output:
nbest_pred_label list: [batch_size, nbest, each_seq_len]
"""
# exit(0)
pred_variable = pred_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = pred_variable.size(0)
seq_len = pred_variable.size(1)
nbest = pred_variable.size(2)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
for idx in range(batch_size):
pred = []
for idz in range(nbest):
each_pred = [label_alphabet.get_instance(pred_tag[idx][idy][idz]) for idy in range(seq_len) if mask[idx][idy] != 0]
pred.append(each_pred)
pred_label.append(pred)
return pred_label
def lr_decay(optimizer, epoch, decay_rate, init_lr):
lr = init_lr/(1+decay_rate*epoch)
print(" Learning rate is set as:", lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
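# Note (added): lr_decay is inverse-time decay, lr = init_lr / (1 + decay_rate * epoch);
# e.g. init_lr=0.015 and decay_rate=0.05 give lr ≈ 0.01429 at epoch 1 and
# lr = 0.01 at epoch 10.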
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def evaluate(data, model, name, nbest=None, print_tag_counts=False, tag_to_ablate=None):
'''
:param data:
:param model:
:param name:
:param nbest:
:param print_tag_counts:
:param tag_to_ablate: if this is set to a tag name, like 'B-ORG', then in the LSTM layer's forward() we ablate the
number of neurons specified by data.ablate_num
:return:
'''
ablate_list_for_tag = None
if tag_to_ablate:
data.ablate_tag = tag_to_ablate
ablate_list_for_tag = data.ablate_list[tag_to_ablate]
print("\nEVALUATE file: {}, set={}, \n\t ablate_num={} tag: {} \nablate_list_for_tag={}".format(
data.model_dir, name, data.current_ablate_ind, tag_to_ablate, ablate_list_for_tag))
if name == "train":
instances = data.train_Ids
elif name == "dev":
instances = data.dev_Ids
elif name == 'test':
instances = data.test_Ids
elif name == 'raw':
instances = data.raw_Ids
else:
print("Error: wrong evaluate name,", name)
exit(1)
right_token = 0
whole_token = 0
nbest_pred_results = []
pred_scores = []
pred_results = []
gold_results = []
    ## set model in eval mode
model.eval()
''' Get count of model parameters '''
# print("COUNT PARAMETERS: {}".format(count_parameters(model)))
batch_size = data.HP_batch_size
start_time = time.time()
train_num = len(instances)
total_batch = train_num//batch_size+1
for batch_id in range(total_batch):
start = batch_id*batch_size
end = (batch_id+1)*batch_size
if end > train_num:
end = train_num
instance = instances[start:end]
if not instance:
continue
batch_word, batch_features, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, mask = batchify_with_label(instance, data.HP_gpu, False, data.sentence_classification)
if nbest and not data.sentence_classification:
scores, nbest_tag_seq = model.decode_nbest(batch_word,batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask, nbest)
nbest_pred_result = recover_nbest_label(nbest_tag_seq, mask, data.label_alphabet, batch_wordrecover)
nbest_pred_results += nbest_pred_result
pred_scores += scores[batch_wordrecover].cpu().data.numpy().tolist()
            ## select the best sequence to evaluate
tag_seq = nbest_tag_seq[:,:,0]
else:
tag_seq = model(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask)
pred_label, gold_label = recover_label(tag_seq, batch_label, mask, data.label_alphabet, batch_wordrecover, data.sentence_classification)
pred_results += pred_label
gold_results += gold_label
decode_time = time.time() - start_time
speed = len(instances)/decode_time
acc, p, r, f = get_ner_fmeasure(gold_results, pred_results, data.tagScheme, data=data)
if nbest and not data.sentence_classification:
return speed, acc, p, r, f, nbest_pred_results, pred_scores
''' Get per-tag sensitivity '''
## print("TOTAL BATCH ITERATIONS: {}".format(data.iteration))
sensitivity_matrices = [] # This will hold a row for each tag's sensitivity
for tag in sorted(data.tag_counts):
if print_tag_counts:
if tag == 0:
print("Padding {}: {} instances.".format('0', data.tag_counts[tag]))
else:
print("Tag {}: {} instances.".format(data.label_alphabet.get_instance(tag), data.tag_counts[tag]))
sensitivity_tag = get_sensitivity_matrix(tag)
sensitivity_matrices.append(sensitivity_tag)
sensitivity_combined = np.squeeze(np.stack([sensitivity_matrices]))
# TODO: the following line would stack multiple models' sensitivity,
# but we don't need it unless running many different models for stats
# data.sensitivity_matrices_combined.append(sensitivity_combined)
return speed, acc, p, r, f, pred_results, pred_scores, sensitivity_combined
def batchify_with_label(input_batch_list, gpu, if_train=True, sentence_classification=False):
if sentence_classification:
return batchify_sentence_classification_with_label(input_batch_list, gpu, if_train)
else:
return batchify_sequence_labeling_with_label(input_batch_list, gpu, if_train)
def batchify_sequence_labeling_with_label(input_batch_list, gpu, if_train=True):
"""
        input: list of words, chars and labels, of varying lengths. [[words, features, chars, labels],[words, features, chars, labels],...]
            words: word ids for one sentence. (batch_size, sent_len)
            features: feature ids for one sentence. (batch_size, sent_len, feature_num)
            chars: char ids for one sentence, varying lengths. (batch_size, sent_len, each_word_length)
            labels: label ids for one sentence. (batch_size, sent_len)
output:
zero padding for word and char, with their batch length
word_seq_tensor: (batch_size, max_sent_len) Variable
feature_seq_tensors: [(batch_size, max_sent_len),...] list of Variable
word_seq_lengths: (batch_size,1) Tensor
char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable
char_seq_lengths: (batch_size*max_sent_len,1) Tensor
char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order
label_seq_tensor: (batch_size, max_sent_len)
mask: (batch_size, max_sent_len)
"""
batch_size = len(input_batch_list)
words = [sent[0] for sent in input_batch_list]
features = [np.asarray(sent[1]) for sent in input_batch_list]
feature_num = len(features[0][0])
chars = [sent[2] for sent in input_batch_list]
labels = [sent[3] for sent in input_batch_list]
word_seq_lengths = torch.LongTensor(list(map(len, words)))
max_seq_len = word_seq_lengths.max().item()
word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()
label_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()
feature_seq_tensors = []
for idx in range(feature_num):
feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len),requires_grad = if_train).long())
    # mask changed from .byte() to .bool() (byte masks are deprecated in newer PyTorch)
    mask = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).bool()
for idx, (seq, label, seqlen) in enumerate(zip(words, labels, word_seq_lengths)):
seqlen = seqlen.item()
word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
label_seq_tensor[idx, :seqlen] = torch.LongTensor(label)
mask[idx, :seqlen] = torch.Tensor([1]*seqlen)
for idy in range(feature_num):
feature_seq_tensors[idy][idx,:seqlen] = torch.LongTensor(features[idx][:,idy])
word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True)
word_seq_tensor = word_seq_tensor[word_perm_idx]
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx]
label_seq_tensor = label_seq_tensor[word_perm_idx]
mask = mask[word_perm_idx]
### deal with char
# pad_chars (batch_size, max_seq_len)
pad_chars = [chars[idx] + [[0]] * (max_seq_len-len(chars[idx])) for idx in range(len(chars))]
length_list = [list(map(len, pad_char)) for pad_char in pad_chars]
max_word_len = max(map(max, length_list))
char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad = if_train).long()
char_seq_lengths = torch.LongTensor(length_list)
for idx, (seq, seqlen) in enumerate(zip(pad_chars, char_seq_lengths)):
for idy, (word, wordlen) in enumerate(zip(seq, seqlen)):
# print len(word), wordlen
char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word)
char_seq_tensor = char_seq_tensor[word_perm_idx].view(batch_size*max_seq_len,-1)
char_seq_lengths = char_seq_lengths[word_perm_idx].view(batch_size*max_seq_len,)
char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True)
char_seq_tensor = char_seq_tensor[char_perm_idx]
_, char_seq_recover = char_perm_idx.sort(0, descending=False)
_, word_seq_recover = word_perm_idx.sort(0, descending=False)
if gpu:
word_seq_tensor = word_seq_tensor.cuda()
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx].cuda()
word_seq_lengths = word_seq_lengths.cuda()
word_seq_recover = word_seq_recover.cuda()
label_seq_tensor = label_seq_tensor.cuda()
char_seq_tensor = char_seq_tensor.cuda()
char_seq_recover = char_seq_recover.cuda()
mask = mask.cuda()
return word_seq_tensor,feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask
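# Note (added): batches are sorted by descending word length (as required by
# torch's pack_padded_sequence in this code base's PyTorch era), and
# word_seq_recover / char_seq_recover hold the inverse permutations used by
# recover_label() to map predictions back to the original instance order.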
def batchify_sentence_classification_with_label(input_batch_list, gpu, if_train=True):
"""
        input: list of words, chars and labels, of varying lengths. [[words, features, chars, labels],[words, features, chars, labels],...]
            words: word ids for one sentence. (batch_size, sent_len)
            features: feature ids for one sentence. (batch_size, feature_num), each sentence has one feature set
            chars: char ids for one sentence, varying lengths. (batch_size, sent_len, each_word_length)
            labels: label id for one sentence. (batch_size,), one label per sentence
output:
zero padding for word and char, with their batch length
word_seq_tensor: (batch_size, max_sent_len) Variable
feature_seq_tensors: [(batch_size,), ... ] list of Variable
word_seq_lengths: (batch_size,1) Tensor
char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable
char_seq_lengths: (batch_size*max_sent_len,1) Tensor
char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order
label_seq_tensor: (batch_size, )
mask: (batch_size, max_sent_len)
"""
batch_size = len(input_batch_list)
words = [sent[0] for sent in input_batch_list]
features = [np.asarray(sent[1]) for sent in input_batch_list]
feature_num = len(features[0])
chars = [sent[2] for sent in input_batch_list]
labels = [sent[3] for sent in input_batch_list]
word_seq_lengths = torch.LongTensor(list(map(len, words)))
max_seq_len = word_seq_lengths.max().item()
word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()
label_seq_tensor = torch.zeros((batch_size, ), requires_grad = if_train).long()
feature_seq_tensors = []
for idx in range(feature_num):
feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len),requires_grad = if_train).long())
    # mask changed from .byte() to .bool() (byte masks are deprecated in newer PyTorch)
    mask = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).bool()
label_seq_tensor = torch.LongTensor(labels)
# exit(0)
for idx, (seq, seqlen) in enumerate(zip(words, word_seq_lengths)):
seqlen = seqlen.item()
word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
mask[idx, :seqlen] = torch.Tensor([1]*seqlen)
for idy in range(feature_num):
feature_seq_tensors[idy][idx,:seqlen] = torch.LongTensor(features[idx][:,idy])
word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True)
word_seq_tensor = word_seq_tensor[word_perm_idx]
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx]
label_seq_tensor = label_seq_tensor[word_perm_idx]
mask = mask[word_perm_idx]
### deal with char
# pad_chars (batch_size, max_seq_len)
pad_chars = [chars[idx] + [[0]] * (max_seq_len-len(chars[idx])) for idx in range(len(chars))]
length_list = [list(map(len, pad_char)) for pad_char in pad_chars]
max_word_len = max(map(max, length_list))
char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad = if_train).long()
char_seq_lengths = torch.LongTensor(length_list)
for idx, (seq, seqlen) in enumerate(zip(pad_chars, char_seq_lengths)):
for idy, (word, wordlen) in enumerate(zip(seq, seqlen)):
# print len(word), wordlen
char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word)
char_seq_tensor = char_seq_tensor[word_perm_idx].view(batch_size*max_seq_len,-1)
char_seq_lengths = char_seq_lengths[word_perm_idx].view(batch_size*max_seq_len,)
char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True)
char_seq_tensor = char_seq_tensor[char_perm_idx]
_, char_seq_recover = char_perm_idx.sort(0, descending=False)
_, word_seq_recover = word_perm_idx.sort(0, descending=False)
if gpu:
word_seq_tensor = word_seq_tensor.cuda()
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx].cuda()
word_seq_lengths = word_seq_lengths.cuda()
word_seq_recover = word_seq_recover.cuda()
label_seq_tensor = label_seq_tensor.cuda()
char_seq_tensor = char_seq_tensor.cuda()
char_seq_recover = char_seq_recover.cuda()
mask = mask.cuda()
return word_seq_tensor,feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask
def load_model_to_test(data, train=False, dev=True, test=False, tag=None):
'''
Set any ONE of train, dev, test to true, in order to evaluate on that set.
:param data:
:param train:
    :param dev: defaults to True, i.e. evaluate on the dev set, as in the original experiment
:param test:
:return:
'''
print("Load pretrained model...")
if data.sentence_classification:
model = SentClassifier(data)
else:
model = SeqLabel(data)
model.load_state_dict(torch.load(data.pretrained_model_path))
'''----------------TESTING----------------'''
if (train):
speed, acc, p, r, f, _,_, train_sensitivities = evaluate(data, model, "train")
heatmap_sensitivity(train_sensitivities, data.pretrained_model_path, testname="train")
if data.seg:
current_score = f
print("Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(speed, acc, p, r, f))
else:
current_score = acc
print("Speed: %.2fst/s; acc: %.4f"%(speed, acc))
if (dev):
# for tag in data.ablate_list:
speed, acc, p, r, f, _,_, sensitivities = evaluate(
data, model, "dev", tag_to_ablate=tag)
if data.seg:
current_score = f
print("Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f" % (speed, acc, p, r, f))
else:
current_score = acc
print("Speed: %.2fst/s; acc: %.4f" % (speed, acc))
if (data.ablate_num == 0):
heatmap_sensitivity(sensitivities, data.pretrained_model_path, testname="dev")
importance_matrix(sensitivities, data)
if (test):
        speed, acc, p, r, f, _, _, _ = evaluate(data, model, "test")  # evaluate returns 8 values when nbest is None
if data.seg:
print("Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(speed, acc, p, r, f))
else:
print("Speed: %.2fst/s; acc: %.4f"%(speed, acc))
return
def train(data):
print("Training model...")
data.show_data_summary()
save_data_name = data.model_dir +".dset"
data.save(save_data_name)
if data.sentence_classification:
model = SentClassifier(data)
else:
model = SeqLabel(data)
if data.optimizer.lower() == "sgd":
optimizer = optim.SGD(model.parameters(), lr=data.HP_lr, momentum=data.HP_momentum,weight_decay=data.HP_l2)
elif data.optimizer.lower() == "adagrad":
optimizer = optim.Adagrad(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == "adadelta":
optimizer = optim.Adadelta(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == "rmsprop":
optimizer = optim.RMSprop(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == "adam":
optimizer = optim.Adam(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
else:
print("Optimizer illegal: %s"%(data.optimizer))
exit(1)
best_dev = -10
# data.HP_iteration = 1
## start training
for idx in range(data.HP_iteration):
epoch_start = time.time()
temp_start = epoch_start
print("Epoch: %s/%s" %(idx,data.HP_iteration))
if data.optimizer == "SGD":
optimizer = lr_decay(optimizer, idx, data.HP_lr_decay, data.HP_lr)
instance_count = 0
sample_id = 0
sample_loss = 0
total_loss = 0
right_token = 0
whole_token = 0
random.shuffle(data.train_Ids)
print("Shuffle: first input word list:", data.train_Ids[0][0])
        ## set model in train mode
model.train()
model.zero_grad()
batch_size = data.HP_batch_size
batch_id = 0
train_num = len(data.train_Ids)
total_batch = train_num//batch_size+1
for batch_id in range(total_batch):
start = batch_id*batch_size
end = (batch_id+1)*batch_size
if end >train_num:
end = train_num
instance = data.train_Ids[start:end]
if not instance:
continue
batch_word, batch_features, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, mask = batchify_with_label(instance, data.HP_gpu, True, data.sentence_classification)
instance_count += 1
loss, tag_seq = model.calculate_loss(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, batch_label, mask)
right, whole = predict_check(tag_seq, batch_label, mask, data.sentence_classification)
right_token += right
whole_token += whole
# print("loss:",loss.item())
sample_loss += loss.item()
total_loss += loss.item()
if end%500 == 0:
temp_time = time.time()
temp_cost = temp_time - temp_start
temp_start = temp_time
print(" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f"%(end, temp_cost, sample_loss, right_token, whole_token,(right_token+0.)/whole_token))
if sample_loss > 1e8 or str(sample_loss) == "nan":
print("ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! EXIT....")
exit(1)
sys.stdout.flush()
sample_loss = 0
loss.backward()
optimizer.step()
model.zero_grad()
temp_time = time.time()
temp_cost = temp_time - temp_start
print(" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f"%(end, temp_cost, sample_loss, right_token, whole_token,(right_token+0.)/whole_token))
epoch_finish = time.time()
epoch_cost = epoch_finish - epoch_start
print("Epoch: %s training finished. Time: %.2fs, speed: %.2fst/s, total loss: %s"%(idx, epoch_cost, train_num/epoch_cost, total_loss))
print("totalloss:", total_loss)
if total_loss > 1e8 or str(total_loss) == "nan":
print("ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! EXIT....")
exit(1)
# continue
speed, acc, p, r, f, _,_ , sensitivities = evaluate(data, model, "dev")
dev_finish = time.time()
dev_cost = dev_finish - epoch_finish
if data.seg:
current_score = f
print("Dev: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(dev_cost, speed, acc, p, r, f))
else:
current_score = acc
print("Dev: time: %.2fs speed: %.2fst/s; acc: %.4f"%(dev_cost, speed, acc))
if current_score > best_dev:
if data.seg:
print("Exceed previous best f score:", best_dev)
else:
print("Exceed previous best acc score:", best_dev)
model_name = data.model_dir +'.'+ str(idx) + ".model"
print("Save current best model in file:", model_name)
torch.save(model.state_dict(), model_name)
best_dev = current_score
# ## decode test
speed, acc, p, r, f, _,_ , sensitivities = evaluate(data, model, "test")
test_finish = time.time()
test_cost = test_finish - dev_finish
if data.seg:
print("Test: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(test_cost, speed, acc, p, r, f))
else:
print("Test: time: %.2fs, speed: %.2fst/s; acc: %.4f"%(test_cost, speed, acc))
gc.collect()
def load_model_decode(data, name):
print("Load Model from file: {}, name={}".format(data.model_dir, name) )
if data.sentence_classification:
model = SentClassifier(data)
else:
model = SeqLabel(data)
# model = SeqModel(data)
## load model need consider if the model trained in GPU and load in CPU, or vice versa
# if not gpu:
# model.load_state_dict(torch.load(model_dir))
# # model.load_state_dict(torch.load(model_dir), map_location=lambda storage, loc: storage)
# # model = torch.load(model_dir, map_location=lambda storage, loc: storage)
# else:
# model.load_state_dict(torch.load(model_dir))
# # model = torch.load(model_dir)
model.load_state_dict(torch.load(data.load_model_dir))
print("Decode %s data, nbest: %s ..."%(name, data.nbest))
start_time = time.time()
speed, acc, p, r, f, pred_results, pred_scores = evaluate(data, model, name, data.nbest)
end_time = time.time()
time_cost = end_time - start_time
if data.seg:
print("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(name, time_cost, speed, acc, p, r, f))
else:
print("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f"%(name, time_cost, speed, acc))
return pred_results, pred_scores
def load_ablation_file():
filename = ("Importance-" + data.model_dir + ".txt").replace('/','-')
ablate_lists = {}
''' B-ORG [4, 24, 14, 15, 19, 46, 36, 22, 27, 9, 13, 20, 25, 33, 45, 0, 35, 40, 48, 42, 44, 18, 37, 21, 32, 29, 16, 26, 11, 7, 23, 49, 12, 5, 8, 38, 2, 47, 1, 43, 31, 30, 41, 6, 28, 3, 34, 39, 10, 17]'''
with open(filename, 'r+') as file:
lines = file.readlines()
for line in lines:
line = line.strip()
            if len(line) > 0:
                tag, values = line.split('[')[0].strip(), line.split('[')[1].strip().replace(']','')
                ablate_lists[tag] = [int(i) for i in values.split(',')]
return ablate_lists
def clear_sensitivity_data():
data.iteration = 0
data.batch_contributions = []
data.tag_contributions = {}
data.tag_counts = {}
data.sensitivity_matrices = []
data.sensitivity_matrices_combined = []
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Tuning with NCRF++')
# parser.add_argument('--status', choices=['train', 'decode'], help='update algorithm', default='train')
parser.add_argument('--config', help='Configuration File', default='None')
parser.add_argument('--wordemb', help='Embedding for words', default='None')
parser.add_argument('--charemb', help='Embedding for chars', default='None')
parser.add_argument('--status', choices=['train', 'decode'], help='update algorithm', default='train')
parser.add_argument('--savemodel', default="data/model/saved_model.lstmcrf.")
parser.add_argument('--savedset', help='Dir of saved data setting')
parser.add_argument('--train', default="data/conll03/train.bmes")
parser.add_argument('--dev', default="data/conll03/dev.bmes" )
parser.add_argument('--test', default="data/conll03/test.bmes")
parser.add_argument('--seg', default="True")
parser.add_argument('--raw')
parser.add_argument('--loadmodel')
parser.add_argument('--output')
parser.add_argument('--loadtotest', help='Load the model just to test it')
parser.add_argument('--pretrainedmodelpath', help='Path to a pretrained model that you just want to test',
default=DEFAULT_TRAINED_FILE)
parser.add_argument('--ablate', help='how many neurons to ablate', default=0) # indicate number of neurons to ablate
# Importance.txt is generated by importance_matrix() (automatically reading this file is a TODO)
parser.add_argument('--ablate_file', help='list of neurons to ablate')
args = parser.parse_args()
data = Data()
data.HP_gpu = torch.cuda.is_available()
if args.config == 'None':
data.train_dir = args.train
data.dev_dir = args.dev
data.test_dir = args.test
data.model_dir = args.savemodel
data.dset_dir = args.savedset
print("Save dset directory:",data.dset_dir)
save_model_dir = args.savemodel
data.word_emb_dir = args.wordemb
data.char_emb_dir = args.charemb
if args.seg.lower() == 'true':
data.seg = True
else:
data.seg = False
print("Seed num:",seed_num)
else:
data.read_config(args.config)
# adding arg for pretrained model path
data.pretrained_model_path = args.pretrainedmodelpath
data.ablate_num = int(args.ablate)
# data.show_data_summary()
status = data.status.lower()
print("Seed num:",seed_num)
if status == 'train':
print("MODEL: train")
data_initialization(data) # set up alphabets
data.generate_instance('train')
data.generate_instance('dev')
data.generate_instance('test')
data.build_pretrain_emb()
if not args.loadtotest:
print("Training model, not just testing because --loadtotest is {}".format(args.loadtotest))
print("Loading ablation file even though it's just a placeholder")
debug_ablation = False
if debug_ablation:
data.ablate_list = load_ablation_file() # TODO: file not found
tag_list = data.ablate_list.keys()
train(data)
else:
if args.ablate:
data.ablate_num = int(args.ablate)
print("Loading model to test.")
data.ablate_list = load_ablation_file()
tag_list = data.ablate_list.keys()
# todo: command line arg for specific current ablate index
# todo: command line arg for intervals
for tag in tag_list:
data.ablate_tag = tag
data.current_ablate_ind[tag] = 0
data.acc_chart[data.ablate_tag] = {} # clear accuracy dict of lists for the tag
for i in range(0, data.ablate_num + 1):
data.current_ablate_ind[tag] = i #+= 1 # todo: option to skip by different interval like every 5
clear_sensitivity_data()
load_model_to_test(data, tag=tag)
# print out acc_chart
#for tag in data.ablate_list:
print ('{} ABLATION RESULTS:'.format(tag))
degradations = {}
for t in tag_list:
print("\tTag: {}, Decr. Accs: {}".format(t, data.acc_chart[tag][t]))
degradations[t] = \
[data.acc_chart[tag][t][ind] - data.acc_chart[tag][t][0] for ind in range (0, data.ablate_num+1)]
print("\t\tDegradation={})".format(degradations[t]))
if (t==tag):
# ablation tag, so use bolder symbol
plt.plot(degradations[t], 'bs', label=t)
else:
plt.plot(degradations[t], label=t)
plt.title(tag, fontsize=18)
plt.legend()
plt.savefig("{}_chart.png".format(tag))
plt.clf() # clear the plot -was plot.show()
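# Note (added for clarity): degradations[t][i] is accuracy after ablating i
# neurons minus the unablated accuracy, so more negative values mean tag t
# depends more on the removed units; the ablated tag itself is drawn with
# blue squares ('bs') so it stands out in the chart.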
elif status == 'decode':
print("MODEL: decode")
data.load(data.dset_dir)
data.read_config(args.config)
print(data.raw_dir)
# exit(0)
data.show_data_summary()
data.generate_instance('raw')
print("nbest: %s"%(data.nbest))
decode_results, pred_scores = load_model_decode(data, 'raw')
if data.nbest and not data.sentence_classification:
data.write_nbest_decoded_results(decode_results, pred_scores, 'raw')
else:
data.write_decoded_results(decode_results, 'raw')
else:
print("Invalid argument! Please use valid arguments! (train/test/decode)")
| 44.199554
| 219
| 0.645749
|
from __future__ import print_function
import time
import sys
import argparse
import random
import torch
import gc
import torch.nn as nn
import torch.optim as optim
import numpy as np
from utils.metric import get_ner_fmeasure
from model.seqlabel import SeqLabel
from model.sentclassifier import SentClassifier
from utils.data import Data
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
try:
import cPickle as pickle
except ImportError:
import pickle
DEFAULT_TRAINED_FILE = 'test_data/lstmtestglove50.9.model'
seed_num = 46
random.seed(seed_num)
torch.manual_seed(seed_num)
np.random.seed(seed_num)
def importance_matrix(sensitivities, data,
print_imp=True, show_table=True, tag_to_ablate=None):
important_lists = []
important_nps = np.zeros(50, dtype=int)
sensitivities = sensitivities[1:]
for i in range(len(sensitivities)):
important_list = []
important_np = np.zeros(50, dtype=int)
tag_sensitivity_row = sensitivities[i]
for j in range(len(tag_sensitivity_row)):
most_important = np.argmax(tag_sensitivity_row)
important_list.append(most_important)
important_np[j] = most_important
index = [most_important]
tag_sensitivity_row[most_important] = np.NINF
important_lists.append(important_list)
important_nps = np.vstack((important_nps, important_np))
important_nps = np.delete(important_nps, 0, axis=0)
np.save("imps.npy",important_nps)
important_nps = np.transpose(important_nps)
if show_table:
sns.set()
sns.set(font_scale=0.5)
x_tick = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]
del(x_tick[0])
ax = sns.heatmap(important_nps, annot=True, xticklabels=x_tick,
cmap=ListedColormap(['white']), cbar=False, yticklabels=False,
linecolor='gray', linewidths=0.4)
title = "Importance rankings of neurons per tag"
plt.title(title, fontsize=18)
ttl = ax.title
ttl.set_position([0.5, 1.05])
plt.show()
def trim_model_dir(model_dir):
model_dir = model_dir.replace('/','-')
return model_dir
ax.figure.savefig("ImportanceRankings-{}.png".format(trim_model_dir(data.model_dir)))
if print_imp:
imp_file = open("Importance-{}.txt".format(trim_model_dir(data.model_dir)), "w+")
print('Neuron importance ranking for each NER tag:')
for i, l in enumerate(important_lists):
tags = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]
del(tags[0])
print ("\t{}\t{}".format(tags[i], l))
imp_file.write("{}\t{}\n".format(tags[i], l))
imp_file.write("\n")
np.savetxt("Importance-{}.tsv".format(trim_model_dir(data.model_dir)),
important_nps, fmt='%2.0d', delimiter='\t')
return important_nps
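# Note (added for clarity): each row of `important_lists` ranks the 50 hidden
# units for one tag from most to least sensitive; the ranking is produced by
# repeatedly taking argmax over the tag's sensitivity row and masking the
# chosen index out with -inf.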
def heatmap_sensitivity(sensitivities,
modelname=DEFAULT_TRAINED_FILE,
testname="",
show_pad=False,
show_vals=True,
disable=False):
sensitivities = np.transpose(sensitivities)
start = 1
if show_pad:
start = 0
sensitivities = sensitivities[0:50, start:10]
sns.set()
sns.set(font_scale=0.5)
x_tick = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]
if show_pad: x_tick[0] = 'PAD'
else: del(x_tick[0])
sensitivities_temp = np.zeros((50, 9))
x_tick_output = ['B-PER', 'I-PER', 'B-LOC', 'I-LOC', 'B-ORG', 'I-ORG', 'B-MISC', 'I-MISC', 'O']
for i in range(len(x_tick_output)):
sensitivities_temp[:, i] = sensitivities[:, x_tick.index(x_tick_output[i])]
np.save(modelname+'_sensitivities.npy', sensitivities_temp)
# put sensititivites in heat map
if not disable:
ax = sns.heatmap(sensitivities, xticklabels=x_tick, annot=show_vals, fmt=".2g")
title = "({}): ".format(testname) + modelname
plt.title(title, fontsize=18)
ttl = ax.title
ttl.set_position([0.5, 1.05])
plt.show()
ax.figure.savefig(modelname+"_heatmap.png")
def get_sensitivity_matrix(label, debug=True):
avg_for_label = data.tag_contributions[label]/data.tag_counts[label]
sum_other_counts = 0
# data.tag_contributions[0] is for the padding label and can be ignored
sum_other_contributions = np.zeros((10, 50))
for l in data.tag_counts:
if l != label and l != 0: # if l != label: (to consider the padding label which is 0)
sum_other_counts += data.tag_counts[l]
sum_other_contributions += data.tag_contributions[l]
avg_for_others = sum_other_contributions/sum_other_counts
s_ij = avg_for_label - avg_for_others
s_ij_label = s_ij[label]
return s_ij_label # was return s_ij
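# Note (added for clarity): s_ij contrasts the average per-neuron contribution
# for `label` with the average over all other non-padding labels, so positive
# entries mark hidden units that fire disproportionately for this tag.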
def data_initialization(data):
data.initial_feature_alphabets()
data.build_alphabet(data.train_dir)
data.build_alphabet(data.dev_dir)
data.build_alphabet(data.test_dir)
data.fix_alphabet()
def predict_check(pred_variable, gold_variable, mask_variable, sentence_classification=False):
pred = pred_variable.cpu().data.numpy()
gold = gold_variable.cpu().data.numpy()
mask = mask_variable.cpu().data.numpy()
overlaped = (pred == gold)
if sentence_classification:
# print(overlaped)
# print(overlaped*pred)
right_token = np.sum(overlaped)
total_token = overlaped.shape[0] ## =batch_size
else:
right_token = np.sum(overlaped * mask)
total_token = mask.sum()
# print("right: %s, total: %s"%(right_token, total_token))
return right_token, total_token
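# Note (added for clarity): in the sequence-labeling branch the mask zeroes
# out padding positions, so right_token/total_token is token accuracy over
# real tokens only; the sentence-classification branch counts whole sequences.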
def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet, word_recover, sentence_classification=False):
pred_variable = pred_variable[word_recover]
# print("reordered labels: {}".format(pred_variable))
gold_variable = gold_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = gold_variable.size(0)
if sentence_classification:
pred_tag = pred_variable.cpu().data.numpy().tolist()
gold_tag = gold_variable.cpu().data.numpy().tolist()
pred_label = [label_alphabet.get_instance(pred) for pred in pred_tag]
gold_label = [label_alphabet.get_instance(gold) for gold in gold_tag]
else:
seq_len = gold_variable.size(1)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
gold_tag = gold_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
gold_label = []
for idx in range(batch_size):
pred = [label_alphabet.get_instance(pred_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
gold = [label_alphabet.get_instance(gold_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
assert(len(pred)==len(gold))
pred_label.append(pred)
gold_label.append(gold)
return pred_label, gold_label
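# Note (added for clarity): `word_recover` is the inverse permutation of the
# descending-length sort done in batchify_*_with_label below; indexing the
# prediction tensors with it restores the original batch order before the
# label strings are read off.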
def recover_nbest_label(pred_variable, mask_variable, label_alphabet, word_recover):
# exit(0)
pred_variable = pred_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = pred_variable.size(0)
seq_len = pred_variable.size(1)
nbest = pred_variable.size(2)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
for idx in range(batch_size):
pred = []
for idz in range(nbest):
each_pred = [label_alphabet.get_instance(pred_tag[idx][idy][idz]) for idy in range(seq_len) if mask[idx][idy] != 0]
pred.append(each_pred)
pred_label.append(pred)
return pred_label
def lr_decay(optimizer, epoch, decay_rate, init_lr):
lr = init_lr/(1+decay_rate*epoch)
print(" Learning rate is set as:", lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
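# Illustration (added; the numbers are hypothetical, not from any config):
#   init_lr=0.015, decay_rate=0.05
#   epoch 0  -> 0.015/1.00  = 0.01500
#   epoch 1  -> 0.015/1.05 ~= 0.01429
#   epoch 10 -> 0.015/1.50  = 0.01000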
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def evaluate(data, model, name, nbest=None, print_tag_counts=False, tag_to_ablate=None):
ablate_list_for_tag = None
if tag_to_ablate:
data.ablate_tag = tag_to_ablate
ablate_list_for_tag = data.ablate_list[tag_to_ablate]
print("\nEVALUATE file: {}, set={}, \n\t ablate_num={} tag: {} \nablate_list_for_tag={}".format(
data.model_dir, name, data.current_ablate_ind, tag_to_ablate, ablate_list_for_tag))
if name == "train":
instances = data.train_Ids
elif name == "dev":
instances = data.dev_Ids
elif name == 'test':
instances = data.test_Ids
elif name == 'raw':
instances = data.raw_Ids
else:
print("Error: wrong evaluate name,", name)
exit(1)
right_token = 0
whole_token = 0
nbest_pred_results = []
pred_scores = []
pred_results = []
gold_results = []
## set model in eval mode
model.eval()
# print("COUNT PARAMETERS: {}".format(count_parameters(model)))
batch_size = data.HP_batch_size
start_time = time.time()
train_num = len(instances)
total_batch = train_num//batch_size+1
for batch_id in range(total_batch):
start = batch_id*batch_size
end = (batch_id+1)*batch_size
if end > train_num:
end = train_num
instance = instances[start:end]
if not instance:
continue
batch_word, batch_features, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, mask = batchify_with_label(instance, data.HP_gpu, False, data.sentence_classification)
if nbest and not data.sentence_classification:
scores, nbest_tag_seq = model.decode_nbest(batch_word,batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask, nbest)
nbest_pred_result = recover_nbest_label(nbest_tag_seq, mask, data.label_alphabet, batch_wordrecover)
nbest_pred_results += nbest_pred_result
pred_scores += scores[batch_wordrecover].cpu().data.numpy().tolist()
## select the best sequence to evaluate
tag_seq = nbest_tag_seq[:,:,0]
else:
tag_seq = model(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask)
pred_label, gold_label = recover_label(tag_seq, batch_label, mask, data.label_alphabet, batch_wordrecover, data.sentence_classification)
pred_results += pred_label
gold_results += gold_label
decode_time = time.time() - start_time
speed = len(instances)/decode_time
acc, p, r, f = get_ner_fmeasure(gold_results, pred_results, data.tagScheme, data=data)
if nbest and not data.sentence_classification:
return speed, acc, p, r, f, nbest_pred_results, pred_scores
## print("TOTAL BATCH ITERATIONS: {}".format(data.iteration))
sensitivity_matrices = [] # This will hold a row for each tag's sensitivity
for tag in sorted(data.tag_counts):
if print_tag_counts:
if tag == 0:
print("Padding {}: {} instances.".format('0', data.tag_counts[tag]))
else:
print("Tag {}: {} instances.".format(data.label_alphabet.get_instance(tag), data.tag_counts[tag]))
sensitivity_tag = get_sensitivity_matrix(tag)
sensitivity_matrices.append(sensitivity_tag)
sensitivity_combined = np.squeeze(np.stack([sensitivity_matrices]))
# but we don't need it unless running many different models for stats
return speed, acc, p, r, f, pred_results, pred_scores, sensitivity_combined
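# Note (added for clarity): when nbest is unset, evaluate() also returns
# sensitivity_combined, a (n_tags x 50) array with one sensitivity row per tag
# in data.tag_counts (padding included); the underlying tag_contributions and
# tag_counts are assumed to be accumulated inside the model's forward pass,
# which is not shown in this file.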
def batchify_with_label(input_batch_list, gpu, if_train=True, sentence_classification=False):
if sentence_classification:
return batchify_sentence_classification_with_label(input_batch_list, gpu, if_train)
else:
return batchify_sequence_labeling_with_label(input_batch_list, gpu, if_train)
def batchify_sequence_labeling_with_label(input_batch_list, gpu, if_train=True):
batch_size = len(input_batch_list)
words = [sent[0] for sent in input_batch_list]
features = [np.asarray(sent[1]) for sent in input_batch_list]
feature_num = len(features[0][0])
chars = [sent[2] for sent in input_batch_list]
labels = [sent[3] for sent in input_batch_list]
word_seq_lengths = torch.LongTensor(list(map(len, words)))
max_seq_len = word_seq_lengths.max().item()
word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()
label_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()
feature_seq_tensors = []
for idx in range(feature_num):
feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len),requires_grad = if_train).long())
# mask = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).byte()
mask = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).bool()
for idx, (seq, label, seqlen) in enumerate(zip(words, labels, word_seq_lengths)):
seqlen = seqlen.item()
word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
label_seq_tensor[idx, :seqlen] = torch.LongTensor(label)
mask[idx, :seqlen] = torch.Tensor([1]*seqlen)
for idy in range(feature_num):
feature_seq_tensors[idy][idx,:seqlen] = torch.LongTensor(features[idx][:,idy])
word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True)
word_seq_tensor = word_seq_tensor[word_perm_idx]
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx]
label_seq_tensor = label_seq_tensor[word_perm_idx]
mask = mask[word_perm_idx]
### deal with char
# pad_chars (batch_size, max_seq_len)
pad_chars = [chars[idx] + [[0]] * (max_seq_len-len(chars[idx])) for idx in range(len(chars))]
length_list = [list(map(len, pad_char)) for pad_char in pad_chars]
max_word_len = max(map(max, length_list))
char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad = if_train).long()
char_seq_lengths = torch.LongTensor(length_list)
for idx, (seq, seqlen) in enumerate(zip(pad_chars, char_seq_lengths)):
for idy, (word, wordlen) in enumerate(zip(seq, seqlen)):
# print len(word), wordlen
char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word)
char_seq_tensor = char_seq_tensor[word_perm_idx].view(batch_size*max_seq_len,-1)
char_seq_lengths = char_seq_lengths[word_perm_idx].view(batch_size*max_seq_len,)
char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True)
char_seq_tensor = char_seq_tensor[char_perm_idx]
_, char_seq_recover = char_perm_idx.sort(0, descending=False)
_, word_seq_recover = word_perm_idx.sort(0, descending=False)
if gpu:
word_seq_tensor = word_seq_tensor.cuda()
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx].cuda()
word_seq_lengths = word_seq_lengths.cuda()
word_seq_recover = word_seq_recover.cuda()
label_seq_tensor = label_seq_tensor.cuda()
char_seq_tensor = char_seq_tensor.cuda()
char_seq_recover = char_seq_recover.cuda()
mask = mask.cuda()
return word_seq_tensor,feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask
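# Note (added for clarity): sentences are sorted by length in descending order
# so the words can be packed for the recurrent layers; word_seq_recover and
# char_seq_recover store the inverse permutations needed to undo both sorts.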
def batchify_sentence_classification_with_label(input_batch_list, gpu, if_train=True):
batch_size = len(input_batch_list)
words = [sent[0] for sent in input_batch_list]
features = [np.asarray(sent[1]) for sent in input_batch_list]
feature_num = len(features[0])
chars = [sent[2] for sent in input_batch_list]
labels = [sent[3] for sent in input_batch_list]
word_seq_lengths = torch.LongTensor(list(map(len, words)))
max_seq_len = word_seq_lengths.max().item()
word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()
label_seq_tensor = torch.zeros((batch_size, ), requires_grad = if_train).long()
feature_seq_tensors = []
for idx in range(feature_num):
feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len),requires_grad = if_train).long())
# mask = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).byte()
mask = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).bool()
label_seq_tensor = torch.LongTensor(labels)
# exit(0)
for idx, (seq, seqlen) in enumerate(zip(words, word_seq_lengths)):
seqlen = seqlen.item()
word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
mask[idx, :seqlen] = torch.Tensor([1]*seqlen)
for idy in range(feature_num):
feature_seq_tensors[idy][idx,:seqlen] = torch.LongTensor(features[idx][:,idy])
word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True)
word_seq_tensor = word_seq_tensor[word_perm_idx]
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx]
label_seq_tensor = label_seq_tensor[word_perm_idx]
mask = mask[word_perm_idx]
### deal with char
# pad_chars (batch_size, max_seq_len)
pad_chars = [chars[idx] + [[0]] * (max_seq_len-len(chars[idx])) for idx in range(len(chars))]
length_list = [list(map(len, pad_char)) for pad_char in pad_chars]
max_word_len = max(map(max, length_list))
char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad = if_train).long()
char_seq_lengths = torch.LongTensor(length_list)
for idx, (seq, seqlen) in enumerate(zip(pad_chars, char_seq_lengths)):
for idy, (word, wordlen) in enumerate(zip(seq, seqlen)):
# print len(word), wordlen
char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word)
char_seq_tensor = char_seq_tensor[word_perm_idx].view(batch_size*max_seq_len,-1)
char_seq_lengths = char_seq_lengths[word_perm_idx].view(batch_size*max_seq_len,)
char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True)
char_seq_tensor = char_seq_tensor[char_perm_idx]
_, char_seq_recover = char_perm_idx.sort(0, descending=False)
_, word_seq_recover = word_perm_idx.sort(0, descending=False)
if gpu:
word_seq_tensor = word_seq_tensor.cuda()
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx].cuda()
word_seq_lengths = word_seq_lengths.cuda()
word_seq_recover = word_seq_recover.cuda()
label_seq_tensor = label_seq_tensor.cuda()
char_seq_tensor = char_seq_tensor.cuda()
char_seq_recover = char_seq_recover.cuda()
mask = mask.cuda()
return word_seq_tensor,feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask
def load_model_to_test(data, train=False, dev=True, test=False, tag=None):
print("Load pretrained model...")
if data.sentence_classification:
model = SentClassifier(data)
else:
model = SeqLabel(data)
model.load_state_dict(torch.load(data.pretrained_model_path))
if (train):
speed, acc, p, r, f, _,_, train_sensitivities = evaluate(data, model, "train")
heatmap_sensitivity(train_sensitivities, data.pretrained_model_path, testname="train")
if data.seg:
current_score = f
print("Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(speed, acc, p, r, f))
else:
current_score = acc
print("Speed: %.2fst/s; acc: %.4f"%(speed, acc))
if (dev):
# for tag in data.ablate_list:
speed, acc, p, r, f, _,_, sensitivities = evaluate(
data, model, "dev", tag_to_ablate=tag)
if data.seg:
current_score = f
print("Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f" % (speed, acc, p, r, f))
else:
current_score = acc
print("Speed: %.2fst/s; acc: %.4f" % (speed, acc))
if (data.ablate_num == 0):
heatmap_sensitivity(sensitivities, data.pretrained_model_path, testname="dev")
importance_matrix(sensitivities, data)
if (test):
speed, acc, p, r, f, _,_ = evaluate(data, model, "test")
if data.seg:
print("Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(speed, acc, p, r, f))
else:
print("Speed: %.2fst/s; acc: %.4f"%(speed, acc))
return
def train(data):
print("Training model...")
data.show_data_summary()
save_data_name = data.model_dir +".dset"
data.save(save_data_name)
if data.sentence_classification:
model = SentClassifier(data)
else:
model = SeqLabel(data)
if data.optimizer.lower() == "sgd":
optimizer = optim.SGD(model.parameters(), lr=data.HP_lr, momentum=data.HP_momentum,weight_decay=data.HP_l2)
elif data.optimizer.lower() == "adagrad":
optimizer = optim.Adagrad(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == "adadelta":
optimizer = optim.Adadelta(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == "rmsprop":
optimizer = optim.RMSprop(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == "adam":
optimizer = optim.Adam(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
else:
print("Optimizer illegal: %s"%(data.optimizer))
exit(1)
best_dev = -10
# data.HP_iteration = 1
## start training
for idx in range(data.HP_iteration):
epoch_start = time.time()
temp_start = epoch_start
print("Epoch: %s/%s" %(idx,data.HP_iteration))
if data.optimizer == "SGD":
optimizer = lr_decay(optimizer, idx, data.HP_lr_decay, data.HP_lr)
instance_count = 0
sample_id = 0
sample_loss = 0
total_loss = 0
right_token = 0
whole_token = 0
random.shuffle(data.train_Ids)
print("Shuffle: first input word list:", data.train_Ids[0][0])
## set model in train mode
model.train()
model.zero_grad()
batch_size = data.HP_batch_size
batch_id = 0
train_num = len(data.train_Ids)
total_batch = train_num//batch_size+1
for batch_id in range(total_batch):
start = batch_id*batch_size
end = (batch_id+1)*batch_size
if end >train_num:
end = train_num
instance = data.train_Ids[start:end]
if not instance:
continue
batch_word, batch_features, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, mask = batchify_with_label(instance, data.HP_gpu, True, data.sentence_classification)
instance_count += 1
loss, tag_seq = model.calculate_loss(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, batch_label, mask)
right, whole = predict_check(tag_seq, batch_label, mask, data.sentence_classification)
right_token += right
whole_token += whole
# print("loss:",loss.item())
sample_loss += loss.item()
total_loss += loss.item()
if end%500 == 0:
temp_time = time.time()
temp_cost = temp_time - temp_start
temp_start = temp_time
print(" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f"%(end, temp_cost, sample_loss, right_token, whole_token,(right_token+0.)/whole_token))
if sample_loss > 1e8 or str(sample_loss) == "nan":
print("ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! EXIT....")
exit(1)
sys.stdout.flush()
sample_loss = 0
loss.backward()
optimizer.step()
model.zero_grad()
temp_time = time.time()
temp_cost = temp_time - temp_start
print(" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f"%(end, temp_cost, sample_loss, right_token, whole_token,(right_token+0.)/whole_token))
epoch_finish = time.time()
epoch_cost = epoch_finish - epoch_start
print("Epoch: %s training finished. Time: %.2fs, speed: %.2fst/s, total loss: %s"%(idx, epoch_cost, train_num/epoch_cost, total_loss))
print("totalloss:", total_loss)
if total_loss > 1e8 or str(total_loss) == "nan":
print("ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! EXIT....")
exit(1)
# continue
speed, acc, p, r, f, _,_ , sensitivities = evaluate(data, model, "dev")
dev_finish = time.time()
dev_cost = dev_finish - epoch_finish
if data.seg:
current_score = f
print("Dev: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(dev_cost, speed, acc, p, r, f))
else:
current_score = acc
print("Dev: time: %.2fs speed: %.2fst/s; acc: %.4f"%(dev_cost, speed, acc))
if current_score > best_dev:
if data.seg:
print("Exceed previous best f score:", best_dev)
else:
print("Exceed previous best acc score:", best_dev)
model_name = data.model_dir +'.'+ str(idx) + ".model"
print("Save current best model in file:", model_name)
torch.save(model.state_dict(), model_name)
best_dev = current_score
# ## decode test
speed, acc, p, r, f, _,_ , sensitivities = evaluate(data, model, "test")
test_finish = time.time()
test_cost = test_finish - dev_finish
if data.seg:
print("Test: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(test_cost, speed, acc, p, r, f))
else:
print("Test: time: %.2fs, speed: %.2fst/s; acc: %.4f"%(test_cost, speed, acc))
gc.collect()
def load_model_decode(data, name):
print("Load Model from file: {}, name={}".format(data.model_dir, name) )
if data.sentence_classification:
model = SentClassifier(data)
else:
model = SeqLabel(data)
# model = SeqModel(data)
## load model need consider if the model trained in GPU and load in CPU, or vice versa
# if not gpu:
# model.load_state_dict(torch.load(model_dir))
# # model.load_state_dict(torch.load(model_dir), map_location=lambda storage, loc: storage)
# # model = torch.load(model_dir, map_location=lambda storage, loc: storage)
# else:
# model.load_state_dict(torch.load(model_dir))
# # model = torch.load(model_dir)
model.load_state_dict(torch.load(data.load_model_dir))
print("Decode %s data, nbest: %s ..."%(name, data.nbest))
start_time = time.time()
speed, acc, p, r, f, pred_results, pred_scores = evaluate(data, model, name, data.nbest)
end_time = time.time()
time_cost = end_time - start_time
if data.seg:
print("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(name, time_cost, speed, acc, p, r, f))
else:
print("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f"%(name, time_cost, speed, acc))
return pred_results, pred_scores
def load_ablation_file():
filename = ("Importance-" + data.model_dir + ".txt").replace('/','-')
ablate_lists = {}
with open(filename, 'r+') as file:
lines = file.readlines()
for line in lines:
line = line.strip()
if len(line) > 0:
(tag, list) = line.split('[')[0].strip(), line.split('[')[1].strip().replace(']','')
list = list.split(',')
ablate_lists[tag] = [int(i) for i in list]
return ablate_lists
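# Format sketch (added; mirrors the example kept in the commented version of
# this file): each non-empty line of the Importance file looks like
#   B-ORG [4, 24, 14, 15, ...]
# i.e. a tag name followed by a bracketed list of neuron indices.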
def clear_sensitivity_data():
data.iteration = 0
data.batch_contributions = []
data.tag_contributions = {}
data.tag_counts = {}
data.sensitivity_matrices = []
data.sensitivity_matrices_combined = []
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Tuning with NCRF++')
# parser.add_argument('--status', choices=['train', 'decode'], help='update algorithm', default='train')
parser.add_argument('--config', help='Configuration File', default='None')
parser.add_argument('--wordemb', help='Embedding for words', default='None')
parser.add_argument('--charemb', help='Embedding for chars', default='None')
parser.add_argument('--status', choices=['train', 'decode'], help='update algorithm', default='train')
parser.add_argument('--savemodel', default="data/model/saved_model.lstmcrf.")
parser.add_argument('--savedset', help='Dir of saved data setting')
parser.add_argument('--train', default="data/conll03/train.bmes")
parser.add_argument('--dev', default="data/conll03/dev.bmes" )
parser.add_argument('--test', default="data/conll03/test.bmes")
parser.add_argument('--seg', default="True")
parser.add_argument('--raw')
parser.add_argument('--loadmodel')
parser.add_argument('--output')
parser.add_argument('--loadtotest', help='Load the model just to test it')
parser.add_argument('--pretrainedmodelpath', help='Path to a pretrained model that you just want to test',
default=DEFAULT_TRAINED_FILE)
parser.add_argument('--ablate', help='how many neurons to ablate', default=0) # indicate number of neurons to ablate
# Importance.txt is generated by importance_matrix() (automatically reading this file is a TODO)
parser.add_argument('--ablate_file', help='list of neurons to ablate')
args = parser.parse_args()
data = Data()
data.HP_gpu = torch.cuda.is_available()
if args.config == 'None':
data.train_dir = args.train
data.dev_dir = args.dev
data.test_dir = args.test
data.model_dir = args.savemodel
data.dset_dir = args.savedset
print("Save dset directory:",data.dset_dir)
save_model_dir = args.savemodel
data.word_emb_dir = args.wordemb
data.char_emb_dir = args.charemb
if args.seg.lower() == 'true':
data.seg = True
else:
data.seg = False
print("Seed num:",seed_num)
else:
data.read_config(args.config)
# adding arg for pretrained model path
data.pretrained_model_path = args.pretrainedmodelpath
data.ablate_num = int(args.ablate)
# data.show_data_summary()
status = data.status.lower()
print("Seed num:",seed_num)
if status == 'train':
print("MODEL: train")
data_initialization(data) # set up alphabets
data.generate_instance('train')
data.generate_instance('dev')
data.generate_instance('test')
data.build_pretrain_emb()
if not args.loadtotest:
print("Training model, not just testing because --loadtotest is {}".format(args.loadtotest))
print("Loading ablation file even though it's just a placeholder")
debug_ablation = False
if debug_ablation:
data.ablate_list = load_ablation_file()
tag_list = data.ablate_list.keys()
train(data)
else:
if args.ablate:
data.ablate_num = int(args.ablate)
print("Loading model to test.")
data.ablate_list = load_ablation_file()
tag_list = data.ablate_list.keys()
for tag in tag_list:
data.ablate_tag = tag
data.current_ablate_ind[tag] = 0
data.acc_chart[data.ablate_tag] = {}
for i in range(0, data.ablate_num + 1):
data.current_ablate_ind[tag] = i
clear_sensitivity_data()
load_model_to_test(data, tag=tag)
print ('{} ABLATION RESULTS:'.format(tag))
degradations = {}
for t in tag_list:
print("\tTag: {}, Decr. Accs: {}".format(t, data.acc_chart[tag][t]))
degradations[t] = \
[data.acc_chart[tag][t][ind] - data.acc_chart[tag][t][0] for ind in range (0, data.ablate_num+1)]
print("\t\tDegradation={})".format(degradations[t]))
if (t==tag):
plt.plot(degradations[t], 'bs', label=t)
else:
plt.plot(degradations[t], label=t)
plt.title(tag, fontsize=18)
plt.legend()
plt.savefig("{}_chart.png".format(tag))
plt.clf()
elif status == 'decode':
print("MODEL: decode")
data.load(data.dset_dir)
data.read_config(args.config)
print(data.raw_dir)
data.show_data_summary()
data.generate_instance('raw')
print("nbest: %s"%(data.nbest))
decode_results, pred_scores = load_model_decode(data, 'raw')
if data.nbest and not data.sentence_classification:
data.write_nbest_decoded_results(decode_results, pred_scores, 'raw')
else:
data.write_decoded_results(decode_results, 'raw')
else:
print("Invalid argument! Please use valid arguments! (train/test/decode)")
| true
| true
|
790e7f6b3c7e567f5959d6e6164964eafc2f6710
| 11,370
|
py
|
Python
|
mne/forward/tests/test_field_interpolation.py
|
0reza/mne-python
|
da02a256423404a81929d6de278bc63d3192a280
|
[
"BSD-3-Clause"
] | null | null | null |
mne/forward/tests/test_field_interpolation.py
|
0reza/mne-python
|
da02a256423404a81929d6de278bc63d3192a280
|
[
"BSD-3-Clause"
] | null | null | null |
mne/forward/tests/test_field_interpolation.py
|
0reza/mne-python
|
da02a256423404a81929d6de278bc63d3192a280
|
[
"BSD-3-Clause"
] | null | null | null |
from os import path as op
import numpy as np
from numpy.polynomial import legendre
from numpy.testing import (assert_allclose, assert_array_equal, assert_equal,
assert_array_almost_equal)
from scipy.interpolate import interp1d
import pytest
import mne
from mne.forward import _make_surface_mapping, make_field_map
from mne.forward._lead_dots import (_comp_sum_eeg, _comp_sums_meg,
_get_legen_table, _do_cross_dots)
from mne.forward._make_forward import _create_meg_coils
from mne.forward._field_interpolation import _setup_dots
from mne.surface import get_meg_helmet_surf, get_head_surf
from mne.datasets import testing
from mne import read_evokeds, pick_types, make_fixed_length_events, Epochs
from mne.io import read_raw_fif
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
data_path = testing.data_path(download=False)
trans_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = op.join(data_path, 'subjects')
@testing.requires_testing_data
def test_field_map_ctf():
"""Test that field mapping can be done with CTF data."""
raw = read_raw_fif(raw_ctf_fname).crop(0, 1)
raw.apply_gradient_compensation(3)
events = make_fixed_length_events(raw, duration=0.5)
evoked = Epochs(raw, events).average()
evoked.pick_channels(evoked.ch_names[:50]) # crappy mapping but faster
# smoke test
make_field_map(evoked, trans=trans_fname, subject='sample',
subjects_dir=subjects_dir)
def test_legendre_val():
"""Test Legendre polynomial (derivative) equivalence."""
rng = np.random.RandomState(0)
# check table equiv
xs = np.linspace(-1., 1., 1000)
n_terms = 100
# True, numpy
vals_np = legendre.legvander(xs, n_terms - 1)
# Table approximation
for nc, interp in zip([100, 50], ['nearest', 'linear']):
lut, n_fact = _get_legen_table('eeg', n_coeff=nc, force_calc=True)
lut_fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, interp,
axis=0)
vals_i = lut_fun(xs)
# Need a "1:" here because we omit the first coefficient in our table!
assert_allclose(vals_np[:, 1:vals_i.shape[1] + 1], vals_i,
rtol=1e-2, atol=5e-3)
# Now let's look at our sums
ctheta = rng.rand(20, 30) * 2.0 - 1.0
beta = rng.rand(20, 30) * 0.8
c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact)
c1.shape = beta.shape
# compare to numpy
n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis]
coeffs = np.zeros((n_terms,) + beta.shape)
coeffs[1:] = (np.cumprod([beta] * (n_terms - 1), axis=0) *
(2.0 * n + 1.0) * (2.0 * n + 1.0) / n)
# can't use tensor=False here b/c it isn't in old numpy
c2 = np.empty((20, 30))
for ci1 in range(20):
for ci2 in range(30):
c2[ci1, ci2] = legendre.legval(ctheta[ci1, ci2],
coeffs[:, ci1, ci2])
assert_allclose(c1, c2, 1e-2, 1e-3) # close enough...
# compare fast and slow for MEG
ctheta = rng.rand(20 * 30) * 2.0 - 1.0
beta = rng.rand(20 * 30) * 0.8
lut, n_fact = _get_legen_table('meg', n_coeff=10, force_calc=True)
fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'nearest', axis=0)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
lut, n_fact = _get_legen_table('meg', n_coeff=20, force_calc=True)
fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'linear', axis=0)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
def test_legendre_table():
"""Test Legendre table calculation."""
# double-check our table generation
n = 10
for ch_type in ['eeg', 'meg']:
lut1, n_fact1 = _get_legen_table(ch_type, n_coeff=25, force_calc=True)
lut1 = lut1[:, :n - 1].copy()
n_fact1 = n_fact1[:n - 1].copy()
lut2, n_fact2 = _get_legen_table(ch_type, n_coeff=n, force_calc=True)
assert_allclose(lut1, lut2)
assert_allclose(n_fact1, n_fact2)
@testing.requires_testing_data
def test_make_field_map_eeg():
"""Test interpolation of EEG field onto head."""
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
evoked.info['bads'] = ['MEG 2443', 'EEG 053'] # add some bads
surf = get_head_surf('sample', subjects_dir=subjects_dir)
# we must have trans if surface is in MRI coords
pytest.raises(ValueError, _make_surface_mapping, evoked.info, surf, 'eeg')
evoked.pick_types(meg=False, eeg=True)
fmd = make_field_map(evoked, trans_fname,
subject='sample', subjects_dir=subjects_dir)
# trans is necessary for EEG only
pytest.raises(RuntimeError, make_field_map, evoked, None,
subject='sample', subjects_dir=subjects_dir)
fmd = make_field_map(evoked, trans_fname,
subject='sample', subjects_dir=subjects_dir)
assert len(fmd) == 1
assert_array_equal(fmd[0]['data'].shape, (642, 59)) # maps data onto surf
assert len(fmd[0]['ch_names']) == 59
@testing.requires_testing_data
@pytest.mark.slowtest
def test_make_field_map_meg():
"""Test interpolation of MEG field onto helmet | head."""
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
info = evoked.info
surf = get_meg_helmet_surf(info)
# let's reduce the number of channels by a bunch to speed it up
info['bads'] = info['ch_names'][:200]
# bad ch_type
pytest.raises(ValueError, _make_surface_mapping, info, surf, 'foo')
# bad mode
pytest.raises(ValueError, _make_surface_mapping, info, surf, 'meg',
mode='foo')
# no picks
evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
pytest.raises(RuntimeError, _make_surface_mapping, evoked_eeg.info,
surf, 'meg')
# bad surface def
nn = surf['nn']
del surf['nn']
pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['nn'] = nn
cf = surf['coord_frame']
del surf['coord_frame']
pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['coord_frame'] = cf
# now do it with make_field_map
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj() # avoid projection warnings
fmd = make_field_map(evoked, None,
subject='sample', subjects_dir=subjects_dir)
assert (len(fmd) == 1)
assert_array_equal(fmd[0]['data'].shape, (304, 106)) # maps data onto surf
assert len(fmd[0]['ch_names']) == 106
pytest.raises(ValueError, make_field_map, evoked, ch_type='foobar')
# now test the make_field_map on head surf for MEG
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj()
fmd = make_field_map(evoked, trans_fname, meg_surf='head',
subject='sample', subjects_dir=subjects_dir)
assert len(fmd) == 1
assert_array_equal(fmd[0]['data'].shape, (642, 106)) # maps data onto surf
assert len(fmd[0]['ch_names']) == 106
pytest.raises(ValueError, make_field_map, evoked, meg_surf='foobar',
subjects_dir=subjects_dir, trans=trans_fname)
@testing.requires_testing_data
def test_make_field_map_meeg():
"""Test making a M/EEG field map onto helmet & head."""
evoked = read_evokeds(evoked_fname, baseline=(-0.2, 0.0))[0]
picks = pick_types(evoked.info, meg=True, eeg=True)
picks = picks[::10]
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.info.normalize_proj()
maps = make_field_map(evoked, trans_fname, subject='sample',
subjects_dir=subjects_dir, verbose='debug')
assert_equal(maps[0]['data'].shape, (642, 6)) # EEG->Head
assert_equal(maps[1]['data'].shape, (304, 31)) # MEG->Helmet
# reasonable ranges
maxs = (1.2, 2.0) # before #4418, was (1.1, 2.0)
mins = (-0.8, -1.3) # before #4418, was (-0.6, -1.2)
assert_equal(len(maxs), len(maps))
for map_, max_, min_ in zip(maps, maxs, mins):
assert_allclose(map_['data'].max(), max_, rtol=5e-2)
assert_allclose(map_['data'].min(), min_, rtol=5e-2)
# calculated from correct looking mapping on 2015/12/26
assert_allclose(np.sqrt(np.sum(maps[0]['data'] ** 2)), 19.0903, # 16.6088,
atol=1e-3, rtol=1e-3)
assert_allclose(np.sqrt(np.sum(maps[1]['data'] ** 2)), 19.4748, # 20.1245,
atol=1e-3, rtol=1e-3)
def _setup_args(info):
"""Configure args for test_as_meg_type_evoked."""
coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t'])
int_rad, _, lut_fun, n_fact = _setup_dots('fast', info, coils, 'meg')
my_origin = np.array([0., 0., 0.04])
args_dict = dict(intrad=int_rad, volume=False, coils1=coils, r0=my_origin,
ch_type='meg', lut=lut_fun, n_fact=n_fact)
return args_dict
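# Note (added): my_origin = (0., 0., 0.04) m is the conventional sphere-model
# head origin (4 cm up from the head-frame origin) commonly used in MNE.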
@testing.requires_testing_data
def test_as_meg_type_evoked():
"""Test interpolation of data on to virtual channels."""
# validation tests
raw = read_raw_fif(raw_fname)
events = mne.find_events(raw)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, include=['STI 014'],
exclude='bads')
epochs = mne.Epochs(raw, events, picks=picks)
evoked = epochs.average()
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.as_type('meg')
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.copy().pick_types(meg='grad').as_type('meg')
# channel names
ch_names = evoked.info['ch_names']
virt_evoked = evoked.copy().pick_channels(ch_names=ch_names[:10:1])
virt_evoked.info.normalize_proj()
virt_evoked = virt_evoked.as_type('mag')
assert (all(ch.endswith('_v') for ch in virt_evoked.info['ch_names']))
# pick from and to channels
evoked_from = evoked.copy().pick_channels(ch_names=ch_names[2:10:3])
evoked_to = evoked.copy().pick_channels(ch_names=ch_names[0:10:3])
info_from, info_to = evoked_from.info, evoked_to.info
# set up things
args1, args2 = _setup_args(info_from), _setup_args(info_to)
args1.update(coils2=args2['coils1'])
args2.update(coils2=args1['coils1'])
# test cross dots
cross_dots1 = _do_cross_dots(**args1)
cross_dots2 = _do_cross_dots(**args2)
assert_array_almost_equal(cross_dots1, cross_dots2.T)
# correlation test
evoked = evoked.pick_channels(ch_names=ch_names[:10:]).copy()
data1 = evoked.pick_types(meg='grad').data.ravel()
data2 = evoked.as_type('grad').data.ravel()
assert (np.corrcoef(data1, data2)[0, 1] > 0.95)
# Do it with epochs
virt_epochs = \
epochs.copy().load_data().pick_channels(ch_names=ch_names[:10:1])
virt_epochs.info.normalize_proj()
virt_epochs = virt_epochs.as_type('mag')
assert (all(ch.endswith('_v') for ch in virt_epochs.info['ch_names']))
assert_allclose(virt_epochs.get_data().mean(0), virt_evoked.data)
| 41.195652
| 79
| 0.652858
|
from os import path as op
import numpy as np
from numpy.polynomial import legendre
from numpy.testing import (assert_allclose, assert_array_equal, assert_equal,
assert_array_almost_equal)
from scipy.interpolate import interp1d
import pytest
import mne
from mne.forward import _make_surface_mapping, make_field_map
from mne.forward._lead_dots import (_comp_sum_eeg, _comp_sums_meg,
_get_legen_table, _do_cross_dots)
from mne.forward._make_forward import _create_meg_coils
from mne.forward._field_interpolation import _setup_dots
from mne.surface import get_meg_helmet_surf, get_head_surf
from mne.datasets import testing
from mne import read_evokeds, pick_types, make_fixed_length_events, Epochs
from mne.io import read_raw_fif
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
data_path = testing.data_path(download=False)
trans_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = op.join(data_path, 'subjects')
@testing.requires_testing_data
def test_field_map_ctf():
raw = read_raw_fif(raw_ctf_fname).crop(0, 1)
raw.apply_gradient_compensation(3)
events = make_fixed_length_events(raw, duration=0.5)
evoked = Epochs(raw, events).average()
evoked.pick_channels(evoked.ch_names[:50])
make_field_map(evoked, trans=trans_fname, subject='sample',
subjects_dir=subjects_dir)
def test_legendre_val():
rng = np.random.RandomState(0)
xs = np.linspace(-1., 1., 1000)
n_terms = 100
vals_np = legendre.legvander(xs, n_terms - 1)
for nc, interp in zip([100, 50], ['nearest', 'linear']):
lut, n_fact = _get_legen_table('eeg', n_coeff=nc, force_calc=True)
lut_fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, interp,
axis=0)
vals_i = lut_fun(xs)
assert_allclose(vals_np[:, 1:vals_i.shape[1] + 1], vals_i,
rtol=1e-2, atol=5e-3)
ctheta = rng.rand(20, 30) * 2.0 - 1.0
beta = rng.rand(20, 30) * 0.8
c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact)
c1.shape = beta.shape
# compare to numpy
n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis]
coeffs = np.zeros((n_terms,) + beta.shape)
coeffs[1:] = (np.cumprod([beta] * (n_terms - 1), axis=0) *
(2.0 * n + 1.0) * (2.0 * n + 1.0) / n)
# can't use tensor=False here b/c it isn't in old numpy
c2 = np.empty((20, 30))
for ci1 in range(20):
for ci2 in range(30):
c2[ci1, ci2] = legendre.legval(ctheta[ci1, ci2],
coeffs[:, ci1, ci2])
assert_allclose(c1, c2, 1e-2, 1e-3) # close enough...
# compare fast and slow for MEG
ctheta = rng.rand(20 * 30) * 2.0 - 1.0
beta = rng.rand(20 * 30) * 0.8
lut, n_fact = _get_legen_table('meg', n_coeff=10, force_calc=True)
fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'nearest', axis=0)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
lut, n_fact = _get_legen_table('meg', n_coeff=20, force_calc=True)
fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'linear', axis=0)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
def test_legendre_table():
# double-check our table generation
n = 10
for ch_type in ['eeg', 'meg']:
lut1, n_fact1 = _get_legen_table(ch_type, n_coeff=25, force_calc=True)
lut1 = lut1[:, :n - 1].copy()
n_fact1 = n_fact1[:n - 1].copy()
lut2, n_fact2 = _get_legen_table(ch_type, n_coeff=n, force_calc=True)
assert_allclose(lut1, lut2)
assert_allclose(n_fact1, n_fact2)
@testing.requires_testing_data
def test_make_field_map_eeg():
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
evoked.info['bads'] = ['MEG 2443', 'EEG 053'] # add some bads
surf = get_head_surf('sample', subjects_dir=subjects_dir)
# we must have trans if surface is in MRI coords
pytest.raises(ValueError, _make_surface_mapping, evoked.info, surf, 'eeg')
evoked.pick_types(meg=False, eeg=True)
fmd = make_field_map(evoked, trans_fname,
subject='sample', subjects_dir=subjects_dir)
# trans is necessary for EEG only
pytest.raises(RuntimeError, make_field_map, evoked, None,
subject='sample', subjects_dir=subjects_dir)
fmd = make_field_map(evoked, trans_fname,
subject='sample', subjects_dir=subjects_dir)
assert len(fmd) == 1
assert_array_equal(fmd[0]['data'].shape, (642, 59)) # maps data onto surf
assert len(fmd[0]['ch_names']) == 59
@testing.requires_testing_data
@pytest.mark.slowtest
def test_make_field_map_meg():
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
info = evoked.info
surf = get_meg_helmet_surf(info)
# let's reduce the number of channels by a bunch to speed it up
info['bads'] = info['ch_names'][:200]
pytest.raises(ValueError, _make_surface_mapping, info, surf, 'foo')
pytest.raises(ValueError, _make_surface_mapping, info, surf, 'meg',
mode='foo')
evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
pytest.raises(RuntimeError, _make_surface_mapping, evoked_eeg.info,
surf, 'meg')
nn = surf['nn']
del surf['nn']
pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['nn'] = nn
cf = surf['coord_frame']
del surf['coord_frame']
pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['coord_frame'] = cf
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj()
fmd = make_field_map(evoked, None,
subject='sample', subjects_dir=subjects_dir)
assert (len(fmd) == 1)
assert_array_equal(fmd[0]['data'].shape, (304, 106))
assert len(fmd[0]['ch_names']) == 106
pytest.raises(ValueError, make_field_map, evoked, ch_type='foobar')
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj()
fmd = make_field_map(evoked, trans_fname, meg_surf='head',
subject='sample', subjects_dir=subjects_dir)
assert len(fmd) == 1
assert_array_equal(fmd[0]['data'].shape, (642, 106))
assert len(fmd[0]['ch_names']) == 106
pytest.raises(ValueError, make_field_map, evoked, meg_surf='foobar',
subjects_dir=subjects_dir, trans=trans_fname)
@testing.requires_testing_data
def test_make_field_map_meeg():
evoked = read_evokeds(evoked_fname, baseline=(-0.2, 0.0))[0]
picks = pick_types(evoked.info, meg=True, eeg=True)
picks = picks[::10]
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.info.normalize_proj()
maps = make_field_map(evoked, trans_fname, subject='sample',
subjects_dir=subjects_dir, verbose='debug')
assert_equal(maps[0]['data'].shape, (642, 6))
assert_equal(maps[1]['data'].shape, (304, 31))
maxs = (1.2, 2.0)
mins = (-0.8, -1.3)
assert_equal(len(maxs), len(maps))
for map_, max_, min_ in zip(maps, maxs, mins):
assert_allclose(map_['data'].max(), max_, rtol=5e-2)
assert_allclose(map_['data'].min(), min_, rtol=5e-2)
assert_allclose(np.sqrt(np.sum(maps[0]['data'] ** 2)), 19.0903,
atol=1e-3, rtol=1e-3)
assert_allclose(np.sqrt(np.sum(maps[1]['data'] ** 2)), 19.4748,
atol=1e-3, rtol=1e-3)
def _setup_args(info):
coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t'])
int_rad, _, lut_fun, n_fact = _setup_dots('fast', info, coils, 'meg')
my_origin = np.array([0., 0., 0.04])
args_dict = dict(intrad=int_rad, volume=False, coils1=coils, r0=my_origin,
ch_type='meg', lut=lut_fun, n_fact=n_fact)
return args_dict
@testing.requires_testing_data
def test_as_meg_type_evoked():
raw = read_raw_fif(raw_fname)
events = mne.find_events(raw)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, include=['STI 014'],
exclude='bads')
epochs = mne.Epochs(raw, events, picks=picks)
evoked = epochs.average()
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.as_type('meg')
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.copy().pick_types(meg='grad').as_type('meg')
ch_names = evoked.info['ch_names']
virt_evoked = evoked.copy().pick_channels(ch_names=ch_names[:10:1])
virt_evoked.info.normalize_proj()
virt_evoked = virt_evoked.as_type('mag')
assert (all(ch.endswith('_v') for ch in virt_evoked.info['ch_names']))
evoked_from = evoked.copy().pick_channels(ch_names=ch_names[2:10:3])
evoked_to = evoked.copy().pick_channels(ch_names=ch_names[0:10:3])
info_from, info_to = evoked_from.info, evoked_to.info
args1, args2 = _setup_args(info_from), _setup_args(info_to)
args1.update(coils2=args2['coils1'])
args2.update(coils2=args1['coils1'])
cross_dots1 = _do_cross_dots(**args1)
cross_dots2 = _do_cross_dots(**args2)
assert_array_almost_equal(cross_dots1, cross_dots2.T)
evoked = evoked.pick_channels(ch_names=ch_names[:10:]).copy()
data1 = evoked.pick_types(meg='grad').data.ravel()
data2 = evoked.as_type('grad').data.ravel()
assert (np.corrcoef(data1, data2)[0, 1] > 0.95)
virt_epochs = \
epochs.copy().load_data().pick_channels(ch_names=ch_names[:10:1])
virt_epochs.info.normalize_proj()
virt_epochs = virt_epochs.as_type('mag')
assert (all(ch.endswith('_v') for ch in virt_epochs.info['ch_names']))
assert_allclose(virt_epochs.get_data().mean(0), virt_evoked.data)
| true
| true
|
790e7f9473c29d485376aeb989c661c776a540bb
| 6,122
|
py
|
Python
|
pkgs/nbconvert-4.1.0-py27_0/lib/python2.7/site-packages/nbconvert/preprocessors/execute.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 652
|
2015-07-26T00:00:17.000Z
|
2022-02-24T18:30:04.000Z
|
pkgs/nbconvert-4.1.0-py27_0/lib/python2.7/site-packages/nbconvert/preprocessors/execute.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 8
|
2015-09-07T03:38:19.000Z
|
2021-05-23T03:18:51.000Z
|
pkgs/nbconvert-4.1.0-py27_0/lib/python2.7/site-packages/nbconvert/preprocessors/execute.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 40
|
2015-07-24T19:45:08.000Z
|
2021-11-01T14:54:56.000Z
|
"""Module containing a preprocessor that removes the outputs from code cells"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from textwrap import dedent
try:
from queue import Empty # Py 3
except ImportError:
from Queue import Empty # Py 2
from traitlets import List, Unicode, Bool
from nbformat.v4 import output_from_msg
from .base import Preprocessor
from ..utils.exceptions import ConversionException
from traitlets import Integer
class CellExecutionError(ConversionException):
"""
Custom exception to propagate exceptions that are raised during
notebook execution to the caller. This is mostly useful when
using nbconvert as a library, since it allows dealing with
failures gracefully.
"""
def __init__(self, traceback):
self.traceback = traceback
class ExecutePreprocessor(Preprocessor):
"""
Executes all the cells in a notebook
"""
timeout = Integer(30, config=True,
help="The time to wait (in seconds) for output from executions."
)
interrupt_on_timeout = Bool(
False, config=True,
help=dedent(
"""
If execution of a cell times out, interrupt the kernel and
continue executing other cells rather than throwing an error and
stopping.
"""
)
)
allow_errors = Bool(
False, config=True,
help=dedent(
"""
If `True`, a `CellExecutionError` is raised if any of the notebook
cells raises an exception during execution. Otherwise, execution
is continued and the output from the exception is included in the
cell output.
"""
)
)
extra_arguments = List(Unicode())
def preprocess(self, nb, resources):
path = resources.get('metadata', {}).get('path', '')
if path == '':
path = None
from jupyter_client.manager import start_new_kernel
kernel_name = nb.metadata.get('kernelspec', {}).get('name', 'python')
self.log.info("Executing notebook with kernel: %s" % kernel_name)
self.km, self.kc = start_new_kernel(
kernel_name=kernel_name,
extra_arguments=self.extra_arguments,
stderr=open(os.devnull, 'w'),
cwd=path)
self.kc.allow_stdin = False
try:
nb, resources = super(ExecutePreprocessor, self).preprocess(nb, resources)
finally:
self.kc.stop_channels()
self.km.shutdown_kernel(now=True)
return nb, resources
def preprocess_cell(self, cell, resources, cell_index):
"""
Apply a transformation on each code cell. See base.py for details.
"""
if cell.cell_type != 'code':
return cell, resources
outputs = self.run_cell(cell)
cell.outputs = outputs
if not self.allow_errors:
for out in outputs:
if out.output_type == 'error':
pattern = """\
An error occurred while executing the following cell:
------------------
{cell.source}
------------------
{out.ename}: {out.evalue}
"""
msg = dedent(pattern).format(out=out, cell=cell)
raise CellExecutionError(msg)
return cell, resources
def run_cell(self, cell):
msg_id = self.kc.execute(cell.source)
self.log.debug("Executing cell:\n%s", cell.source)
# wait for finish, with timeout
while True:
try:
msg = self.kc.shell_channel.get_msg(timeout=self.timeout)
except Empty:
self.log.error("""Timeout waiting for execute reply (%is).
If your cell should take longer than this, you can increase the timeout with:
c.ExecutePreprocessor.timeout = SECONDS
in jupyter_nbconvert_config.py
""" % self.timeout)
if self.interrupt_on_timeout:
self.log.error("Interrupting kernel")
self.km.interrupt_kernel()
break
else:
try:
exception = TimeoutError
except NameError:
exception = RuntimeError
raise exception("Cell execution timed out, see log"
" for details.")
if msg['parent_header'].get('msg_id') == msg_id:
break
else:
# not our reply
continue
outs = []
while True:
try:
msg = self.kc.iopub_channel.get_msg(timeout=self.timeout)
except Empty:
self.log.warn("Timeout waiting for IOPub output")
break
if msg['parent_header'].get('msg_id') != msg_id:
# not an output from our execution
continue
msg_type = msg['msg_type']
self.log.debug("output: %s", msg_type)
content = msg['content']
# set the prompt number for the input and the output
if 'execution_count' in content:
cell['execution_count'] = content['execution_count']
if msg_type == 'status':
if content['execution_state'] == 'idle':
break
else:
continue
elif msg_type == 'execute_input':
continue
elif msg_type == 'clear_output':
outs = []
continue
elif msg_type.startswith('comm'):
continue
try:
out = output_from_msg(msg)
except ValueError:
self.log.error("unhandled iopub msg: " + msg_type)
else:
outs.append(out)
return outs
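# Usage sketch (added; not part of this module -- the file name and timeout
# below are placeholders):
#   import nbformat
#   nb = nbformat.read('example.ipynb', as_version=4)
#   ep = ExecutePreprocessor(timeout=60)
#   nb, resources = ep.preprocess(nb, {'metadata': {'path': '.'}})
# This runs every code cell in place and raises CellExecutionError on the
# first failing cell unless allow_errors is True.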
| 32.221053
| 93
| 0.542143
|
import os
from textwrap import dedent
try:
from queue import Empty
except ImportError:
from Queue import Empty
from traitlets import List, Unicode, Bool
from nbformat.v4 import output_from_msg
from .base import Preprocessor
from ..utils.exceptions import ConversionException
from traitlets import Integer
class CellExecutionError(ConversionException):
def __init__(self, traceback):
self.traceback = traceback
class ExecutePreprocessor(Preprocessor):
timeout = Integer(30, config=True,
help="The time to wait (in seconds) for output from executions."
)
interrupt_on_timeout = Bool(
False, config=True,
help=dedent(
"""
If execution of a cell times out, interrupt the kernel and
continue executing other cells rather than throwing an error and
stopping.
"""
)
)
allow_errors = Bool(
False, config=True,
help=dedent(
"""
If `True`, a `CellExecutionError` is raised if any of the notebook
cells raises an exception during execution. Otherwise, execution
is continued and the output from the exception is included in the
cell output.
"""
)
)
extra_arguments = List(Unicode())
def preprocess(self, nb, resources):
path = resources.get('metadata', {}).get('path', '')
if path == '':
path = None
from jupyter_client.manager import start_new_kernel
kernel_name = nb.metadata.get('kernelspec', {}).get('name', 'python')
self.log.info("Executing notebook with kernel: %s" % kernel_name)
self.km, self.kc = start_new_kernel(
kernel_name=kernel_name,
extra_arguments=self.extra_arguments,
stderr=open(os.devnull, 'w'),
cwd=path)
self.kc.allow_stdin = False
try:
nb, resources = super(ExecutePreprocessor, self).preprocess(nb, resources)
finally:
self.kc.stop_channels()
self.km.shutdown_kernel(now=True)
return nb, resources
def preprocess_cell(self, cell, resources, cell_index):
if cell.cell_type != 'code':
return cell, resources
outputs = self.run_cell(cell)
cell.outputs = outputs
if not self.allow_errors:
for out in outputs:
if out.output_type == 'error':
pattern = """\
An error occurred while executing the following cell:
------------------
{cell.source}
------------------
{out.ename}: {out.evalue}
"""
msg = dedent(pattern).format(out=out, cell=cell)
raise CellExecutionError(msg)
return cell, resources
def run_cell(self, cell):
msg_id = self.kc.execute(cell.source)
self.log.debug("Executing cell:\n%s", cell.source)
while True:
try:
msg = self.kc.shell_channel.get_msg(timeout=self.timeout)
except Empty:
self.log.error("""Timeout waiting for execute reply (%is).
If your cell should take longer than this, you can increase the timeout with:
c.ExecutePreprocessor.timeout = SECONDS
in jupyter_nbconvert_config.py
""" % self.timeout)
if self.interrupt_on_timeout:
self.log.error("Interrupting kernel")
self.km.interrupt_kernel()
break
else:
try:
exception = TimeoutError
except NameError:
exception = RuntimeError
raise exception("Cell execution timed out, see log"
" for details.")
if msg['parent_header'].get('msg_id') == msg_id:
break
else:
continue
outs = []
while True:
try:
msg = self.kc.iopub_channel.get_msg(timeout=self.timeout)
except Empty:
                self.log.warning("Timeout waiting for IOPub output")
break
if msg['parent_header'].get('msg_id') != msg_id:
continue
msg_type = msg['msg_type']
self.log.debug("output: %s", msg_type)
content = msg['content']
if 'execution_count' in content:
cell['execution_count'] = content['execution_count']
if msg_type == 'status':
if content['execution_state'] == 'idle':
break
else:
continue
elif msg_type == 'execute_input':
continue
elif msg_type == 'clear_output':
outs = []
continue
elif msg_type.startswith('comm'):
continue
try:
out = output_from_msg(msg)
except ValueError:
self.log.error("unhandled iopub msg: " + msg_type)
else:
outs.append(out)
return outs
| true
| true
|
790e80318b8c20adfa0e3278f7d70c03a34e6a41
| 683
|
py
|
Python
|
tests/utils.py
|
YegorDB/phnl
|
879f4e728d9fcbcfe5e2f78720836483e2046160
|
[
"Apache-2.0"
] | 3
|
2019-02-20T06:51:43.000Z
|
2021-06-19T22:29:23.000Z
|
tests/utils.py
|
YegorDB/phnl
|
879f4e728d9fcbcfe5e2f78720836483e2046160
|
[
"Apache-2.0"
] | 1
|
2021-12-01T08:25:23.000Z
|
2021-12-15T17:16:57.000Z
|
tests/utils.py
|
YegorDB/phnl
|
879f4e728d9fcbcfe5e2f78720836483e2046160
|
[
"Apache-2.0"
] | 2
|
2020-12-16T01:21:09.000Z
|
2021-01-27T08:51:52.000Z
|
# Copyright 2018 Yegor Bitensky
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_parameters(func):
def wrap(self, values):
return func(self, **values)
return wrap
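# Editorial usage sketch (the class below is hypothetical, not in the original
# file): the decorator unpacks a dict of values into keyword arguments.
#
#     class Example:
#         @get_parameters
#         def check(self, a, b):
#             return a + b
#
#     Example().check({'a': 1, 'b': 2})  # returns 3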
| 34.15
| 74
| 0.746706
|
def get_parameters(func):
def wrap(self, values):
return func(self, **values)
return wrap
| true
| true
|
790e805f07060deff8588dc86e74956319e8813e
| 1,678
|
py
|
Python
|
ml_rxe.py
|
sczyz/radioxenon_ml
|
73398f0060e88616c7652a72bdedf7f93ea17a20
|
[
"MIT"
] | null | null | null |
ml_rxe.py
|
sczyz/radioxenon_ml
|
73398f0060e88616c7652a72bdedf7f93ea17a20
|
[
"MIT"
] | null | null | null |
ml_rxe.py
|
sczyz/radioxenon_ml
|
73398f0060e88616c7652a72bdedf7f93ea17a20
|
[
"MIT"
] | 1
|
2018-04-23T20:52:43.000Z
|
2018-04-23T20:52:43.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 9 14:25:47 2018
@author: Steven
"""
import sys
import argparse
from radioxenon_ml.read_in import ml_matrix_composition as mlmc
from radioxenon_ml.solve import iterate
import numpy as np
"""
import radioxenon_ml.read_in.ml_matrix_composition
import radioxenon_ml.solve.iterate
import radioxenon_ml.solve.variance
"""
"""the master file for the radioxenon_ml package"""
parser = argparse.ArgumentParser(description='This is the master file for running the maximum likelihood package.')
parser.add_argument('-o', '--offset',
type=int,
default=84,
help='where to start the file selection from list of test files'
)
args = parser.parse_args(sys.argv[1:])
spectrum_file_location = 'radioxenon_ml/test_files/test'
offset = args.offset
err = 0.01 #acceptable error in normalized activity
scale_array = np.array([1,1,1,1]) #Should have elements equal to the number of isotopes
#scale_array = np.array([0.561,0.584,0.9,0.372,0.489,0.489,1]) #scaling factor for each simulation file
#currently taken from (Czyz, 2017)
n = np.shape(scale_array)[0] #number of simulated spectra
simulation, experiment, totcount = mlmc.form_matrix(spectrum_file_location, scale_array, n, offset)  # known issue: requires UTF-8 encoding
#simulation, experiment = mlmc.scale_matrix(simulation_unscaled,experiment_unscaled,)
A,J,K,q=iterate.iterate(simulation, experiment, err)
print("\n_____________________________________\nTotal activity percents = " + str(A*100))
| 43.025641
| 138
| 0.690703
|
import sys
import argparse
from radioxenon_ml.read_in import ml_matrix_composition as mlmc
from radioxenon_ml.solve import iterate
import numpy as np
parser = argparse.ArgumentParser(description='This is the master file for running the maximum likelihood package.')
parser.add_argument('-o', '--offset',
type=int,
default=84,
help='where to start the file selection from list of test files'
)
args = parser.parse_args(sys.argv[1:])
spectrum_file_location = 'radioxenon_ml/test_files/test'
offset = args.offset
err = 0.01
scale_array = np.array([1,1,1,1])
n = np.shape(scale_array)[0]
simulation, experiment, totcount = mlmc.form_matrix(spectrum_file_location, scale_array, n, offset)
A,J,K,q=iterate.iterate(simulation, experiment, err)
print("\n_____________________________________\nTotal activity percents = " + str(A*100))
| true
| true
|
790e8251ca3599b1dd947fa9e452741fd177e540
| 2,463
|
py
|
Python
|
colour/recovery/tests/test__init__.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
colour/recovery/tests/test__init__.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
colour/recovery/tests/test__init__.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.recovery` module.
"""
import numpy as np
import unittest
from colour.colorimetry import (
MSDS_CMFS,
SDS_ILLUMINANTS,
SpectralShape,
reshape_msds,
reshape_sd,
sd_to_XYZ_integration,
)
from colour.recovery import XYZ_to_sd
from colour.utilities import domain_range_scale
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'TestXYZ_to_sd',
]
class TestXYZ_to_sd(unittest.TestCase):
"""
Defines :func:`colour.recovery.XYZ_to_sd` definition unit tests
methods.
"""
def setUp(self):
"""
Initialises common tests attributes.
"""
# pylint: disable=E1102
self._cmfs = reshape_msds(
MSDS_CMFS['CIE 1931 2 Degree Standard Observer'],
SpectralShape(360, 780, 10))
self._sd_D65 = reshape_sd(SDS_ILLUMINANTS['D65'], self._cmfs.shape)
def test_domain_range_scale_XYZ_to_sd(self):
"""
Tests :func:`colour.recovery.XYZ_to_sd` definition domain
and range scale support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
m = ('Jakob 2019', 'Mallett 2019', 'Meng 2015', 'Otsu 2018',
'Smits 1999')
v = [
sd_to_XYZ_integration(
XYZ_to_sd(
XYZ, method, cmfs=self._cmfs, illuminant=self._sd_D65),
self._cmfs, self._sd_D65) for method in m
]
d_r = (('reference', 1, 1), (1, 1, 0.01), (100, 100, 1))
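        # Editorial note: each tuple reads (scale name, XYZ input factor,
        # expected output factor); e.g. in the '1' scale XYZ lives in [0, 1],
        # so the integrated output is 1/100 of its reference-scale value.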
for method, value in zip(m, v):
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
sd_to_XYZ_integration(
XYZ_to_sd(
XYZ * factor_a,
method,
cmfs=self._cmfs,
illuminant=self._sd_D65), self._cmfs,
self._sd_D65),
value * factor_b,
decimal=7)
if __name__ == '__main__':
unittest.main()
| 28.976471
| 78
| 0.568006
|
import numpy as np
import unittest
from colour.colorimetry import (
MSDS_CMFS,
SDS_ILLUMINANTS,
SpectralShape,
reshape_msds,
reshape_sd,
sd_to_XYZ_integration,
)
from colour.recovery import XYZ_to_sd
from colour.utilities import domain_range_scale
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'TestXYZ_to_sd',
]
class TestXYZ_to_sd(unittest.TestCase):
def setUp(self):
self._cmfs = reshape_msds(
MSDS_CMFS['CIE 1931 2 Degree Standard Observer'],
SpectralShape(360, 780, 10))
self._sd_D65 = reshape_sd(SDS_ILLUMINANTS['D65'], self._cmfs.shape)
def test_domain_range_scale_XYZ_to_sd(self):
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
m = ('Jakob 2019', 'Mallett 2019', 'Meng 2015', 'Otsu 2018',
'Smits 1999')
v = [
sd_to_XYZ_integration(
XYZ_to_sd(
XYZ, method, cmfs=self._cmfs, illuminant=self._sd_D65),
self._cmfs, self._sd_D65) for method in m
]
d_r = (('reference', 1, 1), (1, 1, 0.01), (100, 100, 1))
for method, value in zip(m, v):
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
sd_to_XYZ_integration(
XYZ_to_sd(
XYZ * factor_a,
method,
cmfs=self._cmfs,
illuminant=self._sd_D65), self._cmfs,
self._sd_D65),
value * factor_b,
decimal=7)
if __name__ == '__main__':
unittest.main()
| true
| true
|
790e82c2ea74403dab86a5d801648ecbbfe778d3
| 9,663
|
py
|
Python
|
examples/VideoSpeedTest.py
|
3DAlgoLab/pyqtgraph
|
53bedea724b05b5fda158946907ab881935ca865
|
[
"MIT"
] | 69
|
2020-01-06T13:31:06.000Z
|
2022-03-29T11:23:14.000Z
|
examples/VideoSpeedTest.py
|
tom00ti/pyqtgraph
|
6b4385ce0d0f9078aa22e2e27aa5307271e95ae1
|
[
"MIT"
] | 67
|
2019-11-30T14:45:05.000Z
|
2022-03-14T20:26:06.000Z
|
examples/VideoSpeedTest.py
|
tom00ti/pyqtgraph
|
6b4385ce0d0f9078aa22e2e27aa5307271e95ae1
|
[
"MIT"
] | 13
|
2020-01-06T13:44:40.000Z
|
2022-03-29T11:23:17.000Z
|
# -*- coding: utf-8 -*-
"""
Tests the speed of image updates for an ImageItem and RawImageWidget.
The speed will generally depend on the type of data being shown, whether
it is being scaled and/or converted by lookup table, and whether OpenGL
is used by the view widget
"""
## Add path to library (just for examples; you do not need this)
import initExample
import argparse
import sys
import numpy as np
import pyqtgraph as pg
import pyqtgraph.ptime as ptime
from pyqtgraph.Qt import QtGui, QtCore, QT_LIB
pg.setConfigOption('imageAxisOrder', 'row-major')
import importlib
ui_template = importlib.import_module(f'VideoTemplate_{QT_LIB.lower()}')
try:
import cupy as cp
pg.setConfigOption("useCupy", True)
_has_cupy = True
except ImportError:
cp = None
_has_cupy = False
try:
import numba
_has_numba = True
except ImportError:
numba = None
_has_numba = False
try:
from pyqtgraph.widgets.RawImageWidget import RawImageGLWidget
except ImportError:
RawImageGLWidget = None
parser = argparse.ArgumentParser(description="Benchmark for testing video performance")
parser.add_argument('--cuda', default=False, action='store_true', help="Use CUDA to process on the GPU", dest="cuda")
parser.add_argument('--dtype', default='uint8', choices=['uint8', 'uint16', 'float'], help="Image dtype (uint8, uint16, or float)")
parser.add_argument('--frames', default=3, type=int, help="Number of image frames to generate (default=3)")
parser.add_argument('--image-mode', default='mono', choices=['mono', 'rgb'], help="Image data mode (mono or rgb)", dest='image_mode')
parser.add_argument('--levels', default=None, type=lambda s: tuple([float(x) for x in s.split(',')]), help="min,max levels to scale monochromatic image dynamic range, or rmin,rmax,gmin,gmax,bmin,bmax to scale rgb")
parser.add_argument('--lut', default=False, action='store_true', help="Use color lookup table")
parser.add_argument('--lut-alpha', default=False, action='store_true', help="Use alpha color lookup table", dest='lut_alpha')
parser.add_argument('--size', default='512x512', type=lambda s: tuple([int(x) for x in s.split('x')]), help="WxH image dimensions default='512x512'")
args = parser.parse_args(sys.argv[1:])
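# Editorial example invocation (values are illustrative):
#   python VideoSpeedTest.py --dtype uint16 --levels 0,4095 --size 1024x1024 --lut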
if RawImageGLWidget is not None:
# don't limit frame rate to vsync
sfmt = QtGui.QSurfaceFormat()
sfmt.setSwapInterval(0)
QtGui.QSurfaceFormat.setDefaultFormat(sfmt)
app = pg.mkQApp("Video Speed Test Example")
win = QtGui.QMainWindow()
win.setWindowTitle('pyqtgraph example: VideoSpeedTest')
ui = ui_template.Ui_MainWindow()
ui.setupUi(win)
win.show()
if RawImageGLWidget is None:
ui.rawGLRadio.setEnabled(False)
ui.rawGLRadio.setText(ui.rawGLRadio.text() + " (OpenGL not available)")
else:
ui.rawGLImg = RawImageGLWidget()
ui.stack.addWidget(ui.rawGLImg)
# read in CLI args
ui.cudaCheck.setChecked(args.cuda and _has_cupy)
ui.cudaCheck.setEnabled(_has_cupy)
ui.numbaCheck.setChecked(_has_numba and pg.getConfigOption("useNumba"))
ui.numbaCheck.setEnabled(_has_numba)
ui.framesSpin.setValue(args.frames)
ui.widthSpin.setValue(args.size[0])
ui.heightSpin.setValue(args.size[1])
ui.dtypeCombo.setCurrentText(args.dtype)
ui.rgbCheck.setChecked(args.image_mode=='rgb')
ui.maxSpin1.setOpts(value=255, step=1)
ui.minSpin1.setOpts(value=0, step=1)
levelSpins = [ui.minSpin1, ui.maxSpin1, ui.minSpin2, ui.maxSpin2, ui.minSpin3, ui.maxSpin3]
if args.cuda and _has_cupy:
xp = cp
else:
xp = np
if args.levels is None:
ui.scaleCheck.setChecked(False)
ui.rgbLevelsCheck.setChecked(False)
else:
ui.scaleCheck.setChecked(True)
if len(args.levels) == 2:
ui.rgbLevelsCheck.setChecked(False)
ui.minSpin1.setValue(args.levels[0])
ui.maxSpin1.setValue(args.levels[1])
elif len(args.levels) == 6:
ui.rgbLevelsCheck.setChecked(True)
for spin,val in zip(levelSpins, args.levels):
spin.setValue(val)
else:
raise ValueError("levels argument must be 2 or 6 comma-separated values (got %r)" % (args.levels,))
ui.lutCheck.setChecked(args.lut)
ui.alphaCheck.setChecked(args.lut_alpha)
#ui.graphicsView.useOpenGL() ## buggy, but you can try it if you need extra speed.
vb = pg.ViewBox()
ui.graphicsView.setCentralItem(vb)
vb.setAspectLocked()
img = pg.ImageItem()
vb.addItem(img)
LUT = None
def updateLUT():
global LUT, ui
dtype = ui.dtypeCombo.currentText()
if dtype == 'uint8':
n = 256
else:
n = 4096
LUT = ui.gradient.getLookupTable(n, alpha=ui.alphaCheck.isChecked())
if _has_cupy and xp == cp:
LUT = cp.asarray(LUT)
ui.gradient.sigGradientChanged.connect(updateLUT)
updateLUT()
ui.alphaCheck.toggled.connect(updateLUT)
def updateScale():
global ui, levelSpins
if ui.rgbLevelsCheck.isChecked():
for s in levelSpins[2:]:
s.setEnabled(True)
else:
for s in levelSpins[2:]:
s.setEnabled(False)
updateScale()
ui.rgbLevelsCheck.toggled.connect(updateScale)
cache = {}
def mkData():
with pg.BusyCursor():
global data, cache, ui, xp
frames = ui.framesSpin.value()
width = ui.widthSpin.value()
height = ui.heightSpin.value()
cacheKey = (ui.dtypeCombo.currentText(), ui.rgbCheck.isChecked(), frames, width, height)
if cacheKey not in cache:
if cacheKey[0] == 'uint8':
dt = xp.uint8
loc = 128
scale = 64
mx = 255
elif cacheKey[0] == 'uint16':
dt = xp.uint16
loc = 4096
scale = 1024
mx = 2**16 - 1
elif cacheKey[0] == 'float':
dt = xp.float32
loc = 1.0
scale = 0.1
mx = 1.0
else:
raise ValueError(f"unable to handle dtype: {cacheKey[0]}")
chan_shape = (height, width)
if ui.rgbCheck.isChecked():
frame_shape = chan_shape + (3,)
else:
frame_shape = chan_shape
data = xp.empty((frames,) + frame_shape, dtype=dt)
view = data.reshape((-1,) + chan_shape)
for idx in range(view.shape[0]):
subdata = xp.random.normal(loc=loc, scale=scale, size=chan_shape)
# note: gaussian filtering has been removed as it slows down array
# creation greatly.
if cacheKey[0] != 'float':
xp.clip(subdata, 0, mx, out=subdata)
view[idx] = subdata
data[:, 10:50, 10] = mx
data[:, 48, 9:12] = mx
data[:, 47, 8:13] = mx
cache = {cacheKey: data} # clear to save memory (but keep one to prevent unnecessary regeneration)
data = cache[cacheKey]
updateLUT()
updateSize()
def updateSize():
global ui, vb
frames = ui.framesSpin.value()
width = ui.widthSpin.value()
height = ui.heightSpin.value()
dtype = xp.dtype(str(ui.dtypeCombo.currentText()))
rgb = 3 if ui.rgbCheck.isChecked() else 1
ui.sizeLabel.setText('%d MB' % (frames * width * height * rgb * dtype.itemsize / 1e6))
vb.setRange(QtCore.QRectF(0, 0, width, height))
def noticeCudaCheck():
global xp, cache
cache = {}
if ui.cudaCheck.isChecked():
if _has_cupy:
xp = cp
else:
xp = np
ui.cudaCheck.setChecked(False)
else:
xp = np
mkData()
def noticeNumbaCheck():
pg.setConfigOption('useNumba', _has_numba and ui.numbaCheck.isChecked())
mkData()
ui.dtypeCombo.currentIndexChanged.connect(mkData)
ui.rgbCheck.toggled.connect(mkData)
ui.widthSpin.editingFinished.connect(mkData)
ui.heightSpin.editingFinished.connect(mkData)
ui.framesSpin.editingFinished.connect(mkData)
ui.widthSpin.valueChanged.connect(updateSize)
ui.heightSpin.valueChanged.connect(updateSize)
ui.framesSpin.valueChanged.connect(updateSize)
ui.cudaCheck.toggled.connect(noticeCudaCheck)
ui.numbaCheck.toggled.connect(noticeNumbaCheck)
ptr = 0
lastTime = ptime.time()
fps = None
def update():
global ui, ptr, lastTime, fps, LUT, img
if ui.lutCheck.isChecked():
useLut = LUT
else:
useLut = None
downsample = ui.downsampleCheck.isChecked()
if ui.scaleCheck.isChecked():
if ui.rgbLevelsCheck.isChecked():
useScale = [
[ui.minSpin1.value(), ui.maxSpin1.value()],
[ui.minSpin2.value(), ui.maxSpin2.value()],
[ui.minSpin3.value(), ui.maxSpin3.value()]]
else:
useScale = [ui.minSpin1.value(), ui.maxSpin1.value()]
else:
useScale = None
if ui.rawRadio.isChecked():
ui.rawImg.setImage(data[ptr%data.shape[0]], lut=useLut, levels=useScale)
ui.stack.setCurrentIndex(1)
elif ui.rawGLRadio.isChecked():
ui.rawGLImg.setImage(data[ptr%data.shape[0]], lut=useLut, levels=useScale)
ui.stack.setCurrentIndex(2)
else:
img.setImage(data[ptr%data.shape[0]], autoLevels=False, levels=useScale, lut=useLut, autoDownsample=downsample)
ui.stack.setCurrentIndex(0)
#img.setImage(data[ptr%data.shape[0]], autoRange=False)
ptr += 1
now = ptime.time()
dt = now - lastTime
lastTime = now
if fps is None:
fps = 1.0/dt
else:
s = np.clip(dt*3., 0, 1)
fps = fps * (1-s) + (1.0/dt) * s
ui.fpsLabel.setText('%0.2f fps' % fps)
app.processEvents() ## force complete redraw for every plot
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
if __name__ == '__main__':
pg.exec()
| 32.21
| 214
| 0.651868
|
import initExample
import argparse
import sys
import numpy as np
import pyqtgraph as pg
import pyqtgraph.ptime as ptime
from pyqtgraph.Qt import QtGui, QtCore, QT_LIB
pg.setConfigOption('imageAxisOrder', 'row-major')
import importlib
ui_template = importlib.import_module(f'VideoTemplate_{QT_LIB.lower()}')
try:
import cupy as cp
pg.setConfigOption("useCupy", True)
_has_cupy = True
except ImportError:
cp = None
_has_cupy = False
try:
import numba
_has_numba = True
except ImportError:
numba = None
_has_numba = False
try:
from pyqtgraph.widgets.RawImageWidget import RawImageGLWidget
except ImportError:
RawImageGLWidget = None
parser = argparse.ArgumentParser(description="Benchmark for testing video performance")
parser.add_argument('--cuda', default=False, action='store_true', help="Use CUDA to process on the GPU", dest="cuda")
parser.add_argument('--dtype', default='uint8', choices=['uint8', 'uint16', 'float'], help="Image dtype (uint8, uint16, or float)")
parser.add_argument('--frames', default=3, type=int, help="Number of image frames to generate (default=3)")
parser.add_argument('--image-mode', default='mono', choices=['mono', 'rgb'], help="Image data mode (mono or rgb)", dest='image_mode')
parser.add_argument('--levels', default=None, type=lambda s: tuple([float(x) for x in s.split(',')]), help="min,max levels to scale monochromatic image dynamic range, or rmin,rmax,gmin,gmax,bmin,bmax to scale rgb")
parser.add_argument('--lut', default=False, action='store_true', help="Use color lookup table")
parser.add_argument('--lut-alpha', default=False, action='store_true', help="Use alpha color lookup table", dest='lut_alpha')
parser.add_argument('--size', default='512x512', type=lambda s: tuple([int(x) for x in s.split('x')]), help="WxH image dimensions default='512x512'")
args = parser.parse_args(sys.argv[1:])
if RawImageGLWidget is not None:
sfmt = QtGui.QSurfaceFormat()
sfmt.setSwapInterval(0)
QtGui.QSurfaceFormat.setDefaultFormat(sfmt)
app = pg.mkQApp("Video Speed Test Example")
win = QtGui.QMainWindow()
win.setWindowTitle('pyqtgraph example: VideoSpeedTest')
ui = ui_template.Ui_MainWindow()
ui.setupUi(win)
win.show()
if RawImageGLWidget is None:
ui.rawGLRadio.setEnabled(False)
ui.rawGLRadio.setText(ui.rawGLRadio.text() + " (OpenGL not available)")
else:
ui.rawGLImg = RawImageGLWidget()
ui.stack.addWidget(ui.rawGLImg)
# read in CLI args
ui.cudaCheck.setChecked(args.cuda and _has_cupy)
ui.cudaCheck.setEnabled(_has_cupy)
ui.numbaCheck.setChecked(_has_numba and pg.getConfigOption("useNumba"))
ui.numbaCheck.setEnabled(_has_numba)
ui.framesSpin.setValue(args.frames)
ui.widthSpin.setValue(args.size[0])
ui.heightSpin.setValue(args.size[1])
ui.dtypeCombo.setCurrentText(args.dtype)
ui.rgbCheck.setChecked(args.image_mode=='rgb')
ui.maxSpin1.setOpts(value=255, step=1)
ui.minSpin1.setOpts(value=0, step=1)
levelSpins = [ui.minSpin1, ui.maxSpin1, ui.minSpin2, ui.maxSpin2, ui.minSpin3, ui.maxSpin3]
if args.cuda and _has_cupy:
xp = cp
else:
xp = np
if args.levels is None:
ui.scaleCheck.setChecked(False)
ui.rgbLevelsCheck.setChecked(False)
else:
ui.scaleCheck.setChecked(True)
if len(args.levels) == 2:
ui.rgbLevelsCheck.setChecked(False)
ui.minSpin1.setValue(args.levels[0])
ui.maxSpin1.setValue(args.levels[1])
elif len(args.levels) == 6:
ui.rgbLevelsCheck.setChecked(True)
for spin,val in zip(levelSpins, args.levels):
spin.setValue(val)
else:
raise ValueError("levels argument must be 2 or 6 comma-separated values (got %r)" % (args.levels,))
ui.lutCheck.setChecked(args.lut)
ui.alphaCheck.setChecked(args.lut_alpha)
#ui.graphicsView.useOpenGL() ## buggy, but you can try it if you need extra speed.
vb = pg.ViewBox()
ui.graphicsView.setCentralItem(vb)
vb.setAspectLocked()
img = pg.ImageItem()
vb.addItem(img)
LUT = None
def updateLUT():
global LUT, ui
dtype = ui.dtypeCombo.currentText()
if dtype == 'uint8':
n = 256
else:
n = 4096
LUT = ui.gradient.getLookupTable(n, alpha=ui.alphaCheck.isChecked())
if _has_cupy and xp == cp:
LUT = cp.asarray(LUT)
ui.gradient.sigGradientChanged.connect(updateLUT)
updateLUT()
ui.alphaCheck.toggled.connect(updateLUT)
def updateScale():
global ui, levelSpins
if ui.rgbLevelsCheck.isChecked():
for s in levelSpins[2:]:
s.setEnabled(True)
else:
for s in levelSpins[2:]:
s.setEnabled(False)
updateScale()
ui.rgbLevelsCheck.toggled.connect(updateScale)
cache = {}
def mkData():
with pg.BusyCursor():
global data, cache, ui, xp
frames = ui.framesSpin.value()
width = ui.widthSpin.value()
height = ui.heightSpin.value()
cacheKey = (ui.dtypeCombo.currentText(), ui.rgbCheck.isChecked(), frames, width, height)
if cacheKey not in cache:
if cacheKey[0] == 'uint8':
dt = xp.uint8
loc = 128
scale = 64
mx = 255
elif cacheKey[0] == 'uint16':
dt = xp.uint16
loc = 4096
scale = 1024
mx = 2**16 - 1
elif cacheKey[0] == 'float':
dt = xp.float32
loc = 1.0
scale = 0.1
mx = 1.0
else:
raise ValueError(f"unable to handle dtype: {cacheKey[0]}")
chan_shape = (height, width)
if ui.rgbCheck.isChecked():
frame_shape = chan_shape + (3,)
else:
frame_shape = chan_shape
data = xp.empty((frames,) + frame_shape, dtype=dt)
view = data.reshape((-1,) + chan_shape)
for idx in range(view.shape[0]):
subdata = xp.random.normal(loc=loc, scale=scale, size=chan_shape)
# note: gaussian filtering has been removed as it slows down array
# creation greatly.
if cacheKey[0] != 'float':
xp.clip(subdata, 0, mx, out=subdata)
view[idx] = subdata
data[:, 10:50, 10] = mx
data[:, 48, 9:12] = mx
data[:, 47, 8:13] = mx
cache = {cacheKey: data} # clear to save memory (but keep one to prevent unnecessary regeneration)
data = cache[cacheKey]
updateLUT()
updateSize()
def updateSize():
global ui, vb
frames = ui.framesSpin.value()
width = ui.widthSpin.value()
height = ui.heightSpin.value()
dtype = xp.dtype(str(ui.dtypeCombo.currentText()))
rgb = 3 if ui.rgbCheck.isChecked() else 1
ui.sizeLabel.setText('%d MB' % (frames * width * height * rgb * dtype.itemsize / 1e6))
vb.setRange(QtCore.QRectF(0, 0, width, height))
def noticeCudaCheck():
global xp, cache
cache = {}
if ui.cudaCheck.isChecked():
if _has_cupy:
xp = cp
else:
xp = np
ui.cudaCheck.setChecked(False)
else:
xp = np
mkData()
def noticeNumbaCheck():
pg.setConfigOption('useNumba', _has_numba and ui.numbaCheck.isChecked())
mkData()
ui.dtypeCombo.currentIndexChanged.connect(mkData)
ui.rgbCheck.toggled.connect(mkData)
ui.widthSpin.editingFinished.connect(mkData)
ui.heightSpin.editingFinished.connect(mkData)
ui.framesSpin.editingFinished.connect(mkData)
ui.widthSpin.valueChanged.connect(updateSize)
ui.heightSpin.valueChanged.connect(updateSize)
ui.framesSpin.valueChanged.connect(updateSize)
ui.cudaCheck.toggled.connect(noticeCudaCheck)
ui.numbaCheck.toggled.connect(noticeNumbaCheck)
ptr = 0
lastTime = ptime.time()
fps = None
def update():
global ui, ptr, lastTime, fps, LUT, img
if ui.lutCheck.isChecked():
useLut = LUT
else:
useLut = None
downsample = ui.downsampleCheck.isChecked()
if ui.scaleCheck.isChecked():
if ui.rgbLevelsCheck.isChecked():
useScale = [
[ui.minSpin1.value(), ui.maxSpin1.value()],
[ui.minSpin2.value(), ui.maxSpin2.value()],
[ui.minSpin3.value(), ui.maxSpin3.value()]]
else:
useScale = [ui.minSpin1.value(), ui.maxSpin1.value()]
else:
useScale = None
if ui.rawRadio.isChecked():
ui.rawImg.setImage(data[ptr%data.shape[0]], lut=useLut, levels=useScale)
ui.stack.setCurrentIndex(1)
elif ui.rawGLRadio.isChecked():
ui.rawGLImg.setImage(data[ptr%data.shape[0]], lut=useLut, levels=useScale)
ui.stack.setCurrentIndex(2)
else:
img.setImage(data[ptr%data.shape[0]], autoLevels=False, levels=useScale, lut=useLut, autoDownsample=downsample)
ui.stack.setCurrentIndex(0)
#img.setImage(data[ptr%data.shape[0]], autoRange=False)
ptr += 1
now = ptime.time()
dt = now - lastTime
lastTime = now
if fps is None:
fps = 1.0/dt
else:
s = np.clip(dt*3., 0, 1)
fps = fps * (1-s) + (1.0/dt) * s
ui.fpsLabel.setText('%0.2f fps' % fps)
app.processEvents() ## force complete redraw for every plot
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
if __name__ == '__main__':
pg.exec()
| true
| true
|
790e84ee7c77e55183385013de3fec12c64baa89
| 1,654
|
py
|
Python
|
desktop/core/ext-py/avro-1.7.6/setup.py
|
vinaymundada27/Hue
|
7bffb33bbe7cfa34d340241c4ba3b19476211b2a
|
[
"Apache-2.0"
] | 11
|
2019-03-20T07:38:35.000Z
|
2021-06-18T09:42:46.000Z
|
desktop/core/ext-py/avro-1.7.6/setup.py
|
vinaymundada27/Hue
|
7bffb33bbe7cfa34d340241c4ba3b19476211b2a
|
[
"Apache-2.0"
] | null | null | null |
desktop/core/ext-py/avro-1.7.6/setup.py
|
vinaymundada27/Hue
|
7bffb33bbe7cfa34d340241c4ba3b19476211b2a
|
[
"Apache-2.0"
] | 5
|
2019-06-29T03:13:02.000Z
|
2020-04-23T04:47:11.000Z
|
#! /usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from sys import version_info
install_requires = []
if version_info[:2] <= (2, 5):
install_requires.append('simplejson >= 2.0.9')
setup(
name = 'avro',
version = '1.7.6',
packages = ['avro',],
package_dir = {'avro': 'src/avro'},
scripts = ["./scripts/avro"],
# Project uses simplejson, so ensure that it gets installed or upgraded
# on the target machine
install_requires = install_requires,
# metadata for upload to PyPI
author = 'Apache Avro',
author_email = 'avro-dev@hadoop.apache.org',
description = 'Avro is a serialization and RPC framework.',
license = 'Apache License 2.0',
keywords = 'avro serialization rpc',
url = 'http://hadoop.apache.org/avro',
extras_require = {
'snappy': ['python-snappy'],
},
)
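# Editorial usage sketch (commands are illustrative): from this directory the
# package can be built and installed, optionally with the snappy extra
# declared above:
#   python setup.py sdist
#   pip install ".[snappy]"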
| 33.08
| 74
| 0.724305
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from sys import version_info
install_requires = []
if version_info[:2] <= (2, 5):
install_requires.append('simplejson >= 2.0.9')
setup(
name = 'avro',
version = '1.7.6',
packages = ['avro',],
package_dir = {'avro': 'src/avro'},
scripts = ["./scripts/avro"],
install_requires = install_requires,
author = 'Apache Avro',
author_email = 'avro-dev@hadoop.apache.org',
description = 'Avro is a serialization and RPC framework.',
license = 'Apache License 2.0',
keywords = 'avro serialization rpc',
url = 'http://hadoop.apache.org/avro',
extras_require = {
'snappy': ['python-snappy'],
},
)
| true
| true
|
790e85948a1fb20f8a06349df155398d71916fcb
| 2,958
|
py
|
Python
|
experimental/megno.py
|
LuisCerdenoMota/SHERLOCK
|
5fb52795d3ab44e27bc7dbc6f2c2e6c214995ba1
|
[
"MIT"
] | null | null | null |
experimental/megno.py
|
LuisCerdenoMota/SHERLOCK
|
5fb52795d3ab44e27bc7dbc6f2c2e6c214995ba1
|
[
"MIT"
] | null | null | null |
experimental/megno.py
|
LuisCerdenoMota/SHERLOCK
|
5fb52795d3ab44e27bc7dbc6f2c2e6c214995ba1
|
[
"MIT"
] | null | null | null |
import rebound
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display, clear_output
from sherlockpipe.nbodies.PlanetInput import PlanetInput
class StabilityCalculator:
def __init__(self, star_mass):
self.star_mass = star_mass
def mass_from_radius(self, radius):
return radius ** (1 / 0.55) if radius <= 12.1 else radius ** (1 / 0.01)
def run(self, planet_params):
sim = rebound.Simulation()
sim.integrator = "whfast"
sim.ri_whfast.safe_mode = 0
sim.dt = 1e-2
sim.add(m=1.0)
for planet_param in planet_params:
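            # 0.000003003 is approximately one Earth mass in solar masses,
            # so planet masses below are expressed relative to the host star.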
sim.add(m=self.mass_from_radius(planet_param.r) * 0.000003003 / self.star_mass, P=planet_param.P, e=planet_param.e, omega=planet_param.omega)
#sim.status()
sim.move_to_com()
sim.init_megno()
sim.exit_max_distance = 20.
try:
            sim.integrate(5e2 * 2. * np.pi, exact_finish_time=0)  # integrate for 500 years, integrating to the nearest
            # timestep for each output to keep the timestep constant and preserve WHFast's symplectic nature
            # for i in range(500):
            #     sim.integrate(sim.t + i * 2 * np.pi)
            #     fig, ax = rebound.OrbitPlot(sim, color=True, unitlabel="[AU]", xlim=[-0.1, 0.1], ylim=[-0.1, 0.1])
            #     plt.show()
            #     plt.close(fig)
            # clear_output(wait=True)
megno = sim.calculate_megno()
megno = megno if megno < 10 else 10
return megno
except rebound.Escape:
return 10. # At least one particle got ejected, returning large MEGNO
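# Editorial note: MEGNO converges to ~2 for quasi-periodic (stable) systems
# and grows for chaotic ones, so the cap at 10 in run() above simply flags
# clearly unstable or ejected configurations.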
planet_params = []
parameters = []
# grid = 20
# par_e = np.linspace(0.0, 0.7, grid)
# par_e1 = np.linspace(0.0, 0.7, grid)
# for i in par_e:
# for j in par_e1:
# parameters.append((PlanetInput(1.74542, 0.01606, 1.12207, 0), PlanetInput(0.03088, 2.97, j)))
from rebound.interruptible_pool import InterruptiblePool
parameters.append(PlanetInput(5.43440, 1.68792, 0))
parameters.append(PlanetInput(1.74542, 1.12207, 0))
parameters.append(PlanetInput(4.02382, 1.34990, 0))
parameters.append(PlanetInput(2.8611, 1.17643, 0))
parameters.append(PlanetInput(1.58834, 1.07459, 0))
result = StabilityCalculator(0.211299).run(parameters)
print("MEGNO: " + str(result))
# pool = InterruptiblePool()
# results = pool.map(StabilityCalculator(0.211299).run, parameters)
# results2d = np.array(results).reshape(grid, grid)
# fig = plt.figure(figsize=(7, 5))
# ax = plt.subplot(111)
# extent = [min(par_e), max(par_e), min(par_e1), max(par_e1)]
# ax.set_xlim(extent[0], extent[1])
# ax.set_xlabel("ecc1 $e$")
# ax.set_ylim(extent[2], extent[3])
# ax.set_ylabel("ecc2 $e1$")
# im = ax.imshow(results2d, interpolation="none", vmin=1.9, vmax=10, cmap="RdYlGn_r", origin="lower", aspect='auto', extent=extent)
# cb = plt.colorbar(im, ax=ax)
# cb.set_label("MEGNO $\\langle Y \\rangle$")
# plt.show()
| 39.44
| 153
| 0.649763
|
import rebound
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display, clear_output
from sherlockpipe.nbodies.PlanetInput import PlanetInput
class StabilityCalculator:
def __init__(self, star_mass):
self.star_mass = star_mass
def mass_from_radius(self, radius):
return radius ** (1 / 0.55) if radius <= 12.1 else radius ** (1 / 0.01)
def run(self, planet_params):
sim = rebound.Simulation()
sim.integrator = "whfast"
sim.ri_whfast.safe_mode = 0
sim.dt = 1e-2
sim.add(m=1.0)
for planet_param in planet_params:
sim.add(m=self.mass_from_radius(planet_param.r) * 0.000003003 / self.star_mass, P=planet_param.P, e=planet_param.e, omega=planet_param.omega)
sim.move_to_com()
sim.init_megno()
sim.exit_max_distance = 20.
try:
sim.integrate(5e2 * 2. * np.pi, exact_finish_time=0)
megno = sim.calculate_megno()
megno = megno if megno < 10 else 10
return megno
except rebound.Escape:
return 10. # At least one particle got ejected, returning large MEGNO
planet_params = []
parameters = []
# grid = 20
# par_e = np.linspace(0.0, 0.7, grid)
# par_e1 = np.linspace(0.0, 0.7, grid)
# for i in par_e:
# for j in par_e1:
# parameters.append((PlanetInput(1.74542, 0.01606, 1.12207, 0), PlanetInput(0.03088, 2.97, j)))
from rebound.interruptible_pool import InterruptiblePool
parameters.append(PlanetInput(5.43440, 1.68792, 0))
parameters.append(PlanetInput(1.74542, 1.12207, 0))
parameters.append(PlanetInput(4.02382, 1.34990, 0))
parameters.append(PlanetInput(2.8611, 1.17643, 0))
parameters.append(PlanetInput(1.58834, 1.07459, 0))
result = StabilityCalculator(0.211299).run(parameters)
print("MEGNO: " + str(result))
# pool = InterruptiblePool()
# results = pool.map(StabilityCalculator(0.211299).run, parameters)
# results2d = np.array(results).reshape(grid, grid)
# fig = plt.figure(figsize=(7, 5))
# ax = plt.subplot(111)
# extent = [min(par_e), max(par_e), min(par_e1), max(par_e1)]
# ax.set_xlim(extent[0], extent[1])
# ax.set_xlabel("ecc1 $e$")
# ax.set_ylim(extent[2], extent[3])
# ax.set_ylabel("ecc2 $e1$")
# im = ax.imshow(results2d, interpolation="none", vmin=1.9, vmax=10, cmap="RdYlGn_r", origin="lower", aspect='auto', extent=extent)
# cb = plt.colorbar(im, ax=ax)
# cb.set_label("MEGNO $\\langle Y \\rangle$")
# plt.show()
| true
| true
|
790e860c5735e301a8b6ec577a6fc063d016ff82
| 503
|
py
|
Python
|
tests/openbb_terminal/cryptocurrency/defi/test_terraengineer_model.py
|
tehcoderer/GamestonkTerminal
|
54a1b6f545a0016c576e9e00eef5c003d229dacf
|
[
"MIT"
] | 255
|
2022-03-29T16:43:51.000Z
|
2022-03-31T23:57:08.000Z
|
tests/openbb_terminal/cryptocurrency/defi/test_terraengineer_model.py
|
tehcoderer/GamestonkTerminal
|
54a1b6f545a0016c576e9e00eef5c003d229dacf
|
[
"MIT"
] | 14
|
2022-03-29T14:20:33.000Z
|
2022-03-31T23:39:20.000Z
|
tests/openbb_terminal/cryptocurrency/defi/test_terraengineer_model.py
|
tehcoderer/GamestonkTerminal
|
54a1b6f545a0016c576e9e00eef5c003d229dacf
|
[
"MIT"
] | 24
|
2022-03-29T15:28:56.000Z
|
2022-03-31T23:54:15.000Z
|
# IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.cryptocurrency.defi import terraengineer_model
@pytest.mark.vcr
@pytest.mark.parametrize(
"asset,address",
[("ust", "terra1tmnqgvg567ypvsvk6rwsga3srp7e3lg6u0elp8")],
)
def test_get_history_asset_from_terra_address(asset, address, recorder):
df = terraengineer_model.get_history_asset_from_terra_address(
asset=asset,
address=address,
)
recorder.capture(df)
| 23.952381
| 72
| 0.771372
|
import pytest
from openbb_terminal.cryptocurrency.defi import terraengineer_model
@pytest.mark.vcr
@pytest.mark.parametrize(
"asset,address",
[("ust", "terra1tmnqgvg567ypvsvk6rwsga3srp7e3lg6u0elp8")],
)
def test_get_history_asset_from_terra_address(asset, address, recorder):
df = terraengineer_model.get_history_asset_from_terra_address(
asset=asset,
address=address,
)
recorder.capture(df)
| true
| true
|
790e86d135ee08aa433422ee85291f62d15dc188
| 2,125
|
py
|
Python
|
setup.py
|
mikiec84/gaffer
|
8c5d5b5e2ff3fcb1f7cc7c8fbfc623f97dd0da8d
|
[
"MIT",
"Unlicense"
] | null | null | null |
setup.py
|
mikiec84/gaffer
|
8c5d5b5e2ff3fcb1f7cc7c8fbfc623f97dd0da8d
|
[
"MIT",
"Unlicense"
] | null | null | null |
setup.py
|
mikiec84/gaffer
|
8c5d5b5e2ff3fcb1f7cc7c8fbfc623f97dd0da8d
|
[
"MIT",
"Unlicense"
] | 1
|
2018-10-28T00:59:17.000Z
|
2018-10-28T00:59:17.000Z
|
# -*- coding: utf-8 -
#
# This file is part of gaffer. See the NOTICE for more information.
import os
import sys
from setuptools import setup, find_packages, Extension
py_version = sys.version_info[:2]
if py_version < (2, 6):
raise RuntimeError('On Python 2, Gaffer requires Python 2.6 or better')
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Topic :: System :: Boot',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration',
'Topic :: Software Development :: Libraries']
# read long description
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
long_description = f.read()
DATA_FILES = [
('gaffer', ["LICENSE", "MANIFEST.in", "NOTICE", "README.rst",
"THANKS", "UNLICENSE"])
]
setup(name='gaffer',
version="0.4.1",
description = 'simple system process manager',
long_description = long_description,
classifiers = CLASSIFIERS,
license = 'BSD',
url = 'http://github.com/benoitc/gaffer',
author = 'Benoit Chesneau',
author_email = 'benoitc@e-engura.org',
packages=find_packages(),
ext_modules = [
Extension("gaffer.sync", ["gaffer/sync.c"])
],
install_requires = [
'pyuv>=0.8.3',
'six',
'psutil',
'tornado==2.4',
'colorama',
'setproctitle'
],
data_files = DATA_FILES,
entry_points="""
[console_scripts]
gafferd=gaffer.node.gafferd:run
gafferctl=gaffer.node.gafferctl:run
gaffer=gaffer.pm.main:main
""")
| 28.333333
| 75
| 0.6
|
import os
import sys
from setuptools import setup, find_packages, Extension
py_version = sys.version_info[:2]
if py_version < (2, 6):
raise RuntimeError('On Python 2, Gaffer requires Python 2.6 or better')
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Topic :: System :: Boot',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration',
'Topic :: Software Development :: Libraries']
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
long_description = f.read()
DATA_FILES = [
('gaffer', ["LICENSE", "MANIFEST.in", "NOTICE", "README.rst",
"THANKS", "UNLICENSE"])
]
setup(name='gaffer',
version="0.4.1",
description = 'simple system process manager',
long_description = long_description,
classifiers = CLASSIFIERS,
license = 'BSD',
url = 'http://github.com/benoitc/gaffer',
author = 'Benoit Chesneau',
author_email = 'benoitc@e-engura.org',
packages=find_packages(),
ext_modules = [
Extension("gaffer.sync", ["gaffer/sync.c"])
],
install_requires = [
'pyuv>=0.8.3',
'six',
'psutil',
'tornado==2.4',
'colorama',
'setproctitle'
],
data_files = DATA_FILES,
entry_points="""
[console_scripts]
gafferd=gaffer.node.gafferd:run
gafferctl=gaffer.node.gafferctl:run
gaffer=gaffer.pm.main:main
""")
| true
| true
|
790e86d5a57af29752f9e5f7c45064dda92d72f6
| 1,541
|
py
|
Python
|
scripts/mysql.py
|
harveywangdao/road
|
062f787ca69d72d5d997eb7a18d860485857ffe1
|
[
"Apache-2.0"
] | null | null | null |
scripts/mysql.py
|
harveywangdao/road
|
062f787ca69d72d5d997eb7a18d860485857ffe1
|
[
"Apache-2.0"
] | null | null | null |
scripts/mysql.py
|
harveywangdao/road
|
062f787ca69d72d5d997eb7a18d860485857ffe1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import pymysql
db = pymysql.connect(host="localhost", user="root", password="123456", database="tboxdb")
cursor = db.cursor()
#create table
cursor.execute("DROP TABLE IF EXISTS EMPLOYEE")
sql = """CREATE TABLE EMPLOYEE (
FIRST_NAME CHAR(20) NOT NULL,
LAST_NAME CHAR(20),
AGE INT,
SEX CHAR(1),
INCOME FLOAT )"""
cursor.execute(sql)
#insert
sql = """INSERT INTO EMPLOYEE(FIRST_NAME,
LAST_NAME, AGE, SEX, INCOME)
VALUES ('Mac', 'Mohan', 20, 'M', 2000)"""
#sql = "INSERT INTO EMPLOYEE(FIRST_NAME, \
# LAST_NAME, AGE, SEX, INCOME) \
# VALUES ('%s', '%s', '%d', '%c', '%d' )" % \
# ('Mac', 'Mohan', 20, 'M', 2000)
try:
cursor.execute(sql)
db.commit()
except Exception:
db.rollback()
#query
sql = "SELECT * FROM EMPLOYEE \
WHERE INCOME > '%d'" % (1000)
try:
cursor.execute(sql)
results = cursor.fetchall()
for row in results:
fname = row[0]
lname = row[1]
age = row[2]
sex = row[3]
income = row[4]
print ("fname=%s,lname=%s,age=%d,sex=%s,income=%d" % \
(fname, lname, age, sex, income ))
except Exception:
print ("Error: unable to fetch data")
#update
sql = "UPDATE EMPLOYEE SET AGE = AGE + 1 WHERE SEX = '%c'" % ('M')
try:
cursor.execute(sql)
db.commit()
except Exception:
db.rollback()
#delete
sql = "DELETE FROM EMPLOYEE WHERE AGE > '%d'" % (20)
try:
cursor.execute(sql)
db.commit()
except Exception:
db.rollback()
db.close()
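# Editorial sketch: pymysql also supports parameterized queries, which avoid
# interpolating values into SQL strings by hand (and the injection risk that
# brings), e.g.:
#   cursor.execute("SELECT * FROM EMPLOYEE WHERE INCOME > %s", (1000,))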
| 21.109589
| 67
| 0.53926
|
import pymysql
db = pymysql.connect(host="localhost", user="root", password="123456", database="tboxdb")
cursor = db.cursor()
cursor.execute("DROP TABLE IF EXISTS EMPLOYEE")
sql = """CREATE TABLE EMPLOYEE (
FIRST_NAME CHAR(20) NOT NULL,
LAST_NAME CHAR(20),
AGE INT,
SEX CHAR(1),
INCOME FLOAT )"""
cursor.execute(sql)
sql = """INSERT INTO EMPLOYEE(FIRST_NAME,
LAST_NAME, AGE, SEX, INCOME)
VALUES ('Mac', 'Mohan', 20, 'M', 2000)"""
# LAST_NAME, AGE, SEX, INCOME) \
# VALUES ('%s', '%s', '%d', '%c', '%d' )" % \
try:
cursor.execute(sql)
db.commit()
except Exception:
db.rollback()
sql = "SELECT * FROM EMPLOYEE \
WHERE INCOME > '%d'" % (1000)
try:
cursor.execute(sql)
results = cursor.fetchall()
for row in results:
fname = row[0]
lname = row[1]
age = row[2]
sex = row[3]
income = row[4]
print ("fname=%s,lname=%s,age=%d,sex=%s,income=%d" % \
(fname, lname, age, sex, income ))
except Exception:
print ("Error: unable to fetch data")
sql = "UPDATE EMPLOYEE SET AGE = AGE + 1 WHERE SEX = '%c'" % ('M')
try:
cursor.execute(sql)
db.commit()
except Exception:
db.rollback()
sql = "DELETE FROM EMPLOYEE WHERE AGE > '%d'" % (20)
try:
cursor.execute(sql)
db.commit()
except Exception:
db.rollback()
db.close()
| true
| true
|
790e8756114562099f0bcd94c6413c469f54eb86
| 1,499
|
py
|
Python
|
main.py
|
0xol/server-installer
|
e4f41c2e34bfb4bc2103d099820b4026c3ed3b23
|
[
"MIT"
] | null | null | null |
main.py
|
0xol/server-installer
|
e4f41c2e34bfb4bc2103d099820b4026c3ed3b23
|
[
"MIT"
] | null | null | null |
main.py
|
0xol/server-installer
|
e4f41c2e34bfb4bc2103d099820b4026c3ed3b23
|
[
"MIT"
] | null | null | null |
from importlib.resources import path
import sys
import os
import shutil
from git import Repo
from subprocess import call
from git import RemoteProgress
import git
from tqdm import tqdm
from pathlib import Path
dir_path = (os.path.expanduser('~/Documents') + "\\server")
os.chdir(dir_path)
gitaddress = str("https://github.com/0xol/server")
print("what server version would you like to install")
print("format is 'client-version'")
print("example 'forge-1.16.5' or 'vanilla-1.7.10'")
print("for lists of supported server version check https://github.com/0xol/server and check under branches")
branch = input()
os.system("del /F /S /Q /A .git")
os.system("del /F /S /Q /A .git") #just in case the program didnt kill it the first time
folder = dir_path
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
class CloneProgress(RemoteProgress):
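    # GitPython invokes update() repeatedly during the clone; mirroring the
    # current/max counts into a tqdm bar renders progress in the terminal.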
def __init__(self):
super().__init__()
self.pbar = tqdm()
def update(self, op_code, cur_count, max_count=None, message=''):
self.pbar.total = max_count
self.pbar.n = cur_count
self.pbar.refresh()
print(dir_path)
Repo.clone_from(gitaddress, dir_path , branch=branch, progress=CloneProgress())
| 28.283019
| 108
| 0.695797
|
from importlib.resources import path
import sys
import os
import shutil
from git import Repo
from subprocess import call
from git import RemoteProgress
import git
from tqdm import tqdm
from pathlib import Path
dir_path = (os.path.expanduser('~/Documents') + "\\server")
os.chdir(dir_path)
gitaddress = str("https://github.com/0xol/server")
print("what server version would you like to install")
print("format is 'client-version'")
print("example 'forge-1.16.5' or 'vanilla-1.7.10'")
print("for lists of supported server version check https://github.com/0xol/server and check under branches")
branch = input()
os.system("del /F /S /Q /A .git")
os.system("del /F /S /Q /A .git")
folder = dir_path
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
class CloneProgress(RemoteProgress):
def __init__(self):
super().__init__()
self.pbar = tqdm()
def update(self, op_code, cur_count, max_count=None, message=''):
self.pbar.total = max_count
self.pbar.n = cur_count
self.pbar.refresh()
print(dir_path)
Repo.clone_from(gitaddress, dir_path , branch=branch, progress=CloneProgress())
| true
| true
|
790e87b572d17d593c5544de491af8b47ea1d36b
| 271
|
py
|
Python
|
Python/Dicionarios.py
|
Kauan677/Projetos-Python
|
62f6b476e6d250d9ff31c95808b31ebd3ab4fdbb
|
[
"MIT"
] | 1
|
2022-03-03T23:19:57.000Z
|
2022-03-03T23:19:57.000Z
|
Python/Dicionarios.py
|
Kauan677/Projetos-Python
|
62f6b476e6d250d9ff31c95808b31ebd3ab4fdbb
|
[
"MIT"
] | null | null | null |
Python/Dicionarios.py
|
Kauan677/Projetos-Python
|
62f6b476e6d250d9ff31c95808b31ebd3ab4fdbb
|
[
"MIT"
] | null | null | null |
estado = dict()
brasil = list()
for c in range(0,3):
estado['uf'] = str(input('Uf: '))
estado['sigla'] = str(input('Sigla: '))
brasil.append(estado.copy())
print(brasil)
for e in brasil:
for k, v in e.items():
print(f'O campo {k} tem valor {v}')
| 22.583333
| 43
| 0.571956
|
estado = dict()
brasil = list()
for c in range(0,3):
estado['uf'] = str(input('Uf: '))
estado['sigla'] = str(input('Sigla: '))
brasil.append(estado.copy())
print(brasil)
for e in brasil:
for k, v in e.items():
print(f'O campo {k} tem valor {v}')
| true
| true
|
790e880e49a4aff05d076208bbe8350a414f060a
| 5,986
|
py
|
Python
|
analyzer/apisan/parse/sparser.py
|
oslab-swrc/apisan
|
9ff3d3bc04c8e119f4d659f03b38747395e58c3e
|
[
"MIT"
] | 58
|
2016-08-27T03:19:14.000Z
|
2022-01-05T17:33:44.000Z
|
analyzer/apisan/parse/sparser.py
|
oslab-swrc/apisan
|
9ff3d3bc04c8e119f4d659f03b38747395e58c3e
|
[
"MIT"
] | 14
|
2017-12-01T17:16:59.000Z
|
2020-12-21T12:16:35.000Z
|
analyzer/apisan/parse/sparser.py
|
oslab-swrc/apisan
|
9ff3d3bc04c8e119f4d659f03b38747395e58c3e
|
[
"MIT"
] | 22
|
2016-11-27T09:53:31.000Z
|
2021-11-22T00:22:53.000Z
|
#!/usr/bin/env python3
# SPDX-License-Identifier: MIT
import os
import sys
from ply import yacc
from ply.lex import TOKEN
from .slexer import SLexer
from ..lib import dbg
from .symbol import (
BinaryOperatorSymbol, ConstraintSymbol, FieldSymbol, ArraySymbol,
CallSymbol, IDSymbol, ConcreteIntSymbol, StringLiteralSymbol
)
# for LALR table reuse
ROOT = os.path.dirname(__file__)
sys.path.append(ROOT)
class SParser(object):
# Precedence rules for the arithmetic operators
precedence = (
('left', 'LOR'),
('left', 'LAND'),
('left', 'OR'),
('left', 'XOR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'RSHIFT', 'LSHIFT'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE', 'MOD')
)
def __init__(self, **kwargs):
self.slex = SLexer()
self.slex.build()
self.tokens = self.slex.tokens
self.yacc = yacc.yacc(module=self)
def p_expression_1(self, p):
''' expression : binary_expression '''
p[0] = p[1]
def p_binary_expression_1(self, p):
''' binary_expression : cast_expression '''
p[0] = p[1]
def p_binary_expression_2(self, p):
''' binary_expression : binary_expression TIMES binary_expression
| binary_expression DIVIDE binary_expression
| binary_expression MOD binary_expression
| binary_expression PLUS binary_expression
| binary_expression MINUS binary_expression
| binary_expression RSHIFT binary_expression
| binary_expression LSHIFT binary_expression
| binary_expression LT binary_expression
| binary_expression LE binary_expression
| binary_expression GE binary_expression
| binary_expression GT binary_expression
| binary_expression EQ binary_expression
| binary_expression NE binary_expression
| binary_expression AND binary_expression
| binary_expression OR binary_expression
| binary_expression XOR binary_expression
| binary_expression LAND binary_expression
| binary_expression LOR binary_expression
'''
p[0] = BinaryOperatorSymbol(p[1], p[2], p[3])
def p_binary_expression_3(self, p):
# expr CONSTRAINT_OP constraints
''' expression : expression CONSTRAINT_OP LBRACE constraint_list RBRACE '''
p[0] = ConstraintSymbol(p[1], p[4])
def p_constraint(self, p):
''' constraint : LBRACKET concrete_integer_expression COMMA concrete_integer_expression RBRACKET '''
p[0] = (p[2], p[4])
def p_constraint_list(self, p):
''' constraint_list : constraint_list COMMA constraint
| constraint '''
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1]
p[1].append(p[3])
def p_cast_expression_1(self, p):
""" cast_expression : unary_expression """
p[0] = p[1]
def p_unary_expression_1(self, p):
""" unary_expression : postfix_expression """
p[0] = p[1]
def p_unary_expression_2(self, p):
""" unary_expression : AND postfix_expression """
# XXX : needs to handle & operator
p[0] = p[2]
def p_postfix_expression_1(self, p):
''' postfix_expression : primary_expression '''
p[0] = p[1]
def p_postfix_expression_2(self, p):
''' postfix_expression : postfix_expression ARROW ID'''
p[0] = FieldSymbol(p[1], p[3])
def p_postfix_expression3(self, p):
''' postfix_expression : postfix_expression LBRACKET expression RBRACKET '''
p[0] = ArraySymbol(p[1], p[3])
def p_postfix_expression4(self, p):
''' postfix_expression : postfix_expression LPAREN argument_list RPAREN '''
p[0] = CallSymbol(p[1], p[3])
def p_primary_expression_1(self, p):
''' primary_expression : ID '''
p[0] = IDSymbol(p[1])
def p_primary_expression_2(self, p):
''' primary_expression : concrete_integer_expression '''
p[0] = ConcreteIntSymbol(p[1])
def p_primary_expression_3(self, p):
'''primary_expression : LPAREN expression RPAREN'''
p[0] = p[2]
def p_primary_expression_4(self, p):
''' primary_expression : STRING_LITERAL '''
p[0] = StringLiteralSymbol(p[1])
def p_concrete_integer(self, p):
''' concrete_integer_expression : INT_CONST_DEC
| MINUS INT_CONST_DEC '''
if len(p) == 3:
p[0] = -int(p[2])
else:
p[0] = int(p[1])
def p_argument_list(self, p):
''' argument_list :
| expression
| argument_list COMMA expression '''
if len(p) == 1:
p[0] = []
elif len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1]
p[1].append(p[3])
def parse(self, text):
self.last_text = text
return self.yacc.parse(input = text,
lexer = self.slex)
def p_error(self, p):
#dbg.debug('Illegal token %s' % repr(p))
#dbg.debug('Text : %s' % self.last_text)
return
if __name__ == '__main__':
parser = SParser()
tests = ["\"String Literal\\n\"",
"malloc(256)@={ [0, 0] }",
"malloc(256)@={ [0, 0], [2, 18446744073709551615] }"]
for test in tests:
        print(parser.parse(test))
| 35.005848
| 109
| 0.538423
|
import os
import sys
from ply import yacc
from ply.lex import TOKEN
from .slexer import SLexer
from ..lib import dbg
from .symbol import (
BinaryOperatorSymbol, ConstraintSymbol, FieldSymbol, ArraySymbol,
CallSymbol, IDSymbol, ConcreteIntSymbol, StringLiteralSymbol
)
ROOT = os.path.dirname(__file__)
sys.path.append(ROOT)
class SParser(object):
precedence = (
('left', 'LOR'),
('left', 'LAND'),
('left', 'OR'),
('left', 'XOR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'RSHIFT', 'LSHIFT'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE', 'MOD')
)
def __init__(self, **kwargs):
self.slex = SLexer()
self.slex.build()
self.tokens = self.slex.tokens
self.yacc = yacc.yacc(module=self)
def p_expression_1(self, p):
p[0] = p[1]
def p_binary_expression_1(self, p):
p[0] = p[1]
def p_binary_expression_2(self, p):
p[0] = BinaryOperatorSymbol(p[1], p[2], p[3])
def p_binary_expression_3(self, p):
p[0] = ConstraintSymbol(p[1], p[4])
def p_constraint(self, p):
p[0] = (p[2], p[4])
def p_constraint_list(self, p):
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1]
p[1].append(p[3])
def p_cast_expression_1(self, p):
p[0] = p[1]
def p_unary_expression_1(self, p):
p[0] = p[1]
def p_unary_expression_2(self, p):
p[0] = p[2]
def p_postfix_expression_1(self, p):
p[0] = p[1]
def p_postfix_expression_2(self, p):
p[0] = FieldSymbol(p[1], p[3])
def p_postfix_expression3(self, p):
p[0] = ArraySymbol(p[1], p[3])
def p_postfix_expression4(self, p):
p[0] = CallSymbol(p[1], p[3])
def p_primary_expression_1(self, p):
p[0] = IDSymbol(p[1])
def p_primary_expression_2(self, p):
p[0] = ConcreteIntSymbol(p[1])
def p_primary_expression_3(self, p):
p[0] = p[2]
def p_primary_expression_4(self, p):
p[0] = StringLiteralSymbol(p[1])
def p_concrete_integer(self, p):
if len(p) == 3:
p[0] = -int(p[2])
else:
p[0] = int(p[1])
def p_argument_list(self, p):
if len(p) == 1:
p[0] = []
elif len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1]
p[1].append(p[3])
def parse(self, text):
self.last_text = text
return self.yacc.parse(input = text,
lexer = self.slex)
def p_error(self, p):
return
if __name__ == '__main__':
parser = SParser()
tests = ["\"String Literal\\n\"",
"malloc(256)@={ [0, 0] }",
"malloc(256)@={ [0, 0], [2, 18446744073709551615] }"]
for test in tests:
        print(parser.parse(test))
| true
| true
|
790e8836ca1d11ca7cd114e940447e0491a46db0
| 3,326
|
py
|
Python
|
test/test_all.py
|
icgrp/fpga-tool-perf
|
3b4f079fd13ffe89d5703bf3a995031ecf32b923
|
[
"0BSD"
] | 48
|
2018-09-21T00:15:52.000Z
|
2021-12-15T03:06:19.000Z
|
test/test_all.py
|
icgrp/fpga-tool-perf
|
3b4f079fd13ffe89d5703bf3a995031ecf32b923
|
[
"0BSD"
] | 243
|
2018-07-16T20:47:16.000Z
|
2022-01-10T21:43:13.000Z
|
test/test_all.py
|
icgrp/fpga-tool-perf
|
3b4f079fd13ffe89d5703bf3a995031ecf32b923
|
[
"0BSD"
] | 21
|
2018-07-17T22:16:47.000Z
|
2022-03-28T10:37:13.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import sys
import os
sys.path.append(os.getcwd() + "/..")
import unittest
import fpgaperf
import re
import random
def def_devpack(toolchain):
if 'radiant' in toolchain:
device = 'up5k'
package = 'uwg30'
else:
# tinyfpga b2
# XXX: change to hx8k, ct256?
device = 'lp8k'
package = 'cm81'
return device, package
class TestCase(unittest.TestCase):
def setUp(self):
self.verbose = False
def test_env_ready(self):
assert fpgaperf.env_ready()
def test_icetime_parse(self):
with open('icetime.txt', 'r') as f:
m = fpgaperf.icetime_parse(f)
assert 'max_freq' in m
assert abs(m['max_freq'] - 132.94e6) < 1.0
def test_yosys_ver(self):
v = fpgaperf.yosys_ver()
assert re.match(r'Yosys .* .*git sha1 .*', v)
def test_get_toolchains(self):
ts = fpgaperf.get_toolchains()
assert 'vpr' in ts
assert 'arachne' in ts
assert 'radiant-synpro' in ts
def test_get_projects(self):
ps = fpgaperf.get_projects()
assert 'oneblink' in ps
assert 'picosoc-hx8kdemo' in ps
assert 'picorv32-wrap' in ps
def test_get_seedable(self):
ts = fpgaperf.get_seedable()
assert 'vpr' in ts
assert 'arachne' in ts
assert 'nextpnr' in ts
def test_toolchains(self):
'''Try each toolchain'''
for toolchain in fpgaperf.toolchains.keys():
device, package = def_devpack(toolchain)
fpgaperf.run(
family='ice40',
device=device,
package=package,
toolchain=toolchain,
project=fpgaperf.get_project('oneblink'),
verbose=self.verbose
)
def test_pcf(self):
'''Try each toolchain with a pcf'''
for toolchain in fpgaperf.toolchains.keys():
device, package = def_devpack(toolchain)
if 'radiant' in toolchain:
pcf = fpgaperf.root_dir + '/project/FIXME.pcf'
else:
pcf = fpgaperf.root_dir + '/project/oneblink_lp8k-cm81.pcf'
fpgaperf.run(
family='ice40',
device=device,
package=package,
toolchain=toolchain,
project=fpgaperf.get_project('oneblink'),
pcf=pcf,
verbose=self.verbose
)
def test_seed(self):
'''Try seeding, where possible'''
random.seed(1234)
for toolchain in fpgaperf.get_seedable():
seed = random.randint(1, 0x7FFFFFFF)
device, package = def_devpack(toolchain)
fpgaperf.run(
family='ice40',
device=device,
package=package,
toolchain=toolchain,
project=fpgaperf.get_project('oneblink'),
seed=seed,
verbose=self.verbose
)
if __name__ == '__main__':
unittest.main()
| 27.94958
| 75
| 0.562538
|
import sys
import os
sys.path.append(os.getcwd() + "/..")
import unittest
import fpgaperf
import re
import random
def def_devpack(toolchain):
if 'radiant' in toolchain:
device = 'up5k'
package = 'uwg30'
else:
device = 'lp8k'
package = 'cm81'
return device, package
class TestCase(unittest.TestCase):
def setUp(self):
self.verbose = False
def test_env_ready(self):
assert fpgaperf.env_ready()
def test_icetime_parse(self):
with open('icetime.txt', 'r') as f:
m = fpgaperf.icetime_parse(f)
assert 'max_freq' in m
assert abs(m['max_freq'] - 132.94e6) < 1.0
def test_yosys_ver(self):
v = fpgaperf.yosys_ver()
assert re.match(r'Yosys .* .*git sha1 .*', v)
def test_get_toolchains(self):
ts = fpgaperf.get_toolchains()
assert 'vpr' in ts
assert 'arachne' in ts
assert 'radiant-synpro' in ts
def test_get_projects(self):
ps = fpgaperf.get_projects()
assert 'oneblink' in ps
assert 'picosoc-hx8kdemo' in ps
assert 'picorv32-wrap' in ps
def test_get_seedable(self):
ts = fpgaperf.get_seedable()
assert 'vpr' in ts
assert 'arachne' in ts
assert 'nextpnr' in ts
def test_toolchains(self):
for toolchain in fpgaperf.toolchains.keys():
device, package = def_devpack(toolchain)
fpgaperf.run(
family='ice40',
device=device,
package=package,
toolchain=toolchain,
project=fpgaperf.get_project('oneblink'),
verbose=self.verbose
)
def test_pcf(self):
for toolchain in fpgaperf.toolchains.keys():
device, package = def_devpack(toolchain)
if 'radiant' in toolchain:
pcf = fpgaperf.root_dir + '/project/FIXME.pcf'
else:
pcf = fpgaperf.root_dir + '/project/oneblink_lp8k-cm81.pcf'
fpgaperf.run(
family='ice40',
device=device,
package=package,
toolchain=toolchain,
project=fpgaperf.get_project('oneblink'),
pcf=pcf,
verbose=self.verbose
)
def test_seed(self):
random.seed(1234)
for toolchain in fpgaperf.get_seedable():
seed = random.randint(1, 0x7FFFFFFF)
device, package = def_devpack(toolchain)
fpgaperf.run(
family='ice40',
device=device,
package=package,
toolchain=toolchain,
project=fpgaperf.get_project('oneblink'),
seed=seed,
verbose=self.verbose
)
if __name__ == '__main__':
unittest.main()
| true
| true
|
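Aside on the fpga-tool-perf tests above: each test method loops over every toolchain, so the first failing toolchain aborts the rest of the loop. unittest's subTest reports each iteration independently. A hedged, self-contained sketch (toolchain names are placeholders; a real test would call fpgaperf.run as above):
import unittest

class ToolchainSmokeTest(unittest.TestCase):
    TOOLCHAINS = ['vpr', 'arachne', 'nextpnr']  # placeholder names

    def test_each_toolchain(self):
        for toolchain in self.TOOLCHAINS:
            with self.subTest(toolchain=toolchain):
                # stand-in assertion; failures are reported per toolchain
                self.assertIsInstance(toolchain, str)

if __name__ == '__main__':
    unittest.main()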
790e892bd198756436c17dbc6379a7f2d1957725
| 690
|
py
|
Python
|
chapter_07/06_larger_than_n.py
|
SergeHall/Tony-Gaddis-Python-4th
|
24e7c70fbd196ff531a5e4e7f6f5021c4b4177ba
|
[
"MIT"
] | 2
|
2021-04-07T03:26:37.000Z
|
2021-07-26T07:38:49.000Z
|
chapter_07/06_larger_than_n.py
|
SergeHall/Tony-Gaddis-Python-4th
|
24e7c70fbd196ff531a5e4e7f6f5021c4b4177ba
|
[
"MIT"
] | null | null | null |
chapter_07/06_larger_than_n.py
|
SergeHall/Tony-Gaddis-Python-4th
|
24e7c70fbd196ff531a5e4e7f6f5021c4b4177ba
|
[
"MIT"
] | null | null | null |
# 6. Larger than n. In this program, write a function that takes two
# arguments: a list and a number n. Assume the list contains numbers. The
# function should display all numbers in the list that are greater than n.
import random
def main():
list_num = [random.randint(0, 100) for i in range(20)]
print(list_num)
    n = int(input('Enter n: '))
print("This is list " + str(check_n(list_num, n)) + " of numbers\nthat are "
"greater than the number you provided ", n, ".", sep="")
def check_n(list_num, n):
num_greater_n = []
for i in range(len(list_num)):
if list_num[i] > n:
num_greater_n.append(list_num[i])
return num_greater_n
main()
| 28.75
| 80
| 0.650725
|
import random
def main():
list_num = [random.randint(0, 100) for i in range(20)]
print(list_num)
    n = int(input('Enter n: '))
print("This is list " + str(check_n(list_num, n)) + " of numbers\nthat are "
"greater than the number you provided ", n, ".", sep="")
def check_n(list_num, n):
num_greater_n = []
for i in range(len(list_num)):
if list_num[i] > n:
num_greater_n.append(list_num[i])
return num_greater_n
main()
| true
| true
|
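Aside on the row above: check_n walks indices by hand; the idiomatic form is a list comprehension with identical behavior:
def check_n(list_num, n):
    # same result as the index-based loop in the row above
    return [x for x in list_num if x > n]

print(check_n([3, 9, 1, 12], 5))  # [9, 12]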
790e8ae2cd5b0b00bd94352c58a976ddfef79142
| 901
|
py
|
Python
|
src/python/tests/resolver.py
|
djp952/prebuilt-libxml2
|
8f067d2965c964bf4ba35ad1c0a3f5c313a2fa0f
|
[
"AML"
] | 2
|
2020-07-24T11:11:36.000Z
|
2022-03-17T13:33:59.000Z
|
src/python/tests/resolver.py
|
djp952/prebuilt-libxml2
|
8f067d2965c964bf4ba35ad1c0a3f5c313a2fa0f
|
[
"AML"
] | null | null | null |
src/python/tests/resolver.py
|
djp952/prebuilt-libxml2
|
8f067d2965c964bf4ba35ad1c0a3f5c313a2fa0f
|
[
"AML"
] | 3
|
2020-02-20T12:24:55.000Z
|
2021-03-19T08:41:48.000Z
|
#!/usr/bin/python -u
import sys
import libxml2
try:
import StringIO
str_io = StringIO.StringIO
except:
import io
str_io = io.StringIO
# Memory debug specific
libxml2.debugMemory(1)
def myResolver(URL, ID, ctxt):
return(str_io("<foo/>"))
libxml2.setEntityLoader(myResolver)
doc = libxml2.parseFile("doesnotexist.xml")
root = doc.children
if root.name != "foo":
print("root element name error")
sys.exit(1)
doc.freeDoc()
i = 0
while i < 5000:
doc = libxml2.parseFile("doesnotexist.xml")
root = doc.children
if root.name != "foo":
print("root element name error")
sys.exit(1)
doc.freeDoc()
i = i + 1
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
| 20.022222
| 61
| 0.627081
|
import sys
import libxml2
try:
import StringIO
str_io = StringIO.StringIO
except:
import io
str_io = io.StringIO
libxml2.debugMemory(1)
def myResolver(URL, ID, ctxt):
return(str_io("<foo/>"))
libxml2.setEntityLoader(myResolver)
doc = libxml2.parseFile("doesnotexist.xml")
root = doc.children
if root.name != "foo":
print("root element name error")
sys.exit(1)
doc.freeDoc()
i = 0
while i < 5000:
doc = libxml2.parseFile("doesnotexist.xml")
root = doc.children
if root.name != "foo":
print("root element name error")
sys.exit(1)
doc.freeDoc()
i = i + 1
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
| true
| true
|
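Aside on the libxml2 row above: the try/except import keeps one source file working on both Python 2 and 3. The idiom in isolation, with the bare except narrowed to ImportError (an improvement on, not a quote of, the row):
try:
    import StringIO            # Python 2
    str_io = StringIO.StringIO
except ImportError:
    import io                  # Python 3
    str_io = io.StringIO

print(str_io("<foo/>").read())  # <foo/>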
790e8b6fc8974254edd28e3e3dde2a226e3a7184
| 452
|
py
|
Python
|
src/grepros/__main__.py
|
suurjaak/grepros
|
4e719252858b6895d2ee071fcf0c332a3a5dafaa
|
[
"BSD-3-Clause"
] | 10
|
2021-11-05T12:43:21.000Z
|
2022-03-17T06:08:30.000Z
|
src/grepros/__main__.py
|
suurjaak/grepros
|
4e719252858b6895d2ee071fcf0c332a3a5dafaa
|
[
"BSD-3-Clause"
] | 1
|
2022-03-01T09:19:53.000Z
|
2022-03-01T21:38:52.000Z
|
src/grepros/__main__.py
|
suurjaak/grepros
|
4e719252858b6895d2ee071fcf0c332a3a5dafaa
|
[
"BSD-3-Clause"
] | 1
|
2022-01-24T23:46:00.000Z
|
2022-01-24T23:46:00.000Z
|
# -*- coding: utf-8 -*-
"""
Module entry point.
------------------------------------------------------------------------------
This file is part of grepros - grep for ROS bag files and live topics.
Released under the BSD License.
@author Erki Suurjaak
@created 24.10.2021
@modified 02.11.2021
------------------------------------------------------------------------------
"""
from . import main
if "__main__" == __name__:
main.run()
| 25.111111
| 78
| 0.429204
|
from . import main
if "__main__" == __name__:
main.run()
| true
| true
|
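Aside on the grepros row above: a package __main__.py is what `python -m grepros` executes. The stdlib runpy module reproduces that mechanism programmatically; shown here against the stdlib `this` module so the sketch runs anywhere:
import runpy

# Equivalent in spirit to `python -m this`: execute a module with __name__ set
# to "__main__", the exact condition the grepros entry point checks.
runpy.run_module('this', run_name='__main__')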
790e8bf8c36c53b5716cc2c80f558b1aeef48bd5
| 1,183
|
py
|
Python
|
launch.py
|
brianjimenez/emol
|
b789b85b40a99247f008fb7cafa0d019d142cd3c
|
[
"MIT"
] | null | null | null |
launch.py
|
brianjimenez/emol
|
b789b85b40a99247f008fb7cafa0d019d142cd3c
|
[
"MIT"
] | null | null | null |
launch.py
|
brianjimenez/emol
|
b789b85b40a99247f008fb7cafa0d019d142cd3c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
Created on Apr 12, 2017
@author: Brian Jimenez-Garcia
@contact: brian.jimenez@bsc.es
'''
import sys
import os
if len(sys.argv[1:]) != 2:
raise SystemExit("usage: %s pdb_file1 pdb_file2" % os.path.basename(sys.argv[0]))
pdb_file1 = sys.argv[1]
pdb_file2 = sys.argv[2]
# Panda3D imports
from pandac.PandaModules import loadPrcFileData
from emol import EMol
width = 1400
height = 900
# Change window properties
loadPrcFileData("", "window-title Energy Visualizer")
loadPrcFileData("", "fullscreen 0")
loadPrcFileData("", "win-size %s %s" % (width, height))
from direct.showbase.ShowBase import ShowBase
base = ShowBase()
# Set up a loading screen
from direct.gui.OnscreenText import OnscreenText,TextNode
loadingText=OnscreenText("Loading molecules...",1,fg=(1,1,1,1),
pos=(0,0),align=TextNode.ACenter,
scale=.07,mayChange=1)
# Render three frames to avoid black screen
base.graphicsEngine.renderFrame()
base.graphicsEngine.renderFrame()
base.graphicsEngine.renderFrame()
# Load the game
visualizer = EMol(width, height, pdb_file1, pdb_file2)
# Hide loading
loadingText.cleanup()
base.run()
| 23.196078
| 85
| 0.715976
|
import sys
import os
if len(sys.argv[1:]) != 2:
raise SystemExit("usage: %s pdb_file1 pdb_file2" % os.path.basename(sys.argv[0]))
pdb_file1 = sys.argv[1]
pdb_file2 = sys.argv[2]
from pandac.PandaModules import loadPrcFileData
from emol import EMol
width = 1400
height = 900
loadPrcFileData("", "window-title Energy Visualizer")
loadPrcFileData("", "fullscreen 0")
loadPrcFileData("", "win-size %s %s" % (width, height))
from direct.showbase.ShowBase import ShowBase
base = ShowBase()
from direct.gui.OnscreenText import OnscreenText,TextNode
loadingText=OnscreenText("Loading molecules...",1,fg=(1,1,1,1),
pos=(0,0),align=TextNode.ACenter,
scale=.07,mayChange=1)
base.graphicsEngine.renderFrame()
base.graphicsEngine.renderFrame()
base.graphicsEngine.renderFrame()
visualizer = EMol(width, height, pdb_file1, pdb_file2)
loadingText.cleanup()
base.run()
| true
| true
|
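Aside on launch.py above: it validates sys.argv by hand and raises SystemExit with a usage string. argparse gives the same check plus a generated --help; a stand-alone sketch of just the argument handling (Panda3D parts omitted):
import argparse

parser = argparse.ArgumentParser(description='Energy visualizer launcher (sketch)')
parser.add_argument('pdb_file1')
parser.add_argument('pdb_file2')
args = parser.parse_args()
print(args.pdb_file1, args.pdb_file2)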
790e8c064a9b611e4e6bc404c4db08f867c93260
| 5,112
|
py
|
Python
|
python_anticaptcha/tasks.py
|
uguraba/python-anticaptcha
|
572e51fc60e768150429832c31e352e28d1a340c
|
[
"MIT"
] | null | null | null |
python_anticaptcha/tasks.py
|
uguraba/python-anticaptcha
|
572e51fc60e768150429832c31e352e28d1a340c
|
[
"MIT"
] | null | null | null |
python_anticaptcha/tasks.py
|
uguraba/python-anticaptcha
|
572e51fc60e768150429832c31e352e28d1a340c
|
[
"MIT"
] | null | null | null |
import base64
from .fields import BaseField
class BaseTask(object):
def serialize(self, **result):
return result
class ProxyMixin(BaseTask):
def __init__(self, *args, **kwargs):
self.proxyType = kwargs.pop('proxy_type')
self.userAgent = kwargs.pop('user_agent')
self.proxyAddress = kwargs.pop('proxy_address')
self.proxyPort = kwargs.pop('proxy_port')
self.proxyLogin = kwargs.pop('proxy_login')
self.proxyPassword = kwargs.pop('proxy_password')
self.cookies = kwargs.pop('cookies', '')
super(ProxyMixin, self).__init__(*args, **kwargs)
def serialize(self, **result):
result = super(ProxyMixin, self).serialize(**result)
result['userAgent'] = self.userAgent
result['proxyType'] = self.proxyType
result['proxyAddress'] = self.proxyAddress
result['proxyPort'] = self.proxyPort
if self.proxyLogin:
result['proxyLogin'] = self.proxyLogin
result['proxyPassword'] = self.proxyPassword
if self.cookies:
result['cookies'] = self.cookies
return result
class NoCaptchaTaskProxylessTask(BaseTask):
type = "NoCaptchaTaskProxyless"
websiteURL = None
websiteKey = None
websiteSToken = None
def __init__(self, website_url, website_key, website_s_token=None, is_invisible=None):
self.websiteURL = website_url
self.websiteKey = website_key
self.websiteSToken = website_s_token
self.isInvisible = is_invisible
def serialize(self):
data = {'type': self.type,
'websiteURL': self.websiteURL,
'websiteKey': self.websiteKey}
if self.websiteSToken is not None:
data['websiteSToken'] = self.websiteSToken
if self.isInvisible is not None:
data['isInvisible'] = self.isInvisible
return data
class FunCaptchaTask(ProxyMixin):
type = "FunCaptchaTask"
websiteURL = None
websiteKey = None
def __init__(self, website_url, website_key, *args, **kwargs):
self.websiteURL = website_url
self.websiteKey = website_key
super(FunCaptchaTask, self).__init__(*args, **kwargs)
def serialize(self, **result):
result = super(FunCaptchaTask, self).serialize(**result)
result.update({'type': self.type,
'websiteURL': self.websiteURL,
'websitePublicKey': self.websiteKey})
return result
class NoCaptchaTask(ProxyMixin, NoCaptchaTaskProxylessTask):
type = "NoCaptchaTask"
class ImageToTextTask(object):
type = "ImageToTextTask"
fp = None
phrase = None
case = None
numeric = None
math = None
minLength = None
maxLength = None
def __init__(self, fp, phrase=None, case=None, numeric=None, math=None, min_length=None, max_length=None):
self.fp = fp
self.phrase = phrase
self.case = case
self.numeric = numeric
self.math = math
self.minLength = min_length
self.maxLength = max_length
def serialize(self):
return {'type': self.type,
'body': base64.b64encode(self.fp.read()).decode('utf-8'),
'phrase': self.phrase,
'case': self.case,
'numeric': self.numeric,
'math': self.math,
'minLength': self.minLength,
'maxLength': self.maxLength}
class CustomCaptchaTask(BaseTask):
type = 'CustomCaptchaTask'
imageUrl = None
assignment = None
form = None
def __init__(self, imageUrl, form=None, assignment=None):
self.imageUrl = imageUrl
self.form = form or {}
self.assignment = assignment
def serialize(self):
data = super(CustomCaptchaTask, self).serialize()
data.update({'type': self.type,
'imageUrl': self.imageUrl})
if self.form:
forms = []
for name, field in self.form.items():
if isinstance(field, BaseField):
forms.append(field.serialize(name))
else:
field = field.copy()
field['name'] = name
forms.append(field)
data['forms'] = forms
if self.assignment:
data['assignment'] = self.assignment
return data
class RecaptchaV3TaskProxyless(BaseTask):
type = 'RecaptchaV3TaskProxyless'
websiteURL = None
websiteKey = None
minScore = None
pageAction = None
def __init__(self, website_url, website_key, min_score, page_action):
self.websiteURL = website_url
self.websiteKey = website_key
self.minScore = min_score
self.pageAction = page_action
def serialize(self):
data = super(RecaptchaV3TaskProxyless, self).serialize()
data['type'] = self.type
data['websiteURL'] = self.websiteURL
data['websiteKey'] = self.websiteKey
data['minScore'] = self.minScore
data['pageAction'] = self.pageAction
return data
| 31.555556
| 110
| 0.607981
|
import base64
from .fields import BaseField
class BaseTask(object):
def serialize(self, **result):
return result
class ProxyMixin(BaseTask):
def __init__(self, *args, **kwargs):
self.proxyType = kwargs.pop('proxy_type')
self.userAgent = kwargs.pop('user_agent')
self.proxyAddress = kwargs.pop('proxy_address')
self.proxyPort = kwargs.pop('proxy_port')
self.proxyLogin = kwargs.pop('proxy_login')
self.proxyPassword = kwargs.pop('proxy_password')
self.cookies = kwargs.pop('cookies', '')
super(ProxyMixin, self).__init__(*args, **kwargs)
def serialize(self, **result):
result = super(ProxyMixin, self).serialize(**result)
result['userAgent'] = self.userAgent
result['proxyType'] = self.proxyType
result['proxyAddress'] = self.proxyAddress
result['proxyPort'] = self.proxyPort
if self.proxyLogin:
result['proxyLogin'] = self.proxyLogin
result['proxyPassword'] = self.proxyPassword
if self.cookies:
result['cookies'] = self.cookies
return result
class NoCaptchaTaskProxylessTask(BaseTask):
type = "NoCaptchaTaskProxyless"
websiteURL = None
websiteKey = None
websiteSToken = None
def __init__(self, website_url, website_key, website_s_token=None, is_invisible=None):
self.websiteURL = website_url
self.websiteKey = website_key
self.websiteSToken = website_s_token
self.isInvisible = is_invisible
def serialize(self):
data = {'type': self.type,
'websiteURL': self.websiteURL,
'websiteKey': self.websiteKey}
if self.websiteSToken is not None:
data['websiteSToken'] = self.websiteSToken
if self.isInvisible is not None:
data['isInvisible'] = self.isInvisible
return data
class FunCaptchaTask(ProxyMixin):
type = "FunCaptchaTask"
websiteURL = None
websiteKey = None
def __init__(self, website_url, website_key, *args, **kwargs):
self.websiteURL = website_url
self.websiteKey = website_key
super(FunCaptchaTask, self).__init__(*args, **kwargs)
def serialize(self, **result):
result = super(FunCaptchaTask, self).serialize(**result)
result.update({'type': self.type,
'websiteURL': self.websiteURL,
'websitePublicKey': self.websiteKey})
return result
class NoCaptchaTask(ProxyMixin, NoCaptchaTaskProxylessTask):
type = "NoCaptchaTask"
class ImageToTextTask(object):
type = "ImageToTextTask"
fp = None
phrase = None
case = None
numeric = None
math = None
minLength = None
maxLength = None
def __init__(self, fp, phrase=None, case=None, numeric=None, math=None, min_length=None, max_length=None):
self.fp = fp
self.phrase = phrase
self.case = case
self.numeric = numeric
self.math = math
self.minLength = min_length
self.maxLength = max_length
def serialize(self):
return {'type': self.type,
'body': base64.b64encode(self.fp.read()).decode('utf-8'),
'phrase': self.phrase,
'case': self.case,
'numeric': self.numeric,
'math': self.math,
'minLength': self.minLength,
'maxLength': self.maxLength}
class CustomCaptchaTask(BaseTask):
type = 'CustomCaptchaTask'
imageUrl = None
assignment = None
form = None
def __init__(self, imageUrl, form=None, assignment=None):
self.imageUrl = imageUrl
self.form = form or {}
self.assignment = assignment
def serialize(self):
data = super(CustomCaptchaTask, self).serialize()
data.update({'type': self.type,
'imageUrl': self.imageUrl})
if self.form:
forms = []
for name, field in self.form.items():
if isinstance(field, BaseField):
forms.append(field.serialize(name))
else:
field = field.copy()
field['name'] = name
forms.append(field)
data['forms'] = forms
if self.assignment:
data['assignment'] = self.assignment
return data
class RecaptchaV3TaskProxyless(BaseTask):
type = 'RecaptchaV3TaskProxyless'
websiteURL = None
websiteKey = None
minScore = None
pageAction = None
def __init__(self, website_url, website_key, min_score, page_action):
self.websiteURL = website_url
self.websiteKey = website_key
self.minScore = min_score
self.pageAction = page_action
def serialize(self):
data = super(RecaptchaV3TaskProxyless, self).serialize()
data['type'] = self.type
data['websiteURL'] = self.websiteURL
data['websiteKey'] = self.websiteKey
data['minScore'] = self.minScore
data['pageAction'] = self.pageAction
return data
| true
| true
|
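Aside on the tasks row above: the payloads are built with a cooperative serialize() chain -- every class in the MRO calls super() and adds its own keys, so mixins like ProxyMixin compose freely. The skeleton of that pattern, self-contained and with illustrative names:
class Base:
    def serialize(self, **result):
        return result

class ProxyMixin(Base):
    def __init__(self, proxy_address, **kwargs):
        self.proxy_address = proxy_address
        super().__init__(**kwargs)

    def serialize(self, **result):
        result = super().serialize(**result)
        result['proxyAddress'] = self.proxy_address
        return result

class Task(ProxyMixin):
    def serialize(self, **result):
        result = super().serialize(**result)
        result['type'] = 'ExampleTask'
        return result

print(Task(proxy_address='1.2.3.4').serialize())
# {'proxyAddress': '1.2.3.4', 'type': 'ExampleTask'}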
790e8c366ce5a96106155346be664452de39c797
| 2,580
|
py
|
Python
|
tools/jsfy.py
|
floatinghotpot/ajax-local
|
dfbeb6e6aa21a3e6e4bd3f64badd655da3f9c0b3
|
[
"MIT"
] | 1
|
2020-05-25T21:13:24.000Z
|
2020-05-25T21:13:24.000Z
|
tools/jsfy.py
|
floatinghotpot/ajax-local
|
dfbeb6e6aa21a3e6e4bd3f64badd655da3f9c0b3
|
[
"MIT"
] | null | null | null |
tools/jsfy.py
|
floatinghotpot/ajax-local
|
dfbeb6e6aa21a3e6e4bd3f64badd655da3f9c0b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Convert *.json, *.csv and other text data files to js for local use and avoid ajax call.
"""
import optparse
from os import listdir
from os.path import abspath, isfile, isdir, join, splitext, basename
import json;
#curdir = os.path.abspath('.')
curdir = "."
filter_text_ext = [".json", ".csv"]
filter_binary_ext = []
def jsfy_file(path, basedir, fout):
fname = basename(path)
if(fname.startswith(".")):
return
#print(path, basedir)
if(not path.startswith(basedir)):
return
filename, extname = splitext( path )
#print( extname )
if(extname in filter_text_ext):
res_key = path[ len(basedir) : ]
print( res_key + " -> " + path )
fin = open(path, "r")
txt = json.dumps( fin.read() )
fout.write("jsfy_res[\"" + res_key + "\"] = " + txt + ";\n\n");
#elif(extname in filter_binary_ext):
#
pass
def jsfy_dir(path, basedir, fout):
if(not path.endswith("/")):
path = path + "/"
fname = basename(path)
if(fname.startswith(".")):
return
#print(path, basedir)
if(not path.startswith(basedir)):
return
#print( path + ":" )
for f in listdir(path):
subpath = join(path,f)
if( isfile(subpath) ):
jsfy_file(subpath, basedir, fout)
elif( isdir(subpath) ):
jsfy_dir(subpath, basedir, fout)
def main():
"""The entry point for this script."""
usage = """usage: %prog [dir] [-b basedir] [-o jsfile]
example:
%prog
%prog assets -o js/jsfy_res.js
"""
parser = optparse.OptionParser(usage)
parser.add_option("-b", "--base", dest="basedir", help="base dir")
parser.add_option("-o", "--output", dest="outputpath", help="export js file path")
(options, args) = parser.parse_args()
if( isinstance(options.basedir, str)):
basedir = options.basedir
else:
basedir = "."
basedir = abspath(basedir)
if( isinstance(options.outputpath, str)):
outputpath = options.outputpath
else:
outputpath ="./jsfy_res.js"
fout = open( outputpath, "w" )
fout.write("// generated with jsfy.py, v0.1 (https://github.com/floatinghotpot/jsfy)\n\n" )
fout.write("var jsfy_res = jsfy_res || {};\n\n" )
if(not basedir.endswith("/")):
basedir = basedir + "/"
for f in args:
f = abspath(f)
if( isfile(f) ): jsfy_file(f,basedir,fout)
elif( isdir(f) ): jsfy_dir(f,basedir,fout)
fout.close()
# end of main()
if __name__ == "__main__":
main()
| 26.060606
| 95
| 0.584109
|
import optparse
from os import listdir
from os.path import abspath, isfile, isdir, join, splitext, basename
import json;
curdir = "."
filter_text_ext = [".json", ".csv"]
filter_binary_ext = []
def jsfy_file(path, basedir, fout):
fname = basename(path)
if(fname.startswith(".")):
return
if(not path.startswith(basedir)):
return
filename, extname = splitext( path )
if(extname in filter_text_ext):
res_key = path[ len(basedir) : ]
print( res_key + " -> " + path )
fin = open(path, "r")
txt = json.dumps( fin.read() )
fout.write("jsfy_res[\"" + res_key + "\"] = " + txt + ";\n\n");
pass
def jsfy_dir(path, basedir, fout):
if(not path.endswith("/")):
path = path + "/"
fname = basename(path)
if(fname.startswith(".")):
return
if(not path.startswith(basedir)):
return
for f in listdir(path):
subpath = join(path,f)
if( isfile(subpath) ):
jsfy_file(subpath, basedir, fout)
elif( isdir(subpath) ):
jsfy_dir(subpath, basedir, fout)
def main():
usage = """usage: %prog [dir] [-b basedir] [-o jsfile]
example:
%prog
%prog assets -o js/jsfy_res.js
"""
parser = optparse.OptionParser(usage)
parser.add_option("-b", "--base", dest="basedir", help="base dir")
parser.add_option("-o", "--output", dest="outputpath", help="export js file path")
(options, args) = parser.parse_args()
if( isinstance(options.basedir, str)):
basedir = options.basedir
else:
basedir = "."
basedir = abspath(basedir)
if( isinstance(options.outputpath, str)):
outputpath = options.outputpath
else:
outputpath ="./jsfy_res.js"
fout = open( outputpath, "w" )
fout.write("// generated with jsfy.py, v0.1 (https://github.com/floatinghotpot/jsfy)\n\n" )
fout.write("var jsfy_res = jsfy_res || {};\n\n" )
if(not basedir.endswith("/")):
basedir = basedir + "/"
for f in args:
f = abspath(f)
if( isfile(f) ): jsfy_file(f,basedir,fout)
elif( isdir(f) ): jsfy_dir(f,basedir,fout)
fout.close()
if __name__ == "__main__":
main()
| true
| true
|
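Aside on jsfy.py above: its load-bearing trick is that json.dumps() on a Python string emits a double-quoted, escaped literal that is also valid JavaScript source, so text files can be embedded directly into a .js resource map. In isolation:
import json

text = 'line one\nsays "hi"'
print('jsfy_res["demo.txt"] = ' + json.dumps(text) + ';')
# jsfy_res["demo.txt"] = "line one\nsays \"hi\"";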
790e8d2aa97f1835c89929924f4c62d04a7e13d2
| 2,655
|
py
|
Python
|
terminusdb_client/tests/mockResponse.py
|
pnijhara/terminusdb-client-python
|
bdc3480c21a6c1eb123abae1592e6a2b27b046c1
|
[
"Apache-2.0"
] | null | null | null |
terminusdb_client/tests/mockResponse.py
|
pnijhara/terminusdb-client-python
|
bdc3480c21a6c1eb123abae1592e6a2b27b046c1
|
[
"Apache-2.0"
] | null | null | null |
terminusdb_client/tests/mockResponse.py
|
pnijhara/terminusdb-client-python
|
bdc3480c21a6c1eb123abae1592e6a2b27b046c1
|
[
"Apache-2.0"
] | null | null | null |
from terminusdb_client.woqlclient.api_endpoint_const import APIEndpointConst
from .connectCapabilitiesResponse import ConnectResponse
from .getSchemaTurtleResponse import RESPONSE
def mocked_requests(*args, **kwargs):
class MockResponse:
def json(self):
if self._json_data is None:
raise ValueError("EXCEPTION NO JSON OBJECT")
return self._json_data
@property
def status_code(self):
return self._status_code
@property
def url(self):
return self._url
@property
def text(self):
return self._text
def __init__(self, url, status, action_type):
# set status code and content
self._json_data = None
self._text = None
self._status_code = status
self._content = "cont"
self._url = url
# add json data if provided
if action_type == APIEndpointConst.CONNECT:
self._json_data = ConnectResponse
elif action_type == APIEndpointConst.GET_TRIPLES:
self._text = RESPONSE
# elif action_type == APIEndpointConst.WOQL_SELECT:
# with open("tests/getAllClassQueryResponse.json") as json_file:
# json_data = json.load(json_file)
# self._json_data = json_data
# json_file.close()
elif (
action_type == APIEndpointConst.CREATE_DATABASE
or action_type == APIEndpointConst.DELETE_DATABASE
or action_type == APIEndpointConst.UPDATE_TRIPLES
or action_type == APIEndpointConst.BRANCH
or action_type == APIEndpointConst.CREATE_GRAPH
):
self._json_data = {"terminus:status": "terminus:success"}
if (
args[0]
== "http://localhost:6363/branch/admin/myDBName/local/branch/my_new_branch"
):
return MockResponse(args[0], 200, APIEndpointConst.BRANCH)
elif (
args[0]
== "http://localhost:6363/graph/admin/myDBName/local/branch/master/instance/mygraph"
):
return MockResponse(args[0], 200, APIEndpointConst.CREATE_GRAPH)
elif (
args[0]
== "http://localhost:6363/triples/admin/myDBName/local/branch/master/instance/mygraph"
):
return MockResponse(args[0], 200, APIEndpointConst.GET_TRIPLES)
elif args[0] == "http://localhost:6363/db/admin/myFirstTerminusDB":
return MockResponse(args[0], 200, APIEndpointConst.DELETE_DATABASE)
return MockResponse(args[0], 200, APIEndpointConst.CONNECT)
| 33.607595
| 94
| 0.616196
|
from terminusdb_client.woqlclient.api_endpoint_const import APIEndpointConst
from .connectCapabilitiesResponse import ConnectResponse
from .getSchemaTurtleResponse import RESPONSE
def mocked_requests(*args, **kwargs):
class MockResponse:
def json(self):
if self._json_data is None:
raise ValueError("EXCEPTION NO JSON OBJECT")
return self._json_data
@property
def status_code(self):
return self._status_code
@property
def url(self):
return self._url
@property
def text(self):
return self._text
def __init__(self, url, status, action_type):
self._json_data = None
self._text = None
self._status_code = status
self._content = "cont"
self._url = url
if action_type == APIEndpointConst.CONNECT:
self._json_data = ConnectResponse
elif action_type == APIEndpointConst.GET_TRIPLES:
self._text = RESPONSE
elif (
action_type == APIEndpointConst.CREATE_DATABASE
or action_type == APIEndpointConst.DELETE_DATABASE
or action_type == APIEndpointConst.UPDATE_TRIPLES
or action_type == APIEndpointConst.BRANCH
or action_type == APIEndpointConst.CREATE_GRAPH
):
self._json_data = {"terminus:status": "terminus:success"}
if (
args[0]
== "http://localhost:6363/branch/admin/myDBName/local/branch/my_new_branch"
):
return MockResponse(args[0], 200, APIEndpointConst.BRANCH)
elif (
args[0]
== "http://localhost:6363/graph/admin/myDBName/local/branch/master/instance/mygraph"
):
return MockResponse(args[0], 200, APIEndpointConst.CREATE_GRAPH)
elif (
args[0]
== "http://localhost:6363/triples/admin/myDBName/local/branch/master/instance/mygraph"
):
return MockResponse(args[0], 200, APIEndpointConst.GET_TRIPLES)
elif args[0] == "http://localhost:6363/db/admin/myFirstTerminusDB":
return MockResponse(args[0], 200, APIEndpointConst.DELETE_DATABASE)
return MockResponse(args[0], 200, APIEndpointConst.CONNECT)
| true
| true
|
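Aside on mockResponse above: a factory like mocked_requests is normally wired into tests with unittest.mock.patch(..., side_effect=...), so every HTTP call inside the code under test hits the fake. A minimal wiring sketch, assuming the requests package is installed; the URL and fake_get are illustrative:
from unittest import mock
import requests

def fake_get(url, **kwargs):
    response = mock.Mock()
    response.status_code = 200
    response.url = url
    return response

with mock.patch('requests.get', side_effect=fake_get):
    r = requests.get('http://localhost:6363/db/admin/demo')
    print(r.status_code, r.url)  # 200 http://localhost:6363/db/admin/demo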
790e8d7c3fe7dfe74097a23bfce3009a1fdf6d41
| 3,076
|
py
|
Python
|
BeiKeZuFangSpider/settings.py
|
sunhailin-Leo/BeiKeZuFangSpider
|
2fe0a29b1f666b71379c1d357331b9badd8ccc3c
|
[
"MIT"
] | 4
|
2018-09-18T00:33:12.000Z
|
2020-08-19T06:24:46.000Z
|
BeiKeZuFangSpider/settings.py
|
sunhailin-Leo/BeiKeZuFangSpider
|
2fe0a29b1f666b71379c1d357331b9badd8ccc3c
|
[
"MIT"
] | null | null | null |
BeiKeZuFangSpider/settings.py
|
sunhailin-Leo/BeiKeZuFangSpider
|
2fe0a29b1f666b71379c1d357331b9badd8ccc3c
|
[
"MIT"
] | 1
|
2018-10-09T09:19:39.000Z
|
2018-10-09T09:19:39.000Z
|
# -*- coding: utf-8 -*-
BOT_NAME = 'BeiKeZuFangSpider'
SPIDER_MODULES = ['BeiKeZuFangSpider.spiders']
NEWSPIDER_MODULE = 'BeiKeZuFangSpider.spiders'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Kafka configuration
# Kafka bootstrap server ip:port (default localhost:9092)
KAFKA_IP_PORT = ["localhost:9092"]
# Kafka topic name
KAFKA_TOPIC_NAME = "BeiKeZuFang"
# MongoDB configuration
MONGODB_HOST = "127.0.0.1"
MONGODB_PORT = 27017
MONGODB_USER = ""
MONGODB_PASS = ""
MONGODB_DB_NAME = "BeiKeData"
MONGODB_COL_NAME = "ZuFang"
# CSV export
CSV_EXPORTER = True
CSV_DEFAULT_PATH = "./ExportData/"
# Disable Telnet Console (enabled by default)
TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'BeiKeZuFangSpider.middlewares.BeikezufangspiderSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy.downloadermiddleware.useragent.UserAgentMiddleware': None,
'BeiKeZuFangSpider.middlewares.BeiKeZuFangScrapyUserAgentMiddleware': 400,
}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'BeiKeZuFangSpider.pipelines.BeiKeZuFangSpiderPipeline': 1,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 32.041667
| 102
| 0.776983
|
BOT_NAME = 'BeiKeZuFangSpider'
SPIDER_MODULES = ['BeiKeZuFangSpider.spiders']
NEWSPIDER_MODULE = 'BeiKeZuFangSpider.spiders'
ROBOTSTXT_OBEY = True
KAFKA_IP_PORT = ["localhost:9092"]
KAFKA_TOPIC_NAME = "BeiKeZuFang"
MONGODB_HOST = "127.0.0.1"
MONGODB_PORT = 27017
MONGODB_USER = ""
MONGODB_PASS = ""
MONGODB_DB_NAME = "BeiKeData"
MONGODB_COL_NAME = "ZuFang"
CSV_EXPORTER = True
CSV_DEFAULT_PATH = "./ExportData/"
TELNETCONSOLE_ENABLED = False
DOWNLOADER_MIDDLEWARES = {
'scrapy.downloadermiddleware.useragent.UserAgentMiddleware': None,
'BeiKeZuFangSpider.middlewares.BeiKeZuFangScrapyUserAgentMiddleware': 400,
}
ITEM_PIPELINES = {
'BeiKeZuFangSpider.pipelines.BeiKeZuFangSpiderPipeline': 1,
}
| true
| true
|
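Aside on the settings row above: values such as MONGODB_HOST are typically consumed through Scrapy's standard from_crawler hook, which hands the pipeline the live settings object. A hedged skeleton (pipeline name and defaults are ours; it needs a running Scrapy crawler to do anything):
class MongoPipeline:
    def __init__(self, host, port, db_name):
        self.host, self.port, self.db_name = host, port, db_name

    @classmethod
    def from_crawler(cls, crawler):
        # crawler.settings exposes the module above at runtime
        s = crawler.settings
        return cls(
            host=s.get('MONGODB_HOST', '127.0.0.1'),
            port=s.getint('MONGODB_PORT', 27017),
            db_name=s.get('MONGODB_DB_NAME', 'BeiKeData'),
        )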
790e8deb03af8d1cae7c0cfaed049b854206a7d8
| 833
|
py
|
Python
|
gscripts/expr_db/test_seqdb.py
|
YeoLab/gscripts
|
ae653d29d0ce82d342f7f6ff5bbeedd27a2e062b
|
[
"MIT"
] | 12
|
2015-07-10T09:36:49.000Z
|
2021-07-06T03:25:04.000Z
|
gscripts/expr_db/test_seqdb.py
|
YeoLab/gscripts
|
ae653d29d0ce82d342f7f6ff5bbeedd27a2e062b
|
[
"MIT"
] | 43
|
2015-01-21T20:01:38.000Z
|
2021-04-13T17:50:38.000Z
|
gscripts/expr_db/test_seqdb.py
|
YeoLab/gscripts
|
ae653d29d0ce82d342f7f6ff5bbeedd27a2e062b
|
[
"MIT"
] | 19
|
2015-05-02T09:33:17.000Z
|
2022-02-12T17:08:06.000Z
|
import seq_db
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('mysql+pymysql://ppliu:some_pass@sauron.ucsd.edu/test')
seq_db.Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
"""
try:
s = seq_db.CLIPSeq()
s.sample_name = 'test_sample'
s.expr_name = 'test_experiment'
s.file_location = 'some/where/on/the/server'
s.species = 'hg19'
s.collab = 'dr. sequencing'
s.collab_institute = 'ucsd'
session.add(s)
session.commit()
except Exception as e:
print e
session.rollback()
session.commit()
"""
try:
for expr in session.query(seq_db.SeqExpr).all():
print expr.sample_name,
print expr.project_name,
print expr.check_file()
except Exception as e:
print e
| 20.825
| 78
| 0.684274
|
import seq_db
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('mysql+pymysql://ppliu:some_pass@sauron.ucsd.edu/test')
seq_db.Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
"""
try:
s = seq_db.CLIPSeq()
s.sample_name = 'test_sample'
s.expr_name = 'test_experiment'
s.file_location = 'some/where/on/the/server'
s.species = 'hg19'
s.collab = 'dr. sequencing'
s.collab_institute = 'ucsd'
session.add(s)
session.commit()
except Exception as e:
print e
session.rollback()
session.commit()
"""
try:
for expr in session.query(seq_db.SeqExpr).all():
print expr.sample_name,
print expr.project_name,
print expr.check_file()
except Exception as e:
print e
| false
| true
|
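Aside on the seq_db row above (left in its original Python 2 syntax): it follows the classic SQLAlchemy commit/rollback pattern. A Python 3, self-contained version against an in-memory SQLite engine, assuming only that SQLAlchemy is installed; no project models are involved:
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite:///:memory:')
Session = sessionmaker(bind=engine)
session = Session()
try:
    session.commit()  # a real test would session.add() ORM objects first
except Exception as e:
    print(e)
    session.rollback()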
790e8deb9fc4d03f5d854f02dc6665b72ed2f535
| 1,154
|
py
|
Python
|
day03/binary_diagnostic.py
|
SimeonHristov99/aoc_2021
|
1925b4b967a4486e54714c3fe523f72a20707993
|
[
"MIT"
] | 3
|
2021-12-03T17:12:33.000Z
|
2021-12-09T21:24:56.000Z
|
day03/binary_diagnostic.py
|
SimeonHristov99/aoc_2021
|
1925b4b967a4486e54714c3fe523f72a20707993
|
[
"MIT"
] | null | null | null |
day03/binary_diagnostic.py
|
SimeonHristov99/aoc_2021
|
1925b4b967a4486e54714c3fe523f72a20707993
|
[
"MIT"
] | null | null | null |
from collections import Counter
def part1(lines):
gamma = ''
epsilon = ''
num_bits = len(lines[0])
for i in range(num_bits):
most_common = Counter(map(lambda x: x[i], lines)).most_common(1)[0][0]
gamma += most_common
epsilon += '0' if most_common == '1' else '1'
return int(gamma, base=2) * int(epsilon, base=2)
def get_value(data, default):
for i in range(len(data[0])):
cntr = Counter(map(lambda x: x[i], data))
most_common = cntr.most_common(1)[0][0] if default == '1' else cntr.most_common()[-1][0][0]
if cntr.most_common(1)[0][1] == cntr.most_common()[-1][1] and len(cntr.most_common()) > 1:
most_common = default
data = list(filter(lambda x: x[i] == most_common, data))
if len(data) < 2:
break
print(data[0])
return int(data[0], base=2)
def part2(lines):
return get_value(lines, '1') * get_value(lines, '0')
def main():
with open('input.txt', 'r') as f:
lines = f.read().splitlines()
print(f'Part 1: {part1(lines)}')
print(f'Part 2: {part2(lines)}')
if __name__ == '__main__':
main()
| 24.553191
| 99
| 0.57539
|
from collections import Counter
def part1(lines):
gamma = ''
epsilon = ''
num_bits = len(lines[0])
for i in range(num_bits):
most_common = Counter(map(lambda x: x[i], lines)).most_common(1)[0][0]
gamma += most_common
epsilon += '0' if most_common == '1' else '1'
return int(gamma, base=2) * int(epsilon, base=2)
def get_value(data, default):
for i in range(len(data[0])):
cntr = Counter(map(lambda x: x[i], data))
most_common = cntr.most_common(1)[0][0] if default == '1' else cntr.most_common()[-1][0][0]
if cntr.most_common(1)[0][1] == cntr.most_common()[-1][1] and len(cntr.most_common()) > 1:
most_common = default
data = list(filter(lambda x: x[i] == most_common, data))
if len(data) < 2:
break
print(data[0])
return int(data[0], base=2)
def part2(lines):
return get_value(lines, '1') * get_value(lines, '0')
def main():
with open('input.txt', 'r') as f:
lines = f.read().splitlines()
print(f'Part 1: {part1(lines)}')
print(f'Part 2: {part2(lines)}')
if __name__ == '__main__':
main()
| true
| true
|
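Aside on binary_diagnostic above: part 2 hinges on Counter tie handling -- when '0' and '1' occur equally often, most_common(1)[0] and most_common()[-1] report the same count and get_value falls back to the default bit. Observed directly (on CPython 3.7+ insertion order breaks the tie):
from collections import Counter

c = Counter(['1', '0', '1', '0'])
most = c.most_common(1)[0]    # ('1', 2)
least = c.most_common()[-1]   # ('0', 2)
print(most, least, most[1] == least[1])  # equal counts -> tie detected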
790e8ef5044f3c01c9f08e978e6f0c11248d9e9b
| 227
|
py
|
Python
|
src/utils.py
|
AntoineRondelet/SideChannelLeaksOverHTTPS
|
1035398effe2178c496a15b31579232cf2e3df29
|
[
"MIT"
] | 3
|
2021-11-01T14:59:29.000Z
|
2022-03-09T20:06:03.000Z
|
src/utils.py
|
AntoineRondelet/SideChannelLeaksOverHTTPS
|
1035398effe2178c496a15b31579232cf2e3df29
|
[
"MIT"
] | 2
|
2018-10-27T20:33:23.000Z
|
2018-11-08T18:48:02.000Z
|
src/utils.py
|
AntoineRondelet/SideChannelLeaksOverHTTPS
|
1035398effe2178c496a15b31579232cf2e3df29
|
[
"MIT"
] | 1
|
2018-05-10T14:45:03.000Z
|
2018-05-10T14:45:03.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class TrafficWeight:
def __init__(self):
self.request = 0
self.response = 0
class PacketInterval:
def __init__(self):
self.firstPacket = 0
self.lastPacket = 0
| 17.461538
| 24
| 0.651982
|
class TrafficWeight:
def __init__(self):
self.request = 0
self.response = 0
class PacketInterval:
def __init__(self):
self.firstPacket = 0
self.lastPacket = 0
| true
| true
|
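Aside on utils.py above: in Python 3 the same value holders can be dataclasses, which generate __init__ and a readable repr for free; a sketch, not the project's code:
from dataclasses import dataclass

@dataclass
class TrafficWeight:
    request: int = 0
    response: int = 0

print(TrafficWeight())  # TrafficWeight(request=0, response=0)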
790e8f171d56aa5ded6020675a1feb1bf06ffe59
| 736
|
py
|
Python
|
api/server.py
|
xu-hao/tx-router
|
3e94edc7b6341d68ea0cc0e68bfa0e787df45ae8
|
[
"MIT"
] | null | null | null |
api/server.py
|
xu-hao/tx-router
|
3e94edc7b6341d68ea0cc0e68bfa0e787df45ae8
|
[
"MIT"
] | null | null | null |
api/server.py
|
xu-hao/tx-router
|
3e94edc7b6341d68ea0cc0e68bfa0e787df45ae8
|
[
"MIT"
] | null | null | null |
import connexion
from flask_cors import CORS
import api
from flask import request
import sys
def create_app():
app = connexion.FlaskApp(__name__, specification_dir='openapi/')
app.add_api('my_api.yaml')
@app.app.route("/v1/plugin/<name>/<path:path>", methods=["GET", "POST"])
def plugin(name, path):
if request.method == "GET":
return api.get_plugin(name, path, request.headers, kwargs=request.args.to_dict())
elif request.method == "POST":
return api.post_plugin(name, path, request.headers, request.get_json(), kwargs=request.args.to_dict())
else:
raise RuntimeError("unsupported method " + request.method)
CORS(app.app)
return app
| 29.44
| 114
| 0.653533
|
import connexion
from flask_cors import CORS
import api
from flask import request
import sys
def create_app():
app = connexion.FlaskApp(__name__, specification_dir='openapi/')
app.add_api('my_api.yaml')
@app.app.route("/v1/plugin/<name>/<path:path>", methods=["GET", "POST"])
def plugin(name, path):
if request.method == "GET":
return api.get_plugin(name, path, request.headers, kwargs=request.args.to_dict())
elif request.method == "POST":
return api.post_plugin(name, path, request.headers, request.get_json(), kwargs=request.args.to_dict())
else:
raise RuntimeError("unsupported method " + request.method)
CORS(app.app)
return app
| true
| true
|
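Aside on server.py above: it mixes a spec-driven connexion API with one hand-written catch-all route. The same method-dispatch route in plain Flask (no connexion; handler bodies are placeholders, and the dict-return-as-JSON shortcut assumes Flask 1.1+):
from flask import Flask, request

app = Flask(__name__)

@app.route("/v1/plugin/<name>/<path:path>", methods=["GET", "POST"])
def plugin(name, path):
    if request.method == "GET":
        return {"name": name, "path": path, "args": request.args.to_dict()}
    return {"name": name, "path": path, "body": request.get_json(silent=True)}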
790e9162d319d5f33ca3882cd3a109ca98c02965
| 2,706
|
py
|
Python
|
thor/orbits/ephemeris.py
|
B612-Asteroid-Institute/thor
|
d3d1dcbe86f67a62c90b4cde3fc577e414825cf2
|
[
"BSD-3-Clause"
] | null | null | null |
thor/orbits/ephemeris.py
|
B612-Asteroid-Institute/thor
|
d3d1dcbe86f67a62c90b4cde3fc577e414825cf2
|
[
"BSD-3-Clause"
] | null | null | null |
thor/orbits/ephemeris.py
|
B612-Asteroid-Institute/thor
|
d3d1dcbe86f67a62c90b4cde3fc577e414825cf2
|
[
"BSD-3-Clause"
] | null | null | null |
import warnings
import pandas as pd
from ..config import Config
from ..backend import Backend
from ..backend import PYOORB
from ..backend import FINDORB
from ..backend import MJOLNIR
__all__ = [
"generateEphemeris"
]
def generateEphemeris(
orbits,
observers,
backend="MJOLNIR",
backend_kwargs={},
test_orbit=None,
threads=Config.NUM_THREADS,
chunk_size=1
):
"""
Generate ephemeris for the orbits and the given observatories.
Parameters
----------
orbits : `~numpy.ndarray` (N, 6)
Orbits for which to generate ephemeris. If backend is 'THOR', then these orbits must be expressed
as heliocentric ecliptic cartesian elements. If backend is 'PYOORB' orbits may be
expressed in keplerian, cometary or cartesian elements.
observers : dict
A dictionary with observatory codes as keys and observation_times (`~astropy.time.core.Time`) as values.
Or a data frame with observatory codes, observation times (in UTC), and the observer's heliocentric ecliptic state.
The expected data frame columns are obs_x, obs_y, obs_y and optionally the velocity columns obs_vx, obs_vy, obs_vz.
If no velocities are not correctly given, then sky-plane velocities will all be zero.
(See: `~thor.observatories.getObserverState`)
backend : {'MJOLNIR', 'PYOORB'}, optional
Which backend to use.
backend_kwargs : dict, optional
Settings and additional parameters to pass to selected
backend.
Returns
-------
ephemeris : `~pandas.DataFrame` (N x M, 21) or (N x M, 18)
A DataFrame containing the generated ephemeris.
"""
if backend == "MJOLNIR":
backend = MJOLNIR(**backend_kwargs)
elif backend == "PYOORB":
backend = PYOORB(**backend_kwargs)
elif backend == "FINDORB":
backend = FINDORB(**backend_kwargs)
elif isinstance(backend, Backend):
backend = backend
if len(backend_kwargs) > 0:
warnings.warn("backend_kwargs will be ignored since a instantiated backend class has been given.")
else:
err = (
"backend should be one of 'MJOLNIR', 'PYOORB', 'FINDORB' or an instantiated Backend class"
)
raise ValueError(err)
ephemeris = backend.generateEphemeris(
orbits,
observers,
test_orbit=test_orbit,
threads=threads,
chunk_size=chunk_size
)
ephemeris.sort_values(
by=["orbit_id", "observatory_code", "mjd_utc"],
inplace=True
)
ephemeris.reset_index(
inplace=True,
drop=True
)
return ephemeris
| 31.103448
| 123
| 0.645233
|
import warnings
import pandas as pd
from ..config import Config
from ..backend import Backend
from ..backend import PYOORB
from ..backend import FINDORB
from ..backend import MJOLNIR
__all__ = [
"generateEphemeris"
]
def generateEphemeris(
orbits,
observers,
backend="MJOLNIR",
backend_kwargs={},
test_orbit=None,
threads=Config.NUM_THREADS,
chunk_size=1
):
if backend == "MJOLNIR":
backend = MJOLNIR(**backend_kwargs)
elif backend == "PYOORB":
backend = PYOORB(**backend_kwargs)
elif backend == "FINDORB":
backend = FINDORB(**backend_kwargs)
elif isinstance(backend, Backend):
backend = backend
if len(backend_kwargs) > 0:
warnings.warn("backend_kwargs will be ignored since a instantiated backend class has been given.")
else:
err = (
"backend should be one of 'MJOLNIR', 'PYOORB', 'FINDORB' or an instantiated Backend class"
)
raise ValueError(err)
ephemeris = backend.generateEphemeris(
orbits,
observers,
test_orbit=test_orbit,
threads=threads,
chunk_size=chunk_size
)
ephemeris.sort_values(
by=["orbit_id", "observatory_code", "mjd_utc"],
inplace=True
)
ephemeris.reset_index(
inplace=True,
drop=True
)
return ephemeris
| true
| true
|
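Aside on ephemeris.py above: generateEphemeris accepts either a backend name or an already-instantiated Backend. That string-or-instance dispatch reduces to a registry lookup; a self-contained sketch with stand-in class names:
class Backend:
    pass

class Mjolnir(Backend):
    pass

class PyOorb(Backend):
    pass

_BACKENDS = {'MJOLNIR': Mjolnir, 'PYOORB': PyOorb}

def resolve_backend(backend, **kwargs):
    if isinstance(backend, Backend):
        return backend  # kwargs are ignored for ready-made instances
    try:
        return _BACKENDS[backend](**kwargs)
    except KeyError:
        raise ValueError(
            f"backend should be one of {sorted(_BACKENDS)} or a Backend instance")

print(type(resolve_backend('PYOORB')).__name__)  # PyOorb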
790e919e7a4953bc91186ab386782cbf53a77711
| 3,831
|
py
|
Python
|
nerblackbox/modules/datasets/formatter/conll2003_formatter.py
|
af-ai-center/nerblackbox
|
a2b751d0b74c3f4779ccf3846e35d8575b488027
|
[
"Apache-2.0"
] | 11
|
2020-09-24T12:10:52.000Z
|
2021-05-28T12:59:06.000Z
|
nerblackbox/modules/datasets/formatter/conll2003_formatter.py
|
af-ai-center/nerblackbox
|
a2b751d0b74c3f4779ccf3846e35d8575b488027
|
[
"Apache-2.0"
] | 1
|
2020-07-03T13:13:35.000Z
|
2020-07-03T13:13:35.000Z
|
nerblackbox/modules/datasets/formatter/conll2003_formatter.py
|
af-ai-center/nerblackbox
|
a2b751d0b74c3f4779ccf3846e35d8575b488027
|
[
"Apache-2.0"
] | null | null | null |
import os
import requests
from os.path import join, isfile
from nerblackbox.modules.datasets.formatter.base_formatter import BaseFormatter
class CoNLL2003Formatter(BaseFormatter):
def __init__(self):
ner_dataset = "conll2003"
ner_tag_list = ["PER", "ORG", "LOC", "MISC"]
super().__init__(ner_dataset, ner_tag_list)
####################################################################################################################
# ABSTRACT BASE METHODS
####################################################################################################################
def get_data(self, verbose: bool):
"""
I: get data
-----------
:param verbose: [bool]
:return: -
"""
url_base = "https://raw.githubusercontent.com/patverga/torch-ner-nlp-from-scratch/master/data/conll2003/"
targets = ["eng.train", "eng.testa", "eng.testb"]
for target in targets:
target_file = join(self.dataset_path, target)
# fetch tgz from url
if isfile(target_file):
if verbose:
print(f".. file at {target_file} already exists")
else:
url = url_base + target
myfile = requests.get(url, allow_redirects=True)
open(target_file, "wb").write(myfile.content)
if verbose:
print(f".. file fetched from {url} and saved at {target_file}")
def create_ner_tag_mapping(self):
"""
II: customize ner_training tag mapping if wanted
-------------------------------------
:return: ner_tag_mapping: [dict] w/ keys = tags in original data, values = tags in formatted data
"""
return dict()
def format_data(self):
"""
III: format data
----------------
:return: -
"""
for phase in ["train", "val", "test"]:
rows = self._read_original_file(phase)
self._write_formatted_csv(phase, rows)
def resplit_data(self, val_fraction: float):
"""
IV: resplit data
----------------
:param val_fraction: [float]
:return: -
"""
# train -> train
df_train = self._read_formatted_csvs(["train"])
self._write_final_csv("train", df_train)
# val -> val
df_val = self._read_formatted_csvs(["val"])
self._write_final_csv("val", df_val)
# test -> test
df_test = self._read_formatted_csvs(["test"])
self._write_final_csv("test", df_test)
####################################################################################################################
# HELPER: READ ORIGINAL
####################################################################################################################
def _read_original_file(self, phase):
"""
III: format data
---------------------------------------------
:param phase: [str] 'train' or 'test'
:return: _rows: [list] of [list] of [str], e.g. [[], ['Inger', 'PER'], ['säger', '0'], ..]
"""
file_name = {
"train": "eng.train",
"val": "eng.testa",
"test": "eng.testb",
}
file_path_original = join(self.dataset_path, file_name[phase])
_rows = list()
if os.path.isfile(file_path_original):
with open(file_path_original) as f:
for i, row in enumerate(f.readlines()):
_rows.append(row.strip().split())
print(f"\n> read {file_path_original}")
_rows = [
[row[0], row[-1]] if (len(row) == 4 and row[0] != "-DOCSTART-") else list()
for row in _rows
]
return _rows
| 35.472222
| 120
| 0.456017
|
import os
import requests
from os.path import join, isfile
from nerblackbox.modules.datasets.formatter.base_formatter import BaseFormatter
class CoNLL2003Formatter(BaseFormatter):
def __init__(self):
ner_dataset = "conll2003"
ner_tag_list = ["PER", "ORG", "LOC", "MISC"]
super().__init__(ner_dataset, ner_tag_list)
| true
| true
|
790e921990c3acd738f6bd67c36429795b03f0c4
| 13,727
|
py
|
Python
|
tests/test_speedify.py
|
meramsey/speedify-py
|
db3210f48a9234f20f28d29463934c061df2f9e0
|
[
"Apache-2.0"
] | 23
|
2019-07-24T16:40:39.000Z
|
2022-02-09T22:49:42.000Z
|
tests/test_speedify.py
|
meramsey/speedify-py
|
db3210f48a9234f20f28d29463934c061df2f9e0
|
[
"Apache-2.0"
] | 3
|
2019-08-08T18:32:56.000Z
|
2020-05-14T14:04:29.000Z
|
tests/test_speedify.py
|
meramsey/speedify-py
|
db3210f48a9234f20f28d29463934c061df2f9e0
|
[
"Apache-2.0"
] | 9
|
2019-09-26T03:30:00.000Z
|
2022-03-13T04:38:22.000Z
|
import os
import sys
sys.path.append('../')
import speedify
from speedify import State, Priority, SpeedifyError, SpeedifyAPIError
import speedifysettings
import speedifyutil
import logging
import unittest
import time
logging.basicConfig(handlers=[logging.FileHandler('test.log'),logging.StreamHandler(sys.stdout)],format='%(asctime)s\t%(levelname)s\t%(module)s\t%(message)s', level=logging.INFO)
# Test the speedify library
# assumes you're logged in
class TestSpeedify(unittest.TestCase):
    # Note: doesn't test login/logout; that would mean dealing with stored credentials.
def setUp(self):
speedify.encryption(True)
speedify.transport("auto")
speedify.jumbo(True)
speedify.crashreports(True)
speedify.packetaggregation(True)
speedify.routedefault(True)
speedify.connectmethod("closest")
speedify.disconnect()
def test_connect(self):
serverinfo = speedify.connect_closest()
state = speedify.show_state()
self.assertEqual(state,State.CONNECTED)
self.assertIn("tag", serverinfo)
self.assertIn("country", serverinfo)
def test_connect_country(self):
serverinfo = speedify.connect_country("sg")
state = speedify.show_state()
self.assertEqual(state,State.CONNECTED)
self.assertIn("tag", serverinfo)
self.assertIn("country", serverinfo)
self.assertEqual(serverinfo["country"], "sg")
new_serverinfo = speedify.show_currentserver()
self.assertEqual(new_serverinfo["country"], "sg")
def test_transport(self):
mysettings = speedify.transport("https")
serverinfo = speedify.connect()
mysettings = speedify.show_settings()
self.assertEqual(mysettings["transportMode"], "https")
# to make sure runtime changed, could check stats and look for connectionstats : connections[] : protocol
mysettings = speedify.transport("tcp")
self.assertEqual(mysettings["transportMode"], "tcp")
serverinfo = speedify.connect()
mysettings = speedify.show_settings()
self.assertEqual(mysettings["transportMode"], "tcp")
def test_bad_country(self):
#logging.disable(logging.ERROR);
logging.info("Testing error handling, ignore next few errors")
state = speedify.show_state()
self.assertEqual(state,State.LOGGED_IN)
logging.debug("connecting to bad country")
with self.assertRaises(SpeedifyAPIError):
speedify.connect_country("pp")
logging.debug("after connecting to bad country")
state = speedify.show_state()
self.assertEqual(state,State.LOGGED_IN)
logging.info("Done testing error handling")
#logging.disable(logging.NOTSET)
def test_disconnect(self):
speedify.connect_closest()
state = speedify.show_state()
self.assertEqual(state,State.CONNECTED)
speedify.disconnect()
state = speedify.show_state()
self.assertEqual(state,speedify.State.LOGGED_IN)
def test_connectmethod(self):
speedify.connect_closest()
speedify.connectmethod("private", "jp")
        # pull settings from speedify to be sure they really took effect
cm_settings = speedify.show_connectmethod()
self.assertEqual(cm_settings["connectMethod"],"private")
        # country is ignored for the private connect method
self.assertEqual(cm_settings["country"], "")
self.assertEqual(cm_settings["num"], 0)
self.assertEqual(cm_settings["city"], "")
speedify.connectmethod("p2p")
cm_settings = speedify.show_connectmethod()
self.assertEqual(cm_settings["connectMethod"],"p2p")
self.assertEqual(cm_settings["country"], "")
self.assertEqual(cm_settings["num"], 0)
self.assertEqual(cm_settings["city"], "")
retval = speedify.connectmethod("country", country="sg")
cm_settings = speedify.show_connectmethod()
self.assertEqual(cm_settings["connectMethod"],"country")
self.assertEqual(cm_settings["country"], "sg")
# the settings were returned by the actual connectmethod call,
# and should be exactly the same
self.assertEqual(cm_settings["connectMethod"],retval["connectMethod"])
self.assertEqual(cm_settings["country"], retval["country"])
self.assertEqual(cm_settings["num"],retval["num"])
self.assertEqual(cm_settings["city"], retval["city"])
speedify.connectmethod("closest")
cm_settings = speedify.show_connectmethod()
self.assertEqual(cm_settings["connectMethod"],"closest")
self.assertEqual(cm_settings["country"], "")
self.assertEqual(cm_settings["num"], 0)
self.assertEqual(cm_settings["city"], "")
def test_version(self):
version = speedify.show_version()
self.assertIn("maj",version)
# expect at least Speedify 8.0
self.assertGreater(version["maj"], 7)
self.assertIn("min",version)
self.assertIn("bug",version)
self.assertIn("build",version)
def test_settings(self):
# test some basic settings
speedify.packetaggregation(False)
speedify.jumbo(False)
my_settings = speedify.show_settings()
self.assertFalse(my_settings["packetAggregation"])
self.assertFalse(my_settings["jumboPackets"])
speedify.packetaggregation(True)
speedify.jumbo(True)
my_settings = speedify.show_settings()
self.assertTrue(my_settings["packetAggregation"])
self.assertTrue(my_settings["jumboPackets"])
def test_badarguments(self):
# reaching into private methods to force some errors to be sure they're handled
try:
goterror = False
#invalid command
speedify._run_speedify_cmd(["invalidcommand"])
except speedify.SpeedifyError as sapie:
self.assertTrue("Unknown Parameter" in sapie.message)
goterror = True
self.assertTrue(goterror)
try:
#valid command, missing required argument
goterror = False
speedify._run_speedify_cmd(["overflow"])
except speedify.SpeedifyError as sapie:
self.assertTrue("Missing parameters" in sapie.message)
goterror = True
self.assertTrue(goterror)
try:
goterror = False
#valid command, invalid argument
speedify._run_speedify_cmd(["overflow", "bob"])
except speedify.SpeedifyError as sapie:
self.assertTrue("Invalid parameters" in sapie.message)
goterror = True
self.assertTrue(goterror)
def test_privacy(self):
speedify.crashreports(False)
privacy_settings = speedify.show_privacy()
self.assertFalse(privacy_settings["crashReports"])
speedify.crashreports(True)
privacy_settings = speedify.show_privacy()
self.assertTrue(privacy_settings["crashReports"])
if os.name == 'nt':
            # the Windows-only calls
speedify.killswitch(True)
privacy_settings = speedify.show_privacy()
self.assertTrue(privacy_settings["killswitch"])
speedify.killswitch(False)
privacy_settings = speedify.show_privacy()
self.assertFalse(privacy_settings["killswitch"])
else:
# shouldn't be there if we're not windows
with self.assertRaises(SpeedifyError):
logging.disable(logging.ERROR);
speedify.killswitch(True)
logging.disable(logging.NOTSET)
def test_routedefault(self):
speedify.connect()
if not speedifyutil.using_speedify():
time.sleep(3)
self.assertTrue(speedifyutil.using_speedify())
speedify.routedefault(False)
self.assertFalse(speedify.show_settings()[ "enableDefaultRoute"])
time.sleep(1)
if speedifyutil.using_speedify():
# try twice in case it takes a moment to settle
time.sleep(1)
self.assertFalse(speedifyutil.using_speedify())
speedify.routedefault(True)
# for whatever reason getting the route back takes longer than giving it up
self.assertTrue(speedify.show_settings()[ "enableDefaultRoute"])
time.sleep(2)
if not speedifyutil.using_speedify():
# try twice in case it takes a moment to settle
time.sleep(2)
self.assertTrue(speedifyutil.using_speedify())
def test_serverlist(self):
# also tests connecting to one server
server_list = speedify.show_servers()
self.assertIn("public", server_list)
public_list = server_list["public"]
server_info = public_list[0]
self.assertIn("tag", server_info)
self.assertIn("country", server_info)
self.assertIn("city", server_info)
self.assertIn("num", server_info)
self.assertFalse(server_info["isPrivate"])
connectstring = server_info["tag"]
new_server = speedify.connect(connectstring)
self.assertEqual(new_server["tag"], connectstring)
self.assertEqual(server_info["country"], new_server["country"])
self.assertEqual(server_info["city"], new_server["city"])
self.assertEqual(server_info["num"], new_server["num"])
def test_stats(self):
speedify.connect_closest()
report_list = speedify.stats(2)
        self.assertTrue(report_list)  # check for a non-empty list
        reports = [item[0] for item in report_list]
        self.assertIn("adapters", reports)  # check for at least one adapters report
def test_adapters(self):
adapters = speedify.show_adapters()
self.assertTrue(adapters)
adapterIDs = [adapter['adapterID'] for adapter in adapters]
self._set_and_test_adapter_list(adapterIDs, Priority.BACKUP, 10000000)
self._set_and_test_adapter_list(adapterIDs, Priority.ALWAYS, 0)
def test_encryption(self):
adapters = speedify.show_adapters()
self.assertTrue(adapters)
# just grab first adapter for testing
adapterID = [adapter['adapterID'] for adapter in adapters][0]
speedify.adapter_encryption(adapterID, False)
mysettings = speedify.show_settings()
perConnectionEncryptionEnabled = mysettings["perConnectionEncryptionEnabled"]
self.assertTrue(perConnectionEncryptionEnabled)
encrypted = mysettings["encrypted"]
perConnectionEncryptionSettings = mysettings["perConnectionEncryptionSettings"]
firstadapter = perConnectionEncryptionSettings[0]
self.assertEqual(firstadapter["adapterID"], adapterID)
self.assertEqual(firstadapter["encrypted"], False)
# main thing should still be encrypted just not our one adapter
self.assertTrue(encrypted)
speedify.encryption(False)
#this should both turn off encryption and wipe the custom settings
mysettings = speedify.show_settings()
perConnectionEncryptionEnabled = mysettings["perConnectionEncryptionEnabled"]
self.assertFalse(perConnectionEncryptionEnabled)
encrypted = mysettings["encrypted"]
self.assertFalse(encrypted)
# now let's test with only the adapter being encrypted
speedify.adapter_encryption(adapterID, True)
mysettings = speedify.show_settings()
perConnectionEncryptionEnabled = mysettings["perConnectionEncryptionEnabled"]
self.assertTrue(perConnectionEncryptionEnabled)
encrypted = mysettings["encrypted"]
perConnectionEncryptionSettings = mysettings["perConnectionEncryptionSettings"]
firstadapter = perConnectionEncryptionSettings[0]
self.assertEqual(firstadapter["adapterID"], adapterID)
self.assertEqual(firstadapter["encrypted"], True)
speedify.encryption(True)
#this should both turn on encryption and wipe the custom settings
mysettings = speedify.show_settings()
perConnectionEncryptionEnabled = mysettings["perConnectionEncryptionEnabled"]
self.assertFalse(perConnectionEncryptionEnabled)
encrypted = mysettings["encrypted"]
self.assertTrue(encrypted)
def _set_and_test_adapter_list(self, adapterIDs, priority, limit):
for adapterID in adapterIDs:
speedify.adapter_priority(adapterID, priority)
speedify.adapter_ratelimit(adapterID, limit)
speedify.adapter_datalimit_daily(adapterID, limit)
speedify.adapter_datalimit_monthly(adapterID, limit,0)
updated_adapters = speedify.show_adapters()
priorities = [adapter['priority'] for adapter in updated_adapters]
rate_limits = [adapter['rateLimit'] for adapter in updated_adapters]
daily_limits = [adapter['dataUsage']['usageDailyLimit'] for adapter in updated_adapters]
monthly_limits = [adapter['dataUsage']['usageMonthlyLimit'] for adapter in updated_adapters]
for set_priority, rate_limit, daily_limit, monthly_limit in zip(priorities, rate_limits, daily_limits, monthly_limits):
            # Disconnected adapters Speedify is aware of keep an unchangeable priority of NEVER
if (set_priority != Priority.NEVER.value):
self.assertEqual(set_priority, priority.value)
self.assertEqual(rate_limit, limit)
self.assertEqual(daily_limit, limit)
self.assertEqual(monthly_limit, limit)
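    # Sketch (not part of the original suite): the sleep-and-retry pattern used by
    # test_routedefault, factored into a reusable helper; the name and defaults
    # here are illustrative, not from the original file.
    def _wait_for_route_state(self, expected, retries=2, delay=2):
        for _ in range(retries):
            if speedifyutil.using_speedify() == expected:
                return True
            time.sleep(delay)
        return speedifyutil.using_speedify() == expected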
if __name__ == '__main__':
    speedifysettings.apply_speedify_settings(speedifysettings.speedify_defaults)
    unittest.main()
    # unittest.main() exits the process by default, so this restore line only
    # runs if main() is invoked with exit=False.
    speedifysettings.apply_speedify_settings(speedifysettings.speedify_defaults)
| 44.280645
| 179
| 0.678298
|
import os
import sys
sys.path.append('../')
import speedify
from speedify import State, Priority, SpeedifyError, SpeedifyAPIError
import speedifysettings
import speedifyutil
import logging
import unittest
import time
logging.basicConfig(handlers=[logging.FileHandler('test.log'),logging.StreamHandler(sys.stdout)],format='%(asctime)s\t%(levelname)s\t%(module)s\t%(message)s', level=logging.INFO)
class TestSpeedify(unittest.TestCase):
    # Note: doesn't test login/logout, since then we would have to deal with stored credentials.
def setUp(self):
speedify.encryption(True)
speedify.transport("auto")
speedify.jumbo(True)
speedify.crashreports(True)
speedify.packetaggregation(True)
speedify.routedefault(True)
speedify.connectmethod("closest")
speedify.disconnect()
def test_connect(self):
serverinfo = speedify.connect_closest()
state = speedify.show_state()
self.assertEqual(state,State.CONNECTED)
self.assertIn("tag", serverinfo)
self.assertIn("country", serverinfo)
def test_connect_country(self):
serverinfo = speedify.connect_country("sg")
state = speedify.show_state()
self.assertEqual(state,State.CONNECTED)
self.assertIn("tag", serverinfo)
self.assertIn("country", serverinfo)
self.assertEqual(serverinfo["country"], "sg")
new_serverinfo = speedify.show_currentserver()
self.assertEqual(new_serverinfo["country"], "sg")
def test_transport(self):
mysettings = speedify.transport("https")
serverinfo = speedify.connect()
mysettings = speedify.show_settings()
self.assertEqual(mysettings["transportMode"], "https")
mysettings = speedify.transport("tcp")
self.assertEqual(mysettings["transportMode"], "tcp")
serverinfo = speedify.connect()
mysettings = speedify.show_settings()
self.assertEqual(mysettings["transportMode"], "tcp")
def test_bad_country(self):
logging.info("Testing error handling, ignore next few errors")
state = speedify.show_state()
self.assertEqual(state,State.LOGGED_IN)
logging.debug("connecting to bad country")
with self.assertRaises(SpeedifyAPIError):
speedify.connect_country("pp")
logging.debug("after connecting to bad country")
state = speedify.show_state()
self.assertEqual(state,State.LOGGED_IN)
logging.info("Done testing error handling")
def test_disconnect(self):
speedify.connect_closest()
state = speedify.show_state()
self.assertEqual(state,State.CONNECTED)
speedify.disconnect()
state = speedify.show_state()
self.assertEqual(state,speedify.State.LOGGED_IN)
def test_connectmethod(self):
speedify.connect_closest()
speedify.connectmethod("private", "jp")
cm_settings = speedify.show_connectmethod()
self.assertEqual(cm_settings["connectMethod"],"private")
self.assertEqual(cm_settings["country"], "")
self.assertEqual(cm_settings["num"], 0)
self.assertEqual(cm_settings["city"], "")
speedify.connectmethod("p2p")
cm_settings = speedify.show_connectmethod()
self.assertEqual(cm_settings["connectMethod"],"p2p")
self.assertEqual(cm_settings["country"], "")
self.assertEqual(cm_settings["num"], 0)
self.assertEqual(cm_settings["city"], "")
retval = speedify.connectmethod("country", country="sg")
cm_settings = speedify.show_connectmethod()
self.assertEqual(cm_settings["connectMethod"],"country")
self.assertEqual(cm_settings["country"], "sg")
self.assertEqual(cm_settings["connectMethod"],retval["connectMethod"])
self.assertEqual(cm_settings["country"], retval["country"])
self.assertEqual(cm_settings["num"],retval["num"])
self.assertEqual(cm_settings["city"], retval["city"])
speedify.connectmethod("closest")
cm_settings = speedify.show_connectmethod()
self.assertEqual(cm_settings["connectMethod"],"closest")
self.assertEqual(cm_settings["country"], "")
self.assertEqual(cm_settings["num"], 0)
self.assertEqual(cm_settings["city"], "")
def test_version(self):
version = speedify.show_version()
self.assertIn("maj",version)
self.assertGreater(version["maj"], 7)
self.assertIn("min",version)
self.assertIn("bug",version)
self.assertIn("build",version)
def test_settings(self):
speedify.packetaggregation(False)
speedify.jumbo(False)
my_settings = speedify.show_settings()
self.assertFalse(my_settings["packetAggregation"])
self.assertFalse(my_settings["jumboPackets"])
speedify.packetaggregation(True)
speedify.jumbo(True)
my_settings = speedify.show_settings()
self.assertTrue(my_settings["packetAggregation"])
self.assertTrue(my_settings["jumboPackets"])
def test_badarguments(self):
try:
goterror = False
#invalid command
speedify._run_speedify_cmd(["invalidcommand"])
except speedify.SpeedifyError as sapie:
self.assertTrue("Unknown Parameter" in sapie.message)
goterror = True
self.assertTrue(goterror)
try:
#valid command, missing required argument
goterror = False
speedify._run_speedify_cmd(["overflow"])
except speedify.SpeedifyError as sapie:
self.assertTrue("Missing parameters" in sapie.message)
goterror = True
self.assertTrue(goterror)
try:
goterror = False
#valid command, invalid argument
speedify._run_speedify_cmd(["overflow", "bob"])
except speedify.SpeedifyError as sapie:
self.assertTrue("Invalid parameters" in sapie.message)
goterror = True
self.assertTrue(goterror)
def test_privacy(self):
speedify.crashreports(False)
privacy_settings = speedify.show_privacy()
self.assertFalse(privacy_settings["crashReports"])
speedify.crashreports(True)
privacy_settings = speedify.show_privacy()
self.assertTrue(privacy_settings["crashReports"])
if os.name == 'nt':
#the windows only calls
speedify.killswitch(True)
privacy_settings = speedify.show_privacy()
self.assertTrue(privacy_settings["killswitch"])
speedify.killswitch(False)
privacy_settings = speedify.show_privacy()
self.assertFalse(privacy_settings["killswitch"])
else:
# shouldn't be there if we're not windows
with self.assertRaises(SpeedifyError):
                logging.disable(logging.ERROR)
speedify.killswitch(True)
logging.disable(logging.NOTSET)
def test_routedefault(self):
speedify.connect()
if not speedifyutil.using_speedify():
time.sleep(3)
self.assertTrue(speedifyutil.using_speedify())
speedify.routedefault(False)
        self.assertFalse(speedify.show_settings()["enableDefaultRoute"])
time.sleep(1)
if speedifyutil.using_speedify():
# try twice in case it takes a moment to settle
time.sleep(1)
self.assertFalse(speedifyutil.using_speedify())
speedify.routedefault(True)
# for whatever reason getting the route back takes longer than giving it up
        self.assertTrue(speedify.show_settings()["enableDefaultRoute"])
time.sleep(2)
if not speedifyutil.using_speedify():
# try twice in case it takes a moment to settle
time.sleep(2)
self.assertTrue(speedifyutil.using_speedify())
def test_serverlist(self):
# also tests connecting to one server
server_list = speedify.show_servers()
self.assertIn("public", server_list)
public_list = server_list["public"]
server_info = public_list[0]
self.assertIn("tag", server_info)
self.assertIn("country", server_info)
self.assertIn("city", server_info)
self.assertIn("num", server_info)
self.assertFalse(server_info["isPrivate"])
connectstring = server_info["tag"]
new_server = speedify.connect(connectstring)
self.assertEqual(new_server["tag"], connectstring)
self.assertEqual(server_info["country"], new_server["country"])
self.assertEqual(server_info["city"], new_server["city"])
self.assertEqual(server_info["num"], new_server["num"])
def test_stats(self):
speedify.connect_closest()
report_list = speedify.stats(2)
self.assertTrue(report_list) #Check for non empty list
reports = [item[0] for item in report_list]
self.assertIn("adapters", reports) #Check for at least one adapters report
def test_adapters(self):
adapters = speedify.show_adapters()
self.assertTrue(adapters)
adapterIDs = [adapter['adapterID'] for adapter in adapters]
self._set_and_test_adapter_list(adapterIDs, Priority.BACKUP, 10000000)
self._set_and_test_adapter_list(adapterIDs, Priority.ALWAYS, 0)
def test_encryption(self):
adapters = speedify.show_adapters()
self.assertTrue(adapters)
# just grab first adapter for testing
adapterID = [adapter['adapterID'] for adapter in adapters][0]
speedify.adapter_encryption(adapterID, False)
mysettings = speedify.show_settings()
perConnectionEncryptionEnabled = mysettings["perConnectionEncryptionEnabled"]
self.assertTrue(perConnectionEncryptionEnabled)
encrypted = mysettings["encrypted"]
perConnectionEncryptionSettings = mysettings["perConnectionEncryptionSettings"]
firstadapter = perConnectionEncryptionSettings[0]
self.assertEqual(firstadapter["adapterID"], adapterID)
self.assertEqual(firstadapter["encrypted"], False)
# main thing should still be encrypted just not our one adapter
self.assertTrue(encrypted)
speedify.encryption(False)
#this should both turn off encryption and wipe the custom settings
mysettings = speedify.show_settings()
perConnectionEncryptionEnabled = mysettings["perConnectionEncryptionEnabled"]
self.assertFalse(perConnectionEncryptionEnabled)
encrypted = mysettings["encrypted"]
self.assertFalse(encrypted)
# now let's test with only the adapter being encrypted
speedify.adapter_encryption(adapterID, True)
mysettings = speedify.show_settings()
perConnectionEncryptionEnabled = mysettings["perConnectionEncryptionEnabled"]
self.assertTrue(perConnectionEncryptionEnabled)
encrypted = mysettings["encrypted"]
perConnectionEncryptionSettings = mysettings["perConnectionEncryptionSettings"]
firstadapter = perConnectionEncryptionSettings[0]
self.assertEqual(firstadapter["adapterID"], adapterID)
self.assertEqual(firstadapter["encrypted"], True)
speedify.encryption(True)
mysettings = speedify.show_settings()
perConnectionEncryptionEnabled = mysettings["perConnectionEncryptionEnabled"]
self.assertFalse(perConnectionEncryptionEnabled)
encrypted = mysettings["encrypted"]
self.assertTrue(encrypted)
def _set_and_test_adapter_list(self, adapterIDs, priority, limit):
for adapterID in adapterIDs:
speedify.adapter_priority(adapterID, priority)
speedify.adapter_ratelimit(adapterID, limit)
speedify.adapter_datalimit_daily(adapterID, limit)
speedify.adapter_datalimit_monthly(adapterID, limit,0)
updated_adapters = speedify.show_adapters()
priorities = [adapter['priority'] for adapter in updated_adapters]
rate_limits = [adapter['rateLimit'] for adapter in updated_adapters]
daily_limits = [adapter['dataUsage']['usageDailyLimit'] for adapter in updated_adapters]
monthly_limits = [adapter['dataUsage']['usageMonthlyLimit'] for adapter in updated_adapters]
for set_priority, rate_limit, daily_limit, monthly_limit in zip(priorities, rate_limits, daily_limits, monthly_limits):
if (set_priority != Priority.NEVER.value):
self.assertEqual(set_priority, priority.value)
self.assertEqual(rate_limit, limit)
self.assertEqual(daily_limit, limit)
self.assertEqual(monthly_limit, limit)
if __name__ == '__main__':
speedifysettings.apply_speedify_settings(speedifysettings.speedify_defaults)
unittest.main()
speedifysettings.apply_speedify_settings(speedifysettings.speedify_defaults)
| true
| true
|
790e92ef4e3361d6c911e119421f3766c2e32775
| 4,954
|
py
|
Python
|
atores.py
|
NTMaia/pythonbirds
|
1164fc08668dbbe976e68a1e88cee10a529a76f2
|
[
"MIT"
] | null | null | null |
atores.py
|
NTMaia/pythonbirds
|
1164fc08668dbbe976e68a1e88cee10a529a76f2
|
[
"MIT"
] | null | null | null |
atores.py
|
NTMaia/pythonbirds
|
1164fc08668dbbe976e68a1e88cee10a529a76f2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import math
DESTRUIDO = 'Destruido'
ATIVO = 'Ativo'
GRAVIDADE = 10 # m/s^2
class Ator():
"""
    Class that represents an actor. It represents a Cartesian point on the screen.
"""
_caracter_ativo = 'A'
_caracter_destruido = ' '
def __init__(self, x=0, y=0):
"""
        Class initializer. Must set the x, y, caracter and status attributes.
        :param x: initial horizontal position of the actor
        :param y: initial vertical position of the actor
"""
self.y = y
self.x = x
self.status = ATIVO
def caracter(self):
return self._caracter_ativo if self.status == ATIVO else self._caracter_destruido
def calcular_posicao(self, tempo):
"""
        Computes the actor's position at a given time.
        Time should be thought of as starting at 0 and advancing in 0.01-second steps.
        :param tempo: the game time
        :return: the actor's x, y position
"""
return self.x, self.y
def colidir(self, outro_ator, intervalo=1):
"""
        Runs the collision logic between two actors.
        A collision can only happen if both actors have an active status.
        For the collision test, a square with side equal to intervalo is considered around the point
        where each actor is located. If both actors fall inside that same square, their statuses must
        be changed to destroyed, and their characters to the destroyed one as well.
        :param outro_ator: the actor to consider in the collision
        :param intervalo: the interval (square side) to consider
:return:
"""
if self.status == ATIVO and outro_ator.status == ATIVO:
delta_x = abs(self.x - outro_ator.x)
delta_y = abs(self.y - outro_ator.y)
if delta_x <= intervalo and delta_y <= intervalo:
self.status = outro_ator.status = DESTRUIDO
class Obstaculo(Ator):
_caracter_ativo = 'O'
class Porco(Ator):
_caracter_ativo = '@'
_caracter_destruido = '+'
class DuploLancamentoExcecao(Exception):
pass
class Passaro(Ator):
velocidade_escalar = 10
def __init__(self, x=0, y=0):
"""
Método de inicialização de pássaro.
Deve chamar a inicialização de ator. Além disso, deve armazenar a posição inicial e incializar o tempo de
lançamento e angulo de lançamento
:param x:
:param y:
"""
super().__init__(x, y)
self._x_inicial = x
self._y_inicial = y
self._tempo_de_lancamento = None
        self._angulo_de_lancamento = None  # radians
def foi_lancado(self):
"""
        Returns True if the bird has already been launched, False otherwise.
        :return: boolean
        """
        return self._tempo_de_lancamento is not None
def colidir_com_chao(self):
"""
        Runs the collision-with-the-ground logic. Whenever y is less than or equal to 0,
        the bird's status must be changed to destroyed, and so must its character.
        """
        if self.y <= 0:
            self.status = DESTRUIDO
def calcular_posicao(self, tempo):
"""
        Computes the bird's position as a function of time.
        Before launch the bird must return its initial position.
        After launch the bird must compute its position from its initial position, scalar speed,
        launch angle, gravity (the GRAVIDADE constant) and the game time.
        After the collision, i.e. once its status is destroyed, the bird must simply return the last
        computed position.
        :param tempo: the game time at which to compute the position
        :return: x, y position
"""
if self.foi_lancado():
delta_t = tempo - self._tempo_de_lancamento
self._calcular_posicao_vertical(delta_t)
return super().calcular_posicao(tempo)
def lancar(self, angulo, tempo_de_lancamento):
"""
        Launch logic for the bird. Must store the launch angle and launch time for later calculations.
        The angle is given in degrees and must be converted to radians.
:param angulo:
:param tempo_de_lancamento:
:return:
"""
self._angulo_de_lancamento = angulo
self._tempo_de_lancamento = tempo_de_lancamento
def _calcular_posicao_vertical(self, delta_t):
y_atual = self._y_inicial
angulo_radianos = math.radians(self._angulo_de_lancamento)
y_atual += self.velocidade_escalar * delta_t * math.sin(angulo_radianos)
y_atual -= (GRAVIDADE * delta_t ** 2) / 2
self.y = y_atual
class PassaroAmarelo(Passaro):
_caracter_ativo = 'A'
_caracter_destruido = 'a'
velocidade_escalar = 30
class PassaroVermelho(Passaro):
_caracter_ativo = 'V'
_caracter_destruido = 'v'
velocidade_escalar = 20
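# Minimal usage sketch (not part of the original module): launch a yellow bird
# at 45 degrees and sample its vertical position during flight.
if __name__ == '__main__':
    passaro = PassaroAmarelo(x=0, y=1)
    passaro.lancar(45, tempo_de_lancamento=0)
    for t in (0.5, 1.0, 1.5):
        print(passaro.calcular_posicao(t))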
| 31.157233
| 118
| 0.648365
|
from __future__ import unicode_literals
import math
DESTRUIDO = 'Destruido'
ATIVO = 'Ativo'
GRAVIDADE = 10
class Ator():
_caracter_ativo = 'A'
_caracter_destruido = ' '
def __init__(self, x=0, y=0):
self.y = y
self.x = x
self.status = ATIVO
def caracter(self):
return self._caracter_ativo if self.status == ATIVO else self._caracter_destruido
def calcular_posicao(self, tempo):
return self.x, self.y
def colidir(self, outro_ator, intervalo=1):
if self.status == ATIVO and outro_ator.status == ATIVO:
delta_x = abs(self.x - outro_ator.x)
delta_y = abs(self.y - outro_ator.y)
if delta_x <= intervalo and delta_y <= intervalo:
self.status = outro_ator.status = DESTRUIDO
class Obstaculo(Ator):
_caracter_ativo = 'O'
class Porco(Ator):
_caracter_ativo = '@'
_caracter_destruido = '+'
class DuploLancamentoExcecao(Exception):
pass
class Passaro(Ator):
velocidade_escalar = 10
def __init__(self, x=0, y=0):
super().__init__(x, y)
self._x_inicial = x
self._y_inicial = y
self._tempo_de_lancamento = None
self._angulo_de_lancamento = None
def foi_lancado(self):
        return self._tempo_de_lancamento is not None
def colidir_com_chao(self):
        if self.y <= 0:
            self.status = DESTRUIDO
def calcular_posicao(self, tempo):
if self.foi_lancado():
delta_t = tempo - self._tempo_de_lancamento
self._calcular_posicao_vertical(delta_t)
return super().calcular_posicao(tempo)
def lancar(self, angulo, tempo_de_lancamento):
self._angulo_de_lancamento = angulo
self._tempo_de_lancamento = tempo_de_lancamento
def _calcular_posicao_vertical(self, delta_t):
y_atual = self._y_inicial
angulo_radianos = math.radians(self._angulo_de_lancamento)
y_atual += self.velocidade_escalar * delta_t * math.sin(angulo_radianos)
y_atual -= (GRAVIDADE * delta_t ** 2) / 2
self.y = y_atual
class PassaroAmarelo(Passaro):
_caracter_ativo = 'A'
_caracter_destruido = 'a'
velocidade_escalar = 30
class PassaroVermelho(Passaro):
_caracter_ativo = 'V'
_caracter_destruido = 'v'
velocidade_escalar = 20
| true
| true
|
790e93ff7de575f52fa099e1154b4823f74ba258
| 1,979
|
py
|
Python
|
brexit-word-freq/app.py
|
brittwitham/brexit-word-freq
|
943c9c83aee30d2866571a6b07ad1895a73c02de
|
[
"MIT"
] | null | null | null |
brexit-word-freq/app.py
|
brittwitham/brexit-word-freq
|
943c9c83aee30d2866571a6b07ad1895a73c02de
|
[
"MIT"
] | null | null | null |
brexit-word-freq/app.py
|
brittwitham/brexit-word-freq
|
943c9c83aee30d2866571a6b07ad1895a73c02de
|
[
"MIT"
] | null | null | null |
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
from pymongo import MongoClient
import json
import os
client = MongoClient(os.environ.get("DATABASE"))
db = client.politics.brexit
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
data = list(db.find({}, {"_id": 0}))
data_df = pd.json_normalize(data).set_index('timestamp')
top_columns = data_df.sum().sort_values(ascending=False)
top_10 = top_columns[0:10].index.tolist()
top10df = data_df[top_10].fillna(0).astype(int)
df = top10df[-12:]
cols = list(df.columns)
# Set up plot (note: this Figure is currently unused; the dcc.Graph below builds its own figure dict)
fig = go.Figure()
colors = {
'background': '#111111',
'text': '#7FDBFF'
}
app.layout = html.Div(children=[
html.H1(
children='#brexit',
style={
'textAlign': 'center',
'color': colors['text']
}
),
html.Div(children='Top Keywords used with the #brexit hashtag in the last 12 hours', style={
'textAlign': 'center',
'color': colors['text']
}),
dcc.Graph(
id='test-plot',
figure={
'data': [
go.Scatter(
x=df.index,
y=df[i],
name=i.replace('words.', ''),
line=dict(shape='spline', width=2),
opacity=0.8
) for i in cols[0:10]
],
'layout': go.Layout(
xaxis={'title': 'Time'},
yaxis={'title': 'Frequency'},
margin={'l': 40, 'b': 80, 't': 10, 'r': 10},
legend={'x': 0, 'y': 1},
hovermode='closest'
)
},
),
dcc.Interval(
id='interval-component',
interval=60*1000, # in milliseconds
n_intervals=0
)
])
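# Sketch (assumption, not in the original app): dcc.Interval only drives live
# updates when a callback consumes its 'n_intervals' property, e.g.:
#
# @app.callback(dash.dependencies.Output('test-plot', 'figure'),
#               [dash.dependencies.Input('interval-component', 'n_intervals')])
# def refresh_plot(n_intervals):
#     # re-query MongoDB and rebuild the figure exactly as in the layout above
#     ...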
if __name__ == '__main__':
app.run_server(debug=True)
| 24.432099
| 96
| 0.547246
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
from pymongo import MongoClient
import json
import os
client = MongoClient(os.environ.get("DATABASE"))
db = client.politics.brexit
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
data = list(db.find({}, {"_id": 0}))
data_df = pd.json_normalize(data).set_index('timestamp')
top_columns = data_df.sum().sort_values(ascending=False)
top_10 = top_columns[0:10].index.tolist()
top10df = data_df[top_10].fillna(0).astype(int)
df = top10df[-12:]
cols = list(df.columns)
fig = go.Figure()
colors = {
'background': '#111111',
'text': '#7FDBFF'
}
app.layout = html.Div(children=[
html.H1(
children='#brexit',
style={
'textAlign': 'center',
'color': colors['text']
}
),
html.Div(children='Top Keywords used with the #brexit hashtag in the last 12 hours', style={
'textAlign': 'center',
'color': colors['text']
}),
dcc.Graph(
id='test-plot',
figure={
'data': [
go.Scatter(
x=df.index,
y=df[i],
name=i.replace('words.', ''),
line=dict(shape='spline', width=2),
opacity=0.8
) for i in cols[0:10]
],
'layout': go.Layout(
xaxis={'title': 'Time'},
yaxis={'title': 'Frequency'},
margin={'l': 40, 'b': 80, 't': 10, 'r': 10},
legend={'x': 0, 'y': 1},
hovermode='closest'
)
},
),
dcc.Interval(
id='interval-component',
interval=60*1000,
n_intervals=0
)
])
if __name__ == '__main__':
app.run_server(debug=True)
| true
| true
|
790e948e136eebbcfb3e21e22100eb690d57bcb7
| 106
|
py
|
Python
|
matfin/utils/utils.py
|
bailez/matfin
|
23c78534f8a6f414066ef7f916b7b3b6096edbae
|
[
"MIT"
] | 2
|
2019-09-16T01:36:25.000Z
|
2020-10-29T01:21:28.000Z
|
matfin/utils/utils.py
|
bailez/matfin
|
23c78534f8a6f414066ef7f916b7b3b6096edbae
|
[
"MIT"
] | null | null | null |
matfin/utils/utils.py
|
bailez/matfin
|
23c78534f8a6f414066ef7f916b7b3b6096edbae
|
[
"MIT"
] | null | null | null |
class Errors:
def __init__(self):
pass
def min_nonetype(self):
pass
| 13.25
| 27
| 0.5
|
class Errors:
def __init__(self):
pass
def min_nonetype(self):
pass
| true
| true
|
790e9496b0319a06f10d52ec4a8bdf596b0d5dc3
| 1,543
|
py
|
Python
|
ykman/__init__.py
|
amake/yubikey-manager
|
ba049e1454a2435e4772b17fec88785a50373cf9
|
[
"BSD-2-Clause"
] | 1
|
2020-03-16T14:57:15.000Z
|
2020-03-16T14:57:15.000Z
|
ykman/__init__.py
|
amake/yubikey-manager
|
ba049e1454a2435e4772b17fec88785a50373cf9
|
[
"BSD-2-Clause"
] | 1
|
2018-05-07T07:34:23.000Z
|
2018-05-07T13:44:29.000Z
|
ykman/__init__.py
|
amake/yubikey-manager
|
ba049e1454a2435e4772b17fec88785a50373cf9
|
[
"BSD-2-Clause"
] | 1
|
2019-07-15T05:36:41.000Z
|
2019-07-15T05:36:41.000Z
|
# Copyright (c) 2015 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
with open(
os.path.join(
os.path.dirname(__file__), 'VERSION')) as version_file:
version = version_file.read().strip()
__version__ = version
| 40.605263
| 71
| 0.755023
|
import os
with open(
os.path.join(
os.path.dirname(__file__), 'VERSION')) as version_file:
version = version_file.read().strip()
__version__ = version
| true
| true
|
790e9517fc9e8f6c96158cd9c93faa4f3db7d729
| 3,878
|
py
|
Python
|
bughunter/action/assignment.py
|
ChrisTimperley/BugHunter
|
090201f491c50fff5a36507ea6861142570d18b9
|
[
"MIT"
] | 4
|
2017-03-01T05:10:01.000Z
|
2021-12-14T15:18:01.000Z
|
bughunter/action/assignment.py
|
ChrisTimperley/BugCollector
|
090201f491c50fff5a36507ea6861142570d18b9
|
[
"MIT"
] | 4
|
2016-10-16T22:57:17.000Z
|
2017-01-12T21:39:45.000Z
|
bughunter/action/assignment.py
|
ChrisTimperley/BugCollector
|
090201f491c50fff5a36507ea6861142570d18b9
|
[
"MIT"
] | null | null | null |
from bughunter.action.core import *
import cgum.statement
class ModifyAssignment(ReplaceRepairAction):
@staticmethod
def from_json(jsn, before, after):
return ReplaceRepairAction.from_json(ModifyAssignment, jsn, before, after)
@staticmethod
def detect(patch, stmts_bef, stmts_aft, actions):
modified = [a.frm() for a in actions['ModifyStatement'] \
if isinstance(a.frm(), cgum.statement.ExprStatement)]
assigns = []
for bef in modified:
assigns += \
bef.collect(lambda n: isinstance(n, cgum.expression.Assignment))
assigns = [(frm, patch.was_is(frm)) for frm in assigns]
actions['ModifyAssignment'] = \
            [ModifyAssignment(frm, to) for (frm, to) in assigns if to is not None]
def parts(self):
return [self.to()]
class ReplaceAssignmentLHS(ReplaceRepairAction):
@staticmethod
def from_json(jsn, before, after):
return ReplaceRepairAction.from_json(ReplaceAssignmentLHS, jsn, before, after)
@staticmethod
def detect(patch, stmts_bef, stmts_aft, actions):
l = [(a.frm(), a.to()) for a in actions['ModifyAssignment']]
l = [(frm, to) for (frm, to) in l \
if frm.op().equivalent(to.op())]
l = [(frm, to) for (frm, to) in l \
if not frm.lhs().equivalent(to.lhs())]
l = [(frm, to) for (frm, to) in l \
if frm.rhs().equivalent(to.rhs())]
actions['ReplaceAssignmentLHS'] = \
[ReplaceAssignmentLHS(frm, to) for (frm, to) in l]
def frm_assignment(self):
return self.frm()
def to_assignment(self):
return self.to()
def frm_lhs(self):
return self.frm_assignment().lhs()
def to_lhs(self):
return self.to_assignment().lhs()
def parts(self):
return [self.to_lhs()]
class ReplaceAssignmentRHS(ReplaceRepairAction):
@staticmethod
def from_json(jsn, before, after):
return ReplaceRepairAction.from_json(ReplaceAssignmentRHS, jsn, before, after)
@staticmethod
def detect(patch, stmts_bef, stmts_aft, actions):
l = [(a.frm(), a.to()) for a in actions['ModifyAssignment']]
l = [(frm, to) for (frm, to) in l \
if frm.op().equivalent(to.op())]
l = [(frm, to) for (frm, to) in l \
if frm.lhs().equivalent(to.lhs())]
l = [(frm, to) for (frm, to) in l \
if not frm.rhs().equivalent(to.rhs())]
actions['ReplaceAssignmentRHS'] = \
[ReplaceAssignmentRHS(frm, to) for (frm, to) in l]
def frm_assignment(self):
return self.frm()
def to_assignment(self):
return self.to()
def frm_rhs(self):
return self.frm_assignment().rhs()
def to_rhs(self):
return self.to_assignment().rhs()
def parts(self):
return [self.to_rhs()]
# Aligned with the sibling Replace* classes: this detector relies on the
# frm()/to() accessors and the from_json helper from ReplaceRepairAction.
class ReplaceAssignmentOp(ReplaceRepairAction):
@staticmethod
def from_json(jsn, before, after):
return ReplaceRepairAction.from_json(ReplaceAssignmentOp, jsn, before, after)
@staticmethod
def detect(patch, stmts_bef, stmts_aft, actions):
l = [(a.frm(), a.to()) for a in actions['ModifyAssignment']]
l = [(frm, to) for (frm, to) in l \
if not frm.op().equivalent(to.op())]
l = [(frm, to) for (frm, to) in l \
if frm.lhs().equivalent(to.lhs())]
l = [(frm, to) for (frm, to) in l \
if frm.rhs().equivalent(to.rhs())]
actions['ReplaceAssignmentOp'] = \
[ReplaceAssignmentOp(frm, to) for (frm, to) in l]
def frm_assignment(self):
return self.frm()
def to_assignment(self):
return self.to()
def frm_op(self):
return self.frm_assignment().op()
def to_op(self):
return self.to_assignment().op()
def parts(self):
return [self.to_op()]
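# Sketch (illustrative, not part of the original module): the three Replace*
# detectors above all keep (frm, to) pairs according to which single part of
# the assignment changed; the shared idea as one standalone helper:
def _changed_parts(frm, to):
    pairs = (('op', frm.op(), to.op()),
             ('lhs', frm.lhs(), to.lhs()),
             ('rhs', frm.rhs(), to.rhs()))
    return [name for name, a, b in pairs if not a.equivalent(b)]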
| 35.254545
| 86
| 0.596699
|
from bughunter.action.core import *
import cgum.statement
class ModifyAssignment(ReplaceRepairAction):
@staticmethod
def from_json(jsn, before, after):
return ReplaceRepairAction.from_json(ModifyAssignment, jsn, before, after)
@staticmethod
def detect(patch, stmts_bef, stmts_aft, actions):
modified = [a.frm() for a in actions['ModifyStatement'] \
if isinstance(a.frm(), cgum.statement.ExprStatement)]
assigns = []
for bef in modified:
assigns += \
bef.collect(lambda n: isinstance(n, cgum.expression.Assignment))
assigns = [(frm, patch.was_is(frm)) for frm in assigns]
actions['ModifyAssignment'] = \
            [ModifyAssignment(frm, to) for (frm, to) in assigns if to is not None]
def parts(self):
return [self.to()]
class ReplaceAssignmentLHS(ReplaceRepairAction):
@staticmethod
def from_json(jsn, before, after):
return ReplaceRepairAction.from_json(ReplaceAssignmentLHS, jsn, before, after)
@staticmethod
def detect(patch, stmts_bef, stmts_aft, actions):
l = [(a.frm(), a.to()) for a in actions['ModifyAssignment']]
l = [(frm, to) for (frm, to) in l \
if frm.op().equivalent(to.op())]
l = [(frm, to) for (frm, to) in l \
if not frm.lhs().equivalent(to.lhs())]
l = [(frm, to) for (frm, to) in l \
if frm.rhs().equivalent(to.rhs())]
actions['ReplaceAssignmentLHS'] = \
[ReplaceAssignmentLHS(frm, to) for (frm, to) in l]
def frm_assignment(self):
return self.frm()
def to_assignment(self):
return self.to()
def frm_lhs(self):
return self.frm_assignment().lhs()
def to_lhs(self):
return self.to_assignment().lhs()
def parts(self):
return [self.to_lhs()]
class ReplaceAssignmentRHS(ReplaceRepairAction):
@staticmethod
def from_json(jsn, before, after):
return ReplaceRepairAction.from_json(ReplaceAssignmentRHS, jsn, before, after)
@staticmethod
def detect(patch, stmts_bef, stmts_aft, actions):
l = [(a.frm(), a.to()) for a in actions['ModifyAssignment']]
l = [(frm, to) for (frm, to) in l \
if frm.op().equivalent(to.op())]
l = [(frm, to) for (frm, to) in l \
if frm.lhs().equivalent(to.lhs())]
l = [(frm, to) for (frm, to) in l \
if not frm.rhs().equivalent(to.rhs())]
actions['ReplaceAssignmentRHS'] = \
[ReplaceAssignmentRHS(frm, to) for (frm, to) in l]
def frm_assignment(self):
return self.frm()
def to_assignment(self):
return self.to()
def frm_rhs(self):
return self.frm_assignment().rhs()
def to_rhs(self):
return self.to_assignment().rhs()
def parts(self):
return [self.to_rhs()]
class ReplaceAssignmentOp(ReplaceRepairAction):
@staticmethod
def from_json(jsn, before, after):
return ReplaceRepairAction.from_json(ReplaceAssignmentOp, jsn, before, after)
@staticmethod
def detect(patch, stmts_bef, stmts_aft, actions):
l = [(a.frm(), a.to()) for a in actions['ModifyAssignment']]
l = [(frm, to) for (frm, to) in l \
if not frm.op().equivalent(to.op())]
l = [(frm, to) for (frm, to) in l \
if frm.lhs().equivalent(to.lhs())]
l = [(frm, to) for (frm, to) in l \
if frm.rhs().equivalent(to.rhs())]
actions['ReplaceAssignmentOp'] = \
[ReplaceAssignmentOp(frm, to) for (frm, to) in l]
def frm_assignment(self):
return self.frm()
def to_assignment(self):
return self.to()
def frm_op(self):
return self.frm_assignment().op()
def to_op(self):
return self.to_assignment().op()
def parts(self):
return [self.to_op()]
| true
| true
|
790e9587e3df9644c7c62a69564b8c0ebdbbcc61
| 2,219
|
py
|
Python
|
src/oci/management_agent/models/work_submission_key.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/management_agent/models/work_submission_key.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/management_agent/models/work_submission_key.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class WorkSubmissionKey(object):
"""
Work Submission Identifier
"""
def __init__(self, **kwargs):
"""
Initializes a new WorkSubmissionKey object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param work_submission_key:
The value to assign to the work_submission_key property of this WorkSubmissionKey.
:type work_submission_key: str
"""
self.swagger_types = {
'work_submission_key': 'str'
}
self.attribute_map = {
'work_submission_key': 'workSubmissionKey'
}
self._work_submission_key = None
@property
def work_submission_key(self):
"""
**[Required]** Gets the work_submission_key of this WorkSubmissionKey.
Work Submission Identifier
:return: The work_submission_key of this WorkSubmissionKey.
:rtype: str
"""
return self._work_submission_key
@work_submission_key.setter
def work_submission_key(self, work_submission_key):
"""
Sets the work_submission_key of this WorkSubmissionKey.
Work Submission Identifier
:param work_submission_key: The work_submission_key of this WorkSubmissionKey.
:type: str
"""
self._work_submission_key = work_submission_key
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
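# Minimal usage sketch (not part of the SDK source; the value is illustrative):
# key = WorkSubmissionKey(work_submission_key='example-work-id')
# print(key.work_submission_key)  # -> 'example-work-id'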
| 31.253521
| 245
| 0.68274
|
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class WorkSubmissionKey(object):
def __init__(self, **kwargs):
self.swagger_types = {
'work_submission_key': 'str'
}
self.attribute_map = {
'work_submission_key': 'workSubmissionKey'
}
self._work_submission_key = None
@property
def work_submission_key(self):
return self._work_submission_key
@work_submission_key.setter
def work_submission_key(self, work_submission_key):
self._work_submission_key = work_submission_key
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
790e95a19404e1861d1cbab10c43fd2f4ffb6cdf
| 757
|
py
|
Python
|
inf.py
|
sensiblecodeio/inf
|
41473129125c9a1b3f6dbff0cd559369afb9fc51
|
[
"MIT"
] | null | null | null |
inf.py
|
sensiblecodeio/inf
|
41473129125c9a1b3f6dbff0cd559369afb9fc51
|
[
"MIT"
] | null | null | null |
inf.py
|
sensiblecodeio/inf
|
41473129125c9a1b3f6dbff0cd559369afb9fc51
|
[
"MIT"
] | null | null | null |
from __future__ import division
import sys
class Inf(float):
__name__ = __name__
__file__ = __file__
@staticmethod
def div(p, q):
"""
``p / q`` returning the correct infinity instead of
raising ZeroDivisionError.
"""
from math import copysign
if q != 0.0:
# Normal case, no infinities.
return p / q
elif p == 0.0:
return p / q # Doesn't return, raises an Exception.
elif copysign(1, q) > 0:
# q is +0.0, return inf with same sign as p.
return copysign(inf, p)
else:
# q is -0.0, return inf with flipped sign.
return copysign(inf, -p)
sys.modules[__name__] = inf = Inf("+inf")
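# Usage sketch (not part of the original module): after `import inf`, the module
# object itself is the Inf instance above, so it behaves like float('+inf'):
#
# import inf
# assert inf > 10 ** 100
# assert inf.div(1.0, -0.0) == -inf   # signed-zero aware division
# assert inf.div(1.0, 2.0) == 0.5     # ordinary division is untouched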
| 22.939394
| 64
| 0.535007
|
from __future__ import division
import sys
class Inf(float):
__name__ = __name__
__file__ = __file__
@staticmethod
def div(p, q):
from math import copysign
if q != 0.0:
return p / q
elif p == 0.0:
return p / q
elif copysign(1, q) > 0:
# q is +0.0, return inf with same sign as p.
return copysign(inf, p)
else:
# q is -0.0, return inf with flipped sign.
return copysign(inf, -p)
sys.modules[__name__] = inf = Inf("+inf")
| true
| true
|
790e96b4bc0d2d2c2326832011eac23cc98d5cf4
| 109,421
|
py
|
Python
|
code/exp/v18.py
|
okotaku/pet_finder
|
380e4f19172e06e92b5b752f59e2902efa6aee1f
|
[
"MIT"
] | 34
|
2019-07-31T01:17:18.000Z
|
2020-11-15T20:01:30.000Z
|
code/exp/v18.py
|
okotaku/pet_finder
|
380e4f19172e06e92b5b752f59e2902efa6aee1f
|
[
"MIT"
] | null | null | null |
code/exp/v18.py
|
okotaku/pet_finder
|
380e4f19172e06e92b5b752f59e2902efa6aee1f
|
[
"MIT"
] | 6
|
2019-07-31T07:21:35.000Z
|
2021-05-21T12:46:06.000Z
|
# -*- coding: utf-8 -*-
'''
feature: v1, 2, 3, 4, 10, 11
feature: v1, 2, 3, 4, 11, 13, 14, 17, 18, 19, 22, 23
model: v10
'''
import itertools
import json
import gc
import glob
import os
import time
import cv2
import re
import nltk
import torch
import imagehash
import lightgbm as lgb
import xgboost as xgb
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import scipy as sp
from scipy.stats import rankdata
from PIL import Image
from pymagnitude import Magnitude
from gensim.models import word2vec, KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from contextlib import contextmanager
from functools import partial
from itertools import combinations
from logging import getLogger, Formatter, StreamHandler, FileHandler, INFO
from keras.applications.densenet import preprocess_input as preprocess_input_dense
from keras.applications.densenet import DenseNet121
from keras.applications.inception_resnet_v2 import preprocess_input as preprocess_input_incep
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras import backend as K
from keras.layers import GlobalAveragePooling2D, Input, Lambda, AveragePooling1D
from keras.models import Model
from keras.preprocessing.text import text_to_word_sequence
from sklearn.decomposition import LatentDirichletAllocation, TruncatedSVD, NMF
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics import cohen_kappa_score, mean_squared_error
from sklearn.model_selection import GroupKFold, StratifiedKFold, train_test_split
from sklearn.pipeline import make_pipeline, make_union
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.feature_extraction.text import _document_frequency
# ===============
# Constants
# ===============
COMPETITION_NAME = 'petfinder-adoption-prediction'
MODEL_NAME = 'v001'
logger = getLogger(COMPETITION_NAME)
LOGFORMAT = '%(asctime)s %(levelname)s %(message)s'
target = 'AdoptionSpeed'
len_train = 14993
len_test = 3948
T_flag = True
K_flag = True
G_flag = True
debug = False
# ===============
# Params
# ===============
seed = 777
kaeru_seed = 1337
n_splits = 5
np.random.seed(seed)
# feature engineering
n_components = 5
n_components_gege_img = 32
n_components_gege_txt = 16
img_size = 256
batch_size = 256
# model
MODEL_PARAMS = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'rmse',
'learning_rate': 0.01,
'num_leaves': 63,
'subsample': 0.9,
'subsample_freq': 1,
'colsample_bytree': 0.6,
'max_depth': 9,
'max_bin': 127,
'reg_alpha': 0.11,
'reg_lambda': 0.01,
'min_child_weight': 0.2,
'min_child_samples': 20,
'min_gain_to_split': 0.02,
'min_data_in_bin': 3,
'bin_construct_sample_cnt': 5000,
'cat_l2': 10,
'verbose': -1,
'nthread': -1,
'seed': 777,
}
KAERU_PARAMS = {'application': 'regression',
'boosting': 'gbdt',
'metric': 'rmse',
'num_leaves': 70,
'max_depth': 9,
'learning_rate': 0.01,
'max_bin': 32,
'bagging_freq': 2,
'bagging_fraction': 0.85,
'feature_fraction': 0.8,
'min_split_gain': 0.02,
'min_child_samples': 150,
'min_child_weight': 0.02,
'lambda_l2': 0.0475,
'verbosity': -1,
'seed': kaeru_seed}
ADV_PARAMS = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'num_leaves': 64,
'learning_rate': 0.02,
'verbose': 0,
'lambda_l1': 0.1,
'seed': 1213
}
MODEL_PARAMS_XGB = {
'eval_metric': 'rmse',
'seed': 1337,
'eta': 0.01,
'subsample': 0.8,
'colsample_bytree': 0.85,
'tree_method': 'gpu_hist',
'device': 'gpu',
'silent': 1,
}
FIT_PARAMS = {
'num_boost_round': 5000,
'early_stopping_rounds': 100,
'verbose_eval': 5000,
}
# define
maxvalue_dict = {}
categorical_features = [
'Breed1',
'Breed2',
'Color1',
'Color2',
'Color3',
'Dewormed',
'FurLength',
'Gender',
'Health',
'MaturitySize',
'State',
'Sterilized',
'Type',
'Vaccinated',
'Type_main_breed',
'BreedName_main_breed',
'Type_second_breed',
'BreedName_second_breed',
'BreedName_main_breed_all',
]
contraction_mapping = {u"ain’t": u"is not", u"aren’t": u"are not", u"can’t": u"cannot", u"’cause": u"because",
u"could’ve": u"could have", u"couldn’t": u"could not", u"didn’t": u"did not",
u"doesn’t": u"does not", u"don’t": u"do not", u"hadn’t": u"had not",
u"hasn’t": u"has not", u"haven’t": u"have not", u"he’d": u"he would",
u"he’ll": u"he will", u"he’s": u"he is", u"how’d": u"how did", u"how’d’y": u"how do you",
u"how’ll": u"how will", u"how’s": u"how is", u"I’d": u"I would",
u"I’d’ve": u"I would have", u"I’ll": u"I will", u"I’ll’ve": u"I will have",
u"I’m": u"I am", u"I’ve": u"I have", u"i’d": u"i would", u"i’d’ve": u"i would have",
u"i’ll": u"i will", u"i’ll’ve": u"i will have", u"i’m": u"i am", u"i’ve": u"i have",
u"isn’t": u"is not", u"it’d": u"it would", u"it’d’ve": u"it would have",
u"it’ll": u"it will", u"it’ll’ve": u"it will have", u"it’s": u"it is",
u"let’s": u"let us", u"ma’am": u"madam", u"mayn’t": u"may not",
u"might’ve": u"might have", u"mightn’t": u"might not", u"mightn’t’ve": u"might not have",
u"must’ve": u"must have", u"mustn’t": u"must not", u"mustn’t’ve": u"must not have",
u"needn’t": u"need not", u"needn’t’ve": u"need not have", u"o’clock": u"of the clock",
u"oughtn’t": u"ought not", u"oughtn’t’ve": u"ought not have", u"shan’t": u"shall not",
u"sha’n’t": u"shall not", u"shan’t’ve": u"shall not have", u"she’d": u"she would",
u"she’d’ve": u"she would have", u"she’ll": u"she will", u"she’ll’ve": u"she will have",
u"she’s": u"she is", u"should’ve": u"should have", u"shouldn’t": u"should not",
u"shouldn’t’ve": u"should not have", u"so’ve": u"so have", u"so’s": u"so as",
u"this’s": u"this is", u"that’d": u"that would", u"that’d’ve": u"that would have",
u"that’s": u"that is", u"there’d": u"there would", u"there’d’ve": u"there would have",
u"there’s": u"there is", u"here’s": u"here is", u"they’d": u"they would",
u"they’d’ve": u"they would have", u"they’ll": u"they will",
u"they’ll’ve": u"they will have", u"they’re": u"they are", u"they’ve": u"they have",
u"to’ve": u"to have", u"wasn’t": u"was not", u"we’d": u"we would",
u"we’d’ve": u"we would have", u"we’ll": u"we will", u"we’ll’ve": u"we will have",
u"we’re": u"we are", u"we’ve": u"we have", u"weren’t": u"were not",
u"what’ll": u"what will", u"what’ll’ve": u"what will have", u"what’re": u"what are",
u"what’s": u"what is", u"what’ve": u"what have", u"when’s": u"when is",
u"when’ve": u"when have", u"where’d": u"where did", u"where’s": u"where is",
u"where’ve": u"where have", u"who’ll": u"who will", u"who’ll’ve": u"who will have",
u"who’s": u"who is", u"who’ve": u"who have", u"why’s": u"why is", u"why’ve": u"why have",
u"will’ve": u"will have", u"won’t": u"will not", u"won’t’ve": u"will not have",
u"would’ve": u"would have", u"wouldn’t": u"would not", u"wouldn’t’ve": u"would not have",
u"y’all": u"you all", u"y’all’d": u"you all would", u"y’all’d’ve": u"you all would have",
u"y’all’re": u"you all are", u"y’all’ve": u"you all have", u"you’d": u"you would",
u"you’d’ve": u"you would have", u"you’ll": u"you will", u"you’ll’ve": u"you will have",
u"you’re": u"you are", u"you’ve": u"you have", u"cat’s": u"cat is", u" whatapp ": u" whatapps ",
u" whatssapp ": u" whatapps ", u" whatssap ": u" whatapps ", u" whatspp ": u" whatapps ",
u" whastapp ": u" whatapps ", u" whatsap ": u" whatapps ", u" whassap ": u" whatapps ",
u" watapps ": u" whatapps ", u"wetfood": u"wet food", u"intetested": u"interested",
u"领养条件,": u"领养条件", u"谢谢。": u"谢谢",
u"别打我,记住,我有反抗的牙齿,但我不会咬你。remember": u"别打我,记住,我有反抗的牙齿,但我不会咬你。",
u"有你。do": u"有你。", u"名字name": u"名字", u"year,": u"year", u"work,your": u"work your",
u"too,will": u"too will", u"timtams": u"timtam", u"spay。": u"spay", u"shoulder,a": u"shoulder a",
u"sherpherd": u"shepherd", u"sherphed": u"shepherd", u"sherperd": u"shepherd",
u"sherpard": u"shepherd", u"serious。": u"serious", u"remember,i": u"remember i",
u"recover,": u"recover", u"refundable指定期限内结扎后会全数奉还": u"refundable",
u"puchong区,有没有人有增添家庭成员?": u"puchong", u"puchong救的": u"puchong",
u"puchong,": u"puchong", u"month。": u"month", u"month,": u"month",
u"microchip(做狗牌一定要有主人的电话号码)": u"microchip", u"maju。": u"maju", u"maincoone": u"maincoon",
u"lumpur。": u"lumpur", u"location:阿里玛,大山脚": u"location", u"life🐾🐾": u"life",
u"kibble,": u"kibble", u"home…": u"home", u"hand,but": u"hand but", u"hair,a": u"hair a",
u"grey、brown": u"grey brown", u"gray,": u"gray", u"free免费": u"free", u"food,or": u"food or",
u"dog/dog": u"dog", u"dijumpa": u"dijumpai", u"dibela": u"dibelai",
u"beauuuuuuuuutiful": u"beautiful", u"adopt🙏": u"adopt", u"addopt": u"adopt",
u"enxiety": u"anxiety", u"vaksin": u"vaccine"}
numerical_features = []
text_features = ['Name', 'Description', 'Description_Emb', 'Description_bow']
meta_text = ['BreedName_main_breed', 'BreedName_second_breed', 'annots_top_desc', 'sentiment_text',
'annots_top_desc_pick', 'sentiment_entities']
remove = ['index', 'seq_text', 'PetID', 'Name', 'Description', 'RescuerID', 'StateName', 'annots_top_desc',
'sentiment_text',
'sentiment_entities', 'Description_Emb', 'Description_bow', 'annots_top_desc_pick']
kaeru_drop_cols = ["2017GDPperCapita", "Bumiputra", "Chinese", "HDI", "Indian", "Latitude", "Longitude",
'color_red_score_mean_mean', 'color_red_score_mean_sum', 'color_blue_score_mean_mean',
'color_blue_score_mean_sum', 'color_green_score_mean_mean', 'color_green_score_mean_sum',
'dog_cat_scores_mean_mean', 'dog_cat_scores_mean_sum', 'dog_cat_topics_mean_mean',
'dog_cat_topics_mean_sum', 'is_dog_or_cat_mean_mean', 'is_dog_or_cat_mean_sum',
'len_text_mean_mean', 'len_text_mean_sum', 'StateID']
gege_drop_cols = ['2017GDPperCapita', 'Breed1_equals_Breed2', 'Bumiputra', 'Chinese',
'HDI', 'Indian', 'Latitude', 'Longitude', 'Pop_density', 'Urban_pop', 'Breed1_equals_Breed2',
'fix_Breed1', 'fix_Breed2', 'single_Breed', 'color_red_score_mean_mean', 'color_red_score_mean_sum',
'color_red_score_mean_var', 'color_blue_score_mean_mean', 'color_blue_score_mean_sum',
'color_blue_score_mean_var', 'color_green_score_mean_mean', 'color_green_score_mean_sum',
'color_green_score_mean_var', 'dog_cat_scores_mean_mean', 'dog_cat_scores_mean_sum',
'dog_cat_scores_mean_var', 'dog_cat_topics_mean_mean', 'dog_cat_topics_mean_sum',
'dog_cat_topics_mean_var', 'is_dog_or_cat_mean_mean', 'is_dog_or_cat_mean_sum',
'is_dog_or_cat_mean_var', 'len_text_mean_mean', 'len_text_mean_sum', 'len_text_mean_var']
use_cols = pd.read_csv("../input/pet-usecols/importance10.csv")
# use_cols = pd.read_csv("importance9.csv")
use_cols["gain"] = use_cols["gain"] / use_cols["gain"].sum()
use_cols = list(use_cols[use_cols.gain > 0.0002].feature.values)
ps = nltk.stem.PorterStemmer()
lc = nltk.stem.lancaster.LancasterStemmer()
sb = nltk.stem.snowball.SnowballStemmer('english')
# ===============
# Utility Functions
# ===============
def to_category(train, cat=None):
if cat is None:
cat = [col for col in train.columns if train[col].dtype == 'object']
for c in cat:
train[c], uniques = pd.factorize(train[c])
maxvalue_dict[c] = train[c].max() + 1
return train
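# Sketch (illustrative): to_category factorizes every object column in place and
# records each cardinality in maxvalue_dict, e.g.
#
# df = pd.DataFrame({'Color': ['red', 'blue', 'red']})
# df = to_category(df)       # df['Color'] becomes [0, 1, 0]
# maxvalue_dict['Color']     # -> 2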
def init_logger():
# Add handlers
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter(LOGFORMAT))
fh_handler = FileHandler('{}.log'.format(MODEL_NAME))
fh_handler.setFormatter(Formatter(LOGFORMAT))
logger.setLevel(INFO)
logger.addHandler(handler)
logger.addHandler(fh_handler)
@contextmanager
def timer(name):
t0 = time.time()
yield
logger.info(f'[{name}] done in {time.time() - t0:.0f} s')
def load_image_and_hash(paths):
funcs = [
imagehash.average_hash,
imagehash.phash,
imagehash.dhash,
imagehash.whash,
# lambda x: imagehash.whash(x, mode='db4'),
]
petids = []
hashes = []
for path in paths:
image = Image.open(path)
imageid = path.split('/')[-1].split('.')[0][:-2]
petids.append(imageid)
hashes.append(np.array([f(image).hash for f in funcs]).reshape(256))
return petids, np.array(hashes).astype(np.int32)
def find_duplicates_all():
train_paths = glob.glob('../input/petfinder-adoption-prediction/train_images/*-1.jpg')
train_paths += glob.glob('../input/petfinder-adoption-prediction/train_images/*-2.jpg')
test_paths = glob.glob('../input/petfinder-adoption-prediction/test_images/*-1.jpg')
test_paths += glob.glob('../input/petfinder-adoption-prediction/test_images/*-2.jpg')
train_petids, train_hashes = load_image_and_hash(train_paths)
test_petids, test_hashes = load_image_and_hash(test_paths)
# sims = np.array([(train_hashes[i] == test_hashes).sum(axis=1)/256 for i in range(train_hashes.shape[0])])
train_hashes = torch.Tensor(train_hashes).cuda()
test_hashes = torch.Tensor(test_hashes).cuda()
sims = np.array(
[(train_hashes[i] == test_hashes).sum(dim=1).cpu().numpy() / 256 for i in range(train_hashes.shape[0])])
indices1 = np.where(sims > 0.9)
indices2 = np.where(indices1[0] != indices1[1])
petids1 = [train_petids[i] for i in indices1[0][indices2]]
petids2 = [test_petids[i] for i in indices1[1][indices2]]
dups = {tuple(sorted([petid1, petid2])): True for petid1, petid2 in zip(petids1, petids2)}
logger.info('found %d duplicates' % len(dups))
return dups
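# Note (sketch): each image is reduced to a 256-bit signature (4 hash functions
# x 64 bits); `sims > 0.9` keeps train/test pairs agreeing on more than ~230 of
# the 256 bits, which is treated as a duplicate listing.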
def submission_with_postprocess(y_pred):
df_sub = pd.read_csv('../input/petfinder-adoption-prediction/test/sample_submission.csv')
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
df_sub["AdoptionSpeed"] = y_pred
# postprocess
duplicated = find_duplicates_all()
duplicated = pd.DataFrame(duplicated, index=range(0)).T.reset_index()
duplicated.columns = ['pet_id_0', 'pet_id_1']
duplicated_0 = duplicated.merge(train[['PetID', 'AdoptionSpeed']], how='left', left_on='pet_id_0',
right_on='PetID').dropna()
df_sub = df_sub.merge(duplicated_0[['pet_id_1', 'AdoptionSpeed']],
how='left', left_on='PetID', right_on='pet_id_1', suffixes=('_original', ''))
df_sub['AdoptionSpeed'].fillna(df_sub['AdoptionSpeed_original'], inplace=True)
df_sub = df_sub[['PetID', 'AdoptionSpeed']]
duplicated_1 = duplicated.merge(train[['PetID', 'AdoptionSpeed']],
how='left', left_on='pet_id_1', right_on='PetID').dropna()
df_sub = df_sub.merge(duplicated_1[['pet_id_0', 'AdoptionSpeed']],
how='left', left_on='PetID', right_on='pet_id_0', suffixes=('_original', ''))
df_sub['AdoptionSpeed'].fillna(df_sub['AdoptionSpeed_original'], inplace=True)
df_sub = df_sub[['PetID', 'AdoptionSpeed']]
df_sub['AdoptionSpeed'] = df_sub['AdoptionSpeed'].astype('int32')
# submission
df_sub.to_csv('submission.csv', index=False)
def submission(y_pred):
logger.info('making submission file...')
df_sub = pd.read_csv('../input/petfinder-adoption-prediction/test/sample_submission.csv')
df_sub[target] = y_pred
df_sub.to_csv('submission.csv', index=False)
def analyzer_bow(text):
stop_words = ['i', 'a', 'an', 'the', 'to', 'and', 'or', 'if', 'is', 'are', 'am', 'it', 'this', 'that', 'of', 'from',
'in', 'on']
    text = text.lower()  # lowercase
    text = text.replace('\n', '')  # remove newlines
    text = text.replace('\t', '')  # remove tabs
puncts = r',.":)(-!?|;\'$&/[]>%=#*+\\•~@£·_{}©^®`<→°€™›♥←×§″′Â█½à…“★”–●â►−¢²¬░¶↑±¿▾═¦║―¥▓—‹─▒:¼⊕▼▪†■’▀¨▄♫☆é¯♦¤▲踾Ã⋅‘∞∙)↓、│(»,♪╩╚³・╦╣╔╗▬❤ïØ¹≤‡√。【】'
for punct in puncts:
text = text.replace(punct, f' {punct} ')
for bad_word in contraction_mapping:
if bad_word in text:
text = text.replace(bad_word, contraction_mapping[bad_word])
    text = text.split(' ')  # split on spaces
text = [sb.stem(t) for t in text]
words = []
for word in text:
        if (re.compile(r'^.*[0-9]+.*$').fullmatch(word) is not None):  # split tokens containing digits
for w in re.findall(r'(\d+|\D+)', word):
words.append(w)
continue
        if word in stop_words:  # drop stop words
            continue
        if len(word) < 2:  # drop empty and single-character tokens
continue
words.append(word)
return " ".join(words)
def analyzer_embed(text):
    text = text.lower()  # lowercase
    text = text.replace('\n', '')  # remove newlines
    text = text.replace('\t', '')  # remove tabs
puncts = r',.":)(-!?|;\'$&/[]>%=#*+\\•~@£·_{}©^®`<→°€™›♥←×§″′Â█½à…“★”–●â►−¢²¬░¶↑±¿▾═¦║―¥▓—‹─▒:¼⊕▼▪†■’▀¨▄♫☆é¯♦¤▲踾Ã⋅‘∞∙)↓、│(»,♪╩╚³・╦╣╔╗▬❤ïØ¹≤‡√。【】'
for punct in puncts:
text = text.replace(punct, f' {punct} ')
for bad_word in contraction_mapping:
if bad_word in text:
text = text.replace(bad_word, contraction_mapping[bad_word])
    text = text.split(' ')  # split on spaces
words = []
for word in text:
        if (re.compile(r'^.*[0-9]+.*$').fullmatch(word) is not None):  # split tokens containing digits
for w in re.findall(r'(\d+|\D+)', word):
words.append(w)
continue
        if len(word) < 1:  # drop empty tokens
continue
words.append(word)
return " ".join(words)
def analyzer_k(text):
stop_words = ['i', 'a', 'an', 'the', 'to', 'and', 'or', 'if', 'is', 'are', 'am', 'it', 'this', 'that', 'of', 'from',
'in', 'on']
    text = text.lower()  # lowercase
    text = text.replace('\n', '')  # remove newlines
    text = text.replace('\t', '')  # remove tabs
    text = re.sub(re.compile(r'[!-\/:-@[-`{-~]'), ' ', text)  # replace ASCII punctuation with spaces
    text = text.split(' ')  # split on spaces
words = []
for word in text:
        if (re.compile(r'^.*[0-9]+.*$').fullmatch(word) is not None):  # drop tokens containing digits
continue
        if word in stop_words:  # drop stop words
            continue
        if len(word) < 2:  # drop empty and single-character tokens
continue
words.append(word)
return words
# ===============
# Feature Engineering
# ===============
class GroupbyTransformer():
def __init__(self, param_dict=None):
self.param_dict = param_dict
def _get_params(self, p_dict):
key = p_dict['key']
if 'var' in p_dict.keys():
var = p_dict['var']
else:
var = self.var
if 'agg' in p_dict.keys():
agg = p_dict['agg']
else:
agg = self.agg
if 'on' in p_dict.keys():
on = p_dict['on']
else:
on = key
return key, var, agg, on
def _aggregate(self, dataframe):
self.features = []
for param_dict in self.param_dict:
key, var, agg, on = self._get_params(param_dict)
all_features = list(set(key + var))
new_features = self._get_feature_names(key, var, agg)
features = dataframe[all_features].groupby(key)[
var].agg(agg).reset_index()
features.columns = key + new_features
self.features.append(features)
return self
def _merge(self, dataframe, merge=True):
for param_dict, features in zip(self.param_dict, self.features):
key, var, agg, on = self._get_params(param_dict)
if merge:
dataframe = dataframe.merge(features, how='left', on=on)
else:
new_features = self._get_feature_names(key, var, agg)
dataframe = pd.concat([dataframe, features[new_features]], axis=1)
return dataframe
def transform(self, dataframe):
self._aggregate(dataframe)
return self._merge(dataframe, merge=True)
def _get_feature_names(self, key, var, agg):
_agg = []
for a in agg:
if not isinstance(a, str):
_agg.append(a.__name__)
else:
_agg.append(a)
return ['_'.join([a, v, 'groupby'] + key) for v in var for a in _agg]
def get_feature_names(self):
self.feature_names = []
for param_dict in self.param_dict:
key, var, agg, on = self._get_params(param_dict)
self.feature_names += self._get_feature_names(key, var, agg)
return self.feature_names
def get_numerical_features(self):
return self.get_feature_names()
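# Guarded usage sketch (hypothetical toy frame, not competition data):
# GroupbyTransformer aggregates each `var` by `key` and left-merges the result
# back as '<agg>_<var>_groupby_<key>' columns.
if False:
    _toy = pd.DataFrame({'State': [1, 1, 2], 'Fee': [0.0, 10.0, 20.0]})
    _gt = GroupbyTransformer(param_dict=[{'key': ['State'], 'var': ['Fee'], 'agg': ['mean']}])
    _toy = _gt.transform(_toy)  # adds mean_Fee_groupby_State = [5.0, 5.0, 20.0]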
class DiffGroupbyTransformer(GroupbyTransformer):
def _aggregate(self):
raise NotImplementedError
def _merge(self):
raise NotImplementedError
def transform(self, dataframe):
for param_dict in self.param_dict:
key, var, agg, on = self._get_params(param_dict)
for a in agg:
for v in var:
new_feature = '_'.join(['diff', a, v, 'groupby'] + key)
base_feature = '_'.join([a, v, 'groupby'] + key)
dataframe[new_feature] = dataframe[base_feature] - dataframe[v]
return dataframe
def _get_feature_names(self, key, var, agg):
_agg = []
for a in agg:
if not isinstance(a, str):
_agg.append(a.__name__)
else:
_agg.append(a)
return ['_'.join(['diff', a, v, 'groupby'] + key) for v in var for a in _agg]
class RatioGroupbyTransformer(GroupbyTransformer):
def _aggregate(self):
raise NotImplementedError
def _merge(self):
raise NotImplementedError
def transform(self, dataframe):
for param_dict in self.param_dict:
key, var, agg, on = self._get_params(param_dict)
for a in agg:
for v in var:
new_feature = '_'.join(['ratio', a, v, 'groupby'] + key)
base_feature = '_'.join([a, v, 'groupby'] + key)
dataframe[new_feature] = dataframe[v] / dataframe[base_feature]
return dataframe
def _get_feature_names(self, key, var, agg):
_agg = []
for a in agg:
if not isinstance(a, str):
_agg.append(a.__name__)
else:
_agg.append(a)
return ['_'.join(['ratio', a, v, 'groupby'] + key) for v in var for a in _agg]
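# Guarded sketch: Diff/RatioGroupbyTransformer do not aggregate anything
# themselves; they assume the plain '<agg>_<var>_groupby_<key>' columns already
# exist (created by GroupbyTransformer) and derive per-row differences and
# ratios against them. Hypothetical toy frame:
if False:
    _toy = pd.DataFrame({'State': [1, 1, 2], 'Fee': [0.0, 10.0, 20.0]})
    _pd = [{'key': ['State'], 'var': ['Fee'], 'agg': ['mean']}]
    _toy = GroupbyTransformer(param_dict=_pd).transform(_toy)
    _toy = DiffGroupbyTransformer(param_dict=_pd).transform(_toy)   # adds diff_mean_Fee_groupby_State
    _toy = RatioGroupbyTransformer(param_dict=_pd).transform(_toy)  # adds ratio_mean_Fee_groupby_State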
class CategoryVectorizer():
def __init__(self, categorical_columns, n_components,
vectorizer=CountVectorizer(),
transformer=LatentDirichletAllocation(),
name='CountLDA'):
self.categorical_columns = categorical_columns
self.n_components = n_components
self.vectorizer = vectorizer
self.transformer = transformer
self.name = name + str(self.n_components)
def transform(self, dataframe):
features = []
for (col1, col2) in self.get_column_pairs():
try:
sentence = self.create_word_list(dataframe, col1, col2)
sentence = self.vectorizer.fit_transform(sentence)
feature = self.transformer.fit_transform(sentence)
feature = self.get_feature(dataframe, col1, col2, feature, name=self.name)
features.append(feature)
            except Exception:
                pass  # skip column pairs that fail to vectorize (e.g. empty vocabulary)
features = pd.concat(features, axis=1)
return features
def create_word_list(self, dataframe, col1, col2):
col1_size = int(dataframe[col1].values.max() + 1)
col2_list = [[] for _ in range(col1_size)]
for val1, val2 in zip(dataframe[col1].values, dataframe[col2].values):
col2_list[int(val1)].append(col2 + str(val2))
return [' '.join(map(str, ls)) for ls in col2_list]
def get_feature(self, dataframe, col1, col2, latent_vector, name=''):
features = np.zeros(
shape=(len(dataframe), self.n_components), dtype=np.float32)
self.columns = ['_'.join([name, col1, col2, str(i)])
for i in range(self.n_components)]
for i, val1 in enumerate(dataframe[col1]):
features[i, :self.n_components] = latent_vector[val1]
return pd.DataFrame(data=features, columns=self.columns)
def get_column_pairs(self):
return [(col1, col2) for col1, col2 in itertools.product(self.categorical_columns, repeat=2) if col1 != col2]
def get_numerical_features(self):
return self.columns
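# Guarded sketch: for every ordered pair of categorical columns,
# CategoryVectorizer builds one "document" per value of the first column out of
# the co-occurring values of the second, embeds those documents (here
# CountVectorizer + LDA), and maps each row to its value's embedding.
# Hypothetical toy data; the transformer's n_components must match the class's.
if False:
    _toy = pd.DataFrame({'Type': [0, 0, 1, 1], 'Color1': [1, 2, 1, 1]})
    _cv = CategoryVectorizer(['Type', 'Color1'], n_components=2,
                             vectorizer=CountVectorizer(),
                             transformer=LatentDirichletAllocation(n_components=2))
    _emb = _cv.transform(_toy)  # columns like 'CountLDA2_Type_Color1_0'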
class BM25Transformer(BaseEstimator, TransformerMixin):
"""
Parameters
----------
use_idf : boolean, optional (default=True)
k1 : float, optional (default=2.0)
b : float, optional (default=0.75)
References
----------
Okapi BM25: a non-binary model - Introduction to Information Retrieval
http://nlp.stanford.edu/IR-book/html/htmledition/okapi-bm25-a-non-binary-model-1.html
"""
def __init__(self, use_idf=True, k1=2.0, b=0.75):
self.use_idf = use_idf
self.k1 = k1
self.b = b
def fit(self, X):
"""
Parameters
----------
X : sparse matrix, [n_samples, n_features] document-term matrix
"""
if not sp.sparse.issparse(X):
X = sp.sparse.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
idf = np.log((n_samples - df + 0.5) / (df + 0.5))
self._idf_diag = sp.sparse.spdiags(idf, diags=0, m=n_features, n=n_features)
doc_len = X.sum(axis=1)
self._average_document_len = np.average(doc_len)
return self
def transform(self, X, copy=True):
"""
Parameters
----------
X : sparse matrix, [n_samples, n_features] document-term matrix
copy : boolean, optional (default=True)
"""
        if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.floating):
# preserve float family dtype
X = sp.sparse.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
            X = sp.sparse.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
# Document length (number of terms) in each row
# Shape is (n_samples, 1)
doc_len = X.sum(axis=1)
# Number of non-zero elements in each row
# Shape is (n_samples, )
sz = X.indptr[1:] - X.indptr[0:-1]
# In each row, repeat `doc_len` for `sz` times
# Shape is (sum(sz), )
# Example
# -------
# dl = [4, 5, 6]
# sz = [1, 2, 3]
# rep = [4, 5, 5, 6, 6, 6]
rep = np.repeat(np.asarray(doc_len), sz)
# Compute BM25 score only for non-zero elements
nom = self.k1 + 1
denom = X.data + self.k1 * (1 - self.b + self.b * rep / self._average_document_len)
data = X.data * nom / denom
X = sp.sparse.csr_matrix((data, X.indices, X.indptr), shape=X.shape)
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
X = X * self._idf_diag
return X
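# Guarded worked example: BM25Transformer re-weights a document-term count
# matrix, saturating raw term frequency via k1 and normalizing by document
# length via b, then (with use_idf) multiplying by a BM25-style idf. The toy
# matrix below is hypothetical.
if False:
    _dtm = sp.sparse.csr_matrix(np.array([[3.0, 0.0, 1.0], [0.0, 2.0, 2.0]]))
    _bm25 = BM25Transformer(use_idf=True, k1=2.0, b=0.75).fit(_dtm)
    _scores = _bm25.transform(_dtm)  # sparse matrix of BM25 weights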
# ===============
# For pet
# ===============
def merge_state_info(train):
states = pd.read_csv('../input/petfinder-adoption-prediction/state_labels.csv')
state_info = pd.read_csv('../input/state-info/state_info.csv')
state_info.rename(columns={
'Area (km2)': 'Area',
'Pop. density': 'Pop_density',
'Urban pop.(%)': 'Urban_pop',
'Bumiputra (%)': 'Bumiputra',
'Chinese (%)': 'Chinese',
'Indian (%)': 'Indian'
}, inplace=True)
state_info['Population'] = state_info['Population'].str.replace(',', '').astype('int32')
state_info['Area'] = state_info['Area'].str.replace(',', '').astype('int32')
state_info['Pop_density'] = state_info['Pop_density'].str.replace(',', '').astype('int32')
state_info['2017GDPperCapita'] = state_info['2017GDPperCapita'].str.replace(',', '').astype('float32')
state_info['StateName'] = state_info['StateName'].str.replace('FT ', '')
state_info['StateName'] = state_info['StateName'].str.replace('Malacca', 'Melaka')
state_info['StateName'] = state_info['StateName'].str.replace('Penang', 'Pulau Pinang')
states = states.merge(state_info, how='left', on='StateName')
train = train.merge(states, how='left', left_on='State', right_on='StateID')
return train
def merge_breed_name(train):
breeds = pd.read_csv('../input/petfinder-adoption-prediction/breed_labels.csv')
with open("../input/cat-and-dog-breeds-parameters/rating.json", 'r', encoding='utf-8') as f:
breed_data = json.load(f)
cat_breed = pd.DataFrame.from_dict(breed_data['cat_breeds']).T
dog_breed = pd.DataFrame.from_dict(breed_data['dog_breeds']).T
df = pd.concat([dog_breed, cat_breed], axis=0).reset_index().rename(columns={'index': 'BreedName'})
df.BreedName.replace(
{
'Siamese Cat': 'Siamese',
'Chinese Crested': 'Chinese Crested Dog',
'Australian Cattle Dog': 'Australian Cattle Dog/Blue Heeler',
'Yorkshire Terrier': 'Yorkshire Terrier Yorkie',
'Pembroke Welsh Corgi': 'Welsh Corgi',
'Sphynx': 'Sphynx (hairless cat)',
'Plott': 'Plott Hound',
'Korean Jindo Dog': 'Jindo',
'Anatolian Shepherd Dog': 'Anatolian Shepherd',
'Belgian Malinois': 'Belgian Shepherd Malinois',
'Belgian Sheepdog': 'Belgian Shepherd Dog Sheepdog',
'Belgian Tervuren': 'Belgian Shepherd Tervuren',
'Bengal Cats': 'Bengal',
'Bouvier des Flandres': 'Bouvier des Flanders',
'Brittany': 'Brittany Spaniel',
'Caucasian Shepherd Dog': 'Caucasian Sheepdog (Caucasian Ovtcharka)',
'Dandie Dinmont Terrier': 'Dandi Dinmont Terrier',
'Bulldog': 'English Bulldog',
'American English Coonhound': 'English Coonhound',
'Small Munsterlander Pointer': 'Munsterlander',
'Entlebucher Mountain Dog': 'Entlebucher',
'Exotic': 'Exotic Shorthair',
'Flat-Coated Retriever': 'Flat-coated Retriever',
'English Foxhound': 'Foxhound',
'Alaskan Klee Kai': 'Klee Kai',
'Newfoundland': 'Newfoundland Dog',
'Norwegian Forest': 'Norwegian Forest Cat',
'Nova Scotia Duck Tolling Retriever': 'Nova Scotia Duck-Tolling Retriever',
'American Pit Bull Terrier': 'Pit Bull Terrier',
'Ragdoll Cats': 'Ragdoll',
'Standard Schnauzer': 'Schnauzer',
'Scottish Terrier': 'Scottish Terrier Scottie',
'Chinese Shar-Pei': 'Shar Pei',
'Shetland Sheepdog': 'Shetland Sheepdog Sheltie',
'West Highland White Terrier': 'West Highland White Terrier Westie',
'Soft Coated Wheaten Terrier': 'Wheaten Terrier',
'Wirehaired Pointing Griffon': 'Wire-haired Pointing Griffon',
'Xoloitzcuintli': 'Wirehaired Terrier',
'Cane Corso': 'Cane Corso Mastiff',
'Havana Brown': 'Havana',
}, inplace=True
)
breeds = breeds.merge(df, how='left', on='BreedName')
breeds1_dic, breeds2_dic = {}, {}
for c in breeds.columns:
if c == "BreedID":
continue
breeds1_dic[c] = c + "_main_breed_all"
breeds2_dic[c] = c + "_second_breed_all"
train = train.merge(breeds.rename(columns=breeds1_dic), how='left', left_on='Breed1', right_on='BreedID')
train.drop(['BreedID'], axis=1, inplace=True)
train = train.merge(breeds.rename(columns=breeds2_dic), how='left', left_on='Breed2', right_on='BreedID')
train.drop(['BreedID'], axis=1, inplace=True)
return train
def merge_breed_name_sub(train):
breeds = pd.read_csv('../input/petfinder-adoption-prediction/breed_labels.csv')
df = pd.read_json('../input/cat-and-dog-breeds-parameters/rating.json')
cat_df = df.cat_breeds.dropna(0).reset_index().rename(columns={'index': 'BreedName'})
dog_df = df.dog_breeds.dropna(0).reset_index().rename(columns={'index': 'BreedName'})
cat = cat_df['cat_breeds'].apply(lambda x: pd.Series(x))
cat_df = pd.concat([cat_df, cat], axis=1).drop(['cat_breeds'], axis=1)
dog = dog_df['dog_breeds'].apply(lambda x: pd.Series(x))
    dog_df = pd.concat([dog_df, dog], axis=1).drop(['dog_breeds'], axis=1)  # expand the dog ratings, not the cat ones
df = pd.concat([dog_df, cat_df])
df.BreedName.replace(
{
'Siamese Cat': 'Siamese',
'Chinese Crested': 'Chinese Crested Dog',
'Australian Cattle Dog': 'Australian Cattle Dog/Blue Heeler',
'Yorkshire Terrier': 'Yorkshire Terrier Yorkie',
'Pembroke Welsh Corgi': 'Welsh Corgi',
'Sphynx': 'Sphynx (hairless cat)',
'Plott': 'Plott Hound',
'Korean Jindo Dog': 'Jindo',
'Anatolian Shepherd Dog': 'Anatolian Shepherd',
'Belgian Malinois': 'Belgian Shepherd Malinois',
'Belgian Sheepdog': 'Belgian Shepherd Dog Sheepdog',
'Belgian Tervuren': 'Belgian Shepherd Tervuren',
'Bengal Cats': 'Bengal',
'Bouvier des Flandres': 'Bouvier des Flanders',
'Brittany': 'Brittany Spaniel',
'Caucasian Shepherd Dog': 'Caucasian Sheepdog (Caucasian Ovtcharka)',
'Dandie Dinmont Terrier': 'Dandi Dinmont Terrier',
'Bulldog': 'English Bulldog',
'American English Coonhound': 'English Coonhound',
'Small Munsterlander Pointer': 'Munsterlander',
'Entlebucher Mountain Dog': 'Entlebucher',
'Exotic': 'Exotic Shorthair',
'Flat-Coated Retriever': 'Flat-coated Retriever',
'English Foxhound': 'Foxhound',
'Alaskan Klee Kai': 'Klee Kai',
'Newfoundland': 'Newfoundland Dog',
'Norwegian Forest': 'Norwegian Forest Cat',
'Nova Scotia Duck Tolling Retriever': 'Nova Scotia Duck-Tolling Retriever',
'American Pit Bull Terrier': 'Pit Bull Terrier',
'Ragdoll Cats': 'Ragdoll',
'Standard Schnauzer': 'Schnauzer',
'Scottish Terrier': 'Scottish Terrier Scottie',
'Chinese Shar-Pei': 'Shar Pei',
'Shetland Sheepdog': 'Shetland Sheepdog Sheltie',
'West Highland White Terrier': 'West Highland White Terrier Westie',
'Soft Coated Wheaten Terrier': 'Wheaten Terrier',
'Wirehaired Pointing Griffon': 'Wire-haired Pointing Griffon',
'Xoloitzcuintli': 'Wirehaired Terrier',
'Cane Corso': 'Cane Corso Mastiff',
'Havana Brown': 'Havana',
}, inplace=True
)
breeds = breeds.merge(df, how='left', on='BreedName')
train = train.merge(breeds.rename(columns={'BreedName': 'BreedName_main_breed'}), how='left', left_on='Breed1',
right_on='BreedID', suffixes=('', '_main_breed'))
train.drop(['BreedID'], axis=1, inplace=True)
train = train.merge(breeds.rename(columns={'BreedName': 'BreedName_second_breed'}), how='left', left_on='Breed2',
right_on='BreedID', suffixes=('', '_second_breed'))
train.drop(['BreedID'], axis=1, inplace=True)
return train
def merge_breed_ranking(train):
breeds = pd.read_csv('../input/breed-labels-with-ranks/breed_labels_with_ranks.csv').drop("BreedName", axis=1)
train = train.merge(breeds, how="left", left_on="fix_Breed1", right_on="BreedID")
train = train.rename(columns={"BreedCatRank": "BreedCatRank_main", "BreedDogRank": "BreedDogRank_main"})
train = train.merge(breeds, how="left", left_on="fix_Breed2", right_on="BreedID")
train = train.rename(columns={"BreedCatRank": "BreedCatRank_second", "BreedDogRank": "BreedDogRank_second"})
return train
def breed_mismatch(train):
breed_labels = pd.read_csv('../input/petfinder-adoption-prediction/breed_labels.csv')
dog_breed_labels_set = list(breed_labels[breed_labels['Type'] == 1]['BreedID'])
dog_breed_labels_set.remove(307)
train['breeds_mismatch'] = list((train['Type'] == 2) & (
(train['fix_Breed1'].isin(dog_breed_labels_set)) | (train['fix_Breed2'].isin(dog_breed_labels_set))))
train['breeds_mismatch'] = train['breeds_mismatch'].astype(int)
return train
def breed_mismatch_desc(train):
train['desc_contain_dog'] = train['Description'].str.lower().str.contains(' dog | dogs ')
train['desc_contain_cat'] = train['Description'].str.lower().str.contains(' cat | cats ')
train['desc_miss_match'] = list((train['Type'] == 1) & (train['desc_contain_cat']))
train['desc_miss_match'] = train['desc_miss_match'].astype(int)
return train
def breed_mismatch_meta(train):
train['annot_contain_dog'] = train['annots_top_desc'].str.lower().str.contains(' dog | dogs ')
train['annot_contain_cat'] = train['annots_top_desc'].str.lower().str.contains(' cat | cats ')
train['annot_miss_match'] = list((train['Type'] == 1) & (train['annot_contain_cat']))
train['annot_miss_match'] = train['annot_miss_match'].astype(int)
return train
def extract_emojis(text, emoji_list):
return ' '.join(c for c in text if c in emoji_list)
def merge_emoji(train):
emoji = pd.read_csv('../input/emoji-sentiment-data/Emoji_Sentiment_Data_v1.0.csv')
emoji2 = pd.read_csv('../input/emoji-sentiment-data/Emojitracker_20150604.csv')
emoji = emoji.merge(emoji2, how='left', on='Emoji', suffixes=('', '_tracker'))
emoji_list = emoji['Emoji'].values
train_emoji = train['Description'].apply(extract_emojis, emoji_list=emoji_list)
train_emoji = pd.DataFrame([train['PetID'], train_emoji]).T.set_index('PetID')
train_emoji = train_emoji['Description'].str.extractall('(' + ')|('.join(emoji_list) + ')')
train_emoji = train_emoji.fillna(method='bfill', axis=1).iloc[:, 0].reset_index().rename(columns={0: 'Emoji'})
train_emoji = train_emoji.merge(emoji, how='left', on='Emoji')
emoji_columns = ['Occurrences', 'Position', 'Negative', 'Neutral', 'Positive', 'Occurrences_tracker']
stats = ['mean', 'max', 'min', 'median', 'std']
g = train_emoji.groupby('PetID')[emoji_columns].agg(stats)
g.columns = [c + '_' + stat for c in emoji_columns for stat in stats]
train = train.merge(g, how='left', on='PetID')
return train
def get_interactions(train):
interaction_features = ['Age', 'Quantity']
for (c1, c2) in combinations(interaction_features, 2):
train[c1 + '_mul_' + c2] = train[c1] * train[c2]
train[c1 + '_div_' + c2] = train[c1] / train[c2]
return train
def get_text_features(train):
train['Length_Description'] = train['Description'].map(len)
train['Length_annots_top_desc'] = train['annots_top_desc'].map(len)
train['Lengths_sentiment_text'] = train['sentiment_text'].map(len)
train['Lengths_sentiment_entities'] = train['sentiment_entities'].map(len)
return train
def get_name_features(train):
train['num_name_chars'] = train['Name'].apply(len)
train['num_name_capitals'] = train['Name'].apply(lambda x: sum(1 for c in x if c.isupper()))
train['name_caps_vs_length'] = train.apply(lambda row: row['num_name_capitals'] / (row['num_name_chars'] + 1e-5),
axis=1)
train['num_name_exclamation_marks'] = train['Name'].apply(lambda x: x.count('!'))
train['num_name_question_marks'] = train['Name'].apply(lambda x: x.count('?'))
train['num_name_punctuation'] = train['Name'].apply(lambda x: sum(x.count(w) for w in '.,;:'))
train['num_name_symbols'] = train['Name'].apply(lambda x: sum(x.count(w) for w in '*&$%'))
train['num_name_words'] = train['Name'].apply(lambda x: len(x.split()))
return train
class MetaDataParser(object):
def __init__(self):
# sentiment files
train_sentiment_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_sentiment/*.json'))
test_sentiment_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_sentiment/*.json'))
sentiment_files = train_sentiment_files + test_sentiment_files
self.sentiment_files = pd.DataFrame(sentiment_files, columns=['sentiment_filename'])
self.sentiment_files['PetID'] = self.sentiment_files['sentiment_filename'].apply(
lambda x: x.split('/')[-1].split('.')[0])
# metadata files
train_metadata_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_metadata/*.json'))
test_metadata_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_metadata/*.json'))
metadata_files = train_metadata_files + test_metadata_files
self.metadata_files = pd.DataFrame(metadata_files, columns=['metadata_filename'])
self.metadata_files['PetID'] = self.metadata_files['metadata_filename'].apply(
lambda x: x.split('/')[-1].split('-')[0])
def open_json_file(self, filename):
with open(filename, 'r', encoding="utf-8") as f:
metadata_file = json.load(f)
return metadata_file
def get_stats(self, array, name):
stats = [np.mean, np.max, np.min, np.sum, np.var]
result = {}
if len(array):
for stat in stats:
result[name + '_' + stat.__name__] = stat(array)
else:
for stat in stats:
result[name + '_' + stat.__name__] = 0
return result
def parse_sentiment_file(self, file):
file_sentiment = file['documentSentiment']
file_entities = [x['name'] for x in file['entities']]
file_entities = ' '.join(file_entities)
file_sentences_text = [x['text']['content'] for x in file['sentences']]
file_sentences_text = ' '.join(file_sentences_text)
file_sentences_sentiment = [x['sentiment'] for x in file['sentences']]
file_sentences_sentiment_sum = pd.DataFrame.from_dict(
file_sentences_sentiment, orient='columns').sum()
file_sentences_sentiment_sum = file_sentences_sentiment_sum.add_prefix('document_sum_').to_dict()
file_sentences_sentiment_mean = pd.DataFrame.from_dict(
file_sentences_sentiment, orient='columns').mean()
file_sentences_sentiment_mean = file_sentences_sentiment_mean.add_prefix('document_mean_').to_dict()
        file_sentences_sentiment_var = pd.DataFrame.from_dict(
            file_sentences_sentiment, orient='columns').var()  # variance, matching the document_var_ prefix
file_sentences_sentiment_var = file_sentences_sentiment_var.add_prefix('document_var_').to_dict()
file_sentiment.update(file_sentences_sentiment_mean)
file_sentiment.update(file_sentences_sentiment_sum)
file_sentiment.update(file_sentences_sentiment_var)
file_sentiment.update({"sentiment_text": file_sentences_text})
file_sentiment.update({"sentiment_entities": file_entities})
return pd.Series(file_sentiment)
def parse_metadata(self, file):
file_keys = list(file.keys())
if 'labelAnnotations' in file_keys:
label_annotations = file['labelAnnotations']
file_top_score = [x['score'] for x in label_annotations]
pick_value = int(len(label_annotations) * 0.3)
if pick_value == 0: pick_value = 1
file_top_score_pick = [x['score'] for x in label_annotations[:pick_value]]
file_top_desc = [x['description'] for x in label_annotations]
file_top_desc_pick = [x['description'] for x in label_annotations[:pick_value]]
dog_cat_scores = []
dog_cat_topics = []
is_dog_or_cat = []
for label in label_annotations:
if label['description'] == 'dog' or label['description'] == 'cat':
dog_cat_scores.append(label['score'])
dog_cat_topics.append(label['topicality'])
is_dog_or_cat.append(1)
else:
is_dog_or_cat.append(0)
else:
file_top_score = []
file_top_desc = []
dog_cat_scores = []
dog_cat_topics = []
is_dog_or_cat = []
file_top_score_pick = []
file_top_desc_pick = []
if 'faceAnnotations' in file_keys:
file_face = file['faceAnnotations']
n_faces = len(file_face)
else:
n_faces = 0
if 'textAnnotations' in file_keys:
text_annotations = file['textAnnotations']
file_n_text_annotations = len(text_annotations)
file_len_text = [len(text['description']) for text in text_annotations]
else:
file_n_text_annotations = 0
file_len_text = []
file_colors = file['imagePropertiesAnnotation']['dominantColors']['colors']
file_crops = file['cropHintsAnnotation']['cropHints']
file_color_score = [x['score'] for x in file_colors]
file_color_pixelfrac = [x['pixelFraction'] for x in file_colors]
file_color_red = [x['color']['red'] if 'red' in x['color'].keys() else 0 for x in file_colors]
file_color_blue = [x['color']['blue'] if 'blue' in x['color'].keys() else 0 for x in file_colors]
file_color_green = [x['color']['green'] if 'green' in x['color'].keys() else 0 for x in file_colors]
file_crop_conf = np.mean([x['confidence'] for x in file_crops])
file_crop_x = np.mean([x['boundingPoly']['vertices'][1]['x'] for x in file_crops])
file_crop_y = np.mean([x['boundingPoly']['vertices'][3]['y'] for x in file_crops])
if 'importanceFraction' in file_crops[0].keys():
file_crop_importance = np.mean([x['importanceFraction'] for x in file_crops])
else:
file_crop_importance = 0
metadata = {
'annots_top_desc': ' '.join(file_top_desc),
'annots_top_desc_pick': ' '.join(file_top_desc_pick),
'annots_score_pick_mean': np.mean(file_top_score_pick),
'n_faces': n_faces,
'n_text_annotations': file_n_text_annotations,
'crop_conf': file_crop_conf,
'crop_x': file_crop_x,
'crop_y': file_crop_y,
'crop_importance': file_crop_importance,
}
metadata.update(self.get_stats(file_top_score, 'annots_score_normal'))
metadata.update(self.get_stats(file_color_score, 'color_score'))
metadata.update(self.get_stats(file_color_pixelfrac, 'color_pixel_score'))
metadata.update(self.get_stats(file_color_red, 'color_red_score'))
metadata.update(self.get_stats(file_color_blue, 'color_blue_score'))
metadata.update(self.get_stats(file_color_green, 'color_green_score'))
metadata.update(self.get_stats(dog_cat_scores, 'dog_cat_scores'))
metadata.update(self.get_stats(dog_cat_topics, 'dog_cat_topics'))
metadata.update(self.get_stats(is_dog_or_cat, 'is_dog_or_cat'))
metadata.update(self.get_stats(file_len_text, 'len_text'))
metadata.update({"color_red_score_first": file_color_red[0] if len(file_color_red) > 0 else -1})
metadata.update({"color_blue_score_first": file_color_blue[0] if len(file_color_blue) > 0 else -1})
metadata.update({"color_green_score_first": file_color_green[0] if len(file_color_green) > 0 else -1})
metadata.update({"color_pixel_score_first": file_color_pixelfrac[0] if len(file_color_pixelfrac) > 0 else -1})
metadata.update({"color_score_first": file_color_score[0] if len(file_color_score) > 0 else -1})
metadata.update({"label_score_first": file_top_score[0] if len(file_top_score) > 0 else -1})
return pd.Series(metadata)
def _transform(self, path, sentiment=True):
file = self.open_json_file(path)
if sentiment:
result = self.parse_sentiment_file(file)
else:
result = self.parse_metadata(file)
return result
def pretrained_w2v(train_text, model, name):
train_corpus = [text_to_word_sequence(text) for text in train_text]
result = []
for text in train_corpus:
n_skip = 0
vec = np.zeros(model.vector_size)
for n_w, word in enumerate(text):
            if word in model:  # 0.9906
                vec = vec + model[word]  # index the KeyedVectors directly
                continue
            word_ = word.upper()
            if word_ in model:  # 0.9909
                vec = vec + model[word_]
                continue
            word_ = word.capitalize()
            if word_ in model:  # 0.9925
                vec = vec + model[word_]
                continue
            word_ = ps.stem(word)
            if word_ in model:  # 0.9927
                vec = vec + model[word_]
                continue
            word_ = lc.stem(word)
            if word_ in model:  # 0.9932
                vec = vec + model[word_]
                continue
            word_ = sb.stem(word)
            if word_ in model:  # 0.9933
                vec = vec + model[word_]
                continue
            else:
                n_skip += 1
                continue
vec = vec / (n_w - n_skip + 1)
result.append(vec)
w2v_cols = ["{}{}".format(name, i) for i in range(1, model.vector_size + 1)]
result = pd.DataFrame(result)
result.columns = w2v_cols
return result
def w2v_pymagnitude(train_text, model, name):
train_corpus = [text_to_word_sequence(text) for text in train_text]
result = []
for text in train_corpus:
vec = np.zeros(model.dim)
for n_w, word in enumerate(text):
if word in model: # 0.9906
vec = vec + model.query(word)
continue
word_ = word.upper()
if word_ in model: # 0.9909
vec = vec + model.query(word_)
continue
word_ = word.capitalize()
if word_ in model: # 0.9925
vec = vec + model.query(word_)
continue
word_ = ps.stem(word)
if word_ in model: # 0.9927
vec = vec + model.query(word_)
continue
word_ = lc.stem(word)
if word_ in model: # 0.9932
vec = vec + model.query(word_)
continue
word_ = sb.stem(word)
if word_ in model: # 0.9933
vec = vec + model.query(word_)
continue
vec = vec + model.query(word)
vec = vec / (n_w + 1)
result.append(vec)
w2v_cols = ["{}{}".format(name, i) for i in range(1, model.dim + 1)]
result = pd.DataFrame(result)
result.columns = w2v_cols
return result
def doc2vec(description_k, d2v_param):
corpus = [TaggedDocument(words=analyzer_k(text), tags=[i]) for i, text in enumerate(description_k)]
doc2vecs = Doc2Vec(
documents=corpus, dm=1,
**d2v_param
) # dm == 1 -> dmpv, dm != 1 -> DBoW
doc2vecs = np.array([doc2vecs.infer_vector(analyzer_k(text)) for text in description_k])
doc2vec_df = pd.DataFrame()
doc2vec_df['d2v_mean'] = np.mean(doc2vecs, axis=1)
doc2vec_df['d2v_sum'] = np.sum(doc2vecs, axis=1)
doc2vec_df['d2v_max'] = np.max(doc2vecs, axis=1)
doc2vec_df['d2v_min'] = np.min(doc2vecs, axis=1)
doc2vec_df['d2v_median'] = np.median(doc2vecs, axis=1)
doc2vec_df['d2v_var'] = np.var(doc2vecs, axis=1)
return doc2vec_df
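# Guarded sketch: doc2vec() trains a dmpv Doc2Vec on the texts and keeps only
# row-wise summary statistics of the inferred vectors (mean/sum/max/min/
# median/var), not the raw embedding. Hypothetical call, using the standard
# gensim parameter names assumed in the main block below:
if False:
    _d2v = doc2vec(pd.Series(['a playful kitten', 'an old dog']),
                   {'vector_size': 10, 'min_count': 1, 'window': 2,
                    'sample': 1e-3, 'epochs': 2})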
def resize_to_square(im):
old_size = im.shape[:2] # old_size is in (height, width) format
ratio = float(img_size) / max(old_size)
new_size = tuple([int(x * ratio) for x in old_size])
# new_size should be in (width, height) format
im = cv2.resize(im, (new_size[1], new_size[0]))
delta_w = img_size - new_size[1]
delta_h = img_size - new_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
color = [0, 0, 0]
new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
return new_im
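# Guarded sketch: resize_to_square() letterboxes an arbitrary image into an
# img_size x img_size canvas, preserving aspect ratio and padding the borders
# with black; img_size is the global defined in the setup section.
if False:
    _img = np.zeros((100, 200, 3), dtype=np.uint8)  # hypothetical 100x200 image
    _sq = resize_to_square(_img)                    # -> (img_size, img_size, 3)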
def load_image(path, preprocessing):
    image = cv2.imread(path)
    new_image = resize_to_square(image)
    new_image = preprocessing(new_image)
    return new_image
def get_age_feats(df):
df["Age_year"] = (df["Age"] / 12).astype(np.int32)
over_1year_flag = df["Age"] / 12 >= 1
df.loc[over_1year_flag, "over_1year"] = 1
df.loc[~over_1year_flag, "over_1year"] = 0
return df
def freq_encoding(df, freq_cols):
for c in freq_cols:
count_df = df.groupby([c])['PetID'].count().reset_index()
count_df.columns = [c, '{}_freq'.format(c)]
df = df.merge(count_df, how='left', on=c)
return df
def getSize(filename):
st = os.stat(filename)
return st.st_size
def getDimensions(filename):
img_size = Image.open(filename).size
return img_size
def is_zh(in_str):
"""
    If converting the string to SJIS loses characters, it contains simplified
    Chinese characters, so treat the text as Chinese.
"""
return (set(in_str) - set(in_str.encode('sjis', 'ignore').decode('sjis'))) != set([])
# ===============
# Model
# ===============
def get_score(y_true, y_pred):
return cohen_kappa_score(y_true, y_pred, weights='quadratic')
def get_y():
return pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv', usecols=[target]).values.flatten()
def run_model(X_train, y_train, X_valid, y_valid, X_test,
categorical_features,
predictors, maxvalue_dict, fold_id, params, model_name):
train = lgb.Dataset(X_train, y_train,
categorical_feature=categorical_features,
feature_name=predictors)
valid = lgb.Dataset(X_valid, y_valid,
categorical_feature=categorical_features,
feature_name=predictors)
evals_result = {}
model = lgb.train(
params,
train,
valid_sets=[valid],
valid_names=['valid'],
evals_result=evals_result,
**FIT_PARAMS
)
logger.info(f'Best Iteration: {model.best_iteration}')
# train score
y_pred_train = model.predict(X_train)
train_rmse = np.sqrt(mean_squared_error(y_train, y_pred_train))
# validation score
y_pred_valid = model.predict(X_valid)
valid_rmse = np.sqrt(mean_squared_error(y_valid, y_pred_valid))
y_pred_valid = rankdata(y_pred_valid) / len(y_pred_valid)
# save model
model.save_model(f'{model_name}_fold{fold_id}.txt')
# predict test
y_pred_test = model.predict(X_test)
y_pred_test = rankdata(y_pred_test) / len(y_pred_test)
# save predictions
np.save(f'{model_name}_train_fold{fold_id}.npy', y_pred_valid)
np.save(f'{model_name}_test_fold{fold_id}.npy', y_pred_test)
return y_pred_valid, y_pred_test, train_rmse, valid_rmse
def run_xgb_model(X_train, y_train, X_valid, y_valid, X_test,
predictors, maxvalue_dict, fold_id, params, model_name):
d_train = xgb.DMatrix(data=X_train, label=y_train, feature_names=predictors)
d_valid = xgb.DMatrix(data=X_valid, label=y_valid, feature_names=predictors)
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
model = xgb.train(dtrain=d_train, evals=watchlist, params=params, **FIT_PARAMS)
# train score
y_pred_train = model.predict(d_train, ntree_limit=model.best_ntree_limit)
train_rmse = np.sqrt(mean_squared_error(y_train, y_pred_train))
# validation score
y_pred_valid = model.predict(d_valid, ntree_limit=model.best_ntree_limit)
valid_rmse = np.sqrt(mean_squared_error(y_valid, y_pred_valid))
y_pred_valid = rankdata(y_pred_valid) / len(y_pred_valid)
# save model
model.save_model(f'{model_name}_fold{fold_id}.txt')
# predict test
y_pred_test = model.predict(xgb.DMatrix(data=X_test, feature_names=predictors), ntree_limit=model.best_ntree_limit)
y_pred_test = rankdata(y_pred_test) / len(y_pred_test)
# save predictions
np.save(f'{model_name}_train_fold{fold_id}.npy', y_pred_valid)
np.save(f'{model_name}_test_fold{fold_id}.npy', y_pred_test)
return y_pred_valid, y_pred_test, train_rmse, valid_rmse
def plot_mean_feature_importances(feature_importances, max_num=50, importance_type='gain', path=None):
mean_gain = feature_importances[[importance_type, 'feature']].groupby('feature').mean()
feature_importances['mean_' + importance_type] = feature_importances['feature'].map(mean_gain[importance_type])
if path is not None:
data = feature_importances.sort_values('mean_' + importance_type, ascending=False).iloc[:max_num, :]
plt.clf()
plt.figure(figsize=(16, 8))
sns.barplot(x=importance_type, y='feature', data=data)
plt.tight_layout()
plt.savefig(path)
return feature_importances
def to_bins(x, borders):
for i in range(len(borders)):
if x <= borders[i]:
return i
return len(borders)
class OptimizedRounder(object):
def __init__(self):
self.coef_ = 0
def _loss(self, coef, X, y, idx):
X_p = np.array([to_bins(pred, coef) for pred in X])
ll = -get_score(y, X_p)
return ll
def fit(self, X, y):
coef = [0.2, 0.4, 0.6, 0.8]
golden1 = 0.618
golden2 = 1 - golden1
ab_start = [(0.01, 0.3), (0.15, 0.56), (0.35, 0.75), (0.6, 0.9)]
for it1 in range(10):
for idx in range(4):
# golden section search
a, b = ab_start[idx]
# calc losses
coef[idx] = a
la = self._loss(coef, X, y, idx)
coef[idx] = b
lb = self._loss(coef, X, y, idx)
for it in range(20):
# choose value
if la > lb:
a = b - (b - a) * golden1
coef[idx] = a
la = self._loss(coef, X, y, idx)
else:
b = b - (b - a) * golden2
coef[idx] = b
lb = self._loss(coef, X, y, idx)
self.coef_ = {'x': coef}
def predict(self, X, coef):
X_p = np.array([to_bins(pred, coef) for pred in X])
return X_p
def coefficients(self):
return self.coef_['x']
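# Guarded sketch: to_bins() maps a continuous prediction to the first border it
# does not exceed, and OptimizedRounder golden-section-searches the four
# borders that maximize quadratic weighted kappa. Hypothetical arrays:
if False:
    _optr = OptimizedRounder()
    _optr.fit(np.array([0.1, 0.3, 0.5, 0.7, 0.9]), np.array([0, 1, 2, 3, 4]))
    _labels = _optr.predict(np.array([0.25, 0.85]), _optr.coefficients())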
class StratifiedGroupKFold():
def __init__(self, n_splits=5):
self.n_splits = n_splits
def split(self, X, y=None, groups=None):
fold = pd.DataFrame([X, y, groups]).T
fold.columns = ['X', 'y', 'groups']
fold['y'] = fold['y'].astype(int)
g = fold.groupby('groups')['y'].agg('mean').reset_index()
fold = fold.merge(g, how='left', on='groups', suffixes=('', '_mean'))
fold['y_mean'] = fold['y_mean'].apply(np.round)
fold['fold_id'] = 0
for unique_y in fold['y_mean'].unique():
mask = fold.y_mean == unique_y
selected = fold[mask].reset_index(drop=True)
            cv = GroupKFold(n_splits=self.n_splits)
for i, (train_index, valid_index) in enumerate(
cv.split(range(len(selected)), y=None, groups=selected['groups'])):
selected.loc[valid_index, 'fold_id'] = i
fold.loc[mask, 'fold_id'] = selected['fold_id'].values
for i in range(self.n_splits):
indices = np.arange(len(fold))
train_index = indices[fold['fold_id'] != i]
valid_index = indices[fold['fold_id'] == i]
yield train_index, valid_index
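# Guarded sketch: StratifiedGroupKFold keeps every group in exactly one
# validation fold while balancing the (rounded) mean label per group across
# folds; the pipeline uses RescuerID as the group. Hypothetical arrays:
if False:
    _X = np.arange(8)
    _y = np.ones(8, dtype=int)
    _groups = np.array([0, 0, 1, 1, 2, 2, 3, 3])
    for _tr_idx, _va_idx in StratifiedGroupKFold(n_splits=2).split(_X, _y, _groups):
        pass  # integer index arrays; each group lands in one validation fold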
if __name__ == '__main__':
init_logger()
t_cols, k_cols, g_cols = [], [], []
# load
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
test = pd.read_csv('../input/petfinder-adoption-prediction/test/test.csv')
    train = pd.concat([train, test], sort=True).reset_index(drop=True)  # reset index so later axis=1 concats align row-wise
train[['Description', 'Name']] = train[['Description', 'Name']].astype(str)
train["Description_Emb"] = [analyzer_embed(text) for text in train["Description"]]
train["Description_bow"] = [analyzer_bow(text) for text in train["Description"]]
train['fix_Breed1'] = train['Breed1']
train['fix_Breed2'] = train['Breed2']
train.loc[train['Breed1'] == 0, 'fix_Breed1'] = train[train['Breed1'] == 0]['Breed2']
train.loc[train['Breed1'] == 0, 'fix_Breed2'] = train[train['Breed1'] == 0]['Breed1']
train['Breed1_equals_Breed2'] = (train['Breed1'] == train['Breed2']).astype(int)
train['single_Breed'] = (train['Breed1'] * train['Breed2'] == 0).astype(int)
    # NOTE: both Breed1/Breed2 and fix_Breed1/fix_Breed2 are kept; the feature
    # code below references both sets of columns, so no drop/rename is applied.
logger.info(f'DataFrame shape: {train.shape}')
with timer('common features'):
with timer('merge additional state files'):
train = merge_state_info(train)
common_cols = list(train.columns)
with timer('merge additional breed rating files'):
orig_cols = list(train.columns)
train = merge_breed_name_sub(train)
t_cols += [c for c in train.columns if c not in orig_cols]
k_cols += [c for c in train.columns if c not in orig_cols]
orig_cols = list(train.columns)
train = merge_breed_name(train)
g_cols += [c for c in train.columns if c not in orig_cols and "_main_breed_all" in c] + [
"Type_second_breed"]
with timer('preprocess category features'):
train = to_category(train, cat=categorical_features)
            train[text_features] = train[text_features].fillna('missing')  # assign back: inplace fillna on a slice does not modify train
        with timer('preprocess metadata'):  # kaeru and takuoko use different column sets here: kaeru keeps all first_* aggregates, takuoko does not
# TODO: parallelization
meta_parser = MetaDataParser()
sentiment_features = meta_parser.sentiment_files['sentiment_filename'].apply(
lambda x: meta_parser._transform(x, sentiment=True))
meta_parser.sentiment_files = pd.concat([meta_parser.sentiment_files, sentiment_features], axis=1,
sort=False)
meta_features = meta_parser.metadata_files['metadata_filename'].apply(
lambda x: meta_parser._transform(x, sentiment=False))
meta_parser.metadata_files = pd.concat([meta_parser.metadata_files, meta_features], axis=1, sort=False)
stats = ['mean']
columns = [c for c in sentiment_features.columns if c not in ['sentiment_text', 'sentiment_entities']]
g = meta_parser.sentiment_files[list(sentiment_features.columns) + ['PetID']].groupby('PetID').agg(stats)
g.columns = [c + '_' + stat for c in columns for stat in stats]
train = train.merge(g, how='left', on='PetID')
            k_cols += [c for c in g.columns if re.match(r"\w*_mean_\w*mean", c)] + ["magnitude_mean", "score_mean"]
            t_cols += [c for c in g.columns if re.match(r"\w*_sum_\w*mean", c)] + ["magnitude_mean", "score_mean"]
g_cols += list(g.columns)
stats = ['mean', 'min', 'max', 'median', 'var', 'sum', 'first']
columns = [c for c in meta_features.columns if c not in ['annots_top_desc', 'annots_top_desc_pick']]
g = meta_parser.metadata_files[columns + ['PetID']].groupby('PetID').agg(stats)
g.columns = [c + '_' + stat for c in columns for stat in stats]
train = train.merge(g, how='left', on='PetID')
k_cols += [c for c in g.columns if
("mean_mean" in c or "mean_sum" in c or "first_first" in c) and "annots_score_normal" not in c] + \
['crop_conf_first', 'crop_x_first', 'crop_y_first', 'crop_importance_first', 'crop_conf_mean',
'crop_conf_sum', 'crop_importance_mean', 'crop_importance_sum']
            t_cols += [c for c in g.columns
                       if ((re.match(r"\w*_sum_\w*(?<!sum)$", c) and "first" not in c)
                           or ("sum" not in c and "first" not in c))
                       and "annots_score_pick" not in c]
g_cols += [c for c in g.columns if
"mean_mean" in c or "mean_sum" in c or "mean_var" in c and "annots_score_pick" not in c] + \
['crop_conf_mean', 'crop_conf_sum', 'crop_conf_var', 'crop_importance_mean',
'crop_importance_sum', 'crop_importance_var']
with timer('preprocess metatext'):
meta_features = meta_parser.metadata_files[['PetID', 'annots_top_desc', 'annots_top_desc_pick']]
meta_features_all = meta_features.groupby('PetID')['annots_top_desc'].apply(
lambda x: " ".join(x)).reset_index()
train = train.merge(meta_features_all, how='left', on='PetID')
meta_features_pick = meta_features.groupby('PetID')['annots_top_desc_pick'].apply(
lambda x: " ".join(x)).reset_index()
train = train.merge(meta_features_pick, how='left', on='PetID')
sentiment_features = meta_parser.sentiment_files[['PetID', 'sentiment_text', 'sentiment_entities']]
sentiment_features_txt = sentiment_features.groupby('PetID')['sentiment_text'].apply(
lambda x: " ".join(x)).reset_index()
train = train.merge(sentiment_features_txt, how='left', on='PetID')
sentiment_features_entities = sentiment_features.groupby('PetID')['sentiment_entities'].apply(
lambda x: " ".join(x)).reset_index()
train = train.merge(sentiment_features_entities, how='left', on='PetID')
train[meta_text] = train[meta_text].astype(str)
            train[meta_text] = train[meta_text].fillna("missing")  # assign back: inplace fillna on a slice does not modify train
del meta_features_all, meta_features_pick, meta_features, sentiment_features;
gc.collect()
with timer('make image features'):
train_image_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_images/*.jpg'))
test_image_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_images/*.jpg'))
image_files = train_image_files + test_image_files
train_images = pd.DataFrame(image_files, columns=['image_filename'])
train_images['PetID'] = train_images['image_filename'].apply(lambda x: x.split('/')[-1].split('-')[0])
with timer('breed mismatch features'):
train = breed_mismatch(train)
train = breed_mismatch_desc(train)
train = breed_mismatch_meta(train)
t_cols += ['breeds_mismatch', 'desc_contain_dog', 'desc_contain_cat', 'desc_miss_match',
'annot_contain_dog', 'annot_contain_cat', 'annot_miss_match']
k_cols += ['breeds_mismatch', 'desc_miss_match', 'annot_miss_match']
with timer('preprocess densenet'):
if debug:
import feather
X = feather.read_dataframe("feature/dense121_2_X.feather")
gp_img = X.groupby("PetID").mean().reset_index()
train = pd.merge(train, gp_img, how="left", on="PetID")
gp_dense_first = X.groupby("PetID").first().reset_index()
t_cols += list(gp_img.drop("PetID", axis=1).columns)
del gp_img;
gc.collect()
else:
pet_ids = train_images['PetID'].values
img_pathes = train_images['image_filename'].values
n_batches = len(pet_ids) // batch_size + 1
inp = Input((256, 256, 3))
backbone = DenseNet121(input_tensor=inp,
weights='../input/densenet121weights/densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',
include_top=False)
x = backbone.output
x = GlobalAveragePooling2D()(x)
x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)
x = AveragePooling1D(4)(x)
out = Lambda(lambda x: x[:, :, 0])(x)
m = Model(inp, out)
features = []
for b in range(n_batches):
start = b * batch_size
end = (b + 1) * batch_size
batch_pets = pet_ids[start: end]
batch_path = img_pathes[start: end]
batch_images = np.zeros((len(batch_pets), img_size, img_size, 3))
for i, (pet_id, path) in enumerate(zip(batch_pets, batch_path)):
                        try:
                            batch_images[i] = load_image(path, preprocess_input_dense)
                        except Exception:
                            try:  # retry once; image reads can fail transiently
                                batch_images[i] = load_image(path, preprocess_input_dense)
                            except Exception:
                                pass  # leave the zero image for unreadable files
batch_preds = m.predict(batch_images)
for i, pet_id in enumerate(batch_pets):
features.append([pet_id] + list(batch_preds[i]))
X = pd.DataFrame(features,
columns=["PetID"] + ["dense121_2_{}".format(i) for i in range(batch_preds.shape[1])])
gp_img = X.groupby("PetID").mean().reset_index()
train = pd.merge(train, gp_img, how="left", on="PetID")
gp_dense_first = X.groupby("PetID").first().reset_index()
t_cols += list(gp_img.drop("PetID", axis=1).columns)
del m, gp_img;
gc.collect();
K.clear_session()
if T_flag:
with timer('takuoko features'):
orig_cols = train.columns
with timer('merge emoji files'):
train = merge_emoji(train)
with timer('preprocess breed files'):
train = merge_breed_ranking(train)
with timer('preprocess and simple features'):
train = get_interactions(train)
with timer('tfidf + svd / nmf / bm25'):
vectorizer = make_pipeline(
TfidfVectorizer(),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['tfidf_svd_{}'.format(i) for i in range(n_components)]
+ ['tfidf_nmf_{}'.format(i) for i in range(n_components)]
+ ['tfidf_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('count + svd / nmf / bm25'):
vectorizer = make_pipeline(
CountVectorizer(),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['count_svd_{}'.format(i) for i in range(n_components)]
+ ['count_nmf_{}'.format(i) for i in range(n_components)]
+ ['count_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('tfidf2 + svd / nmf / bm25'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=2, max_features=20000,
strip_accents='unicode', analyzer='word', token_pattern=r'\w{1,}',
ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english'),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['tfidf2_svd_{}'.format(i) for i in range(n_components)]
+ ['tfidf2_nmf_{}'.format(i) for i in range(n_components)]
+ ['tfidf2_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('count2 + svd / nmf / bm25'):
vectorizer = make_pipeline(
CountVectorizer(min_df=2, max_features=20000,
strip_accents='unicode', analyzer='word', token_pattern=r'\w{1,}',
ngram_range=(1, 3), stop_words='english'),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['count2_svd_{}'.format(i) for i in range(n_components)]
+ ['count2_nmf_{}'.format(i) for i in range(n_components)]
+ ['count2_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('tfidf3 + svd / nmf / bm25'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=30, max_features=50000, binary=True,
strip_accents='unicode', analyzer='char', token_pattern=r'\w{1,}',
ngram_range=(3, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english'),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['tfidf3_svd_{}'.format(i) for i in range(n_components)]
+ ['tfidf3_nmf_{}'.format(i) for i in range(n_components)]
+ ['tfidf3_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('count3 + svd / nmf / bm25'):
vectorizer = make_pipeline(
CountVectorizer(min_df=30, max_features=50000, binary=True,
strip_accents='unicode', analyzer='char', token_pattern=r'\w{1,}',
ngram_range=(3, 3), stop_words='english'),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['count3_svd_{}'.format(i) for i in range(n_components)]
+ ['count3_nmf_{}'.format(i) for i in range(n_components)]
+ ['count3_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('meta text bow/tfidf->svd / nmf / bm25'):
train['desc'] = ''
for c in ['BreedName_main_breed', 'BreedName_second_breed', 'annots_top_desc', 'sentiment_text']:
train['desc'] += ' ' + train[c].astype(str)
train["desc_bow"] = [analyzer_bow(text) for text in train["desc"]]
vectorizer = make_pipeline(
TfidfVectorizer(),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['desc_bow'])
X = pd.DataFrame(X, columns=['meta_desc_tfidf_svd_{}'.format(i) for i in range(n_components)]
+ ['meta_desc_tfidf_nmf_{}'.format(i) for i in range(n_components)]
+ ['meta_desc_tfidf_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
vectorizer = make_pipeline(
CountVectorizer(),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['desc_bow'])
X = pd.DataFrame(X, columns=['meta_desc_count_svd_{}'.format(i) for i in range(n_components)]
+ ['meta_desc_count_nmf_{}'.format(i) for i in range(n_components)]
+ ['meta_desc_count_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
train.drop(['desc_bow', 'desc'], axis=1, inplace=True)
with timer('description fasttext'):
embedding = '../input/quora-embedding/GoogleNews-vectors-negative300.bin'
model = KeyedVectors.load_word2vec_format(embedding, binary=True)
X = pretrained_w2v(train["Description_Emb"], model, name="gnvec")
train = pd.concat([train, X], axis=1)
del model;
gc.collect()
with timer('description glove'):
embedding = "../input/pymagnitude-data/glove.840B.300d.magnitude"
model = Magnitude(embedding)
X = w2v_pymagnitude(train["Description_Emb"], model, name="glove_mag")
train = pd.concat([train, X], axis=1)
del model;
gc.collect()
with timer('image features'):
train['num_images'] = train['PetID'].apply(lambda x: sum(train_images.PetID == x))
train['num_images_per_pet'] = train['num_images'] / train['Quantity']
with timer('make inception resnet features'):
if debug:
import feather
X = feather.read_dataframe("feature/inception_resnet.feather")
train = pd.concat((train, X), axis=1)
else:
pet_ids = train_images['PetID'].values
img_pathes = train_images['image_filename'].values
n_batches = len(pet_ids) // batch_size + 1
inp = Input((256, 256, 3))
backbone = InceptionResNetV2(input_tensor=inp,
weights='../input/inceptionresnetv2/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5',
include_top=False)
x = backbone.output
x = GlobalAveragePooling2D()(x)
x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)
x = AveragePooling1D(4)(x)
out = Lambda(lambda x: x[:, :, 0])(x)
m = Model(inp, out)
features = []
for b in range(n_batches):
start = b * batch_size
end = (b + 1) * batch_size
batch_pets = pet_ids[start: end]
batch_path = img_pathes[start: end]
batch_images = np.zeros((len(batch_pets), img_size, img_size, 3))
for i, (pet_id, path) in enumerate(zip(batch_pets, batch_path)):
                            try:
                                batch_images[i] = load_image(path, preprocess_input_incep)
                            except Exception:
                                try:  # retry once; image reads can fail transiently
                                    batch_images[i] = load_image(path, preprocess_input_incep)
                                except Exception:
                                    pass  # leave the zero image for unreadable files
batch_preds = m.predict(batch_images)
for i, pet_id in enumerate(batch_pets):
features.append([pet_id] + list(batch_preds[i]))
X = pd.DataFrame(features, columns=["PetID"] + ["inception_resnet_{}".format(i) for i in
range(batch_preds.shape[1])])
gp_img = X.groupby("PetID").mean().reset_index()
train = pd.merge(train, gp_img, how="left", on="PetID")
del m, gp_img;
gc.collect();
K.clear_session()
with timer('aggregation'):
stats = ['mean', 'sum', 'median', 'min', 'max', 'var']
groupby_dict = [
{
'key': ['Name'],
'var': ['Age'],
'agg': ['count']
},
{
'key': ['RescuerID'],
'var': ['Age'],
'agg': ['count']
},
{
'key': ['RescuerID', 'State'],
'var': ['Age'],
'agg': ['count']
},
{
'key': ['RescuerID', 'Type'],
'var': ['Age'],
'agg': ['count']
},
{
'key': ['RescuerID'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['RescuerID', 'State'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['RescuerID', 'Type'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['Type', 'Breed1', 'Breed2'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['Type', 'Breed1'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['State'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['MaturitySize'],
'var': ['Age', 'Quantity', 'Sterilized', 'Fee'],
'agg': stats
},
]
nunique_dict = [
{
'key': ['State'],
'var': ['RescuerID'],
'agg': ['nunique']
},
{
'key': ['Dewormed'],
'var': ['RescuerID'],
'agg': ['nunique']
},
{
'key': ['Type'],
'var': ['RescuerID'],
'agg': ['nunique']
},
{
'key': ['Type', 'Breed1'],
'var': ['RescuerID'],
'agg': ['nunique']
},
]
groupby = GroupbyTransformer(param_dict=nunique_dict)
train = groupby.transform(train)
groupby = GroupbyTransformer(param_dict=groupby_dict)
train = groupby.transform(train)
diff = DiffGroupbyTransformer(param_dict=groupby_dict)
train = diff.transform(train)
ratio = RatioGroupbyTransformer(param_dict=groupby_dict)
train = ratio.transform(train)
with timer('category embedding'):
train[['BreedName_main_breed', 'BreedName_second_breed']] = \
train[['BreedName_main_breed', 'BreedName_second_breed']].astype("int32")
for c in categorical_features:
train[c] = train[c].fillna(train[c].max() + 1)
cv = CategoryVectorizer(categorical_features, n_components,
vectorizer=CountVectorizer(),
transformer=LatentDirichletAllocation(n_components=n_components, n_jobs=-1,
learning_method='online',
random_state=777),
name='CountLDA')
features1 = cv.transform(train).astype(np.float32)
cv = CategoryVectorizer(categorical_features, n_components,
vectorizer=CountVectorizer(),
transformer=TruncatedSVD(n_components=n_components, random_state=777),
name='CountSVD')
features2 = cv.transform(train).astype(np.float32)
train = pd.concat([train, features1, features2], axis=1)
t_cols += [c for c in train.columns if c not in orig_cols]
if K_flag or G_flag:
with timer('kaeru and gege features'):
with timer('text stats features'):
train = get_text_features(train)
k_cols += ['Length_Description', 'Length_annots_top_desc', 'Lengths_sentiment_text']
g_cols += ['Length_Description', 'Length_annots_top_desc', 'Lengths_sentiment_entities']
if K_flag:
with timer('kaeru features'):
orig_cols = train.columns
            with timer('engineering age'):
train = get_age_feats(train)
with timer('frequency encoding'):
freq_cols = ['BreedName_main_breed', 'BreedName_second_breed']
train = freq_encoding(train, freq_cols)
with timer('kanji feature'):
train['in_kanji'] = train.Description.apply(lambda x: is_zh(x))
            with timer('tfidf + svd / nmf'):
                # TruncatedSVD cannot consume a raw text column, so a TF-IDF
                # step (which the timer label suggests) is assumed ahead of
                # the SVD.
                vectorizer = make_pipeline(
                    TfidfVectorizer(),
                    TruncatedSVD(n_components=n_components, random_state=kaeru_seed))
                X = vectorizer.fit_transform(train['Description'])
                X = pd.DataFrame(X, columns=['tfidf_k_svd_{}'.format(i) for i in range(n_components)])
                train = pd.concat([train, X], axis=1)
                del vectorizer;
                gc.collect()
with timer('description doc2vec'):
                d2v_param = {  # standard gensim Doc2Vec parameter names assumed
                    "vector_size": 300,
                    "min_count": 10,
                    "window": 5,
                    "sample": 1e-3,
                    "epochs": 10
                }
X = doc2vec(train["Description"], d2v_param)
train = pd.concat([train, X], axis=1)
            with timer('annots_top_desc + svd / nmf'):
                # As above, a TF-IDF step is assumed before the SVD, since
                # TruncatedSVD cannot consume a raw text column.
                vectorizer = make_pipeline(
                    TfidfVectorizer(),
                    TruncatedSVD(n_components=n_components, random_state=kaeru_seed))
                X = vectorizer.fit_transform(train['annots_top_desc_pick'])
                X = pd.DataFrame(X, columns=['annots_top_desc_k_svd_{}'.format(i) for i in range(n_components)])
                train = pd.concat([train, X], axis=1)
                del vectorizer;
                gc.collect()
with timer('densenet features'):
vectorizer = TruncatedSVD(n_components=n_components, random_state=kaeru_seed)
X = vectorizer.fit_transform(gp_dense_first.drop(['PetID'], axis=1))
X = pd.DataFrame(X, columns=['densenet121_svd_{}'.format(i) for i in range(n_components)])
X["PetID"] = gp_dense_first["PetID"]
train = pd.merge(train, X, how="left", on="PetID")
del vectorizer;
gc.collect()
with timer('aggregation'):
stats = ['mean', 'sum', 'min', 'max']
var = ['Age_k', 'MaturitySize_k', 'FurLength_k', 'Fee_k', 'Health_k']
for c in ['Age', 'MaturitySize', 'FurLength', 'Fee', 'Health']:
train[c + "_k"] = train[c]
groupby_dict = [
{
'key': ['RescuerID'],
'var': ['Age_k'],
'agg': ['count']
},
{
'key': ['RescuerID'],
'var': ['Age_k', 'Length_Description', 'Length_annots_top_desc', 'Lengths_sentiment_text'],
'agg': stats + ["var"]
},
{
'key': ['RescuerID'],
'var': ['MaturitySize_k', 'FurLength_k', 'Fee_k', 'Health_k'],
'agg': stats
}
]
groupby = GroupbyTransformer(param_dict=groupby_dict)
train = groupby.transform(train)
train.drop(var, axis=1, inplace=True)
            k_cols += [c for c in train.columns if c not in orig_cols and c not in kaeru_drop_cols]
if G_flag:
with timer('gege features'):
orig_cols = train.columns
with timer('densenet features'):
vectorizer = TruncatedSVD(n_components=n_components_gege_img, random_state=kaeru_seed)
X = vectorizer.fit_transform(gp_dense_first.drop(['PetID'], axis=1))
X = pd.DataFrame(X, columns=['densenet121_g_svd_{}'.format(i) for i in range(n_components_gege_img)])
X["PetID"] = gp_dense_first["PetID"]
train = pd.merge(train, X, how="left", on="PetID")
del vectorizer, gp_dense_first;
gc.collect()
with timer('frequency encoding'):
freq_cols = ['RescuerID', 'Breed1', 'Breed2', 'Color1', 'Color2', 'Color3', 'State']
train = freq_encoding(train, freq_cols)
with timer('tfidf + svd'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=2, max_features=None,
strip_accents='unicode', analyzer='word', token_pattern=r'(?u)\b\w+\b',
ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1),
TruncatedSVD(n_components=n_components_gege_txt, random_state=kaeru_seed)
)
X = vectorizer.fit_transform(train['Description'])
X = pd.DataFrame(X, columns=['tfidf_g_svd_{}'.format(i) for i in range(n_components_gege_txt)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('annots tfidf + svd'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=2, max_features=None,
strip_accents='unicode', analyzer='word', token_pattern=r'(?u)\b\w+\b',
ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1),
TruncatedSVD(n_components=n_components_gege_txt, random_state=kaeru_seed)
)
X = vectorizer.fit_transform(train['annots_top_desc'])
X = pd.DataFrame(X, columns=['annots_top_desc_tfidf_g_svd_{}'.format(i) for i in
range(n_components_gege_txt)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('sentiment entities tfidf + svd'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=2, max_features=None,
strip_accents='unicode', analyzer='word', token_pattern=r'(?u)\b\w+\b',
ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1),
TruncatedSVD(n_components=n_components_gege_txt, random_state=kaeru_seed)
)
X = vectorizer.fit_transform(train['sentiment_entities'])
X = pd.DataFrame(X, columns=['sentiment_entities_tfidf_g_svd_{}'.format(i) for i in
range(n_components_gege_txt)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('image basic features'):
train_images['image_size'] = train_images['image_filename'].apply(getSize)
train_images['temp_size'] = train_images['image_filename'].apply(getDimensions)
train_images['width'] = train_images['temp_size'].apply(lambda x: x[0])
train_images['height'] = train_images['temp_size'].apply(lambda x: x[1])
train_images = train_images.drop(['temp_size'], axis=1)
aggs = {
'image_size': ['sum', 'mean', 'var'],
'width': ['sum', 'mean', 'var'],
'height': ['sum', 'mean', 'var'],
}
gp = train_images.groupby('PetID').agg(aggs)
new_columns = [k + '_' + agg for k in aggs.keys() for agg in aggs[k]]
gp.columns = new_columns
train = train.merge(gp.reset_index(), how="left", on="PetID")
g_cols += [c for c in train.columns if c not in orig_cols]
dtype_cols = ['BreedName_main_breed', 'BreedName_second_breed', 'BreedName_main_breed_all']
train[dtype_cols] = train[dtype_cols].astype("int32")
logger.info(train.head())
train.to_feather("all_data.feather")
np.save("common_cols.npy", np.array(common_cols))
np.save("t_cols.npy", np.array(t_cols))
np.save("k_cols.npy", np.array(k_cols))
np.save("g_cols.npy", np.array(g_cols))
if T_flag:
with timer('takuoko feature info'):
categorical_features_t = list(set(categorical_features) - set(remove))
predictors = list(set(common_cols + t_cols + categorical_features_t) - set([target] + remove))
predictors = [c for c in predictors if c in use_cols]
categorical_features_t = [c for c in categorical_features_t if c in predictors]
logger.info(f'predictors / use_cols = {len(predictors)} / {len(use_cols)}')
train = train.loc[:, ~train.columns.duplicated()]
X = train.loc[:, predictors]
y = train.loc[:, target]
rescuer_id = train.loc[:, 'RescuerID'].iloc[:len_train]
X_test = X[len_train:]
X = X[:len_train]
y = y[:len_train]
X.to_feather("X_train_t.feather")
X_test.reset_index(drop=True).to_feather("X_test_t.feather")
with timer('takuoko modeling'):
y_pred_t = np.empty(len_train, )
y_test_t = []
train_losses, valid_losses = [], []
# cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=1337)
# for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y)):
# cv = GroupKFold(n_splits=n_splits)
# for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=None, groups=rescuer_id)):
cv = StratifiedGroupKFold(n_splits=n_splits)
for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=y, groups=rescuer_id)):
X_train = X.loc[train_index, :]
X_valid = X.loc[valid_index, :]
y_train = y[train_index]
y_valid = y[valid_index]
pred_val, pred_test, train_rmse, valid_rmse = run_model(X_train, y_train, X_valid, y_valid, X_test,
categorical_features_t, predictors,
maxvalue_dict, fold_id, MODEL_PARAMS,
MODEL_NAME + "_t")
y_pred_t[valid_index] = pred_val
y_test_t.append(pred_test)
train_losses.append(train_rmse)
valid_losses.append(valid_rmse)
y_test_t = np.mean(y_test_t, axis=0)
logger.info(f'train RMSE = {np.mean(train_losses)}')
logger.info(f'valid RMSE = {np.mean(valid_losses)}')
np.save("y_test_t.npy", y_test_t)
np.save("y_oof_t.npy", y_pred_t)
if K_flag:
with timer('kaeru feature info'):
kaeru_cat_cols = None
predictors = list(set(common_cols + k_cols) - set([target] + remove + kaeru_drop_cols))
X = train.loc[:, predictors]
y = train.loc[:, target]
rescuer_id = train.loc[:, 'RescuerID'].iloc[:len_train]
X_test = X[len_train:]
X = X[:len_train]
y = y[:len_train]
X.to_feather("X_train_k.feather")
X_test.reset_index(drop=True).to_feather("X_test_k.feather")
with timer('kaeru modeling'):
y_pred_k = np.empty(len_train, )
y_test_k = []
train_losses, valid_losses = [], []
# cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=1337)
# for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y)):
cv = StratifiedGroupKFold(n_splits=n_splits)
for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=y, groups=rescuer_id)):
X_train = X.loc[train_index, :]
X_valid = X.loc[valid_index, :]
y_train = y[train_index]
y_valid = y[valid_index]
pred_val, pred_test, train_rmse, valid_rmse = run_model(X_train, y_train, X_valid, y_valid, X_test,
kaeru_cat_cols, predictors, maxvalue_dict,
fold_id, KAERU_PARAMS, MODEL_NAME + "_k")
y_pred_k[valid_index] = pred_val
y_test_k.append(pred_test)
train_losses.append(train_rmse)
valid_losses.append(valid_rmse)
y_test_k = np.mean(y_test_k, axis=0)
logger.info(f'train RMSE = {np.mean(train_losses)}')
logger.info(f'valid RMSE = {np.mean(valid_losses)}')
np.save("y_test_k.npy", y_test_k)
np.save("y_oof_k.npy", y_pred_k)
if G_flag:
with timer('gege feature info'):
predictors = list(set(common_cols + g_cols) - set([target] + remove + gege_drop_cols))
categorical_features_g = [c for c in categorical_features if c in predictors]
X = train.loc[:, predictors]
y = train.loc[:, target]
rescuer_id = train.loc[:, 'RescuerID'].iloc[:len_train]
X_test = X[len_train:]
X = X[:len_train]
y = y[:len_train]
X.to_feather("X_train_g.feather")
X_test.reset_index(drop=True).to_feather("X_test_g.feather")
with timer('gege adversarial validation'):
train_idx = range(0, len_train)
X_adv = train.loc[:, predictors]
y_adv = np.array([0 for i in range(len(X))] + [1 for i in range(len(X_test))])
X_adv_tr, X_adv_tst, y_adv_tr, y_adv_tst = train_test_split(X_adv, y_adv, test_size=0.20, shuffle=True,
random_state=42)
lgtrain = lgb.Dataset(X_adv_tr, y_adv_tr,
categorical_feature=categorical_features_g,
feature_name=predictors)
lgvalid = lgb.Dataset(X_adv_tst, y_adv_tst,
categorical_feature=categorical_features_g,
feature_name=predictors)
lgb_adv = lgb.train(
ADV_PARAMS,
lgtrain,
num_boost_round=20000,
valid_sets=[lgtrain, lgvalid],
valid_names=['train', 'valid'],
early_stopping_rounds=500,
verbose_eval=20000
)
train_preds = lgb_adv.predict(X_adv.iloc[train_idx])
extract_idx = np.argsort(-train_preds)[:int(len(train_idx) * 0.85)]
del X_adv_tr, X_adv_tst, y_adv_tr, y_adv_tst, X_adv, y_adv, lgb_adv;
gc.collect()
with timer('gege modeling'):
X = X.iloc[extract_idx].reset_index(drop=True)
y = y[extract_idx].reset_index(drop=True)
rescuer_id = rescuer_id[extract_idx].reset_index(drop=True)
y_pred_g = np.empty(len(extract_idx), )
y_test_g = []
train_losses, valid_losses = [], []
cv = StratifiedGroupKFold(n_splits=n_splits)
for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=y, groups=rescuer_id)):
X_train = X.loc[train_index, :]
X_valid = X.loc[valid_index, :]
y_train = y[train_index]
y_valid = y[valid_index]
pred_val, pred_test, train_rmse, valid_rmse = run_xgb_model(X_train, y_train,
X_valid, y_valid, X_test, predictors,
maxvalue_dict,
fold_id, MODEL_PARAMS_XGB,
MODEL_NAME + "_g")
y_pred_g[valid_index] = pred_val
y_test_g.append(pred_test)
train_losses.append(train_rmse)
valid_losses.append(valid_rmse)
y_test_g = np.mean(y_test_g, axis=0)
logger.info(f'train RMSE = {np.mean(train_losses)}')
logger.info(f'valid RMSE = {np.mean(valid_losses)}')
np.save("y_test_g.npy", y_test_g)
np.save("y_oof_g.npy", y_pred_g)
np.save("extract_idx.npy", extract_idx)
if T_flag and K_flag and G_flag:
y_pred = (y_pred_t[extract_idx] + y_pred_k[extract_idx] + y_pred_g) / 3
y_test = (y_test_t + y_test_k + y_test_g) / 3
elif T_flag and K_flag:
y_pred = y_pred_t * 0.5 + y_pred_k * 0.5
y_test = y_test_t * 0.5 + y_test_k * 0.5
elif T_flag and G_flag:
y_pred = y_pred_t[extract_idx] * 0.5 + y_pred_g * 0.5
y_test = y_test_t * 0.5 + y_test_g * 0.5
elif G_flag and K_flag:
y_pred = y_pred_g * 0.5 + y_pred_k[extract_idx] * 0.5
y_test = y_test_g * 0.5 + y_test_k * 0.5
elif T_flag:
y_pred = y_pred_t
y_test = y_test_t
elif K_flag:
y_pred = y_pred_k
y_test = y_test_k
elif G_flag:
y_pred = y_pred_g
y_test = y_test_g
with timer('optimize threshold'):
optR = OptimizedRounder()
optR.fit(y_pred, y)
coefficients = optR.coefficients()
y_pred = optR.predict(y_pred, coefficients)
score = get_score(y, y_pred)
logger.info(f'Coefficients = {coefficients}')
logger.info(f'QWK = {score}')
y_test = optR.predict(y_test, coefficients).astype(int)
with timer('postprocess'):
submission_with_postprocess(y_test)
import itertools
import json
import gc
import glob
import os
import time
import cv2
import re
import nltk
import torch
import imagehash
import lightgbm as lgb
import xgboost as xgb
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import scipy as sp
from scipy.stats import rankdata
from PIL import Image
from pymagnitude import Magnitude
from gensim.models import word2vec, KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from contextlib import contextmanager
from functools import partial
from itertools import combinations
from logging import getLogger, Formatter, StreamHandler, FileHandler, INFO
from keras.applications.densenet import preprocess_input as preprocess_input_dense
from keras.applications.densenet import DenseNet121
from keras.applications.inception_resnet_v2 import preprocess_input as preprocess_input_incep
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras import backend as K
from keras.layers import GlobalAveragePooling2D, Input, Lambda, AveragePooling1D
from keras.models import Model
from keras.preprocessing.text import text_to_word_sequence
from sklearn.decomposition import LatentDirichletAllocation, TruncatedSVD, NMF
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics import cohen_kappa_score, mean_squared_error
from sklearn.model_selection import GroupKFold, StratifiedKFold, train_test_split
from sklearn.pipeline import make_pipeline, make_union
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.feature_extraction.text import _document_frequency
COMPETITION_NAME = 'petfinder-adoption-prediction'
MODEL_NAME = 'v001'
logger = getLogger(COMPETITION_NAME)
LOGFORMAT = '%(asctime)s %(levelname)s %(message)s'
target = 'AdoptionSpeed'
len_train = 14993
len_test = 3948
T_flag = True
K_flag = True
G_flag = True
debug = False
seed = 777
kaeru_seed = 1337
n_splits = 5
np.random.seed(seed)
n_components = 5
n_components_gege_img = 32
n_components_gege_txt = 16
img_size = 256
batch_size = 256
MODEL_PARAMS = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'rmse',
'learning_rate': 0.01,
'num_leaves': 63,
'subsample': 0.9,
'subsample_freq': 1,
'colsample_bytree': 0.6,
'max_depth': 9,
'max_bin': 127,
'reg_alpha': 0.11,
'reg_lambda': 0.01,
'min_child_weight': 0.2,
'min_child_samples': 20,
'min_gain_to_split': 0.02,
'min_data_in_bin': 3,
'bin_construct_sample_cnt': 5000,
'cat_l2': 10,
'verbose': -1,
'nthread': -1,
'seed': 777,
}
KAERU_PARAMS = {'application': 'regression',
'boosting': 'gbdt',
'metric': 'rmse',
'num_leaves': 70,
'max_depth': 9,
'learning_rate': 0.01,
'max_bin': 32,
'bagging_freq': 2,
'bagging_fraction': 0.85,
'feature_fraction': 0.8,
'min_split_gain': 0.02,
'min_child_samples': 150,
'min_child_weight': 0.02,
'lambda_l2': 0.0475,
'verbosity': -1,
'seed': kaeru_seed}
ADV_PARAMS = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'num_leaves': 64,
'learning_rate': 0.02,
'verbose': 0,
'lambda_l1': 0.1,
'seed': 1213
}
MODEL_PARAMS_XGB = {
'eval_metric': 'rmse',
'seed': 1337,
'eta': 0.01,
'subsample': 0.8,
'colsample_bytree': 0.85,
'tree_method': 'gpu_hist',
'device': 'gpu',
'silent': 1,
}
FIT_PARAMS = {
'num_boost_round': 5000,
'early_stopping_rounds': 100,
'verbose_eval': 5000,
}
maxvalue_dict = {}
categorical_features = [
'Breed1',
'Breed2',
'Color1',
'Color2',
'Color3',
'Dewormed',
'FurLength',
'Gender',
'Health',
'MaturitySize',
'State',
'Sterilized',
'Type',
'Vaccinated',
'Type_main_breed',
'BreedName_main_breed',
'Type_second_breed',
'BreedName_second_breed',
'BreedName_main_breed_all',
]
contraction_mapping = {u"ain’t": u"is not", u"aren’t": u"are not", u"can’t": u"cannot", u"’cause": u"because",
u"could’ve": u"could have", u"couldn’t": u"could not", u"didn’t": u"did not",
u"doesn’t": u"does not", u"don’t": u"do not", u"hadn’t": u"had not",
u"hasn’t": u"has not", u"haven’t": u"have not", u"he’d": u"he would",
u"he’ll": u"he will", u"he’s": u"he is", u"how’d": u"how did", u"how’d’y": u"how do you",
u"how’ll": u"how will", u"how’s": u"how is", u"I’d": u"I would",
u"I’d’ve": u"I would have", u"I’ll": u"I will", u"I’ll’ve": u"I will have",
u"I’m": u"I am", u"I’ve": u"I have", u"i’d": u"i would", u"i’d’ve": u"i would have",
u"i’ll": u"i will", u"i’ll’ve": u"i will have", u"i’m": u"i am", u"i’ve": u"i have",
u"isn’t": u"is not", u"it’d": u"it would", u"it’d’ve": u"it would have",
u"it’ll": u"it will", u"it’ll’ve": u"it will have", u"it’s": u"it is",
u"let’s": u"let us", u"ma’am": u"madam", u"mayn’t": u"may not",
u"might’ve": u"might have", u"mightn’t": u"might not", u"mightn’t’ve": u"might not have",
u"must’ve": u"must have", u"mustn’t": u"must not", u"mustn’t’ve": u"must not have",
u"needn’t": u"need not", u"needn’t’ve": u"need not have", u"o’clock": u"of the clock",
u"oughtn’t": u"ought not", u"oughtn’t’ve": u"ought not have", u"shan’t": u"shall not",
u"sha’n’t": u"shall not", u"shan’t’ve": u"shall not have", u"she’d": u"she would",
u"she’d’ve": u"she would have", u"she’ll": u"she will", u"she’ll’ve": u"she will have",
u"she’s": u"she is", u"should’ve": u"should have", u"shouldn’t": u"should not",
u"shouldn’t’ve": u"should not have", u"so’ve": u"so have", u"so’s": u"so as",
u"this’s": u"this is", u"that’d": u"that would", u"that’d’ve": u"that would have",
u"that’s": u"that is", u"there’d": u"there would", u"there’d’ve": u"there would have",
u"there’s": u"there is", u"here’s": u"here is", u"they’d": u"they would",
u"they’d’ve": u"they would have", u"they’ll": u"they will",
u"they’ll’ve": u"they will have", u"they’re": u"they are", u"they’ve": u"they have",
u"to’ve": u"to have", u"wasn’t": u"was not", u"we’d": u"we would",
u"we’d’ve": u"we would have", u"we’ll": u"we will", u"we’ll’ve": u"we will have",
u"we’re": u"we are", u"we’ve": u"we have", u"weren’t": u"were not",
u"what’ll": u"what will", u"what’ll’ve": u"what will have", u"what’re": u"what are",
u"what’s": u"what is", u"what’ve": u"what have", u"when’s": u"when is",
u"when’ve": u"when have", u"where’d": u"where did", u"where’s": u"where is",
u"where’ve": u"where have", u"who’ll": u"who will", u"who’ll’ve": u"who will have",
u"who’s": u"who is", u"who’ve": u"who have", u"why’s": u"why is", u"why’ve": u"why have",
u"will’ve": u"will have", u"won’t": u"will not", u"won’t’ve": u"will not have",
u"would’ve": u"would have", u"wouldn’t": u"would not", u"wouldn’t’ve": u"would not have",
u"y’all": u"you all", u"y’all’d": u"you all would", u"y’all’d’ve": u"you all would have",
u"y’all’re": u"you all are", u"y’all’ve": u"you all have", u"you’d": u"you would",
u"you’d’ve": u"you would have", u"you’ll": u"you will", u"you’ll’ve": u"you will have",
u"you’re": u"you are", u"you’ve": u"you have", u"cat’s": u"cat is", u" whatapp ": u" whatapps ",
u" whatssapp ": u" whatapps ", u" whatssap ": u" whatapps ", u" whatspp ": u" whatapps ",
u" whastapp ": u" whatapps ", u" whatsap ": u" whatapps ", u" whassap ": u" whatapps ",
u" watapps ": u" whatapps ", u"wetfood": u"wet food", u"intetested": u"interested",
u"领养条件,": u"领养条件", u"谢谢。": u"谢谢",
u"别打我,记住,我有反抗的牙齿,但我不会咬你。remember": u"别打我,记住,我有反抗的牙齿,但我不会咬你。",
u"有你。do": u"有你。", u"名字name": u"名字", u"year,": u"year", u"work,your": u"work your",
u"too,will": u"too will", u"timtams": u"timtam", u"spay。": u"spay", u"shoulder,a": u"shoulder a",
u"sherpherd": u"shepherd", u"sherphed": u"shepherd", u"sherperd": u"shepherd",
u"sherpard": u"shepherd", u"serious。": u"serious", u"remember,i": u"remember i",
u"recover,": u"recover", u"refundable指定期限内结扎后会全数奉还": u"refundable",
u"puchong区,有没有人有增添家庭成员?": u"puchong", u"puchong救的": u"puchong",
u"puchong,": u"puchong", u"month。": u"month", u"month,": u"month",
u"microchip(做狗牌一定要有主人的电话号码)": u"microchip", u"maju。": u"maju", u"maincoone": u"maincoon",
u"lumpur。": u"lumpur", u"location:阿里玛,大山脚": u"location", u"life🐾🐾": u"life",
u"kibble,": u"kibble", u"home…": u"home", u"hand,but": u"hand but", u"hair,a": u"hair a",
u"grey、brown": u"grey brown", u"gray,": u"gray", u"free免费": u"free", u"food,or": u"food or",
u"dog/dog": u"dog", u"dijumpa": u"dijumpai", u"dibela": u"dibelai",
u"beauuuuuuuuutiful": u"beautiful", u"adopt🙏": u"adopt", u"addopt": u"adopt",
u"enxiety": u"anxiety", u"vaksin": u"vaccine"}
numerical_features = []
text_features = ['Name', 'Description', 'Description_Emb', 'Description_bow']
meta_text = ['BreedName_main_breed', 'BreedName_second_breed', 'annots_top_desc', 'sentiment_text',
'annots_top_desc_pick', 'sentiment_entities']
remove = ['index', 'seq_text', 'PetID', 'Name', 'Description', 'RescuerID', 'StateName', 'annots_top_desc',
'sentiment_text',
'sentiment_entities', 'Description_Emb', 'Description_bow', 'annots_top_desc_pick']
kaeru_drop_cols = ["2017GDPperCapita", "Bumiputra", "Chinese", "HDI", "Indian", "Latitude", "Longitude",
'color_red_score_mean_mean', 'color_red_score_mean_sum', 'color_blue_score_mean_mean',
'color_blue_score_mean_sum', 'color_green_score_mean_mean', 'color_green_score_mean_sum',
'dog_cat_scores_mean_mean', 'dog_cat_scores_mean_sum', 'dog_cat_topics_mean_mean',
'dog_cat_topics_mean_sum', 'is_dog_or_cat_mean_mean', 'is_dog_or_cat_mean_sum',
'len_text_mean_mean', 'len_text_mean_sum', 'StateID']
gege_drop_cols = ['2017GDPperCapita', 'Breed1_equals_Breed2', 'Bumiputra', 'Chinese',
'HDI', 'Indian', 'Latitude', 'Longitude', 'Pop_density', 'Urban_pop', 'Breed1_equals_Breed2',
'fix_Breed1', 'fix_Breed2', 'single_Breed', 'color_red_score_mean_mean', 'color_red_score_mean_sum',
'color_red_score_mean_var', 'color_blue_score_mean_mean', 'color_blue_score_mean_sum',
'color_blue_score_mean_var', 'color_green_score_mean_mean', 'color_green_score_mean_sum',
'color_green_score_mean_var', 'dog_cat_scores_mean_mean', 'dog_cat_scores_mean_sum',
'dog_cat_scores_mean_var', 'dog_cat_topics_mean_mean', 'dog_cat_topics_mean_sum',
'dog_cat_topics_mean_var', 'is_dog_or_cat_mean_mean', 'is_dog_or_cat_mean_sum',
'is_dog_or_cat_mean_var', 'len_text_mean_mean', 'len_text_mean_sum', 'len_text_mean_var']
use_cols = pd.read_csv("../input/pet-usecols/importance10.csv")
use_cols["gain"] = use_cols["gain"] / use_cols["gain"].sum()
use_cols = list(use_cols[use_cols.gain > 0.0002].feature.values)
ps = nltk.stem.PorterStemmer()
lc = nltk.stem.lancaster.LancasterStemmer()
sb = nltk.stem.snowball.SnowballStemmer('english')
def to_category(train, cat=None):
if cat is None:
cat = [col for col in train.columns if train[col].dtype == 'object']
for c in cat:
train[c], uniques = pd.factorize(train[c])
maxvalue_dict[c] = train[c].max() + 1
return train
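# Sketch of to_category on a hypothetical frame (not executed here):
#   df = to_category(pd.DataFrame({'Color1': ['red', 'blue', 'red']}), cat=['Color1'])
#   # Color1 -> [0, 1, 0]; pd.factorize assigns codes by first appearance, NaN -> -1
# maxvalue_dict then records max code + 1 as the cardinality per encoded column.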
def init_logger():
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter(LOGFORMAT))
fh_handler = FileHandler('{}.log'.format(MODEL_NAME))
fh_handler.setFormatter(Formatter(LOGFORMAT))
logger.setLevel(INFO)
logger.addHandler(handler)
logger.addHandler(fh_handler)
@contextmanager
def timer(name):
t0 = time.time()
yield
logger.info(f'[{name}] done in {time.time() - t0:.0f} s')
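# Usage sketch for timer (hypothetical block; requires init_logger() first):
#   with timer('feature block'):
#       build_features()
# logs a line like "[feature block] done in 12 s" through the module logger.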
def load_image_and_hash(paths):
funcs = [
imagehash.average_hash,
imagehash.phash,
imagehash.dhash,
imagehash.whash,
]
petids = []
hashes = []
for path in paths:
image = Image.open(path)
imageid = path.split('/')[-1].split('.')[0][:-2]
petids.append(imageid)
hashes.append(np.array([f(image).hash for f in funcs]).reshape(256))
return petids, np.array(hashes).astype(np.int32)
def find_duplicates_all():
train_paths = glob.glob('../input/petfinder-adoption-prediction/train_images/*-1.jpg')
train_paths += glob.glob('../input/petfinder-adoption-prediction/train_images/*-2.jpg')
test_paths = glob.glob('../input/petfinder-adoption-prediction/test_images/*-1.jpg')
test_paths += glob.glob('../input/petfinder-adoption-prediction/test_images/*-2.jpg')
train_petids, train_hashes = load_image_and_hash(train_paths)
test_petids, test_hashes = load_image_and_hash(test_paths)
train_hashes = torch.Tensor(train_hashes).cuda()
test_hashes = torch.Tensor(test_hashes).cuda()
sims = np.array(
[(train_hashes[i] == test_hashes).sum(dim=1).cpu().numpy() / 256 for i in range(train_hashes.shape[0])])
indices1 = np.where(sims > 0.9)
indices2 = np.where(indices1[0] != indices1[1])
petids1 = [train_petids[i] for i in indices1[0][indices2]]
petids2 = [test_petids[i] for i in indices1[1][indices2]]
dups = {tuple(sorted([petid1, petid2])): True for petid1, petid2 in zip(petids1, petids2)}
logger.info('found %d duplicates' % len(dups))
return dups
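# Near-duplicate detection sketch: every image is reduced to a 256-bit code by
# stacking four 64-bit perceptual hashes (average, phash, dhash, whash). The
# GPU comparison is a vectorized Hamming similarity over all train/test pairs;
# a pair counts as a duplicate when more than 90% of bits agree (sims > 0.9,
# i.e. at most 25 of the 256 bits differ).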
def submission_with_postprocess(y_pred):
df_sub = pd.read_csv('../input/petfinder-adoption-prediction/test/sample_submission.csv')
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
df_sub["AdoptionSpeed"] = y_pred
duplicated = find_duplicates_all()
    duplicated = pd.DataFrame(duplicated, index=range(0)).T.reset_index()  # tuple dict keys (PetID pairs) unpack into two columns
duplicated.columns = ['pet_id_0', 'pet_id_1']
duplicated_0 = duplicated.merge(train[['PetID', 'AdoptionSpeed']], how='left', left_on='pet_id_0',
right_on='PetID').dropna()
df_sub = df_sub.merge(duplicated_0[['pet_id_1', 'AdoptionSpeed']],
how='left', left_on='PetID', right_on='pet_id_1', suffixes=('_original', ''))
df_sub['AdoptionSpeed'].fillna(df_sub['AdoptionSpeed_original'], inplace=True)
df_sub = df_sub[['PetID', 'AdoptionSpeed']]
duplicated_1 = duplicated.merge(train[['PetID', 'AdoptionSpeed']],
how='left', left_on='pet_id_1', right_on='PetID').dropna()
df_sub = df_sub.merge(duplicated_1[['pet_id_0', 'AdoptionSpeed']],
how='left', left_on='PetID', right_on='pet_id_0', suffixes=('_original', ''))
df_sub['AdoptionSpeed'].fillna(df_sub['AdoptionSpeed_original'], inplace=True)
df_sub = df_sub[['PetID', 'AdoptionSpeed']]
df_sub['AdoptionSpeed'] = df_sub['AdoptionSpeed'].astype('int32')
df_sub.to_csv('submission.csv', index=False)
def submission(y_pred):
logger.info('making submission file...')
df_sub = pd.read_csv('../input/petfinder-adoption-prediction/test/sample_submission.csv')
df_sub[target] = y_pred
df_sub.to_csv('submission.csv', index=False)
def analyzer_bow(text):
stop_words = ['i', 'a', 'an', 'the', 'to', 'and', 'or', 'if', 'is', 'are', 'am', 'it', 'this', 'that', 'of', 'from',
'in', 'on']
text = text.lower()
text = text.replace('\n', '')
text = text.replace('\t', '')
puncts = r',.":)(-!?|;\'$&/[]>%=#*+\\•~@£·_{}©^®`<→°€™›♥←×§″′Â█½à…“★”–●â►−¢²¬░¶↑±¿▾═¦║―¥▓—‹─▒:¼⊕▼▪†■’▀¨▄♫☆é¯♦¤▲踾Ã⋅‘∞∙)↓、│(»,♪╩╚³・╦╣╔╗▬❤ïØ¹≤‡√。【】'
for punct in puncts:
text = text.replace(punct, f' {punct} ')
for bad_word in contraction_mapping:
if bad_word in text:
text = text.replace(bad_word, contraction_mapping[bad_word])
    text = text.split(' ')  # split on spaces
text = [sb.stem(t) for t in text]
words = []
for word in text:
        if (re.compile(r'^.*[0-9]+.*$').fullmatch(word) is not None):  # split off digit-containing tokens
for w in re.findall(r'(\d+|\D+)', word):
words.append(w)
continue
        if word in stop_words:  # skip stop words
            continue
        if len(word) < 2:  # skip empty and single-character tokens
            continue
words.append(word)
return " ".join(words)
def analyzer_embed(text):
    text = text.lower()  # lowercase
    text = text.replace('\n', '')  # remove newlines
    text = text.replace('\t', '')  # remove tabs
puncts = r',.":)(-!?|;\'$&/[]>%=#*+\\•~@£·_{}©^®`<→°€™›♥←×§″′Â█½à…“★”–●â►−¢²¬░¶↑±¿▾═¦║―¥▓—‹─▒:¼⊕▼▪†■’▀¨▄♫☆é¯♦¤▲踾Ã⋅‘∞∙)↓、│(»,♪╩╚³・╦╣╔╗▬❤ïØ¹≤‡√。【】'
for punct in puncts:
text = text.replace(punct, f' {punct} ')
for bad_word in contraction_mapping:
if bad_word in text:
text = text.replace(bad_word, contraction_mapping[bad_word])
text = text.split(' ')
words = []
for word in text:
if (re.compile(r'^.*[0-9]+.*$').fullmatch(word) is not None):
for w in re.findall(r'(\d+|\D+)', word):
words.append(w)
continue
if len(word) < 1:
continue
words.append(word)
return " ".join(words)
def analyzer_k(text):
stop_words = ['i', 'a', 'an', 'the', 'to', 'and', 'or', 'if', 'is', 'are', 'am', 'it', 'this', 'that', 'of', 'from',
'in', 'on']
text = text.lower()
text = text.replace('\n', '')
text = text.replace('\t', '')
text = re.sub(re.compile(r'[!-\/:-@[-`{-~]'), ' ', text)
text = text.split(' ')
words = []
for word in text:
if (re.compile(r'^.*[0-9]+.*$').fullmatch(word) is not None):
continue
if word in stop_words:
continue
if len(word) < 2:
continue
words.append(word)
return words
class GroupbyTransformer():
def __init__(self, param_dict=None):
self.param_dict = param_dict
def _get_params(self, p_dict):
key = p_dict['key']
if 'var' in p_dict.keys():
var = p_dict['var']
else:
var = self.var
if 'agg' in p_dict.keys():
agg = p_dict['agg']
else:
agg = self.agg
if 'on' in p_dict.keys():
on = p_dict['on']
else:
on = key
return key, var, agg, on
def _aggregate(self, dataframe):
self.features = []
for param_dict in self.param_dict:
key, var, agg, on = self._get_params(param_dict)
all_features = list(set(key + var))
new_features = self._get_feature_names(key, var, agg)
features = dataframe[all_features].groupby(key)[
var].agg(agg).reset_index()
features.columns = key + new_features
self.features.append(features)
return self
def _merge(self, dataframe, merge=True):
for param_dict, features in zip(self.param_dict, self.features):
key, var, agg, on = self._get_params(param_dict)
if merge:
dataframe = dataframe.merge(features, how='left', on=on)
else:
new_features = self._get_feature_names(key, var, agg)
dataframe = pd.concat([dataframe, features[new_features]], axis=1)
return dataframe
def transform(self, dataframe):
self._aggregate(dataframe)
return self._merge(dataframe, merge=True)
def _get_feature_names(self, key, var, agg):
_agg = []
for a in agg:
if not isinstance(a, str):
_agg.append(a.__name__)
else:
_agg.append(a)
return ['_'.join([a, v, 'groupby'] + key) for v in var for a in _agg]
def get_feature_names(self):
self.feature_names = []
for param_dict in self.param_dict:
key, var, agg, on = self._get_params(param_dict)
self.feature_names += self._get_feature_names(key, var, agg)
return self.feature_names
def get_numerical_features(self):
return self.get_feature_names()
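# Usage sketch for GroupbyTransformer (hypothetical param_dict):
#   gt = GroupbyTransformer(param_dict=[
#       {'key': ['RescuerID'], 'var': ['Age'], 'agg': ['mean', 'max']}])
#   train = gt.transform(train)
# adds 'mean_Age_groupby_RescuerID' and 'max_Age_groupby_RescuerID', merged
# back onto every row via a left join on RescuerID.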
class DiffGroupbyTransformer(GroupbyTransformer):
def _aggregate(self):
raise NotImplementedError
def _merge(self):
raise NotImplementedError
def transform(self, dataframe):
for param_dict in self.param_dict:
key, var, agg, on = self._get_params(param_dict)
for a in agg:
for v in var:
new_feature = '_'.join(['diff', a, v, 'groupby'] + key)
base_feature = '_'.join([a, v, 'groupby'] + key)
dataframe[new_feature] = dataframe[base_feature] - dataframe[v]
return dataframe
def _get_feature_names(self, key, var, agg):
_agg = []
for a in agg:
if not isinstance(a, str):
_agg.append(a.__name__)
else:
_agg.append(a)
return ['_'.join(['diff', a, v, 'groupby'] + key) for v in var for a in _agg]
class RatioGroupbyTransformer(GroupbyTransformer):
def _aggregate(self):
raise NotImplementedError
def _merge(self):
raise NotImplementedError
def transform(self, dataframe):
for param_dict in self.param_dict:
key, var, agg, on = self._get_params(param_dict)
for a in agg:
for v in var:
new_feature = '_'.join(['ratio', a, v, 'groupby'] + key)
base_feature = '_'.join([a, v, 'groupby'] + key)
dataframe[new_feature] = dataframe[v] / dataframe[base_feature]
return dataframe
def _get_feature_names(self, key, var, agg):
_agg = []
for a in agg:
if not isinstance(a, str):
_agg.append(a.__name__)
else:
_agg.append(a)
return ['_'.join(['ratio', a, v, 'groupby'] + key) for v in var for a in _agg]
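# Note: DiffGroupbyTransformer and RatioGroupbyTransformer do not aggregate
# themselves; they assume the base columns (e.g. 'mean_Age_groupby_RescuerID')
# were already created by a GroupbyTransformer with the same param_dict, and
# then derive 'diff_*' = aggregate - raw value and 'ratio_*' = raw / aggregate.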
class CategoryVectorizer():
def __init__(self, categorical_columns, n_components,
vectorizer=CountVectorizer(),
transformer=LatentDirichletAllocation(),
name='CountLDA'):
self.categorical_columns = categorical_columns
self.n_components = n_components
self.vectorizer = vectorizer
self.transformer = transformer
self.name = name + str(self.n_components)
def transform(self, dataframe):
features = []
for (col1, col2) in self.get_column_pairs():
try:
sentence = self.create_word_list(dataframe, col1, col2)
sentence = self.vectorizer.fit_transform(sentence)
feature = self.transformer.fit_transform(sentence)
feature = self.get_feature(dataframe, col1, col2, feature, name=self.name)
features.append(feature)
            except Exception:
                # some column pairs fail to vectorize (e.g. empty vocabulary); skip the pair
                pass
features = pd.concat(features, axis=1)
return features
def create_word_list(self, dataframe, col1, col2):
col1_size = int(dataframe[col1].values.max() + 1)
col2_list = [[] for _ in range(col1_size)]
for val1, val2 in zip(dataframe[col1].values, dataframe[col2].values):
col2_list[int(val1)].append(col2 + str(val2))
return [' '.join(map(str, ls)) for ls in col2_list]
def get_feature(self, dataframe, col1, col2, latent_vector, name=''):
features = np.zeros(
shape=(len(dataframe), self.n_components), dtype=np.float32)
self.columns = ['_'.join([name, col1, col2, str(i)])
for i in range(self.n_components)]
for i, val1 in enumerate(dataframe[col1]):
features[i, :self.n_components] = latent_vector[val1]
return pd.DataFrame(data=features, columns=self.columns)
def get_column_pairs(self):
return [(col1, col2) for col1, col2 in itertools.product(self.categorical_columns, repeat=2) if col1 != col2]
def get_numerical_features(self):
return self.columns
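# Usage sketch for CategoryVectorizer (hypothetical settings): for each ordered
# pair (col1, col2) it builds one "document" per col1 value from the col2
# tokens of its rows, embeds the documents, and maps each row to its col1
# embedding:
#   cv = CategoryVectorizer(['Breed1', 'State'], 5,
#                           vectorizer=CountVectorizer(),
#                           transformer=LatentDirichletAllocation(n_components=5),
#                           name='CountLDA')
#   topic_feats = cv.transform(train)  # columns like 'CountLDA5_Breed1_State_0'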
class BM25Transformer(BaseEstimator, TransformerMixin):
def __init__(self, use_idf=True, k1=2.0, b=0.75):
self.use_idf = use_idf
self.k1 = k1
self.b = b
def fit(self, X):
if not sp.sparse.issparse(X):
X = sp.sparse.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
idf = np.log((n_samples - df + 0.5) / (df + 0.5))
self._idf_diag = sp.sparse.spdiags(idf, diags=0, m=n_features, n=n_features)
doc_len = X.sum(axis=1)
self._average_document_len = np.average(doc_len)
return self
def transform(self, X, copy=True):
        if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.floating):
            X = sp.sparse.csr_matrix(X, copy=copy)
        else:
            X = sp.sparse.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
doc_len = X.sum(axis=1)
sz = X.indptr[1:] - X.indptr[0:-1]
rep = np.repeat(np.asarray(doc_len), sz)
nom = self.k1 + 1
denom = X.data + self.k1 * (1 - self.b + self.b * rep / self._average_document_len)
data = X.data * nom / denom
X = sp.sparse.csr_matrix((data, X.indices, X.indptr), shape=X.shape)
if self.use_idf:
            check_is_fitted(self, '_idf_diag', msg='idf vector is not fitted')  # msg passed as keyword for newer sklearn
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
X = X * self._idf_diag
return X
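# BM25 weighting as implemented above: each term frequency tf in a document of
# length dl is rescaled to
#   tf * (k1 + 1) / (tf + k1 * (1 - b + b * dl / avgdl))
# and, when use_idf=True, multiplied by idf(t) = log((N - df + 0.5) / (df + 0.5)).
# Hypothetical usage on a token-count matrix:
#   counts = CountVectorizer().fit_transform(texts)
#   X_bm25 = BM25Transformer(k1=2.0, b=0.75).fit(counts).transform(counts)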
def merge_state_info(train):
states = pd.read_csv('../input/petfinder-adoption-prediction/state_labels.csv')
state_info = pd.read_csv('../input/state-info/state_info.csv')
state_info.rename(columns={
'Area (km2)': 'Area',
'Pop. density': 'Pop_density',
'Urban pop.(%)': 'Urban_pop',
'Bumiputra (%)': 'Bumiputra',
'Chinese (%)': 'Chinese',
'Indian (%)': 'Indian'
}, inplace=True)
state_info['Population'] = state_info['Population'].str.replace(',', '').astype('int32')
state_info['Area'] = state_info['Area'].str.replace(',', '').astype('int32')
state_info['Pop_density'] = state_info['Pop_density'].str.replace(',', '').astype('int32')
state_info['2017GDPperCapita'] = state_info['2017GDPperCapita'].str.replace(',', '').astype('float32')
state_info['StateName'] = state_info['StateName'].str.replace('FT ', '')
state_info['StateName'] = state_info['StateName'].str.replace('Malacca', 'Melaka')
state_info['StateName'] = state_info['StateName'].str.replace('Penang', 'Pulau Pinang')
states = states.merge(state_info, how='left', on='StateName')
train = train.merge(states, how='left', left_on='State', right_on='StateID')
return train
def merge_breed_name(train):
breeds = pd.read_csv('../input/petfinder-adoption-prediction/breed_labels.csv')
with open("../input/cat-and-dog-breeds-parameters/rating.json", 'r', encoding='utf-8') as f:
breed_data = json.load(f)
cat_breed = pd.DataFrame.from_dict(breed_data['cat_breeds']).T
dog_breed = pd.DataFrame.from_dict(breed_data['dog_breeds']).T
df = pd.concat([dog_breed, cat_breed], axis=0).reset_index().rename(columns={'index': 'BreedName'})
df.BreedName.replace(
{
'Siamese Cat': 'Siamese',
'Chinese Crested': 'Chinese Crested Dog',
'Australian Cattle Dog': 'Australian Cattle Dog/Blue Heeler',
'Yorkshire Terrier': 'Yorkshire Terrier Yorkie',
'Pembroke Welsh Corgi': 'Welsh Corgi',
'Sphynx': 'Sphynx (hairless cat)',
'Plott': 'Plott Hound',
'Korean Jindo Dog': 'Jindo',
'Anatolian Shepherd Dog': 'Anatolian Shepherd',
'Belgian Malinois': 'Belgian Shepherd Malinois',
'Belgian Sheepdog': 'Belgian Shepherd Dog Sheepdog',
'Belgian Tervuren': 'Belgian Shepherd Tervuren',
'Bengal Cats': 'Bengal',
'Bouvier des Flandres': 'Bouvier des Flanders',
'Brittany': 'Brittany Spaniel',
'Caucasian Shepherd Dog': 'Caucasian Sheepdog (Caucasian Ovtcharka)',
'Dandie Dinmont Terrier': 'Dandi Dinmont Terrier',
'Bulldog': 'English Bulldog',
'American English Coonhound': 'English Coonhound',
'Small Munsterlander Pointer': 'Munsterlander',
'Entlebucher Mountain Dog': 'Entlebucher',
'Exotic': 'Exotic Shorthair',
'Flat-Coated Retriever': 'Flat-coated Retriever',
'English Foxhound': 'Foxhound',
'Alaskan Klee Kai': 'Klee Kai',
'Newfoundland': 'Newfoundland Dog',
'Norwegian Forest': 'Norwegian Forest Cat',
'Nova Scotia Duck Tolling Retriever': 'Nova Scotia Duck-Tolling Retriever',
'American Pit Bull Terrier': 'Pit Bull Terrier',
'Ragdoll Cats': 'Ragdoll',
'Standard Schnauzer': 'Schnauzer',
'Scottish Terrier': 'Scottish Terrier Scottie',
'Chinese Shar-Pei': 'Shar Pei',
'Shetland Sheepdog': 'Shetland Sheepdog Sheltie',
'West Highland White Terrier': 'West Highland White Terrier Westie',
'Soft Coated Wheaten Terrier': 'Wheaten Terrier',
'Wirehaired Pointing Griffon': 'Wire-haired Pointing Griffon',
'Xoloitzcuintli': 'Wirehaired Terrier',
'Cane Corso': 'Cane Corso Mastiff',
'Havana Brown': 'Havana',
}, inplace=True
)
breeds = breeds.merge(df, how='left', on='BreedName')
breeds1_dic, breeds2_dic = {}, {}
for c in breeds.columns:
if c == "BreedID":
continue
breeds1_dic[c] = c + "_main_breed_all"
breeds2_dic[c] = c + "_second_breed_all"
train = train.merge(breeds.rename(columns=breeds1_dic), how='left', left_on='Breed1', right_on='BreedID')
train.drop(['BreedID'], axis=1, inplace=True)
train = train.merge(breeds.rename(columns=breeds2_dic), how='left', left_on='Breed2', right_on='BreedID')
train.drop(['BreedID'], axis=1, inplace=True)
return train
def merge_breed_name_sub(train):
breeds = pd.read_csv('../input/petfinder-adoption-prediction/breed_labels.csv')
df = pd.read_json('../input/cat-and-dog-breeds-parameters/rating.json')
    cat_df = df.cat_breeds.dropna().reset_index().rename(columns={'index': 'BreedName'})
    dog_df = df.dog_breeds.dropna().reset_index().rename(columns={'index': 'BreedName'})
    cat = cat_df['cat_breeds'].apply(lambda x: pd.Series(x))
    cat_df = pd.concat([cat_df, cat], axis=1).drop(['cat_breeds'], axis=1)
    dog = dog_df['dog_breeds'].apply(lambda x: pd.Series(x))
    dog_df = pd.concat([dog_df, dog], axis=1).drop(['dog_breeds'], axis=1)  # was concatenating `cat` by mistake
df = pd.concat([dog_df, cat_df])
df.BreedName.replace(
{
'Siamese Cat': 'Siamese',
'Chinese Crested': 'Chinese Crested Dog',
'Australian Cattle Dog': 'Australian Cattle Dog/Blue Heeler',
'Yorkshire Terrier': 'Yorkshire Terrier Yorkie',
'Pembroke Welsh Corgi': 'Welsh Corgi',
'Sphynx': 'Sphynx (hairless cat)',
'Plott': 'Plott Hound',
'Korean Jindo Dog': 'Jindo',
'Anatolian Shepherd Dog': 'Anatolian Shepherd',
'Belgian Malinois': 'Belgian Shepherd Malinois',
'Belgian Sheepdog': 'Belgian Shepherd Dog Sheepdog',
'Belgian Tervuren': 'Belgian Shepherd Tervuren',
'Bengal Cats': 'Bengal',
'Bouvier des Flandres': 'Bouvier des Flanders',
'Brittany': 'Brittany Spaniel',
'Caucasian Shepherd Dog': 'Caucasian Sheepdog (Caucasian Ovtcharka)',
'Dandie Dinmont Terrier': 'Dandi Dinmont Terrier',
'Bulldog': 'English Bulldog',
'American English Coonhound': 'English Coonhound',
'Small Munsterlander Pointer': 'Munsterlander',
'Entlebucher Mountain Dog': 'Entlebucher',
'Exotic': 'Exotic Shorthair',
'Flat-Coated Retriever': 'Flat-coated Retriever',
'English Foxhound': 'Foxhound',
'Alaskan Klee Kai': 'Klee Kai',
'Newfoundland': 'Newfoundland Dog',
'Norwegian Forest': 'Norwegian Forest Cat',
'Nova Scotia Duck Tolling Retriever': 'Nova Scotia Duck-Tolling Retriever',
'American Pit Bull Terrier': 'Pit Bull Terrier',
'Ragdoll Cats': 'Ragdoll',
'Standard Schnauzer': 'Schnauzer',
'Scottish Terrier': 'Scottish Terrier Scottie',
'Chinese Shar-Pei': 'Shar Pei',
'Shetland Sheepdog': 'Shetland Sheepdog Sheltie',
'West Highland White Terrier': 'West Highland White Terrier Westie',
'Soft Coated Wheaten Terrier': 'Wheaten Terrier',
'Wirehaired Pointing Griffon': 'Wire-haired Pointing Griffon',
'Xoloitzcuintli': 'Wirehaired Terrier',
'Cane Corso': 'Cane Corso Mastiff',
'Havana Brown': 'Havana',
}, inplace=True
)
breeds = breeds.merge(df, how='left', on='BreedName')
train = train.merge(breeds.rename(columns={'BreedName': 'BreedName_main_breed'}), how='left', left_on='Breed1',
right_on='BreedID', suffixes=('', '_main_breed'))
train.drop(['BreedID'], axis=1, inplace=True)
train = train.merge(breeds.rename(columns={'BreedName': 'BreedName_second_breed'}), how='left', left_on='Breed2',
right_on='BreedID', suffixes=('', '_second_breed'))
train.drop(['BreedID'], axis=1, inplace=True)
return train
def merge_breed_ranking(train):
breeds = pd.read_csv('../input/breed-labels-with-ranks/breed_labels_with_ranks.csv').drop("BreedName", axis=1)
train = train.merge(breeds, how="left", left_on="fix_Breed1", right_on="BreedID")
train = train.rename(columns={"BreedCatRank": "BreedCatRank_main", "BreedDogRank": "BreedDogRank_main"})
train = train.merge(breeds, how="left", left_on="fix_Breed2", right_on="BreedID")
train = train.rename(columns={"BreedCatRank": "BreedCatRank_second", "BreedDogRank": "BreedDogRank_second"})
return train
def breed_mismatch(train):
breed_labels = pd.read_csv('../input/petfinder-adoption-prediction/breed_labels.csv')
dog_breed_labels_set = list(breed_labels[breed_labels['Type'] == 1]['BreedID'])
dog_breed_labels_set.remove(307)
train['breeds_mismatch'] = list((train['Type'] == 2) & (
(train['fix_Breed1'].isin(dog_breed_labels_set)) | (train['fix_Breed2'].isin(dog_breed_labels_set))))
train['breeds_mismatch'] = train['breeds_mismatch'].astype(int)
return train
def breed_mismatch_desc(train):
train['desc_contain_dog'] = train['Description'].str.lower().str.contains(' dog | dogs ')
train['desc_contain_cat'] = train['Description'].str.lower().str.contains(' cat | cats ')
train['desc_miss_match'] = list((train['Type'] == 1) & (train['desc_contain_cat']))
train['desc_miss_match'] = train['desc_miss_match'].astype(int)
return train
def breed_mismatch_meta(train):
train['annot_contain_dog'] = train['annots_top_desc'].str.lower().str.contains(' dog | dogs ')
train['annot_contain_cat'] = train['annots_top_desc'].str.lower().str.contains(' cat | cats ')
train['annot_miss_match'] = list((train['Type'] == 1) & (train['annot_contain_cat']))
train['annot_miss_match'] = train['annot_miss_match'].astype(int)
return train
def extract_emojis(text, emoji_list):
return ' '.join(c for c in text if c in emoji_list)
def merge_emoji(train):
emoji = pd.read_csv('../input/emoji-sentiment-data/Emoji_Sentiment_Data_v1.0.csv')
emoji2 = pd.read_csv('../input/emoji-sentiment-data/Emojitracker_20150604.csv')
emoji = emoji.merge(emoji2, how='left', on='Emoji', suffixes=('', '_tracker'))
emoji_list = emoji['Emoji'].values
train_emoji = train['Description'].apply(extract_emojis, emoji_list=emoji_list)
train_emoji = pd.DataFrame([train['PetID'], train_emoji]).T.set_index('PetID')
train_emoji = train_emoji['Description'].str.extractall('(' + ')|('.join(emoji_list) + ')')
train_emoji = train_emoji.fillna(method='bfill', axis=1).iloc[:, 0].reset_index().rename(columns={0: 'Emoji'})
train_emoji = train_emoji.merge(emoji, how='left', on='Emoji')
emoji_columns = ['Occurrences', 'Position', 'Negative', 'Neutral', 'Positive', 'Occurrences_tracker']
stats = ['mean', 'max', 'min', 'median', 'std']
g = train_emoji.groupby('PetID')[emoji_columns].agg(stats)
g.columns = [c + '_' + stat for c in emoji_columns for stat in stats]
train = train.merge(g, how='left', on='PetID')
return train
def get_interactions(train):
interaction_features = ['Age', 'Quantity']
for (c1, c2) in combinations(interaction_features, 2):
train[c1 + '_mul_' + c2] = train[c1] * train[c2]
train[c1 + '_div_' + c2] = train[c1] / train[c2]
return train
def get_text_features(train):
train['Length_Description'] = train['Description'].map(len)
train['Length_annots_top_desc'] = train['annots_top_desc'].map(len)
train['Lengths_sentiment_text'] = train['sentiment_text'].map(len)
train['Lengths_sentiment_entities'] = train['sentiment_entities'].map(len)
return train
def get_name_features(train):
train['num_name_chars'] = train['Name'].apply(len)
train['num_name_capitals'] = train['Name'].apply(lambda x: sum(1 for c in x if c.isupper()))
train['name_caps_vs_length'] = train.apply(lambda row: row['num_name_capitals'] / (row['num_name_chars'] + 1e-5),
axis=1)
train['num_name_exclamation_marks'] = train['Name'].apply(lambda x: x.count('!'))
train['num_name_question_marks'] = train['Name'].apply(lambda x: x.count('?'))
train['num_name_punctuation'] = train['Name'].apply(lambda x: sum(x.count(w) for w in '.,;:'))
train['num_name_symbols'] = train['Name'].apply(lambda x: sum(x.count(w) for w in '*&$%'))
train['num_name_words'] = train['Name'].apply(lambda x: len(x.split()))
return train
class MetaDataParser(object):
def __init__(self):
train_sentiment_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_sentiment/*.json'))
test_sentiment_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_sentiment/*.json'))
sentiment_files = train_sentiment_files + test_sentiment_files
self.sentiment_files = pd.DataFrame(sentiment_files, columns=['sentiment_filename'])
self.sentiment_files['PetID'] = self.sentiment_files['sentiment_filename'].apply(
lambda x: x.split('/')[-1].split('.')[0])
train_metadata_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_metadata/*.json'))
test_metadata_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_metadata/*.json'))
metadata_files = train_metadata_files + test_metadata_files
self.metadata_files = pd.DataFrame(metadata_files, columns=['metadata_filename'])
self.metadata_files['PetID'] = self.metadata_files['metadata_filename'].apply(
lambda x: x.split('/')[-1].split('-')[0])
def open_json_file(self, filename):
with open(filename, 'r', encoding="utf-8") as f:
metadata_file = json.load(f)
return metadata_file
def get_stats(self, array, name):
stats = [np.mean, np.max, np.min, np.sum, np.var]
result = {}
if len(array):
for stat in stats:
result[name + '_' + stat.__name__] = stat(array)
else:
for stat in stats:
result[name + '_' + stat.__name__] = 0
return result
def parse_sentiment_file(self, file):
file_sentiment = file['documentSentiment']
file_entities = [x['name'] for x in file['entities']]
file_entities = ' '.join(file_entities)
file_sentences_text = [x['text']['content'] for x in file['sentences']]
file_sentences_text = ' '.join(file_sentences_text)
file_sentences_sentiment = [x['sentiment'] for x in file['sentences']]
file_sentences_sentiment_sum = pd.DataFrame.from_dict(
file_sentences_sentiment, orient='columns').sum()
file_sentences_sentiment_sum = file_sentences_sentiment_sum.add_prefix('document_sum_').to_dict()
file_sentences_sentiment_mean = pd.DataFrame.from_dict(
file_sentences_sentiment, orient='columns').mean()
file_sentences_sentiment_mean = file_sentences_sentiment_mean.add_prefix('document_mean_').to_dict()
        file_sentences_sentiment_var = pd.DataFrame.from_dict(
            file_sentences_sentiment, orient='columns').var()  # was .sum(), which duplicated the document_sum_ features
file_sentences_sentiment_var = file_sentences_sentiment_var.add_prefix('document_var_').to_dict()
file_sentiment.update(file_sentences_sentiment_mean)
file_sentiment.update(file_sentences_sentiment_sum)
file_sentiment.update(file_sentences_sentiment_var)
file_sentiment.update({"sentiment_text": file_sentences_text})
file_sentiment.update({"sentiment_entities": file_entities})
return pd.Series(file_sentiment)
def parse_metadata(self, file):
file_keys = list(file.keys())
if 'labelAnnotations' in file_keys:
label_annotations = file['labelAnnotations']
file_top_score = [x['score'] for x in label_annotations]
pick_value = int(len(label_annotations) * 0.3)
            if pick_value == 0:
                pick_value = 1  # always keep at least one label annotation
file_top_score_pick = [x['score'] for x in label_annotations[:pick_value]]
file_top_desc = [x['description'] for x in label_annotations]
file_top_desc_pick = [x['description'] for x in label_annotations[:pick_value]]
dog_cat_scores = []
dog_cat_topics = []
is_dog_or_cat = []
for label in label_annotations:
if label['description'] == 'dog' or label['description'] == 'cat':
dog_cat_scores.append(label['score'])
dog_cat_topics.append(label['topicality'])
is_dog_or_cat.append(1)
else:
is_dog_or_cat.append(0)
else:
file_top_score = []
file_top_desc = []
dog_cat_scores = []
dog_cat_topics = []
is_dog_or_cat = []
file_top_score_pick = []
file_top_desc_pick = []
if 'faceAnnotations' in file_keys:
file_face = file['faceAnnotations']
n_faces = len(file_face)
else:
n_faces = 0
if 'textAnnotations' in file_keys:
text_annotations = file['textAnnotations']
file_n_text_annotations = len(text_annotations)
file_len_text = [len(text['description']) for text in text_annotations]
else:
file_n_text_annotations = 0
file_len_text = []
file_colors = file['imagePropertiesAnnotation']['dominantColors']['colors']
file_crops = file['cropHintsAnnotation']['cropHints']
file_color_score = [x['score'] for x in file_colors]
file_color_pixelfrac = [x['pixelFraction'] for x in file_colors]
file_color_red = [x['color']['red'] if 'red' in x['color'].keys() else 0 for x in file_colors]
file_color_blue = [x['color']['blue'] if 'blue' in x['color'].keys() else 0 for x in file_colors]
file_color_green = [x['color']['green'] if 'green' in x['color'].keys() else 0 for x in file_colors]
file_crop_conf = np.mean([x['confidence'] for x in file_crops])
file_crop_x = np.mean([x['boundingPoly']['vertices'][1]['x'] for x in file_crops])
file_crop_y = np.mean([x['boundingPoly']['vertices'][3]['y'] for x in file_crops])
if 'importanceFraction' in file_crops[0].keys():
file_crop_importance = np.mean([x['importanceFraction'] for x in file_crops])
else:
file_crop_importance = 0
metadata = {
'annots_top_desc': ' '.join(file_top_desc),
'annots_top_desc_pick': ' '.join(file_top_desc_pick),
'annots_score_pick_mean': np.mean(file_top_score_pick),
'n_faces': n_faces,
'n_text_annotations': file_n_text_annotations,
'crop_conf': file_crop_conf,
'crop_x': file_crop_x,
'crop_y': file_crop_y,
'crop_importance': file_crop_importance,
}
metadata.update(self.get_stats(file_top_score, 'annots_score_normal'))
metadata.update(self.get_stats(file_color_score, 'color_score'))
metadata.update(self.get_stats(file_color_pixelfrac, 'color_pixel_score'))
metadata.update(self.get_stats(file_color_red, 'color_red_score'))
metadata.update(self.get_stats(file_color_blue, 'color_blue_score'))
metadata.update(self.get_stats(file_color_green, 'color_green_score'))
metadata.update(self.get_stats(dog_cat_scores, 'dog_cat_scores'))
metadata.update(self.get_stats(dog_cat_topics, 'dog_cat_topics'))
metadata.update(self.get_stats(is_dog_or_cat, 'is_dog_or_cat'))
metadata.update(self.get_stats(file_len_text, 'len_text'))
metadata.update({"color_red_score_first": file_color_red[0] if len(file_color_red) > 0 else -1})
metadata.update({"color_blue_score_first": file_color_blue[0] if len(file_color_blue) > 0 else -1})
metadata.update({"color_green_score_first": file_color_green[0] if len(file_color_green) > 0 else -1})
metadata.update({"color_pixel_score_first": file_color_pixelfrac[0] if len(file_color_pixelfrac) > 0 else -1})
metadata.update({"color_score_first": file_color_score[0] if len(file_color_score) > 0 else -1})
metadata.update({"label_score_first": file_top_score[0] if len(file_top_score) > 0 else -1})
return pd.Series(metadata)
def _transform(self, path, sentiment=True):
file = self.open_json_file(path)
if sentiment:
result = self.parse_sentiment_file(file)
else:
result = self.parse_metadata(file)
return result
def pretrained_w2v(train_text, model, name):
train_corpus = [text_to_word_sequence(text) for text in train_text]
result = []
for text in train_corpus:
n_skip = 0
vec = np.zeros(model.vector_size)
for n_w, word in enumerate(text):
if word in model:
vec = vec + model.wv[word]
continue
word_ = word.upper()
if word_ in model:
vec = vec + model.wv[word_]
continue
word_ = word.capitalize()
if word_ in model:
vec = vec + model.wv[word_]
continue
word_ = ps.stem(word)
if word_ in model:
vec = vec + model.wv[word_]
continue
word_ = lc.stem(word)
if word_ in model:
vec = vec + model.wv[word_]
continue
word_ = sb.stem(word)
if word_ in model:
vec = vec + model.wv[word_]
continue
else:
n_skip += 1
continue
vec = vec / (n_w - n_skip + 1)
result.append(vec)
w2v_cols = ["{}{}".format(name, i) for i in range(1, model.vector_size + 1)]
result = pd.DataFrame(result)
result.columns = w2v_cols
return result
def w2v_pymagnitude(train_text, model, name):
train_corpus = [text_to_word_sequence(text) for text in train_text]
result = []
for text in train_corpus:
vec = np.zeros(model.dim)
for n_w, word in enumerate(text):
if word in model:
vec = vec + model.query(word)
continue
word_ = word.upper()
if word_ in model:
vec = vec + model.query(word_)
continue
word_ = word.capitalize()
if word_ in model:
vec = vec + model.query(word_)
continue
word_ = ps.stem(word)
if word_ in model:
vec = vec + model.query(word_)
continue
word_ = lc.stem(word)
if word_ in model:
vec = vec + model.query(word_)
continue
word_ = sb.stem(word)
if word_ in model:
vec = vec + model.query(word_)
continue
vec = vec + model.query(word)
vec = vec / (n_w + 1)
result.append(vec)
w2v_cols = ["{}{}".format(name, i) for i in range(1, model.dim + 1)]
result = pd.DataFrame(result)
result.columns = w2v_cols
return result
def doc2vec(description_k, d2v_param):
corpus = [TaggedDocument(words=analyzer_k(text), tags=[i]) for i, text in enumerate(description_k)]
doc2vecs = Doc2Vec(
documents=corpus, dm=1,
**d2v_param
)
doc2vecs = np.array([doc2vecs.infer_vector(analyzer_k(text)) for text in description_k])
doc2vec_df = pd.DataFrame()
doc2vec_df['d2v_mean'] = np.mean(doc2vecs, axis=1)
doc2vec_df['d2v_sum'] = np.sum(doc2vecs, axis=1)
doc2vec_df['d2v_max'] = np.max(doc2vecs, axis=1)
doc2vec_df['d2v_min'] = np.min(doc2vecs, axis=1)
doc2vec_df['d2v_median'] = np.median(doc2vecs, axis=1)
doc2vec_df['d2v_var'] = np.var(doc2vecs, axis=1)
return doc2vec_df
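# doc2vec sketch: a distributed-memory Doc2Vec (dm=1) is trained on the corpus,
# one vector is re-inferred per description, and each vector is collapsed into
# scalar summary statistics (mean/sum/max/min/median/var) instead of keeping
# all dimensions. Note the d2v_param keys are forwarded via **d2v_param, so
# they must match the Doc2Vec keyword names of the installed gensim version.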
def resize_to_square(im):
old_size = im.shape[:2]
ratio = float(img_size) / max(old_size)
new_size = tuple([int(x * ratio) for x in old_size])
im = cv2.resize(im, (new_size[1], new_size[0]))
delta_w = img_size - new_size[1]
delta_h = img_size - new_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
color = [0, 0, 0]
new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
return new_im
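# Worked example for resize_to_square with img_size = 256 (hypothetical input):
# a 512x384 (h x w) image has ratio 256/512 = 0.5, so it is resized to 256x192
# and padded with 32 black columns on each side (delta_w = 64), producing a
# 256x256 letterboxed image with the original aspect ratio preserved.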
def load_image(path, preprocesssing):
image = cv2.imread(path)
new_image = resize_to_square(image)
new_image = preprocesssing(new_image)
return new_image
def get_age_feats(df):
df["Age_year"] = (df["Age"] / 12).astype(np.int32)
over_1year_flag = df["Age"] / 12 >= 1
df.loc[over_1year_flag, "over_1year"] = 1
df.loc[~over_1year_flag, "over_1year"] = 0
return df
def freq_encoding(df, freq_cols):
for c in freq_cols:
count_df = df.groupby([c])['PetID'].count().reset_index()
count_df.columns = [c, '{}_freq'.format(c)]
df = df.merge(count_df, how='left', on=c)
return df
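# freq_encoding sketch (hypothetical column): for c = 'RescuerID', a rescuer
# appearing on 40 rows of the concatenated frame gets RescuerID_freq = 40 on
# each of those rows; the count is simply the number of rows per group.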
def getSize(filename):
st = os.stat(filename)
return st.st_size
def getDimensions(filename):
    dimensions = Image.open(filename).size  # (width, height); renamed to avoid shadowing the global img_size
    return dimensions
def is_zh(in_str):
    # True if the string contains characters that do not survive a round trip
    # through Shift-JIS encoding; used here as a cheap CJK-text detector
    return (set(in_str) - set(in_str.encode('sjis', 'ignore').decode('sjis'))) != set([])
def get_score(y_true, y_pred):
return cohen_kappa_score(y_true, y_pred, weights='quadratic')
def get_y():
return pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv', usecols=[target]).values.flatten()
def run_model(X_train, y_train, X_valid, y_valid, X_test,
categorical_features,
predictors, maxvalue_dict, fold_id, params, model_name):
train = lgb.Dataset(X_train, y_train,
categorical_feature=categorical_features,
feature_name=predictors)
valid = lgb.Dataset(X_valid, y_valid,
categorical_feature=categorical_features,
feature_name=predictors)
evals_result = {}
model = lgb.train(
params,
train,
valid_sets=[valid],
valid_names=['valid'],
evals_result=evals_result,
**FIT_PARAMS
)
logger.info(f'Best Iteration: {model.best_iteration}')
y_pred_train = model.predict(X_train)
train_rmse = np.sqrt(mean_squared_error(y_train, y_pred_train))
y_pred_valid = model.predict(X_valid)
valid_rmse = np.sqrt(mean_squared_error(y_valid, y_pred_valid))
y_pred_valid = rankdata(y_pred_valid) / len(y_pred_valid)
model.save_model(f'{model_name}_fold{fold_id}.txt')
y_pred_test = model.predict(X_test)
y_pred_test = rankdata(y_pred_test) / len(y_pred_test)
np.save(f'{model_name}_train_fold{fold_id}.npy', y_pred_valid)
np.save(f'{model_name}_test_fold{fold_id}.npy', y_pred_test)
return y_pred_valid, y_pred_test, train_rmse, valid_rmse
def run_xgb_model(X_train, y_train, X_valid, y_valid, X_test,
predictors, maxvalue_dict, fold_id, params, model_name):
d_train = xgb.DMatrix(data=X_train, label=y_train, feature_names=predictors)
d_valid = xgb.DMatrix(data=X_valid, label=y_valid, feature_names=predictors)
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
model = xgb.train(dtrain=d_train, evals=watchlist, params=params, **FIT_PARAMS)
y_pred_train = model.predict(d_train, ntree_limit=model.best_ntree_limit)
train_rmse = np.sqrt(mean_squared_error(y_train, y_pred_train))
y_pred_valid = model.predict(d_valid, ntree_limit=model.best_ntree_limit)
valid_rmse = np.sqrt(mean_squared_error(y_valid, y_pred_valid))
y_pred_valid = rankdata(y_pred_valid) / len(y_pred_valid)
model.save_model(f'{model_name}_fold{fold_id}.txt')
y_pred_test = model.predict(xgb.DMatrix(data=X_test, feature_names=predictors), ntree_limit=model.best_ntree_limit)
y_pred_test = rankdata(y_pred_test) / len(y_pred_test)
np.save(f'{model_name}_train_fold{fold_id}.npy', y_pred_valid)
np.save(f'{model_name}_test_fold{fold_id}.npy', y_pred_test)
return y_pred_valid, y_pred_test, train_rmse, valid_rmse
def plot_mean_feature_importances(feature_importances, max_num=50, importance_type='gain', path=None):
mean_gain = feature_importances[[importance_type, 'feature']].groupby('feature').mean()
feature_importances['mean_' + importance_type] = feature_importances['feature'].map(mean_gain[importance_type])
if path is not None:
data = feature_importances.sort_values('mean_' + importance_type, ascending=False).iloc[:max_num, :]
plt.clf()
plt.figure(figsize=(16, 8))
sns.barplot(x=importance_type, y='feature', data=data)
plt.tight_layout()
plt.savefig(path)
return feature_importances
def to_bins(x, borders):
for i in range(len(borders)):
if x <= borders[i]:
return i
return len(borders)
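# to_bins example: with borders = [0.2, 0.4, 0.6, 0.8],
#   to_bins(0.15, borders) -> 0
#   to_bins(0.45, borders) -> 2
#   to_bins(0.95, borders) -> 4  (past the last border)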
class OptimizedRounder(object):
def __init__(self):
self.coef_ = 0
def _loss(self, coef, X, y, idx):
X_p = np.array([to_bins(pred, coef) for pred in X])
ll = -get_score(y, X_p)
return ll
def fit(self, X, y):
coef = [0.2, 0.4, 0.6, 0.8]
golden1 = 0.618
golden2 = 1 - golden1
ab_start = [(0.01, 0.3), (0.15, 0.56), (0.35, 0.75), (0.6, 0.9)]
for it1 in range(10):
for idx in range(4):
a, b = ab_start[idx]
coef[idx] = a
la = self._loss(coef, X, y, idx)
coef[idx] = b
lb = self._loss(coef, X, y, idx)
for it in range(20):
if la > lb:
a = b - (b - a) * golden1
coef[idx] = a
la = self._loss(coef, X, y, idx)
else:
b = b - (b - a) * golden2
coef[idx] = b
lb = self._loss(coef, X, y, idx)
self.coef_ = {'x': coef}
def predict(self, X, coef):
X_p = np.array([to_bins(pred, coef) for pred in X])
return X_p
def coefficients(self):
return self.coef_['x']
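# Hypothetical usage sketch of OptimizedRounder (inputs are made up): fit the
# four thresholds on out-of-fold predictions scaled roughly into [0, 1], as
# y_pred is in this script, then discretize into the five target classes.
def _demo_optimized_rounder():
    rng = np.random.RandomState(0)
    y_true = rng.randint(0, 5, size=200)
    y_raw = (y_true + rng.normal(0, 0.8, size=200)) / 4.0
    opt = OptimizedRounder()
    opt.fit(y_raw, y_true)
    y_hat = opt.predict(y_raw, opt.coefficients())
    assert set(np.unique(y_hat)) <= {0, 1, 2, 3, 4}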
class StratifiedGroupKFold():
def __init__(self, n_splits=5):
self.n_splits = n_splits
def split(self, X, y=None, groups=None):
fold = pd.DataFrame([X, y, groups]).T
fold.columns = ['X', 'y', 'groups']
fold['y'] = fold['y'].astype(int)
g = fold.groupby('groups')['y'].agg('mean').reset_index()
fold = fold.merge(g, how='left', on='groups', suffixes=('', '_mean'))
fold['y_mean'] = fold['y_mean'].apply(np.round)
fold['fold_id'] = 0
for unique_y in fold['y_mean'].unique():
mask = fold.y_mean == unique_y
selected = fold[mask].reset_index(drop=True)
            cv = GroupKFold(n_splits=self.n_splits)
for i, (train_index, valid_index) in enumerate(
cv.split(range(len(selected)), y=None, groups=selected['groups'])):
selected.loc[valid_index, 'fold_id'] = i
fold.loc[mask, 'fold_id'] = selected['fold_id'].values
for i in range(self.n_splits):
indices = np.arange(len(fold))
train_index = indices[fold['fold_id'] != i]
valid_index = indices[fold['fold_id'] == i]
yield train_index, valid_index
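# Hypothetical usage sketch of StratifiedGroupKFold on toy data: each group
# (playing the role of RescuerID) lands in exactly one fold, while per-group
# mean targets keep the label distribution balanced across folds.
def _demo_stratified_group_kfold():
    groups = np.repeat(np.arange(25), 4)  # 25 groups, 4 rows each
    y_demo = groups % 5                   # constant target per group
    X_demo = np.arange(len(groups))
    cv = StratifiedGroupKFold(n_splits=5)
    for tr_idx, va_idx in cv.split(X_demo, y=y_demo, groups=groups):
        assert set(groups[tr_idx]).isdisjoint(groups[va_idx])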
if __name__ == '__main__':
init_logger()
t_cols, k_cols, g_cols = [], [], []
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
test = pd.read_csv('../input/petfinder-adoption-prediction/test/test.csv')
    train = pd.concat([train, test], sort=True).reset_index(drop=True)
train[['Description', 'Name']] = train[['Description', 'Name']].astype(str)
train["Description_Emb"] = [analyzer_embed(text) for text in train["Description"]]
train["Description_bow"] = [analyzer_bow(text) for text in train["Description"]]
train['fix_Breed1'] = train['Breed1']
train['fix_Breed2'] = train['Breed2']
train.loc[train['Breed1'] == 0, 'fix_Breed1'] = train[train['Breed1'] == 0]['Breed2']
train.loc[train['Breed1'] == 0, 'fix_Breed2'] = train[train['Breed1'] == 0]['Breed1']
train['Breed1_equals_Breed2'] = (train['Breed1'] == train['Breed2']).astype(int)
train['single_Breed'] = (train['Breed1'] * train['Breed2'] == 0).astype(int)
train.drop(["Breed1", "Breed2"], axis=1)
train.rename(columns={"fix_Breed1": "Breed1", "fix_Breed2": "Breed2"})
logger.info(f'DataFrame shape: {train.shape}')
with timer('common features'):
with timer('merge additional state files'):
train = merge_state_info(train)
common_cols = list(train.columns)
with timer('merge additional breed rating files'):
orig_cols = list(train.columns)
train = merge_breed_name_sub(train)
t_cols += [c for c in train.columns if c not in orig_cols]
k_cols += [c for c in train.columns if c not in orig_cols]
orig_cols = list(train.columns)
train = merge_breed_name(train)
g_cols += [c for c in train.columns if c not in orig_cols and "_main_breed_all" in c] + [
"Type_second_breed"]
with timer('preprocess category features'):
train = to_category(train, cat=categorical_features)
            train[text_features] = train[text_features].fillna('missing')
with timer('preprocess metadata'):
meta_parser = MetaDataParser()
sentiment_features = meta_parser.sentiment_files['sentiment_filename'].apply(
lambda x: meta_parser._transform(x, sentiment=True))
meta_parser.sentiment_files = pd.concat([meta_parser.sentiment_files, sentiment_features], axis=1,
sort=False)
meta_features = meta_parser.metadata_files['metadata_filename'].apply(
lambda x: meta_parser._transform(x, sentiment=False))
meta_parser.metadata_files = pd.concat([meta_parser.metadata_files, meta_features], axis=1, sort=False)
stats = ['mean']
columns = [c for c in sentiment_features.columns if c not in ['sentiment_text', 'sentiment_entities']]
g = meta_parser.sentiment_files[list(sentiment_features.columns) + ['PetID']].groupby('PetID').agg(stats)
g.columns = [c + '_' + stat for c in columns for stat in stats]
train = train.merge(g, how='left', on='PetID')
            k_cols += [c for c in g.columns if re.match(r"\w*_mean_\w*mean", c)] + ["magnitude_mean", "score_mean"]
            t_cols += [c for c in g.columns if re.match(r"\w*_sum_\w*mean", c)] + ["magnitude_mean", "score_mean"]
g_cols += list(g.columns)
stats = ['mean', 'min', 'max', 'median', 'var', 'sum', 'first']
columns = [c for c in meta_features.columns if c not in ['annots_top_desc', 'annots_top_desc_pick']]
g = meta_parser.metadata_files[columns + ['PetID']].groupby('PetID').agg(stats)
g.columns = [c + '_' + stat for c in columns for stat in stats]
train = train.merge(g, how='left', on='PetID')
k_cols += [c for c in g.columns if
("mean_mean" in c or "mean_sum" in c or "first_first" in c) and "annots_score_normal" not in c] + \
['crop_conf_first', 'crop_x_first', 'crop_y_first', 'crop_importance_first', 'crop_conf_mean',
'crop_conf_sum', 'crop_importance_mean', 'crop_importance_sum']
            t_cols += [c for c in g.columns
                       if ((re.match(r"\w*_sum_\w*(?<!sum)$", c) and "first" not in c)
                           or ("sum" not in c and "first" not in c)) and "annots_score_pick" not in c]
g_cols += [c for c in g.columns if
"mean_mean" in c or "mean_sum" in c or "mean_var" in c and "annots_score_pick" not in c] + \
['crop_conf_mean', 'crop_conf_sum', 'crop_conf_var', 'crop_importance_mean',
'crop_importance_sum', 'crop_importance_var']
with timer('preprocess metatext'):
meta_features = meta_parser.metadata_files[['PetID', 'annots_top_desc', 'annots_top_desc_pick']]
meta_features_all = meta_features.groupby('PetID')['annots_top_desc'].apply(
lambda x: " ".join(x)).reset_index()
train = train.merge(meta_features_all, how='left', on='PetID')
meta_features_pick = meta_features.groupby('PetID')['annots_top_desc_pick'].apply(
lambda x: " ".join(x)).reset_index()
train = train.merge(meta_features_pick, how='left', on='PetID')
sentiment_features = meta_parser.sentiment_files[['PetID', 'sentiment_text', 'sentiment_entities']]
sentiment_features_txt = sentiment_features.groupby('PetID')['sentiment_text'].apply(
lambda x: " ".join(x)).reset_index()
train = train.merge(sentiment_features_txt, how='left', on='PetID')
sentiment_features_entities = sentiment_features.groupby('PetID')['sentiment_entities'].apply(
lambda x: " ".join(x)).reset_index()
train = train.merge(sentiment_features_entities, how='left', on='PetID')
train[meta_text] = train[meta_text].astype(str)
train[meta_text].fillna("missing", inplace=True)
del meta_features_all, meta_features_pick, meta_features, sentiment_features;
gc.collect()
with timer('make image features'):
train_image_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_images/*.jpg'))
test_image_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_images/*.jpg'))
image_files = train_image_files + test_image_files
train_images = pd.DataFrame(image_files, columns=['image_filename'])
train_images['PetID'] = train_images['image_filename'].apply(lambda x: x.split('/')[-1].split('-')[0])
with timer('breed mismatch features'):
train = breed_mismatch(train)
train = breed_mismatch_desc(train)
train = breed_mismatch_meta(train)
t_cols += ['breeds_mismatch', 'desc_contain_dog', 'desc_contain_cat', 'desc_miss_match',
'annot_contain_dog', 'annot_contain_cat', 'annot_miss_match']
k_cols += ['breeds_mismatch', 'desc_miss_match', 'annot_miss_match']
with timer('preprocess densenet'):
if debug:
import feather
X = feather.read_dataframe("feature/dense121_2_X.feather")
gp_img = X.groupby("PetID").mean().reset_index()
train = pd.merge(train, gp_img, how="left", on="PetID")
gp_dense_first = X.groupby("PetID").first().reset_index()
t_cols += list(gp_img.drop("PetID", axis=1).columns)
del gp_img;
gc.collect()
else:
pet_ids = train_images['PetID'].values
img_pathes = train_images['image_filename'].values
n_batches = len(pet_ids) // batch_size + 1
inp = Input((256, 256, 3))
backbone = DenseNet121(input_tensor=inp,
weights='../input/densenet121weights/densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',
include_top=False)
x = backbone.output
x = GlobalAveragePooling2D()(x)
x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)
x = AveragePooling1D(4)(x)
out = Lambda(lambda x: x[:, :, 0])(x)
m = Model(inp, out)
features = []
for b in range(n_batches):
start = b * batch_size
end = (b + 1) * batch_size
batch_pets = pet_ids[start: end]
batch_path = img_pathes[start: end]
batch_images = np.zeros((len(batch_pets), img_size, img_size, 3))
for i, (pet_id, path) in enumerate(zip(batch_pets, batch_path)):
                        # Retry a failed image read once; keep the zero image if it fails twice.
                        for _attempt in range(2):
                            try:
                                batch_images[i] = load_image(path, preprocess_input_dense)
                                break
                            except Exception:
                                pass
batch_preds = m.predict(batch_images)
for i, pet_id in enumerate(batch_pets):
features.append([pet_id] + list(batch_preds[i]))
X = pd.DataFrame(features,
columns=["PetID"] + ["dense121_2_{}".format(i) for i in range(batch_preds.shape[1])])
gp_img = X.groupby("PetID").mean().reset_index()
train = pd.merge(train, gp_img, how="left", on="PetID")
gp_dense_first = X.groupby("PetID").first().reset_index()
t_cols += list(gp_img.drop("PetID", axis=1).columns)
del m, gp_img;
gc.collect();
K.clear_session()
if T_flag:
with timer('takuoko features'):
orig_cols = train.columns
with timer('merge emoji files'):
train = merge_emoji(train)
with timer('preprocess breed files'):
train = merge_breed_ranking(train)
with timer('preprocess and simple features'):
train = get_interactions(train)
with timer('tfidf + svd / nmf / bm25'):
vectorizer = make_pipeline(
TfidfVectorizer(),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['tfidf_svd_{}'.format(i) for i in range(n_components)]
+ ['tfidf_nmf_{}'.format(i) for i in range(n_components)]
+ ['tfidf_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('count + svd / nmf / bm25'):
vectorizer = make_pipeline(
CountVectorizer(),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['count_svd_{}'.format(i) for i in range(n_components)]
+ ['count_nmf_{}'.format(i) for i in range(n_components)]
+ ['count_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('tfidf2 + svd / nmf / bm25'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=2, max_features=20000,
strip_accents='unicode', analyzer='word', token_pattern=r'\w{1,}',
ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english'),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['tfidf2_svd_{}'.format(i) for i in range(n_components)]
+ ['tfidf2_nmf_{}'.format(i) for i in range(n_components)]
+ ['tfidf2_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('count2 + svd / nmf / bm25'):
vectorizer = make_pipeline(
CountVectorizer(min_df=2, max_features=20000,
strip_accents='unicode', analyzer='word', token_pattern=r'\w{1,}',
ngram_range=(1, 3), stop_words='english'),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['count2_svd_{}'.format(i) for i in range(n_components)]
+ ['count2_nmf_{}'.format(i) for i in range(n_components)]
+ ['count2_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('tfidf3 + svd / nmf / bm25'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=30, max_features=50000, binary=True,
strip_accents='unicode', analyzer='char', token_pattern=r'\w{1,}',
ngram_range=(3, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english'),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['tfidf3_svd_{}'.format(i) for i in range(n_components)]
+ ['tfidf3_nmf_{}'.format(i) for i in range(n_components)]
+ ['tfidf3_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('count3 + svd / nmf / bm25'):
vectorizer = make_pipeline(
CountVectorizer(min_df=30, max_features=50000, binary=True,
strip_accents='unicode', analyzer='char', token_pattern=r'\w{1,}',
ngram_range=(3, 3), stop_words='english'),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['count3_svd_{}'.format(i) for i in range(n_components)]
+ ['count3_nmf_{}'.format(i) for i in range(n_components)]
+ ['count3_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('meta text bow/tfidf->svd / nmf / bm25'):
train['desc'] = ''
for c in ['BreedName_main_breed', 'BreedName_second_breed', 'annots_top_desc', 'sentiment_text']:
train['desc'] += ' ' + train[c].astype(str)
train["desc_bow"] = [analyzer_bow(text) for text in train["desc"]]
vectorizer = make_pipeline(
TfidfVectorizer(),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['desc_bow'])
X = pd.DataFrame(X, columns=['meta_desc_tfidf_svd_{}'.format(i) for i in range(n_components)]
+ ['meta_desc_tfidf_nmf_{}'.format(i) for i in range(n_components)]
+ ['meta_desc_tfidf_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
vectorizer = make_pipeline(
CountVectorizer(),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['desc_bow'])
X = pd.DataFrame(X, columns=['meta_desc_count_svd_{}'.format(i) for i in range(n_components)]
+ ['meta_desc_count_nmf_{}'.format(i) for i in range(n_components)]
+ ['meta_desc_count_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
train.drop(['desc_bow', 'desc'], axis=1, inplace=True)
with timer('description fasttext'):
embedding = '../input/quora-embedding/GoogleNews-vectors-negative300.bin'
model = KeyedVectors.load_word2vec_format(embedding, binary=True)
X = pretrained_w2v(train["Description_Emb"], model, name="gnvec")
train = pd.concat([train, X], axis=1)
del model;
gc.collect()
with timer('description glove'):
embedding = "../input/pymagnitude-data/glove.840B.300d.magnitude"
model = Magnitude(embedding)
X = w2v_pymagnitude(train["Description_Emb"], model, name="glove_mag")
train = pd.concat([train, X], axis=1)
del model;
gc.collect()
with timer('image features'):
                train['num_images'] = train['PetID'].map(train_images['PetID'].value_counts()).fillna(0)
train['num_images_per_pet'] = train['num_images'] / train['Quantity']
with timer('make inception resnet features'):
if debug:
import feather
X = feather.read_dataframe("feature/inception_resnet.feather")
train = pd.concat((train, X), axis=1)
else:
pet_ids = train_images['PetID'].values
img_pathes = train_images['image_filename'].values
n_batches = len(pet_ids) // batch_size + 1
inp = Input((256, 256, 3))
backbone = InceptionResNetV2(input_tensor=inp,
weights='../input/inceptionresnetv2/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5',
include_top=False)
x = backbone.output
x = GlobalAveragePooling2D()(x)
x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)
x = AveragePooling1D(4)(x)
out = Lambda(lambda x: x[:, :, 0])(x)
m = Model(inp, out)
features = []
for b in range(n_batches):
start = b * batch_size
end = (b + 1) * batch_size
batch_pets = pet_ids[start: end]
batch_path = img_pathes[start: end]
batch_images = np.zeros((len(batch_pets), img_size, img_size, 3))
for i, (pet_id, path) in enumerate(zip(batch_pets, batch_path)):
                                # Retry a failed image read once; keep the zero image if it fails twice.
                                for _attempt in range(2):
                                    try:
                                        batch_images[i] = load_image(path, preprocess_input_incep)
                                        break
                                    except Exception:
                                        pass
batch_preds = m.predict(batch_images)
for i, pet_id in enumerate(batch_pets):
features.append([pet_id] + list(batch_preds[i]))
X = pd.DataFrame(features, columns=["PetID"] + ["inception_resnet_{}".format(i) for i in
range(batch_preds.shape[1])])
gp_img = X.groupby("PetID").mean().reset_index()
train = pd.merge(train, gp_img, how="left", on="PetID")
del m, gp_img;
gc.collect();
K.clear_session()
with timer('aggregation'):
stats = ['mean', 'sum', 'median', 'min', 'max', 'var']
groupby_dict = [
{
'key': ['Name'],
'var': ['Age'],
'agg': ['count']
},
{
'key': ['RescuerID'],
'var': ['Age'],
'agg': ['count']
},
{
'key': ['RescuerID', 'State'],
'var': ['Age'],
'agg': ['count']
},
{
'key': ['RescuerID', 'Type'],
'var': ['Age'],
'agg': ['count']
},
{
'key': ['RescuerID'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['RescuerID', 'State'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['RescuerID', 'Type'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['Type', 'Breed1', 'Breed2'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['Type', 'Breed1'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['State'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['MaturitySize'],
'var': ['Age', 'Quantity', 'Sterilized', 'Fee'],
'agg': stats
},
]
nunique_dict = [
{
'key': ['State'],
'var': ['RescuerID'],
'agg': ['nunique']
},
{
'key': ['Dewormed'],
'var': ['RescuerID'],
'agg': ['nunique']
},
{
'key': ['Type'],
'var': ['RescuerID'],
'agg': ['nunique']
},
{
'key': ['Type', 'Breed1'],
'var': ['RescuerID'],
'agg': ['nunique']
},
]
groupby = GroupbyTransformer(param_dict=nunique_dict)
train = groupby.transform(train)
groupby = GroupbyTransformer(param_dict=groupby_dict)
train = groupby.transform(train)
diff = DiffGroupbyTransformer(param_dict=groupby_dict)
train = diff.transform(train)
ratio = RatioGroupbyTransformer(param_dict=groupby_dict)
train = ratio.transform(train)
with timer('category embedding'):
train[['BreedName_main_breed', 'BreedName_second_breed']] = \
train[['BreedName_main_breed', 'BreedName_second_breed']].astype("int32")
for c in categorical_features:
train[c] = train[c].fillna(train[c].max() + 1)
cv = CategoryVectorizer(categorical_features, n_components,
vectorizer=CountVectorizer(),
transformer=LatentDirichletAllocation(n_components=n_components, n_jobs=-1,
learning_method='online',
random_state=777),
name='CountLDA')
features1 = cv.transform(train).astype(np.float32)
cv = CategoryVectorizer(categorical_features, n_components,
vectorizer=CountVectorizer(),
transformer=TruncatedSVD(n_components=n_components, random_state=777),
name='CountSVD')
features2 = cv.transform(train).astype(np.float32)
train = pd.concat([train, features1, features2], axis=1)
t_cols += [c for c in train.columns if c not in orig_cols]
if K_flag or G_flag:
with timer('kaeru and gege features'):
with timer('text stats features'):
train = get_text_features(train)
k_cols += ['Length_Description', 'Length_annots_top_desc', 'Lengths_sentiment_text']
g_cols += ['Length_Description', 'Length_annots_top_desc', 'Lengths_sentiment_entities']
if K_flag:
with timer('kaeru features'):
orig_cols = train.columns
with timer('enginerring age'):
train = get_age_feats(train)
with timer('frequency encoding'):
freq_cols = ['BreedName_main_breed', 'BreedName_second_breed']
train = freq_encoding(train, freq_cols)
with timer('kanji feature'):
                train['in_kanji'] = train.Description.apply(is_zh)
            with timer('tfidf + svd / nmf'):
                # TruncatedSVD cannot consume raw text; the timer label and the
                # 'tfidf_k_svd' column names imply a TF-IDF step, so one is assumed here.
                vectorizer = make_pipeline(
                    TfidfVectorizer(),
                    TruncatedSVD(n_components=n_components, random_state=kaeru_seed),
                )
                X = vectorizer.fit_transform(train['Description'])
                X = pd.DataFrame(X, columns=['tfidf_k_svd_{}'.format(i) for i in range(n_components)])
                train = pd.concat([train, X], axis=1)
                del vectorizer
                gc.collect()
with timer('description doc2vec'):
d2v_param = {
"features_num": 300,
"min_word_count": 10,
"context": 5,
"downsampling": 1e-3,
"epoch_num": 10
}
X = doc2vec(train["Description"], d2v_param)
train = pd.concat([train, X], axis=1)
            with timer('annots_top_desc + svd / nmf'):
                # Same assumption as above: vectorize the text before TruncatedSVD.
                vectorizer = make_pipeline(
                    TfidfVectorizer(),
                    TruncatedSVD(n_components=n_components, random_state=kaeru_seed),
                )
                X = vectorizer.fit_transform(train['annots_top_desc_pick'])
                X = pd.DataFrame(X, columns=['annots_top_desc_k_svd_{}'.format(i) for i in range(n_components)])
                train = pd.concat([train, X], axis=1)
                del vectorizer
                gc.collect()
with timer('densenet features'):
vectorizer = TruncatedSVD(n_components=n_components, random_state=kaeru_seed)
X = vectorizer.fit_transform(gp_dense_first.drop(['PetID'], axis=1))
X = pd.DataFrame(X, columns=['densenet121_svd_{}'.format(i) for i in range(n_components)])
X["PetID"] = gp_dense_first["PetID"]
train = pd.merge(train, X, how="left", on="PetID")
del vectorizer;
gc.collect()
with timer('aggregation'):
stats = ['mean', 'sum', 'min', 'max']
var = ['Age_k', 'MaturitySize_k', 'FurLength_k', 'Fee_k', 'Health_k']
for c in ['Age', 'MaturitySize', 'FurLength', 'Fee', 'Health']:
train[c + "_k"] = train[c]
groupby_dict = [
{
'key': ['RescuerID'],
'var': ['Age_k'],
'agg': ['count']
},
{
'key': ['RescuerID'],
'var': ['Age_k', 'Length_Description', 'Length_annots_top_desc', 'Lengths_sentiment_text'],
'agg': stats + ["var"]
},
{
'key': ['RescuerID'],
'var': ['MaturitySize_k', 'FurLength_k', 'Fee_k', 'Health_k'],
'agg': stats
}
]
groupby = GroupbyTransformer(param_dict=groupby_dict)
train = groupby.transform(train)
train.drop(var, axis=1, inplace=True)
            k_cols += [c for c in train.columns if c not in orig_cols and c not in kaeru_drop_cols]
if G_flag:
with timer('gege features'):
orig_cols = train.columns
with timer('densenet features'):
vectorizer = TruncatedSVD(n_components=n_components_gege_img, random_state=kaeru_seed)
X = vectorizer.fit_transform(gp_dense_first.drop(['PetID'], axis=1))
X = pd.DataFrame(X, columns=['densenet121_g_svd_{}'.format(i) for i in range(n_components_gege_img)])
X["PetID"] = gp_dense_first["PetID"]
train = pd.merge(train, X, how="left", on="PetID")
del vectorizer, gp_dense_first;
gc.collect()
with timer('frequency encoding'):
freq_cols = ['RescuerID', 'Breed1', 'Breed2', 'Color1', 'Color2', 'Color3', 'State']
train = freq_encoding(train, freq_cols)
with timer('tfidf + svd'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=2, max_features=None,
strip_accents='unicode', analyzer='word', token_pattern=r'(?u)\b\w+\b',
ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1),
TruncatedSVD(n_components=n_components_gege_txt, random_state=kaeru_seed)
)
X = vectorizer.fit_transform(train['Description'])
X = pd.DataFrame(X, columns=['tfidf_g_svd_{}'.format(i) for i in range(n_components_gege_txt)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('annots tfidf + svd'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=2, max_features=None,
strip_accents='unicode', analyzer='word', token_pattern=r'(?u)\b\w+\b',
ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1),
TruncatedSVD(n_components=n_components_gege_txt, random_state=kaeru_seed)
)
X = vectorizer.fit_transform(train['annots_top_desc'])
X = pd.DataFrame(X, columns=['annots_top_desc_tfidf_g_svd_{}'.format(i) for i in
range(n_components_gege_txt)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('sentiment entities tfidf + svd'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=2, max_features=None,
strip_accents='unicode', analyzer='word', token_pattern=r'(?u)\b\w+\b',
ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1),
TruncatedSVD(n_components=n_components_gege_txt, random_state=kaeru_seed)
)
X = vectorizer.fit_transform(train['sentiment_entities'])
X = pd.DataFrame(X, columns=['sentiment_entities_tfidf_g_svd_{}'.format(i) for i in
range(n_components_gege_txt)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('image basic features'):
train_images['image_size'] = train_images['image_filename'].apply(getSize)
train_images['temp_size'] = train_images['image_filename'].apply(getDimensions)
train_images['width'] = train_images['temp_size'].apply(lambda x: x[0])
train_images['height'] = train_images['temp_size'].apply(lambda x: x[1])
train_images = train_images.drop(['temp_size'], axis=1)
aggs = {
'image_size': ['sum', 'mean', 'var'],
'width': ['sum', 'mean', 'var'],
'height': ['sum', 'mean', 'var'],
}
gp = train_images.groupby('PetID').agg(aggs)
new_columns = [k + '_' + agg for k in aggs.keys() for agg in aggs[k]]
gp.columns = new_columns
train = train.merge(gp.reset_index(), how="left", on="PetID")
g_cols += [c for c in train.columns if c not in orig_cols]
dtype_cols = ['BreedName_main_breed', 'BreedName_second_breed', 'BreedName_main_breed_all']
train[dtype_cols] = train[dtype_cols].astype("int32")
logger.info(train.head())
train.to_feather("all_data.feather")
np.save("common_cols.npy", np.array(common_cols))
np.save("t_cols.npy", np.array(t_cols))
np.save("k_cols.npy", np.array(k_cols))
np.save("g_cols.npy", np.array(g_cols))
if T_flag:
with timer('takuoko feature info'):
categorical_features_t = list(set(categorical_features) - set(remove))
predictors = list(set(common_cols + t_cols + categorical_features_t) - set([target] + remove))
predictors = [c for c in predictors if c in use_cols]
categorical_features_t = [c for c in categorical_features_t if c in predictors]
logger.info(f'predictors / use_cols = {len(predictors)} / {len(use_cols)}')
train = train.loc[:, ~train.columns.duplicated()]
X = train.loc[:, predictors]
y = train.loc[:, target]
rescuer_id = train.loc[:, 'RescuerID'].iloc[:len_train]
X_test = X[len_train:]
X = X[:len_train]
y = y[:len_train]
X.to_feather("X_train_t.feather")
X_test.reset_index(drop=True).to_feather("X_test_t.feather")
with timer('takuoko modeling'):
y_pred_t = np.empty(len_train, )
y_test_t = []
train_losses, valid_losses = [], []
cv = StratifiedGroupKFold(n_splits=n_splits)
for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=y, groups=rescuer_id)):
X_train = X.loc[train_index, :]
X_valid = X.loc[valid_index, :]
y_train = y[train_index]
y_valid = y[valid_index]
pred_val, pred_test, train_rmse, valid_rmse = run_model(X_train, y_train, X_valid, y_valid, X_test,
categorical_features_t, predictors,
maxvalue_dict, fold_id, MODEL_PARAMS,
MODEL_NAME + "_t")
y_pred_t[valid_index] = pred_val
y_test_t.append(pred_test)
train_losses.append(train_rmse)
valid_losses.append(valid_rmse)
y_test_t = np.mean(y_test_t, axis=0)
logger.info(f'train RMSE = {np.mean(train_losses)}')
logger.info(f'valid RMSE = {np.mean(valid_losses)}')
np.save("y_test_t.npy", y_test_t)
np.save("y_oof_t.npy", y_pred_t)
if K_flag:
with timer('kaeru feature info'):
kaeru_cat_cols = None
predictors = list(set(common_cols + k_cols) - set([target] + remove + kaeru_drop_cols))
X = train.loc[:, predictors]
y = train.loc[:, target]
rescuer_id = train.loc[:, 'RescuerID'].iloc[:len_train]
X_test = X[len_train:]
X = X[:len_train]
y = y[:len_train]
X.to_feather("X_train_k.feather")
X_test.reset_index(drop=True).to_feather("X_test_k.feather")
with timer('kaeru modeling'):
y_pred_k = np.empty(len_train, )
y_test_k = []
train_losses, valid_losses = [], []
cv = StratifiedGroupKFold(n_splits=n_splits)
for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=y, groups=rescuer_id)):
X_train = X.loc[train_index, :]
X_valid = X.loc[valid_index, :]
y_train = y[train_index]
y_valid = y[valid_index]
pred_val, pred_test, train_rmse, valid_rmse = run_model(X_train, y_train, X_valid, y_valid, X_test,
kaeru_cat_cols, predictors, maxvalue_dict,
fold_id, KAERU_PARAMS, MODEL_NAME + "_k")
y_pred_k[valid_index] = pred_val
y_test_k.append(pred_test)
train_losses.append(train_rmse)
valid_losses.append(valid_rmse)
y_test_k = np.mean(y_test_k, axis=0)
logger.info(f'train RMSE = {np.mean(train_losses)}')
logger.info(f'valid RMSE = {np.mean(valid_losses)}')
np.save("y_test_k.npy", y_test_k)
np.save("y_oof_k.npy", y_pred_k)
if G_flag:
with timer('gege feature info'):
predictors = list(set(common_cols + g_cols) - set([target] + remove + gege_drop_cols))
categorical_features_g = [c for c in categorical_features if c in predictors]
X = train.loc[:, predictors]
y = train.loc[:, target]
rescuer_id = train.loc[:, 'RescuerID'].iloc[:len_train]
X_test = X[len_train:]
X = X[:len_train]
y = y[:len_train]
X.to_feather("X_train_g.feather")
X_test.reset_index(drop=True).to_feather("X_test_g.feather")
with timer('gege adversarial validation'):
train_idx = range(0, len_train)
X_adv = train.loc[:, predictors]
y_adv = np.array([0 for i in range(len(X))] + [1 for i in range(len(X_test))])
X_adv_tr, X_adv_tst, y_adv_tr, y_adv_tst = train_test_split(X_adv, y_adv, test_size=0.20, shuffle=True,
random_state=42)
lgtrain = lgb.Dataset(X_adv_tr, y_adv_tr,
categorical_feature=categorical_features_g,
feature_name=predictors)
lgvalid = lgb.Dataset(X_adv_tst, y_adv_tst,
categorical_feature=categorical_features_g,
feature_name=predictors)
lgb_adv = lgb.train(
ADV_PARAMS,
lgtrain,
num_boost_round=20000,
valid_sets=[lgtrain, lgvalid],
valid_names=['train', 'valid'],
early_stopping_rounds=500,
verbose_eval=20000
)
train_preds = lgb_adv.predict(X_adv.iloc[train_idx])
extract_idx = np.argsort(-train_preds)[:int(len(train_idx) * 0.85)]
del X_adv_tr, X_adv_tst, y_adv_tr, y_adv_tst, X_adv, y_adv, lgb_adv;
gc.collect()
with timer('gege modeling'):
X = X.iloc[extract_idx].reset_index(drop=True)
y = y[extract_idx].reset_index(drop=True)
rescuer_id = rescuer_id[extract_idx].reset_index(drop=True)
y_pred_g = np.empty(len(extract_idx), )
y_test_g = []
train_losses, valid_losses = [], []
cv = StratifiedGroupKFold(n_splits=n_splits)
for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=y, groups=rescuer_id)):
X_train = X.loc[train_index, :]
X_valid = X.loc[valid_index, :]
y_train = y[train_index]
y_valid = y[valid_index]
pred_val, pred_test, train_rmse, valid_rmse = run_xgb_model(X_train, y_train,
X_valid, y_valid, X_test, predictors,
maxvalue_dict,
fold_id, MODEL_PARAMS_XGB,
MODEL_NAME + "_g")
y_pred_g[valid_index] = pred_val
y_test_g.append(pred_test)
train_losses.append(train_rmse)
valid_losses.append(valid_rmse)
y_test_g = np.mean(y_test_g, axis=0)
logger.info(f'train RMSE = {np.mean(train_losses)}')
logger.info(f'valid RMSE = {np.mean(valid_losses)}')
np.save("y_test_g.npy", y_test_g)
np.save("y_oof_g.npy", y_pred_g)
np.save("extract_idx.npy", extract_idx)
if T_flag and K_flag and G_flag:
y_pred = (y_pred_t[extract_idx] + y_pred_k[extract_idx] + y_pred_g) / 3
y_test = (y_test_t + y_test_k + y_test_g) / 3
elif T_flag and K_flag:
y_pred = y_pred_t * 0.5 + y_pred_k * 0.5
y_test = y_test_t * 0.5 + y_test_k * 0.5
elif T_flag and G_flag:
y_pred = y_pred_t[extract_idx] * 0.5 + y_pred_g * 0.5
y_test = y_test_t * 0.5 + y_test_g * 0.5
elif G_flag and K_flag:
y_pred = y_pred_g * 0.5 + y_pred_k[extract_idx] * 0.5
y_test = y_test_g * 0.5 + y_test_k * 0.5
elif T_flag:
y_pred = y_pred_t
y_test = y_test_t
elif K_flag:
y_pred = y_pred_k
y_test = y_test_k
elif G_flag:
y_pred = y_pred_g
y_test = y_test_g
with timer('optimize threshold'):
optR = OptimizedRounder()
optR.fit(y_pred, y)
coefficients = optR.coefficients()
y_pred = optR.predict(y_pred, coefficients)
score = get_score(y, y_pred)
logger.info(f'Coefficients = {coefficients}')
logger.info(f'QWK = {score}')
y_test = optR.predict(y_test, coefficients).astype(int)
with timer('postprocess'):
submission_with_postprocess(y_test)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 790e9709898353c8af26ad1da67d9d4d96aca8ee | size: 8625 | ext: py | lang: Python
max_stars_repo_path: benchmarks/f3_wrong_hints_permutations/scaling_ltl_infinite_state/12-extending_bound_39.py
max_stars_repo_name: EnricoMagnago/F3 | max_stars_repo_head_hexsha: c863215c318d7d5f258eb9be38c6962cf6863b52
max_stars_repo_licenses: ["MIT"] | max_stars_count: 3 | stars: 2021-04-23T23:29:26.000Z .. 2022-03-23T10:00:30.000Z
max_issues_repo_path: benchmarks/f3_wrong_hints_permutations/scaling_ltl_infinite_state/12-extending_bound_39.py
max_issues_repo_name: EnricoMagnago/F3 | max_issues_repo_head_hexsha: c863215c318d7d5f258eb9be38c6962cf6863b52
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues: null .. null
max_forks_repo_path: benchmarks/f3_wrong_hints_permutations/scaling_ltl_infinite_state/12-extending_bound_39.py
max_forks_repo_name: EnricoMagnago/F3 | max_forks_repo_head_hexsha: c863215c318d7d5f258eb9be38c6962cf6863b52
max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | forks: 2021-11-17T22:02:56.000Z .. 2021-11-17T22:02:56.000Z
from typing import Tuple, FrozenSet
from collections.abc import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
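def _demo_derived_ops():
    # A minimal sanity sketch (plain Python, no mathsat required) of the
    # Boolean identities the helpers above encode:
    #   a <  b  ==  not (b <= a)     (msat_make_lt via geq + not)
    #   a >  b  ==  not (a <= b)     (msat_make_gt via leq + not)
    #   p -> q  ==  (not p) or q     (msat_make_impl via not + or)
    for a in (0, 1, 2):
        for b in (0, 1, 2):
            assert (a < b) == (not b <= a)
            assert (a > b) == (not a <= b)
    for p in (False, True):
        for q in (False, True):
            assert ((not p) or q) == (q if p else True)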
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
msat_term, msat_term]:
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
bool_type = msat_get_bool_type(menv)
real_type = msat_get_rational_type(menv)
i = msat_declare_function(menv, "i", real_type)
i = msat_make_constant(menv, i)
r = msat_declare_function(menv, "r", real_type)
r = msat_make_constant(menv, r)
l = msat_declare_function(menv, "l", real_type)
l = msat_make_constant(menv, l)
inc_i = msat_declare_function(menv, "inc_i", bool_type)
inc_i = msat_make_constant(menv, inc_i)
x_i = msat_declare_function(menv, name_next("i"), real_type)
x_i = msat_make_constant(menv, x_i)
x_r = msat_declare_function(menv, name_next("r"), real_type)
x_r = msat_make_constant(menv, x_r)
x_l = msat_declare_function(menv, name_next("l"), real_type)
x_l = msat_make_constant(menv, x_l)
x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
x_inc_i = msat_make_constant(menv, x_inc_i)
curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
r_gt_0 = msat_make_gt(menv, r, zero)
r_lt_l = msat_make_lt(menv, r, l)
i_geq_0 = msat_make_geq(menv, i, zero)
init = msat_make_and(menv, r_gt_0, r_lt_l)
init = msat_make_and(menv, init,
msat_make_and(menv, i_geq_0,
msat_make_not(menv, inc_i)))
init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
# r' = r
trans = msat_make_equal(menv, x_r, r)
# i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
i_lt_l = msat_make_lt(menv, i, l)
x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
msat_make_equal(menv, x_i,
msat_make_plus(menv, i, one)))
x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
msat_make_equal(menv, x_i, i))
x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
x_l_eq_l = msat_make_equal(menv, x_l, l)
x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
x_l_eq_l)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_lt_l,
x_i_eq_i_p_1_or_i_and_x_l_eq_l))
# i >= l -> i' = 0 & l' = l + 1 & !inc_i'
i_geq_l = msat_make_geq(menv, i, l)
x_i_eq_0 = msat_make_equal(menv, x_i, zero)
x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
msat_make_and(menv, x_i_eq_0,
x_l_eq_l_p_1),
msat_make_not(menv, x_inc_i))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_geq_l,
x_i_eq_0_and_x_l_eq_l_p_1))
# (G F inc_i) -> ! G F r > i
    G_F_inc_i = enc.make_G(enc.make_F(inc_i))
    r_gt_i = msat_make_gt(menv, r, i)
    n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
    ltl = msat_make_impl(menv, G_F_inc_i, n_G_F_r_gt_i)
return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
i = mgr.Symbol("i", types.REAL)
r = mgr.Symbol("r", types.REAL)
l = mgr.Symbol("l", types.REAL)
inc_i = mgr.Symbol("inc_i", types.BOOL)
symbs = frozenset([i, r, l, inc_i])
x_i = symb_to_next(mgr, i)
x_r = symb_to_next(mgr, r)
x_l = symb_to_next(mgr, l)
x_inc_i = symb_to_next(mgr, inc_i)
res = []
n0 = mgr.Real(0)
n1 = mgr.Real(1)
loc = Location(env, mgr.LE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Minus(r, n1)))
h_r = Hint("h_r1", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc3", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(2, mgr.Equals(x_i, i))
loc2 = Location(env, mgr.GE(i, n0))
loc2.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i4", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1, loc2])
res.append(h_i)
loc = Location(env, mgr.GE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Plus(l, n1)))
h_l = Hint("h_l0", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc = Location(env, mgr.Not(inc_i))
loc.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc1", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
loc2 = Location(env, mgr.GE(r, n0))
loc2.set_progress(0, mgr.Equals(x_r, r))
h_r = Hint("h_r4", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1, loc2])
res.append(h_r)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l2", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(2, mgr.Not(x_inc_i))
loc2 = Location(env, mgr.Not(inc_i))
loc2.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc4", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1, loc2])
res.append(h_inc)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r2", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc = Location(env, mgr.GE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r0", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc0 = Location(env, mgr.GE(i, n0), mgr.GE(l, n0),
stutterT=mgr.Equals(x_i, mgr.Plus(i, l)))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i3", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.GE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Plus(i, n1)))
h_i = Hint("h_i0", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
return frozenset(res)
avg_line_length: 35.9375 | max_line_length: 89 | alphanum_fraction: 0.628406
content_no_comment: (verbatim duplicate of the file above with comments stripped; omitted)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790e99ef4637384e27573a7942ddb1fd48d782cb | size: 3818 | ext: py | lang: Python
max_stars_repo_path: segmentation_models_pytorch/decoders/unet/decoder.py
max_stars_repo_name: navivokaj/segmentation_models.pytorch | max_stars_repo_head_hexsha: 5dbb5f6733515097cecc93f078c09e59ccbeb0c0
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | stars: null .. null
max_issues_repo_path: segmentation_models_pytorch/decoders/unet/decoder.py
max_issues_repo_name: navivokaj/segmentation_models.pytorch | max_issues_repo_head_hexsha: 5dbb5f6733515097cecc93f078c09e59ccbeb0c0
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues: null .. null
max_forks_repo_path: segmentation_models_pytorch/decoders/unet/decoder.py
max_forks_repo_name: navivokaj/segmentation_models.pytorch | max_forks_repo_head_hexsha: 5dbb5f6733515097cecc93f078c09e59ccbeb0c0
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks: null .. null
import torch
import torch.nn as nn
import torch.nn.functional as F
from segmentation_models_pytorch.base import modules as md
class DecoderBlock(nn.Module):
def __init__(
self,
in_channels,
skip_channels,
out_channels,
use_batchnorm=True,
attention_type=None,
):
super().__init__()
self.conv1 = md.Conv2dReLU(
in_channels + skip_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
self.attention1 = md.Attention(attention_type, in_channels=in_channels + skip_channels)
self.conv2 = md.Conv2dReLU(
out_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
self.attention2 = md.Attention(attention_type, in_channels=out_channels)
def forward(self, x, skip=None):
x = F.interpolate(x, scale_factor=2, mode="nearest")
if skip is not None:
if skip.shape[-1] != x.shape[-1]:
skip = F.interpolate(skip, scale_factor=2, mode="nearest")
x = torch.cat([x, skip], dim=1)
x = self.attention1(x)
x = self.conv1(x)
x = self.conv2(x)
x = self.attention2(x)
return x
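# Hypothetical usage sketch (not part of the original module; shapes below are
# illustrative assumptions): decode a 64-channel feature map with a 32-channel
# skip connection.
def _example_decoder_block():
    block = DecoderBlock(in_channels=64, skip_channels=32, out_channels=16)
    x = torch.randn(1, 64, 8, 8)       # coarse features from the deeper stage
    skip = torch.randn(1, 32, 16, 16)  # encoder skip at twice the resolution
    out = block(x, skip)               # upsample x2 -> concat -> two convs
    assert out.shape == (1, 16, 16, 16)
    return out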
class CenterBlock(nn.Sequential):
def __init__(self, in_channels, out_channels, use_batchnorm=True):
conv1 = md.Conv2dReLU(
in_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
conv2 = md.Conv2dReLU(
out_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
super().__init__(conv1, conv2)
class UnetDecoder(nn.Module):
def __init__(
self,
encoder_channels,
decoder_channels,
n_blocks=5,
use_batchnorm=True,
attention_type=None,
center=False,
):
super().__init__()
if n_blocks != len(decoder_channels):
raise ValueError(
"Model depth is {}, but you provide `decoder_channels` for {} blocks.".format(
n_blocks, len(decoder_channels)
)
)
# remove first skip with same spatial resolution
encoder_channels = encoder_channels[1:]
# reverse channels to start from head of encoder
encoder_channels = encoder_channels[::-1]
# computing blocks input and output channels
head_channels = encoder_channels[0]
in_channels = [head_channels] + list(decoder_channels[:-1])
skip_channels = list(encoder_channels[1:]) + [0]
out_channels = decoder_channels
if center:
self.center = CenterBlock(head_channels, head_channels, use_batchnorm=use_batchnorm)
else:
self.center = nn.Identity()
# combine decoder keyword arguments
kwargs = dict(use_batchnorm=use_batchnorm, attention_type=attention_type)
blocks = [
DecoderBlock(in_ch, skip_ch, out_ch, **kwargs)
for in_ch, skip_ch, out_ch in zip(in_channels, skip_channels, out_channels)
]
self.blocks = nn.ModuleList(blocks)
def forward(self, *features):
features = features[1:] # remove first skip with same spatial resolution
features = features[::-1] # reverse channels to start from head of encoder
head = features[0]
skips = features[1:]
x = self.center(head)
for i, decoder_block in enumerate(self.blocks):
skip = skips[i] if i < len(skips) else None
x = decoder_block(x, skip)
return x
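# Hypothetical usage sketch: the channel/resolution layout below assumes a
# typical 5-stage encoder pyramid and is illustrative, not taken from the
# library itself.
def _example_unet_decoder():
    decoder = UnetDecoder(
        encoder_channels=(3, 64, 64, 128, 256, 512),
        decoder_channels=(256, 128, 64, 32, 16),
    )
    # Fake features for a 1x3x64x64 input; each encoder stage halves resolution.
    sizes = [(3, 64), (64, 32), (64, 16), (128, 8), (256, 4), (512, 2)]
    features = [torch.randn(1, c, s, s) for c, s in sizes]
    out = decoder(*features)
    assert out.shape == (1, 16, 64, 64)
    return out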
avg_line_length: 30.790323 | max_line_length: 96 | alphanum_fraction: 0.587742
content_no_comment: (verbatim duplicate of the file above with comments stripped; omitted)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790e9a4af6c3bbbfc20c0a4c4aa335a9fec0e5bf | size: 8290 | ext: py | lang: Python
max_stars_repo_path: tsxv/splitTrainVal.py
max_stars_repo_name: DidierRLopes/timeseries-cv | max_stars_repo_head_hexsha: 8e9137c2774e92a6d7a80446e4022c27ca1b7991
max_stars_repo_licenses: ["MIT"] | max_stars_count: 14 | stars: 2021-05-30T16:29:42.000Z .. 2022-03-16T01:59:18.000Z
max_issues_repo_path: tsxv/splitTrainVal.py
max_issues_repo_name: DidierRLopes/TimeSeriesCrossValidation | max_issues_repo_head_hexsha: c886670ba0c8c347b12639ec4a6fb549457c1ef1
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues: null .. null
max_forks_repo_path: tsxv/splitTrainVal.py
max_forks_repo_name: DidierRLopes/TimeSeriesCrossValidation | max_forks_repo_head_hexsha: c886670ba0c8c347b12639ec4a6fb549457c1ef1
max_forks_repo_licenses: ["MIT"] | max_forks_count: 6 | forks: 2021-01-06T12:22:34.000Z .. 2021-04-20T08:05:33.000Z
"""
Forward Chaining, K-Fold and Group K-Fold algorithms to split a given training dataset into train (X, y) and validation (Xcv, ycv) sets
"""
import numpy as np
def split_train_val_forwardChaining(sequence, numInputs, numOutputs, numJumps):
""" Returns sets to train and cross-validate a model using forward chaining technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training and validation
numOutputs (int) : Number of outputs y and ycv used at each training and validation
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (2D array) : Array of numInputs arrays used for training
y (2D array) : Array of numOutputs arrays used for training
Xcv (2D array) : Array of numInputs arrays used for cross-validation
ycv (2D array) : Array of numOutputs arrays used for cross-validation
"""
X, y, Xcv, ycv = dict(), dict(), dict(), dict()
j=2; # Tracks index of CV set at each train/val split
# Iterate through all train/val splits
while 1:
start_ix=0; end_ix=0; startCv_ix=0; endCv_ix=0;
X_it, y_it, Xcv_it, ycv_it = list(), list(), list(), list()
i=0; # Index of individual training set at each train/val split
# Iterate until index of individual training set is smaller than index of cv set
while (i < j):
## TRAINING DATA
start_ix = numJumps*i;
end_ix = start_ix + numInputs;
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:end_ix+numOutputs]
y_it.append(seq_y)
i+=1;
# Once val data crosses time series length return
if (((end_ix+numInputs)+numOutputs) > len(sequence)):
break
## CROSS-VALIDATION DATA
startCv_ix = end_ix;
endCv_ix = end_ix + numInputs;
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:endCv_ix+numOutputs]
ycv_it.append(seq_ycv)
## Add another train/val split
X[j-2] = np.array(X_it)
y[j-2] = np.array(y_it)
Xcv[j-2] = np.array(Xcv_it)
ycv[j-2] = np.array(ycv_it)
j+=1;
if (len(X)==0 or len(Xcv)==0):
print("The sequence provided does not has size enough to populate the return arrays")
return X, y, Xcv, ycv
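# --- usage sketch (added for illustration; the parameter values are assumptions) ---
if __name__ == "__main__":
    # Forward chaining: the training window grows by one (X, y) pair per split,
    # and validation always follows the most recent training data.
    sequence = np.arange(20)
    X, y, Xcv, ycv = split_train_val_forwardChaining(sequence, numInputs=4, numOutputs=2, numJumps=2)
    for k in sorted(X):
        print("split", k, "train", X[k].shape, y[k].shape, "val", Xcv[k].shape, ycv[k].shape)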
def split_train_val_kFold(sequence, numInputs, numOutputs, numJumps):
""" Returns sets to train and cross-validate a model using K-Fold technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training
numOutputs (int) : Number of outputs y and ycv used at each training
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (dict of 2D arrays) : numInputs-wide training windows for each split
y (dict of 2D arrays) : numOutputs-wide training targets for each split
Xcv (dict of 2D arrays) : numInputs-wide validation windows for each split
ycv (dict of 2D arrays) : numOutputs-wide validation targets for each split
"""
X, y, Xcv, ycv = dict(), dict(), dict(), dict()
j=2; # Tracks index of CV set at each train/val split
theEnd = 0; # Flag to terminate function
# Iterate until val set falls outside time series length
while 1:
start_ix=0; end_ix=0; startCv_ix=0; endCv_ix=0;
X_it, y_it, Xcv_it, ycv_it = list(), list(), list(), list()
i=0; # Index of individual training set at each train/val split
n=0; # Number of numJumps
# Iterate through all train/val splits
while 1:
if (i != j):
## TRAINING DATA
start_ix = endCv_ix + numJumps*n;
end_ix = start_ix + numInputs;
n +=1;
# Leave train/val split loop once training data crosses time series length
if end_ix+numOutputs > len(sequence):
break;
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:end_ix+numOutputs]
y_it.append(seq_y)
else:
## CROSS-VALIDATION DATA
startCv_ix = end_ix;
endCv_ix = end_ix + numInputs;
n = 0;
# Once val data crosses time series length exit train/val split loop and return
if endCv_ix+numOutputs > len(sequence):
theEnd = 1;
break;
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:endCv_ix+numOutputs]
ycv_it.append(seq_ycv)
i+=1;
# Only add a train/val split if the time series length has not been crossed
if (theEnd == 1):
break
## Add another train/val split
X[j-2] = np.array(X_it)
y[j-2] = np.array(y_it)
Xcv[j-2] = np.array(Xcv_it)
ycv[j-2] = np.array(ycv_it)
j+=1;
if (len(X)==0 or len(Xcv)==0):
print("The sequence provided does not has size enough to populate the return arrays")
return X, y, Xcv, ycv
def split_train_val_groupKFold(sequence, numInputs, numOutputs, numJumps):
""" Returns sets to train and cross-validate a model using group K-Fold technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training
numOutputs (int) : Number of outputs y and ycv used at each training
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (dict of 2D arrays) : numInputs-wide training windows for each split
y (dict of 2D arrays) : numOutputs-wide training targets for each split
Xcv (dict of 2D arrays) : numInputs-wide validation windows for each split
ycv (dict of 2D arrays) : numOutputs-wide validation targets for each split
"""
X, y, Xcv, ycv = dict(), dict(), dict(), dict()
# Iterate through 5 train/val splits
for j in np.arange(5):
start_ix=0; end_ix=0; startCv_ix=0; endCv_ix=0;
X_it, y_it, Xcv_it, ycv_it = list(), list(), list(), list()
i=0; # Index of individual training set at each train/val split
n=0; # Number of numJumps
while 1:
if ((i+1+j)%(5) != 0):
# TRAINING DATA
start_ix = endCv_ix + numJumps*n;
end_ix = start_ix + numInputs;
n+=1;
# Leave train/val split loop once training data crosses time series length
if end_ix+numOutputs > len(sequence)-1:
break
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:end_ix+numOutputs]
y_it.append(seq_y)
else:
# CROSS-VALIDATION DATA
startCv_ix = end_ix;
endCv_ix = end_ix + numInputs;
n=0;
# Once val data crosses time series length return
if ((endCv_ix+numOutputs) > len(sequence)):
break
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:endCv_ix+numOutputs]
ycv_it.append(seq_ycv)
i+=1;
## Add another train/val split
X[j] = np.array(X_it)
y[j] = np.array(y_it)
Xcv[j] = np.array(Xcv_it)
ycv[j] = np.array(ycv_it)
if (len(X)==0 or len(Xcv)==0):
print("The sequence provided does not has size enough to populate the return arrays")
return X, y, Xcv, ycv
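# --- comparison sketch (added for illustration; the toy series is an assumption) ---
if __name__ == "__main__":
    # The three schemes differ in how many splits they produce and where the
    # validation block sits: forward chaining grows the training set, K-Fold
    # slides the validation block through the series, and group K-Fold always
    # yields exactly 5 folds.
    seq = np.arange(20)
    for name, splitter in [("forwardChaining", split_train_val_forwardChaining),
                           ("kFold", split_train_val_kFold),
                           ("groupKFold", split_train_val_groupKFold)]:
        X, y, Xcv, ycv = splitter(seq, 4, 2, 2)
        print("{0}: {1} train/val splits".format(name, len(X)))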
| 37.681818
| 135
| 0.563088
|
| true
| true
|
790e9b280fc1a0afde58e16d883f1eb73f4659cd
| 427
|
py
|
Python
|
odoo-13.0/addons/account/wizard/account_unreconcile.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/account/wizard/account_unreconcile.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/account/wizard/account_unreconcile.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
from odoo import models, api
class AccountUnreconcile(models.TransientModel):
_name = "account.unreconcile"
_description = "Account Unreconcile"
def trans_unrec(self):
context = dict(self._context or {})
if context.get('active_ids', False):
self.env['account.move.line'].browse(context.get('active_ids')).remove_move_reconcile()
return {'type': 'ir.actions.act_window_close'}
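# Usage sketch (added for illustration; `move_line_ids` is hypothetical). The
# wizard reads the records to unreconcile from `active_ids` in the context,
# which the web client normally supplies when the wizard is launched:
#
#     wizard = env['account.unreconcile'].with_context(active_ids=move_line_ids).create({})
#     wizard.trans_unrec()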
| 32.846154
| 99
| 0.686183
|
| true
| true
|
790e9c64da11166eaae2b86fdf01c316b798e7d7
| 52,258
|
py
|
Python
|
Code/DataHandlers/GraphModels.py
|
aricsanders/pyMez3
|
13e2b9900af2287db0cc42a0190d31da165ce174
|
[
"Unlicense"
] | null | null | null |
Code/DataHandlers/GraphModels.py
|
aricsanders/pyMez3
|
13e2b9900af2287db0cc42a0190d31da165ce174
|
[
"Unlicense"
] | null | null | null |
Code/DataHandlers/GraphModels.py
|
aricsanders/pyMez3
|
13e2b9900af2287db0cc42a0190d31da165ce174
|
[
"Unlicense"
] | null | null | null |
#-----------------------------------------------------------------------------
# Name: GraphModels
# Purpose: To store graphs used in network translations
# Author: Aric Sanders
# Created: 4/6/2016
# License: MIT License
#-----------------------------------------------------------------------------
"""
Graph Models stores sub classes of graphs that define data translations. All edges
or the functions that define translations from one format to another
are found in <a href="./Translations.m.html">`pyMez.Code.DataHandlers.Translations`</a>.
Currently, the module networkx is used to display the graph.
Examples
--------
#!python
>>from pyMez import *
>>image_graph=ImageGraph()
>>image_graph.set_state('png','my_png.png')
>>image_graph.move_to_node('EmbeddedHtml')
>>output=image_graph.data
>>print(output)
<h3><a href="../../../Examples/Html/GraphModels_Example.html">GraphModels Example</a></h3>
Requirements
------------
+ [sys](https://docs.python.org/2/library/sys.html)
+ [os](https://docs.python.org/2/library/os.html?highlight=os#module-os)
+ [networkx](http://networkx.github.io/)
+ [numpy](http://www.numpy.org/)
+ [pyMez](https://github.com/aricsanders/pyMez)
Help
---------------
<a href="./index.html">`pyMez.Code.DataHandlers`</a>
<div>
<a href="../../../pyMez_Documentation.html">Documentation Home</a> |
<a href="../../index.html">API Documentation Home</a> |
<a href="../../../Examples/html/Examples_Home.html">Examples Home</a> |
<a href="../../../Reference_Index.html">Index</a>
</div>
"""
#-----------------------------------------------------------------------------
# Standard Imports
import re
import datetime
import sys
import os
#-----------------------------------------------------------------------------
# Third Party Imports
sys.path.append(os.path.join(os.path.dirname( __file__ ), '..','..'))
try:
from Code.Utils.Alias import *
METHOD_ALIASES=1
except:
print("The module pyMez.Code.Utils.Alias was not found")
METHOD_ALIASES=0
pass
try:
from Code.DataHandlers.GeneralModels import *
except:
print("The module pyMez.Code.DataHandlers.GeneralModels was not found,"
"please put it on the python path")
raise ImportError
try:
from Code.DataHandlers.TouchstoneModels import *
except:
print("The module pyMez.Code.DataHandlers.TouchstoneModels was not found,"
"please put it on the python path")
raise ImportError
try:
from Code.DataHandlers.Translations import *
except:
print("The module pyMez.Code.DataHandlers.Translations was not found or had an error,"
"please put it on the python path or resolve the error")
raise ImportError
try:
import numpy as np
except:
print("The module numpy was not found,"
"please put it on the python path")
raise ImportError
try:
import networkx
except:
print("The module networkx was not found,"
"please put it on the python path")
raise ImportError
#-----------------------------------------------------------------------------
# Module Constants
#-----------------------------------------------------------------------------
# Module Functions
# as an example these functions are left.
#todo: Change the names
def edge_1_to_2(in_string):
"A Test function for an edge for a Graph"
return in_string.splitlines()
def edge_2_to_1(string_list):
"""A test function for an edge in a Graph"""
return string_list_collapse(string_list)
def visit_all_nodes(graph):
"""Visit all nodes visits each node on a graph"""
nodes=graph.node_names
for node in nodes:
graph.move_to_node(node)
def visit_and_print_all_nodes(graph):
"""Visits all the nodes in graph and prints graph.data after each move"""
nodes=graph.node_names
for node in nodes:
graph.move_to_node(node)
print((graph.data))
def to_node_name(node_data):
"""Creates a node name given an input object, does a bit of silly type selecting and name rearranging. This matches for 75%
of the cases. There are a lot of user defined nodes without a clear path to generate a name. For instance the DataTableGraph
node HpFile, does not save with a .hp extension so it would be auto named TxtFile if it was selected only by the path name.
If it is auto selected it returns StringList because it is of the format ["file_path","schema_path"] """
# we retrieve the text version of the class name
class_name = node_data.__class__.__name__
node_name = class_name
# now for dict and list types we want to inspect the first Element to see what it is
if re.match('list', class_name):
node_name = "List"
try:
element_class_name = node_data[0].__class__.__name__
node_name = element_class_name + node_name
except:
pass
elif re.match('dict', class_name):
node_name = "Dictionary"
try:
element_class_name = list(node_data.values())[0].__class__.__name__
node_name = element_class_name + node_name
except:
pass
elif re.match('str', class_name):
node_name = "String"
# Now we have to check if it is an existing file name
if os.path.isfile(node_data):
node_name = "File"
extension = ""
try:
if re.search("\.", node_data):
extension = node_data.split(".")[-1]
node_name = extension.title() + node_name
except:
pass
elif fnmatch.fnmatch(node_data, "*.*"):
node_name = "File"
try:
if re.search("\.", node_data):
extension = node_data.split(".")[-1]
node_name = extension.title() + node_name
except:
pass
node_name = node_name.replace("str", "String").replace("dict", "Dictionary")
return (node_name)
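# Naming-heuristic examples (added for illustration; inputs are assumptions):
#     to_node_name(["a", "b"])   -> "StringList"       (list whose first element is a str)
#     to_node_name({"k": "v"})   -> "StringDictionary" (dict whose first value is a str)
#     to_node_name("plain text") -> "String"           (no file on disk, no extension pattern)
#     to_node_name("table.csv")  -> "CsvFile"          (path need not exist; fnmatch sees "*.*")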
def TableGraph_to_Links(table_graph, **options):
"""Converts a table graph to a set of download links with embedded data in them"""
defaults = {"base_name": None,
"nodes": ['XmlFile', 'CsvFile', 'ExcelFile', 'OdsFile', 'MatFile', 'HtmlFile', 'JsonFile'],
"extensions": ['xml', 'csv', 'xlsx', 'ods', 'mat', 'html', 'json'],
"mime_types": ['application/xml', 'text/plain',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'application/vnd.oasis.opendocument.spreadsheet',
'application/x-matlab-data', 'text/html', 'application/json']}
conversion_options = {}
for key, value in defaults.items():
conversion_options[key] = value
for key, value in options.items():
conversion_options[key] = value
if conversion_options["base_name"] is None:
base_name = 'test.txt'
else:
base_name = conversion_options["base_name"]
nodes = conversion_options["nodes"]
extensions = conversion_options["extensions"]
mime_types = conversion_options["mime_types"]
out_links = ""
for node_index, node in enumerate(nodes):
table_graph.move_to_node(node)
file_path = table_graph.data
# read the file contents, closing the handle promptly
with open(file_path, 'rb') as in_file:
    content_string = in_file.read()
link = String_to_DownloadLink(content_string,
suggested_name=change_extension(base_name, extensions[node_index]),
mime_type=mime_types[node_index],
text=extensions[node_index])
if node_index == len(nodes) - 1:
out_links = out_links + link
else:
out_links = out_links + link + " | "
return out_links
def remove_circular_paths(path):
"""Removes pieces of the path that just end on the same node"""
# Todo: Track the error that leaves out a needed path sometimes
# See http://localhost:8888/notebooks/Two_Port_Matrix_Parameters_Debug_20170105_001.ipynb
edge_pattern=re.compile("edge_(?P<begin_node>\w+)_(?P<end_node>\w+)_(?P<iterator>\w+)")
past_locations=[]
for index,edge in enumerate(path):
match=re.match(edge_pattern,edge)
begin_node=match.groupdict()["begin_node"]
end_node=match.groupdict()["end_node"]
past_locations.append(begin_node)
#print("{0} is {1}".format("past_locations",past_locations))
new_path=[]
node_index=0
between_list=[False for item in past_locations]
while(node_index<len(past_locations)):
node=past_locations[node_index]
old_path=new_path
new_path=[]
# if you visit a location more than one
number_of_visits=past_locations.count(node)
if number_of_visits>1:
#print("{0} is {1}".format("node",node))
#print("{0} is {1}".format("past_locations",past_locations))
# Now find all the visits to that location
equality_list=[x==node for x in past_locations]
print(("{0} is {1}".format("equality_list",equality_list)))
# You are initially not between visits
between=False
# flip `between` only on the first and the last visit to that node
visit_number=0
for index,equality in enumerate(equality_list):
if equality:
# add one to the visit number
visit_number+=1
# Flip the between truth value if it is the first or last
# visits only
if visit_number==1 or visit_number==number_of_visits:
between=not between
between_list[index]=between or between_list[index]
else:
between_list[index]=between or between_list[index]
else:
between_list[index]=between or between_list[index]
#print("{0} is {1}".format("between_list",between_list))
for index,item in enumerate(between_list):
if not item:
new_path.append(path[index])
node_index+=1
if new_path in [[]]:
new_path=path
return new_path
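# Worked example (added for illustration): a path that leaves node A, comes
# straight back, and then departs again collapses to the final departure.
#     remove_circular_paths(["edge_A_B_000", "edge_B_A_000", "edge_A_C_000"])
#     -> ["edge_A_C_000"]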
#-----------------------------------------------------------------------------
# Module Classes
# getting around to adding a breadth first graph solver to Graph class
# modify the find_path method
class Graph(object):
"""The Graph class creates a content graph that has as nodes different formats. As
a format is added via graph.add_node() by specifying a node name and a function from an
existing node into the new one, and one exiting the node. Once a series of nodes exists
to enter the graph at a node use graph.set_state() the current data representing the
state is in the attribute graph.data. To move among the formats use graph.move_to_node('NodeName').
The find_path method still needs to be recoded using a shortest path algorithm like
[Dijkstra](https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm).
"""
def __init__(self, **options):
"""Initializes the graph. The first 2 nodes and two edges forming a bijection between them are required"""
defaults = {"graph_name": "Graph",
"node_names": ['n1', 'n2'],
"node_descriptions": ["A plain string",
"A list of strings with no \\n, created with string.splitlines()"],
"current_node": 'n1',
"state": [1, 0],
"data": "This is a test string\n it has to have multiple lines \n and many characters 34%6\n^",
"edge_2_to_1": edge_2_to_1,
"edge_1_to_2": edge_1_to_2
}
self.options = {}
for key, value in defaults.items():
self.options[key] = value
for key, value in options.items():
self.options[key] = value
self.elements = ['graph_name', 'node_names', 'node_descriptions', 'current_node', 'state', 'data']
for element in self.elements:
self.__dict__[element] = self.options[element]
self.edges = []
self.edge_matrices = []
self.state_matrix = np.matrix(self.state).T
# Add the first 2 edges, required to initialize the graph properly
self.display_graph = networkx.DiGraph()
self.add_edge(self.node_names[0], self.node_names[1], self.options["edge_1_to_2"])
self.add_edge(self.node_names[1], self.node_names[0], self.options["edge_2_to_1"])
self.jumps = []
self.external_node_names = []
self.external_node_descriptions = []
self.display_layout = networkx.spring_layout(self.display_graph)
def get_description_dictionary(self):
"returns a dictionary of the form {NodeName:Node Description for all of the current nodes"
dictionary = {node_name: self.node_descriptions[index] for index, node_name in enumerate(self.node_names)}
return dictionary
def set_state(self, node_name, node_data):
"""Sets the graph state to be the state specified by node_name, and node_data"""
try:
current_node_state_position = self.node_names.index(node_name)
self.current_node = node_name
self.data = node_data
self.state = [0 for i in range(len(self.node_names))]
self.state[current_node_state_position] = 1
self.state_matrix = np.matrix(self.state).T
except:
print(("Could not set the state of graph: {0}".format(self.graph_name)))
raise
def add_edge(self, begin_node=None, end_node=None, edge_function=None):
"""Adds an edge mapping one node to another, required input is begin_node (it's name)
end_node, and the edge function"""
# check to see if the edge is already defined; if it is, increment a counter
edge_match = re.compile("edge_{0}_{1}".format(begin_node, end_node))
keys = list(self.__dict__.keys())
# print keys
iterator = 0
for key in keys:
if re.match(edge_match, key):
iterator += 1
edge_name = "edge_{0}_{1}_{2:0>3d}".format(begin_node, end_node, iterator)
self.__dict__[edge_name] = edge_function
self.edges.append(edge_name)
edge_matrix = np.zeros((len(self.state), len(self.state)))
begin_position = self.node_names.index(begin_node)
end_position = self.node_names.index(end_node)
edge_matrix[end_position][begin_position] = 1
edge_matrix = np.matrix(edge_matrix)
self.edge_matrices.append(edge_matrix)
self.display_graph.add_edge(begin_node, end_node)
self.display_layout = networkx.spring_layout(self.display_graph)
def add_jump(self, begin_node=None, end_node=None, jump_function=None):
"""Adds a jump mapping one internal node to an external node, required input is begin_node (it's name)
end_node, and the edge function"""
# check to see if the jump is already defined; if it is, increment a counter
jump_match = re.compile("jump_{0}_{1}".format(begin_node, end_node))
keys = list(self.__dict__.keys())
# print keys
iterator = 0
for key in keys:
if re.match(jump_match, key):
iterator += 1
jump_name = "jump_{0}_{1}_{2:0>3d}".format(begin_node, end_node, iterator)
self.__dict__[jump_name] = jump_function
self.jumps.append(jump_name)
self.display_graph.add_edge(begin_node, end_node)
self.display_layout = networkx.spring_layout(self.display_graph)
def move_to(self, path, **options):
"""Changes the state of the graph by moving along the path specified"""
defaults = {"debug": False, "verbose": False}
move_options = {}
for key, value in defaults.items():
move_options[key] = value
for key, value in options.items():
move_options[key] = value
if move_options["debug"]:
print(path)
for index, edge in enumerate(path):
# print edge
edge_pattern = 'edge_(?P<begin_node>\w+)_(?P<end_node>\w+)_(?P<iterator>\w+)'
match = re.match(edge_pattern, edge)
begin_node = match.groupdict()['begin_node']
end_node = match.groupdict()['end_node']
if move_options["verbose"]:
print(("moving {0} -> {1}".format(begin_node, end_node)))
# print self.data
self.data = self.__dict__[edge](self.data)
# print self.data
self.current_node = match.groupdict()['end_node']
self.state = [0 for i in range(len(self.node_names))]
position = self.node_names.index(self.current_node)
self.state[position] = 1
self.state_matrix = np.matrix(self.state).T
# print self.state
# print self.current_node
def virtual_move_to(self, path):
"""virtual_move_to simulates moving but does not change the state of the graph"""
# print path
temp_state = self.state
temp_data = self.data
temp_current_node = self.current_node
temp_node_names = self.node_names
for index, edge in enumerate(path):
# print edge
edge_pattern = 'edge_(?P<begin_node>\w+)_(?P<end_node>\w+)_(?P<iterator>\w+)'
match = re.match(edge_pattern, edge)
begin_node = match.groupdict()['begin_node']
end_node = match.groupdict()['end_node']
# print("moving {0} -> {1}".format(begin_node,end_node))
# print self.data
temp_data = self.__dict__[edge](temp_data)
# print self.data
temp_current_node = match.groupdict()['end_node']
temp_state = [0 for i in range(len(temp_node_names))]
position = temp_node_names.index(temp_current_node)
temp_state[position] = 1
# print temp_state
# print self.state
# print self.current_node
def __str__(self):
return str(self.data)
def add_node(self, node_name, edge_into_node_begin, edge_into_node_function, edge_out_node_end,
edge_out_node_function, node_description=None):
"""Adds a node to the graph. Required input is node_name (a string with no spaces),
a reference to an entering node,the function mapping the entering node to the new node,
a reference to an exiting node and the function mapping the
new node to the exiting node."""
# first check if node into and out of node is good
self.node_names.append(node_name)
self.state.append(0)
self.state_matrix = np.matrix(self.state).T
for index, matrix in enumerate(self.edge_matrices):
pad_row = np.zeros((1, len(matrix)))
new_matrix = np.concatenate((matrix, pad_row), axis=0)
pad_column = np.zeros((1, len(self.node_names)))
new_matrix = np.concatenate((new_matrix, pad_column.T), axis=1)
# print("New matrix is :\n{0}".format(new_matrix))
self.edge_matrices[index] = new_matrix
self.add_edge(begin_node=node_name, end_node=edge_out_node_end, edge_function=edge_out_node_function)
self.add_edge(begin_node=edge_into_node_begin, end_node=node_name, edge_function=edge_into_node_function)
if node_description:
self.node_descriptions.append(node_description)
self.display_graph.add_node(node_name)
self.display_graph.add_edge(node_name, edge_out_node_end)
self.display_graph.add_edge(edge_into_node_begin, node_name)
self.display_layout = networkx.spring_layout(self.display_graph)
def add_external_node(self, external_node_name, jump_into_node_begin,
jump_into_node_function, external_node_description=None):
"""Adds an external node to the graph. Required input is node_name (a string with no spaces),
a reference to an entering node,the function mapping the entering node to the new external node"""
# first check if node into and out of node is good
self.external_node_names.append(external_node_name)
self.add_jump(begin_node=jump_into_node_begin, end_node=external_node_name,
jump_function=jump_into_node_function)
if external_node_description:
self.external_node_descriptions.append(external_node_description)
self.display_graph.add_node(external_node_name)
self.display_graph.add_edge(jump_into_node_begin, external_node_name)
self.display_layout = networkx.spring_layout(self.display_graph)
def jump_to_external_node(self, external_node_name, **options):
"""Returns the result of the jump, the graph is left in the node that is the begining of the jump"""
end_node = external_node_name
jump_pattern = 'jump_(?P<begin_node>\w+)_{0}_(?P<iterator>\w+)'.format(end_node)
for jump in self.jumps[:]:
jump_match = re.match(jump_pattern, jump, re.IGNORECASE)
if jump_match:
jump_to_use = jump
begin_node = jump_match.groupdict()["begin_node"]
self.move_to_node(begin_node)
return self.__dict__[jump_to_use](self.data, **options)
def path_length(self, path, num_repeats=10):
"""Determines the length of a given path, currently the metric is based on the time to move to."""
begin_time = datetime.datetime.now()
# num_repeats=100
for i in range(num_repeats):
self.virtual_move_to(path)
end_time = datetime.datetime.now()
delta_t = end_time - begin_time
path_length = delta_t.total_seconds() / float(num_repeats)
if path_length == 0.0:
print("Warning the path length is less than 1 microsecond,"
"make sure num_repeats is high enough to measure it.")
return path_length
def is_path_valid(self, path):
"""Returns True if the path is valid from the current node position or False otherwise"""
null_state = [0 for i in range(len(self.node_names))]
null_state_matrix = np.matrix(null_state).T
new_state = np.matrix(self.state).T
for index, edge in enumerate(path):
# print index
# print edge
edge_position = self.edges.index(edge)
move_matrix = self.edge_matrices[edge_position]
# print move_matrix
new_state = move_matrix * new_state
# the state vector goes to all zeros when an edge cannot be taken
if not new_state.any():
    return False
return True
def get_entering_nodes(self, node):
"""Returns all nodes that have an edge that enter the specificed node"""
enter_edge_pattern = re.compile('edge_(?P<begin_node>\w+)_{0}_(?P<iterator>\w+)'.format(node))
enter_nodes = []
for index, edge in enumerate(self.edges):
enter_match = re.match(enter_edge_pattern, edge)
if enter_match:
enter_node = enter_match.groupdict()['begin_node']
enter_nodes.append(enter_node)
return enter_nodes
def get_entering_edges(self, node):
"""Returns all edges that enter the specificed node"""
enter_edge_pattern = re.compile('edge_(?P<begin_node>\w+)_{0}_(?P<iterator>\w+)'.format(node))
enter_edges = []
for index, edge in enumerate(self.edges):
if re.match(enter_edge_pattern, edge):
enter_edges.append(edge)
return enter_edges
def get_exiting_edges(self, node):
"""Returns all edges that exit the specificed node"""
exit_edge_pattern = re.compile('edge_{0}_(?P<end_node>\w+)_(?P<iterator>\w+)'.format(node))
exit_edges = []
for index, edge in enumerate(self.edges):
if re.match(exit_edge_pattern, edge):
exit_edges.append(edge)
return exit_edges
def get_exiting_nodes(self, node):
"""Returns all nodes that have an edge leaving the specificed node"""
exit_edge_pattern = re.compile('edge_{0}_(?P<end_node>\w+)_(?P<iterator>\w+)'.format(node))
exit_nodes = []
for index, edge in enumerate(self.edges):
exit_match = re.match(exit_edge_pattern, edge)
if exit_match:
exit_node = exit_match.groupdict()['end_node']
exit_nodes.append(exit_node)
return exit_nodes
def get_path(self, first_node, last_node, **options):
"""Returns the first path found between first node and last node, uses a breadth first search algorithm"""
defaults = {"debug": False, "method": "BreathFirst"}
self.get_path_options = {}
for key, value in defaults.items():
self.get_path_options[key] = value
for key, value in options.items():
self.get_path_options[key] = value
unvisited_nodes = self.node_names[:]
unvisited_nodes.remove(first_node)
visited_nodes = [first_node]
node_history = []
edge_history = []
path_queue = []
possible_paths = []
queue = []
current_edge = []
queue.append(first_node)
path = {first_node: []}
while queue:
# first remove the next node from the front of the queue
current_node = queue.pop(0)
if path_queue != []:
current_edge = path_queue.pop(0)
edge_history.append(current_edge)
node_history.append(current_node)
if self.get_path_options["debug"]:
print(("current_node is {0}".format(current_node)))
print(("current_edge is {0}".format(current_edge)))
# if this node is the destination exit returning the path
if current_node == last_node:
if self.get_path_options["debug"]:
print(("Node path was found to be {0}".format(node_path)))
print(("path was found to be {0}".format(edge_path)))
print(("{0} is {1}".format("path", path)))
return path[last_node][::-1]
adjacent_nodes = self.get_exiting_nodes(current_node)
adjacent_paths = self.get_exiting_edges(current_node)
if self.get_path_options["debug"]:
print(("{0} are {1}".format("adjacent_nodes", adjacent_nodes)))
print(("{0} are {1}".format("adjacent_paths", adjacent_paths)))
current_history = edge_history
for node_index, node in enumerate(adjacent_nodes):
if node not in visited_nodes:
queue.append(node)
path_queue.append(adjacent_paths[node_index])
visited_nodes.append(node)
path[node] = [adjacent_paths[node_index]] + path[current_node]
# possible_paths.append(current_path.append(node))
if self.get_path_options["debug"]:
print(("{0} is {1}".format("path_queue", path_queue)))
def move_to_node(self, node):
"""Moves from current_node to the specified node"""
path = self.get_path(self.current_node, node)
self.move_to(path)
def check_closed_path(self):
"""Checks that data is not changed for the first closed path found. Returns True if data==data after
moving around the closed path, False otherwise. Starting point is current_node """
temp_data = self.data
path = self.get_path(self.current_node, self.current_node)
if not self.is_path_valid(path):
    print("Path is not valid, graph definition is broken")
    raise ValueError("Path is not valid, graph definition is broken")
out = temp_data == self.data
out_list = [self.current_node, path, out]
print(("The assertion that the data remains unchanged,\n"
"for node {0} following path {1} is {2}".format(*out_list)))
return out
def is_graph_isomorphic(self):
"""Returns True if all nodes have closed paths that preserve the data, False otherwise"""
out = True
for node in self.node_names:
self.move_to_node(node)
if not self.check_closed_path():
out = False
return out
def show(self, **options):
"""Shows the graph using matplotlib and networkx"""
# Should be separated to allow for fixed presentation?
defaults = {"descriptions": False, "edge_descriptions": False, "save_plot": False,
"path": None, "active_node": True, "directory": None,
"specific_descriptor": self.graph_name.replace(" ", "_"),
"general_descriptor": "plot", "file_name": None,
"arrows": True, "node_size": 1000, "font_size": 10, "fix_layout": True}
show_options = {}
for key, value in defaults.items():
show_options[key] = value
for key, value in options.items():
show_options[key] = value
if show_options["directory"] is None:
show_options["directory"] = os.getcwd()
if show_options["active_node"]:
node_colors = []
for node in self.display_graph.nodes():
if node == self.current_node:
node_colors.append('b')
else:
if node in self.node_names:
node_colors.append('r')
elif node in self.external_node_names:
node_colors.append('g')
else:
node_colors = ['r' for node in self.node_names] + ['g' for node in self.node_names]
# print("{0} is {1}".format('node_colors',node_colors))
if show_options["descriptions"]:
node_labels = {node: self.node_descriptions[index] for index,
node in enumerate(self.node_names)}
if self.external_node_names:
for index, node in enumerate(self.external_node_names):
node_labels[node] = self.external_node_descriptions[index]
networkx.draw_networkx(self.display_graph, arrows=show_options["arrows"],
labels=node_labels, node_color=node_colors,
node_size=show_options["node_size"], font_size=show_options["font_size"],
pos=self.display_layout)
# print("{0} is {1}".format('node_labels',node_labels))
else:
networkx.draw_networkx(self.display_graph, arrows=show_options["arrows"], node_color=node_colors,
node_size=show_options["node_size"], font_size=show_options["font_size"],
pos=self.display_layout)
plt.axis('off')
plt.suptitle(self.options["graph_name"])
if show_options["file_name"] is None:
file_name = auto_name(specific_descriptor=show_options["specific_descriptor"],
general_descriptor=show_options["general_descriptor"],
directory=show_options["directory"], extension='png', padding=3)
else:
file_name = show_options["file_name"]
if show_options["save_plot"]:
# print file_name
if show_options["path"]:
plt.savefig(show_options["path"])
else:
plt.savefig(os.path.join(show_options["directory"], file_name))
else:
plt.show()
fig = plt.gcf()
return fig
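# Minimal usage sketch (added for illustration): the default two-node graph
# maps a string to its list of lines and back again.
#     g = Graph()
#     g.set_state('n1', "first line\nsecond line")
#     g.move_to_node('n2')
#     g.data  # -> ['first line', 'second line']
#     g.move_to_node('n1')  # the reverse edge uses string_list_collapse from GeneralModels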
class StringGraph(Graph):
"""String Graph is a graph relating different string forms"""
def __init__(self,**options):
"""Intializes the StringGraph Class by defining nodes and edges"""
defaults={"graph_name":"StringGraph",
"node_names":['String','StringList'],
"node_descriptions":["A plain string",
"A list of strings with no \\n, created with string.splitlines()"],
"current_node":'String',
"state":[1,0],
"data":"This is a test string\n it has to have multiple lines \n and many characters 34%6\n^",
"edge_2_to_1":edge_2_to_1,
"edge_1_to_2":edge_1_to_2
}
self.options={}
for key,value in defaults.items():
self.options[key]=value
for key,value in options.items():
self.options[key]=value
Graph.__init__(self,**self.options)
self.add_node("File","String",String_to_File,"String",File_to_String,node_description="Plain File")
self.add_node("CStringIo","String",String_to_CStringIo,"String",CStringIo_to_String,node_description="C File Like Object")
self.add_node("StringIo","String",String_to_StringIo,"String",StringIo_to_String,node_description="File Like Object")
self.add_edge(begin_node="StringList",end_node="File",edge_function=StringList_to_File)
# Changed from ColumnModeledGraph to TableGraph 12/14/2016 by AWS
class TableGraph(Graph):
"""Class that transforms column modeled data (table) from one format to another, use set_state to initialize to
your data.
#!python
defaults={"graph_name":"Table Graph",
"node_names":['DataFrame','AsciiDataTable'],
"node_descriptions":["Pandas Data Frame","AsciiDataTable"],
"current_node":'DataFrame',
"state":[1,0],
"data":pandas.DataFrame([[1,2,3],[3,4,5]],columns=["a","b","c"]),
"edge_2_to_1":AsciiDataTable_to_DataFrame,
"edge_1_to_2":DataFrame_to_AsciiDataTable}
"""
def __init__(self,**options):
defaults={"graph_name":"Table Graph",
"node_names":['DataFrame','AsciiDataTable'],
"node_descriptions":["Pandas Data Frame","AsciiDataTable"],
"current_node":'DataFrame',
"state":[1,0],
"data":pandas.DataFrame([[1,2,3],[3,4,5]],columns=["a","b","c"]),
"edge_2_to_1":AsciiDataTable_to_DataFrame,
"edge_1_to_2":DataFrame_to_AsciiDataTable}
self.options={}
for key,value in defaults.items():
self.options[key]=value
for key,value in options.items():
self.options[key]=value
Graph.__init__(self,**self.options)
self.add_node("HdfFile","DataFrame",DataFrame_to_HdfFile,
"DataFrame",HdfFile_to_DataFrame,
node_description="HDF File")
self.add_node("XmlDataTable","AsciiDataTable",AsciiDataTable_to_XmlDataTable,
"AsciiDataTable",XmlDataTable_to_AsciiDataTable,
node_description="XML Data Table")
# Need to add XML File and Html File using save and save_HTML()
self.add_node("ExcelFile","DataFrame",DataFrame_to_ExcelFile,
"DataFrame",ExcelFile_to_DataFrame,
node_description="Excel File")
self.add_node("OdsFile","ExcelFile",ExcelFile_to_OdsFile,
"ExcelFile",OdsFile_to_ExcelFile,"Open Office Spreadsheet")
self.add_node("HtmlString","DataFrame",DataFrame_to_HtmlString,
"DataFrame",HtmlString_to_DataFrame,
node_description="HTML String")
# Note: a lot of the pandas reading and writing causes float64 round-off errors;
# applymap(lambda x: np.around(x,10)) on all float fields will fix this.
# Also, the column names move about in order.
self.add_node("JsonFile","DataFrame",DataFrame_to_JsonFile,
"DataFrame",JsonFile_to_DataFrame,
node_description="JSON File")
self.add_node("JsonString","DataFrame",DataFrame_to_JsonString,
"DataFrame",JsonString_to_DataFrame,
node_description="JSON String")
self.add_node("CsvFile","DataFrame",DataFrame_to_CsvFile,
"DataFrame",CsvFile_to_DataFrame,
node_description="CSV File")
self.add_node("MatFile","AsciiDataTable",AsciiTable_to_MatFile,
"AsciiDataTable",MatFile_to_AsciiTable,
node_description="Matlab File")
self.add_node("XmlFile","XmlDataTable",XmlDataTable_to_XmlFile,
"XmlDataTable",XmlFile_to_XmlDataTable,
node_description="XML DataTable Saved As a File")
self.add_node("HtmlFile","HtmlString",HtmlString_to_HtmlFile,
"HtmlString",HtmlFile_to_HtmlString,
node_description="HTML File")
self.add_edge("DataFrame","HtmlFile",DataFrame_to_HtmlFile)
self.add_edge("JsonFile","XmlDataTable",JsonFile_to_XmlDataTable)
self.add_external_node("XsltResultString","XmlDataTable",XmlBase_to_XsltResultString,
external_node_description="XSLT Results String")
self.add_external_node("XsltResultFile","XmlDataTable",XmlBase_to_XsltResultFile,
external_node_description="XSLT Results File")
class ImageGraph(Graph):
"""A transformation graph for images node types are image formats and external nodes are
common image processing functions
#!python
defaults={"graph_name":"Image Graph",
"node_names":['Image','png'],
"node_descriptions":["PIL Image","png"],
"current_node":'Image',
"state":[1,0],
"data":PIL.Image.open(os.path.join(TESTS_DIRECTORY,'test.png')),
"edge_2_to_1":File_to_Image,
"edge_1_to_2":lambda x: Image_to_FileType(x,file_path="test",extension="png")}
"""
def __init__(self,**options):
defaults={"graph_name":"Image Graph",
"node_names":['Image','Png'],
"node_descriptions":["PIL Image","Png"],
"current_node":'Image',
"state":[1,0],
"data":PIL.Image.open(os.path.join(TESTS_DIRECTORY,'test.png')),
"edge_2_to_1":File_to_Image,
"edge_1_to_2":lambda x: Image_to_FileType(x,file_path="test",extension="png")}
self.options={}
for key,value in defaults.items():
self.options[key]=value
for key,value in options.items():
self.options[key]=value
Graph.__init__(self,**self.options)
self.add_node("Jpg","Image",lambda x: Image_to_FileType(x,file_path="test",extension="jpg"),
"Image",File_to_Image,node_description="Jpg File")
self.add_node("Tiff","Image",lambda x: Image_to_FileType(x,file_path="test",extension="tiff"),
"Image",File_to_Image,node_description="Tif File")
self.add_node("Gif","Image",lambda x: Image_to_FileType(x,file_path="test",extension="gif"),
"Image",File_to_Image,node_description="Gif File")
self.add_node("Bmp","Image",lambda x: Image_to_FileType(x,file_path="test",extension="bmp"),
"Image",File_to_Image,node_description="BMP File")
self.add_node("Base64","Png",PngFile_to_Base64,
"Png",Base64_to_PngFile,node_description="Base 64 PNG")
self.add_node("EmbeddedHtml","Base64",Base64Png_to_EmbeddedHtmlString,
"Base64",EmbeddedHtmlString_to_Base64Png,node_description="Embedded HTML of PNG")
self.add_node("Ndarray","Png",PngFile_to_Ndarray,
"Png",Ndarray_to_PngFile,node_description="Numpy Array")
self.add_node("MatplotlibFigure","Ndarray",Ndarray_to_MatplotlibFigure,
"Png",MatplotlibFigure_to_PngFile,node_description="MatplotlibFigure")
self.add_external_node("Thumbnail","Image",Image_to_ThumbnailFile,external_node_description="JPEG Thumbnail")
self.add_external_node("Matplotlib","Ndarray",Ndarray_to_Matplotlib,
external_node_description="Matplotlib Plot")
class MetadataGraph(Graph):
"""Metadata Graph is a graph representing the content of key,value metadata"""
def __init__(self,**options):
"""Intializes the metadata graph class"""
defaults={"graph_name":"Metadata Graph",
"node_names":['Dictionary','JsonString'],
"node_descriptions":["Python Dictionary","Json string"],
"current_node":'Dictionary',
"state":[1,0],
"data":{"a":"First","b":"Second"},
"edge_2_to_1":JsonString_to_Dictionary,
"edge_1_to_2":Dictionary_to_JsonString}
self.options={}
for key,value in defaults.items():
self.options[key]=value
for key,value in options.items():
self.options[key]=value
Graph.__init__(self,**self.options)
self.add_node("JsonFile","JsonString",JsonString_to_JsonFile,
"JsonString",JsonFile_to_JsonString,node_description="JSON File")
self.add_node("XmlString","Dictionary",Dictionary_to_XmlString,
"Dictionary",XmlString_to_Dictionary,node_description="XML string")
self.add_node("HtmlMetaString","Dictionary",Dictionary_to_HtmlMetaString,
"Dictionary",HtmlMetaString_to_Dictionary,node_description="HTML meta tags")
self.add_node("XmlTupleString","Dictionary",Dictionary_to_XmlTupleString,
"Dictionary",XmlTupleString_to_Dictionary,node_description="Tuple Line")
self.add_node("PickleFile","Dictionary",Dictionary_to_PickleFile,
"Dictionary",PickleFile_to_Dictionary,node_description="Pickled File")
self.add_node("ListList","Dictionary",Dictionary_to_ListList,
"Dictionary",ListList_to_Dictionary,node_description="List of lists")
self.add_node("HeaderList","Dictionary",Dictionary_to_HeaderList,
"Dictionary",HeaderList_to_Dictionary,node_description="Header List")
self.add_node("DataFrame","Dictionary",Dictionary_to_DataFrame,
"Dictionary",DataFrame_to_Dictionary,node_description="Pandas DataFrame")
self.add_node("AsciiDataTable","DataFrame",DataFrame_to_AsciiDataTable,
"DataFrame",AsciiDataTable_to_DataFrame,node_description="AsciiDataTable")
self.add_node("MatFile","AsciiDataTable",AsciiTable_to_MatFile,
"AsciiDataTable",MatFile_to_AsciiDataTableKeyValue,node_description="Matlab")
self.add_node("ExcelFile","DataFrame",DataFrame_to_ExcelFile,
"DataFrame",ExcelFile_to_DataFrame,node_description="excel")
self.add_node("HdfFile","DataFrame",DataFrame_to_HdfFile,
"DataFrame",HdfFile_to_DataFrame,node_description="hdf file")
self.add_node("CsvFile","DataFrame",DataFrame_to_CsvFile,
"DataFrame",CsvFile_to_DataFrame,node_description="CSV File")
self.add_node("HtmlFile","DataFrame",DataFrame_to_HtmlFile,
"DataFrame",HtmlFile_to_DataFrame,node_description="HTML Table File")
self.add_node("HtmlTableString","HtmlFile",HtmlFile_to_HtmlString,
"HtmlFile",HtmlString_to_HtmlFile,node_description="HTML Table String")
class TwoPortParameterGraph(Graph):
"""TwoPortParamterGraph is a content graph for two-port parameters,
it transforms between S,T,Y,Z,ABCD and H parameters and matrix versions.
#!python
defaults={"graph_name":"Two Port Parameter Graph",
"node_names":["SFrequencyList",'SFrequencyMatrixList'],
"node_descriptions":["S Parameters","S Parameters in a Matrix"],
"current_node":'SFrequencyList',
"state":[1,0],
"data":[[1.0,.9,.436,.436,.9]],
"edge_2_to_1":FrequencyMatrixList_to_FrequencyList,
"edge_1_to_2":FrequencyList_to_FrequencyMatrixList,
"frequency_units":"GHz",
"Z01":50,
"Z02":50 }
"""
def __init__(self,**options):
defaults={"graph_name":"Two Port Parameter Graph",
"node_names":["SFrequencyList",'SFrequencyMatrixList'],
"node_descriptions":["S Parameters","S Parameters in a Matrix"],
"current_node":'SFrequencyList',
"state":[1,0],
"data":[[1.0,.9,.436,.436,.9]],
"edge_2_to_1":FrequencyMatrixList_to_FrequencyList,
"edge_1_to_2":FrequencyList_to_FrequencyMatrixList,
"frequency_units":"GHz",
"Z01":50,
"Z02":50 }
graph_options={}
for key,value in defaults.items():
graph_options[key]=value
for key,value in options.items():
graph_options[key]=value
Graph.__init__(self,**graph_options)
self.add_node("TFrequencyMatrixList",
"SFrequencyMatrixList",SFrequencyMatrixList_to_TFrequencyMatrixList,
"SFrequencyMatrixList",TFrequencyMatrixList_to_SFrequencyMatrixList,
"T Parameters in a Matrix")
self.add_node("TFrequencyList",
"TFrequencyMatrixList",FrequencyMatrixList_to_FrequencyList,
"TFrequencyMatrixList",FrequencyList_to_FrequencyMatrixList,
"T Parameters")
self.add_node("ZFrequencyList",
"SFrequencyList",SFrequencyList_to_ZFrequencyList,
"TFrequencyList",ZFrequencyList_to_TFrequencyList,
"Z Parameters")
self.add_node("ZFrequencyMatrixList",
"ZFrequencyList",FrequencyList_to_FrequencyMatrixList,
"ZFrequencyList",FrequencyMatrixList_to_FrequencyList,
"Z Parameters in a matrix")
self.add_node("ABCDFrequencyList",
"ZFrequencyList",ZFrequencyList_to_ABCDFrequencyList,
"ZFrequencyList",ABCDFrequencyList_to_ZFrequencyList,
"ABCD Parameters")
self.add_node("ABCDFrequencyMatrixList",
"ABCDFrequencyList",FrequencyList_to_FrequencyMatrixList,
"ABCDFrequencyList",FrequencyMatrixList_to_FrequencyList,
"ABCD Parameters in a matrix")
self.add_node("HFrequencyList",
"ABCDFrequencyList",ABCDFrequencyList_to_HFrequencyList,
"ZFrequencyList",HFrequencyList_to_ZFrequencyList,
"h Parameters")
self.add_node("HFrequencyMatrixList",
"HFrequencyList",FrequencyList_to_FrequencyMatrixList,
"HFrequencyList",FrequencyMatrixList_to_FrequencyList,
"H Parameters in a matrix")
self.add_node("YFrequencyList",
"ABCDFrequencyList",ABCDFrequencyList_to_YFrequencyList,
"HFrequencyList",YFrequencyList_to_HFrequencyList,
"Y Parameters")
self.add_node("YFrequencyMatrixList",
"YFrequencyList",FrequencyList_to_FrequencyMatrixList,
"YFrequencyList",FrequencyMatrixList_to_FrequencyList,
"Y Parameters in a matrix")
self.add_edge(begin_node="ZFrequencyMatrixList",
end_node="YFrequencyMatrixList",
edge_function=ZFrequencyMatrixList_to_YFrequencyMatrixList)
self.add_edge(begin_node="SFrequencyMatrixList",
end_node="ZFrequencyMatrixList",
edge_function=SFrequencyMatrixList_to_ZFrequencyMatrixList)
self.add_edge(begin_node="ZFrequencyMatrixList",
end_node="TFrequencyMatrixList",
edge_function=ZFrequencyMatrixList_to_TFrequencyMatrixList)
self.add_edge(begin_node="ABCDFrequencyList",
end_node="SFrequencyList",
edge_function=ABCDFrequencyList_to_SFrequencyList)
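# Path-inspection sketch (added for illustration): get_path returns the edge
# names the graph will traverse, here from S parameters to Y parameters via
# Z and ABCD; move_to_node applies those edge functions to the data.
#     tp = TwoPortParameterGraph()
#     tp.get_path('SFrequencyList', 'YFrequencyList')
#     tp.move_to_node('ZFrequencyList')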
class DataTableGraph(Graph):
""" Class that transforms a row modelled header and metadata to several different data types
#!python
defaults={"graph_name":"Data Table Graph",
"node_names":['DataFrameDictionary','AsciiDataTable'],
"node_descriptions":["Pandas Data Frame Dictionary","AsciiDataTable"],
"current_node":'DataFrameDictionary',
"state":[1,0],
"data":AsciiDataTable_to_DataFrameDictionary(TwoPortRawModel(os.path.join(TESTS_DIRECTORY,'TestFileTwoPortRaw.txt'))),
"edge_2_to_1":AsciiDataTable_to_DataFrameDictionary,
"edge_1_to_2":DataFrameDictionary_to_AsciiDataTable
}
"""
def __init__(self,**options):
defaults={"graph_name":"Data Table Graph",
"node_names":['DataFrameDictionary','AsciiDataTable'],
"node_descriptions":["Pandas Data Frame Dictionary","AsciiDataTable"],
"current_node":'DataFrameDictionary',
"state":[1,0],
"data":AsciiDataTable_to_DataFrameDictionary(TwoPortRawModel(os.path.join(TESTS_DIRECTORY,'TestFileTwoPortRaw.txt'))),
"edge_2_to_1":AsciiDataTable_to_DataFrameDictionary,
"edge_1_to_2":DataFrameDictionary_to_AsciiDataTable
}
graph_options={}
for key,value in defaults.items():
graph_options[key]=value
for key,value in options.items():
graph_options[key]=value
Graph.__init__(self, **graph_options)
self.add_node("ExcelFile", "DataFrameDictionary", DataFrameDictionary_to_ExcelFile,
"DataFrameDictionary", ExcelFile_to_DataFrameDictionary,
node_description="Excel Workbook")
self.add_node("HdfFile", "DataFrameDictionary", DataFrameDictionary_to_HdfFile,
"DataFrameDictionary", HdfFile_to_DataFrameDictionary, node_description="HD5 File")
self.add_node("CsvFile", "AsciiDataTable", AsciiDataTable_to_CsvFile,
"AsciiDataTable", File_to_AsciiDataTable, node_description="CSV File")
self.add_node("HpFile", "AsciiDataTable", AsciiDataTable_to_HpFile,
"AsciiDataTable", File_to_AsciiDataTable, node_description="hp format File")
self.add_external_node(external_node_name="XMLDataTable", jump_into_node_begin="AsciiDataTable",
jump_into_node_function=AsciiDataTable_to_XmlDataTable,
external_node_description="XMLDataTable")
#-----------------------------------------------------------------------------
# Module Scripts
#TODO: Add test_Graph script currently lives in jupyter-notebooks
#-----------------------------------------------------------------------------
# Module Runner
if __name__ == '__main__':
pass
| 49.816969
| 136
| 0.603793
|
import re
import datetime
import sys
import os
sys.path.append(os.path.join(os.path.dirname( __file__ ), '..','..'))
try:
from Code.Utils.Alias import *
METHOD_ALIASES=1
except:
print("The module pyMez.Code.Utils.Alias was not found")
METHOD_ALIASES=0
pass
try:
from Code.DataHandlers.GeneralModels import *
except:
print("The module pyMez.Code.DataHandlers.GeneralModels was not found,"
"please put it on the python path")
raise ImportError
try:
from Code.DataHandlers.TouchstoneModels import *
except:
print("The module pyMez.Code.DataHandlers.TouchstoneModels was not found,"
"please put it on the python path")
raise ImportError
try:
from Code.DataHandlers.Translations import *
except:
print("The module pyMez.Code.DataHandlers.Translations was not found or had an error,"
"please put it on the python path or resolve the error")
raise ImportError
try:
import numpy as np
except:
print("The module numpy was not found,"
"please put it on the python path")
raise ImportError
try:
import networkx
except:
print("The module networkx was not found,"
"please put it on the python path")
raise ImportError
def edge_1_to_2(in_string):
return in_string.splitlines()
def edge_2_to_1(string_list):
return string_list_collapse(string_list)
def visit_all_nodes(graph):
nodes=graph.node_names
for node in nodes:
graph.move_to_node(node)
def visit_and_print_all_nodes(graph):
nodes=graph.node_names
for node in nodes:
graph.move_to_node(node)
print((graph.data))
def to_node_name(node_data):
class_name = node_data.__class__.__name__
node_name = class_name
if re.match('list', class_name):
node_name = "List"
try:
element_class_name = node_data[0].__class__.__name__
node_name = element_class_name + node_name
except:
pass
elif re.match('dict', class_name):
node_name = "Dictionary"
try:
element_class_name = list(node_data.values())[0].__class__.__name__
node_name = element_class_name + node_name
except:
pass
elif re.match('str', class_name):
node_name = "String"
if os.path.isfile(node_data):
node_name = "File"
extension = ""
try:
if re.search("\.", node_data):
extension = node_data.split(".")[-1]
node_name = extension.title() + node_name
except:
pass
elif fnmatch.fnmatch(node_data, "*.*"):
node_name = "File"
try:
if re.search("\.", node_data):
extension = node_data.split(".")[-1]
node_name = extension.title() + node_name
except:
pass
node_name = node_name.replace("str", "String").replace("dict", "Dictionary")
return (node_name)
def TableGraph_to_Links(table_graph, **options):
defaults = {"base_name": None,
"nodes": ['XmlFile', 'CsvFile', 'ExcelFile', 'OdsFile', 'MatFile', 'HtmlFile', 'JsonFile'],
"extensions": ['xml', 'csv', 'xlsx', 'ods', 'mat', 'html', 'json'],
"mime_types": ['application/xml', 'text/plain',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'application/vnd.oasis.opendocument.spreadsheet',
'application/x-matlab-data', 'text/html', 'application/json']}
conversion_options = {}
for key, value in defaults.items():
conversion_options[key] = value
for key, value in options.items():
conversion_options[key] = value
if conversion_options["base_name"] is None:
base_name = 'test.txt'
else:
base_name = conversion_options["base_name"]
nodes = conversion_options["nodes"]
extensions = conversion_options["extensions"]
mime_types = conversion_options["mime_types"]
out_links = ""
for node_index, node in enumerate(nodes):
table_graph.move_to_node(node)
file_path = table_graph.data
in_file = open(file_path, 'rb')
content_string = in_file.read()
link = String_to_DownloadLink(content_string,
suggested_name=change_extension(base_name, extensions[node_index]),
mime_type=mime_types[node_index],
text=extensions[node_index])
if node_index == len(nodes) - 1:
out_links = out_links + link
else:
out_links = out_links + link + " | "
return out_links
def remove_circular_paths(path):
edge_pattern=re.compile("edge_(?P<begin_node>\w+)_(?P<end_node>\w+)_(?P<iterator>\w+)")
past_locations=[]
for index,edge in enumerate(path):
match=re.match(edge_pattern,edge)
begin_node=match.groupdict()["begin_node"]
end_node=match.groupdict()["end_node"]
past_locations.append(begin_node)
new_path=[]
node_index=0
between_list=[False for item in past_locations]
while(node_index<len(past_locations)):
node=past_locations[node_index]
old_path=new_path
new_path=[]
number_of_visits=past_locations.count(node)
if number_of_visits>1:
equality_list=[x==node for x in past_locations]
print(("{0} is {1}".format("equality_list",equality_list)))
between=False
visit_number=0
for index,equality in enumerate(equality_list):
if equality:
visit_number+=1
if visit_number==1 or visit_number==number_of_visits:
between=not between
between_list[index]=between or between_list[index]
else:
between_list[index]=between or between_list[index]
else:
between_list[index]=between or between_list[index]
for index,item in enumerate(between_list):
if not item:
new_path.append(path[index])
node_index+=1
if new_path in [[]]:
new_path=path
return new_path
class Graph(object):
def __init__(self, **options):
defaults = {"graph_name": "Graph",
"node_names": ['n1', 'n2'],
"node_descriptions": ["A plain string",
"A list of strings with no \\n, created with string.splitlines()"],
"current_node": 'n1',
"state": [1, 0],
"data": "This is a test string\n it has to have multiple lines \n and many characters 34%6\n^",
"edge_2_to_1": edge_2_to_1,
"edge_1_to_2": edge_1_to_2
}
self.options = {}
for key, value in defaults.items():
self.options[key] = value
for key, value in options.items():
self.options[key] = value
self.elements = ['graph_name', 'node_names', 'node_descriptions', 'current_node', 'state', 'data']
for element in self.elements:
self.__dict__[element] = self.options[element]
self.edges = []
self.edge_matrices = []
self.state_matrix = np.matrix(self.state).T
self.display_graph = networkx.DiGraph()
self.add_edge(self.node_names[0], self.node_names[1], self.options["edge_1_to_2"])
self.add_edge(self.node_names[1], self.node_names[0], self.options["edge_2_to_1"])
self.jumps = []
self.external_node_names = []
self.external_node_descriptions = []
self.display_layout = networkx.spring_layout(self.display_graph)
def get_description_dictionary(self):
dictionary = {node_name: self.node_descriptions[index] for index, node_name in enumerate(self.node_names)}
return dictionary
def set_state(self, node_name, node_data):
try:
current_node_state_position = self.node_names.index(node_name)
self.current_node = node_name
self.data = node_data
self.state = [0 for i in range(len(self.node_names))]
self.state[current_node_state_position] = 1
self.state_matrix = np.matrix(self.state).T
except:
print(("Could not set the state of graph: {0}".format(self.graph_name)))
raise
def add_edge(self, begin_node=None, end_node=None, edge_function=None):
edge_match = re.compile("edge_{0}_{1}".format(begin_node, end_node))
keys = list(self.__dict__.keys())
iterator = 0
for key in keys:
if re.match(edge_match, key):
iterator += 1
edge_name = "edge_{0}_{1}_{2:0>3d}".format(begin_node, end_node, iterator)
self.__dict__[edge_name] = edge_function
self.edges.append(edge_name)
edge_matrix = np.zeros((len(self.state), len(self.state)))
begin_position = self.node_names.index(begin_node)
end_position = self.node_names.index(end_node)
edge_matrix[end_position][begin_position] = 1
edge_matrix = np.matrix(edge_matrix)
self.edge_matrices.append(edge_matrix)
self.display_graph.add_edge(begin_node, end_node)
self.display_layout = networkx.spring_layout(self.display_graph)
def add_jump(self, begin_node=None, end_node=None, jump_function=None):
jump_match = re.compile("jump_{0}_{1}".format(begin_node, end_node))
keys = list(self.__dict__.keys())
iterator = 0
for key in keys:
if re.match(jump_match, key):
iterator += 1
jump_name = "jump_{0}_{1}_{2:0>3d}".format(begin_node, end_node, iterator)
self.__dict__[jump_name] = jump_function
self.jumps.append(jump_name)
self.display_graph.add_edge(begin_node, end_node)
self.display_layout = networkx.spring_layout(self.display_graph)
def move_to(self, path, **options):
defaults = {"debug": False, "verbose": False}
move_options = {}
for key, value in defaults.items():
move_options[key] = value
for key, value in options.items():
move_options[key] = value
if move_options["debug"]:
print(path)
for index, edge in enumerate(path):
            edge_pattern = r'edge_(?P<begin_node>\w+)_(?P<end_node>\w+)_(?P<iterator>\w+)'
match = re.match(edge_pattern, edge)
begin_node = match.groupdict()['begin_node']
end_node = match.groupdict()['end_node']
if move_options["verbose"]:
print(("moving {0} -> {1}".format(begin_node, end_node)))
self.data = self.__dict__[edge](self.data)
self.current_node = match.groupdict()['end_node']
self.state = [0 for i in range(len(self.node_names))]
position = self.node_names.index(self.current_node)
self.state[position] = 1
self.state_matrix = np.matrix(self.state).T
def virtual_move_to(self, path):
temp_state = self.state
temp_data = self.data
temp_current_node = self.current_node
temp_node_names = self.node_names
for index, edge in enumerate(path):
            edge_pattern = r'edge_(?P<begin_node>\w+)_(?P<end_node>\w+)_(?P<iterator>\w+)'
match = re.match(edge_pattern, edge)
begin_node = match.groupdict()['begin_node']
end_node = match.groupdict()['end_node']
temp_data = self.__dict__[edge](temp_data)
temp_current_node = match.groupdict()['end_node']
temp_state = [0 for i in range(len(temp_node_names))]
position = temp_node_names.index(temp_current_node)
temp_state[position] = 1
def __str__(self):
return str(self.data)
def add_node(self, node_name, edge_into_node_begin, edge_into_node_function, edge_out_node_end,
edge_out_node_function, node_description=None):
self.node_names.append(node_name)
self.state.append(0)
self.state_matrix = np.matrix(self.state).T
for index, matrix in enumerate(self.edge_matrices):
pad_row = np.zeros((1, len(matrix)))
new_matrix = np.concatenate((matrix, pad_row), axis=0)
pad_column = np.zeros((1, len(self.node_names)))
new_matrix = np.concatenate((new_matrix, pad_column.T), axis=1)
self.edge_matrices[index] = new_matrix
self.add_edge(begin_node=node_name, end_node=edge_out_node_end, edge_function=edge_out_node_function)
self.add_edge(begin_node=edge_into_node_begin, end_node=node_name, edge_function=edge_into_node_function)
if node_description:
self.node_descriptions.append(node_description)
self.display_graph.add_node(node_name)
self.display_graph.add_edge(node_name, edge_out_node_end)
self.display_graph.add_edge(edge_into_node_begin, node_name)
self.display_layout = networkx.spring_layout(self.display_graph)
def add_external_node(self, external_node_name, jump_into_node_begin,
jump_into_node_function, external_node_description=None):
self.external_node_names.append(external_node_name)
self.add_jump(begin_node=jump_into_node_begin, end_node=external_node_name,
jump_function=jump_into_node_function)
if external_node_description:
self.external_node_descriptions.append(external_node_description)
self.display_graph.add_node(external_node_name)
self.display_graph.add_edge(jump_into_node_begin, external_node_name)
self.display_layout = networkx.spring_layout(self.display_graph)
def jump_to_external_node(self, external_node_name, **options):
end_node = external_node_name
        jump_pattern = r'jump_(?P<begin_node>\w+)_{0}_(?P<iterator>\w+)'.format(end_node)
for jump in self.jumps[:]:
jump_match = re.match(jump_pattern, jump, re.IGNORECASE)
if jump_match:
jump_to_use = jump
begin_node = jump_match.groupdict()["begin_node"]
self.move_to_node(begin_node)
return self.__dict__[jump_to_use](self.data, **options)
def path_length(self, path, num_repeats=10):
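        # Crude wall-clock benchmark: replay the path num_repeats times through
        # virtual_move_to (which shadows the state but still executes every
        # edge function) and report the mean seconds per traversal.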
begin_time = datetime.datetime.now()
for i in range(num_repeats):
self.virtual_move_to(path)
end_time = datetime.datetime.now()
delta_t = end_time - begin_time
path_length = delta_t.total_seconds() / float(num_repeats)
        if path_length == 0.0:
            print("Warning: the path length is less than 1 microsecond, "
                  "make sure num_repeats is high enough to measure it.")
return path_length
def is_path_valid(self, path):
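        # State-vector sketch (illustrative values): with node_names ['n1', 'n2']
        # and state [1, 0], the matrix for edge_n1_n2_001 is [[0, 0], [1, 0]],
        # so matrix * state -> [[0], [1]] and the walker lands on n2. A product
        # that collapses to the zero vector means the path used a missing edge.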
null_state = [0 for i in range(len(self.node_names))]
null_state_matrix = np.matrix(null_state).T
new_state = np.matrix(self.state).T
for index, edge in enumerate(path):
edge_position = self.edges.index(edge)
move_matrix = self.edge_matrices[edge_position]
new_state = move_matrix * new_state
            if not new_state.any():
                return False
return True
def get_entering_nodes(self, node):
        enter_edge_pattern = re.compile(r'edge_(?P<begin_node>\w+)_{0}_(?P<iterator>\w+)'.format(node))
enter_nodes = []
for index, edge in enumerate(self.edges):
enter_match = re.match(enter_edge_pattern, edge)
if enter_match:
enter_node = enter_match.groupdict()['begin_node']
enter_nodes.append(enter_node)
return enter_nodes
def get_entering_edges(self, node):
        enter_edge_pattern = re.compile(r'edge_(?P<begin_node>\w+)_{0}_(?P<iterator>\w+)'.format(node))
enter_edges = []
for index, edge in enumerate(self.edges):
if re.match(enter_edge_pattern, edge):
enter_edges.append(edge)
return enter_edges
def get_exiting_edges(self, node):
        exit_edge_pattern = re.compile(r'edge_{0}_(?P<end_node>\w+)_(?P<iterator>\w+)'.format(node))
exit_edges = []
for index, edge in enumerate(self.edges):
if re.match(exit_edge_pattern, edge):
exit_edges.append(edge)
return exit_edges
def get_exiting_nodes(self, node):
        exit_edge_pattern = re.compile(r'edge_{0}_(?P<end_node>\w+)_(?P<iterator>\w+)'.format(node))
exit_nodes = []
for index, edge in enumerate(self.edges):
exit_match = re.match(exit_edge_pattern, edge)
if exit_match:
exit_node = exit_match.groupdict()['end_node']
exit_nodes.append(exit_node)
return exit_nodes
def get_path(self, first_node, last_node, **options):
defaults = {"debug": False, "method": "BreathFirst"}
self.get_path_options = {}
for key, value in defaults.items():
self.get_path_options[key] = value
for key, value in options.items():
self.get_path_options[key] = value
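        # Breadth-first search over exiting edges: path[last_node] accumulates
        # edges from last_node back to first_node, hence the [::-1] reversal on
        # return. If last_node is unreachable the queue drains and the method
        # implicitly returns None.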
unvisited_nodes = self.node_names[:]
unvisited_nodes.remove(first_node)
visited_nodes = [first_node]
node_history = []
edge_history = []
path_queue = []
possible_paths = []
queue = []
current_edge = []
queue.append(first_node)
path = {first_node: []}
while queue:
current_node = queue.pop(0)
if path_queue != []:
current_edge = path_queue.pop(0)
edge_history.append(current_edge)
node_history.append(current_node)
if self.get_path_options["debug"]:
print(("current_node is {0}".format(current_node)))
print(("current_edge is {0}".format(current_edge)))
            if current_node == last_node:
                if self.get_path_options["debug"]:
                    print(("node_history was found to be {0}".format(node_history)))
                    print(("edge_history was found to be {0}".format(edge_history)))
                    print(("{0} is {1}".format("path", path)))
                return path[last_node][::-1]
adjacent_nodes = self.get_exiting_nodes(current_node)
adjacent_paths = self.get_exiting_edges(current_node)
if self.get_path_options["debug"]:
print(("{0} are {1}".format("adjacent_nodes", adjacent_nodes)))
print(("{0} are {1}".format("adjacent_paths", adjacent_paths)))
current_history = edge_history
            for node_index, node in enumerate(adjacent_nodes):
                if node not in visited_nodes:
                    queue.append(node)
                    path_queue.append(adjacent_paths[node_index])
                    visited_nodes.append(node)
                    path[node] = [adjacent_paths[node_index]] + path[current_node]
if self.get_path_options["debug"]:
print(("{0} is {1}".format("path_queue", path_queue)))
def move_to_node(self, node):
path = self.get_path(self.current_node, node)
self.move_to(path)
def check_closed_path(self):
temp_data = self.data
path = self.get_path(self.current_node, self.current_node)
        if not self.is_path_valid(path):
            raise RuntimeError("Path is not valid, graph definition is broken")
out = temp_data == self.data
out_list = [self.current_node, path, out]
print(("The assertion that the data remains unchanged,\n"
"for node {0} following path {1} is {2}".format(*out_list)))
return out
def is_graph_isomorphic(self):
out = True
for node in self.node_names:
self.move_to_node(node)
            if not self.check_closed_path():
out = False
return out
def show(self, **options):
defaults = {"descriptions": False, "edge_descriptions": False, "save_plot": False,
"path": None, "active_node": True, "directory": None,
"specific_descriptor": self.graph_name.replace(" ", "_"),
"general_descriptor": "plot", "file_name": None,
"arrows": True, "node_size": 1000, "font_size": 10, "fix_layout": True}
show_options = {}
for key, value in defaults.items():
show_options[key] = value
for key, value in options.items():
show_options[key] = value
if show_options["directory"] is None:
show_options["directory"] = os.getcwd()
if show_options["active_node"]:
node_colors = []
for node in self.display_graph.nodes():
if node == self.current_node:
node_colors.append('b')
else:
if node in self.node_names:
node_colors.append('r')
elif node in self.external_node_names:
node_colors.append('g')
        else:
            node_colors = ['r' for node in self.node_names] + ['g' for node in self.external_node_names]
if show_options["descriptions"]:
node_labels = {node: self.node_descriptions[index] for index,
node in enumerate(self.node_names)}
if self.external_node_names:
for index, node in enumerate(self.external_node_names):
node_labels[node] = self.external_node_descriptions[index]
networkx.draw_networkx(self.display_graph, arrows=show_options["arrows"],
labels=node_labels, node_color=node_colors,
node_size=show_options["node_size"], font_size=show_options["font_size"],
pos=self.display_layout)
else:
networkx.draw_networkx(self.display_graph, arrows=show_options["arrows"], node_color=node_colors,
node_size=show_options["node_size"], font_size=show_options["font_size"],
pos=self.display_layout)
plt.axis('off')
plt.suptitle(self.options["graph_name"])
if show_options["file_name"] is None:
file_name = auto_name(specific_descriptor=show_options["specific_descriptor"],
general_descriptor=show_options["general_descriptor"],
directory=show_options["directory"], extension='png', padding=3)
else:
file_name = show_options["file_name"]
if show_options["save_plot"]:
if show_options["path"]:
plt.savefig(show_options["path"])
else:
plt.savefig(os.path.join(show_options["directory"], file_name))
else:
plt.show()
fig = plt.gcf()
return fig
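# A minimal usage sketch for Graph (hypothetical two-node graph; the identity
# lambdas stand in for real converter functions such as edge_1_to_2 above):
#     g = Graph(node_names=['a', 'b'], node_descriptions=['A', 'B'],
#               current_node='a', state=[1, 0], data="demo",
#               edge_1_to_2=lambda x: x, edge_2_to_1=lambda x: x)
#     g.move_to_node('b')       # runs get_path and applies each edge function
#     print(g.current_node)     # -> 'b'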
class StringGraph(Graph):
def __init__(self,**options):
defaults={"graph_name":"StringGraph",
"node_names":['String','StringList'],
"node_descriptions":["A plain string",
"A list of strings with no \\n, created with string.splitlines()"],
"current_node":'String',
"state":[1,0],
"data":"This is a test string\n it has to have multiple lines \n and many characters 34%6\n^",
"edge_2_to_1":edge_2_to_1,
"edge_1_to_2":edge_1_to_2
}
self.options={}
for key,value in defaults.items():
self.options[key]=value
for key,value in options.items():
self.options[key]=value
Graph.__init__(self,**self.options)
self.add_node("File","String",String_to_File,"String",File_to_String,node_description="Plain File")
self.add_node("CStringIo","String",String_to_CStringIo,"String",CStringIo_to_String,node_description="C File Like Object")
self.add_node("StringIo","String",String_to_StringIo,"String",StringIo_to_String,node_description="File Like Object")
self.add_edge(begin_node="StringList",end_node="File",edge_function=StringList_to_File)
class TableGraph(Graph):
def __init__(self,**options):
defaults={"graph_name":"Table Graph",
"node_names":['DataFrame','AsciiDataTable'],
"node_descriptions":["Pandas Data Frame","AsciiDataTable"],
"current_node":'DataFrame',
"state":[1,0],
"data":pandas.DataFrame([[1,2,3],[3,4,5]],columns=["a","b","c"]),
"edge_2_to_1":AsciiDataTable_to_DataFrame,
"edge_1_to_2":DataFrame_to_AsciiDataTable}
self.options={}
for key,value in defaults.items():
self.options[key]=value
for key,value in options.items():
self.options[key]=value
Graph.__init__(self,**self.options)
self.add_node("HdfFile","DataFrame",DataFrame_to_HdfFile,
"DataFrame",HdfFile_to_DataFrame,
node_description="HDF File")
self.add_node("XmlDataTable","AsciiDataTable",AsciiDataTable_to_XmlDataTable,
"AsciiDataTable",XmlDataTable_to_AsciiDataTable,
node_description="XML Data Table")
self.add_node("ExcelFile","DataFrame",DataFrame_to_ExcelFile,
"DataFrame",ExcelFile_to_DataFrame,
node_description="Excel File")
self.add_node("OdsFile","ExcelFile",ExcelFile_to_OdsFile,
"ExcelFile",OdsFile_to_ExcelFile,"Open Office Spreadsheet")
self.add_node("HtmlString","DataFrame",DataFrame_to_HtmlString,
"DataFrame",HtmlString_to_DataFrame,
node_description="HTML String")
self.add_node("JsonFile","DataFrame",DataFrame_to_JsonFile,
"DataFrame",JsonFile_to_DataFrame,
node_description="JSON File")
self.add_node("JsonString","DataFrame",DataFrame_to_JsonString,
"DataFrame",JsonString_to_DataFrame,
node_description="JSON String")
self.add_node("CsvFile","DataFrame",DataFrame_to_CsvFile,
"DataFrame",CsvFile_to_DataFrame,
node_description="CSV File")
self.add_node("MatFile","AsciiDataTable",AsciiTable_to_MatFile,
"AsciiDataTable",MatFile_to_AsciiTable,
node_description="Matlab File")
self.add_node("XmlFile","XmlDataTable",XmlDataTable_to_XmlFile,
"XmlDataTable",XmlFile_to_XmlDataTable,
node_description="XML DataTable Saved As a File")
self.add_node("HtmlFile","HtmlString",HtmlString_to_HtmlFile,
"HtmlString",HtmlFile_to_HtmlString,
node_description="HTML File")
self.add_edge("DataFrame","HtmlFile",DataFrame_to_HtmlFile)
self.add_edge("JsonFile","XmlDataTable",JsonFile_to_XmlDataTable)
self.add_external_node("XsltResultString","XmlDataTable",XmlBase_to_XsltResultString,
external_node_description="XSLT Results String")
self.add_external_node("XsltResultFile","XmlDataTable",XmlBase_to_XsltResultFile,
external_node_description="XSLT Results File")
class ImageGraph(Graph):
def __init__(self,**options):
defaults={"graph_name":"Image Graph",
"node_names":['Image','Png'],
"node_descriptions":["PIL Image","Png"],
"current_node":'Image',
"state":[1,0],
"data":PIL.Image.open(os.path.join(TESTS_DIRECTORY,'test.png')),
"edge_2_to_1":File_to_Image,
"edge_1_to_2":lambda x: Image_to_FileType(x,file_path="test",extension="png")}
self.options={}
for key,value in defaults.items():
self.options[key]=value
for key,value in options.items():
self.options[key]=value
Graph.__init__(self,**self.options)
self.add_node("Jpg","Image",lambda x: Image_to_FileType(x,file_path="test",extension="jpg"),
"Image",File_to_Image,node_description="Jpg File")
self.add_node("Tiff","Image",lambda x: Image_to_FileType(x,file_path="test",extension="tiff"),
"Image",File_to_Image,node_description="Tif File")
self.add_node("Gif","Image",lambda x: Image_to_FileType(x,file_path="test",extension="gif"),
"Image",File_to_Image,node_description="Gif File")
self.add_node("Bmp","Image",lambda x: Image_to_FileType(x,file_path="test",extension="bmp"),
"Image",File_to_Image,node_description="BMP File")
self.add_node("Base64","Png",PngFile_to_Base64,
"Png",Base64_to_PngFile,node_description="Base 64 PNG")
self.add_node("EmbeddedHtml","Base64",Base64Png_to_EmbeddedHtmlString,
"Base64",EmbeddedHtmlString_to_Base64Png,node_description="Embedded HTML of PNG")
self.add_node("Ndarray","Png",PngFile_to_Ndarray,
"Png",Ndarray_to_PngFile,node_description="Numpy Array")
self.add_node("MatplotlibFigure","Ndarray",Ndarray_to_MatplotlibFigure,
"Png",MatplotlibFigure_to_PngFile,node_description="MatplotlibFigure")
self.add_external_node("Thumbnail","Image",Image_to_ThumbnailFile,external_node_description="JPEG Thumbnail")
self.add_external_node("Matplotlib","Ndarray",Ndarray_to_Matplotlib,
external_node_description="Matplotlib Plot")
class MetadataGraph(Graph):
def __init__(self,**options):
defaults={"graph_name":"Metadata Graph",
"node_names":['Dictionary','JsonString'],
"node_descriptions":["Python Dictionary","Json string"],
"current_node":'Dictionary',
"state":[1,0],
"data":{"a":"First","b":"Second"},
"edge_2_to_1":JsonString_to_Dictionary,
"edge_1_to_2":Dictionary_to_JsonString}
self.options={}
for key,value in defaults.items():
self.options[key]=value
for key,value in options.items():
self.options[key]=value
Graph.__init__(self,**self.options)
self.add_node("JsonFile","JsonString",JsonString_to_JsonFile,
"JsonString",JsonFile_to_JsonString,node_description="JSON File")
self.add_node("XmlString","Dictionary",Dictionary_to_XmlString,
"Dictionary",XmlString_to_Dictionary,node_description="XML string")
self.add_node("HtmlMetaString","Dictionary",Dictionary_to_HtmlMetaString,
"Dictionary",HtmlMetaString_to_Dictionary,node_description="HTML meta tags")
self.add_node("XmlTupleString","Dictionary",Dictionary_to_XmlTupleString,
"Dictionary",XmlTupleString_to_Dictionary,node_description="Tuple Line")
self.add_node("PickleFile","Dictionary",Dictionary_to_PickleFile,
"Dictionary",PickleFile_to_Dictionary,node_description="Pickled File")
self.add_node("ListList","Dictionary",Dictionary_to_ListList,
"Dictionary",ListList_to_Dictionary,node_description="List of lists")
self.add_node("HeaderList","Dictionary",Dictionary_to_HeaderList,
"Dictionary",HeaderList_to_Dictionary,node_description="Header List")
self.add_node("DataFrame","Dictionary",Dictionary_to_DataFrame,
"Dictionary",DataFrame_to_Dictionary,node_description="Pandas DataFrame")
self.add_node("AsciiDataTable","DataFrame",DataFrame_to_AsciiDataTable,
"DataFrame",AsciiDataTable_to_DataFrame,node_description="AsciiDataTable")
self.add_node("MatFile","AsciiDataTable",AsciiTable_to_MatFile,
"AsciiDataTable",MatFile_to_AsciiDataTableKeyValue,node_description="Matlab")
self.add_node("ExcelFile","DataFrame",DataFrame_to_ExcelFile,
"DataFrame",ExcelFile_to_DataFrame,node_description="excel")
self.add_node("HdfFile","DataFrame",DataFrame_to_HdfFile,
"DataFrame",HdfFile_to_DataFrame,node_description="hdf file")
self.add_node("CsvFile","DataFrame",DataFrame_to_CsvFile,
"DataFrame",CsvFile_to_DataFrame,node_description="CSV File")
self.add_node("HtmlFile","DataFrame",DataFrame_to_HtmlFile,
"DataFrame",HtmlFile_to_DataFrame,node_description="HTML Table File")
self.add_node("HtmlTableString","HtmlFile",HtmlFile_to_HtmlString,
"HtmlFile",HtmlString_to_HtmlFile,node_description="HTML Table String")
class TwoPortParameterGraph(Graph):
def __init__(self,**options):
defaults={"graph_name":"Two Port Parameter Graph",
"node_names":["SFrequencyList",'SFrequencyMatrixList'],
"node_descriptions":["S Parameters","S Parameters in a Matrix"],
"current_node":'SFrequencyList',
"state":[1,0],
"data":[[1.0,.9,.436,.436,.9]],
"edge_2_to_1":FrequencyMatrixList_to_FrequencyList,
"edge_1_to_2":FrequencyList_to_FrequencyMatrixList,
"frequency_units":"GHz",
"Z01":50,
"Z02":50 }
graph_options={}
for key,value in defaults.items():
graph_options[key]=value
for key,value in options.items():
graph_options[key]=value
Graph.__init__(self,**graph_options)
self.add_node("TFrequencyMatrixList",
"SFrequencyMatrixList",SFrequencyMatrixList_to_TFrequencyMatrixList,
"SFrequencyMatrixList",TFrequencyMatrixList_to_SFrequencyMatrixList,
"T Parameters in a Matrix")
self.add_node("TFrequencyList",
"TFrequencyMatrixList",FrequencyMatrixList_to_FrequencyList,
"TFrequencyMatrixList",FrequencyList_to_FrequencyMatrixList,
"T Parameters")
self.add_node("ZFrequencyList",
"SFrequencyList",SFrequencyList_to_ZFrequencyList,
"TFrequencyList",ZFrequencyList_to_TFrequencyList,
"Z Parameters")
self.add_node("ZFrequencyMatrixList",
"ZFrequencyList",FrequencyList_to_FrequencyMatrixList,
"ZFrequencyList",FrequencyMatrixList_to_FrequencyList,
"Z Parameters in a matrix")
self.add_node("ABCDFrequencyList",
"ZFrequencyList",ZFrequencyList_to_ABCDFrequencyList,
"ZFrequencyList",ABCDFrequencyList_to_ZFrequencyList,
"ABCD Parameters")
self.add_node("ABCDFrequencyMatrixList",
"ABCDFrequencyList",FrequencyList_to_FrequencyMatrixList,
"ABCDFrequencyList",FrequencyMatrixList_to_FrequencyList,
"ABCD Parameters in a matrix")
self.add_node("HFrequencyList",
"ABCDFrequencyList",ABCDFrequencyList_to_HFrequencyList,
"ZFrequencyList",HFrequencyList_to_ZFrequencyList,
"h Parameters")
self.add_node("HFrequencyMatrixList",
"HFrequencyList",FrequencyList_to_FrequencyMatrixList,
"HFrequencyList",FrequencyMatrixList_to_FrequencyList,
"H Parameters in a matrix")
self.add_node("YFrequencyList",
"ABCDFrequencyList",ABCDFrequencyList_to_YFrequencyList,
"HFrequencyList",YFrequencyList_to_HFrequencyList,
"Y Parameters")
self.add_node("YFrequencyMatrixList",
"YFrequencyList",FrequencyList_to_FrequencyMatrixList,
"YFrequencyList",FrequencyMatrixList_to_FrequencyList,
"Y Parameters in a matrix")
self.add_edge(begin_node="ZFrequencyMatrixList",
end_node="YFrequencyMatrixList",
edge_function=ZFrequencyMatrixList_to_YFrequencyMatrixList)
self.add_edge(begin_node="SFrequencyMatrixList",
end_node="ZFrequencyMatrixList",
edge_function=SFrequencyMatrixList_to_ZFrequencyMatrixList)
self.add_edge(begin_node="ZFrequencyMatrixList",
end_node="TFrequencyMatrixList",
edge_function=ZFrequencyMatrixList_to_TFrequencyMatrixList)
self.add_edge(begin_node="ABCDFrequencyList",
end_node="SFrequencyList",
edge_function=ABCDFrequencyList_to_SFrequencyList)
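        # These extra edges presumably exist so that get_path can hop directly
        # between parameter families (S, T, Z, Y, ABCD) instead of routing
        # every conversion through the node each family was first attached to.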
class DataTableGraph(Graph):
def __init__(self,**options):
defaults={"graph_name":"Data Table Graph",
"node_names":['DataFrameDictionary','AsciiDataTable'],
"node_descriptions":["Pandas Data Frame Dictionary","AsciiDataTable"],
"current_node":'DataFrameDictionary',
"state":[1,0],
"data":AsciiDataTable_to_DataFrameDictionary(TwoPortRawModel(os.path.join(TESTS_DIRECTORY,'TestFileTwoPortRaw.txt'))),
"edge_2_to_1":AsciiDataTable_to_DataFrameDictionary,
"edge_1_to_2":DataFrameDictionary_to_AsciiDataTable
}
graph_options={}
for key,value in defaults.items():
graph_options[key]=value
for key,value in options.items():
graph_options[key]=value
Graph.__init__(self, **graph_options)
self.add_node("ExcelFile", "DataFrameDictionary", DataFrameDictionary_to_ExcelFile,
"DataFrameDictionary", ExcelFile_to_DataFrameDictionary,
node_description="Excel Workbook")
self.add_node("HdfFile", "DataFrameDictionary", DataFrameDictionary_to_HdfFile,
"DataFrameDictionary", HdfFile_to_DataFrameDictionary, node_description="HD5 File")
self.add_node("CsvFile", "AsciiDataTable", AsciiDataTable_to_CsvFile,
"AsciiDataTable", File_to_AsciiDataTable, node_description="CSV File")
self.add_node("HpFile", "AsciiDataTable", AsciiDataTable_to_HpFile,
"AsciiDataTable", File_to_AsciiDataTable, node_description="hp format File")
self.add_external_node(external_node_name="XMLDataTable", jump_into_node_begin="AsciiDataTable",
jump_into_node_function=AsciiDataTable_to_XmlDataTable,
external_node_description="XMLDataTable")
if __name__ == '__main__':
pass
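    # Hypothetical smoke test, left commented out so importing this module
    # stays side-effect free (TableGraph assumes the converter functions
    # defined earlier in this module and a working pandas install):
    #     tg = TableGraph()
    #     print(tg.node_names)
    #     tg.move_to_node("CsvFile")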
| true
| true
|
790e9d08248243d5cb911b08285fe9dfe822d1f5
| 164
|
py
|
Python
|
ABC180/ABC180_B.py
|
consommee/AtCoder
|
9beccf6a1202ca48491b4fcf748aa3dba0f12fb3
|
[
"MIT"
] | null | null | null |
ABC180/ABC180_B.py
|
consommee/AtCoder
|
9beccf6a1202ca48491b4fcf748aa3dba0f12fb3
|
[
"MIT"
] | null | null | null |
ABC180/ABC180_B.py
|
consommee/AtCoder
|
9beccf6a1202ca48491b4fcf748aa3dba0f12fb3
|
[
"MIT"
] | null | null | null |
import math
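# ABC180 B: read n and a vector c, then print its Manhattan (L1) norm,
# Euclidean (L2) norm and Chebyshev (L-infinity) norm, one per line.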
n=int(input())
c=list(map(int, input().split()))
print(sum([abs(i) for i in c]))
print(math.sqrt(sum([i*i for i in c])))
print(max([abs(i) for i in c]))
| 27.333333
| 39
| 0.628049
|
import math
n=int(input())
c=list(map(int, input().split()))
print(sum([abs(i) for i in c]))
print(math.sqrt(sum([i*i for i in c])))
print(max([abs(i) for i in c]))
| true
| true
|
790e9d26b2713e796f3d0ebb3b27759102676379
| 676
|
py
|
Python
|
emenuapi/menus/admin.py
|
dzbrozek/emenu-api
|
254b19a766b85caf51513541d3cdff06b97893f0
|
[
"MIT"
] | null | null | null |
emenuapi/menus/admin.py
|
dzbrozek/emenu-api
|
254b19a766b85caf51513541d3cdff06b97893f0
|
[
"MIT"
] | 5
|
2021-11-03T11:21:08.000Z
|
2021-11-04T09:22:00.000Z
|
emenuapi/menus/admin.py
|
dzbrozek/emenu-api
|
254b19a766b85caf51513541d3cdff06b97893f0
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from menus.models import Dish, Menu
@admin.register(Menu)
class MenuAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'description', 'created', 'updated')
search_fields = ('id', 'name', 'description')
list_filter = ('created', 'updated')
raw_id_fields = ('dishes',)
@admin.register(Dish)
class DishAdmin(admin.ModelAdmin):
list_display = (
'id',
'name',
'description',
'price',
'time_to_prepare',
'is_vegetarian',
'created',
'updated',
)
search_fields = ('id', 'name', 'description')
list_filter = ('is_vegetarian', 'created', 'updated')
| 25.037037
| 70
| 0.606509
|
from django.contrib import admin
from menus.models import Dish, Menu
@admin.register(Menu)
class MenuAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'description', 'created', 'updated')
search_fields = ('id', 'name', 'description')
list_filter = ('created', 'updated')
raw_id_fields = ('dishes',)
@admin.register(Dish)
class DishAdmin(admin.ModelAdmin):
list_display = (
'id',
'name',
'description',
'price',
'time_to_prepare',
'is_vegetarian',
'created',
'updated',
)
search_fields = ('id', 'name', 'description')
list_filter = ('is_vegetarian', 'created', 'updated')
| true
| true
|
790e9d785e220fab436da36b2f2ad86449371f35
| 826
|
py
|
Python
|
tests/svm/conftest.py
|
mdietrichstein/skpredict
|
f15416b61f5fc2693b4c85c690d664fbbb008f8b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/svm/conftest.py
|
mdietrichstein/skpredict
|
f15416b61f5fc2693b4c85c690d664fbbb008f8b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/svm/conftest.py
|
mdietrichstein/skpredict
|
f15416b61f5fc2693b4c85c690d664fbbb008f8b
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import pandas as pd
from sklearn.model_selection import train_test_split
TEST_SIZE = 0.33
RANDOM_STATE = 42
@pytest.fixture(scope="module")
def binary_dataset():
df = pd.read_csv("./resources/heart.csv")
features = df.iloc[0:, :-1]
labels = df.iloc[0:, -1].values.ravel()
X_train, X_test, y_train, y_test = train_test_split(
features, labels, test_size=TEST_SIZE, random_state=RANDOM_STATE
)
return X_train, X_test, y_train
@pytest.fixture(scope="module")
def multiclass_dataset():
df = pd.read_csv("./resources/glass.csv")
features = df.iloc[0:, :-1]
labels = df.iloc[0:, -1].values.ravel()
X_train, X_test, y_train, _ = train_test_split(
features, labels, test_size=TEST_SIZE, random_state=RANDOM_STATE
)
return X_train, X_test, y_train
| 25.030303
| 72
| 0.691283
|
import pytest
import pandas as pd
from sklearn.model_selection import train_test_split
TEST_SIZE = 0.33
RANDOM_STATE = 42
@pytest.fixture(scope="module")
def binary_dataset():
df = pd.read_csv("./resources/heart.csv")
features = df.iloc[0:, :-1]
labels = df.iloc[0:, -1].values.ravel()
X_train, X_test, y_train, y_test = train_test_split(
features, labels, test_size=TEST_SIZE, random_state=RANDOM_STATE
)
return X_train, X_test, y_train
@pytest.fixture(scope="module")
def multiclass_dataset():
df = pd.read_csv("./resources/glass.csv")
features = df.iloc[0:, :-1]
labels = df.iloc[0:, -1].values.ravel()
X_train, X_test, y_train, _ = train_test_split(
features, labels, test_size=TEST_SIZE, random_state=RANDOM_STATE
)
return X_train, X_test, y_train
| true
| true
|
790e9dde65c2a90c47b9735b4bffaba61bbd081d
| 2,508
|
py
|
Python
|
nas/random.py
|
francotheengineer/autokeras
|
d873aa41226b958004c3ff1e5694912b9fad10e1
|
[
"MIT"
] | 1
|
2019-01-14T11:19:06.000Z
|
2019-01-14T11:19:06.000Z
|
nas/random.py
|
dspshin/autokeras
|
eac91bad8a90f78a68933992cc1ff4b7df4ee30f
|
[
"MIT"
] | 1
|
2018-12-09T16:46:30.000Z
|
2018-12-09T16:46:30.000Z
|
nas/random.py
|
dspshin/autokeras
|
eac91bad8a90f78a68933992cc1ff4b7df4ee30f
|
[
"MIT"
] | 2
|
2018-11-12T19:43:31.000Z
|
2018-11-26T08:14:32.000Z
|
from random import randrange
from autokeras.bayesian import SearchTree, contain
from autokeras.net_transformer import transform
from autokeras.search import Searcher
class RandomSearcher(Searcher):
""" Class to search for neural architectures using Random search strategy.
Attributes:
search_tree: The network morphism search tree
"""
def __init__(self, n_output_node, input_shape, path, metric, loss, generators, verbose,
trainer_args=None,
default_model_len=None,
default_model_width=None):
super(RandomSearcher, self).__init__(n_output_node, input_shape,
path, metric, loss, generators,
verbose, trainer_args, default_model_len,
default_model_width)
self.search_tree = SearchTree()
def generate(self, multiprocessing_queue):
"""Generate the next neural architecture.
Args:
multiprocessing_queue: the Queue for multiprocessing return value.
Returns:
list of 2-element tuples: generated_graph and other_info,
for random searcher the length of list is 1.
generated_graph: An instance of Graph.
other_info: Anything to be saved in the training queue together with the architecture.
"""
random_index = randrange(len(self.history))
model_id = self.history[random_index]['model_id']
graph = self.load_model_by_id(model_id)
new_father_id = None
generated_graph = None
for temp_graph in transform(graph):
if not contain(self.descriptors, temp_graph.extract_descriptor()):
new_father_id = model_id
generated_graph = temp_graph
break
if new_father_id is None:
new_father_id = 0
generated_graph = self.generators[0](self.n_classes, self.input_shape). \
generate(self.default_model_len, self.default_model_width)
return [(generated_graph, new_father_id)]
def update(self, other_info, model_id, *args):
""" Update the controller with evaluation result of a neural architecture.
Args:
other_info: Anything. In our case it is the father ID in the search tree.
model_id: An integer.
"""
father_id = other_info
self.search_tree.add_child(father_id, model_id)
| 40.451613
| 98
| 0.631978
|
from random import randrange
from autokeras.bayesian import SearchTree, contain
from autokeras.net_transformer import transform
from autokeras.search import Searcher
class RandomSearcher(Searcher):
def __init__(self, n_output_node, input_shape, path, metric, loss, generators, verbose,
trainer_args=None,
default_model_len=None,
default_model_width=None):
super(RandomSearcher, self).__init__(n_output_node, input_shape,
path, metric, loss, generators,
verbose, trainer_args, default_model_len,
default_model_width)
self.search_tree = SearchTree()
def generate(self, multiprocessing_queue):
random_index = randrange(len(self.history))
model_id = self.history[random_index]['model_id']
graph = self.load_model_by_id(model_id)
new_father_id = None
generated_graph = None
for temp_graph in transform(graph):
if not contain(self.descriptors, temp_graph.extract_descriptor()):
new_father_id = model_id
generated_graph = temp_graph
break
if new_father_id is None:
new_father_id = 0
generated_graph = self.generators[0](self.n_classes, self.input_shape). \
generate(self.default_model_len, self.default_model_width)
return [(generated_graph, new_father_id)]
def update(self, other_info, model_id, *args):
father_id = other_info
self.search_tree.add_child(father_id, model_id)
| true
| true
|
790e9df6891454e24bcccf46d1e898f0a5c15df8
| 4,576
|
py
|
Python
|
venv/Lib/site-packages/PySide2/examples/network/fortuneserver.py
|
Farhan-Malik/advance-hand-gesture
|
0ebe21ddd7c8c2eb14746678be57b33d38c47205
|
[
"MIT"
] | 41
|
2021-06-19T13:57:18.000Z
|
2021-12-02T17:08:53.000Z
|
venv/Lib/site-packages/PySide2/examples/network/fortuneserver.py
|
Farhan-Malik/advance-hand-gesture
|
0ebe21ddd7c8c2eb14746678be57b33d38c47205
|
[
"MIT"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
venvWIN/Lib/site-packages/PySide2/examples/network/fortuneserver.py
|
NeroNekro/PortableController
|
a8bbfc1b6c8cb2c919e48eb0104e42f436059b18
|
[
"BSD-3-Clause"
] | 4
|
2021-07-02T03:09:51.000Z
|
2021-11-25T13:00:10.000Z
|
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
"""PySide2 port of the network/fortuneserver example from Qt v5.x"""
import random
from PySide2 import QtCore, QtWidgets, QtNetwork
class Server(QtWidgets.QDialog):
def __init__(self, parent=None):
super(Server, self).__init__(parent)
statusLabel = QtWidgets.QLabel()
quitButton = QtWidgets.QPushButton("Quit")
quitButton.setAutoDefault(False)
self.tcpServer = QtNetwork.QTcpServer(self)
if not self.tcpServer.listen():
QtWidgets.QMessageBox.critical(self, "Fortune Server",
"Unable to start the server: %s." % self.tcpServer.errorString())
self.close()
return
statusLabel.setText("The server is running on port %d.\nRun the "
"Fortune Client example now." % self.tcpServer.serverPort())
self.fortunes = (
"You've been leading a dog's life. Stay off the furniture.",
"You've got to think about tomorrow.",
"You will be surprised by a loud noise.",
"You will feel hungry again in another hour.",
"You might have mail.",
"You cannot kill time without injuring eternity.",
"Computers are not intelligent. They only think they are.")
quitButton.clicked.connect(self.close)
self.tcpServer.newConnection.connect(self.sendFortune)
buttonLayout = QtWidgets.QHBoxLayout()
buttonLayout.addStretch(1)
buttonLayout.addWidget(quitButton)
buttonLayout.addStretch(1)
mainLayout = QtWidgets.QVBoxLayout()
mainLayout.addWidget(statusLabel)
mainLayout.addLayout(buttonLayout)
self.setLayout(mainLayout)
self.setWindowTitle("Fortune Server")
def sendFortune(self):
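        # Framing note: writeUInt16(0) reserves a two-byte length prefix; after
        # the fortune is serialized the stream seeks back to offset 0 and
        # overwrites the placeholder with block.size() - 2, i.e. the payload
        # length excluding the prefix itself, which is what the client parses.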
block = QtCore.QByteArray()
out = QtCore.QDataStream(block, QtCore.QIODevice.WriteOnly)
out.setVersion(QtCore.QDataStream.Qt_4_0)
out.writeUInt16(0)
fortune = self.fortunes[random.randint(0, len(self.fortunes) - 1)]
out.writeString(fortune)
out.device().seek(0)
out.writeUInt16(block.size() - 2)
clientConnection = self.tcpServer.nextPendingConnection()
clientConnection.disconnected.connect(clientConnection.deleteLater)
clientConnection.write(block)
clientConnection.disconnectFromHost()
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
server = Server()
random.seed(None)
sys.exit(server.exec_())
| 38.779661
| 85
| 0.664773
| true
| true
|
|
790ea13f67ed1a18d97bb62e80020f3ad78486f0
| 67
|
py
|
Python
|
kiberdrom_core/util/__init__.py
|
IlyaDanilenko/kiberdrom_core
|
c3050cdb7cbc22adc06f454bdafc081051511f04
|
[
"MIT"
] | null | null | null |
kiberdrom_core/util/__init__.py
|
IlyaDanilenko/kiberdrom_core
|
c3050cdb7cbc22adc06f454bdafc081051511f04
|
[
"MIT"
] | null | null | null |
kiberdrom_core/util/__init__.py
|
IlyaDanilenko/kiberdrom_core
|
c3050cdb7cbc22adc06f454bdafc081051511f04
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .core import *
| 16.75
| 23
| 0.597015
|
from .core import *
| true
| true
|
790ea1f994a4f821a2a9cb2614f6ae899533df2a
| 238
|
py
|
Python
|
app/meetings/api/urls.py
|
kaizumaki/owaranai-meter
|
aa4703f84116c9d6bac558f301de2389c9175fdf
|
[
"MIT"
] | 5
|
2021-02-04T14:16:12.000Z
|
2021-02-19T12:36:50.000Z
|
app/meetings/api/urls.py
|
kaizumaki/owaranai-meter
|
aa4703f84116c9d6bac558f301de2389c9175fdf
|
[
"MIT"
] | 27
|
2021-02-11T04:49:39.000Z
|
2021-08-22T07:34:39.000Z
|
app/meetings/api/urls.py
|
kaizumaki/owaranai-meter
|
aa4703f84116c9d6bac558f301de2389c9175fdf
|
[
"MIT"
] | 6
|
2021-02-04T14:16:28.000Z
|
2021-02-27T07:14:38.000Z
|
from django.urls import path
from meetings.api.views import (MeetingViewset)
from rest_framework import routers
router = routers.DefaultRouter(trailing_slash='/?')
router.register('meetings', MeetingViewset)
urlpatterns = router.urls
| 21.636364
| 51
| 0.806723
|
from django.urls import path
from meetings.api.views import (MeetingViewset)
from rest_framework import routers
router = routers.DefaultRouter(trailing_slash='/?')
router.register('meetings', MeetingViewset)
urlpatterns = router.urls
| true
| true
|
790ea2b9e02aa9a2a694c78097804ebfd6e268da
| 1,865
|
py
|
Python
|
fm/handlers/alarm/segment.py
|
sbworth/getnoc
|
a9a5647df31822062db3db7afe7ae1c005d166f7
|
[
"BSD-3-Clause"
] | null | null | null |
fm/handlers/alarm/segment.py
|
sbworth/getnoc
|
a9a5647df31822062db3db7afe7ae1c005d166f7
|
[
"BSD-3-Clause"
] | null | null | null |
fm/handlers/alarm/segment.py
|
sbworth/getnoc
|
a9a5647df31822062db3db7afe7ae1c005d166f7
|
[
"BSD-3-Clause"
] | null | null | null |
# ---------------------------------------------------------------------
# Segment handlers
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import logging
# NOC modules
from noc.sa.models.managedobject import ManagedObject
from noc.fm.models.activealarm import ActiveAlarm
logger = logging.getLogger(__name__)
def set_segment_redundancy(alarm):
"""
Set lost_redundancy to segment when redundant object is down
:param alarm:
:return:
"""
if alarm.root:
return # Already changed by root cause
mo = alarm.managed_object
seg = mo.segment
if seg.is_redundant and not seg.lost_redundancy:
u = mo.data.uplinks
if len(u) > 1:
logger.info("[%s] Redundancy lost for %s", alarm.id, seg.name)
seg.set_lost_redundancy(True)
def check_segment_redundancy(alarm):
"""
Reset lost_redundancy from segment when all redundant objects
are up
:param alarm:
:return:
"""
mo = alarm.managed_object
seg = mo.segment
if not seg.is_redundant or not seg.lost_redundancy:
return
u = mo.data.uplinks
if len(u) < 2:
return
seg_objects = list(seg.managed_objects.values_list("id", flat=True))
alarms = [
d["managed_object"]
for d in ActiveAlarm._get_collection().find(
{"managed_object": {"$in": seg_objects}}, {"_id": 0, "managed_object": 1}
)
if d["managed_object"] != mo.id
]
uplinks = ManagedObject.uplinks_for_objects(alarms)
if not any(x for x in uplinks.values() if len(x) > 1):
logger.info("[%s] Redundancy recovered for %s", alarm.id, seg.name)
seg.set_lost_redundancy(False)
| 30.57377
| 85
| 0.576408
|
import logging
from noc.sa.models.managedobject import ManagedObject
from noc.fm.models.activealarm import ActiveAlarm
logger = logging.getLogger(__name__)
def set_segment_redundancy(alarm):
if alarm.root:
return
mo = alarm.managed_object
seg = mo.segment
if seg.is_redundant and not seg.lost_redundancy:
u = mo.data.uplinks
if len(u) > 1:
logger.info("[%s] Redundancy lost for %s", alarm.id, seg.name)
seg.set_lost_redundancy(True)
def check_segment_redundancy(alarm):
mo = alarm.managed_object
seg = mo.segment
if not seg.is_redundant or not seg.lost_redundancy:
return
u = mo.data.uplinks
if len(u) < 2:
return
seg_objects = list(seg.managed_objects.values_list("id", flat=True))
alarms = [
d["managed_object"]
for d in ActiveAlarm._get_collection().find(
{"managed_object": {"$in": seg_objects}}, {"_id": 0, "managed_object": 1}
)
if d["managed_object"] != mo.id
]
uplinks = ManagedObject.uplinks_for_objects(alarms)
if not any(x for x in uplinks.values() if len(x) > 1):
logger.info("[%s] Redundancy recovered for %s", alarm.id, seg.name)
seg.set_lost_redundancy(False)
| true
| true
|
790ea2f0afdb0edf9afd302ebc5965f9803c5d23
| 431
|
py
|
Python
|
pygin/example_games/Balance/animations/particle_fade_animation.py
|
CarlosMatheus/Engine
|
1467a919ad4489d0d4cb041b5f02aa67c6be6664
|
[
"MIT"
] | 22
|
2018-12-29T16:31:19.000Z
|
2022-03-07T13:12:40.000Z
|
pygin/example_games/Balance/animations/particle_fade_animation.py
|
CarlosMatheus/Engine
|
1467a919ad4489d0d4cb041b5f02aa67c6be6664
|
[
"MIT"
] | 3
|
2018-08-14T14:45:40.000Z
|
2020-01-22T08:03:27.000Z
|
pygin/example_games/Balance/animations/particle_fade_animation.py
|
CarlosMatheus/Engine
|
1467a919ad4489d0d4cb041b5f02aa67c6be6664
|
[
"MIT"
] | 6
|
2018-05-29T20:02:02.000Z
|
2021-11-13T19:35:23.000Z
|
from pygin.components.animation import Animation
from pygin.key_frame import KeyFrame
class ParticleFadeAnimation(Animation):
def __init__(self, game_obj, duration):
key_frame_list = list()
key_frame_list.append(KeyFrame(0.0, alpha=255, interpolation="in_cubic"))
key_frame_list.append(KeyFrame(duration, alpha=0))
super().__init__(game_obj, key_frame_list, should_loop=False, unscaled=True)
| 35.916667
| 84
| 0.74942
|
from pygin.components.animation import Animation
from pygin.key_frame import KeyFrame
class ParticleFadeAnimation(Animation):
def __init__(self, game_obj, duration):
key_frame_list = list()
key_frame_list.append(KeyFrame(0.0, alpha=255, interpolation="in_cubic"))
key_frame_list.append(KeyFrame(duration, alpha=0))
super().__init__(game_obj, key_frame_list, should_loop=False, unscaled=True)
| true
| true
|
790ea45467f095c2a73dc3570997da10933b765e
| 5,584
|
py
|
Python
|
XIO/plugins/minicbf_interpreter.py
|
jsburg/xdsme
|
3fc9ed185ab78e1a42306edf24e681981eacd221
|
[
"BSD-3-Clause"
] | null | null | null |
XIO/plugins/minicbf_interpreter.py
|
jsburg/xdsme
|
3fc9ed185ab78e1a42306edf24e681981eacd221
|
[
"BSD-3-Clause"
] | null | null | null |
XIO/plugins/minicbf_interpreter.py
|
jsburg/xdsme
|
3fc9ed185ab78e1a42306edf24e681981eacd221
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
""" XIO plugin for the minicbf format of images (DECTRIS-PILATUS).
"""
__version__ = "0.2.1"
__author__ = "Pierre Legrand (pierre.legrand@synchrotron-soleil.fr)"
__date__ = "23-09-2012"
__copyright__ = "Copyright (c) 2009-2012 Pierre Legrand"
__license__ = "New BSD, http://www.opensource.org/licenses/bsd-license.php"
import time
HEADER_KEYS = ["Detector:", "Pixel_size", "Silicon", "Exposure_time",
"Exposure_period", "Tau", "Count_cutoff", "Threshold_setting",
"N_excluded_pixels","Excluded_pixels:", "Flat_field:", "Trim_directory:",
"Wavelength", "Energy_range", "Detector_distance", "Detector_Voffset",
"Beam_xy","Flux","Filter_transmission","Start_angle", "Angle_increment",
"Detector_2theta", "Polarization", "Alpha", "Kappa", "Phi", "Chi",
"Oscillation_axis", "N_oscillations"]
def date_time(timestr):
"from str return timestr + msec"
t_a, t_b = timestr.split(".")
return time.strptime(t_a, "%Y/%b/%d %H:%M:%S"), float("0."+t_b)
def date_seconds(timestr):
"from str return seconds"
t_a, msec = date_time(timestr)
return time.mktime(t_a) + msec
def get_edge_resolution(pixel_x, width, distance, wavelength):
"Calculate EdgeResolution"
from math import sin, atan
if abs(DISTANCE(distance)) > 0.0:
rad = 0.5 * float(FLOAT2(pixel_x)) * int(width)
return FLOAT1(wavelength)/(2*sin(0.5*atan(rad/DISTANCE(distance))))
else:
return 0.
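# Geometry sketch for get_edge_resolution (illustrative numbers only): the
# detector edge sits at radius = 0.5 * pixel_size_mm * width from the beam
# centre and subtends 2*theta = atan(radius / distance); Bragg's law then
# gives d = wavelength / (2 * sin(theta)). For 0.172 mm pixels, width 2527,
# distance 400 mm and wavelength 0.98 A this works out to roughly d = 2.0 A.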
FLOAT1 = lambda x: float(x.split()[0])
FLOAT2 = lambda x: float(x.split()[0])*1e3
def DISTANCE(inp):
    args = inp.split()
    try:
        if args[1] == "m":
            return float(args[0]) * 1e3
    except IndexError:
        pass
    return float(args[0])
BEAMX = lambda x, y: float(x[x.find("(")+1:x.find(")")-1].split(",")[0])\
*FLOAT2(y)
BEAMY = lambda x, y: float(x[x.find("(")+1:x.find(")")-1].split(",")[1])\
*FLOAT2(y)
class Interpreter:
"Dummy class, container for standard Dict and Function."
HTD = {
# The adsc Header Translator Dictionary.
# Potential problems:
# - There are multiple SIZE1, SIZE2 instances.
# = The orientation of SIZE1 and SIZE2 is unknown
# Not a problem as long as SIZE1 = SIZE2..
'ExposureTime':(['Exposure_time'], FLOAT1),
'BeamX':(['Beam_xy', 'Pixel_size'], BEAMX),
'BeamY':(['Beam_xy', 'Pixel_size'], BEAMY),
'Distance':(['Detector_distance'], DISTANCE),
'Wavelength':(['Wavelength'], FLOAT1),
'PixelX':(['Pixel_size'], FLOAT2),
'PixelY':(['Pixel_size'], FLOAT2),
'Width':(['Binary-Size-Fastest-Dimension'], int),
'Height':(['Binary-Size-Second-Dimension'], int),
#'Message':(['MESSAGE'], lambda x: x.split(';')),
'PhiStart':(['Start_angle'], FLOAT1),
'PhiEnd':(['Start_angle', 'Angle_increment'], \
lambda x, y: FLOAT1(x)+FLOAT1(y)),
'PhiWidth':(['Angle_increment'], FLOAT1),
'EdgeResolution':(['Pixel_size','Binary-Size-Second-Dimension','Detector_distance','Wavelength'], \
get_edge_resolution),
# Added keys from Graeme's convention.
'TwoTheta':(['Detector_2theta'], FLOAT1), # No example yet...
'SerialNumber':(['Detector:'], str),
'HeaderSize':(['HEADER_SIZE'], int),
'OscAxis':(['Oscillation_axis'], lambda x: x.split(",")[0].lower().strip()),
'DateStr':(['DATE'], str),
'DateSeconds':(['DATE'], date_seconds),
}
SpecialRules = {
# No special rules for now
}
Identifiers = {
# Based on Serial Number. Contains (Synchrotron,BLname,DetectorType)
#413:('ESRF','ID14EH2','ADSC Q4'),
#420:('ESRF','ID14EH4','ADSC Q4R'),
}
def __init__(self):
self.raw_head_dict = None
def getRawHeadDict(self, raw_head):
"Intepret the ascii structure of the minicbf image header."
i_1 = 28+raw_head.find("_array_data.header_contents")
i_2 = raw_head.find("_array_data.data", i_1)
i_3 = raw_head.find("--CIF-BINARY-FORMAT-SECTION--", i_2)+29
i_4 = i_3+500
lis = [line[2:].strip().split(" ", 1) \
for line in raw_head[i_1:i_2].splitlines() \
if line and line[0]=="#"]
lis2 = [line[2:].strip().split(": ", 1) \
for line in raw_head[i_3:i_4].splitlines() \
if line and line[0:2]=="X-"]
# Filling the raw_header_dict with some default values,
# in case they are missing in the header.
self.raw_head_dict = {'Detector_2theta': "0.", 'MESSAGE': ''}
for val in lis:
if (val[0] in HEADER_KEYS):
if len(val) == 2:
self.raw_head_dict[val[0]] = val[1]
else:
self.raw_head_dict[val[0]] = None
self.raw_head_dict.update(dict([ val for val in lis2 \
if "Binary-" in val[0]]))
# Add some default values
self.raw_head_dict.update({'HEADER_SIZE': i_3})
self.raw_head_dict.update({'DATE': " ".join(lis[1])})
#self.raw_head_dict.update({'MESSAGE': '', 'TWO_THETA': '0',
# 'Beam_xy':"(1330.30, 1314.90)",
# 'Detector_distance': "0.4 m",
# 'Wavelength':"0.980 A",
# 'Angle_increment':"0.2 deg",
# 'Start_angle': "0. deg",
# 'Detector_2theta': "0. deg"})
return self.raw_head_dict
| 40.172662
| 103
| 0.566082
|
__version__ = "0.2.1"
__author__ = "Pierre Legrand (pierre.legrand@synchrotron-soleil.fr)"
__date__ = "23-09-2012"
__copyright__ = "Copyright (c) 2009-2012 Pierre Legrand"
__license__ = "New BSD, http://www.opensource.org/licenses/bsd-license.php"
import time
HEADER_KEYS = ["Detector:", "Pixel_size", "Silicon", "Exposure_time",
"Exposure_period", "Tau", "Count_cutoff", "Threshold_setting",
"N_excluded_pixels","Excluded_pixels:", "Flat_field:", "Trim_directory:",
"Wavelength", "Energy_range", "Detector_distance", "Detector_Voffset",
"Beam_xy","Flux","Filter_transmission","Start_angle", "Angle_increment",
"Detector_2theta", "Polarization", "Alpha", "Kappa", "Phi", "Chi",
"Oscillation_axis", "N_oscillations"]
def date_time(timestr):
t_a, t_b = timestr.split(".")
return time.strptime(t_a, "%Y/%b/%d %H:%M:%S"), float("0."+t_b)
def date_seconds(timestr):
t_a, msec = date_time(timestr)
return time.mktime(t_a) + msec
def get_edge_resolution(pixel_x, width, distance, wavelength):
from math import sin, atan
if abs(DISTANCE(distance)) > 0.0:
rad = 0.5 * float(FLOAT2(pixel_x)) * int(width)
return FLOAT1(wavelength)/(2*sin(0.5*atan(rad/DISTANCE(distance))))
else:
return 0.
FLOAT1 = lambda x: float(x.split()[0])
FLOAT2 = lambda x: float(x.split()[0])*1e3
def DISTANCE(inp):
    args = inp.split()
    try:
        if args[1] == "m":
            return float(args[0]) * 1e3
    except IndexError:
        pass
    return float(args[0])
BEAMX = lambda x, y: float(x[x.find("(")+1:x.find(")")-1].split(",")[0])\
*FLOAT2(y)
BEAMY = lambda x, y: float(x[x.find("(")+1:x.find(")")-1].split(",")[1])\
*FLOAT2(y)
class Interpreter:
HTD = {
'ExposureTime':(['Exposure_time'], FLOAT1),
'BeamX':(['Beam_xy', 'Pixel_size'], BEAMX),
'BeamY':(['Beam_xy', 'Pixel_size'], BEAMY),
'Distance':(['Detector_distance'], DISTANCE),
'Wavelength':(['Wavelength'], FLOAT1),
'PixelX':(['Pixel_size'], FLOAT2),
'PixelY':(['Pixel_size'], FLOAT2),
'Width':(['Binary-Size-Fastest-Dimension'], int),
'Height':(['Binary-Size-Second-Dimension'], int),
'PhiStart':(['Start_angle'], FLOAT1),
'PhiEnd':(['Start_angle', 'Angle_increment'], \
lambda x, y: FLOAT1(x)+FLOAT1(y)),
'PhiWidth':(['Angle_increment'], FLOAT1),
'EdgeResolution':(['Pixel_size','Binary-Size-Second-Dimension','Detector_distance','Wavelength'], \
get_edge_resolution),
'TwoTheta':(['Detector_2theta'], FLOAT1), # No example yet...
'SerialNumber':(['Detector:'], str),
'HeaderSize':(['HEADER_SIZE'], int),
'OscAxis':(['Oscillation_axis'], lambda x: x.split(",")[0].lower().strip()),
'DateStr':(['DATE'], str),
'DateSeconds':(['DATE'], date_seconds),
}
SpecialRules = {
# No special rules for now
}
Identifiers = {
# Based on Serial Number. Contains (Synchrotron,BLname,DetectorType)
#413:('ESRF','ID14EH2','ADSC Q4'),
#420:('ESRF','ID14EH4','ADSC Q4R'),
}
def __init__(self):
self.raw_head_dict = None
def getRawHeadDict(self, raw_head):
i_1 = 28+raw_head.find("_array_data.header_contents")
i_2 = raw_head.find("_array_data.data", i_1)
i_3 = raw_head.find("--CIF-BINARY-FORMAT-SECTION--", i_2)+29
i_4 = i_3+500
lis = [line[2:].strip().split(" ", 1) \
for line in raw_head[i_1:i_2].splitlines() \
if line and line[0]=="#"]
lis2 = [line[2:].strip().split(": ", 1) \
for line in raw_head[i_3:i_4].splitlines() \
if line and line[0:2]=="X-"]
# Filling the raw_header_dict with some default values,
# in case they are missing in the header.
self.raw_head_dict = {'Detector_2theta': "0.", 'MESSAGE': ''}
for val in lis:
if (val[0] in HEADER_KEYS):
if len(val) == 2:
self.raw_head_dict[val[0]] = val[1]
else:
self.raw_head_dict[val[0]] = None
self.raw_head_dict.update(dict([ val for val in lis2 \
if "Binary-" in val[0]]))
# Add some default values
self.raw_head_dict.update({'HEADER_SIZE': i_3})
self.raw_head_dict.update({'DATE': " ".join(lis[1])})
#self.raw_head_dict.update({'MESSAGE': '', 'TWO_THETA': '0',
# 'Beam_xy':"(1330.30, 1314.90)",
# 'Detector_distance': "0.4 m",
# 'Wavelength':"0.980 A",
# 'Angle_increment':"0.2 deg",
# 'Start_angle': "0. deg",
# 'Detector_2theta': "0. deg"})
return self.raw_head_dict
| true
| true
|
790ea6506d8614ffad99579c37d058bcd8d9722b
| 317
|
py
|
Python
|
.history/forms_20200723155707.py
|
rkustas/taskmanager
|
3218b277a235c4e8d30b1d548ba28be3ab3f628f
|
[
"MIT"
] | null | null | null |
.history/forms_20200723155707.py
|
rkustas/taskmanager
|
3218b277a235c4e8d30b1d548ba28be3ab3f628f
|
[
"MIT"
] | null | null | null |
.history/forms_20200723155707.py
|
rkustas/taskmanager
|
3218b277a235c4e8d30b1d548ba28be3ab3f628f
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField,SubmitField
from wtforms.validators import DataRequired
class AddTaskForm(FlaskForm):
title = StringField('Title', validators=[DataRequired()])
submit = SubmitField('Submit')
class DeleteTaskForm(FlaskForm):
submit = SubmitField('Delete')
| 28.818182
| 61
| 0.782334
|
from flask_wtf import FlaskForm
from wtforms import StringField,SubmitField
from wtforms.validators import DataRequired
class AddTaskForm(FlaskForm):
title = StringField('Title', validators=[DataRequired()])
submit = SubmitField('Submit')
class DeleteTaskForm(FlaskForm):
submit = SubmitField('Delete')
| true
| true
|
790ea6bb9863f0bd94d14860ef19c6de8ed43f80
| 11,246
|
py
|
Python
|
python/lib/socket.py
|
cschutijser/scion
|
054cef53b31a577ed224a090d6a4fd3883fd520b
|
[
"Apache-2.0"
] | null | null | null |
python/lib/socket.py
|
cschutijser/scion
|
054cef53b31a577ed224a090d6a4fd3883fd520b
|
[
"Apache-2.0"
] | 1
|
2020-03-20T01:28:56.000Z
|
2020-03-20T01:28:56.000Z
|
python/lib/socket.py
|
cschutijser/scion
|
054cef53b31a577ed224a090d6a4fd3883fd520b
|
[
"Apache-2.0"
] | 2
|
2020-03-14T16:03:27.000Z
|
2020-03-18T08:13:19.000Z
|
# Copyright 2015 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`socket` --- Low-level socket library
==========================================
"""
# Stdlib
import logging
import os
import selectors
import struct
from abc import abstractmethod
from errno import EHOSTUNREACH, ENETUNREACH
from socket import (
AF_INET,
AF_INET6,
AF_UNIX,
MSG_DONTWAIT,
SOCK_DGRAM,
SOCK_STREAM,
SOL_SOCKET,
SO_REUSEADDR,
socket,
)
# External
from external import ipaddress
# SCION
from lib.defines import SCION_BUFLEN
from lib.dispatcher import reg_dispatcher
from lib.errors import SCIONIOError
from lib.packet.host_addr import haddr_get_type, haddr_parse_interface
from lib.packet.scmp.errors import SCMPUnreachHost, SCMPUnreachNet
from lib.util import recv_all
from lib.thread import kill_self
from lib.types import AddrType
class Socket(object):
"""
Base class for socket wrappers
"""
@abstractmethod
def bind(self, addr, *args, **kwargs):
raise NotImplementedError
@abstractmethod
def send(self, data, dst=None):
raise NotImplementedError
@abstractmethod
def recv(self, block=True):
raise NotImplementedError
def close(self): # pragma: no cover
"""
Close the socket.
"""
self.sock.close()
def settimeout(self, timeout): # pragma: no cover
prev = self.sock.gettimeout()
self.sock.settimeout(timeout)
return prev
def is_active(self):
return True
class UDPSocket(Socket):
"""
Thin wrapper around BSD/POSIX UDP sockets.
"""
def __init__(self, bind=None, addr_type=AddrType.IPV6, reuse=False):
"""
Initialize a UDP socket, then call superclass init for socket options
and binding.
:param tuple bind:
Optional tuple of (`str`, `int`, `str`) describing respectively the
address and port to bind to, and an optional description.
:param addr_type:
Socket domain. Must be one of :const:`~lib.types.AddrType.IPV4`,
:const:`~lib.types.AddrType.IPV6` (default).
:param reuse:
Boolean value indicating whether SO_REUSEADDR option should be set.
"""
assert addr_type in (AddrType.IPV4, AddrType.IPV6)
self._addr_type = addr_type
af_domain = AF_INET6
if self._addr_type == AddrType.IPV4:
af_domain = AF_INET
self.sock = socket(af_domain, SOCK_DGRAM)
if reuse:
self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.port = None
if bind:
self.bind(*bind)
self.active = True
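    # Usage sketch (loopback address and port are illustrative, not taken from
    # this module):
    #   sock = UDPSocket(bind=("::1", 30041, "example"), addr_type=AddrType.IPV6)
    #   sock.send(b"ping", ("::1", 30041))
    #   data, sender = sock.recv()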
def bind(self, addr, port=0, desc=None):
"""
Bind socket to the specified address & port. If `addr` is ``None``, the
socket will bind to all interfaces.
:param str addr: Address to bind to (can be ``None``, see above).
:param int port: Port to bind to.
:param str desc: Optional purpose of the port.
"""
if addr is None:
addr = "::"
if self._addr_type == AddrType.IPV4:
addr = ""
try:
self.sock.bind((addr, port))
except OSError as e:
logging.critical("Error binding to [%s]:%s: %s", addr, port, e)
kill_self()
self.port = self.sock.getsockname()[1]
if desc:
logging.debug("%s bound to %s:%d", desc, addr, self.port)
def send(self, data, dst=None):
"""
Send data to a specified destination.
:param bytes data: Data to send.
:param tuple dst:
Tuple of (`str`, `int`) describing the destination address and port,
respectively.
"""
try:
ret = self.sock.sendto(data, dst)
except OSError as e:
errno = e.args[0]
logging.error("Error sending %dB to %s: %s", len(data), dst, e)
if errno == ENETUNREACH:
raise SCMPUnreachNet(dst)
elif errno == EHOSTUNREACH:
raise SCMPUnreachHost(dst)
return False
if ret != len(data):
logging.error("Wanted to send %dB, only sent %dB", len(data), ret)
return False
return True
def recv(self, block=True):
"""
Read data from socket.
:returns:
            Tuple of (`bytes`, (`str`, `int`)) containing the data and remote
            host/port respectively.
"""
flags = 0
if not block:
flags = MSG_DONTWAIT
while True:
try:
return self.sock.recvfrom(SCION_BUFLEN, flags)
except InterruptedError:
pass
class ReliableSocket(Socket):
"""
Wrapper around Unix socket with message framing functionality baked in
"""
COOKIE = bytes.fromhex("de00ad01be02ef03")
COOKIE_LEN = len(COOKIE)
def __init__(self, reg=None, bind_ip=(), bind_unix=None, sock=None):
"""
Initialise a socket of the specified type, and optionally bind it to an
address/port.
:param tuple reg:
            Optional tuple of (`SCIONAddr`, `int`, `bool`, `SVCType`)
            describing respectively the address, port, init value, and SVC type
to register with the dispatcher. In sockets that do not connect to
the dispatcher, this argument is None.
:param tuple bind_ip:
Optional tuple of (`SCIONAddr`, `int`) describing the address and port
of the bind address. Only needed if the bind address is different from
the public address.
:param tuple bind_unix:
Optional tuple of (`str`, `str`) describing path to bind to, and an
optional description.
:param sock:
Optional socket file object to build instance around.
"""
self.sock = sock or socket(AF_UNIX, SOCK_STREAM)
self.addr = None
if reg:
addr, port, init, svc = reg
self.registered = reg_dispatcher(
self, addr, port, bind_ip, init, svc)
if bind_unix:
self.bind(*bind_unix)
self.active = True
@classmethod
def from_socket(cls, sock):
return cls(None, sock=sock)
def bind(self, addr, desc=None):
self.addr = addr
# Use 0666 for socket permissions
old_mask = os.umask(0o111)
try:
self.sock.bind(addr)
except OSError as e:
logging.critical("Error binding to %s: %s", addr, e)
kill_self()
os.umask(old_mask)
self.sock.listen(5)
if desc:
logging.debug("%s bound to %s", desc, addr)
def accept(self, block=True):
prev = self.sock.gettimeout()
if not block:
self.sock.settimeout(0)
try:
s = self.sock.accept()[0]
except OSError as e:
logging.error("error accepting socket: %s", e)
return None
finally:
self.sock.settimeout(prev)
return ReliableSocket.from_socket(s)
def connect(self, addr):
self.sock.connect(addr)
def send(self, data, dst=None):
"""
Send data through the socket.
:param bytes data: Data to send.
"""
if dst:
dst_addr, dst_port = dst
if isinstance(dst_addr, str):
dst_addr = haddr_parse_interface(dst_addr)
addr_type = struct.pack("B", dst_addr.TYPE)
packed_dst = dst_addr.pack() + struct.pack("!H", dst_port)
else:
addr_type = struct.pack("B", AddrType.NONE)
packed_dst = b""
data_len = struct.pack("!I", len(data))
data = b"".join([self.COOKIE, addr_type, data_len, packed_dst, data])
try:
self.sock.sendall(data)
return True
except OSError as e:
logging.error("error in send: %s", e)
return False
def recv(self, block=True):
"""
Read data from socket.
:returns: bytestring containing received data.
"""
flags = 0
if not block:
flags = MSG_DONTWAIT
buf = recv_all(self.sock, self.COOKIE_LEN + 5, flags)
if not buf:
return None, None
cookie, addr_type, packet_len = struct.unpack("!8sBI", buf)
if cookie != self.COOKIE:
raise SCIONIOError("Dispatcher socket out of sync")
port_len = 0
if addr_type != AddrType.NONE:
port_len = 2
addr_len = haddr_get_type(addr_type).LEN
# We know there is data coming, block here to avoid sync problems.
buf = recv_all(self.sock, addr_len + port_len + packet_len, 0)
if addr_len > 0:
addr = buf[:addr_len]
            port = struct.unpack("!H", buf[addr_len:addr_len + port_len])[0]
sender = (str(ipaddress.ip_address(addr)), port)
else:
addr = ""
port = 0
sender = (None, None)
packet = buf[addr_len + port_len:]
return packet, sender
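    # Frame layout implied by the pack/unpack calls in send()/recv() above,
    # added as a reading aid (payload and sizes are illustrative):
    #   8B cookie | 1B addr type | 4B payload len | addr [+ 2B port] | payload
    # e.g. a 5-byte payload with no address would be framed as:
    #   COOKIE + struct.pack("B", AddrType.NONE) + struct.pack("!I", 5) + b"hello"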
def close(self):
super().close()
if not self.addr:
return
try:
os.unlink(self.addr)
except OSError as e:
logging.critical("Error unlinking unix socket: %s", e)
kill_self()
class SocketMgr(object):
"""
:class:`Socket` manager.
"""
def __init__(self): # pragma: no cover
self._sel = selectors.DefaultSelector()
def add(self, sock, callback): # pragma: no cover
"""
Add new socket.
:param UDPSocket sock: UDPSocket to add.
"""
if not sock.is_active():
return
self._sel.register(sock.sock, selectors.EVENT_READ, (sock, callback))
def remove(self, sock): # pragma: no cover
"""
Remove socket.
:param UDPSocket sock: UDPSocket to remove.
"""
self._sel.unregister(sock.sock)
def select_(self, timeout=None):
"""
Return the set of UDPSockets that have data pending.
:param float timeout:
Number of seconds to wait for at least one UDPSocket to become
ready. ``None`` means wait forever.
"""
for key, _ in self._sel.select(timeout=timeout):
yield key.data
def close(self):
"""
Close all sockets.
"""
mapping = self._sel.get_map()
if mapping:
for entry in list(mapping.values()):
sock = entry.data[0]
self.remove(sock)
sock.close()
self._sel.close()
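# Dispatch-loop sketch for SocketMgr (`sock` and `handle` are hypothetical,
# not defined in this module):
#   mgr = SocketMgr()
#   mgr.add(sock, handle)
#   for s, callback in mgr.select_(timeout=1.0):
#       callback(s.recv(block=False))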
| 31.066298
| 82
| 0.577983
|
import logging
import os
import selectors
import struct
from abc import abstractmethod
from errno import EHOSTUNREACH, ENETUNREACH
from socket import (
AF_INET,
AF_INET6,
AF_UNIX,
MSG_DONTWAIT,
SOCK_DGRAM,
SOCK_STREAM,
SOL_SOCKET,
SO_REUSEADDR,
socket,
)
from external import ipaddress
from lib.defines import SCION_BUFLEN
from lib.dispatcher import reg_dispatcher
from lib.errors import SCIONIOError
from lib.packet.host_addr import haddr_get_type, haddr_parse_interface
from lib.packet.scmp.errors import SCMPUnreachHost, SCMPUnreachNet
from lib.util import recv_all
from lib.thread import kill_self
from lib.types import AddrType
class Socket(object):
@abstractmethod
def bind(self, addr, *args, **kwargs):
raise NotImplementedError
@abstractmethod
def send(self, data, dst=None):
raise NotImplementedError
@abstractmethod
def recv(self, block=True):
raise NotImplementedError
def close(self):
self.sock.close()
def settimeout(self, timeout):
prev = self.sock.gettimeout()
self.sock.settimeout(timeout)
return prev
def is_active(self):
return True
class UDPSocket(Socket):
def __init__(self, bind=None, addr_type=AddrType.IPV6, reuse=False):
assert addr_type in (AddrType.IPV4, AddrType.IPV6)
self._addr_type = addr_type
af_domain = AF_INET6
if self._addr_type == AddrType.IPV4:
af_domain = AF_INET
self.sock = socket(af_domain, SOCK_DGRAM)
if reuse:
self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.port = None
if bind:
self.bind(*bind)
self.active = True
def bind(self, addr, port=0, desc=None):
if addr is None:
addr = "::"
if self._addr_type == AddrType.IPV4:
addr = ""
try:
self.sock.bind((addr, port))
except OSError as e:
logging.critical("Error binding to [%s]:%s: %s", addr, port, e)
kill_self()
self.port = self.sock.getsockname()[1]
if desc:
logging.debug("%s bound to %s:%d", desc, addr, self.port)
def send(self, data, dst=None):
try:
ret = self.sock.sendto(data, dst)
except OSError as e:
errno = e.args[0]
logging.error("Error sending %dB to %s: %s", len(data), dst, e)
if errno == ENETUNREACH:
raise SCMPUnreachNet(dst)
elif errno == EHOSTUNREACH:
raise SCMPUnreachHost(dst)
return False
if ret != len(data):
logging.error("Wanted to send %dB, only sent %dB", len(data), ret)
return False
return True
def recv(self, block=True):
flags = 0
if not block:
flags = MSG_DONTWAIT
while True:
try:
return self.sock.recvfrom(SCION_BUFLEN, flags)
except InterruptedError:
pass
class ReliableSocket(Socket):
COOKIE = bytes.fromhex("de00ad01be02ef03")
COOKIE_LEN = len(COOKIE)
def __init__(self, reg=None, bind_ip=(), bind_unix=None, sock=None):
self.sock = sock or socket(AF_UNIX, SOCK_STREAM)
self.addr = None
if reg:
addr, port, init, svc = reg
self.registered = reg_dispatcher(
self, addr, port, bind_ip, init, svc)
if bind_unix:
self.bind(*bind_unix)
self.active = True
@classmethod
def from_socket(cls, sock):
return cls(None, sock=sock)
def bind(self, addr, desc=None):
self.addr = addr
old_mask = os.umask(0o111)
try:
self.sock.bind(addr)
except OSError as e:
logging.critical("Error binding to %s: %s", addr, e)
kill_self()
os.umask(old_mask)
self.sock.listen(5)
if desc:
logging.debug("%s bound to %s", desc, addr)
def accept(self, block=True):
prev = self.sock.gettimeout()
if not block:
self.sock.settimeout(0)
try:
s = self.sock.accept()[0]
except OSError as e:
logging.error("error accepting socket: %s", e)
return None
finally:
self.sock.settimeout(prev)
return ReliableSocket.from_socket(s)
def connect(self, addr):
self.sock.connect(addr)
def send(self, data, dst=None):
if dst:
dst_addr, dst_port = dst
if isinstance(dst_addr, str):
dst_addr = haddr_parse_interface(dst_addr)
addr_type = struct.pack("B", dst_addr.TYPE)
packed_dst = dst_addr.pack() + struct.pack("!H", dst_port)
else:
addr_type = struct.pack("B", AddrType.NONE)
packed_dst = b""
data_len = struct.pack("!I", len(data))
data = b"".join([self.COOKIE, addr_type, data_len, packed_dst, data])
try:
self.sock.sendall(data)
return True
except OSError as e:
logging.error("error in send: %s", e)
return False
def recv(self, block=True):
flags = 0
if not block:
flags = MSG_DONTWAIT
buf = recv_all(self.sock, self.COOKIE_LEN + 5, flags)
if not buf:
return None, None
cookie, addr_type, packet_len = struct.unpack("!8sBI", buf)
if cookie != self.COOKIE:
raise SCIONIOError("Dispatcher socket out of sync")
port_len = 0
if addr_type != AddrType.NONE:
port_len = 2
addr_len = haddr_get_type(addr_type).LEN
buf = recv_all(self.sock, addr_len + port_len + packet_len, 0)
if addr_len > 0:
addr = buf[:addr_len]
            port = struct.unpack("!H", buf[addr_len:addr_len + port_len])[0]
sender = (str(ipaddress.ip_address(addr)), port)
else:
addr = ""
port = 0
sender = (None, None)
packet = buf[addr_len + port_len:]
return packet, sender
def close(self):
super().close()
if not self.addr:
return
try:
os.unlink(self.addr)
except OSError as e:
logging.critical("Error unlinking unix socket: %s", e)
kill_self()
class SocketMgr(object):
def __init__(self):
self._sel = selectors.DefaultSelector()
def add(self, sock, callback):
if not sock.is_active():
return
self._sel.register(sock.sock, selectors.EVENT_READ, (sock, callback))
def remove(self, sock):
self._sel.unregister(sock.sock)
def select_(self, timeout=None):
for key, _ in self._sel.select(timeout=timeout):
yield key.data
def close(self):
mapping = self._sel.get_map()
if mapping:
for entry in list(mapping.values()):
sock = entry.data[0]
self.remove(sock)
sock.close()
self._sel.close()
| true
| true
|
790ea96619f499aab56bb208e4e3fd11df4af6bc
| 2,178
|
py
|
Python
|
frameworks/schema_dataclasses.py
|
mtag-dev/py-rest-stress-testing
|
37bd95575aa264734a980cef6b1a4506fdc24eb8
|
[
"MIT"
] | 1
|
2021-12-06T09:49:21.000Z
|
2021-12-06T09:49:21.000Z
|
frameworks/schema_dataclasses.py
|
mtag-dev/py-rest-stress-testing
|
37bd95575aa264734a980cef6b1a4506fdc24eb8
|
[
"MIT"
] | 42
|
2021-11-03T13:48:00.000Z
|
2022-03-28T17:15:26.000Z
|
frameworks/schema_dataclasses.py
|
mtag-dev/py-rest-stress-testing
|
37bd95575aa264734a980cef6b1a4506fdc24eb8
|
[
"MIT"
] | null | null | null |
from typing import List, Optional, Union
from dataclasses import dataclass, field
## - - - - - - -
## userinfo.json
## - - - - - - -
@dataclass
class UserAddress:
formatted: str = ""
@dataclass
class UserInfoResponse:
group_ids: List[str] = field(default_factory=list)
sub: str = ""
given_name: str = ""
name: str = ""
email: str = ""
phone_number: Optional[str] = None
address: Optional[UserAddress] = None
picture: Optional[str] = None
## - - - - - - -
## sprint.json
## - - - - - - -
@dataclass
class Sprint:
id: str
name: str
start: str
end: str
@dataclass
class ShortUserInfo:
id: str
name: str
picture: str
@dataclass
class Issue:
id: str
summary: str
description_short: str
index: int
status_id: str
story_points: int
assigned: Optional[ShortUserInfo]
modified_at: str
@dataclass
class SprintResponse:
sprint: Sprint
issues: List[Issue]
## - - - - - - -
## create-task.json
## - - - - - - -
@dataclass
class CreateTaskRequestBody:
author: str
assigned: str
summary: str
project: str
    sprint: str
labels: List[str]
issue_type: str
components: List[str]
description: str
priority: str
@dataclass
class CreateTaskPerson:
id: str
email: str
name: str
picture: str
is_active: bool
@dataclass
class CreateTaskProject:
id: str
name: str
@dataclass
class CreateTaskStatus:
id: str
name: str
@dataclass
class CreateTaskActivity:
user_id: str
action: str
created_at: str
details: Optional[Union[CreateTaskPerson, CreateTaskStatus]] = None
@dataclass
class CreateTaskResponse:
author: CreateTaskPerson
assigned: CreateTaskPerson
summary: str
project: CreateTaskProject
sprint: str
labels: List[str]
issue_type: str
components: List[str]
description: str
priority: str
status: CreateTaskStatus
activity: List[CreateTaskActivity]
created_at: str
modified_at: str
## - - - - - - -
## update-task.json
## - - - - - - -
@dataclass
class UpdateTaskRequestBody(CreateTaskRequestBody):
pass
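# Construction sketch (field values are hypothetical; the dataclasses above
# mirror the JSON fixtures named in the section comments):
#   sprint = Sprint(id="s1", name="Sprint 1", start="2021-11-01", end="2021-11-14")
#   issue = Issue(id="i1", summary="Fix bug", description_short="", index=0,
#                 status_id="open", story_points=3, assigned=None,
#                 modified_at="2021-11-02T10:00:00Z")
#   resp = SprintResponse(sprint=sprint, issues=[issue])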
| 16.253731
| 71
| 0.636823
|
from typing import List, Optional, Union
from dataclasses import dataclass, field
@dataclass
class UserAddress:
    formatted: str = ""
@dataclass
class UserInfoResponse:
group_ids: List[str] = field(default_factory=list)
sub: str = ""
given_name: str = ""
name: str = ""
email: str = ""
phone_number: Optional[str] = None
address: Optional[UserAddress] = None
picture: Optional[str] = None
@dataclass
class Sprint:
    id: str
    name: str
start: str
end: str
@dataclass
class ShortUserInfo:
id: str
name: str
picture: str
@dataclass
class Issue:
id: str
summary: str
description_short: str
index: int
status_id: str
story_points: int
assigned: Optional[ShortUserInfo]
modified_at: str
@dataclass
class SprintResponse:
sprint: Sprint
issues: List[Issue]
@dataclass
class CreateTaskRequestBody:
    author: str
assigned: str
summary: str
project: str
    sprint: str
labels: List[str]
issue_type: str
components: List[str]
description: str
priority: str
@dataclass
class CreateTaskPerson:
id: str
email: str
name: str
picture: str
is_active: bool
@dataclass
class CreateTaskProject:
id: str
name: str
@dataclass
class CreateTaskStatus:
id: str
name: str
@dataclass
class CreateTaskActivity:
user_id: str
action: str
created_at: str
details: Optional[Union[CreateTaskPerson, CreateTaskStatus]] = None
@dataclass
class CreateTaskResponse:
author: CreateTaskPerson
assigned: CreateTaskPerson
summary: str
project: CreateTaskProject
sprint: str
labels: List[str]
issue_type: str
components: List[str]
description: str
priority: str
status: CreateTaskStatus
activity: List[CreateTaskActivity]
created_at: str
modified_at: str
@dataclass
class UpdateTaskRequestBody(CreateTaskRequestBody):
pass
| true
| true
|
790ea9ff4c7d70fb8c547b4ddb51f8931cc0d2ff
| 8,820
|
py
|
Python
|
configurable_control_gym/envs/cartpole.py
|
takuseno/configurable-control-gym
|
9837e6c03c5a7fe6711b32dc70fe5e432e7d96c3
|
[
"MIT"
] | 2
|
2020-08-03T12:49:13.000Z
|
2021-07-26T22:43:08.000Z
|
configurable_control_gym/envs/cartpole.py
|
takuseno/configurable-control-gym
|
9837e6c03c5a7fe6711b32dc70fe5e432e7d96c3
|
[
"MIT"
] | null | null | null |
configurable_control_gym/envs/cartpole.py
|
takuseno/configurable-control-gym
|
9837e6c03c5a7fe6711b32dc70fe5e432e7d96c3
|
[
"MIT"
] | 1
|
2021-07-23T11:05:10.000Z
|
2021-07-23T11:05:10.000Z
|
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
class CartPoleEnv(gym.Env):
"""
Description:
A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum starts upright, and the goal is to prevent it from falling over by increasing and reducing the cart's velocity.
Source:
This environment corresponds to the version of the cart-pole problem described by Barto, Sutton, and Anderson
Observation:
Type: Box(4)
Num Observation Min Max
0 Cart Position -4.8 4.8
1 Cart Velocity -Inf Inf
2 Pole Angle -24 deg 24 deg
3 Pole Velocity At Tip -Inf Inf
Actions:
        Type: Box(1)
        A single continuous action in [-1, 1], scaled by force_mag to give the
        force applied to the cart (negative pushes left, positive pushes right)
        Note: The amount by which the velocity is reduced or increased is not fixed; it depends on the angle the pole is pointing. This is because the center of gravity of the pole increases the amount of energy needed to move the cart underneath it
Reward:
Reward is 1 for every step taken, including the termination step
Starting State:
All observations are assigned a uniform random value in [-0.05..0.05]
Episode Termination:
Pole Angle is more than 12 degrees
Cart Position is more than 2.4 (center of the cart reaches the edge of the display)
Episode length is greater than 200
Solved Requirements
Considered solved when the average reward is greater than or equal to 195.0 over 100 consecutive trials.
"""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 50
}
def __init__(self, force=10.0, length=0.5, mass=0.1):
self.gravity = 9.8
self.masscart = 1.0
self.tau = 0.02 # seconds between state updates
self.kinematics_integrator = 'euler'
if isinstance(force, list):
self.force_mag_set = force
else:
self.force_mag_set = None
self.force_mag = force
if isinstance(length, list):
self.length_set = length
else:
self.length_set = None
self.length = length
if isinstance(mass, list):
self.masspole_set = mass
else:
self.masspole_set = None
self.masspole = mass
# Angle at which to fail the episode
self.theta_threshold_radians = 12 * 2 * math.pi / 360
self.x_threshold = 2.4
# Angle limit set to 2 * theta_threshold_radians so failing observation is still within bounds
high = np.array([
self.x_threshold * 2,
np.finfo(np.float32).max,
self.theta_threshold_radians * 2,
np.finfo(np.float32).max])
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
self.seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
self.steps_in_episode = 0
def _sample_parameter(self):
if self.force_mag_set is not None:
set_index = self.np_random.randint(len(self.force_mag_set))
self.force_mag = self.np_random.uniform(
self.force_mag_set[set_index][0],
self.force_mag_set[set_index][1])
if self.length_set is not None:
set_index = self.np_random.randint(len(self.length_set))
self.length = self.np_random.uniform(self.length_set[set_index][0],
self.length_set[set_index][1])
if self.masspole_set is not None:
set_index = self.np_random.randint(len(self.masspole_set))
self.masspole = self.np_random.uniform(
self.masspole_set[set_index][0],
self.masspole_set[set_index][1])
self.polemass_length = (self.masspole * self.length)
self.total_mass = (self.masspole + self.masscart)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
state = self.state
x, x_dot, theta, theta_dot = state
force = self.force_mag * action[0]
costheta = math.cos(theta)
sintheta = math.sin(theta)
temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass
thetaacc = (self.gravity * sintheta - costheta* temp) / (self.length * (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass))
xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
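        # The two equations above are the classic cart-pole dynamics used by
        # gym's CartPole (Barto, Sutton & Anderson formulation); comment added
        # as a reading aid.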
if self.kinematics_integrator == 'euler':
x = x + self.tau * x_dot
x_dot = x_dot + self.tau * xacc
theta = theta + self.tau * theta_dot
theta_dot = theta_dot + self.tau * thetaacc
else: # semi-implicit euler
x_dot = x_dot + self.tau * xacc
x = x + self.tau * x_dot
theta_dot = theta_dot + self.tau * thetaacc
theta = theta + self.tau * theta_dot
self.state = (x,x_dot,theta,theta_dot)
done = x < -self.x_threshold \
or x > self.x_threshold \
or theta < -self.theta_threshold_radians \
or theta > self.theta_threshold_radians
done = bool(done)
self.steps_in_episode += 1
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
logger.warn("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
info = {}
info['success'] = self.steps_in_episode >= 195
return np.array(self.state), reward, done, info
def reset(self):
self._sample_parameter()
self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))
self.steps_beyond_done = None
self.steps_in_episode = 0
return np.array(self.state)
def render(self, mode='human'):
screen_width = 600
screen_height = 400
world_width = self.x_threshold*2
scale = screen_width/world_width
carty = 100 # TOP OF CART
polewidth = 10.0
polelen = scale * (2 * self.length)
cartwidth = 50.0
cartheight = 30.0
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
l,r,t,b = -cartwidth/2, cartwidth/2, cartheight/2, -cartheight/2
axleoffset =cartheight/4.0
cart = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
self.carttrans = rendering.Transform()
cart.add_attr(self.carttrans)
self.viewer.add_geom(cart)
l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2
pole = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
pole.set_color(.8,.6,.4)
self.poletrans = rendering.Transform(translation=(0, axleoffset))
pole.add_attr(self.poletrans)
pole.add_attr(self.carttrans)
self.viewer.add_geom(pole)
self.axle = rendering.make_circle(polewidth/2)
self.axle.add_attr(self.poletrans)
self.axle.add_attr(self.carttrans)
self.axle.set_color(.5,.5,.8)
self.viewer.add_geom(self.axle)
self.track = rendering.Line((0,carty), (screen_width,carty))
self.track.set_color(0,0,0)
self.viewer.add_geom(self.track)
self._pole_geom = pole
if self.state is None: return None
# Edit the pole polygon vertex
pole = self._pole_geom
l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2
pole.v = [(l,b), (l,t), (r,t), (r,b)]
x = self.state
cartx = x[0]*scale+screen_width/2.0 # MIDDLE OF CART
self.carttrans.set_translation(cartx, carty)
self.poletrans.set_rotation(-x[2])
return self.viewer.render(return_rgb_array = mode=='rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
| 39.72973
| 245
| 0.593424
|
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
class CartPoleEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 50
}
def __init__(self, force=10.0, length=0.5, mass=0.1):
self.gravity = 9.8
self.masscart = 1.0
self.tau = 0.02
self.kinematics_integrator = 'euler'
if isinstance(force, list):
self.force_mag_set = force
else:
self.force_mag_set = None
self.force_mag = force
if isinstance(length, list):
self.length_set = length
else:
self.length_set = None
self.length = length
if isinstance(mass, list):
self.masspole_set = mass
else:
self.masspole_set = None
self.masspole = mass
self.theta_threshold_radians = 12 * 2 * math.pi / 360
self.x_threshold = 2.4
high = np.array([
self.x_threshold * 2,
np.finfo(np.float32).max,
self.theta_threshold_radians * 2,
np.finfo(np.float32).max])
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
self.seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
self.steps_in_episode = 0
def _sample_parameter(self):
if self.force_mag_set is not None:
set_index = self.np_random.randint(len(self.force_mag_set))
self.force_mag = self.np_random.uniform(
self.force_mag_set[set_index][0],
self.force_mag_set[set_index][1])
if self.length_set is not None:
set_index = self.np_random.randint(len(self.length_set))
self.length = self.np_random.uniform(self.length_set[set_index][0],
self.length_set[set_index][1])
if self.masspole_set is not None:
set_index = self.np_random.randint(len(self.masspole_set))
self.masspole = self.np_random.uniform(
self.masspole_set[set_index][0],
self.masspole_set[set_index][1])
self.polemass_length = (self.masspole * self.length)
self.total_mass = (self.masspole + self.masscart)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
state = self.state
x, x_dot, theta, theta_dot = state
force = self.force_mag * action[0]
costheta = math.cos(theta)
sintheta = math.sin(theta)
temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass
thetaacc = (self.gravity * sintheta - costheta* temp) / (self.length * (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass))
xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
if self.kinematics_integrator == 'euler':
x = x + self.tau * x_dot
x_dot = x_dot + self.tau * xacc
theta = theta + self.tau * theta_dot
theta_dot = theta_dot + self.tau * thetaacc
else:
x_dot = x_dot + self.tau * xacc
x = x + self.tau * x_dot
theta_dot = theta_dot + self.tau * thetaacc
theta = theta + self.tau * theta_dot
self.state = (x,x_dot,theta,theta_dot)
done = x < -self.x_threshold \
or x > self.x_threshold \
or theta < -self.theta_threshold_radians \
or theta > self.theta_threshold_radians
done = bool(done)
self.steps_in_episode += 1
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
logger.warn("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
info = {}
info['success'] = self.steps_in_episode >= 195
return np.array(self.state), reward, done, info
def reset(self):
self._sample_parameter()
self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))
self.steps_beyond_done = None
self.steps_in_episode = 0
return np.array(self.state)
def render(self, mode='human'):
screen_width = 600
screen_height = 400
world_width = self.x_threshold*2
scale = screen_width/world_width
carty = 100
polewidth = 10.0
polelen = scale * (2 * self.length)
cartwidth = 50.0
cartheight = 30.0
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
l,r,t,b = -cartwidth/2, cartwidth/2, cartheight/2, -cartheight/2
axleoffset =cartheight/4.0
cart = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
self.carttrans = rendering.Transform()
cart.add_attr(self.carttrans)
self.viewer.add_geom(cart)
l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2
pole = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
pole.set_color(.8,.6,.4)
self.poletrans = rendering.Transform(translation=(0, axleoffset))
pole.add_attr(self.poletrans)
pole.add_attr(self.carttrans)
self.viewer.add_geom(pole)
self.axle = rendering.make_circle(polewidth/2)
self.axle.add_attr(self.poletrans)
self.axle.add_attr(self.carttrans)
self.axle.set_color(.5,.5,.8)
self.viewer.add_geom(self.axle)
self.track = rendering.Line((0,carty), (screen_width,carty))
self.track.set_color(0,0,0)
self.viewer.add_geom(self.track)
self._pole_geom = pole
if self.state is None: return None
pole = self._pole_geom
l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2
pole.v = [(l,b), (l,t), (r,t), (r,b)]
x = self.state
cartx = x[0]*scale+screen_width/2.0
self.carttrans.set_translation(cartx, carty)
self.poletrans.set_rotation(-x[2])
return self.viewer.render(return_rgb_array = mode=='rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
| true
| true
|
790eaa491884e40dcd87e04684adcaa3a65828c5
| 1,313
|
py
|
Python
|
enex2notion/cli_notion.py
|
vzhd1701/enex2notion
|
d9e0811af6f2c779caf1328c6daa0d6f81290fb3
|
[
"MIT"
] | 49
|
2021-12-12T04:13:24.000Z
|
2022-03-31T12:58:57.000Z
|
enex2notion/cli_notion.py
|
vzhd1701/enex2notion
|
d9e0811af6f2c779caf1328c6daa0d6f81290fb3
|
[
"MIT"
] | 11
|
2021-12-03T10:49:54.000Z
|
2022-03-29T20:00:30.000Z
|
enex2notion/cli_notion.py
|
vzhd1701/enex2notion
|
d9e0811af6f2c779caf1328c6daa0d6f81290fb3
|
[
"MIT"
] | 3
|
2022-02-04T13:25:21.000Z
|
2022-03-07T17:54:36.000Z
|
import logging
import sys
from notion.block import PageBlock
from notion.client import NotionClient
from requests import HTTPError, codes
from enex2notion.utils_exceptions import BadTokenException
logger = logging.getLogger(__name__)
def get_root(token, name):
if not token:
logger.warning(
"No token provided, dry run mode. Nothing will be uploaded to Notion!"
)
return None
try:
client = get_notion_client(token)
except BadTokenException:
logger.error("Invalid token provided!")
sys.exit(1)
return get_import_root(client, name)
def get_notion_client(token):
try:
return NotionClient(token_v2=token)
except HTTPError as e: # pragma: no cover
if e.response.status_code == codes["unauthorized"]:
raise BadTokenException
raise
def get_import_root(client, title):
try:
top_pages = client.get_top_level_pages()
except KeyError: # pragma: no cover
# Need empty account to test
top_pages = []
for page in top_pages:
if isinstance(page, PageBlock) and page.title == title:
logger.info(f"'{title}' page found")
return page
logger.info(f"Creating '{title}' page...")
return client.current_space.add_page(title)
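# Usage sketch (the token string is hypothetical; an empty token takes the
# dry-run branch in get_root above):
#   root = get_root("token_v2_value", "Evernote ENEX Import")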
| 25.25
| 82
| 0.665651
|
import logging
import sys
from notion.block import PageBlock
from notion.client import NotionClient
from requests import HTTPError, codes
from enex2notion.utils_exceptions import BadTokenException
logger = logging.getLogger(__name__)
def get_root(token, name):
if not token:
logger.warning(
"No token provided, dry run mode. Nothing will be uploaded to Notion!"
)
return None
try:
client = get_notion_client(token)
except BadTokenException:
logger.error("Invalid token provided!")
sys.exit(1)
return get_import_root(client, name)
def get_notion_client(token):
try:
return NotionClient(token_v2=token)
except HTTPError as e:
if e.response.status_code == codes["unauthorized"]:
raise BadTokenException
raise
def get_import_root(client, title):
try:
top_pages = client.get_top_level_pages()
except KeyError:
top_pages = []
for page in top_pages:
if isinstance(page, PageBlock) and page.title == title:
logger.info(f"'{title}' page found")
return page
logger.info(f"Creating '{title}' page...")
return client.current_space.add_page(title)
| true
| true
|
790eaaa759816e509f3f5f24589e258cfc969ee9
| 346
|
py
|
Python
|
sb3_contrib/__init__.py
|
cyprienc/stable-baselines3-contrib
|
342fe531bfa5af13a15099f16fbc71feceb5ba1e
|
[
"MIT"
] | null | null | null |
sb3_contrib/__init__.py
|
cyprienc/stable-baselines3-contrib
|
342fe531bfa5af13a15099f16fbc71feceb5ba1e
|
[
"MIT"
] | null | null | null |
sb3_contrib/__init__.py
|
cyprienc/stable-baselines3-contrib
|
342fe531bfa5af13a15099f16fbc71feceb5ba1e
|
[
"MIT"
] | null | null | null |
import os
from sb3_contrib.ppo_mask import MaskablePPO
from sb3_contrib.qrdqn import QRDQN
from sb3_contrib.tqc import TQC
from sb3_contrib.trpo import TRPO
# Read version from file
version_file = os.path.join(os.path.dirname(__file__), "version.txt")
with open(version_file, "r") as file_handler:
__version__ = file_handler.read().strip()
| 28.833333
| 69
| 0.791908
|
import os
from sb3_contrib.ppo_mask import MaskablePPO
from sb3_contrib.qrdqn import QRDQN
from sb3_contrib.tqc import TQC
from sb3_contrib.trpo import TRPO
version_file = os.path.join(os.path.dirname(__file__), "version.txt")
with open(version_file, "r") as file_handler:
__version__ = file_handler.read().strip()
| true
| true
|
790eab206fb124c88e97a56d4b9422cf52199180
| 2,380
|
py
|
Python
|
keras_metrics/__init__.py
|
netrack/keras-metrics
|
daed203c31b6dfecaef1b8344accf7494cb7e78d
|
[
"MIT"
] | 180
|
2018-06-03T06:37:42.000Z
|
2021-05-24T05:36:58.000Z
|
AIDeveloper/keras_metrics/__init__.py
|
maikherbig/AIDeveloper
|
34d03cc6953680255fb0d2d0406331e70b7eaa50
|
[
"BSD-3-Clause"
] | 51
|
2020-01-13T07:54:13.000Z
|
2022-03-17T09:11:56.000Z
|
keras_metrics/__init__.py
|
netrack/keras-metrics
|
daed203c31b6dfecaef1b8344accf7494cb7e78d
|
[
"MIT"
] | 33
|
2018-06-21T09:18:43.000Z
|
2020-02-28T16:56:26.000Z
|
from functools import partial
from keras_metrics import metrics as m
from keras_metrics import casts
__version__ = "1.2.1"
def metric_fn(cls, cast_strategy):
def fn(label=0, **kwargs):
metric = cls(label=label, cast_strategy=cast_strategy, **kwargs)
metric.__name__ = "%s_%s" % (cast_strategy.__name__, cls.__name__)
return metric
return fn
binary_metric = partial(
metric_fn, cast_strategy=casts.binary)
categorical_metric = partial(
metric_fn, cast_strategy=casts.categorical)
sparse_categorical_metric = partial(
metric_fn, cast_strategy=casts.sparse_categorical)
binary_true_positive = binary_metric(m.true_positive)
binary_true_negative = binary_metric(m.true_negative)
binary_false_positive = binary_metric(m.false_positive)
binary_false_negative = binary_metric(m.false_negative)
binary_precision = binary_metric(m.precision)
binary_recall = binary_metric(m.recall)
binary_f1_score = binary_metric(m.f1_score)
binary_average_recall = binary_metric(m.average_recall)
categorical_true_positive = categorical_metric(m.true_positive)
categorical_true_negative = categorical_metric(m.true_negative)
categorical_false_positive = categorical_metric(m.false_positive)
categorical_false_negative = categorical_metric(m.false_negative)
categorical_precision = categorical_metric(m.precision)
categorical_recall = categorical_metric(m.recall)
categorical_f1_score = categorical_metric(m.f1_score)
categorical_average_recall = categorical_metric(m.average_recall)
sparse_categorical_true_positive = sparse_categorical_metric(m.true_positive)
sparse_categorical_true_negative = sparse_categorical_metric(m.true_negative)
sparse_categorical_false_positive = sparse_categorical_metric(m.false_positive)
sparse_categorical_false_negative = sparse_categorical_metric(m.false_negative)
sparse_categorical_precision = sparse_categorical_metric(m.precision)
sparse_categorical_recall = sparse_categorical_metric(m.recall)
sparse_categorical_f1_score = sparse_categorical_metric(m.f1_score)
sparse_categorical_average_recall = sparse_categorical_metric(m.average_recall)
# For backward compatibility.
true_positive = binary_true_positive
true_negative = binary_true_negative
false_positive = binary_false_positive
false_negative = binary_false_negative
precision = binary_precision
recall = binary_recall
f1_score = binary_f1_score
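# Usage sketch (assumes a compiled-style Keras workflow; `model` is
# hypothetical) showing the partially-applied metric factories above:
#   model.compile(optimizer="adam", loss="binary_crossentropy",
#                 metrics=[binary_precision(), binary_recall(label=1)])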
| 35.522388
| 79
| 0.843697
|
from functools import partial
from keras_metrics import metrics as m
from keras_metrics import casts
__version__ = "1.2.1"
def metric_fn(cls, cast_strategy):
def fn(label=0, **kwargs):
metric = cls(label=label, cast_strategy=cast_strategy, **kwargs)
metric.__name__ = "%s_%s" % (cast_strategy.__name__, cls.__name__)
return metric
return fn
binary_metric = partial(
metric_fn, cast_strategy=casts.binary)
categorical_metric = partial(
metric_fn, cast_strategy=casts.categorical)
sparse_categorical_metric = partial(
metric_fn, cast_strategy=casts.sparse_categorical)
binary_true_positive = binary_metric(m.true_positive)
binary_true_negative = binary_metric(m.true_negative)
binary_false_positive = binary_metric(m.false_positive)
binary_false_negative = binary_metric(m.false_negative)
binary_precision = binary_metric(m.precision)
binary_recall = binary_metric(m.recall)
binary_f1_score = binary_metric(m.f1_score)
binary_average_recall = binary_metric(m.average_recall)
categorical_true_positive = categorical_metric(m.true_positive)
categorical_true_negative = categorical_metric(m.true_negative)
categorical_false_positive = categorical_metric(m.false_positive)
categorical_false_negative = categorical_metric(m.false_negative)
categorical_precision = categorical_metric(m.precision)
categorical_recall = categorical_metric(m.recall)
categorical_f1_score = categorical_metric(m.f1_score)
categorical_average_recall = categorical_metric(m.average_recall)
sparse_categorical_true_positive = sparse_categorical_metric(m.true_positive)
sparse_categorical_true_negative = sparse_categorical_metric(m.true_negative)
sparse_categorical_false_positive = sparse_categorical_metric(m.false_positive)
sparse_categorical_false_negative = sparse_categorical_metric(m.false_negative)
sparse_categorical_precision = sparse_categorical_metric(m.precision)
sparse_categorical_recall = sparse_categorical_metric(m.recall)
sparse_categorical_f1_score = sparse_categorical_metric(m.f1_score)
sparse_categorical_average_recall = sparse_categorical_metric(m.average_recall)
true_positive = binary_true_positive
true_negative = binary_true_negative
false_positive = binary_false_positive
false_negative = binary_false_negative
precision = binary_precision
recall = binary_recall
f1_score = binary_f1_score
| true
| true
|
790eab2aab0f40b91977170e40f32fa3aa393bda
| 2,623
|
bzl
|
Python
|
lib/private/yq.bzl
|
alexeagle/bazel-lib
|
02be4c5f47b476e686943f53ddd47a672a51a265
|
[
"Apache-2.0"
] | null | null | null |
lib/private/yq.bzl
|
alexeagle/bazel-lib
|
02be4c5f47b476e686943f53ddd47a672a51a265
|
[
"Apache-2.0"
] | null | null | null |
lib/private/yq.bzl
|
alexeagle/bazel-lib
|
02be4c5f47b476e686943f53ddd47a672a51a265
|
[
"Apache-2.0"
] | null | null | null |
"""Implementation for yq rule"""
_yq_attrs = {
"srcs": attr.label_list(
allow_files = [".yaml", ".json", ".xml"],
mandatory = True,
allow_empty = True,
),
"expression": attr.string(mandatory = False),
"args": attr.string_list(),
"outs": attr.output_list(mandatory = True),
}
def is_split_operation(args):
for arg in args:
if arg.startswith("-s") or arg.startswith("--split-exp"):
return True
return False
def _escape_path(path):
return "/".join([".." for t in path.split("/")]) + "/"
def _yq_impl(ctx):
yq_bin = ctx.toolchains["@aspect_bazel_lib//lib:yq_toolchain_type"].yqinfo.bin
outs = ctx.outputs.outs
args = ctx.attr.args[:]
inputs = ctx.files.srcs[:]
split_operation = is_split_operation(args)
if "eval" in args or "eval-all" in args:
fail("Do not pass 'eval' or 'eval-all' into yq; this is already set based on the number of srcs")
if not split_operation and len(outs) > 1:
fail("Cannot specify multiple outputs when -s or --split-exp is not set")
if "-i" in args or "--inplace" in args:
fail("Cannot use arg -i or --inplace as it is not bazel-idiomatic to update the input file; consider using write_source_files to write back to the source tree")
if len(ctx.attr.srcs) == 0 and "-n" not in args and "--null-input" not in args:
args = args + ["--null-input"]
# For split operations, yq outputs files in the same directory so we
# must cd to the correct output dir before executing it
bin_dir = ctx.bin_dir.path + "/" + ctx.label.package
escape_bin_dir = _escape_path(bin_dir)
cmd = "cd {bin_dir} && {yq} {args} {eval_cmd} {expression} {sources} {maybe_out}".format(
bin_dir = ctx.bin_dir.path + "/" + ctx.label.package,
yq = escape_bin_dir + yq_bin.path,
eval_cmd = "eval" if len(inputs) <= 1 else "eval-all",
args = " ".join(args),
expression = "'%s'" % ctx.attr.expression if ctx.attr.expression else "",
sources = " ".join(["'%s%s'" % (escape_bin_dir, file.path) for file in ctx.files.srcs]),
        # In the -s/--split-exp case, the out file names are determined by the yq expression
maybe_out = (" > %s%s" % (escape_bin_dir, outs[0].path)) if len(outs) == 1 else "",
)
ctx.actions.run_shell(
tools = [yq_bin],
inputs = inputs,
outputs = outs,
command = cmd,
mnemonic = "yq",
)
return DefaultInfo(files = depset(outs), runfiles = ctx.runfiles(outs))
yq_lib = struct(
attrs = _yq_attrs,
implementation = _yq_impl,
)
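# BUILD-file sketch (assumes a rule wrapper named `yq` is declared from yq_lib
# elsewhere; the expression and file names are illustrative):
#   yq(
#       name = "merge",
#       srcs = ["a.yaml", "b.yaml"],
#       expression = ". as $item ireduce ({}; . * $item)",
#       outs = ["merged.yaml"],
#   )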
| 37.471429
| 168
| 0.617995
|
_yq_attrs = {
"srcs": attr.label_list(
allow_files = [".yaml", ".json", ".xml"],
mandatory = True,
allow_empty = True,
),
"expression": attr.string(mandatory = False),
"args": attr.string_list(),
"outs": attr.output_list(mandatory = True),
}
def is_split_operation(args):
for arg in args:
if arg.startswith("-s") or arg.startswith("--split-exp"):
return True
return False
def _escape_path(path):
return "/".join([".." for t in path.split("/")]) + "/"
def _yq_impl(ctx):
yq_bin = ctx.toolchains["@aspect_bazel_lib//lib:yq_toolchain_type"].yqinfo.bin
outs = ctx.outputs.outs
args = ctx.attr.args[:]
inputs = ctx.files.srcs[:]
split_operation = is_split_operation(args)
if "eval" in args or "eval-all" in args:
fail("Do not pass 'eval' or 'eval-all' into yq; this is already set based on the number of srcs")
if not split_operation and len(outs) > 1:
fail("Cannot specify multiple outputs when -s or --split-exp is not set")
if "-i" in args or "--inplace" in args:
fail("Cannot use arg -i or --inplace as it is not bazel-idiomatic to update the input file; consider using write_source_files to write back to the source tree")
if len(ctx.attr.srcs) == 0 and "-n" not in args and "--null-input" not in args:
args = args + ["--null-input"]
bin_dir = ctx.bin_dir.path + "/" + ctx.label.package
escape_bin_dir = _escape_path(bin_dir)
cmd = "cd {bin_dir} && {yq} {args} {eval_cmd} {expression} {sources} {maybe_out}".format(
bin_dir = ctx.bin_dir.path + "/" + ctx.label.package,
yq = escape_bin_dir + yq_bin.path,
eval_cmd = "eval" if len(inputs) <= 1 else "eval-all",
args = " ".join(args),
expression = "'%s'" % ctx.attr.expression if ctx.attr.expression else "",
sources = " ".join(["'%s%s'" % (escape_bin_dir, file.path) for file in ctx.files.srcs]),
maybe_out = (" > %s%s" % (escape_bin_dir, outs[0].path)) if len(outs) == 1 else "",
)
ctx.actions.run_shell(
tools = [yq_bin],
inputs = inputs,
outputs = outs,
command = cmd,
mnemonic = "yq",
)
return DefaultInfo(files = depset(outs), runfiles = ctx.runfiles(outs))
yq_lib = struct(
attrs = _yq_attrs,
implementation = _yq_impl,
)
| true
| true
|
790eabb2766051a446539a216425ba5de2c4af11
| 8,788
|
py
|
Python
|
2021/changeLineWidth.py
|
zhangjq933/HowtoSim_Script
|
d958cc6cc743106e8f6ddf58dead6551a8ac7784
|
[
"MIT"
] | 79
|
2019-04-01T04:35:01.000Z
|
2022-03-30T10:59:32.000Z
|
2021/changeLineWidth.py
|
raflzhang/HowtoSim_Script
|
90fb8cca87d47d2c45b8ff5d07a35e8a6c846685
|
[
"MIT"
] | 1
|
2020-03-29T20:52:06.000Z
|
2020-03-30T05:35:30.000Z
|
2021/changeLineWidth.py
|
raflzhang/HowtoSim_Script
|
90fb8cca87d47d2c45b8ff5d07a35e8a6c846685
|
[
"MIT"
] | 73
|
2019-05-07T10:26:53.000Z
|
2022-03-24T02:25:08.000Z
|
# coding=utf-8
import os, clr
os.chdir(os.path.dirname(__file__))
clr.AddReference('System.Drawing')
clr.AddReference('System.Windows.Forms')
from System import Drawing, Array, ComponentModel, Diagnostics, IO
from System.Windows import Forms
import System.Object as object
import System.String as string
from System.Windows.Forms import MessageBox
#----------------------------------------------------------------------------
from collections import OrderedDict
import logging
logging.basicConfig(filename='./message.log', level=logging.DEBUG, filemode='w', format='%(message)s')
import re
import ScriptEnv
ScriptEnv.Initialize("Ansoft.ElectronicsDesktop")
oDesktop.RestoreWindow()
oDesktop.ClearMessages("", "", 2)
oProject = oDesktop.GetActiveProject()
oDesign = oProject.GetActiveDesign()
oEditor = oDesign.GetActiveEditor()
lines = oEditor.FindObjects('type', 'line')
def getLayerLineInfo():
data = OrderedDict()
for layer in oEditor.GetStackupLayerNames():
objs = oEditor.FindObjects('layer', layer)
data[layer] = list(set(lines).intersection(set(objs)))
result = OrderedDict()
for layer in data:
if not bool(data[layer]):
continue
result[layer] = {}
for line in data[layer]:
net = oEditor.GetPropertyValue('BaseElementTab', line, 'Net')
line_width = oEditor.GetPropertyValue('BaseElementTab', line, 'LineWidth')
try:
result[layer][net] +=[(line, line_width)]
            except KeyError:
result[layer][net] = [(line, line_width)]
return result
def changeLineWidth(line, width):
oEditor.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:BaseElementTab",
[
"NAME:PropServers",
line
],
[
"NAME:ChangedProps",
[
"NAME:LineWidth",
"Value:=" , width
]
]
]
])
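# Usage sketch (object name and width value are hypothetical):
#   changeLineWidth('line_1', '0.2mm')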
#----------------------------------------------------------------------------
class MyForm(Forms.Form):
def __init__(self):
self.label1 = Forms.Label()
self.label2 = Forms.Label()
self.listBox_selection = Forms.ListBox()
self.comboBox_layer = Forms.ComboBox()
self.textBox_net = Forms.TextBox()
self.label3 = Forms.Label()
self.textBox_linewidth = Forms.TextBox()
self.button_change = Forms.Button()
self.label4 = Forms.Label()
self.SuspendLayout()
# label1
self.label1.AutoSize = True
self.label1.Location = Drawing.Point(13, 10)
self.label1.Name = "label1"
self.label1.Size = Drawing.Size(50, 19)
self.label1.TabIndex = 0
self.label1.Text = "Layer:"
# label2
self.label2.AutoSize = True
self.label2.Location = Drawing.Point(13, 72)
self.label2.Name = "label2"
self.label2.Size = Drawing.Size(103, 19)
self.label2.TabIndex = 1
self.label2.Text = "Net Keyword:"
self.label2.Click += self.label2_Click
# listBox_selection
self.listBox_selection.FormattingEnabled = True
self.listBox_selection.ItemHeight = 19
self.listBox_selection.Location = Drawing.Point(174, 32)
self.listBox_selection.Name = "listBox_selection"
self.listBox_selection.SelectionMode = Forms.SelectionMode.MultiExtended
self.listBox_selection.Size = Drawing.Size(225, 308)
self.listBox_selection.TabIndex = 2
self.listBox_selection.SelectedIndexChanged += self.listBox_selection_SelectedIndexChanged
# comboBox_layer
self.comboBox_layer.FormattingEnabled = True
self.comboBox_layer.Location = Drawing.Point(13, 32)
self.comboBox_layer.Name = "comboBox_layer"
self.comboBox_layer.Size = Drawing.Size(151, 27)
self.comboBox_layer.TabIndex = 3
self.comboBox_layer.SelectedIndexChanged += self.comboBox_layer_SelectedIndexChanged
# textBox_net
self.textBox_net.Location = Drawing.Point(13, 94)
self.textBox_net.Name = "textBox_net"
self.textBox_net.Size = Drawing.Size(151, 27)
self.textBox_net.TabIndex = 4
self.textBox_net.Text = ".*"
self.textBox_net.TextChanged += self.textBox_net_TextChanged
# label3
self.label3.AutoSize = True
self.label3.Location = Drawing.Point(13, 207)
self.label3.Name = "label3"
self.label3.Size = Drawing.Size(88, 19)
self.label3.TabIndex = 5
self.label3.Text = "Line Width:"
# textBox_linewidth
self.textBox_linewidth.Location = Drawing.Point(13, 229)
self.textBox_linewidth.Name = "textBox_linewidth"
self.textBox_linewidth.Size = Drawing.Size(151, 27)
self.textBox_linewidth.TabIndex = 6
# button_change
self.button_change.Font = Drawing.Font("Microsoft JhengHei UI", 12, Drawing.FontStyle.Bold, Drawing.GraphicsUnit.Point)
self.button_change.Location = Drawing.Point(13, 278)
self.button_change.Name = "button_change"
self.button_change.Size = Drawing.Size(151, 62)
self.button_change.TabIndex = 7
self.button_change.Text = "CHANGE"
self.button_change.UseVisualStyleBackColor = True
self.button_change.Click += self.button_change_Click
# label4
self.label4.AutoSize = True
self.label4.Location = Drawing.Point(174, 10)
self.label4.Name = "label4"
self.label4.Size = Drawing.Size(104, 19)
self.label4.TabIndex = 8
self.label4.Text = "Net Selection:"
# Form1
self.AutoScaleDimensions = Drawing.SizeF(9, 19)
self.AutoScaleMode = Forms.AutoScaleMode.Font
self.ClientSize = Drawing.Size(412, 353)
self.Controls.Add(self.label4)
self.Controls.Add(self.button_change)
self.Controls.Add(self.textBox_linewidth)
self.Controls.Add(self.label3)
self.Controls.Add(self.textBox_net)
self.Controls.Add(self.comboBox_layer)
self.Controls.Add(self.listBox_selection)
self.Controls.Add(self.label2)
self.Controls.Add(self.label1)
self.FormBorderStyle = Forms.FormBorderStyle.FixedSingle
self.MaximizeBox = False
self.MinimizeBox = False
self.MinimumSize = Drawing.Size(400, 400)
self.Name = "Form1"
self.Padding = Forms.Padding(10)
self.SizeGripStyle = Forms.SizeGripStyle.Show
self.StartPosition = Forms.FormStartPosition.CenterScreen
self.Text = "Line Width Editor"
self.TopMost = True
self.Load += self.Form1_Load
self.ResumeLayout(False)
self.PerformLayout()
def refreshListBox(self):
self.listBox_selection.Items.Clear()
for net in self.info[self.comboBox_layer.Text]:
if re.search(self.textBox_net.Text, net):
width = self.info[self.comboBox_layer.Text][net][0][1]
self.listBox_selection.Items.Add('{} - {}'.format(net, width))
def textBox_net_TextChanged(self, sender, e):
self.refreshListBox()
def label2_Click(self, sender, e):
pass
def listBox_selection_SelectedIndexChanged(self, sender, e):
pass
def comboBox_layer_SelectedIndexChanged(self, sender, e):
self.refreshListBox()
def button_change_Click(self, sender, e):
try:
new_width = self.textBox_linewidth.Text
for net_width in self.listBox_selection.SelectedItems:
net = net_width.split()[0]
for n, (line, width) in enumerate(self.info[self.comboBox_layer.Text][net]):
changeLineWidth(line, new_width)
self.info[self.comboBox_layer.Text][net][n] = (line, new_width)
self.refreshListBox()
except:
logging.exception('Error')
MessageBox.Show('Invalid Input!')
self.refreshListBox()
def Form1_Load(self, sender, e):
self.info = getLayerLineInfo()
for layer in self.info:
self.comboBox_layer.Items.Add(layer)
self.comboBox_layer.SelectedIndex = 0
if __name__ == '__main__':
form = MyForm()
form.ShowDialog()
    form.Dispose()
AddWarningMessage('Good Bye!')
#form.Show()
#oDesktop.PauseScript()
| 38.375546
| 128
| 0.600819
|
import os, clr
os.chdir(os.path.dirname(__file__))
clr.AddReference('System.Drawing')
clr.AddReference('System.Windows.Forms')
from System import Drawing, Array, ComponentModel, Diagnostics, IO
from System.Windows import Forms
import System.Object as object
import System.String as string
from System.Windows.Forms import MessageBox
from collections import OrderedDict
import logging
logging.basicConfig(filename='./message.log', level=logging.DEBUG, filemode='w', format='%(message)s')
import re
import ScriptEnv
ScriptEnv.Initialize("Ansoft.ElectronicsDesktop")
oDesktop.RestoreWindow()
oDesktop.ClearMessages("", "", 2)
oProject = oDesktop.GetActiveProject()
oDesign = oProject.GetActiveDesign()
oEditor = oDesign.GetActiveEditor()
lines = oEditor.FindObjects('type', 'line')
def getLayerLineInfo():
data = OrderedDict()
for layer in oEditor.GetStackupLayerNames():
objs = oEditor.FindObjects('layer', layer)
data[layer] = list(set(lines).intersection(set(objs)))
result = OrderedDict()
for layer in data:
if not bool(data[layer]):
continue
result[layer] = {}
for line in data[layer]:
net = oEditor.GetPropertyValue('BaseElementTab', line, 'Net')
line_width = oEditor.GetPropertyValue('BaseElementTab', line, 'LineWidth')
try:
result[layer][net] +=[(line, line_width)]
            except KeyError:
result[layer][net] = [(line, line_width)]
return result
def changeLineWidth(line, width):
oEditor.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:BaseElementTab",
[
"NAME:PropServers",
line
],
[
"NAME:ChangedProps",
[
"NAME:LineWidth",
"Value:=" , width
]
]
]
])
class MyForm(Forms.Form):
def __init__(self):
self.label1 = Forms.Label()
self.label2 = Forms.Label()
self.listBox_selection = Forms.ListBox()
self.comboBox_layer = Forms.ComboBox()
self.textBox_net = Forms.TextBox()
self.label3 = Forms.Label()
self.textBox_linewidth = Forms.TextBox()
self.button_change = Forms.Button()
self.label4 = Forms.Label()
self.SuspendLayout()
self.label1.AutoSize = True
self.label1.Location = Drawing.Point(13, 10)
self.label1.Name = "label1"
self.label1.Size = Drawing.Size(50, 19)
self.label1.TabIndex = 0
self.label1.Text = "Layer:"
self.label2.AutoSize = True
self.label2.Location = Drawing.Point(13, 72)
self.label2.Name = "label2"
self.label2.Size = Drawing.Size(103, 19)
self.label2.TabIndex = 1
self.label2.Text = "Net Keyword:"
self.label2.Click += self.label2_Click
self.listBox_selection.FormattingEnabled = True
self.listBox_selection.ItemHeight = 19
self.listBox_selection.Location = Drawing.Point(174, 32)
self.listBox_selection.Name = "listBox_selection"
self.listBox_selection.SelectionMode = Forms.SelectionMode.MultiExtended
self.listBox_selection.Size = Drawing.Size(225, 308)
self.listBox_selection.TabIndex = 2
self.listBox_selection.SelectedIndexChanged += self.listBox_selection_SelectedIndexChanged
self.comboBox_layer.FormattingEnabled = True
self.comboBox_layer.Location = Drawing.Point(13, 32)
self.comboBox_layer.Name = "comboBox_layer"
self.comboBox_layer.Size = Drawing.Size(151, 27)
self.comboBox_layer.TabIndex = 3
self.comboBox_layer.SelectedIndexChanged += self.comboBox_layer_SelectedIndexChanged
self.textBox_net.Location = Drawing.Point(13, 94)
self.textBox_net.Name = "textBox_net"
self.textBox_net.Size = Drawing.Size(151, 27)
self.textBox_net.TabIndex = 4
self.textBox_net.Text = ".*"
self.textBox_net.TextChanged += self.textBox_net_TextChanged
self.label3.AutoSize = True
self.label3.Location = Drawing.Point(13, 207)
self.label3.Name = "label3"
self.label3.Size = Drawing.Size(88, 19)
self.label3.TabIndex = 5
self.label3.Text = "Line Width:"
self.textBox_linewidth.Location = Drawing.Point(13, 229)
self.textBox_linewidth.Name = "textBox_linewidth"
self.textBox_linewidth.Size = Drawing.Size(151, 27)
self.textBox_linewidth.TabIndex = 6
self.button_change.Font = Drawing.Font("Microsoft JhengHei UI", 12, Drawing.FontStyle.Bold, Drawing.GraphicsUnit.Point)
self.button_change.Location = Drawing.Point(13, 278)
self.button_change.Name = "button_change"
self.button_change.Size = Drawing.Size(151, 62)
self.button_change.TabIndex = 7
self.button_change.Text = "CHANGE"
self.button_change.UseVisualStyleBackColor = True
self.button_change.Click += self.button_change_Click
self.label4.AutoSize = True
self.label4.Location = Drawing.Point(174, 10)
self.label4.Name = "label4"
self.label4.Size = Drawing.Size(104, 19)
self.label4.TabIndex = 8
self.label4.Text = "Net Selection:"
self.AutoScaleDimensions = Drawing.SizeF(9, 19)
self.AutoScaleMode = Forms.AutoScaleMode.Font
self.ClientSize = Drawing.Size(412, 353)
self.Controls.Add(self.label4)
self.Controls.Add(self.button_change)
self.Controls.Add(self.textBox_linewidth)
self.Controls.Add(self.label3)
self.Controls.Add(self.textBox_net)
self.Controls.Add(self.comboBox_layer)
self.Controls.Add(self.listBox_selection)
self.Controls.Add(self.label2)
self.Controls.Add(self.label1)
self.FormBorderStyle = Forms.FormBorderStyle.FixedSingle
self.MaximizeBox = False
self.MinimizeBox = False
self.MinimumSize = Drawing.Size(400, 400)
self.Name = "Form1"
self.Padding = Forms.Padding(10)
self.SizeGripStyle = Forms.SizeGripStyle.Show
self.StartPosition = Forms.FormStartPosition.CenterScreen
self.Text = "Line Width Editor"
self.TopMost = True
self.Load += self.Form1_Load
self.ResumeLayout(False)
self.PerformLayout()
def refreshListBox(self):
self.listBox_selection.Items.Clear()
for net in self.info[self.comboBox_layer.Text]:
if re.search(self.textBox_net.Text, net):
width = self.info[self.comboBox_layer.Text][net][0][1]
self.listBox_selection.Items.Add('{} - {}'.format(net, width))
def textBox_net_TextChanged(self, sender, e):
self.refreshListBox()
def label2_Click(self, sender, e):
pass
def listBox_selection_SelectedIndexChanged(self, sender, e):
pass
def comboBox_layer_SelectedIndexChanged(self, sender, e):
self.refreshListBox()
def button_change_Click(self, sender, e):
try:
new_width = self.textBox_linewidth.Text
for net_width in self.listBox_selection.SelectedItems:
net = net_width.split()[0]
for n, (line, width) in enumerate(self.info[self.comboBox_layer.Text][net]):
changeLineWidth(line, new_width)
self.info[self.comboBox_layer.Text][net][n] = (line, new_width)
self.refreshListBox()
except:
logging.exception('Error')
MessageBox.Show('Invalid Input!')
self.refreshListBox()
def Form1_Load(self, sender, e):
self.info = getLayerLineInfo()
for layer in self.info:
self.comboBox_layer.Items.Add(layer)
self.comboBox_layer.SelectedIndex = 0
if __name__ == '__main__':
form = MyForm()
form.ShowDialog()
    form.Dispose()
AddWarningMessage('Good Bye!')
| true
| true
|
790ead42dcb15afcce656866dd10d5b70d4fa931
| 634
|
py
|
Python
|
rdd/nasaApacheWebLogs/UnionLogSolutions.py
|
shubozhang/pyspark-tutorial
|
244f69bc75ad4238a00151dc7802bbb63e6f35e1
|
[
"MIT"
] | null | null | null |
rdd/nasaApacheWebLogs/UnionLogSolutions.py
|
shubozhang/pyspark-tutorial
|
244f69bc75ad4238a00151dc7802bbb63e6f35e1
|
[
"MIT"
] | null | null | null |
rdd/nasaApacheWebLogs/UnionLogSolutions.py
|
shubozhang/pyspark-tutorial
|
244f69bc75ad4238a00151dc7802bbb63e6f35e1
|
[
"MIT"
] | null | null | null |
from pyspark import SparkContext, SparkConf
def isNotHeader(line: str):
return not (line.startswith("host") and "bytes" in line)
if __name__ == "__main__":
conf = SparkConf().setAppName("unionLogs").setMaster("local[*]")
sc = SparkContext(conf = conf)
julyFirstLogs = sc.textFile("in/nasa_19950701.tsv")
augustFirstLogs = sc.textFile("in/nasa_19950801.tsv")
aggregatedLogLines = julyFirstLogs.union(augustFirstLogs)
cleanLogLines = aggregatedLogLines.filter(isNotHeader)
sample = cleanLogLines.sample(withReplacement = True, fraction = 0.1)
sample.saveAsTextFile("out/sample_nasa_logs.csv")
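# A hedged variant (not part of the original script): the RDD API's
# takeSample draws a fixed-size sample instead of a fractional one; the
# size 100 below is an arbitrary illustration reusing cleanLogLines.
exact_sample = cleanLogLines.takeSample(withReplacement=False, num=100)
print(exact_sample[:5])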
| 31.7
| 73
| 0.733438
|
from pyspark import SparkContext, SparkConf
def isNotHeader(line: str):
return not (line.startswith("host") and "bytes" in line)
if __name__ == "__main__":
conf = SparkConf().setAppName("unionLogs").setMaster("local[*]")
sc = SparkContext(conf = conf)
julyFirstLogs = sc.textFile("in/nasa_19950701.tsv")
augustFirstLogs = sc.textFile("in/nasa_19950801.tsv")
aggregatedLogLines = julyFirstLogs.union(augustFirstLogs)
cleanLogLines = aggregatedLogLines.filter(isNotHeader)
sample = cleanLogLines.sample(withReplacement = True, fraction = 0.1)
sample.saveAsTextFile("out/sample_nasa_logs.csv")
| true
| true
|
790ead91c052011dccbb2364857a6b2b2af82a03
| 4,202
|
py
|
Python
|
cishouseholds/pyspark_utils.py
|
ONS-SST/cis_households
|
e475df5929e6763a46cd05aff1f7e960ccbe8e21
|
[
"MIT"
] | null | null | null |
cishouseholds/pyspark_utils.py
|
ONS-SST/cis_households
|
e475df5929e6763a46cd05aff1f7e960ccbe8e21
|
[
"MIT"
] | 252
|
2021-05-19T11:12:43.000Z
|
2022-03-02T10:39:10.000Z
|
cishouseholds/pyspark_utils.py
|
ONS-SST/cis_households
|
e475df5929e6763a46cd05aff1f7e960ccbe8e21
|
[
"MIT"
] | null | null | null |
from typing import Any
from typing import Mapping
from pandas.core.frame import DataFrame
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType
from cishouseholds.pipeline.config import get_config
sessions = {
"s": (
SparkSession.builder.config("spark.executor.memory", "1g")
.config("spark.executor.cores", 1)
.config("spark.dynamicAllocation.enabled", "true")
.config("spark.dynamicAllocation.maxExecutors", 3)
.config("spark.sql.shuffle.partitions", 12)
.config("spark.ui.showConsoleProgress", "false")
.config("spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation", "true")
.config("spark.shuffle.service.enabled", "true")
.config("spark.sql.crossJoin.enabled", "true")
.appName("cishouseholds")
.enableHiveSupport()
.getOrCreate()
),
"m": (
SparkSession.builder.config("spark.executor.memory", "6g")
.config("spark.executor.cores", 3)
.config("spark.dynamicAllocation.enabled", "true")
.config("spark.dynamicAllocation.maxExecutors", 3)
.config("spark.sql.shuffle.partitions", 18)
.config("spark.ui.showConsoleProgress", "false")
.config("spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation", "true")
.config("spark.shuffle.service.enabled", "true")
.config("spark.debug.maxToStringFields", 2000)
.config("spark.sql.crossJoin.enabled", "true")
.appName("cishouseholds")
.enableHiveSupport()
.getOrCreate()
),
"l": (
SparkSession.builder.config("spark.executor.memory", "10g")
.config("spark.yarn.executor.memoryOverhead", "1g")
.config("spark.executor.cores", 5)
.config("spark.dynamicAllocation.enabled", "true")
.config("spark.dynamicAllocation.maxExecutors", 5)
.config("spark.sql.shuffle.partitions", 200)
.config("spark.ui.showConsoleProgress", "false")
.config("spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation", "true")
.config("spark.shuffle.service.enabled", "true")
.config("spark.sql.crossJoin.enabled", "true")
.appName("cishouseholds")
.enableHiveSupport()
.getOrCreate()
),
"xl": (
SparkSession.builder.config("spark.executor.memory", "20g")
.config("spark.yarn.executor.memoryOverhead", "2g")
.config("spark.executor.cores", 5)
.config("spark.dynamicAllocation.enabled", "true")
.config("spark.dynamicAllocation.maxExecutors", 12)
.config("spark.sql.shuffle.partitions", 240)
.config("spark.shuffle.service.enabled", "true")
.config("spark.ui.showConsoleProgress", "false")
.config("spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation", "true")
.config("spark.shuffle.service.enabled", "true")
.config("spark.sql.crossJoin.enabled", "true")
.appName("cishouseholds")
.enableHiveSupport()
.getOrCreate()
),
}
def convert_cerberus_schema_to_pyspark(schema: Mapping[str, Any]) -> StructType:
"""
Convert a cerberus validation schema to a pyspark schema.
Assumes that schema is not nested.
The following are required in a spark schema:
* `nullable` is False by default
* `metadata` is an empty dict by default
* `name` is the name of the field
"""
fields = [
{"metadata": {}, "name": name, "nullable": True, **values}
for name, values in schema.items()
if isinstance(values, dict)
]
return StructType.fromJson({"fields": fields, "type": "struct"})
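# Minimal usage sketch (not part of the original module; the field names and
# type specs below are illustrative):
example_schema = {
    "participant_id": {"type": "string", "nullable": False},
    "visit_number": {"type": "integer"},
}
print(convert_cerberus_schema_to_pyspark(example_schema).fieldNames())
# ['participant_id', 'visit_number']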
def get_or_create_spark_session() -> SparkSession:
"""
Create a spark_session, hiding console progress and enabling HIVE table overwrite.
Session size is configured via pipeline config.
"""
config = get_config()
session_size = config.get("pyspark_session_size", "m")
spark_session = sessions[session_size]
return spark_session
def column_to_list(df: DataFrame, column_name: str):
"""Fast collection of all records in a column to a standard list."""
return [row[column_name] for row in df.collect()]
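# Hedged end-to-end sketch of the helpers above (assumes a working Spark
# install and a pipeline config that resolves a session size):
spark = get_or_create_spark_session()
df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "label"])
print(column_to_list(df, "id"))  # [1, 2]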
| 39.271028
| 90
| 0.6604
|
from typing import Any
from typing import Mapping
from pandas.core.frame import DataFrame
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType
from cishouseholds.pipeline.config import get_config
sessions = {
"s": (
SparkSession.builder.config("spark.executor.memory", "1g")
.config("spark.executor.cores", 1)
.config("spark.dynamicAllocation.enabled", "true")
.config("spark.dynamicAllocation.maxExecutors", 3)
.config("spark.sql.shuffle.partitions", 12)
.config("spark.ui.showConsoleProgress", "false")
.config("spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation", "true")
.config("spark.shuffle.service.enabled", "true")
.config("spark.sql.crossJoin.enabled", "true")
.appName("cishouseholds")
.enableHiveSupport()
.getOrCreate()
),
"m": (
SparkSession.builder.config("spark.executor.memory", "6g")
.config("spark.executor.cores", 3)
.config("spark.dynamicAllocation.enabled", "true")
.config("spark.dynamicAllocation.maxExecutors", 3)
.config("spark.sql.shuffle.partitions", 18)
.config("spark.ui.showConsoleProgress", "false")
.config("spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation", "true")
.config("spark.shuffle.service.enabled", "true")
.config("spark.debug.maxToStringFields", 2000)
.config("spark.sql.crossJoin.enabled", "true")
.appName("cishouseholds")
.enableHiveSupport()
.getOrCreate()
),
"l": (
SparkSession.builder.config("spark.executor.memory", "10g")
.config("spark.yarn.executor.memoryOverhead", "1g")
.config("spark.executor.cores", 5)
.config("spark.dynamicAllocation.enabled", "true")
.config("spark.dynamicAllocation.maxExecutors", 5)
.config("spark.sql.shuffle.partitions", 200)
.config("spark.ui.showConsoleProgress", "false")
.config("spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation", "true")
.config("spark.shuffle.service.enabled", "true")
.config("spark.sql.crossJoin.enabled", "true")
.appName("cishouseholds")
.enableHiveSupport()
.getOrCreate()
),
"xl": (
SparkSession.builder.config("spark.executor.memory", "20g")
.config("spark.yarn.executor.memoryOverhead", "2g")
.config("spark.executor.cores", 5)
.config("spark.dynamicAllocation.enabled", "true")
.config("spark.dynamicAllocation.maxExecutors", 12)
.config("spark.sql.shuffle.partitions", 240)
.config("spark.shuffle.service.enabled", "true")
.config("spark.ui.showConsoleProgress", "false")
.config("spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation", "true")
.config("spark.shuffle.service.enabled", "true")
.config("spark.sql.crossJoin.enabled", "true")
.appName("cishouseholds")
.enableHiveSupport()
.getOrCreate()
),
}
def convert_cerberus_schema_to_pyspark(schema: Mapping[str, Any]) -> StructType:
fields = [
{"metadata": {}, "name": name, "nullable": True, **values}
for name, values in schema.items()
if isinstance(values, dict)
]
return StructType.fromJson({"fields": fields, "type": "struct"})
def get_or_create_spark_session() -> SparkSession:
config = get_config()
session_size = config.get("pyspark_session_size", "m")
spark_session = sessions[session_size]
return spark_session
def column_to_list(df: DataFrame, column_name: str):
return [row[column_name] for row in df.collect()]
| true
| true
|
790eafa18e5a22240a98a743b38eb3a53048bc2a
| 839
|
py
|
Python
|
models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_personreid-res18_market1501_176_80_1.1G_1.3/code/network/__init__.py
|
guochunhe/Vitis-AI
|
e86b6efae11f8703ee647e4a99004dc980b84989
|
[
"Apache-2.0"
] | 1
|
2020-12-18T14:49:19.000Z
|
2020-12-18T14:49:19.000Z
|
models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_personreid-res50_market1501_256_128_5.4G_1.3/code/network/__init__.py
|
guochunhe/Vitis-AI
|
e86b6efae11f8703ee647e4a99004dc980b84989
|
[
"Apache-2.0"
] | null | null | null |
models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_personreid-res50_market1501_256_128_5.4G_1.3/code/network/__init__.py
|
guochunhe/Vitis-AI
|
e86b6efae11f8703ee647e4a99004dc980b84989
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# encoding: utf-8
"""
@author: liaoxingyu
@contact: xyliao1993@qq.com
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .baseline import Baseline
| 28.931034
| 74
| 0.771156
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .baseline import Baseline
| true
| true
|
790eb016ed428bf6f36cf13323eb606f36352516
| 8,815
|
py
|
Python
|
assignments/2021/assignment1/cs231n/classifiers/k_nearest_neighbor.py
|
Michellemingxuan/stanford_cs231n
|
b1d0a5a4a3b2fe5d685e34a4ebd810cbc56ec143
|
[
"MIT"
] | null | null | null |
assignments/2021/assignment1/cs231n/classifiers/k_nearest_neighbor.py
|
Michellemingxuan/stanford_cs231n
|
b1d0a5a4a3b2fe5d685e34a4ebd810cbc56ec143
|
[
"MIT"
] | null | null | null |
assignments/2021/assignment1/cs231n/classifiers/k_nearest_neighbor.py
|
Michellemingxuan/stanford_cs231n
|
b1d0a5a4a3b2fe5d685e34a4ebd810cbc56ec143
|
[
"MIT"
] | null | null | null |
from builtins import range
from builtins import object
import numpy as np
from past.builtins import xrange
class KNearestNeighbor(object):
""" a kNN classifier with L2 distance """
def __init__(self):
pass
def train(self, X, y):
"""
Train the classifier. For k-nearest neighbors this is just
memorizing the training data.
Inputs:
- X: A numpy array of shape (num_train, D) containing the training data
consisting of num_train samples each of dimension D.
- y: A numpy array of shape (N,) containing the training labels, where
y[i] is the label for X[i].
"""
self.X_train = X
self.y_train = y
def predict(self, X, k=1, num_loops=0):
"""
Predict labels for test data using this classifier.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data consisting
of num_test samples each of dimension D.
- k: The number of nearest neighbors that vote for the predicted labels.
- num_loops: Determines which implementation to use to compute distances
between training points and testing points.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
if num_loops == 0:
dists = self.compute_distances_no_loops(X)
elif num_loops == 1:
dists = self.compute_distances_one_loop(X)
elif num_loops == 2:
dists = self.compute_distances_two_loops(X)
else:
raise ValueError("Invalid value %d for num_loops" % num_loops)
return self.predict_labels(dists, k=k)
def compute_distances_two_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a nested loop over both the training data and the
test data.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data.
Returns:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
is the Euclidean distance between the ith test point and the jth training
point.
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
for j in range(num_train):
#####################################################################
# TODO: #
# Compute the l2 distance between the ith test point and the jth #
# training point, and store the result in dists[i, j]. You should #
# not use a loop over dimension, nor use np.linalg.norm(). #
#####################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
dists[i, j] = np.sqrt(sum((X[i, ] - self.X_train[j, ]) ** 2))
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return dists
def compute_distances_one_loop(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a single loop over the test data.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
#######################################################################
# TODO: #
# Compute the l2 distance between the ith test point and all training #
# points, and store the result in dists[i, :]. #
# Do not use np.linalg.norm(). #
#######################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
dists[i, :] = np.sqrt(np.sum((self.X_train - X[i, :]) ** 2, 1))
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return dists
def compute_distances_no_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
#########################################################################
# TODO: #
# Compute the l2 distance between all test points and all training #
# points without using any explicit loops, and store the result in #
# dists. #
# #
# You should implement this function using only basic array operations; #
# in particular you should not use functions from scipy, #
# nor use np.linalg.norm(). #
# #
# HINT: Try to formulate the l2 distance using matrix multiplication #
# and two broadcast sums. #
#########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
dists = np.sqrt(
np.sum((self.X_train[np.newaxis, :] - X[np.newaxis, :].reshape((num_test, 1, X.shape[1]))) ** 2, 2))
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return dists
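# Editor's sketch: the hint above admits an equivalent, memory-friendlier
# formulation via matrix multiplication and two broadcast sums, using
# ||x - t||^2 = ||x||^2 - 2*x.t + ||t||^2 over all test/train pairs:
# test_sq = np.sum(X ** 2, axis=1, keepdims=True)   # (num_test, 1)
# train_sq = np.sum(self.X_train ** 2, axis=1)      # (num_train,)
# dists = np.sqrt(test_sq - 2 * X.dot(self.X_train.T) + train_sq)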
def predict_labels(self, dists, k=1):
"""
Given a matrix of distances between test points and training points,
predict a label for each test point.
Inputs:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
gives the distance between the ith test point and the jth training point.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
for i in range(num_test):
# A list of length k storing the labels of the k nearest neighbors to
# the ith test point.
closest_y = []
#########################################################################
# TODO: #
# Use the distance matrix to find the k nearest neighbors of the ith #
# testing point, and use self.y_train to find the labels of these #
# neighbors. Store these labels in closest_y. #
# Hint: Look up the function numpy.argsort. #
#########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
closest_y = self.y_train[dists[i, ].argsort()[:k]]
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
#########################################################################
# TODO: #
# Now that you have found the labels of the k nearest neighbors, you #
# need to find the most common label in the list closest_y of labels. #
# Store this label in y_pred[i]. Break ties by choosing the smaller #
# label. #
#########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
unique, counts = np.unique(closest_y, return_counts=True)
y_pred[i] = unique[np.argmax(counts)]
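# np.unique returns the candidate labels sorted ascending and np.argmax
# picks the first maximum, so ties break toward the smaller label as required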
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return y_pred
| 47.392473
| 112
| 0.473511
|
from builtins import range
from builtins import object
import numpy as np
from past.builtins import xrange
class KNearestNeighbor(object):
def __init__(self):
pass
def train(self, X, y):
self.X_train = X
self.y_train = y
def predict(self, X, k=1, num_loops=0):
if num_loops == 0:
dists = self.compute_distances_no_loops(X)
elif num_loops == 1:
dists = self.compute_distances_one_loop(X)
elif num_loops == 2:
dists = self.compute_distances_two_loops(X)
else:
raise ValueError("Invalid value %d for num_loops" % num_loops)
return self.predict_labels(dists, k=k)
def compute_distances_two_loops(self, X):
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
for j in range(num_train):
| true
| true
|
790eb0595d544aed4675acfaf8ae9eb4be14a58f
| 526
|
py
|
Python
|
Algorithm/coding_interviews/Python/sword-for-offer/57_find_num_with_sum.py
|
ck76/awesome-cs
|
48cba4081dc5290f07e305850b9a3a7e8a590b64
|
[
"Apache-2.0"
] | 1
|
2021-11-16T13:37:41.000Z
|
2021-11-16T13:37:41.000Z
|
Algorithm/coding_interviews/Python/sword-for-offer/57_find_num_with_sum.py
|
ck76/awesome-cs
|
48cba4081dc5290f07e305850b9a3a7e8a590b64
|
[
"Apache-2.0"
] | null | null | null |
Algorithm/coding_interviews/Python/sword-for-offer/57_find_num_with_sum.py
|
ck76/awesome-cs
|
48cba4081dc5290f07e305850b9a3a7e8a590b64
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2019/3/10 7:44 PM
# @Author : xiaoliji
# @Email : yutian9527@gmail.com
"""
Find the pair of numbers whose sum is s.
>>> nums = [1, 2, 4, 7, 11, 15]
>>> FindNumbersWithSum(nums, 15)
(4, 11)
"""
def FindNumbersWithSum(array: list, tsum: int) -> tuple:
l, r = 0, len(array)-1
while l < r:
if array[l] + array[r] < tsum:
l += 1
elif array[l]+array[r] > tsum:
r -= 1
else:
return array[l], array[r]
return ()
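# Hedged usage note: the two-pointer scan assumes the input is sorted
# ascending, as in the doctest; when no matching pair exists it returns ():
assert FindNumbersWithSum([1, 2, 4, 7, 11, 15], 10) == ()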
| 21.916667
| 56
| 0.48289
|
def FindNumbersWithSum(array: list, tsum: int) -> tuple:
l, r = 0, len(array)-1
while l < r:
if array[l] + array[r] < tsum:
l += 1
elif array[l]+array[r] > tsum:
r -= 1
else:
return array[l], array[r]
return ()
| true
| true
|
790eb18722b1eac69480ab1eb00f7c121f6ba66c
| 2,520
|
py
|
Python
|
build/linux/unbundle/replace_gn_files.py
|
google-ar/chromium
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777
|
2017-08-29T15:15:32.000Z
|
2022-03-21T05:29:41.000Z
|
build/linux/unbundle/replace_gn_files.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66
|
2017-08-30T18:31:18.000Z
|
2021-08-02T10:59:35.000Z
|
build/linux/unbundle/replace_gn_files.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123
|
2017-08-30T01:19:34.000Z
|
2022-03-17T22:55:31.000Z
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Replaces GN files in tree with files from here that
make the build use system libraries.
"""
from __future__ import print_function
import argparse
import os
import shutil
import sys
REPLACEMENTS = {
'ffmpeg': 'third_party/ffmpeg/BUILD.gn',
'flac': 'third_party/flac/BUILD.gn',
'harfbuzz-ng': 'third_party/harfbuzz-ng/BUILD.gn',
'icu': 'third_party/icu/BUILD.gn',
'libevent': 'base/third_party/libevent/BUILD.gn',
'libjpeg': 'build/secondary/third_party/libjpeg_turbo/BUILD.gn',
'libpng': 'third_party/libpng/BUILD.gn',
'libvpx': 'third_party/libvpx/BUILD.gn',
'libwebp': 'third_party/libwebp/BUILD.gn',
'libxml': 'third_party/libxml/BUILD.gn',
'libxslt': 'third_party/libxslt/BUILD.gn',
're2': 'third_party/re2/BUILD.gn',
'snappy': 'third_party/snappy/BUILD.gn',
'yasm': 'third_party/yasm/yasm_assemble.gni',
'zlib': 'third_party/zlib/BUILD.gn',
}
def DoMain(argv):
my_dirname = os.path.dirname(__file__)
source_tree_root = os.path.abspath(
os.path.join(my_dirname, '..', '..', '..'))
parser = argparse.ArgumentParser()
parser.add_argument('--system-libraries', nargs='*', default=[])
parser.add_argument('--undo', action='store_true')
args = parser.parse_args(argv)
handled_libraries = set()
for lib, path in REPLACEMENTS.items():
if lib not in args.system_libraries:
continue
handled_libraries.add(lib)
if args.undo:
# Restore original file, and also remove the backup.
# This is meant to restore the source tree to its original state.
os.rename(os.path.join(source_tree_root, path + '.orig'),
os.path.join(source_tree_root, path))
else:
# Create a backup copy for --undo.
shutil.copyfile(os.path.join(source_tree_root, path),
os.path.join(source_tree_root, path + '.orig'))
# Copy the GN file from directory of this script to target path.
shutil.copyfile(os.path.join(my_dirname, '%s.gn' % lib),
os.path.join(source_tree_root, path))
unhandled_libraries = set(args.system_libraries) - handled_libraries
if unhandled_libraries:
print('Unrecognized system libraries requested: %s' % ', '.join(
sorted(unhandled_libraries)), file=sys.stderr)
return 1
return 0
if __name__ == '__main__':
sys.exit(DoMain(sys.argv[1:]))
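# Hedged usage sketch, driving DoMain directly; the library names are
# illustrative and must be keys of REPLACEMENTS:
# DoMain(['--system-libraries', 'libpng', 'zlib'])            # install system BUILD.gn files
# DoMain(['--undo', '--system-libraries', 'libpng', 'zlib'])  # restore the originals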
| 31.5
| 72
| 0.687698
|
from __future__ import print_function
import argparse
import os
import shutil
import sys
REPLACEMENTS = {
'ffmpeg': 'third_party/ffmpeg/BUILD.gn',
'flac': 'third_party/flac/BUILD.gn',
'harfbuzz-ng': 'third_party/harfbuzz-ng/BUILD.gn',
'icu': 'third_party/icu/BUILD.gn',
'libevent': 'base/third_party/libevent/BUILD.gn',
'libjpeg': 'build/secondary/third_party/libjpeg_turbo/BUILD.gn',
'libpng': 'third_party/libpng/BUILD.gn',
'libvpx': 'third_party/libvpx/BUILD.gn',
'libwebp': 'third_party/libwebp/BUILD.gn',
'libxml': 'third_party/libxml/BUILD.gn',
'libxslt': 'third_party/libxslt/BUILD.gn',
're2': 'third_party/re2/BUILD.gn',
'snappy': 'third_party/snappy/BUILD.gn',
'yasm': 'third_party/yasm/yasm_assemble.gni',
'zlib': 'third_party/zlib/BUILD.gn',
}
def DoMain(argv):
my_dirname = os.path.dirname(__file__)
source_tree_root = os.path.abspath(
os.path.join(my_dirname, '..', '..', '..'))
parser = argparse.ArgumentParser()
parser.add_argument('--system-libraries', nargs='*', default=[])
parser.add_argument('--undo', action='store_true')
args = parser.parse_args(argv)
handled_libraries = set()
for lib, path in REPLACEMENTS.items():
if lib not in args.system_libraries:
continue
handled_libraries.add(lib)
if args.undo:
os.rename(os.path.join(source_tree_root, path + '.orig'),
os.path.join(source_tree_root, path))
else:
shutil.copyfile(os.path.join(source_tree_root, path),
os.path.join(source_tree_root, path + '.orig'))
shutil.copyfile(os.path.join(my_dirname, '%s.gn' % lib),
os.path.join(source_tree_root, path))
unhandled_libraries = set(args.system_libraries) - handled_libraries
if unhandled_libraries:
print('Unrecognized system libraries requested: %s' % ', '.join(
sorted(unhandled_libraries)), file=sys.stderr)
return 1
return 0
if __name__ == '__main__':
sys.exit(DoMain(sys.argv[1:]))
| true
| true
|
790eb24236f11b45a2ecb4687164521b20be05ea
| 8,442
|
py
|
Python
|
mmdet3d/models/dense_heads/assigner/assign_result.py
|
yangzilongdmgy/merge_monster_3d
|
0595e36749d32c3d5537a3f707727a137c82076e
|
[
"Apache-2.0"
] | null | null | null |
mmdet3d/models/dense_heads/assigner/assign_result.py
|
yangzilongdmgy/merge_monster_3d
|
0595e36749d32c3d5537a3f707727a137c82076e
|
[
"Apache-2.0"
] | null | null | null |
mmdet3d/models/dense_heads/assigner/assign_result.py
|
yangzilongdmgy/merge_monster_3d
|
0595e36749d32c3d5537a3f707727a137c82076e
|
[
"Apache-2.0"
] | null | null | null |
# Modification 2020 RangiLyu
# Copyright 2018-2019 Open-MMLab.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ...utils import util_mixins
class AssignResult(util_mixins.NiceRepr):
"""
Stores assignments between predicted and truth boxes.
Attributes:
num_gts (int): the number of truth boxes considered when computing this
assignment
gt_inds (LongTensor): for each predicted box indicates the 1-based
index of the assigned truth box. 0 means unassigned and -1 means
ignore.
max_overlaps (FloatTensor): the iou between the predicted box and its
assigned truth box.
labels (None | LongTensor): If specified, for each predicted box
indicates the category label of the assigned truth box.
Example:
>>> # An assign result between 4 predicted boxes and 9 true boxes
>>> # where only two boxes were assigned.
>>> num_gts = 9
>>> max_overlaps = torch.FloatTensor([0, .5, .9, 0])
>>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
>>> labels = torch.LongTensor([0, 3, 4, 0])
>>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
>>> print(str(self)) # xdoctest: +IGNORE_WANT
<AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),
labels.shape=(4,))>
>>> # Force addition of gt labels (when adding gt as proposals)
>>> new_labels = torch.LongTensor([3, 4, 5])
>>> self.add_gt_(new_labels)
>>> print(str(self)) # xdoctest: +IGNORE_WANT
<AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),
labels.shape=(7,))>
"""
def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
self.num_gts = num_gts
self.gt_inds = gt_inds
self.max_overlaps = max_overlaps
self.labels = labels
# Interface for possible user-defined properties
self._extra_properties = {}
@property
def num_preds(self):
"""int: the number of predictions in this assignment"""
return len(self.gt_inds)
def set_extra_property(self, key, value):
"""Set user-defined new property."""
assert key not in self.info
self._extra_properties[key] = value
def get_extra_property(self, key):
"""Get user-defined property."""
return self._extra_properties.get(key, None)
@property
def info(self):
"""dict: a dictionary of info about the object"""
basic_info = {
"num_gts": self.num_gts,
"num_preds": self.num_preds,
"gt_inds": self.gt_inds,
"max_overlaps": self.max_overlaps,
"labels": self.labels,
}
basic_info.update(self._extra_properties)
return basic_info
def __nice__(self):
"""str: a "nice" summary string describing this assign result"""
parts = []
parts.append(f"num_gts={self.num_gts!r}")
if self.gt_inds is None:
parts.append(f"gt_inds={self.gt_inds!r}")
else:
parts.append(f"gt_inds.shape={tuple(self.gt_inds.shape)!r}")
if self.max_overlaps is None:
parts.append(f"max_overlaps={self.max_overlaps!r}")
else:
parts.append("max_overlaps.shape=" f"{tuple(self.max_overlaps.shape)!r}")
if self.labels is None:
parts.append(f"labels={self.labels!r}")
else:
parts.append(f"labels.shape={tuple(self.labels.shape)!r}")
return ", ".join(parts)
@classmethod
def random(cls, **kwargs):
"""Create random AssignResult for tests or debugging.
Args:
num_preds: number of predicted boxes
num_gts: number of true boxes
p_ignore (float): probability of a predicted box assigned to an
ignored truth
p_assigned (float): probability of a predicted box not being
assigned
p_use_label (float | bool): with labels or not
rng (None | int | numpy.random.RandomState): seed or state
Returns:
:obj:`AssignResult`: Randomly generated assign results.
Example:
>>> from nanodet.model.head.assigner.assign_result import AssignResult
>>> self = AssignResult.random()
>>> print(self.info)
"""
rng = kwargs.get("rng", None)
num_gts = kwargs.get("num_gts", None)
num_preds = kwargs.get("num_preds", None)
p_ignore = kwargs.get("p_ignore", 0.3)
p_assigned = kwargs.get("p_assigned", 0.7)
p_use_label = kwargs.get("p_use_label", 0.5)
num_classes = kwargs.get("p_use_label", 3)
import numpy as np
if rng is None:
rng = np.random.mtrand._rand
elif isinstance(rng, int):
rng = np.random.RandomState(rng)
else:
rng = rng
if num_gts is None:
num_gts = rng.randint(0, 8)
if num_preds is None:
num_preds = rng.randint(0, 16)
if num_gts == 0:
max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
gt_inds = torch.zeros(num_preds, dtype=torch.int64)
if p_use_label is True or p_use_label < rng.rand():
labels = torch.zeros(num_preds, dtype=torch.int64)
else:
labels = None
else:
import numpy as np
# Create an overlap for each predicted box
max_overlaps = torch.from_numpy(rng.rand(num_preds))
# Construct gt_inds for each predicted box
is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
# maximum number of assignments constraints
n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
assigned_idxs = np.where(is_assigned)[0]
rng.shuffle(assigned_idxs)
assigned_idxs = assigned_idxs[0:n_assigned]
assigned_idxs.sort()
is_assigned[:] = 0
is_assigned[assigned_idxs] = True
is_ignore = torch.from_numpy(rng.rand(num_preds) < p_ignore) & is_assigned
gt_inds = torch.zeros(num_preds, dtype=torch.int64)
true_idxs = np.arange(num_gts)
rng.shuffle(true_idxs)
true_idxs = torch.from_numpy(true_idxs)
gt_inds[is_assigned] = true_idxs[:n_assigned]
gt_inds = torch.from_numpy(rng.randint(1, num_gts + 1, size=num_preds))
gt_inds[is_ignore] = -1
gt_inds[~is_assigned] = 0
max_overlaps[~is_assigned] = 0
if p_use_label is True or p_use_label < rng.rand():
if num_classes == 0:
labels = torch.zeros(num_preds, dtype=torch.int64)
else:
labels = torch.from_numpy(
# note that we set FG labels to [0, num_class-1]
# since mmdet v2.0
# BG cat_id: num_class
rng.randint(0, num_classes, size=num_preds)
)
labels[~is_assigned] = 0
else:
labels = None
self = cls(num_gts, gt_inds, max_overlaps, labels)
return self
def add_gt_(self, gt_labels):
"""Add ground truth as assigned results.
Args:
gt_labels (torch.Tensor): Labels of gt boxes
"""
self_inds = torch.arange(
1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device
)
self.gt_inds = torch.cat([self_inds, self.gt_inds])
self.max_overlaps = torch.cat(
[self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps]
)
if self.labels is not None:
self.labels = torch.cat([gt_labels, self.labels])
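# Minimal usage sketch mirroring the class docstring (tensor values are
# illustrative):
res = AssignResult(
    num_gts=2,
    gt_inds=torch.LongTensor([0, 1, 2, 0]),
    max_overlaps=torch.FloatTensor([0.0, 0.6, 0.9, 0.0]),
    labels=torch.LongTensor([0, 1, 2, 0]),
)
res.add_gt_(torch.LongTensor([1, 2]))  # prepend both gt boxes as assigned
print(res.num_preds)                   # 6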
| 37.026316
| 86
| 0.592514
|
import torch
from ...utils import util_mixins
class AssignResult(util_mixins.NiceRepr):
def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
self.num_gts = num_gts
self.gt_inds = gt_inds
self.max_overlaps = max_overlaps
self.labels = labels
self._extra_properties = {}
@property
def num_preds(self):
return len(self.gt_inds)
def set_extra_property(self, key, value):
assert key not in self.info
self._extra_properties[key] = value
def get_extra_property(self, key):
return self._extra_properties.get(key, None)
@property
def info(self):
basic_info = {
"num_gts": self.num_gts,
"num_preds": self.num_preds,
"gt_inds": self.gt_inds,
"max_overlaps": self.max_overlaps,
"labels": self.labels,
}
basic_info.update(self._extra_properties)
return basic_info
def __nice__(self):
parts = []
parts.append(f"num_gts={self.num_gts!r}")
if self.gt_inds is None:
parts.append(f"gt_inds={self.gt_inds!r}")
else:
parts.append(f"gt_inds.shape={tuple(self.gt_inds.shape)!r}")
if self.max_overlaps is None:
parts.append(f"max_overlaps={self.max_overlaps!r}")
else:
parts.append("max_overlaps.shape=" f"{tuple(self.max_overlaps.shape)!r}")
if self.labels is None:
parts.append(f"labels={self.labels!r}")
else:
parts.append(f"labels.shape={tuple(self.labels.shape)!r}")
return ", ".join(parts)
@classmethod
def random(cls, **kwargs):
rng = kwargs.get("rng", None)
num_gts = kwargs.get("num_gts", None)
num_preds = kwargs.get("num_preds", None)
p_ignore = kwargs.get("p_ignore", 0.3)
p_assigned = kwargs.get("p_assigned", 0.7)
p_use_label = kwargs.get("p_use_label", 0.5)
num_classes = kwargs.get("p_use_label", 3)
import numpy as np
if rng is None:
rng = np.random.mtrand._rand
elif isinstance(rng, int):
rng = np.random.RandomState(rng)
else:
rng = rng
if num_gts is None:
num_gts = rng.randint(0, 8)
if num_preds is None:
num_preds = rng.randint(0, 16)
if num_gts == 0:
max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
gt_inds = torch.zeros(num_preds, dtype=torch.int64)
if p_use_label is True or p_use_label < rng.rand():
labels = torch.zeros(num_preds, dtype=torch.int64)
else:
labels = None
else:
import numpy as np
max_overlaps = torch.from_numpy(rng.rand(num_preds))
is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
assigned_idxs = np.where(is_assigned)[0]
rng.shuffle(assigned_idxs)
assigned_idxs = assigned_idxs[0:n_assigned]
assigned_idxs.sort()
is_assigned[:] = 0
is_assigned[assigned_idxs] = True
is_ignore = torch.from_numpy(rng.rand(num_preds) < p_ignore) & is_assigned
gt_inds = torch.zeros(num_preds, dtype=torch.int64)
true_idxs = np.arange(num_gts)
rng.shuffle(true_idxs)
true_idxs = torch.from_numpy(true_idxs)
gt_inds[is_assigned] = true_idxs[:n_assigned]
gt_inds = torch.from_numpy(rng.randint(1, num_gts + 1, size=num_preds))
gt_inds[is_ignore] = -1
gt_inds[~is_assigned] = 0
max_overlaps[~is_assigned] = 0
if p_use_label is True or p_use_label < rng.rand():
if num_classes == 0:
labels = torch.zeros(num_preds, dtype=torch.int64)
else:
labels = torch.from_numpy(
rng.randint(0, num_classes, size=num_preds)
)
labels[~is_assigned] = 0
else:
labels = None
self = cls(num_gts, gt_inds, max_overlaps, labels)
return self
def add_gt_(self, gt_labels):
self_inds = torch.arange(
1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device
)
self.gt_inds = torch.cat([self_inds, self.gt_inds])
self.max_overlaps = torch.cat(
[self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps]
)
if self.labels is not None:
self.labels = torch.cat([gt_labels, self.labels])
| true
| true
|
790eb2559f043c83a117b44792d5ec5eea6716df
| 6,960
|
py
|
Python
|
plugins/String/test.py
|
dregad/Limnoria
|
986913628929c9018e01b82b53638aced50ab0de
|
[
"BSD-3-Clause"
] | 1
|
2021-12-04T20:55:17.000Z
|
2021-12-04T20:55:17.000Z
|
plugins/String/test.py
|
dregad/Limnoria
|
986913628929c9018e01b82b53638aced50ab0de
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/String/test.py
|
dregad/Limnoria
|
986913628929c9018e01b82b53638aced50ab0de
|
[
"BSD-3-Clause"
] | null | null | null |
###
# Copyright (c) 2003-2005, Jeremiah Fincher
# Copyright (c) 2009, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import re
from supybot.test import *
import supybot.utils as utils
nicks = ['fatjim','scn','moshez','LordVan','MetaCosm','pythong','fishfart',
'alb','d0rt','jemfinch','StyxAlso','fors','deltab','gd',
'hellz_hunter','are_j|pub_comp','jason_','dreid','sayke_','winjer',
'TenOfTen','GoNoVas','queuetue','the|zzz','Hellfried','Therion',
'shro','DaCa','rexec','polin8','r0ky','aaron_','ironfroggy','eugene',
'faassen','tirloni','mackstann','Yhg1s','ElBarono','vegai','shang',
'typo_','kikoforgetme','asqui','TazyTiggy','fab','nixman','liiwi',
'AdamV','paolo','red_one','_AleX_','lament','jamessan','supybot',
'macr0_zzz','plaisthos','redghost','disco','mphardy','gt3','mathie',
'jonez','r0ky-office','tic','d33p','ES3merge','talin','af','flippo',
'sholden','ameoba','shepherg','j2','Acapnotic','dash','merlin262',
'Taaus','_moshez','rik','jafo__','blk-majik','JT__','itamar',
'kermit-','davidmccabe','glyph','jojo','dave_p','goo','hyjinx',
'SamB','exarkun','drewp','Ragica','skylan','redgore','k3','Ra1stlin',
'StevenK','carball','h3x','carljm','_jacob','teratorn','frangen',
'phed','datazone','Yaggo','acct_','nowhere','pyn','ThomasWaldmann',
'dunker','pilotLight','brainless','LoganH_','jmpnz','steinn',
'EliasREC','lowks__','OldSmrf','Mad77','snibril','delta','psy',
'skimpIzu','Kengur','MoonFallen','kotkis','Hyperi']
def group(seq, groupSize, noneFill=True):
"""Groups a given sequence into sublists of length groupSize."""
ret = []
L = []
i = groupSize
for elt in seq:
if i > 0:
L.append(elt)
else:
ret.append(L)
i = groupSize
L = []
L.append(elt)
i -= 1
if L:
if noneFill:
while len(L) < groupSize:
L.append(None)
ret.append(L)
return ret
class StringTestCase(PluginTestCase):
plugins = ('String', 'Format', 'Status')
def testLen(self):
self.assertResponse('len foo', '3')
self.assertHelp('len')
def testNoErrors(self):
self.assertNotError('levenshtein Python Perl')
def testSoundex(self):
self.assertNotError('soundex jemfinch')
self.assertNotRegexp('soundex foobar 3:30', 'ValueError')
def testChr(self):
for i in range(256):
c = chr(i)
regexp = r'%s|%s' % (re.escape(c), re.escape(repr(c)))
self.assertRegexp('chr %s' % i, regexp)
def testOrd(self):
for c in map(chr, range(256)):
i = ord(c)
self.assertResponse('ord %s' % utils.str.dqrepr(c), str(i))
def testUnicode(self):
self.assertResponse('unicodename ☃', 'SNOWMAN')
self.assertResponse('unicodesearch SNOWMAN', '☃')
#self.assertResponse('unicodename ?',
# 'No name found for this character.')
self.assertResponse('unicodesearch FOO',
'Error: No character found with this name.')
def testMd5(self):
self.assertResponse('md5 supybot', '1360578d1276e945cc235654a53f9c65')
def testEncodeDecode(self):
# This no longer works correctly. It almost seems like we're throwing
# in a repr() somewhere.
s = 'the recalcitrant jamessan tests his scramble function'
self.assertNotRegexp('encode aldkfja foobar', 'LookupError')
self.assertNotRegexp('decode asdflkj foobar', 'LookupError')
self.assertResponse('decode zlib [encode zlib %s]' % s, s)
self.assertRegexp('decode base64 $BCfBg7;9D;R(B', 'padded with')
def testRe(self):
self.assertResponse('re "m/system time/" [status cpu]', 'system time')
self.assertResponse('re s/user/luser/g user user', 'luser luser')
self.assertResponse('re s/user/luser/ user user', 'luser user')
self.assertNotRegexp('re m/foo/ bar', 'has no attribute')
self.assertResponse('re m/a\\S+y/ "the bot angryman is hairy"', 'angry')
self.assertResponse('re m/a\\S+y/g "the bot angryman is hairy"',
'angry and airy')
def testReNotEmptyString(self):
self.assertError('re s//foo/g blah')
def testReWorksWithJustCaret(self):
self.assertResponse('re s/^/foo/ bar', 'foobar')
def testReNoEscapingUnpackListOfWrongSize(self):
self.assertNotRegexp('re foo bar baz', 'unpack list of wrong size')
def testReBug850931(self):
self.assertResponse(r're s/\b(\w+)\b/\1./g foo bar baz',
'foo. bar. baz.')
def testNotOverlongRe(self):
self.assertError('re [strjoin "" s/./ [eval \'xxx\'*400]] blah blah')
def testXor(self):
# This no longer works correctly. It almost seems like we're throwing
# in a repr() somewhere.
L = [nick for nick in nicks if '|' not in nick and
'[' not in nick and
']' not in nick]
for s0, s1, s2, s3, s4, s5, s6, s7, s8, s9 in group(L, 10):
data = '%s%s%s%s%s%s%s%s%s' % (s0, s1, s2, s3, s4, s5, s6, s7, s8)
self.assertResponse('xor %s [xor %s %s]' % (s9, s9, data), data)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 43.5
| 80
| 0.625862
|
import re
from supybot.test import *
import supybot.utils as utils
nicks = ['fatjim','scn','moshez','LordVan','MetaCosm','pythong','fishfart',
'alb','d0rt','jemfinch','StyxAlso','fors','deltab','gd',
'hellz_hunter','are_j|pub_comp','jason_','dreid','sayke_','winjer',
'TenOfTen','GoNoVas','queuetue','the|zzz','Hellfried','Therion',
'shro','DaCa','rexec','polin8','r0ky','aaron_','ironfroggy','eugene',
'faassen','tirloni','mackstann','Yhg1s','ElBarono','vegai','shang',
'typo_','kikoforgetme','asqui','TazyTiggy','fab','nixman','liiwi',
'AdamV','paolo','red_one','_AleX_','lament','jamessan','supybot',
'macr0_zzz','plaisthos','redghost','disco','mphardy','gt3','mathie',
'jonez','r0ky-office','tic','d33p','ES3merge','talin','af','flippo',
'sholden','ameoba','shepherg','j2','Acapnotic','dash','merlin262',
'Taaus','_moshez','rik','jafo__','blk-majik','JT__','itamar',
'kermit-','davidmccabe','glyph','jojo','dave_p','goo','hyjinx',
'SamB','exarkun','drewp','Ragica','skylan','redgore','k3','Ra1stlin',
'StevenK','carball','h3x','carljm','_jacob','teratorn','frangen',
'phed','datazone','Yaggo','acct_','nowhere','pyn','ThomasWaldmann',
'dunker','pilotLight','brainless','LoganH_','jmpnz','steinn',
'EliasREC','lowks__','OldSmrf','Mad77','snibril','delta','psy',
'skimpIzu','Kengur','MoonFallen','kotkis','Hyperi']
def group(seq, groupSize, noneFill=True):
ret = []
L = []
i = groupSize
for elt in seq:
if i > 0:
L.append(elt)
else:
ret.append(L)
i = groupSize
L = []
L.append(elt)
i -= 1
if L:
if noneFill:
while len(L) < groupSize:
L.append(None)
ret.append(L)
return ret
class StringTestCase(PluginTestCase):
plugins = ('String', 'Format', 'Status')
def testLen(self):
self.assertResponse('len foo', '3')
self.assertHelp('len')
def testNoErrors(self):
self.assertNotError('levenshtein Python Perl')
def testSoundex(self):
self.assertNotError('soundex jemfinch')
self.assertNotRegexp('soundex foobar 3:30', 'ValueError')
def testChr(self):
for i in range(256):
c = chr(i)
regexp = r'%s|%s' % (re.escape(c), re.escape(repr(c)))
self.assertRegexp('chr %s' % i, regexp)
def testOrd(self):
for c in map(chr, range(256)):
i = ord(c)
self.assertResponse('ord %s' % utils.str.dqrepr(c), str(i))
def testUnicode(self):
self.assertResponse('unicodename ☃', 'SNOWMAN')
self.assertResponse('unicodesearch SNOWMAN', '☃')
self.assertResponse('unicodesearch FOO',
'Error: No character found with this name.')
def testMd5(self):
self.assertResponse('md5 supybot', '1360578d1276e945cc235654a53f9c65')
def testEncodeDecode(self):
s = 'the recalcitrant jamessan tests his scramble function'
self.assertNotRegexp('encode aldkfja foobar', 'LookupError')
self.assertNotRegexp('decode asdflkj foobar', 'LookupError')
self.assertResponse('decode zlib [encode zlib %s]' % s, s)
self.assertRegexp('decode base64 $BCfBg7;9D;R(B', 'padded with')
def testRe(self):
self.assertResponse('re "m/system time/" [status cpu]', 'system time')
self.assertResponse('re s/user/luser/g user user', 'luser luser')
self.assertResponse('re s/user/luser/ user user', 'luser user')
self.assertNotRegexp('re m/foo/ bar', 'has no attribute')
self.assertResponse('re m/a\\S+y/ "the bot angryman is hairy"', 'angry')
self.assertResponse('re m/a\\S+y/g "the bot angryman is hairy"',
'angry and airy')
def testReNotEmptyString(self):
self.assertError('re s//foo/g blah')
def testReWorksWithJustCaret(self):
self.assertResponse('re s/^/foo/ bar', 'foobar')
def testReNoEscapingUnpackListOfWrongSize(self):
self.assertNotRegexp('re foo bar baz', 'unpack list of wrong size')
def testReBug850931(self):
self.assertResponse(r're s/\b(\w+)\b/\1./g foo bar baz',
'foo. bar. baz.')
def testNotOverlongRe(self):
self.assertError('re [strjoin "" s/./ [eval \'xxx\'*400]] blah blah')
def testXor(self):
L = [nick for nick in nicks if '|' not in nick and
'[' not in nick and
']' not in nick]
for s0, s1, s2, s3, s4, s5, s6, s7, s8, s9 in group(L, 10):
data = '%s%s%s%s%s%s%s%s%s' % (s0, s1, s2, s3, s4, s5, s6, s7, s8)
self.assertResponse('xor %s [xor %s %s]' % (s9, s9, data), data)
| true
| true
|
790eb3eb87fab7bae92f2aa41b7b1327b2277dc8
| 17,245
|
py
|
Python
|
tests/pipeline/test_node.py
|
laisbsc/kedro
|
abdb51f1fc5a247dc92cca63010cf06a581c5462
|
[
"Apache-2.0"
] | null | null | null |
tests/pipeline/test_node.py
|
laisbsc/kedro
|
abdb51f1fc5a247dc92cca63010cf06a581c5462
|
[
"Apache-2.0"
] | null | null | null |
tests/pipeline/test_node.py
|
laisbsc/kedro
|
abdb51f1fc5a247dc92cca63010cf06a581c5462
|
[
"Apache-2.0"
] | 1
|
2021-08-22T08:16:22.000Z
|
2021-08-22T08:16:22.000Z
|
# Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from functools import partial, update_wrapper, wraps
from typing import Callable
import pytest
from kedro.pipeline import node
# Different dummy funcs based on the number of arguments
def constant_output():
return "output" # pragma: no cover
def identity(input1: str):
return input1 # pragma: no cover
def biconcat(input1: str, input2: str):
return input1 + input2 # pragma: no cover
def triconcat(input1: str, input2: str, input3: str):
return input1 + input2 + input3 # pragma: no cover
@pytest.fixture
def simple_tuple_node_list():
return [
(identity, "A", "B"),
(biconcat, ["A", "B"], "C"),
(identity, "C", ["D", "E"]),
(biconcat, ["H", "I"], ["J", "K"]),
(identity, "J", dict(result="K")),
(biconcat, ["J", "K"], dict(result="L")),
(identity, dict(input1="J"), "L"),
(identity, dict(input1="J"), ["L", "M"]),
(identity, dict(input1="J"), dict(result="K")),
(constant_output, None, "M"),
(biconcat, ["N", "O"], None),
(lambda x: None, "F", "G"),
(lambda x: ("a", "b"), "G", ["X", "Y"]),
]
class TestValidNode:
def test_valid(self, simple_tuple_node_list):
nodes = [node(*tup) for tup in simple_tuple_node_list]
assert len(nodes) == len(simple_tuple_node_list)
def test_get_node_func(self):
test_node = node(identity, "A", "B")
assert test_node.func is identity
def test_set_node_func(self):
test_node = node(identity, "A", "B")
test_node.func = decorated_identity
assert test_node.func is decorated_identity
def test_labelled(self):
assert "labeled_node: <lambda>([input1]) -> [output1]" in str(
node(lambda x: None, "input1", "output1", name="labeled_node")
)
def test_call(self):
dummy_node = node(
biconcat, inputs=["input1", "input2"], outputs="output", name="myname"
)
actual = dummy_node(input1="in1", input2="in2")
expected = dummy_node.run(dict(input1="in1", input2="in2"))
assert actual == expected
def test_call_with_non_keyword_arguments(self):
dummy_node = node(
biconcat, inputs=["input1", "input2"], outputs="output", name="myname"
)
pattern = r"__call__\(\) takes 1 positional argument but 2 were given"
with pytest.raises(TypeError, match=pattern):
dummy_node("in1", input2="in2")
def test_run_with_duplicate_inputs_list(self):
dummy_node = node(func=biconcat, inputs=["input1", "input1"], outputs="output")
actual = dummy_node.run(dict(input1="in1"))
assert actual == {"output": "in1in1"}
def test_run_with_duplicate_inputs_dict(self):
dummy_node = node(
func=biconcat, inputs={"input1": "in1", "input2": "in1"}, outputs="output"
)
actual = dummy_node.run(dict(in1="hello"))
assert actual == {"output": "hellohello"}
def test_no_input(self):
assert "constant_output(None) -> [output1]" in str(
node(constant_output, None, "output1")
)
def test_no_output(self):
assert "<lambda>([input1]) -> None" in str(node(lambda x: None, "input1", None))
def test_inputs_none(self):
dummy_node = node(constant_output, None, "output")
assert dummy_node.inputs == []
def test_inputs_str(self):
dummy_node = node(identity, "input1", "output1")
assert dummy_node.inputs == ["input1"]
def test_inputs_dict(self):
dummy_node = node(
biconcat,
{"input1": "in1", "input2": "in2"},
["output2", "output1", "last node"],
)
inputs = dummy_node.inputs
assert isinstance(inputs, list)
assert len(inputs) == 2
assert set(inputs) == {"in1", "in2"}
def test_inputs_list(self):
dummy_node = node(
triconcat,
["input1", "input2", "another node"],
["output1", "output2", "last node"],
)
assert dummy_node.inputs == ["input1", "input2", "another node"]
def test_outputs_none(self):
dummy_node = node(identity, "input", None)
assert dummy_node.outputs == []
def test_outputs_str(self):
dummy_node = node(identity, "input1", "output1")
assert dummy_node.outputs == ["output1"]
def test_outputs_dict(self):
dummy_node = node(
biconcat, ["input1", "input2"], {"output1": "out1", "output2": "out2"}
)
outputs = dummy_node.outputs
assert isinstance(outputs, list)
assert len(outputs) == 2
assert set(outputs) == {"out1", "out2"}
def test_outputs_list(self):
dummy_node = node(
triconcat,
["input2", "input1", "another node"],
["output2", "output1", "last node"],
)
assert dummy_node.outputs == ["output2", "output1", "last node"]
@pytest.mark.parametrize(
"confirms_arg,expected",
[
(None, []),
([], []),
("foo", ["foo"]),
(["foo"], ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
],
)
def test_confirms(self, confirms_arg, expected):
dummy_node = node(identity, "input", None, confirms=confirms_arg)
assert dummy_node.confirms == expected
class TestNodeComparisons:
def test_node_equals(self):
first = node(identity, "input1", "output1", name="a_node")
second = node(identity, "input1", "output1", name="a_node")
assert first == second
assert first is not second
def test_node_less_than(self):
first = node(identity, "input1", "output1", name="A")
second = node(identity, "input1", "output1", name="B")
assert first < second
assert first is not second
def test_node_invalid_equals(self):
n = node(identity, "input1", "output1", name="a_node")
assert n != "hello"
def test_node_invalid_less_than(self):
n = node(identity, "input1", "output1", name="a_node")
pattern = "'<' not supported between instances of 'Node' and 'str'"
with pytest.raises(TypeError, match=pattern):
n < "hello" # pylint: disable=pointless-statement
def test_different_input_list_order_not_equal(self):
first = node(biconcat, ["input1", "input2"], "output1", name="A")
second = node(biconcat, ["input2", "input1"], "output1", name="A")
assert first != second
def test_different_output_list_order_not_equal(self):
first = node(identity, "input1", ["output1", "output2"], name="A")
second = node(identity, "input1", ["output2", "output1"], name="A")
assert first != second
def test_different_input_dict_order_equal(self):
first = node(biconcat, {"input1": "a", "input2": "b"}, "output1", name="A")
second = node(biconcat, {"input2": "b", "input1": "a"}, "output1", name="A")
assert first == second
def test_different_output_dict_order_equal(self):
first = node(identity, "input1", {"output1": "a", "output2": "b"}, name="A")
second = node(identity, "input1", {"output2": "b", "output1": "a"}, name="A")
assert first == second
def test_input_dict_list_not_equal(self):
first = node(biconcat, ["input1", "input2"], "output1", name="A")
second = node(
biconcat, {"input1": "input1", "input2": "input2"}, "output1", name="A"
)
assert first != second
def test_output_dict_list_not_equal(self):
first = node(identity, "input1", ["output1", "output2"], name="A")
second = node(
identity, "input1", {"output1": "output1", "output2": "output2"}, name="A"
)
assert first != second
def bad_input_type_node():
return lambda x: None, ("A", "D"), "B"
def bad_output_type_node():
return lambda x: None, "A", {"B", "C"}
def bad_function_type_node():
return "A", "B", "C"
def no_input_or_output_node():
return constant_output, None, None
def input_same_as_output_node():
return biconcat, ["A", "B"], dict(a="A")
def duplicate_output_dict_node():
return identity, "A", dict(a="A", b="A")
def duplicate_output_list_node():
return identity, "A", ["A", "A"]
@pytest.mark.parametrize(
"func, expected",
[
(bad_input_type_node, r"`inputs` type must be one of "),
(bad_output_type_node, r"`outputs` type must be one of "),
(bad_function_type_node, r"first argument must be a function"),
(no_input_or_output_node, r"it must have some `inputs` or `outputs`"),
(
input_same_as_output_node,
r"A node cannot have the same inputs and outputs: {\'A\'}",
),
(
duplicate_output_dict_node,
r"Failed to create node identity"
r"\(\[A\]\) -> \[A,A\] due to "
r"duplicate output\(s\) {\'A\'}.",
),
(
duplicate_output_list_node,
r"Failed to create node identity"
r"\(\[A\]\) -> \[A,A\] due to "
r"duplicate output\(s\) {\'A\'}.",
),
],
)
def test_bad_node(func, expected):
with pytest.raises(ValueError, match=expected):
node(*func())
def inconsistent_input_size():
return identity, ["A", "B"], "C"
def inconsistent_input_args():
def dummy_func_args(*args):
return "".join([*args]) # pragma: no cover
return dummy_func_args, {"a": "A"}, "B"
def inconsistent_input_kwargs():
def dummy_func_args(**kwargs):
return list(kwargs.values()) # pragma: no cover
return dummy_func_args, "A", "B"
lambda_identity = lambda input1: input1 # noqa: disable=E731
def lambda_inconsistent_input_size():
return lambda_identity, ["A", "B"], "C"
partial_identity = partial(identity)
def partial_inconsistent_input_size():
return partial_identity, ["A", "B"], "C"
@pytest.mark.parametrize(
"func, expected",
[
(
inconsistent_input_size,
r"Inputs of 'identity' function expected \[\'input1\'\], but got \[\'A\', \'B\'\]",
),
(
inconsistent_input_args,
r"Inputs of 'dummy_func_args' function expected \[\'args\'\], but got {\'a\': \'A\'}",
),
(
inconsistent_input_kwargs,
r"Inputs of 'dummy_func_args' function expected \[\'kwargs\'\], but got A",
),
(
lambda_inconsistent_input_size,
r"Inputs of '<lambda>' function expected \[\'input1\'\], but got \[\'A\', \'B\'\]",
),
(
partial_inconsistent_input_size,
r"Inputs of '<partial>' function expected \[\'input1\'\], but got \[\'A\', \'B\'\]",
),
],
)
def test_bad_input(func, expected):
with pytest.raises(TypeError, match=expected):
node(*func())
def apply_f(func: Callable) -> Callable:
@wraps(func)
def with_f(*args, **kwargs):
return func(*[f"f({a})" for a in args], **kwargs)
return with_f
def apply_g(func: Callable) -> Callable:
@wraps(func)
def with_g(*args, **kwargs):
return func(*[f"g({a})" for a in args], **kwargs)
return with_g
def apply_h(func: Callable) -> Callable:
@wraps(func)
def with_h(*args, **kwargs):
return func(*[f"h({a})" for a in args], **kwargs)
return with_h
def apply_ij(func: Callable) -> Callable:
@wraps(func)
def with_ij(*args, **kwargs):
return func(*[f"ij({a})" for a in args], **kwargs)
return with_ij
@apply_f
def decorated_identity(value):
return value
class TestTagDecorator:
def test_apply_decorators(self):
old_node = node(apply_g(decorated_identity), "input", "output", name="node")
pattern = (
"The node's `decorate` API will be deprecated in Kedro 0.18.0."
"Please use a node's Hooks to extend the node's behaviour in a pipeline."
"For more information, please visit"
"https://kedro.readthedocs.io/en/stable/07_extend_kedro/04_hooks.html"
)
with pytest.warns(DeprecationWarning, match=re.escape(pattern)):
new_node = old_node.decorate(apply_h, apply_ij)
result = new_node.run(dict(input=1))
assert old_node.name == new_node.name
assert "output" in result
assert result["output"] == "f(g(ij(h(1))))"
def test_tag_nodes(self):
tagged_node = node(identity, "input", "output", tags=["hello"]).tag(["world"])
assert "hello" in tagged_node.tags
assert "world" in tagged_node.tags
assert len(tagged_node.tags) == 2
def test_tag_nodes_single_tag(self):
tagged_node = node(identity, "input", "output", tags="hello").tag("world")
assert "hello" in tagged_node.tags
assert "world" in tagged_node.tags
assert len(tagged_node.tags) == 2
def test_tag_and_decorate(self):
tagged_node = node(identity, "input", "output", tags=["hello"])
tagged_node = tagged_node.decorate(apply_f)
tagged_node = tagged_node.tag(["world"])
assert "hello" in tagged_node.tags
assert "world" in tagged_node.tags
assert tagged_node.run(dict(input=1))["output"] == "f(1)"
class TestNames:
def test_named(self):
n = node(identity, ["in"], ["out"], name="name")
assert str(n) == "name: identity([in]) -> [out]"
assert n.name == "name"
assert n.short_name == "name"
@pytest.mark.parametrize("bad_name", ["name,with,comma", "name with space"])
def test_invalid_name(self, bad_name):
pattern = (
f"'{bad_name}' is not a valid node name. "
f"It must contain only letters, digits, hyphens, "
f"underscores and/or fullstops."
)
with pytest.raises(ValueError, match=re.escape(pattern)):
node(identity, ["in"], ["out"], name=bad_name)
def test_namespaced(self):
n = node(identity, ["in"], ["out"], namespace="namespace")
assert str(n) == "identity([in]) -> [out]"
assert n.name == "namespace.identity([in]) -> [out]"
assert n.short_name == "Identity"
def test_named_and_namespaced(self):
n = node(identity, ["in"], ["out"], name="name", namespace="namespace")
assert str(n) == "name: identity([in]) -> [out]"
assert n.name == "namespace.name"
assert n.short_name == "name"
def test_function(self):
n = node(identity, ["in"], ["out"])
assert str(n) == "identity([in]) -> [out]"
assert n.name == "identity([in]) -> [out]"
assert n.short_name == "Identity"
def test_lambda(self):
n = node(lambda a: a, ["in"], ["out"])
assert str(n) == "<lambda>([in]) -> [out]"
assert n.name == "<lambda>([in]) -> [out]"
assert n.short_name == "<Lambda>"
def test_partial(self):
n = node(partial(identity), ["in"], ["out"])
assert str(n) == "<partial>([in]) -> [out]"
assert n.name == "<partial>([in]) -> [out]"
assert n.short_name == "<Partial>"
def test_updated_partial(self):
n = node(update_wrapper(partial(identity), identity), ["in"], ["out"])
assert str(n) == "identity([in]) -> [out]"
assert n.name == "identity([in]) -> [out]"
assert n.short_name == "Identity"
def test_updated_partial_dict_inputs(self):
n = node(
update_wrapper(partial(biconcat, input1=["in1"]), biconcat),
dict(input2="in2"),
["out"],
)
assert str(n) == "biconcat([in2]) -> [out]"
assert n.name == "biconcat([in2]) -> [out]"
assert n.short_name == "Biconcat"
| 33.747554
| 98
| 0.595071
|
import re
from functools import partial, update_wrapper, wraps
from typing import Callable
import pytest
from kedro.pipeline import node
def constant_output():
return "output"
def identity(input1: str):
return input1
def biconcat(input1: str, input2: str):
return input1 + input2
def triconcat(input1: str, input2: str, input3: str):
return input1 + input2 + input3
@pytest.fixture
def simple_tuple_node_list():
return [
(identity, "A", "B"),
(biconcat, ["A", "B"], "C"),
(identity, "C", ["D", "E"]),
(biconcat, ["H", "I"], ["J", "K"]),
(identity, "J", dict(result="K")),
(biconcat, ["J", "K"], dict(result="L")),
(identity, dict(input1="J"), "L"),
(identity, dict(input1="J"), ["L", "M"]),
(identity, dict(input1="J"), dict(result="K")),
(constant_output, None, "M"),
(biconcat, ["N", "O"], None),
(lambda x: None, "F", "G"),
(lambda x: ("a", "b"), "G", ["X", "Y"]),
]
class TestValidNode:
def test_valid(self, simple_tuple_node_list):
nodes = [node(*tup) for tup in simple_tuple_node_list]
assert len(nodes) == len(simple_tuple_node_list)
def test_get_node_func(self):
test_node = node(identity, "A", "B")
assert test_node.func is identity
def test_set_node_func(self):
test_node = node(identity, "A", "B")
test_node.func = decorated_identity
assert test_node.func is decorated_identity
def test_labelled(self):
assert "labeled_node: <lambda>([input1]) -> [output1]" in str(
node(lambda x: None, "input1", "output1", name="labeled_node")
)
def test_call(self):
dummy_node = node(
biconcat, inputs=["input1", "input2"], outputs="output", name="myname"
)
actual = dummy_node(input1="in1", input2="in2")
expected = dummy_node.run(dict(input1="in1", input2="in2"))
assert actual == expected
def test_call_with_non_keyword_arguments(self):
dummy_node = node(
biconcat, inputs=["input1", "input2"], outputs="output", name="myname"
)
pattern = r"__call__\(\) takes 1 positional argument but 2 were given"
with pytest.raises(TypeError, match=pattern):
dummy_node("in1", input2="in2")
def test_run_with_duplicate_inputs_list(self):
dummy_node = node(func=biconcat, inputs=["input1", "input1"], outputs="output")
actual = dummy_node.run(dict(input1="in1"))
assert actual == {"output": "in1in1"}
def test_run_with_duplicate_inputs_dict(self):
dummy_node = node(
func=biconcat, inputs={"input1": "in1", "input2": "in1"}, outputs="output"
)
actual = dummy_node.run(dict(in1="hello"))
assert actual == {"output": "hellohello"}
def test_no_input(self):
assert "constant_output(None) -> [output1]" in str(
node(constant_output, None, "output1")
)
def test_no_output(self):
assert "<lambda>([input1]) -> None" in str(node(lambda x: None, "input1", None))
def test_inputs_none(self):
dummy_node = node(constant_output, None, "output")
assert dummy_node.inputs == []
def test_inputs_str(self):
dummy_node = node(identity, "input1", "output1")
assert dummy_node.inputs == ["input1"]
def test_inputs_dict(self):
dummy_node = node(
biconcat,
{"input1": "in1", "input2": "in2"},
["output2", "output1", "last node"],
)
inputs = dummy_node.inputs
assert isinstance(inputs, list)
assert len(inputs) == 2
assert set(inputs) == {"in1", "in2"}
def test_inputs_list(self):
dummy_node = node(
triconcat,
["input1", "input2", "another node"],
["output1", "output2", "last node"],
)
assert dummy_node.inputs == ["input1", "input2", "another node"]
def test_outputs_none(self):
dummy_node = node(identity, "input", None)
assert dummy_node.outputs == []
def test_outputs_str(self):
dummy_node = node(identity, "input1", "output1")
assert dummy_node.outputs == ["output1"]
def test_outputs_dict(self):
dummy_node = node(
biconcat, ["input1", "input2"], {"output1": "out1", "output2": "out2"}
)
outputs = dummy_node.outputs
assert isinstance(outputs, list)
assert len(outputs) == 2
assert set(outputs) == {"out1", "out2"}
def test_outputs_list(self):
dummy_node = node(
triconcat,
["input2", "input1", "another node"],
["output2", "output1", "last node"],
)
assert dummy_node.outputs == ["output2", "output1", "last node"]
@pytest.mark.parametrize(
"confirms_arg,expected",
[
(None, []),
([], []),
("foo", ["foo"]),
(["foo"], ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
],
)
def test_confirms(self, confirms_arg, expected):
dummy_node = node(identity, "input", None, confirms=confirms_arg)
assert dummy_node.confirms == expected
class TestNodeComparisons:
def test_node_equals(self):
first = node(identity, "input1", "output1", name="a_node")
second = node(identity, "input1", "output1", name="a_node")
assert first == second
assert first is not second
def test_node_less_than(self):
first = node(identity, "input1", "output1", name="A")
second = node(identity, "input1", "output1", name="B")
assert first < second
assert first is not second
def test_node_invalid_equals(self):
n = node(identity, "input1", "output1", name="a_node")
assert n != "hello"
def test_node_invalid_less_than(self):
n = node(identity, "input1", "output1", name="a_node")
pattern = "'<' not supported between instances of 'Node' and 'str'"
with pytest.raises(TypeError, match=pattern):
n < "hello"
def test_different_input_list_order_not_equal(self):
first = node(biconcat, ["input1", "input2"], "output1", name="A")
second = node(biconcat, ["input2", "input1"], "output1", name="A")
assert first != second
def test_different_output_list_order_not_equal(self):
first = node(identity, "input1", ["output1", "output2"], name="A")
second = node(identity, "input1", ["output2", "output1"], name="A")
assert first != second
def test_different_input_dict_order_equal(self):
first = node(biconcat, {"input1": "a", "input2": "b"}, "output1", name="A")
second = node(biconcat, {"input2": "b", "input1": "a"}, "output1", name="A")
assert first == second
def test_different_output_dict_order_equal(self):
first = node(identity, "input1", {"output1": "a", "output2": "b"}, name="A")
second = node(identity, "input1", {"output2": "b", "output1": "a"}, name="A")
assert first == second
def test_input_dict_list_not_equal(self):
first = node(biconcat, ["input1", "input2"], "output1", name="A")
second = node(
biconcat, {"input1": "input1", "input2": "input2"}, "output1", name="A"
)
assert first != second
def test_output_dict_list_not_equal(self):
first = node(identity, "input1", ["output1", "output2"], name="A")
second = node(
identity, "input1", {"output1": "output1", "output2": "output2"}, name="A"
)
assert first != second
def bad_input_type_node():
return lambda x: None, ("A", "D"), "B"
def bad_output_type_node():
return lambda x: None, "A", {"B", "C"}
def bad_function_type_node():
return "A", "B", "C"
def no_input_or_output_node():
return constant_output, None, None
def input_same_as_output_node():
return biconcat, ["A", "B"], dict(a="A")
def duplicate_output_dict_node():
return identity, "A", dict(a="A", b="A")
def duplicate_output_list_node():
return identity, "A", ["A", "A"]
@pytest.mark.parametrize(
"func, expected",
[
(bad_input_type_node, r"`inputs` type must be one of "),
(bad_output_type_node, r"`outputs` type must be one of "),
(bad_function_type_node, r"first argument must be a function"),
(no_input_or_output_node, r"it must have some `inputs` or `outputs`"),
(
input_same_as_output_node,
r"A node cannot have the same inputs and outputs: {\'A\'}",
),
(
duplicate_output_dict_node,
r"Failed to create node identity"
r"\(\[A\]\) -> \[A,A\] due to "
r"duplicate output\(s\) {\'A\'}.",
),
(
duplicate_output_list_node,
r"Failed to create node identity"
r"\(\[A\]\) -> \[A,A\] due to "
r"duplicate output\(s\) {\'A\'}.",
),
],
)
def test_bad_node(func, expected):
with pytest.raises(ValueError, match=expected):
node(*func())
def inconsistent_input_size():
return identity, ["A", "B"], "C"
def inconsistent_input_args():
def dummy_func_args(*args):
return "".join([*args])
return dummy_func_args, {"a": "A"}, "B"
def inconsistent_input_kwargs():
def dummy_func_args(**kwargs):
return list(kwargs.values())
return dummy_func_args, "A", "B"
lambda_identity = lambda input1: input1
def lambda_inconsistent_input_size():
return lambda_identity, ["A", "B"], "C"
partial_identity = partial(identity)
def partial_inconsistent_input_size():
return partial_identity, ["A", "B"], "C"
@pytest.mark.parametrize(
"func, expected",
[
(
inconsistent_input_size,
r"Inputs of 'identity' function expected \[\'input1\'\], but got \[\'A\', \'B\'\]",
),
(
inconsistent_input_args,
r"Inputs of 'dummy_func_args' function expected \[\'args\'\], but got {\'a\': \'A\'}",
),
(
inconsistent_input_kwargs,
r"Inputs of 'dummy_func_args' function expected \[\'kwargs\'\], but got A",
),
(
lambda_inconsistent_input_size,
r"Inputs of '<lambda>' function expected \[\'input1\'\], but got \[\'A\', \'B\'\]",
),
(
partial_inconsistent_input_size,
r"Inputs of '<partial>' function expected \[\'input1\'\], but got \[\'A\', \'B\'\]",
),
],
)
def test_bad_input(func, expected):
with pytest.raises(TypeError, match=expected):
node(*func())
def apply_f(func: Callable) -> Callable:
@wraps(func)
def with_f(*args, **kwargs):
return func(*[f"f({a})" for a in args], **kwargs)
return with_f
def apply_g(func: Callable) -> Callable:
@wraps(func)
def with_g(*args, **kwargs):
return func(*[f"g({a})" for a in args], **kwargs)
return with_g
def apply_h(func: Callable) -> Callable:
@wraps(func)
def with_h(*args, **kwargs):
return func(*[f"h({a})" for a in args], **kwargs)
return with_h
def apply_ij(func: Callable) -> Callable:
@wraps(func)
def with_ij(*args, **kwargs):
return func(*[f"ij({a})" for a in args], **kwargs)
return with_ij
@apply_f
def decorated_identity(value):
return value
class TestTagDecorator:
def test_apply_decorators(self):
old_node = node(apply_g(decorated_identity), "input", "output", name="node")
pattern = (
"The node's `decorate` API will be deprecated in Kedro 0.18.0."
"Please use a node's Hooks to extend the node's behaviour in a pipeline."
"For more information, please visit"
"https://kedro.readthedocs.io/en/stable/07_extend_kedro/04_hooks.html"
)
with pytest.warns(DeprecationWarning, match=re.escape(pattern)):
new_node = old_node.decorate(apply_h, apply_ij)
result = new_node.run(dict(input=1))
assert old_node.name == new_node.name
assert "output" in result
assert result["output"] == "f(g(ij(h(1))))"
def test_tag_nodes(self):
tagged_node = node(identity, "input", "output", tags=["hello"]).tag(["world"])
assert "hello" in tagged_node.tags
assert "world" in tagged_node.tags
assert len(tagged_node.tags) == 2
def test_tag_nodes_single_tag(self):
tagged_node = node(identity, "input", "output", tags="hello").tag("world")
assert "hello" in tagged_node.tags
assert "world" in tagged_node.tags
assert len(tagged_node.tags) == 2
def test_tag_and_decorate(self):
tagged_node = node(identity, "input", "output", tags=["hello"])
tagged_node = tagged_node.decorate(apply_f)
tagged_node = tagged_node.tag(["world"])
assert "hello" in tagged_node.tags
assert "world" in tagged_node.tags
assert tagged_node.run(dict(input=1))["output"] == "f(1)"
class TestNames:
def test_named(self):
n = node(identity, ["in"], ["out"], name="name")
assert str(n) == "name: identity([in]) -> [out]"
assert n.name == "name"
assert n.short_name == "name"
@pytest.mark.parametrize("bad_name", ["name,with,comma", "name with space"])
def test_invalid_name(self, bad_name):
pattern = (
f"'{bad_name}' is not a valid node name. "
f"It must contain only letters, digits, hyphens, "
f"underscores and/or fullstops."
)
with pytest.raises(ValueError, match=re.escape(pattern)):
node(identity, ["in"], ["out"], name=bad_name)
def test_namespaced(self):
n = node(identity, ["in"], ["out"], namespace="namespace")
assert str(n) == "identity([in]) -> [out]"
assert n.name == "namespace.identity([in]) -> [out]"
assert n.short_name == "Identity"
def test_named_and_namespaced(self):
n = node(identity, ["in"], ["out"], name="name", namespace="namespace")
assert str(n) == "name: identity([in]) -> [out]"
assert n.name == "namespace.name"
assert n.short_name == "name"
def test_function(self):
n = node(identity, ["in"], ["out"])
assert str(n) == "identity([in]) -> [out]"
assert n.name == "identity([in]) -> [out]"
assert n.short_name == "Identity"
def test_lambda(self):
n = node(lambda a: a, ["in"], ["out"])
assert str(n) == "<lambda>([in]) -> [out]"
assert n.name == "<lambda>([in]) -> [out]"
assert n.short_name == "<Lambda>"
def test_partial(self):
n = node(partial(identity), ["in"], ["out"])
assert str(n) == "<partial>([in]) -> [out]"
assert n.name == "<partial>([in]) -> [out]"
assert n.short_name == "<Partial>"
def test_updated_partial(self):
n = node(update_wrapper(partial(identity), identity), ["in"], ["out"])
assert str(n) == "identity([in]) -> [out]"
assert n.name == "identity([in]) -> [out]"
assert n.short_name == "Identity"
def test_updated_partial_dict_inputs(self):
n = node(
update_wrapper(partial(biconcat, input1=["in1"]), biconcat),
dict(input2="in2"),
["out"],
)
assert str(n) == "biconcat([in2]) -> [out]"
assert n.name == "biconcat([in2]) -> [out]"
assert n.short_name == "Biconcat"
| true
| true
|
790eb41ffd1c2aed2bd72ddd41c785d9c7dd4f88
| 14,268
|
py
|
Python
|
app_backend/views/rack.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 1
|
2020-06-21T04:08:26.000Z
|
2020-06-21T04:08:26.000Z
|
app_backend/views/rack.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 13
|
2019-10-18T17:19:32.000Z
|
2022-01-13T00:44:43.000Z
|
app_backend/views/rack.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 5
|
2019-02-07T03:15:16.000Z
|
2021-09-04T14:06:28.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: rack.py
@time: 2018-04-06 18:22
"""
from __future__ import unicode_literals
from datetime import datetime
from flask import (
request,
flash,
render_template,
url_for,
redirect,
abort,
jsonify,
Blueprint,
)
from flask_babel import gettext as _
from flask_login import login_required
from app_backend import app
from app_backend import excel
from app_backend.api.inventory import count_inventory
from app_backend.api.rack import (
get_rack_pagination,
get_rack_row_by_id,
add_rack,
edit_rack,
get_rack_choices,
# rack_current_stats,
# rack_former_stats,
)
from app_backend.api.rack import (
get_rack_rows,
# get_distinct_brand,
)
from app_backend.api.warehouse import (
get_warehouse_choices,
)
from app_backend.forms.rack import (
RackSearchForm,
RackAddForm,
RackEditForm,
)
from app_backend.models.model_bearing import Rack
from app_backend.permissions.rack import (
permission_rack_section_add,
permission_rack_section_search,
permission_rack_section_export,
permission_rack_section_get,
permission_rack_section_edit,
permission_rack_section_del,
)
from app_common.maps.default import DEFAULT_SEARCH_CHOICES_INT_OPTION
from app_common.maps.operations import OPERATION_EXPORT, OPERATION_DELETE
from app_common.maps.status_delete import (
STATUS_DEL_OK,
STATUS_DEL_NO)
# Define the blueprint
bp_rack = Blueprint('rack', __name__, url_prefix='/rack')
# Load configuration
DOCUMENT_INFO = app.config.get('DOCUMENT_INFO', {})
PER_PAGE_BACKEND = app.config.get('PER_PAGE_BACKEND', 20)
AJAX_SUCCESS_MSG = app.config.get('AJAX_SUCCESS_MSG', {'result': True})
AJAX_FAILURE_MSG = app.config.get('AJAX_FAILURE_MSG', {'result': False})
@bp_rack.route('/lists.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_search.require(http_exception=403)
def lists():
"""
    Rack list
:return:
"""
template_name = 'rack/lists.html'
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack lists')
    # Search conditions
form = RackSearchForm(request.form)
form.warehouse_id.choices = get_warehouse_choices()
# app.logger.info('')
search_condition = [
Rack.status_delete == STATUS_DEL_NO,
]
if request.method == 'POST':
        # Form validation failed
if not form.validate_on_submit():
flash(_('Search Failure'), 'danger')
            # Handle csrf_token separately
if hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors:
map(lambda x: flash(x, 'danger'), form.csrf_token.errors)
else:
if form.warehouse_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION:
search_condition.append(Rack.warehouse_id == form.warehouse_id.data)
if form.name.data:
search_condition.append(Rack.name == form.name.data)
            # Handle export
if form.op.data == OPERATION_EXPORT:
                # Check export permission
if not permission_rack_section_export.can():
abort(403)
column_names = Rack.__table__.columns.keys()
query_sets = get_rack_rows(*search_condition)
return excel.make_response_from_query_sets(
query_sets=query_sets,
column_names=column_names,
file_type='csv',
file_name='%s.csv' % _('rack lists')
)
            # Batch delete
if form.op.data == OPERATION_DELETE:
                # Check delete permission
if not permission_rack_section_del.can():
abort(403)
rack_ids = request.form.getlist('rack_id')
permitted = True
for rack_id in rack_ids:
                # Check whether it is currently in use
                # Inventory
if count_inventory(**{'rack_id': rack_id, 'status_delete': STATUS_DEL_NO}):
ext_msg = _('Currently In Use')
flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
permitted = False
break
if permitted:
result_total = True
for rack_id in rack_ids:
current_time = datetime.utcnow()
rack_data = {
'status_delete': STATUS_DEL_OK,
'delete_time': current_time,
'update_time': current_time,
}
result = edit_rack(rack_id, rack_data)
result_total = result_total and result
if result_total:
flash(_('Del Success'), 'success')
else:
flash(_('Del Failure'), 'danger')
    # Pagination data
pagination = get_rack_pagination(form.page.data, PER_PAGE_BACKEND, *search_condition)
    # Render template
return render_template(
template_name,
form=form,
pagination=pagination,
**document_info
)
@bp_rack.route('/<int:rack_id>/info.html')
@login_required
@permission_rack_section_get.require(http_exception=403)
def info(rack_id):
"""
    Rack details
:param rack_id:
:return:
"""
    # Detail data
rack_info = get_rack_row_by_id(rack_id)
    # Check whether the resource exists
if not rack_info:
abort(404)
    # Check whether the resource has been deleted
if rack_info.status_delete == STATUS_DEL_OK:
abort(410)
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack info')
    # Render template
return render_template('rack/info.html', rack_info=rack_info, **document_info)
@bp_rack.route('/add.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_add.require(http_exception=403)
def add():
"""
    Create rack
:return:
"""
template_name = 'rack/add.html'
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack add')
    # Load the create form
form = RackAddForm(request.form)
form.warehouse_id.choices = get_warehouse_choices(option_type='create')
    # Enter the create page
if request.method == 'GET':
        # Render the page
return render_template(
template_name,
form=form,
**document_info
)
    # Handle the create request
if request.method == 'POST':
        # Form validation failed
if not form.validate_on_submit():
flash(_('Add Failure'), 'danger')
return render_template(
template_name,
form=form,
**document_info
)
        # Form validation succeeded
current_time = datetime.utcnow()
rack_data = {
'warehouse_id': form.warehouse_id.data,
'name': form.name.data,
'create_time': current_time,
'update_time': current_time,
}
result = add_rack(rack_data)
        # Create operation succeeded
if result:
flash(_('Add Success'), 'success')
return redirect(request.args.get('next') or url_for('rack.lists'))
        # Create operation failed
else:
flash(_('Add Failure'), 'danger')
return render_template(
template_name,
form=form,
**document_info
)
@bp_rack.route('/<int:rack_id>/edit.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_edit.require(http_exception=403)
def edit(rack_id):
"""
    Edit rack
"""
rack_info = get_rack_row_by_id(rack_id)
    # Check whether the resource exists
if not rack_info:
abort(404)
    # Check whether the resource has been deleted
if rack_info.status_delete == STATUS_DEL_OK:
abort(410)
template_name = 'rack/edit.html'
    # Load the edit form
form = RackEditForm(request.form)
form.warehouse_id.choices = get_warehouse_choices(option_type='update')
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack edit')
    # Enter the edit page
if request.method == 'GET':
        # Populate the form
form.warehouse_id.data = rack_info.warehouse_id
form.name.data = rack_info.name
# form.create_time.data = rack_info.create_time
# form.update_time.data = rack_info.update_time
        # Render the page
return render_template(
template_name,
rack_id=rack_id,
form=form,
**document_info
)
    # Handle the edit request
if request.method == 'POST':
        # Form validation failed
if not form.validate_on_submit():
flash(_('Edit Failure'), 'danger')
return render_template(
template_name,
rack_id=rack_id,
form=form,
**document_info
)
        # Form validation succeeded
current_time = datetime.utcnow()
rack_data = {
'warehouse_id': form.warehouse_id.data,
'name': form.name.data,
'update_time': current_time,
}
result = edit_rack(rack_id, rack_data)
        # Edit operation succeeded
if result:
flash(_('Edit Success'), 'success')
return redirect(request.args.get('next') or url_for('rack.lists'))
        # Edit operation failed
else:
flash(_('Edit Failure'), 'danger')
return render_template(
template_name,
rack_id=rack_id,
form=form,
**document_info
)
@bp_rack.route('/ajax/del', methods=['GET', 'POST'])
@login_required
def ajax_delete():
"""
    Delete rack
:return:
"""
ajax_success_msg = AJAX_SUCCESS_MSG.copy()
ajax_failure_msg = AJAX_FAILURE_MSG.copy()
    # Check delete permission
if not permission_rack_section_del.can():
ext_msg = _('Permission Denied')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
    # Check the request method
if not (request.method == 'GET' and request.is_xhr):
ext_msg = _('Method Not Allowed')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
    # Check request parameters
rack_id = request.args.get('rack_id', 0, type=int)
if not rack_id:
ext_msg = _('ID does not exist')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
rack_info = get_rack_row_by_id(rack_id)
    # Check whether the resource exists
if not rack_info:
ext_msg = _('ID does not exist')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
    # Check whether the resource has been deleted
if rack_info.status_delete == STATUS_DEL_OK:
ext_msg = _('Already deleted')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
    # Check whether it is currently in use
    # Inventory
if count_inventory(**{'rack_id': rack_id, 'status_delete': STATUS_DEL_NO}):
ext_msg = _('Currently In Use')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
current_time = datetime.utcnow()
rack_data = {
'status_delete': STATUS_DEL_OK,
'delete_time': current_time,
'update_time': current_time,
}
result = edit_rack(rack_id, rack_data)
if result:
ajax_success_msg['msg'] = _('Del Success')
return jsonify(ajax_success_msg)
else:
ajax_failure_msg['msg'] = _('Del Failure')
return jsonify(ajax_failure_msg)
@bp_rack.route('/ajax/get_rack_choices', methods=['GET', 'POST'])
@login_required
def ajax_get_rack_choices():
"""
    Rack choices
:return:
"""
warehouse_id = request.args.get('warehouse_id', 0, type=int)
rack_choices = get_rack_choices(warehouse_id)
return jsonify(rack_choices)
# @bp_rack.route('/ajax/stats', methods=['GET', 'POST'])
# @login_required
# def ajax_stats():
# """
# Get rack statistics
# :return:
# """
# time_based = request.args.get('time_based', 'hour')
# result_rack_current = rack_current_stats(time_based)
# result_rack_former = rack_former_stats(time_based)
#
# line_chart_data = {
# 'labels': [label for label, _ in result_rack_current],
# 'datasets': [
# {
# 'label': 'Active',
# 'backgroundColor': 'rgba(220,220,220,0.5)',
# 'borderColor': 'rgba(220,220,220,1)',
# 'pointBackgroundColor': 'rgba(220,220,220,1)',
# 'pointBorderColor': '#fff',
# 'pointBorderWidth': 2,
# 'data': [data for _, data in result_rack_current]
# },
# {
# 'label': 'Departed',
# 'backgroundColor': 'rgba(151,187,205,0.5)',
# 'borderColor': 'rgba(151,187,205,1)',
# 'pointBackgroundColor': 'rgba(151,187,205,1)',
# 'pointBorderColor': '#fff',
# 'pointBorderWidth': 2,
# 'data': [data for _, data in result_rack_former]
# }
# ]
# }
# return json.dumps(line_chart_data, default=json_default)
#
#
# @bp_rack.route('/stats.html')
# @login_required
# @permission_rack_section_stats.require(http_exception=403)
# def stats():
# """
# Rack statistics
# :return:
# """
# # Statistics data
# time_based = request.args.get('time_based', 'hour')
# if time_based not in ['hour', 'date', 'month']:
# abort(404)
# # Document info
# document_info = DOCUMENT_INFO.copy()
# document_info['TITLE'] = _('rack stats')
# # Render template
# return render_template(
# 'rack/stats.html',
# time_based=time_based,
# **document_info
# )
#
#
# @bp_rack.route('/<int:rack_id>/stats.html')
# @login_required
# @permission_rack_section_stats.require(http_exception=403)
# def stats_item(rack_id):
# """
# Rack statistics detail
# :param rack_id:
# :return:
# """
# rack_info = get_rack_row_by_id(rack_id)
# # Check whether the resource exists
# if not rack_info:
# abort(404)
# # Check whether the resource has been deleted
# if rack_info.status_delete == STATUS_DEL_OK:
# abort(410)
#
# # Statistics data
# rack_stats_item_info = get_rack_row_by_id(rack_id)
# # Document info
# document_info = DOCUMENT_INFO.copy()
# document_info['TITLE'] = _('rack stats item')
# # Render template
# return render_template(
# 'rack/stats_item.html',
# rack_stats_item_info=rack_stats_item_info,
# **document_info
# )
| 29.418557
| 91
| 0.593146
|
from __future__ import unicode_literals
from datetime import datetime
from flask import (
request,
flash,
render_template,
url_for,
redirect,
abort,
jsonify,
Blueprint,
)
from flask_babel import gettext as _
from flask_login import login_required
from app_backend import app
from app_backend import excel
from app_backend.api.inventory import count_inventory
from app_backend.api.rack import (
get_rack_pagination,
get_rack_row_by_id,
add_rack,
edit_rack,
get_rack_choices,
)
from app_backend.api.rack import (
get_rack_rows,
)
from app_backend.api.warehouse import (
get_warehouse_choices,
)
from app_backend.forms.rack import (
RackSearchForm,
RackAddForm,
RackEditForm,
)
from app_backend.models.model_bearing import Rack
from app_backend.permissions.rack import (
permission_rack_section_add,
permission_rack_section_search,
permission_rack_section_export,
permission_rack_section_get,
permission_rack_section_edit,
permission_rack_section_del,
)
from app_common.maps.default import DEFAULT_SEARCH_CHOICES_INT_OPTION
from app_common.maps.operations import OPERATION_EXPORT, OPERATION_DELETE
from app_common.maps.status_delete import (
STATUS_DEL_OK,
STATUS_DEL_NO)
bp_rack = Blueprint('rack', __name__, url_prefix='/rack')
DOCUMENT_INFO = app.config.get('DOCUMENT_INFO', {})
PER_PAGE_BACKEND = app.config.get('PER_PAGE_BACKEND', 20)
AJAX_SUCCESS_MSG = app.config.get('AJAX_SUCCESS_MSG', {'result': True})
AJAX_FAILURE_MSG = app.config.get('AJAX_FAILURE_MSG', {'result': False})
@bp_rack.route('/lists.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_search.require(http_exception=403)
def lists():
template_name = 'rack/lists.html'
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack lists')
form = RackSearchForm(request.form)
form.warehouse_id.choices = get_warehouse_choices()
search_condition = [
Rack.status_delete == STATUS_DEL_NO,
]
if request.method == 'POST':
if not form.validate_on_submit():
flash(_('Search Failure'), 'danger')
if hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors:
map(lambda x: flash(x, 'danger'), form.csrf_token.errors)
else:
if form.warehouse_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION:
search_condition.append(Rack.warehouse_id == form.warehouse_id.data)
if form.name.data:
search_condition.append(Rack.name == form.name.data)
if form.op.data == OPERATION_EXPORT:
if not permission_rack_section_export.can():
abort(403)
column_names = Rack.__table__.columns.keys()
query_sets = get_rack_rows(*search_condition)
return excel.make_response_from_query_sets(
query_sets=query_sets,
column_names=column_names,
file_type='csv',
file_name='%s.csv' % _('rack lists')
)
if form.op.data == OPERATION_DELETE:
if not permission_rack_section_del.can():
abort(403)
rack_ids = request.form.getlist('rack_id')
permitted = True
for rack_id in rack_ids:
if count_inventory(**{'rack_id': rack_id, 'status_delete': STATUS_DEL_NO}):
ext_msg = _('Currently In Use')
flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
permitted = False
break
if permitted:
result_total = True
for rack_id in rack_ids:
current_time = datetime.utcnow()
rack_data = {
'status_delete': STATUS_DEL_OK,
'delete_time': current_time,
'update_time': current_time,
}
result = edit_rack(rack_id, rack_data)
result_total = result_total and result
if result_total:
flash(_('Del Success'), 'success')
else:
flash(_('Del Failure'), 'danger')
pagination = get_rack_pagination(form.page.data, PER_PAGE_BACKEND, *search_condition)
return render_template(
template_name,
form=form,
pagination=pagination,
**document_info
)
@bp_rack.route('/<int:rack_id>/info.html')
@login_required
@permission_rack_section_get.require(http_exception=403)
def info(rack_id):
rack_info = get_rack_row_by_id(rack_id)
if not rack_info:
abort(404)
if rack_info.status_delete == STATUS_DEL_OK:
abort(410)
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack info')
return render_template('rack/info.html', rack_info=rack_info, **document_info)
@bp_rack.route('/add.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_add.require(http_exception=403)
def add():
template_name = 'rack/add.html'
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack add')
form = RackAddForm(request.form)
form.warehouse_id.choices = get_warehouse_choices(option_type='create')
if request.method == 'GET':
return render_template(
template_name,
form=form,
**document_info
)
if request.method == 'POST':
if not form.validate_on_submit():
flash(_('Add Failure'), 'danger')
return render_template(
template_name,
form=form,
**document_info
)
current_time = datetime.utcnow()
rack_data = {
'warehouse_id': form.warehouse_id.data,
'name': form.name.data,
'create_time': current_time,
'update_time': current_time,
}
result = add_rack(rack_data)
if result:
flash(_('Add Success'), 'success')
return redirect(request.args.get('next') or url_for('rack.lists'))
else:
flash(_('Add Failure'), 'danger')
return render_template(
template_name,
form=form,
**document_info
)
@bp_rack.route('/<int:rack_id>/edit.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_edit.require(http_exception=403)
def edit(rack_id):
rack_info = get_rack_row_by_id(rack_id)
if not rack_info:
abort(404)
if rack_info.status_delete == STATUS_DEL_OK:
abort(410)
template_name = 'rack/edit.html'
form = RackEditForm(request.form)
form.warehouse_id.choices = get_warehouse_choices(option_type='update')
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack edit')
if request.method == 'GET':
form.warehouse_id.data = rack_info.warehouse_id
form.name.data = rack_info.name
return render_template(
template_name,
rack_id=rack_id,
form=form,
**document_info
)
if request.method == 'POST':
if not form.validate_on_submit():
flash(_('Edit Failure'), 'danger')
return render_template(
template_name,
rack_id=rack_id,
form=form,
**document_info
)
current_time = datetime.utcnow()
rack_data = {
'warehouse_id': form.warehouse_id.data,
'name': form.name.data,
'update_time': current_time,
}
result = edit_rack(rack_id, rack_data)
if result:
flash(_('Edit Success'), 'success')
return redirect(request.args.get('next') or url_for('rack.lists'))
else:
flash(_('Edit Failure'), 'danger')
return render_template(
template_name,
rack_id=rack_id,
form=form,
**document_info
)
@bp_rack.route('/ajax/del', methods=['GET', 'POST'])
@login_required
def ajax_delete():
ajax_success_msg = AJAX_SUCCESS_MSG.copy()
ajax_failure_msg = AJAX_FAILURE_MSG.copy()
if not permission_rack_section_del.can():
ext_msg = _('Permission Denied')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
if not (request.method == 'GET' and request.is_xhr):
ext_msg = _('Method Not Allowed')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
rack_id = request.args.get('rack_id', 0, type=int)
if not rack_id:
ext_msg = _('ID does not exist')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
rack_info = get_rack_row_by_id(rack_id)
if not rack_info:
ext_msg = _('ID does not exist')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
if rack_info.status_delete == STATUS_DEL_OK:
ext_msg = _('Already deleted')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
if count_inventory(**{'rack_id': rack_id, 'status_delete': STATUS_DEL_NO}):
ext_msg = _('Currently In Use')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
current_time = datetime.utcnow()
rack_data = {
'status_delete': STATUS_DEL_OK,
'delete_time': current_time,
'update_time': current_time,
}
result = edit_rack(rack_id, rack_data)
if result:
ajax_success_msg['msg'] = _('Del Success')
return jsonify(ajax_success_msg)
else:
ajax_failure_msg['msg'] = _('Del Failure')
return jsonify(ajax_failure_msg)
@bp_rack.route('/ajax/get_rack_choices', methods=['GET', 'POST'])
@login_required
def ajax_get_rack_choices():
warehouse_id = request.args.get('warehouse_id', 0, type=int)
rack_choices = get_rack_choices(warehouse_id)
return jsonify(rack_choices)
# Get rack statistics
# :return:
# """
# Rack statistics
# :return:
# """
# Rack statistics detail
# :param rack_id:
# :return:
# """
| true
| true
|
790eb53163105b71fb9f054f268a141352767bf0
| 3,322
|
py
|
Python
|
deepblast/utils.py
|
athbaltzis/deepblast
|
63d29fd162e537de1630d4f98f2b559b61a611e3
|
[
"BSD-3-Clause"
] | 29
|
2020-09-20T17:34:08.000Z
|
2022-03-21T11:47:13.000Z
|
deepblast/utils.py
|
athbaltzis/deepblast
|
63d29fd162e537de1630d4f98f2b559b61a611e3
|
[
"BSD-3-Clause"
] | 50
|
2020-06-19T23:56:17.000Z
|
2020-09-12T22:20:26.000Z
|
deepblast/utils.py
|
athbaltzis/deepblast
|
63d29fd162e537de1630d4f98f2b559b61a611e3
|
[
"BSD-3-Clause"
] | 9
|
2020-09-17T20:27:10.000Z
|
2021-09-23T01:11:17.000Z
|
import os
import numpy as np
from scipy.stats import multivariate_normal
import inspect
from sklearn.metrics.pairwise import pairwise_distances
def sample(transition_matrix, means, covs, start_state, n_samples,
random_state):
n_states, n_features, _ = covs.shape
states = np.zeros(n_samples, dtype='int')
emissions = np.zeros((n_samples, n_features))
for i in range(n_samples):
if i == 0:
prev_state = start_state
else:
prev_state = states[i - 1]
state = random_state.choice(n_states,
p=transition_matrix[:, prev_state])
emissions[i] = random_state.multivariate_normal(
means[state], covs[state])
states[i] = state
return emissions, states
def make_data(T=20):
"""
Sample data from a HMM model and compute associated CRF potentials.
"""
random_state = np.random.RandomState(0)
d = 0.2
e = 0.1
transition_matrix = np.array([[1 - 2 * d, d, d], [1 - e, e, 0],
[1 - e, 0, e]])
means = np.array([[0, 0], [10, 0], [5, -5]])
covs = np.array([[[1, 0], [0, 1]], [[.2, 0], [0, .3]], [[2, 0], [0, 1]]])
start_state = 0
emissions, states = sample(transition_matrix,
means,
covs,
start_state,
n_samples=T,
random_state=random_state)
emission_log_likelihood = []
for mean, cov in zip(means, covs):
rv = multivariate_normal(mean, cov)
emission_log_likelihood.append(rv.logpdf(emissions)[:, np.newaxis])
emission_log_likelihood = np.concatenate(emission_log_likelihood, axis=1)
log_transition_matrix = np.log(transition_matrix)
# CRF potential from HMM model
theta = emission_log_likelihood[:, :, np.newaxis] \
+ log_transition_matrix[np.newaxis, :, :]
return states, emissions, theta
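# A minimal usage sketch (the helper name below is illustrative, not part of
# the original module): make_data above samples a 3-state HMM with 2-D
# Gaussian emissions, so T steps yield states of shape (T,), emissions of
# shape (T, 2) and CRF potentials theta of shape (T, 3, 3).
def _demo_make_data(T=20):
    states, emissions, theta = make_data(T)
    assert states.shape == (T,)
    assert emissions.shape == (T, 2)
    assert theta.shape == (T, 3, 3)
    return states, emissions, theta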
def make_alignment_data():
rng = np.random.RandomState(0)
m, n = 2, 2
X = rng.randn(m, 3)
Y = rng.randn(n, 3)
return pairwise_distances(X, Y) / 10
def get_data_path(fn, subfolder='data'):
"""Return path to filename ``fn`` in the data folder.
During testing it is often necessary to load data files. This
function returns the full path to files in the ``data`` subfolder
by default.
Parameters
----------
fn : str
File name.
subfolder : str, defaults to ``data``
Name of the subfolder that contains the data.
Returns
-------
str
Inferred absolute path to the test data for the module where
``get_data_path(fn)`` is called.
Notes
-----
The requested path may not point to an existing file, as its
existence is not checked.
This is from skbio's code base
https://github.com/biocore/scikit-bio/blob/master/skbio/util/_testing.py#L50
"""
# getouterframes returns a list of tuples: the second tuple
# contains info about the caller, and the second element is its
# filename
callers_filename = inspect.getouterframes(inspect.currentframe())[1][1]
path = os.path.dirname(os.path.abspath(callers_filename))
data_path = os.path.join(path, subfolder, fn)
return data_path
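# A minimal usage sketch (the file name is hypothetical): called from a test
# module at .../tests/test_foo.py, get_data_path resolves relative to that
# module's own directory, e.g. '.../tests/data/example.txt'. As noted above,
# existence of the returned path is not checked.
def _demo_get_data_path():
    return get_data_path('example.txt')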
| 33.897959
| 80
| 0.606562
|
import os
import numpy as np
from scipy.stats import multivariate_normal
import inspect
from sklearn.metrics.pairwise import pairwise_distances
def sample(transition_matrix, means, covs, start_state, n_samples,
random_state):
n_states, n_features, _ = covs.shape
states = np.zeros(n_samples, dtype='int')
emissions = np.zeros((n_samples, n_features))
for i in range(n_samples):
if i == 0:
prev_state = start_state
else:
prev_state = states[i - 1]
state = random_state.choice(n_states,
p=transition_matrix[:, prev_state])
emissions[i] = random_state.multivariate_normal(
means[state], covs[state])
states[i] = state
return emissions, states
def make_data(T=20):
random_state = np.random.RandomState(0)
d = 0.2
e = 0.1
transition_matrix = np.array([[1 - 2 * d, d, d], [1 - e, e, 0],
[1 - e, 0, e]])
means = np.array([[0, 0], [10, 0], [5, -5]])
covs = np.array([[[1, 0], [0, 1]], [[.2, 0], [0, .3]], [[2, 0], [0, 1]]])
start_state = 0
emissions, states = sample(transition_matrix,
means,
covs,
start_state,
n_samples=T,
random_state=random_state)
emission_log_likelihood = []
for mean, cov in zip(means, covs):
rv = multivariate_normal(mean, cov)
emission_log_likelihood.append(rv.logpdf(emissions)[:, np.newaxis])
emission_log_likelihood = np.concatenate(emission_log_likelihood, axis=1)
log_transition_matrix = np.log(transition_matrix)
theta = emission_log_likelihood[:, :, np.newaxis] \
+ log_transition_matrix[np.newaxis, :, :]
return states, emissions, theta
def make_alignment_data():
rng = np.random.RandomState(0)
m, n = 2, 2
X = rng.randn(m, 3)
Y = rng.randn(n, 3)
return pairwise_distances(X, Y) / 10
def get_data_path(fn, subfolder='data'):
callers_filename = inspect.getouterframes(inspect.currentframe())[1][1]
path = os.path.dirname(os.path.abspath(callers_filename))
data_path = os.path.join(path, subfolder, fn)
return data_path
| true
| true
|
790eb54ddf76ac8637ab3494b96e13577f241ade
| 6,598
|
py
|
Python
|
slim/types/TreatmentTypes.py
|
magicicada/slim
|
e6e966dfa88145f0f571e9479ea22ed7ce61fd57
|
[
"MIT"
] | 3
|
2021-10-06T20:09:53.000Z
|
2022-01-05T11:40:57.000Z
|
slim/types/TreatmentTypes.py
|
resistance-modelling/slim
|
ce05d40f56f5263cb039973af3e187cffc1d00b4
|
[
"MIT"
] | 143
|
2021-07-16T09:44:23.000Z
|
2022-03-29T16:27:40.000Z
|
slim/types/TreatmentTypes.py
|
resistance-modelling/slim
|
ce05d40f56f5263cb039973af3e187cffc1d00b4
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from abc import abstractmethod, ABC
from decimal import Decimal
from enum import Enum
from typing import Dict, cast
import numpy as np
# A few extra general types
from slim.simulation.lice_population import LicePopulation, GenoDistrib, GenoTreatmentValue,\
Alleles, GenoTreatmentDistrib
Money = Decimal
class Treatment(Enum):
"""
A stub for treatment types
TODO: add other treatments here
"""
EMB = 0
THERMOLICER = 1
class GeneticMechanism(Enum):
"""
Genetic mechanism to be used when generating egg genotypes
"""
DISCRETE = 1
MATERNAL = 2
class HeterozygousResistance(Enum):
"""
Resistance in a monogenic, heterozygous setting.
"""
DOMINANT = 1
INCOMPLETELY_DOMINANT = 2
RECESSIVE = 3
TreatmentResistance = Dict[HeterozygousResistance, float]
class TreatmentParams(ABC):
"""
Abstract class for all the treatments
"""
name = ""
def __init__(self, payload):
self.quadratic_fish_mortality_coeffs = np.array(payload["quadratic_fish_mortality_coeffs"])
self.effect_delay: int = payload["effect_delay"]
self.application_period: int = payload["application_period"]
@staticmethod
def parse_pheno_resistance(pheno_resistance_dict: dict) -> TreatmentResistance:
return {HeterozygousResistance[key.upper()]: val for key, val in pheno_resistance_dict.items()}
def __get_mortality_pp_increase(self, temperature: float, fish_mass: float) -> float:
"""Get the mortality percentage point difference increase.
:param temperature: the temperature in Celsius
:param fish_mass: the fish mass (in grams)
:returns: Mortality percentage point difference increase
"""
# TODO: is this the right way to solve this?
fish_mass_indicator = 1 if fish_mass > 2000 else 0
input = np.array([1, temperature, fish_mass_indicator, temperature ** 2, temperature * fish_mass_indicator,
fish_mass_indicator ** 2])
return max(float(self.quadratic_fish_mortality_coeffs.dot(input)), 0)
@abstractmethod
def delay(self, average_temperature: float): # pragma: no cover
"""
Delay before treatment should have a noticeable effect
"""
@staticmethod
def get_allele_heterozygous_trait(alleles: Alleles):
"""
Get the allele heterozygous type
"""
# should we move this?
if 'A' in alleles:
if 'a' in alleles:
trait = HeterozygousResistance.INCOMPLETELY_DOMINANT
else:
trait = HeterozygousResistance.DOMINANT
else:
trait = HeterozygousResistance.RECESSIVE
return trait
@abstractmethod
def get_lice_treatment_mortality_rate(
self, lice_population: LicePopulation, temperature: float) -> GenoTreatmentDistrib:
"""
Calculate the mortality rates of this treatment
"""
def get_fish_mortality_occurrences(
self,
temperature: float,
fish_mass: float,
num_fish: float,
efficacy_window: float,
mortality_events: int
):
"""Get the number of fish that die due to treatment
:param temperature: the temperature of the cage
:param num_fish: the number of fish
:param fish_mass: the average fish mass (in grams)
:param efficacy_window: the length of the efficacy window
:param mortality_events: the number of fish mortality events to subtract from
"""
predicted_pp_increase = self.__get_mortality_pp_increase(temperature, fish_mass)
mortality_events_pp = 100 * mortality_events / num_fish
predicted_deaths = ((predicted_pp_increase + mortality_events_pp) * num_fish / 100) \
- mortality_events
predicted_deaths /= efficacy_window
return predicted_deaths
class ChemicalTreatment(TreatmentParams):
"""Trait for all chemical treatments"""
def __init__(self, payload):
super().__init__(payload)
self.pheno_resistance = self.parse_pheno_resistance(payload["pheno_resistance"])
self.price_per_kg = Money(payload["price_per_kg"])
self.durability_temp_ratio: float = payload["durability_temp_ratio"]
class ThermalTreatment(TreatmentParams):
"""Trait for all thermal-based treatments"""
def __init__(self, payload):
super().__init__(payload)
self.price_per_application = Money(payload["price_per_application"])
# NOTE: these are currently unused
# self.exposure_temperature: float = payload["exposure_temperature"]
# self.exposure_length: float = payload["efficacy"]
class EMB(ChemicalTreatment):
"""Emamectin Benzoate"""
name = "EMB"
def delay(self, average_temperature: float):
return self.durability_temp_ratio / average_temperature
def get_lice_treatment_mortality_rate(self, lice_population: LicePopulation, _temperature=None):
susceptible_populations = [lice_population.geno_by_lifestage[stage] for stage in
LicePopulation.susceptible_stages]
num_susc_per_geno = GenoDistrib.batch_sum(susceptible_populations)
geno_treatment_distrib = {geno: GenoTreatmentValue(0.0, 0) for geno in num_susc_per_geno}
for geno, num_susc in num_susc_per_geno.items():
trait = self.get_allele_heterozygous_trait(geno)
susceptibility_factor = 1.0 - self.pheno_resistance[trait]
geno_treatment_distrib[geno] = GenoTreatmentValue(susceptibility_factor, cast(int, num_susc))
return geno_treatment_distrib
class Thermolicer(ThermalTreatment):
name = "Thermolicer"
def delay(self, _):
return 1 # effects noticeable the next day
def get_lice_treatment_mortality_rate(
self, lice_population: LicePopulation, temperature: float) -> GenoTreatmentDistrib:
if temperature >= 12:
efficacy = 0.8
else:
efficacy = 0.99
susceptible_populations = [lice_population.geno_by_lifestage[stage] for stage in
LicePopulation.susceptible_stages]
num_susc_per_geno = cast(GenoDistrib, GenoDistrib.batch_sum(susceptible_populations))
geno_treatment_distrib = {geno: GenoTreatmentValue(efficacy, cast(int, num_susc))
for geno, num_susc in num_susc_per_geno.items()}
return geno_treatment_distrib
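# A minimal usage sketch, not part of the original module: the payload keys
# below are the ones consumed by TreatmentParams and ChemicalTreatment above,
# but every value (coefficients, resistance levels, price, ratio) is made up
# for illustration.
def _demo_emb_fish_mortality():
    payload = {
        "quadratic_fish_mortality_coeffs": [0.0, 0.1, 0.0, 0.01, 0.0, 0.0],
        "effect_delay": 5,
        "application_period": 10,
        "pheno_resistance": {"dominant": 0.8,
                             "incompletely_dominant": 0.5,
                             "recessive": 0.1},
        "price_per_kg": "500",
        "durability_temp_ratio": 210.0,
    }
    emb = EMB(payload)
    # Expected treatment-induced fish deaths per day of the efficacy window,
    # given cage temperature (Celsius), mean fish mass (grams), the headcount
    # and the mortality events already recorded.
    return emb.get_fish_mortality_occurrences(
        temperature=10.0, fish_mass=1500.0, num_fish=10000,
        efficacy_window=14.0, mortality_events=30)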
| 34.544503
| 115
| 0.677933
|
from __future__ import annotations
from abc import abstractmethod, ABC
from decimal import Decimal
from enum import Enum
from typing import Dict, cast
import numpy as np
from slim.simulation.lice_population import LicePopulation, GenoDistrib, GenoTreatmentValue,\
Alleles, GenoTreatmentDistrib
Money = Decimal
class Treatment(Enum):
EMB = 0
THERMOLICER = 1
class GeneticMechanism(Enum):
DISCRETE = 1
MATERNAL = 2
class HeterozygousResistance(Enum):
DOMINANT = 1
INCOMPLETELY_DOMINANT = 2
RECESSIVE = 3
TreatmentResistance = Dict[HeterozygousResistance, float]
class TreatmentParams(ABC):
name = ""
def __init__(self, payload):
self.quadratic_fish_mortality_coeffs = np.array(payload["quadratic_fish_mortality_coeffs"])
self.effect_delay: int = payload["effect_delay"]
self.application_period: int = payload["application_period"]
@staticmethod
def parse_pheno_resistance(pheno_resistance_dict: dict) -> TreatmentResistance:
return {HeterozygousResistance[key.upper()]: val for key, val in pheno_resistance_dict.items()}
def __get_mortality_pp_increase(self, temperature: float, fish_mass: float) -> float:
fish_mass_indicator = 1 if fish_mass > 2000 else 0
input = np.array([1, temperature, fish_mass_indicator, temperature ** 2, temperature * fish_mass_indicator,
fish_mass_indicator ** 2])
return max(float(self.quadratic_fish_mortality_coeffs.dot(input)), 0)
@abstractmethod
    def delay(self, average_temperature: float):
        ...
@staticmethod
def get_allele_heterozygous_trait(alleles: Alleles):
if 'A' in alleles:
if 'a' in alleles:
trait = HeterozygousResistance.INCOMPLETELY_DOMINANT
else:
trait = HeterozygousResistance.DOMINANT
else:
trait = HeterozygousResistance.RECESSIVE
return trait
@abstractmethod
def get_lice_treatment_mortality_rate(
            self, lice_population: LicePopulation, temperature: float) -> GenoTreatmentDistrib:
        ...
def get_fish_mortality_occurrences(
self,
temperature: float,
fish_mass: float,
num_fish: float,
efficacy_window: float,
mortality_events: int
):
predicted_pp_increase = self.__get_mortality_pp_increase(temperature, fish_mass)
mortality_events_pp = 100 * mortality_events / num_fish
predicted_deaths = ((predicted_pp_increase + mortality_events_pp) * num_fish / 100) \
- mortality_events
predicted_deaths /= efficacy_window
return predicted_deaths
class ChemicalTreatment(TreatmentParams):
def __init__(self, payload):
super().__init__(payload)
self.pheno_resistance = self.parse_pheno_resistance(payload["pheno_resistance"])
self.price_per_kg = Money(payload["price_per_kg"])
self.durability_temp_ratio: float = payload["durability_temp_ratio"]
class ThermalTreatment(TreatmentParams):
def __init__(self, payload):
super().__init__(payload)
self.price_per_application = Money(payload["price_per_application"])
class EMB(ChemicalTreatment):
name = "EMB"
def delay(self, average_temperature: float):
return self.durability_temp_ratio / average_temperature
def get_lice_treatment_mortality_rate(self, lice_population: LicePopulation, _temperature=None):
susceptible_populations = [lice_population.geno_by_lifestage[stage] for stage in
LicePopulation.susceptible_stages]
num_susc_per_geno = GenoDistrib.batch_sum(susceptible_populations)
geno_treatment_distrib = {geno: GenoTreatmentValue(0.0, 0) for geno in num_susc_per_geno}
for geno, num_susc in num_susc_per_geno.items():
trait = self.get_allele_heterozygous_trait(geno)
susceptibility_factor = 1.0 - self.pheno_resistance[trait]
geno_treatment_distrib[geno] = GenoTreatmentValue(susceptibility_factor, cast(int, num_susc))
return geno_treatment_distrib
class Thermolicer(ThermalTreatment):
name = "Thermolicer"
def delay(self, _):
return 1
def get_lice_treatment_mortality_rate(
self, lice_population: LicePopulation, temperature: float) -> GenoTreatmentDistrib:
if temperature >= 12:
efficacy = 0.8
else:
efficacy = 0.99
susceptible_populations = [lice_population.geno_by_lifestage[stage] for stage in
LicePopulation.susceptible_stages]
num_susc_per_geno = cast(GenoDistrib, GenoDistrib.batch_sum(susceptible_populations))
geno_treatment_distrib = {geno: GenoTreatmentValue(efficacy, cast(int, num_susc))
for geno, num_susc in num_susc_per_geno.items()}
return geno_treatment_distrib
| true
| true
|
790eb660dec288f5b6fd4bcac1aded1ed2bcfc79
| 4,355
|
py
|
Python
|
test/TEX/subdir_variantdir_include2.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 1,403
|
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
test/TEX/subdir_variantdir_include2.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 3,708
|
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
test/TEX/subdir_variantdir_include2.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 281
|
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
r"""
Verify that we execute TeX in a subdirectory (if that's where the document
resides) by checking that all the auxiliary files get created there and
not in the top-level directory. Test this when variantDir is used
Add use of \include and \includegraphics from within the included file
Also check that we find files
Test case courtesy Joel B. Mohler.
"""
import TestSCons
test = TestSCons.TestSCons()
latex = test.where_is('latex')
if not latex:
test.skip_test("Could not find 'latex'; skipping test.\n")
pdflatex = test.where_is('pdflatex')
if not pdflatex:
test.skip_test("Could not find 'pdflatex'; skipping test.\n")
test.subdir('docs')
test.subdir(['docs','content'])
test.subdir(['docs','fig'])
test.write('SConstruct', """\
import os
env = Environment(TOOLS = ['tex', 'pdftex'])
env.VariantDir('build', 'docs', duplicate=0)
pdf = env.PDF('build/main.tex')
""")
test.write(['docs','main.tex'],
r"""\documentclass{article}
\usepackage{makeidx}
\makeindex
\begin{document}
Hi there.
\index{info}
\include{content/chapter}
\printindex{}
\end{document}
""")
test.write(['docs','content','chapter.tex'],
r"""Sub-document 1
\input{content/subchap}
""")
test.write(['docs','content','subchap.tex'], """\
Sub-chapter 2
""")
#test.run(arguments = '.')
#test.run(arguments = '.', stderr=None, stdout=None)
# next line tests that side effect nodes get disambiguated
# and their directories created in a variantDir before
# the builder tries to populate them and fails
test.run(arguments = 'build/main.pdf', stderr=None, stdout=None)
test.must_exist(['build', 'main.aux'])
test.must_exist(['build', 'main.fls'])
test.must_exist(['build', 'main.idx'])
test.must_exist(['build', 'main.ilg'])
test.must_exist(['build', 'main.ind'])
test.must_exist(['build', 'main.log'])
test.must_exist(['build', 'main.pdf'])
test.must_exist(['build', 'content', 'chapter.aux'])
test.must_not_exist('main.aux')
test.must_not_exist('main.dvi')
test.must_not_exist('main.idx')
test.must_not_exist('main.ilg')
test.must_not_exist('main.ind')
test.must_not_exist('main.log')
test.must_not_exist('main.pdf')
test.must_not_exist(['docs', 'main.aux'])
test.must_not_exist(['docs', 'main.dvi'])
test.must_not_exist(['docs', 'main.idx'])
test.must_not_exist(['docs', 'main.ilg'])
test.must_not_exist(['docs', 'main.ind'])
test.must_not_exist(['docs', 'main.log'])
test.must_not_exist(['docs', 'main.pdf'])
test.must_not_exist(['docs', 'content', 'main.aux'])
test.must_not_exist(['docs', 'content', 'main.dvi'])
test.must_not_exist(['docs', 'content', 'main.idx'])
test.must_not_exist(['docs', 'content', 'main.ilg'])
test.must_not_exist(['docs', 'content', 'main.ind'])
test.must_not_exist(['docs', 'content', 'main.log'])
test.must_not_exist(['docs', 'content', 'main.pdf'])
test.must_not_exist(['docs', 'content', 'chapter.aux'])
test.up_to_date(arguments = '.', stderr=None, stdout=None)
test.write(['docs','content', 'subchap.tex'], """\
Sub-document 2a
""")
test.not_up_to_date(arguments = '.')
#test.up_to_date(arguments = '.', stderr=None, stdout=None)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 30.034483
| 74
| 0.723766
|
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
test = TestSCons.TestSCons()
latex = test.where_is('latex')
if not latex:
test.skip_test("Could not find 'latex'; skipping test.\n")
pdflatex = test.where_is('pdflatex')
if not pdflatex:
test.skip_test("Could not find 'pdflatex'; skipping test.\n")
test.subdir('docs')
test.subdir(['docs','content'])
test.subdir(['docs','fig'])
test.write('SConstruct', """\
import os
env = Environment(TOOLS = ['tex', 'pdftex'])
env.VariantDir('build', 'docs', duplicate=0)
pdf = env.PDF('build/main.tex')
""")
test.write(['docs','main.tex'],
r"""\documentclass{article}
\usepackage{makeidx}
\makeindex
\begin{document}
Hi there.
\index{info}
\include{content/chapter}
\printindex{}
\end{document}
""")
test.write(['docs','content','chapter.tex'],
r"""Sub-document 1
\input{content/subchap}
""")
test.write(['docs','content','subchap.tex'], """\
Sub-chapter 2
""")
test.run(arguments = 'build/main.pdf', stderr=None, stdout=None)
test.must_exist(['build', 'main.aux'])
test.must_exist(['build', 'main.fls'])
test.must_exist(['build', 'main.idx'])
test.must_exist(['build', 'main.ilg'])
test.must_exist(['build', 'main.ind'])
test.must_exist(['build', 'main.log'])
test.must_exist(['build', 'main.pdf'])
test.must_exist(['build', 'content', 'chapter.aux'])
test.must_not_exist('main.aux')
test.must_not_exist('main.dvi')
test.must_not_exist('main.idx')
test.must_not_exist('main.ilg')
test.must_not_exist('main.ind')
test.must_not_exist('main.log')
test.must_not_exist('main.pdf')
test.must_not_exist(['docs', 'main.aux'])
test.must_not_exist(['docs', 'main.dvi'])
test.must_not_exist(['docs', 'main.idx'])
test.must_not_exist(['docs', 'main.ilg'])
test.must_not_exist(['docs', 'main.ind'])
test.must_not_exist(['docs', 'main.log'])
test.must_not_exist(['docs', 'main.pdf'])
test.must_not_exist(['docs', 'content', 'main.aux'])
test.must_not_exist(['docs', 'content', 'main.dvi'])
test.must_not_exist(['docs', 'content', 'main.idx'])
test.must_not_exist(['docs', 'content', 'main.ilg'])
test.must_not_exist(['docs', 'content', 'main.ind'])
test.must_not_exist(['docs', 'content', 'main.log'])
test.must_not_exist(['docs', 'content', 'main.pdf'])
test.must_not_exist(['docs', 'content', 'chapter.aux'])
test.up_to_date(arguments = '.', stderr=None, stdout=None)
test.write(['docs','content', 'subchap.tex'], """\
Sub-document 2a
""")
test.not_up_to_date(arguments = '.')
test.pass_test()
| true
| true
|
790eb702ce487ee58b2ce443409170bbba6308b9
| 333
|
py
|
Python
|
scripts/whatsup.py
|
foretheta/whatsup
|
7a77631d6fc72aa15f2aac3a04d0734be95f6bb5
|
[
"Apache-2.0"
] | 7
|
2020-09-01T20:10:08.000Z
|
2021-08-12T01:48:06.000Z
|
scripts/whatsup.py
|
foretheta/whatsup
|
7a77631d6fc72aa15f2aac3a04d0734be95f6bb5
|
[
"Apache-2.0"
] | 1
|
2021-09-26T06:12:22.000Z
|
2021-09-26T06:12:22.000Z
|
scripts/whatsup.py
|
foretheta/whatsup
|
7a77631d6fc72aa15f2aac3a04d0734be95f6bb5
|
[
"Apache-2.0"
] | null | null | null |
import click
import subprocess
import os
@click.group()
def cli():
...
@cli.command()
def deploy():
click.echo("Running chalice deploy")
output = subprocess.check_output(f"source {os.environ['VIRTUAL_ENV']}/bin/activate && chalice deploy",shell=True)
click.echo(output)
click.echo(os.environ["VIRTUAL_ENV"])
| 18.5
| 117
| 0.693694
|
import click
import subprocess
import os
@click.group()
def cli():
...
@cli.command()
def deploy():
click.echo("Running chalice deploy")
output = subprocess.check_output(f"source {os.environ['VIRTUAL_ENV']}/bin/activate && chalice deploy",shell=True)
click.echo(output)
click.echo(os.environ["VIRTUAL_ENV"])
| true
| true
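A quick note on the click pattern in the whatsup.py record above: @click.group() declares a parent command and @cli.command() registers subcommands on it. The sketch below is a minimal, self-contained check of that wiring using click's own in-process test runner; the `hello` subcommand is a hypothetical stand-in, not part of the original script.

import click
from click.testing import CliRunner

@click.group()
def cli():
    ...

@cli.command()
def hello():
    # A stand-in subcommand; `deploy` above follows the same shape.
    click.echo("hello from a subcommand")

if __name__ == "__main__":
    result = CliRunner().invoke(cli, ["hello"])
    print(result.output)   # -> hello from a subcommand

Worth noting, though not a change to the record: subprocess with shell=True uses /bin/sh on POSIX, where `source` may be unavailable (the portable spelling is `.`), so the original deploy line assumes a bash-like shell.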
|
790eb87068522c15bbbaf75a4eba02c100a77081
| 3,410
|
py
|
Python
|
models/MiDaS.py
|
valgur/GIMP-ML-Hub
|
11bdfcc894ac53543b2178e67eaf29bf9815049f
|
[
"MIT"
] | 16
|
2020-07-20T03:32:08.000Z
|
2021-12-24T12:50:01.000Z
|
models/MiDaS.py
|
valgur/GIMP-ML-Hub
|
11bdfcc894ac53543b2178e67eaf29bf9815049f
|
[
"MIT"
] | 4
|
2020-07-20T12:47:24.000Z
|
2021-04-24T11:00:41.000Z
|
models/MiDaS.py
|
valgur/GIMP-ML
|
11bdfcc894ac53543b2178e67eaf29bf9815049f
|
[
"MIT"
] | 1
|
2020-06-09T10:02:00.000Z
|
2020-06-09T10:02:00.000Z
|
import sys
import numpy as np
import torch
import torch.hub
from PIL import Image
from torchvision.transforms import Compose
from _model_base import ModelBase, handle_alpha
from _util import apply_colormap, to_rgb
# Simplified transforms from
# https://github.com/intel-isl/MiDaS/blob/master/models/transforms.py
class Resize:
def __init__(self, width, height, image_interpolation_method=Image.BICUBIC):
self.__width = width
self.__height = height
self.__multiple_of = 32
self.__image_interpolation_method = image_interpolation_method
def constrain_to_multiple_of(self, x):
return (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
def get_size(self, width, height):
scale_height = self.__height / height
scale_width = self.__width / width
# scale such that output size is upper bound
if scale_width < scale_height:
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
new_height = self.constrain_to_multiple_of(scale_height * height)
new_width = self.constrain_to_multiple_of(scale_width * width)
return new_width, new_height
def __call__(self, image):
width, height = self.get_size(image.shape[1], image.shape[0])
resized = Image.fromarray(image).resize((width, height), self.__image_interpolation_method)
return np.array(resized)
class NormalizeImage:
def __init__(self, mean, std):
self.__mean = mean
self.__std = std
def __call__(self, image):
return (image - self.__mean) / self.__std
class PrepareForNet:
def __call__(self, image):
image = np.transpose(image, (2, 0, 1))
image = np.ascontiguousarray(image, dtype=np.float32)
tensor = torch.from_numpy(image)
return tensor.unsqueeze(0)
class MiDaS(ModelBase):
def __init__(self):
super().__init__()
self.hub_repo = "intel-isl/MiDaS"
def load_model(self):
model = torch.hub.load(self.hub_repo, "MiDaS", pretrained=True)
model.to(self.device)
model.eval()
return model
@staticmethod
def get_transform():
return Compose([
Resize(384, 384),
lambda x: x / 255.,
NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
PrepareForNet()
])
@handle_alpha
@torch.no_grad()
def predict(self, input_image, colormap=None):
h, w, d = input_image.shape
assert d == 3, "Input image must be RGB"
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
transform = self.get_transform()
image_tensor = transform(input_image).to(self.device)
prediction = self.model.forward(image_tensor)
prediction = torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=(h, w),
mode="bicubic",
align_corners=False,
)
disp = prediction.squeeze().cpu().numpy()
disp /= disp.max()
if colormap:
out = apply_colormap(disp, colormap)
else:
out = to_rgb(disp)
return (out * 255).astype(np.uint8)
model = MiDaS()
if __name__ == '__main__':
rpc_url = sys.argv[1]
model.process_rpc(rpc_url)
| 29.145299
| 99
| 0.633724
|
import sys
import numpy as np
import torch
import torch.hub
from PIL import Image
from torchvision.transforms import Compose
from _model_base import ModelBase, handle_alpha
from _util import apply_colormap, to_rgb
class Resize:
def __init__(self, width, height, image_interpolation_method=Image.BICUBIC):
self.__width = width
self.__height = height
self.__multiple_of = 32
self.__image_interpolation_method = image_interpolation_method
def constrain_to_multiple_of(self, x):
return (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
def get_size(self, width, height):
scale_height = self.__height / height
scale_width = self.__width / width
if scale_width < scale_height:
scale_height = scale_width
else:
scale_width = scale_height
new_height = self.constrain_to_multiple_of(scale_height * height)
new_width = self.constrain_to_multiple_of(scale_width * width)
return new_width, new_height
def __call__(self, image):
width, height = self.get_size(image.shape[1], image.shape[0])
resized = Image.fromarray(image).resize((width, height), self.__image_interpolation_method)
return np.array(resized)
class NormalizeImage:
def __init__(self, mean, std):
self.__mean = mean
self.__std = std
def __call__(self, image):
return (image - self.__mean) / self.__std
class PrepareForNet:
def __call__(self, image):
image = np.transpose(image, (2, 0, 1))
image = np.ascontiguousarray(image, dtype=np.float32)
tensor = torch.from_numpy(image)
return tensor.unsqueeze(0)
class MiDaS(ModelBase):
def __init__(self):
super().__init__()
self.hub_repo = "intel-isl/MiDaS"
def load_model(self):
model = torch.hub.load(self.hub_repo, "MiDaS", pretrained=True)
model.to(self.device)
model.eval()
return model
@staticmethod
def get_transform():
return Compose([
Resize(384, 384),
lambda x: x / 255.,
NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
PrepareForNet()
])
@handle_alpha
@torch.no_grad()
def predict(self, input_image, colormap=None):
h, w, d = input_image.shape
assert d == 3, "Input image must be RGB"
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
transform = self.get_transform()
image_tensor = transform(input_image).to(self.device)
prediction = self.model.forward(image_tensor)
prediction = torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=(h, w),
mode="bicubic",
align_corners=False,
)
disp = prediction.squeeze().cpu().numpy()
disp /= disp.max()
if colormap:
out = apply_colormap(disp, colormap)
else:
out = to_rgb(disp)
return (out * 255).astype(np.uint8)
model = MiDaS()
if __name__ == '__main__':
rpc_url = sys.argv[1]
model.process_rpc(rpc_url)
| true
| true
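The Resize logic in the MiDaS record above picks the smaller of the two scale factors (so the output never exceeds the 384x384 target) and snaps each dimension to a multiple of 32. A small numeric check of that arithmetic, with a hypothetical 640x480 input:

import numpy as np

def constrain_to_multiple_of(x, multiple_of=32):
    # Same rounding rule as Resize.constrain_to_multiple_of above.
    return int(np.round(x / multiple_of) * multiple_of)

scale = min(384 / 640, 384 / 480)               # 0.6: width is the binding constraint
new_w = constrain_to_multiple_of(scale * 640)   # 384
new_h = constrain_to_multiple_of(scale * 480)   # 288
print(new_w, new_h)                             # -> 384 288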
|
790eb8a8191fce34f76f66f4d3ffcffcec6ff9d7
| 841
|
py
|
Python
|
examples/plotting/file/candlestick.py
|
goncaloperes/bokeh
|
b857d2d17d7c19779bb0a7be2601d8238fb1d5e9
|
[
"BSD-3-Clause"
] | 1
|
2021-10-30T00:32:00.000Z
|
2021-10-30T00:32:00.000Z
|
examples/plotting/file/candlestick.py
|
Deng-Fankang/bokeh
|
894731860c53b7c9ddd0057dee85cf064278dc0e
|
[
"BSD-3-Clause"
] | 12
|
2020-08-26T20:19:29.000Z
|
2020-08-26T20:19:52.000Z
|
examples/plotting/file/candlestick.py
|
Deng-Fankang/bokeh
|
894731860c53b7c9ddd0057dee85cf064278dc0e
|
[
"BSD-3-Clause"
] | 2
|
2021-01-12T18:22:24.000Z
|
2021-10-30T00:32:02.000Z
|
from math import pi
import pandas as pd
from bokeh.plotting import figure, output_file, show
from bokeh.sampledata.stocks import MSFT
df = pd.DataFrame(MSFT)[:50]
df["date"] = pd.to_datetime(df["date"])
inc = df.close > df.open
dec = df.open > df.close
w = 12*60*60*1000 # half day in ms
TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
p = figure(x_axis_type="datetime", tools=TOOLS, plot_width=1000, title = "MSFT Candlestick")
p.xaxis.major_label_orientation = pi/4
p.grid.grid_line_alpha=0.3
p.segment(df.date, df.high, df.date, df.low, color="black")
p.vbar(df.date[inc], w, df.open[inc], df.close[inc], fill_color="#D5E1DD", line_color="black")
p.vbar(df.date[dec], w, df.open[dec], df.close[dec], fill_color="#F2583E", line_color="black")
output_file("candlestick.html", title="candlestick.py example")
show(p) # open a browser
| 30.035714
| 94
| 0.72176
|
from math import pi
import pandas as pd
from bokeh.plotting import figure, output_file, show
from bokeh.sampledata.stocks import MSFT
df = pd.DataFrame(MSFT)[:50]
df["date"] = pd.to_datetime(df["date"])
inc = df.close > df.open
dec = df.open > df.close
w = 12*60*60*1000
TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
p = figure(x_axis_type="datetime", tools=TOOLS, plot_width=1000, title = "MSFT Candlestick")
p.xaxis.major_label_orientation = pi/4
p.grid.grid_line_alpha=0.3
p.segment(df.date, df.high, df.date, df.low, color="black")
p.vbar(df.date[inc], w, df.open[inc], df.close[inc], fill_color="#D5E1DD", line_color="black")
p.vbar(df.date[dec], w, df.open[dec], df.close[dec], fill_color="#F2583E", line_color="black")
output_file("candlestick.html", title="candlestick.py example")
show(p)
| true
| true
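Two details of the candlestick record above are easy to miss: on a datetime x-axis, vbar widths are expressed in milliseconds, and the inc/dec boolean Series select rising and falling candles respectively. A toy check (the frame below is made up for illustration):

import pandas as pd

w = 12 * 60 * 60 * 1000                  # half a day in ms: 43,200,000
toy = pd.DataFrame({"open": [10.0, 12.0], "close": [12.0, 11.0]})
inc = toy.close > toy.open               # rising candles  -> [True, False]
dec = toy.open > toy.close               # falling candles -> [False, True]
print(w, inc.tolist(), dec.tolist())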
|
790eba203760fc449369cdd11240df8b7774ce60
| 156
|
py
|
Python
|
selenzy_wrapper/selenzy/__init__.py
|
brsynth/selenzy-wrapper
|
4d56d4de4811348477ffeda30b3214d082c096c7
|
[
"MIT"
] | null | null | null |
selenzy_wrapper/selenzy/__init__.py
|
brsynth/selenzy-wrapper
|
4d56d4de4811348477ffeda30b3214d082c096c7
|
[
"MIT"
] | null | null | null |
selenzy_wrapper/selenzy/__init__.py
|
brsynth/selenzy-wrapper
|
4d56d4de4811348477ffeda30b3214d082c096c7
|
[
"MIT"
] | null | null | null |
from .Selenzy import (
readData,
updateScore,
analyse,
seqScore
)
from .Selenzy2 import (
analyse2
)
from .newtax import (
newtax
)
| 12
| 23
| 0.634615
|
from .Selenzy import (
readData,
updateScore,
analyse,
seqScore
)
from .Selenzy2 import (
analyse2
)
from .newtax import (
newtax
)
| true
| true
|
790eba2bab3ca5a0df3081bf1214c98005a123c1
| 7,396
|
py
|
Python
|
openbook_follows/views.py
|
TamaraAbells/okuna-api
|
f87d8e80d2f182c01dbce68155ded0078ee707e4
|
[
"MIT"
] | 164
|
2019-07-29T17:59:06.000Z
|
2022-03-19T21:36:01.000Z
|
openbook_follows/views.py
|
TamaraAbells/okuna-api
|
f87d8e80d2f182c01dbce68155ded0078ee707e4
|
[
"MIT"
] | 188
|
2019-03-16T09:53:25.000Z
|
2019-07-25T14:57:24.000Z
|
openbook_follows/views.py
|
TamaraAbells/okuna-api
|
f87d8e80d2f182c01dbce68155ded0078ee707e4
|
[
"MIT"
] | 80
|
2019-08-03T17:49:08.000Z
|
2022-02-28T16:56:33.000Z
|
# Create your views here.
from django.contrib.auth import get_user_model
from django.db import transaction
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from django.utils.translation import gettext as _
from openbook_common.responses import ApiMessageResponse
from openbook_common.serializers import CommonFollowRequestSerializer
from openbook_moderation.permissions import IsNotSuspended
from openbook_common.utils.helpers import normalise_request_data
from openbook_follows.serializers import FollowUserRequestSerializer, FollowSerializer, \
DeleteFollowSerializer, UpdateFollowSerializer, FollowUserSerializer, RequestToFollowUserSerializer, \
ApproveUserFollowRequestSerializer, RejectUserFollowRequestSerializer, ReceivedFollowRequestsRequestSerializer
class ReceivedFollowRequests(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def get(self, request):
query_params = request.query_params.dict()
user = request.user
serializer = ReceivedFollowRequestsRequestSerializer(data=query_params)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
max_id = data.get('max_id')
count = data.get('count', 10)
received_follow_requests = user.get_received_follow_requests(max_id=max_id).order_by(
'-id')[:count]
response_serializer = CommonFollowRequestSerializer(received_follow_requests, many=True,
context={'request': request})
return Response(response_serializer.data, status=status.HTTP_200_OK)
class RequestToFollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def put(self, request):
serializer = RequestToFollowUserSerializer(data=request.data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_request_to_follow_username = data.get('username')
user = request.user
with transaction.atomic():
follow_request = user.create_follow_request_for_user_with_username(user_to_request_to_follow_username)
response_serializer = CommonFollowRequestSerializer(follow_request, context={"request": request})
return Response(response_serializer.data, status=status.HTTP_201_CREATED)
class CancelRequestToFollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
serializer = RequestToFollowUserSerializer(data=request.data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_cancel_request_for = data.get('username')
user = request.user
with transaction.atomic():
user.delete_follow_request_for_user_with_username(user_to_cancel_request_for)
return ApiMessageResponse(_('Follow request cancelled.'), status=status.HTTP_200_OK)
class ApproveUserFollowRequest(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
serializer = ApproveUserFollowRequestSerializer(data=request.data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_approve_follow_request_from_username = data.get('username')
user = request.user
with transaction.atomic():
user.approve_follow_request_from_user_with_username(
user_username=user_to_approve_follow_request_from_username)
return ApiMessageResponse(_('Follow request approved.'), status=status.HTTP_200_OK)
class RejectUserFollowRequest(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
serializer = RejectUserFollowRequestSerializer(data=request.data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_reject_follow_request_from_username = data.get('username')
user = request.user
with transaction.atomic():
user.reject_follow_request_from_user_with_username(
user_username=user_to_reject_follow_request_from_username)
return ApiMessageResponse(_('Follow request rejected.'), status=status.HTTP_200_OK)
class FollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
request_data = _prepare_request_data_for_validation(request.data)
serializer = FollowUserRequestSerializer(data=request_data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
lists_ids = data.get('lists_ids')
user_to_follow_username = data.get('username')
user = request.user
User = get_user_model()
user_to_follow = User.objects.get(username=user_to_follow_username)
with transaction.atomic():
follow = user.follow_user_with_id(user_to_follow.pk, lists_ids=lists_ids)
response_serializer = FollowSerializer(follow, context={"request": request})
return Response(response_serializer.data, status=status.HTTP_201_CREATED)
class UnfollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
user = request.user
serializer = DeleteFollowSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_unfollow_username = data.get('username')
User = get_user_model()
user_to_unfollow = User.objects.get(username=user_to_unfollow_username)
with transaction.atomic():
user.unfollow_user_with_id(user_to_unfollow.pk)
response_serializer = FollowUserSerializer(user_to_unfollow, context={"request": request})
return Response(response_serializer.data, status=status.HTTP_200_OK)
class UpdateFollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
request_data = _prepare_request_data_for_validation(request.data)
user = request.user
serializer = UpdateFollowSerializer(data=request_data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
lists_ids = data.get('lists_ids')
followed_user_username = data.get('username')
User = get_user_model()
followed_user = User.objects.get(username=followed_user_username)
with transaction.atomic():
follow = user.update_follow_for_user_with_id(followed_user.pk, lists_ids=lists_ids)
response_serializer = FollowSerializer(follow, context={"request": request})
return Response(response_serializer.data, status=status.HTTP_200_OK)
def _prepare_request_data_for_validation(request_data):
request_data_copy = normalise_request_data(request_data)
lists_ids = request_data_copy.get('lists_ids', None)
if isinstance(lists_ids, str):
lists_ids = lists_ids.split(',')
request_data_copy['lists_ids'] = lists_ids
return request_data_copy
| 37.543147
| 114
| 0.741211
|
from django.contrib.auth import get_user_model
from django.db import transaction
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from django.utils.translation import gettext as _
from openbook_common.responses import ApiMessageResponse
from openbook_common.serializers import CommonFollowRequestSerializer
from openbook_moderation.permissions import IsNotSuspended
from openbook_common.utils.helpers import normalise_request_data
from openbook_follows.serializers import FollowUserRequestSerializer, FollowSerializer, \
DeleteFollowSerializer, UpdateFollowSerializer, FollowUserSerializer, RequestToFollowUserSerializer, \
ApproveUserFollowRequestSerializer, RejectUserFollowRequestSerializer, ReceivedFollowRequestsRequestSerializer
class ReceivedFollowRequests(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def get(self, request):
query_params = request.query_params.dict()
user = request.user
serializer = ReceivedFollowRequestsRequestSerializer(data=query_params)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
max_id = data.get('max_id')
count = data.get('count', 10)
received_follow_requests = user.get_received_follow_requests(max_id=max_id).order_by(
'-id')[:count]
response_serializer = CommonFollowRequestSerializer(received_follow_requests, many=True,
context={'request': request})
return Response(response_serializer.data, status=status.HTTP_200_OK)
class RequestToFollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def put(self, request):
serializer = RequestToFollowUserSerializer(data=request.data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_request_to_follow_username = data.get('username')
user = request.user
with transaction.atomic():
follow_request = user.create_follow_request_for_user_with_username(user_to_request_to_follow_username)
response_serializer = CommonFollowRequestSerializer(follow_request, context={"request": request})
return Response(response_serializer.data, status=status.HTTP_201_CREATED)
class CancelRequestToFollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
serializer = RequestToFollowUserSerializer(data=request.data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_cancel_request_for = data.get('username')
user = request.user
with transaction.atomic():
user.delete_follow_request_for_user_with_username(user_to_cancel_request_for)
return ApiMessageResponse(_('Follow request cancelled.'), status=status.HTTP_200_OK)
class ApproveUserFollowRequest(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
serializer = ApproveUserFollowRequestSerializer(data=request.data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_approve_follow_request_from_username = data.get('username')
user = request.user
with transaction.atomic():
user.approve_follow_request_from_user_with_username(
user_username=user_to_approve_follow_request_from_username)
return ApiMessageResponse(_('Follow request approved.'), status=status.HTTP_200_OK)
class RejectUserFollowRequest(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
serializer = RejectUserFollowRequestSerializer(data=request.data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_reject_follow_request_from_username = data.get('username')
user = request.user
with transaction.atomic():
user.reject_follow_request_from_user_with_username(
user_username=user_to_reject_follow_request_from_username)
return ApiMessageResponse(_('Follow request rejected.'), status=status.HTTP_200_OK)
class FollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
request_data = _prepare_request_data_for_validation(request.data)
serializer = FollowUserRequestSerializer(data=request_data, context={"request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
lists_ids = data.get('lists_ids')
user_to_follow_username = data.get('username')
user = request.user
User = get_user_model()
user_to_follow = User.objects.get(username=user_to_follow_username)
with transaction.atomic():
follow = user.follow_user_with_id(user_to_follow.pk, lists_ids=lists_ids)
response_serializer = FollowSerializer(follow, context={"request": request})
return Response(response_serializer.data, status=status.HTTP_201_CREATED)
class UnfollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
user = request.user
serializer = DeleteFollowSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
user_to_unfollow_username = data.get('username')
User = get_user_model()
user_to_unfollow = User.objects.get(username=user_to_unfollow_username)
with transaction.atomic():
user.unfollow_user_with_id(user_to_unfollow.pk)
response_serializer = FollowUserSerializer(user_to_unfollow, context={"request": request})
return Response(response_serializer.data, status=status.HTTP_200_OK)
class UpdateFollowUser(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def post(self, request):
request_data = _prepare_request_data_for_validation(request.data)
user = request.user
serializer = UpdateFollowSerializer(data=request_data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
lists_ids = data.get('lists_ids')
followed_user_username = data.get('username')
User = get_user_model()
followed_user = User.objects.get(username=followed_user_username)
with transaction.atomic():
follow = user.update_follow_for_user_with_id(followed_user.pk, lists_ids=lists_ids)
response_serializer = FollowSerializer(follow, context={"request": request})
return Response(response_serializer.data, status=status.HTTP_200_OK)
def _prepare_request_data_for_validation(request_data):
request_data_copy = normalise_request_data(request_data)
lists_ids = request_data_copy.get('lists_ids', None)
if isinstance(lists_ids, str):
lists_ids = lists_ids.split(',')
request_data_copy['lists_ids'] = lists_ids
return request_data_copy
| true
| true
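All of the views in the record above follow the same DRF shape: validate the incoming payload with a request serializer, do the write inside transaction.atomic(), and serialize the result back out. The helper at the bottom normalises lists_ids, which can arrive as a comma-separated string from form-encoded clients. A standalone sketch of just that normalisation step (normalise_request_data is stubbed with dict(), and the payload values are hypothetical):

def split_lists_ids(request_data):
    data = dict(request_data)        # stand-in for normalise_request_data
    lists_ids = data.get('lists_ids')
    if isinstance(lists_ids, str):
        data['lists_ids'] = lists_ids.split(',')
    return data

print(split_lists_ids({'username': 'joel', 'lists_ids': '1,2,3'}))
# -> {'username': 'joel', 'lists_ids': ['1', '2', '3']}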
|
790eba80fffd8687db251eb9826ebd6207bd2e00
| 1,241
|
py
|
Python
|
setup.py
|
dgrtwo/dplython
|
9c36e9e3b9d6a457e8b974c30f6725abde947f1c
|
[
"MIT"
] | 3
|
2016-05-19T02:58:54.000Z
|
2017-12-01T10:37:55.000Z
|
setup.py
|
dgrtwo/dplython
|
9c36e9e3b9d6a457e8b974c30f6725abde947f1c
|
[
"MIT"
] | null | null | null |
setup.py
|
dgrtwo/dplython
|
9c36e9e3b9d6a457e8b974c30f6725abde947f1c
|
[
"MIT"
] | 1
|
2019-11-04T17:50:54.000Z
|
2019-11-04T17:50:54.000Z
|
"""Install Dplython."""
from setuptools import setup, find_packages
setup(
name="dplython",
version="0.0.4",
description="Dplyr-style operations on top of pandas DataFrame.",
url="https://github.com/dodger487/dplython",
download_url="https://github.com/dodger487/dplython/tarball/0.0.4",
packages=find_packages(),
license="MIT",
keywords="pandas data dplyr",
package_data={"dplython": ["data/diamonds.csv"]},
package_dir={"dplython": "dplython"},
install_requires=["numpy", "pandas", "six"],
author="Chris Riederer",
author_email="OfficialChrisEmail@gmail.com",
maintainer="Chris Riederer",
maintainer_email="OfficialChrisEmail@gmail.com",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Environment :: Web Environment",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Scientific/Engineering",
]
)
| 34.472222
| 71
| 0.639807
|
from setuptools import setup, find_packages
setup(
name="dplython",
version="0.0.4",
description="Dplyr-style operations on top of pandas DataFrame.",
url="https://github.com/dodger487/dplython",
download_url="https://github.com/dodger487/dplython/tarball/0.0.4",
packages=find_packages(),
license="MIT",
keywords="pandas data dplyr",
package_data={"dplython": ["data/diamonds.csv"]},
package_dir={"dplython": "dplython"},
install_requires=["numpy", "pandas", "six"],
author="Chris Riederer",
author_email="OfficialChrisEmail@gmail.com",
maintainer="Chris Riederer",
maintainer_email="OfficialChrisEmail@gmail.com",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Environment :: Web Environment",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Scientific/Engineering",
]
)
| true
| true
|
790eba842712649a0e5573f38254e1b35da936d9
| 764
|
py
|
Python
|
loggingExer/moduleExer/start.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
loggingExer/moduleExer/start.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
loggingExer/moduleExer/start.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
import logging
from logging import basicConfig, getLogger
from lib import do_something
def main():
log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
basicConfig(format=log_fmt,
level="DEBUG")
logger = getLogger(__name__)
for i in range(3):
logger.info("---------------{}-th trial------------".format(i))
logger.debug("debugging...")
logger.info("tell information")
logger.warn("warn it comes from something like ...")
logger.error("Ops some error is occured")
logger.critical("critical event happened")
logger.debug("It's raining again")
logger.info("with hail the size of hailstones")
do_something()
if __name__ == '__main__':
main()
| 28.296296
| 71
| 0.602094
|
import logging
from logging import basicConfig, getLogger
from lib import do_something
def main():
log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
basicConfig(format=log_fmt,
level="DEBUG")
logger = getLogger(__name__)
for i in range(3):
logger.info("---------------{}-th trial------------".format(i))
logger.debug("debugging...")
logger.info("tell information")
logger.warn("warn it comes from something like ...")
logger.error("Ops some error is occured")
logger.critical("critical event happened")
logger.debug("It's raining again")
logger.info("with hail the size of hailstones")
do_something()
if __name__ == '__main__':
main()
| true
| true
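The start.py record above configures the root logger once via basicConfig and then logs at every severity; the configured level acts as a floor below which records are dropped. A minimal sketch, assuming a fresh interpreter (basicConfig is a no-op if the root logger already has handlers):

import logging

logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    level=logging.WARNING,           # raise the floor from DEBUG to WARNING
)
log = logging.getLogger("demo")
log.debug("suppressed")              # below the floor, not emitted
log.info("also suppressed")
log.warning("this one prints")       # note: logger.warn() is a deprecated alias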
|
790ebad5d5bc81f70e6b8a4c28488ca2eefa0b81
| 268
|
py
|
Python
|
src/core/migrations/0040_merge_20190205_0807.py
|
metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
[
"MIT"
] | null | null | null |
src/core/migrations/0040_merge_20190205_0807.py
|
metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
[
"MIT"
] | null | null | null |
src/core/migrations/0040_merge_20190205_0807.py
|
metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.2 on 2019-02-05 08:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0038_merge_20190203_1423'),
('core', '0039_auto_20190205_0609'),
]
operations = [
]
| 17.866667
| 47
| 0.641791
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0038_merge_20190203_1423'),
('core', '0039_auto_20190205_0609'),
]
operations = [
]
| true
| true
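The migration record above is a Django merge migration: it carries no operations and exists only to give two divergent leaf migrations a common descendant, which is what `manage.py makemigrations --merge` generates. A minimal sketch with hypothetical app and migration names:

from django.db import migrations

class Migration(migrations.Migration):
    # Depend on both divergent leaves so the migration graph has one leaf again.
    dependencies = [
        ('app', '0002_branch_a'),
        ('app', '0003_branch_b'),
    ]
    # No schema changes; the merge only re-joins the graph.
    operations = []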
|
790ebc56bf69c688e3cdf8e7fc4e72565848d180
| 13,006
|
py
|
Python
|
tests/components/mobile_app/test_webhook.py
|
SmarthomeNinja/core
|
f4b8a95205ea7d4126fc5e704da532cd8eed937e
|
[
"Apache-2.0"
] | 6
|
2020-07-18T16:33:25.000Z
|
2021-09-26T09:52:04.000Z
|
tests/components/mobile_app/test_webhook.py
|
SmarthomeNinja/core
|
f4b8a95205ea7d4126fc5e704da532cd8eed937e
|
[
"Apache-2.0"
] | 38
|
2020-07-23T07:13:12.000Z
|
2022-03-31T06:01:46.000Z
|
tests/components/mobile_app/test_webhook.py
|
SmarthomeNinja/core
|
f4b8a95205ea7d4126fc5e704da532cd8eed937e
|
[
"Apache-2.0"
] | 3
|
2021-05-18T16:42:18.000Z
|
2021-07-19T22:04:21.000Z
|
"""Webhook tests for mobile_app."""
import logging
import pytest
from homeassistant.components.camera import SUPPORT_STREAM as CAMERA_SUPPORT_STREAM
from homeassistant.components.mobile_app.const import CONF_SECRET
from homeassistant.components.zone import DOMAIN as ZONE_DOMAIN
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
from .const import CALL_SERVICE, FIRE_EVENT, REGISTER_CLEARTEXT, RENDER_TEMPLATE, UPDATE
from tests.async_mock import patch
from tests.common import async_mock_service
_LOGGER = logging.getLogger(__name__)
def encrypt_payload(secret_key, payload):
"""Return a encrypted payload given a key and dictionary of data."""
try:
from nacl.secret import SecretBox
from nacl.encoding import Base64Encoder
except (ImportError, OSError):
pytest.skip("libnacl/libsodium is not installed")
return
import json
keylen = SecretBox.KEY_SIZE
prepped_key = secret_key.encode("utf-8")
prepped_key = prepped_key[:keylen]
prepped_key = prepped_key.ljust(keylen, b"\0")
payload = json.dumps(payload).encode("utf-8")
return (
SecretBox(prepped_key).encrypt(payload, encoder=Base64Encoder).decode("utf-8")
)
def decrypt_payload(secret_key, encrypted_data):
"""Return a decrypted payload given a key and a string of encrypted data."""
try:
from nacl.secret import SecretBox
from nacl.encoding import Base64Encoder
except (ImportError, OSError):
pytest.skip("libnacl/libsodium is not installed")
return
import json
keylen = SecretBox.KEY_SIZE
prepped_key = secret_key.encode("utf-8")
prepped_key = prepped_key[:keylen]
prepped_key = prepped_key.ljust(keylen, b"\0")
decrypted_data = SecretBox(prepped_key).decrypt(
encrypted_data, encoder=Base64Encoder
)
decrypted_data = decrypted_data.decode("utf-8")
return json.loads(decrypted_data)
async def test_webhook_handle_render_template(create_registrations, webhook_client):
"""Test that we render templates properly."""
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json=RENDER_TEMPLATE,
)
assert resp.status == 200
json = await resp.json()
assert json == {"one": "Hello world"}
async def test_webhook_handle_call_services(hass, create_registrations, webhook_client):
"""Test that we call services properly."""
calls = async_mock_service(hass, "test", "mobile_app")
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json=CALL_SERVICE,
)
assert resp.status == 200
assert len(calls) == 1
async def test_webhook_handle_fire_event(hass, create_registrations, webhook_client):
"""Test that we can fire events."""
events = []
@callback
def store_event(event):
"""Helepr to store events."""
events.append(event)
hass.bus.async_listen("test_event", store_event)
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]), json=FIRE_EVENT
)
assert resp.status == 200
json = await resp.json()
assert json == {}
assert len(events) == 1
assert events[0].data["hello"] == "yo world"
async def test_webhook_update_registration(webhook_client, authed_api_client):
"""Test that a we can update an existing registration via webhook."""
register_resp = await authed_api_client.post(
"/api/mobile_app/registrations", json=REGISTER_CLEARTEXT
)
assert register_resp.status == 201
register_json = await register_resp.json()
webhook_id = register_json[CONF_WEBHOOK_ID]
update_container = {"type": "update_registration", "data": UPDATE}
update_resp = await webhook_client.post(
f"/api/webhook/{webhook_id}", json=update_container
)
assert update_resp.status == 200
update_json = await update_resp.json()
assert update_json["app_version"] == "2.0.0"
assert CONF_WEBHOOK_ID not in update_json
assert CONF_SECRET not in update_json
async def test_webhook_handle_get_zones(hass, create_registrations, webhook_client):
"""Test that we can get zones properly."""
await async_setup_component(
hass, ZONE_DOMAIN, {ZONE_DOMAIN: {}},
)
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json={"type": "get_zones"},
)
assert resp.status == 200
json = await resp.json()
assert len(json) == 1
zones = sorted(json, key=lambda entry: entry["entity_id"])
assert zones[0]["entity_id"] == "zone.home"
async def test_webhook_handle_get_config(hass, create_registrations, webhook_client):
"""Test that we can get config properly."""
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json={"type": "get_config"},
)
assert resp.status == 200
json = await resp.json()
if "components" in json:
json["components"] = set(json["components"])
if "whitelist_external_dirs" in json:
json["whitelist_external_dirs"] = set(json["whitelist_external_dirs"])
hass_config = hass.config.as_dict()
expected_dict = {
"latitude": hass_config["latitude"],
"longitude": hass_config["longitude"],
"elevation": hass_config["elevation"],
"unit_system": hass_config["unit_system"],
"location_name": hass_config["location_name"],
"time_zone": hass_config["time_zone"],
"components": hass_config["components"],
"version": hass_config["version"],
"theme_color": "#03A9F4", # Default frontend theme color
}
assert expected_dict == json
async def test_webhook_returns_error_incorrect_json(
webhook_client, create_registrations, caplog
):
"""Test that an error is returned when JSON is invalid."""
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]), data="not json"
)
assert resp.status == 400
json = await resp.json()
assert json == {}
assert "invalid JSON" in caplog.text
async def test_webhook_handle_decryption(webhook_client, create_registrations):
"""Test that we can encrypt/decrypt properly."""
key = create_registrations[0]["secret"]
data = encrypt_payload(key, RENDER_TEMPLATE["data"])
container = {"type": "render_template", "encrypted": True, "encrypted_data": data}
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[0]["webhook_id"]), json=container
)
assert resp.status == 200
webhook_json = await resp.json()
assert "encrypted_data" in webhook_json
decrypted_data = decrypt_payload(key, webhook_json["encrypted_data"])
assert decrypted_data == {"one": "Hello world"}
async def test_webhook_requires_encryption(webhook_client, create_registrations):
"""Test that encrypted registrations only accept encrypted data."""
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[0]["webhook_id"]),
json=RENDER_TEMPLATE,
)
assert resp.status == 400
webhook_json = await resp.json()
assert "error" in webhook_json
assert webhook_json["success"] is False
assert webhook_json["error"]["code"] == "encryption_required"
async def test_webhook_update_location(hass, webhook_client, create_registrations):
"""Test that location can be updated."""
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json={
"type": "update_location",
"data": {"gps": [1, 2], "gps_accuracy": 10, "altitude": -10},
},
)
assert resp.status == 200
state = hass.states.get("device_tracker.test_1_2")
assert state is not None
assert state.attributes["latitude"] == 1.0
assert state.attributes["longitude"] == 2.0
assert state.attributes["gps_accuracy"] == 10
assert state.attributes["altitude"] == -10
async def test_webhook_enable_encryption(hass, webhook_client, create_registrations):
"""Test that encryption can be added to a reg initially created without."""
webhook_id = create_registrations[1]["webhook_id"]
enable_enc_resp = await webhook_client.post(
f"/api/webhook/{webhook_id}", json={"type": "enable_encryption"},
)
assert enable_enc_resp.status == 200
enable_enc_json = await enable_enc_resp.json()
assert len(enable_enc_json) == 1
assert CONF_SECRET in enable_enc_json
key = enable_enc_json["secret"]
enc_required_resp = await webhook_client.post(
f"/api/webhook/{webhook_id}", json=RENDER_TEMPLATE,
)
assert enc_required_resp.status == 400
enc_required_json = await enc_required_resp.json()
assert "error" in enc_required_json
assert enc_required_json["success"] is False
assert enc_required_json["error"]["code"] == "encryption_required"
enc_data = encrypt_payload(key, RENDER_TEMPLATE["data"])
container = {
"type": "render_template",
"encrypted": True,
"encrypted_data": enc_data,
}
enc_resp = await webhook_client.post(f"/api/webhook/{webhook_id}", json=container)
assert enc_resp.status == 200
enc_json = await enc_resp.json()
assert "encrypted_data" in enc_json
decrypted_data = decrypt_payload(key, enc_json["encrypted_data"])
assert decrypted_data == {"one": "Hello world"}
async def test_webhook_camera_stream_non_existent(
hass, create_registrations, webhook_client
):
"""Test fetching camera stream URLs for a non-existent camera."""
webhook_id = create_registrations[1]["webhook_id"]
resp = await webhook_client.post(
f"/api/webhook/{webhook_id}",
json={
"type": "stream_camera",
"data": {"camera_entity_id": "camera.doesnt_exist"},
},
)
assert resp.status == 400
webhook_json = await resp.json()
assert webhook_json["success"] is False
async def test_webhook_camera_stream_non_hls(
hass, create_registrations, webhook_client
):
"""Test fetching camera stream URLs for a non-HLS/stream-supporting camera."""
hass.states.async_set("camera.non_stream_camera", "idle", {"supported_features": 0})
webhook_id = create_registrations[1]["webhook_id"]
resp = await webhook_client.post(
f"/api/webhook/{webhook_id}",
json={
"type": "stream_camera",
"data": {"camera_entity_id": "camera.non_stream_camera"},
},
)
assert resp.status == 200
webhook_json = await resp.json()
assert webhook_json["hls_path"] is None
assert (
webhook_json["mjpeg_path"]
== "/api/camera_proxy_stream/camera.non_stream_camera"
)
async def test_webhook_camera_stream_stream_available(
hass, create_registrations, webhook_client
):
"""Test fetching camera stream URLs for an HLS/stream-supporting camera."""
hass.states.async_set(
"camera.stream_camera", "idle", {"supported_features": CAMERA_SUPPORT_STREAM}
)
webhook_id = create_registrations[1]["webhook_id"]
with patch(
"homeassistant.components.camera.async_request_stream",
return_value="/api/streams/some_hls_stream",
):
resp = await webhook_client.post(
f"/api/webhook/{webhook_id}",
json={
"type": "stream_camera",
"data": {"camera_entity_id": "camera.stream_camera"},
},
)
assert resp.status == 200
webhook_json = await resp.json()
assert webhook_json["hls_path"] == "/api/streams/some_hls_stream"
assert webhook_json["mjpeg_path"] == "/api/camera_proxy_stream/camera.stream_camera"
async def test_webhook_camera_stream_stream_available_but_errors(
hass, create_registrations, webhook_client
):
"""Test fetching camera stream URLs for an HLS/stream-supporting camera but that streaming errors."""
hass.states.async_set(
"camera.stream_camera", "idle", {"supported_features": CAMERA_SUPPORT_STREAM}
)
webhook_id = create_registrations[1]["webhook_id"]
with patch(
"homeassistant.components.camera.async_request_stream",
side_effect=HomeAssistantError(),
):
resp = await webhook_client.post(
f"/api/webhook/{webhook_id}",
json={
"type": "stream_camera",
"data": {"camera_entity_id": "camera.stream_camera"},
},
)
assert resp.status == 200
webhook_json = await resp.json()
assert webhook_json["hls_path"] is None
assert webhook_json["mjpeg_path"] == "/api/camera_proxy_stream/camera.stream_camera"
| 31.799511
| 105
| 0.685068
|
import logging
import pytest
from homeassistant.components.camera import SUPPORT_STREAM as CAMERA_SUPPORT_STREAM
from homeassistant.components.mobile_app.const import CONF_SECRET
from homeassistant.components.zone import DOMAIN as ZONE_DOMAIN
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
from .const import CALL_SERVICE, FIRE_EVENT, REGISTER_CLEARTEXT, RENDER_TEMPLATE, UPDATE
from tests.async_mock import patch
from tests.common import async_mock_service
_LOGGER = logging.getLogger(__name__)
def encrypt_payload(secret_key, payload):
try:
from nacl.secret import SecretBox
from nacl.encoding import Base64Encoder
except (ImportError, OSError):
pytest.skip("libnacl/libsodium is not installed")
return
import json
keylen = SecretBox.KEY_SIZE
prepped_key = secret_key.encode("utf-8")
prepped_key = prepped_key[:keylen]
prepped_key = prepped_key.ljust(keylen, b"\0")
payload = json.dumps(payload).encode("utf-8")
return (
SecretBox(prepped_key).encrypt(payload, encoder=Base64Encoder).decode("utf-8")
)
def decrypt_payload(secret_key, encrypted_data):
try:
from nacl.secret import SecretBox
from nacl.encoding import Base64Encoder
except (ImportError, OSError):
pytest.skip("libnacl/libsodium is not installed")
return
import json
keylen = SecretBox.KEY_SIZE
prepped_key = secret_key.encode("utf-8")
prepped_key = prepped_key[:keylen]
prepped_key = prepped_key.ljust(keylen, b"\0")
decrypted_data = SecretBox(prepped_key).decrypt(
encrypted_data, encoder=Base64Encoder
)
decrypted_data = decrypted_data.decode("utf-8")
return json.loads(decrypted_data)
async def test_webhook_handle_render_template(create_registrations, webhook_client):
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json=RENDER_TEMPLATE,
)
assert resp.status == 200
json = await resp.json()
assert json == {"one": "Hello world"}
async def test_webhook_handle_call_services(hass, create_registrations, webhook_client):
calls = async_mock_service(hass, "test", "mobile_app")
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json=CALL_SERVICE,
)
assert resp.status == 200
assert len(calls) == 1
async def test_webhook_handle_fire_event(hass, create_registrations, webhook_client):
events = []
@callback
def store_event(event):
events.append(event)
hass.bus.async_listen("test_event", store_event)
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]), json=FIRE_EVENT
)
assert resp.status == 200
json = await resp.json()
assert json == {}
assert len(events) == 1
assert events[0].data["hello"] == "yo world"
async def test_webhook_update_registration(webhook_client, authed_api_client):
register_resp = await authed_api_client.post(
"/api/mobile_app/registrations", json=REGISTER_CLEARTEXT
)
assert register_resp.status == 201
register_json = await register_resp.json()
webhook_id = register_json[CONF_WEBHOOK_ID]
update_container = {"type": "update_registration", "data": UPDATE}
update_resp = await webhook_client.post(
f"/api/webhook/{webhook_id}", json=update_container
)
assert update_resp.status == 200
update_json = await update_resp.json()
assert update_json["app_version"] == "2.0.0"
assert CONF_WEBHOOK_ID not in update_json
assert CONF_SECRET not in update_json
async def test_webhook_handle_get_zones(hass, create_registrations, webhook_client):
await async_setup_component(
hass, ZONE_DOMAIN, {ZONE_DOMAIN: {}},
)
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json={"type": "get_zones"},
)
assert resp.status == 200
json = await resp.json()
assert len(json) == 1
zones = sorted(json, key=lambda entry: entry["entity_id"])
assert zones[0]["entity_id"] == "zone.home"
async def test_webhook_handle_get_config(hass, create_registrations, webhook_client):
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json={"type": "get_config"},
)
assert resp.status == 200
json = await resp.json()
if "components" in json:
json["components"] = set(json["components"])
if "whitelist_external_dirs" in json:
json["whitelist_external_dirs"] = set(json["whitelist_external_dirs"])
hass_config = hass.config.as_dict()
expected_dict = {
"latitude": hass_config["latitude"],
"longitude": hass_config["longitude"],
"elevation": hass_config["elevation"],
"unit_system": hass_config["unit_system"],
"location_name": hass_config["location_name"],
"time_zone": hass_config["time_zone"],
"components": hass_config["components"],
"version": hass_config["version"],
"theme_color": "#03A9F4",
}
assert expected_dict == json
async def test_webhook_returns_error_incorrect_json(
webhook_client, create_registrations, caplog
):
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]), data="not json"
)
assert resp.status == 400
json = await resp.json()
assert json == {}
assert "invalid JSON" in caplog.text
async def test_webhook_handle_decryption(webhook_client, create_registrations):
key = create_registrations[0]["secret"]
data = encrypt_payload(key, RENDER_TEMPLATE["data"])
container = {"type": "render_template", "encrypted": True, "encrypted_data": data}
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[0]["webhook_id"]), json=container
)
assert resp.status == 200
webhook_json = await resp.json()
assert "encrypted_data" in webhook_json
decrypted_data = decrypt_payload(key, webhook_json["encrypted_data"])
assert decrypted_data == {"one": "Hello world"}
async def test_webhook_requires_encryption(webhook_client, create_registrations):
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[0]["webhook_id"]),
json=RENDER_TEMPLATE,
)
assert resp.status == 400
webhook_json = await resp.json()
assert "error" in webhook_json
assert webhook_json["success"] is False
assert webhook_json["error"]["code"] == "encryption_required"
async def test_webhook_update_location(hass, webhook_client, create_registrations):
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json={
"type": "update_location",
"data": {"gps": [1, 2], "gps_accuracy": 10, "altitude": -10},
},
)
assert resp.status == 200
state = hass.states.get("device_tracker.test_1_2")
assert state is not None
assert state.attributes["latitude"] == 1.0
assert state.attributes["longitude"] == 2.0
assert state.attributes["gps_accuracy"] == 10
assert state.attributes["altitude"] == -10
async def test_webhook_enable_encryption(hass, webhook_client, create_registrations):
webhook_id = create_registrations[1]["webhook_id"]
enable_enc_resp = await webhook_client.post(
f"/api/webhook/{webhook_id}", json={"type": "enable_encryption"},
)
assert enable_enc_resp.status == 200
enable_enc_json = await enable_enc_resp.json()
assert len(enable_enc_json) == 1
assert CONF_SECRET in enable_enc_json
key = enable_enc_json["secret"]
enc_required_resp = await webhook_client.post(
f"/api/webhook/{webhook_id}", json=RENDER_TEMPLATE,
)
assert enc_required_resp.status == 400
enc_required_json = await enc_required_resp.json()
assert "error" in enc_required_json
assert enc_required_json["success"] is False
assert enc_required_json["error"]["code"] == "encryption_required"
enc_data = encrypt_payload(key, RENDER_TEMPLATE["data"])
container = {
"type": "render_template",
"encrypted": True,
"encrypted_data": enc_data,
}
enc_resp = await webhook_client.post(f"/api/webhook/{webhook_id}", json=container)
assert enc_resp.status == 200
enc_json = await enc_resp.json()
assert "encrypted_data" in enc_json
decrypted_data = decrypt_payload(key, enc_json["encrypted_data"])
assert decrypted_data == {"one": "Hello world"}
async def test_webhook_camera_stream_non_existent(
hass, create_registrations, webhook_client
):
webhook_id = create_registrations[1]["webhook_id"]
resp = await webhook_client.post(
f"/api/webhook/{webhook_id}",
json={
"type": "stream_camera",
"data": {"camera_entity_id": "camera.doesnt_exist"},
},
)
assert resp.status == 400
webhook_json = await resp.json()
assert webhook_json["success"] is False
async def test_webhook_camera_stream_non_hls(
hass, create_registrations, webhook_client
):
hass.states.async_set("camera.non_stream_camera", "idle", {"supported_features": 0})
webhook_id = create_registrations[1]["webhook_id"]
resp = await webhook_client.post(
f"/api/webhook/{webhook_id}",
json={
"type": "stream_camera",
"data": {"camera_entity_id": "camera.non_stream_camera"},
},
)
assert resp.status == 200
webhook_json = await resp.json()
assert webhook_json["hls_path"] is None
assert (
webhook_json["mjpeg_path"]
== "/api/camera_proxy_stream/camera.non_stream_camera"
)
async def test_webhook_camera_stream_stream_available(
hass, create_registrations, webhook_client
):
hass.states.async_set(
"camera.stream_camera", "idle", {"supported_features": CAMERA_SUPPORT_STREAM}
)
webhook_id = create_registrations[1]["webhook_id"]
with patch(
"homeassistant.components.camera.async_request_stream",
return_value="/api/streams/some_hls_stream",
):
resp = await webhook_client.post(
f"/api/webhook/{webhook_id}",
json={
"type": "stream_camera",
"data": {"camera_entity_id": "camera.stream_camera"},
},
)
assert resp.status == 200
webhook_json = await resp.json()
assert webhook_json["hls_path"] == "/api/streams/some_hls_stream"
assert webhook_json["mjpeg_path"] == "/api/camera_proxy_stream/camera.stream_camera"
async def test_webhook_camera_stream_stream_available_but_errors(
hass, create_registrations, webhook_client
):
hass.states.async_set(
"camera.stream_camera", "idle", {"supported_features": CAMERA_SUPPORT_STREAM}
)
webhook_id = create_registrations[1]["webhook_id"]
with patch(
"homeassistant.components.camera.async_request_stream",
side_effect=HomeAssistantError(),
):
resp = await webhook_client.post(
f"/api/webhook/{webhook_id}",
json={
"type": "stream_camera",
"data": {"camera_entity_id": "camera.stream_camera"},
},
)
assert resp.status == 200
webhook_json = await resp.json()
assert webhook_json["hls_path"] is None
assert webhook_json["mjpeg_path"] == "/api/camera_proxy_stream/camera.stream_camera"
| true
| true
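The encrypt/decrypt helpers in the webhook test record above prepare the key by truncating or NUL-padding the shared secret to SecretBox.KEY_SIZE (32 bytes), then round-trip JSON through NaCl secretbox with base64 framing. A self-contained round-trip using the same calls (requires PyNaCl; the secret string is made up):

import json
from nacl.secret import SecretBox
from nacl.encoding import Base64Encoder

secret = "not-a-real-secret"                                   # hypothetical
key = secret.encode("utf-8")[:SecretBox.KEY_SIZE].ljust(SecretBox.KEY_SIZE, b"\0")
box = SecretBox(key)

token = box.encrypt(json.dumps({"one": "Hello world"}).encode("utf-8"),
                    encoder=Base64Encoder)
plain = json.loads(box.decrypt(token, encoder=Base64Encoder).decode("utf-8"))
print(plain)                                                   # -> {'one': 'Hello world'}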
|
790ebc8206dd9a84200612f2e11974711b763a6d
| 1,284
|
py
|
Python
|
eval/obs_and_M_split.py
|
maxinye/AIC_Weather_Forecasting
|
ef43a9371ecfce27d52b4102adf940a0cf625d7b
|
[
"MIT"
] | 19
|
2020-04-03T09:59:57.000Z
|
2022-03-17T09:12:03.000Z
|
eval/obs_and_M_split.py
|
jhhuang96/AIC_Weather_Forecasting
|
e04b9b3570d7377847b35f8ba67943988e24744d
|
[
"MIT"
] | 1
|
2021-11-09T07:40:49.000Z
|
2021-11-09T07:40:49.000Z
|
eval/obs_and_M_split.py
|
jhhuang96/AIC_Weather_Forecasting
|
e04b9b3570d7377847b35f8ba67943988e24744d
|
[
"MIT"
] | 10
|
2020-06-29T11:56:44.000Z
|
2022-03-17T09:12:04.000Z
|
import pandas as pd
from datetime import datetime
import os
def datelist(beginDate, endDate):
date_l=[datetime.strftime(x,'%Y-%m-%d') for x in list(pd.date_range(start=beginDate, end=endDate))]
return date_l
begin_date='2018-10-28'
end_date='2018-11-03'
dates=datelist(begin_date,end_date)
if not os.path.exists('obs'):
os.mkdir('obs')
if not os.path.exists('fore'):
os.mkdir('fore')
if __name__=='__main__':
for date in dates:
obs_and_M_filepath = 'obs_and_M/' + date + '.csv'
obs_and_M = pd.read_csv(obs_and_M_filepath)
print(obs_and_M.info())
for col in obs_and_M.columns:
obs_and_M[col] = obs_and_M[col].fillna(-9999)
        obs_and_M = obs_and_M.round(3)  # round() returns a new frame; assign it back
obs_and_M['FORE_data'] = ' ' + obs_and_M['FORE_data']
obs = pd.DataFrame(obs_and_M, columns=['FORE_data', 't2m_obs', 'rh2m_obs', 'w10m_obs'])
obs.columns = [' OBS_data', ' t2m', ' rh2m', ' w10m']
obs.to_csv('obs/' + date + '_1_obs.csv', index=False, float_format='%.03f')
M = pd.DataFrame(obs_and_M, columns=['FORE_data', 't2m_M', 'rh2m_M', 'w10m_M'])
M.columns = ['FORE_data', ' t2m', ' rh2m', ' w10m']
M.to_csv('fore/' + date + '_1_M.csv', index=False, float_format='%.03f')
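# Hedged sanity check (illustrative only; safe to delete): datelist() is
# inclusive of both endpoints and yields 'YYYY-MM-DD' strings.
assert datelist('2018-10-28', '2018-10-30') == [
    '2018-10-28', '2018-10-29', '2018-10-30']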
| 38.909091
| 103
| 0.613707
|
import pandas as pd
from datetime import datetime
import os
def datelist(beginDate, endDate):
    date_l = [datetime.strftime(x, '%Y-%m-%d') for x in pd.date_range(start=beginDate, end=endDate)]
return date_l
begin_date='2018-10-28'
end_date='2018-11-03'
dates=datelist(begin_date,end_date)
if not os.path.exists('obs'):
os.mkdir('obs')
if not os.path.exists('fore'):
os.mkdir('fore')
if __name__=='__main__':
for date in dates:
obs_and_M_filepath = 'obs_and_M/' + date + '.csv'
obs_and_M = pd.read_csv(obs_and_M_filepath)
        obs_and_M.info()
for col in obs_and_M.columns:
obs_and_M[col] = obs_and_M[col].fillna(-9999)
        obs_and_M = obs_and_M.round(3)
obs_and_M['FORE_data'] = ' ' + obs_and_M['FORE_data']
obs = pd.DataFrame(obs_and_M, columns=['FORE_data', 't2m_obs', 'rh2m_obs', 'w10m_obs'])
obs.columns = [' OBS_data', ' t2m', ' rh2m', ' w10m']
obs.to_csv('obs/' + date + '_1_obs.csv', index=False, float_format='%.03f')
M = pd.DataFrame(obs_and_M, columns=['FORE_data', 't2m_M', 'rh2m_M', 'w10m_M'])
M.columns = ['FORE_data', ' t2m', ' rh2m', ' w10m']
M.to_csv('fore/' + date + '_1_M.csv', index=False, float_format='%.03f')
| true
| true
|
790ebd430cd808d0de68b41c62833b8afbeea970
| 1,258
|
py
|
Python
|
nc_vote/vote/views.py
|
dave-a-fox/VoteNC2020
|
ea4735a927d74a867287ded7340f04485377f3fc
|
[
"MIT"
] | null | null | null |
nc_vote/vote/views.py
|
dave-a-fox/VoteNC2020
|
ea4735a927d74a867287ded7340f04485377f3fc
|
[
"MIT"
] | null | null | null |
nc_vote/vote/views.py
|
dave-a-fox/VoteNC2020
|
ea4735a927d74a867287ded7340f04485377f3fc
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
from .models import BallotText, Candidate, District
def index(request):
    districts = District.objects.all()
context = {'districts': districts}
return render(request, 'vote/index.html', context)
def ballot(request, district_num):
    ballot_list = BallotText.objects.all()
context = {'ballot_list': ballot_list}
return render(request, 'vote/'+str(district_num)+'/ballot.html', context)
def votetotals(request):
    candidates = Candidate.objects.all()
return render(request, 'vote/votetotals.html', {"candidates": candidates})
def tally(request):
    if request.method == "POST":
        posted = request.POST  # renamed from `list`, which shadowed the built-in
        all_candidates = Candidate.objects.all()
        for _, name in posted.items():
            print(name)
            for candidate in all_candidates:
                print(candidate.candidate_text)
                if candidate.candidate_text == name:
                    print(candidate.candidate_text + " " + name)
                    candidate.votes += 1
                    candidate.save()
        return render(request, 'vote/votetotals.html')
else:
return render(request, 'vote/votetotals.html')
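# Hedged alternative sketch (hypothetical helper, not wired to any URL): the
# nested loops in tally() scale as O(candidates x posted fields); a one-pass
# name -> row lookup does the same counting.
def tally_fast(request):
    if request.method == "POST":
        by_text = {c.candidate_text: c for c in Candidate.objects.all()}
        for name in request.POST.values():
            candidate = by_text.get(name)
            if candidate is not None:
                candidate.votes += 1
                candidate.save()
    return render(request, 'vote/votetotals.html')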
| 26.208333
| 78
| 0.636725
|
from django.shortcuts import render
from .models import BallotText, Candidate, District
def index(request):
    districts = District.objects.all()
context = {'districts': districts}
return render(request, 'vote/index.html', context)
def ballot(request, district_num):
    ballot_list = BallotText.objects.all()
context = {'ballot_list': ballot_list}
return render(request, 'vote/'+str(district_num)+'/ballot.html', context)
def votetotals(request):
    candidates = Candidate.objects.all()
return render(request, 'vote/votetotals.html', {"candidates": candidates})
def tally(request):
    if request.method == "POST":
        posted = request.POST
        all_candidates = Candidate.objects.all()
        for _, name in posted.items():
            print(name)
            for candidate in all_candidates:
                print(candidate.candidate_text)
                if candidate.candidate_text == name:
                    print(candidate.candidate_text + " " + name)
                    candidate.votes += 1
                    candidate.save()
        return render(request, 'vote/votetotals.html')
else:
return render(request, 'vote/votetotals.html')
| true
| true
|
790ebd77437f194de1b476c2101e95f29fc52fd1
| 363
|
py
|
Python
|
client/data_manage/data_dir.py
|
ivigns/mipt-cis-docs
|
fe96e7630b4d127dfe241a1aedfbd14b692b4996
|
[
"MIT"
] | null | null | null |
client/data_manage/data_dir.py
|
ivigns/mipt-cis-docs
|
fe96e7630b4d127dfe241a1aedfbd14b692b4996
|
[
"MIT"
] | 9
|
2021-03-09T18:42:13.000Z
|
2021-05-06T15:34:16.000Z
|
client/data_manage/data_dir.py
|
ivigns/mipt-cis-docs
|
fe96e7630b4d127dfe241a1aedfbd14b692b4996
|
[
"MIT"
] | null | null | null |
import os
import PyQt5.QtCore as qc
DATA_DIR = 'MiptCisDocs'
WRITABLE_LOCATION = qc.QStandardPaths.writableLocation(
qc.QStandardPaths.StandardLocation.AppDataLocation
)
def get_data_dir() -> str:
data_dir = os.path.abspath(os.path.join(WRITABLE_LOCATION, DATA_DIR))
    os.makedirs(data_dir, exist_ok=True)  # also creates missing parents; avoids the exists/mkdir race
return data_dir
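# Hedged usage sketch: callers are expected to join file names onto the
# directory this helper returns, e.g.
#   settings_path = os.path.join(get_data_dir(), 'settings.json')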
| 22.6875
| 73
| 0.749311
|
import os
import PyQt5.QtCore as qc
DATA_DIR = 'MiptCisDocs'
WRITABLE_LOCATION = qc.QStandardPaths.writableLocation(
qc.QStandardPaths.StandardLocation.AppDataLocation
)
def get_data_dir() -> str:
data_dir = os.path.abspath(os.path.join(WRITABLE_LOCATION, DATA_DIR))
    os.makedirs(data_dir, exist_ok=True)
return data_dir
| true
| true
|
790ebd83489431c1fcb29bf8324aa14264a7f595
| 21,412
|
py
|
Python
|
deeptrio/variant_caller_test.py
|
tahashmi/deepvariant
|
441c1809d3290f4a20b29a0a0bbf8ecfb929a6e3
|
[
"BSD-3-Clause"
] | 4
|
2019-03-30T13:25:25.000Z
|
2020-10-14T18:47:21.000Z
|
deeptrio/variant_caller_test.py
|
FrogEnthusiast7/deepvariant
|
84516dfacd1ed856a34507becb21848aa12e77a8
|
[
"BSD-3-Clause"
] | 1
|
2021-06-18T15:04:47.000Z
|
2021-06-18T15:04:47.000Z
|
deeptrio/variant_caller_test.py
|
FrogEnthusiast7/deepvariant
|
84516dfacd1ed856a34507becb21848aa12e77a8
|
[
"BSD-3-Clause"
] | 1
|
2019-09-04T16:59:18.000Z
|
2019-09-04T16:59:18.000Z
|
# Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for deepvariant .variant_caller."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
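# Hedged note (an inference from the code below, not documented in this file):
# this guard works around `google` namespace-package clashes so the bundled
# protobuf can be imported cleanly.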
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
del sys.modules['google']
from absl.testing import absltest
from absl.testing import parameterized
import mock
import numpy as np
import numpy.testing as npt
from deeptrio import testdata
from deeptrio import variant_caller
from deepvariant.protos import deepvariant_pb2
from third_party.nucleus.util import variant_utils
from third_party.nucleus.util import variantcall_utils
def setUpModule():
testdata.init()
def _reference_model_options(p_error, max_gq, gq_resolution=1):
return deepvariant_pb2.VariantCallerOptions(
sample_name='UNKNOWN',
p_error=p_error,
max_gq=max_gq,
gq_resolution=gq_resolution,
ploidy=2)
class PlaceholderVariantCaller(variant_caller.VariantCaller):
"""A placeholder VariantCaller.
This class provides a get_candidates implementation and so allows
the base class to be instantiated and its methods tested.
"""
def __init__(self,
p_error,
max_gq,
gq_resolution=1,
use_cache_table=False,
max_cache_coverage=100):
super(PlaceholderVariantCaller, self).__init__(
options=_reference_model_options(p_error, max_gq, gq_resolution),
use_cache_table=use_cache_table,
max_cache_coverage=max_cache_coverage)
def get_candidates(self, allele_counter, target_sample):
return None
class VariantCallerTests(parameterized.TestCase):
def fake_allele_counter(self, start_pos, counts):
allele_counter = mock.Mock()
# pylint: disable=g-complex-comprehension
allele_counter.summary_counts.return_value = [
deepvariant_pb2.AlleleCountSummary(
ref_supporting_read_count=n_ref,
total_read_count=n_ref + n_alt,
ref_base=ref,
reference_name='chr1',
position=start_pos + i)
for i, (n_alt, n_ref, ref) in enumerate(counts)
]
allele_counter.counts.return_value = counts
# pylint: enable=g-complex-comprehension
return allele_counter
# R code to produce the testdata expectation table.
# expected <- function(n_ref, n_alt, perr, max_gq = 100) {
# p_ref <- dbinom(n_alt, n_ref, perr)
# p_het <- dbinom(n_alt, n_ref, 0.5)
# p_alt <- dbinom(n_ref - n_alt, n_ref, perr)
# raw <- c(p_ref, p_het, p_alt)
# norm <- raw / sum(raw)
# gq = min(floor(-10 * log10(1 - norm[1])), max_gq)
# likelihoods = paste(sprintf("%.6f", log10(norm)), collapse=", ")
# likelihoods = paste("[", likelihoods, "]", sep="")
# result = paste(n_ref, n_alt, perr, 100, 1, likelihoods, gq, sep=", ")
# cat(paste("[", result, "],\n", sep=""))
# }
#
# for (n in c(10, 20)) {
# for (k in seq(0, n)) {
# expected(n, k, 0.01)
# }
# }
#
# for (perr in c(0.1, 0.01, 0.001, 0.0001)) {
# expected(10, 0, perr)
# expected(10, 1, perr)
# }
#
# for (n_ref in c(10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 1000, 10000)) {
# expected(n_ref, 0, 0.01)
# }
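  # A hedged Python translation of the R helper above (illustrative only and
  # kept commented out so the test module's runtime behavior is unchanged;
  # assumes scipy is available):
  #   from scipy.stats import binom
  #   def expected(n_ref, n_alt, perr, max_gq=100):
  #     raw = np.array([binom.pmf(n_alt, n_ref, perr),
  #                     binom.pmf(n_alt, n_ref, 0.5),
  #                     binom.pmf(n_ref - n_alt, n_ref, perr)])
  #     norm = raw / raw.sum()
  #     gq = int(min(np.floor(-10 * np.log10(1 - norm[0])), max_gq))
  #     return np.log10(norm), gq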
@parameterized.parameters(
# No coverage case.
[0, 0, 0.01, 100, [-0.477121, -0.477121, -0.477121], 1],
# Test systematically values of n and k.
[10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],
[10, 1, 0.01, 100, [-0.044109, -1.015126, -16.009190], 10],
[10, 2, 0.01, 100, [-1.063830, -0.039211, -13.037641], 0],
[10, 3, 0.01, 100, [-3.020668, -0.000414, -11.003209], 0],
[10, 4, 0.01, 100, [-5.015893, -0.000004, -9.007163], 0],
[10, 5, 0.01, 100, [-7.011524, -0.000000, -7.011524], 0],
[10, 6, 0.01, 100, [-9.007163, -0.000004, -5.015893], 0],
[10, 7, 0.01, 100, [-11.003209, -0.000414, -3.020668], 0],
[10, 8, 0.01, 100, [-13.037641, -0.039211, -1.063830], 0],
[10, 9, 0.01, 100, [-16.009190, -1.015126, -0.044109], 0],
[10, 10, 0.01, 100, [-19.956821, -2.967121, -0.000469], 0],
[20, 0, 0.01, 100, [-0.000001, -5.933304, -39.912704], 59],
[20, 1, 0.01, 100, [-0.000050, -3.937719, -35.921484], 39],
[20, 2, 0.01, 100, [-0.004935, -1.946968, -31.935098], 19],
[20, 3, 0.01, 100, [-0.328657, -0.275056, -28.267550], 2],
[20, 4, 0.01, 100, [-2.053097, -0.003860, -26.000720], 0],
[20, 5, 0.01, 100, [-4.044911, -0.000039, -24.001263], 0],
[20, 6, 0.01, 100, [-6.040508, -0.000000, -22.005589], 0],
[20, 7, 0.01, 100, [-8.036143, -0.000000, -20.009954], 0],
[20, 8, 0.01, 100, [-10.031778, -0.000000, -18.014319], 0],
[20, 9, 0.01, 100, [-12.027413, -0.000000, -16.018683], 0],
[20, 10, 0.01, 100, [-14.023048, -0.000000, -14.023048], 0],
[20, 11, 0.01, 100, [-16.018683, -0.000000, -12.027413], 0],
[20, 12, 0.01, 100, [-18.014319, -0.000000, -10.031778], 0],
[20, 13, 0.01, 100, [-20.009954, -0.000000, -8.036143], 0],
[20, 14, 0.01, 100, [-22.005589, -0.000000, -6.040508], 0],
[20, 15, 0.01, 100, [-24.001263, -0.000039, -4.044911], 0],
[20, 16, 0.01, 100, [-26.000720, -0.003860, -2.053097], 0],
[20, 17, 0.01, 100, [-28.267550, -0.275056, -0.328657], 0],
[20, 18, 0.01, 100, [-31.935098, -1.946968, -0.004935], 0],
[20, 19, 0.01, 100, [-35.921484, -3.937719, -0.000050], 0],
[20, 20, 0.01, 100, [-39.912704, -5.933304, -0.000001], 0],
# Testing different values of p_error.
[10, 0, 0.1, 100, [-0.001215, -2.553940, -9.543640], 25],
[10, 1, 0.1, 100, [-0.010811, -1.609294, -7.644752], 16],
[10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],
[10, 1, 0.01, 100, [-0.044109, -1.015126, -16.009190], 10],
[10, 0, 0.001, 100, [-0.000428, -3.006383, -29.996083], 30],
[10, 1, 0.001, 100, [-0.297847, -0.304236, -24.294371], 3],
[10, 0, 1e-04, 100, [-0.000424, -3.010290, -39.999990], 30],
[10, 1, 1e-04, 100, [-1.032394, -0.042303, -33.032046], 0],
# Test scaling of calculation with more coverage, hitting max_gq.
[10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],
[20, 0, 0.01, 100, [-0.000001, -5.933304, -39.912704], 59],
[30, 0, 0.01, 100, [-0.000000, -8.899956, -59.869056], 88],
[40, 0, 0.01, 100, [-0.000000, -11.866608, -79.825408], 100],
[50, 0, 0.01, 100, [-0.000000, -14.833260, -99.781760], 100],
[60, 0, 0.01, 100, [0.000000, -17.799911, -119.738112], 100],
[70, 0, 0.01, 100, [0.000000, -20.766563, -139.694464], 100],
[80, 0, 0.01, 100, [0.000000, -23.733215, -159.650816], 100],
[90, 0, 0.01, 100, [0.000000, -26.699867, -179.607168], 100],
[100, 0, 0.01, 100, [0.000000, -29.666519, -199.563519], 100],
)
def test_ref_calc(self, total_n, alt_n, p_error, max_gq, expected_likelihoods,
expected_gq):
caller = PlaceholderVariantCaller(p_error, max_gq)
gq, likelihoods = caller.reference_confidence(total_n - alt_n, total_n)
npt.assert_allclose(expected_likelihoods, likelihoods, atol=1e-6)
self.assertEqual(expected_gq, gq)
@parameterized.parameters(
# Values below max_allowed_reads are returned without modification.
[0, 10, 100, (0, 10)],
[5, 10, 100, (5, 10)],
[10, 10, 100, (10, 10)],
[10, 100, 100, (10, 100)],
[100, 100, 100, (100, 100)],
# Checks that the rescaling works when n_total_reads > max_allowed.
[0, 200, 100, (0, 100)],
[0, 200, 100, (0, 100)],
[0, 1000, 100, (0, 100)],
[0, 10000, 100, (0, 100)],
[1, 200, 100, (1, 100)],
[1, 1000, 100, (1, 100)],
[1, 10000, 100, (1, 100)],
[1, 100000, 100, (1, 100)],
[2, 200, 100, (1, 100)],
[3, 200, 100, (2, 100)],
[4, 200, 100, (2, 100)],
[10, 200, 100, (5, 100)],
[50, 200, 100, (25, 100)],
[100, 200, 100, (50, 100)],
[200, 200, 100, (100, 100)],
# I saw a bug at runtime, and the testcase makes sure we scale values of
# n_ref_reads close to n_total_reads appropriately.
[99, 100, 100, (99, 100)],
)
def test_rescale_read_counts(self, n_ref, n_total, max_allowed_reads,
expected):
actual = variant_caller._rescale_read_counts_if_necessary(
n_ref, n_total, max_allowed_reads)
self.assertEqual(actual, expected)
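  # Hedged reading of the expectations above (the authoritative rule lives in
  # variant_caller._rescale_read_counts_if_necessary): when n_total exceeds
  # max_allowed_reads, both counts look to be scaled by the ratio
  # max_allowed / n_total with the ref count rounded up, e.g.
  # (3, 200) -> (ceil(3 * 100 / 200), 100) == (2, 100).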
# pylint: disable=g-complex-comprehension
@parameterized.parameters((n_ref, n_alt_fraction)
for n_ref in [1000, 10000, 100000, 1000000]
for n_alt_fraction in [0.0, 0.01, 0.02])
# pylint: enable=g-complex-comprehension
def test_handles_large_reference_counts(self, n_ref, n_alt_fraction):
"""Tests that we don't blow up when the coverage gets really high."""
caller = PlaceholderVariantCaller(0.01, 100)
n_alt = int(n_alt_fraction * n_ref)
gq, likelihoods = caller._calc_reference_confidence(n_ref, n_ref + n_alt)
self.assertTrue(
np.isfinite(likelihoods).all(),
'Non-finite likelihoods {}'.format(likelihoods))
self.assertEqual(100, gq)
@parameterized.parameters(*variant_caller.CANONICAL_DNA_BASES)
def test_gvcf_basic(self, ref):
options = _reference_model_options(0.01, 100)
caller = PlaceholderVariantCaller(0.01, 100)
allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])
gvcfs = list(caller.make_gvcfs(allele_counter.summary_counts()))
self.assertLen(gvcfs, 1)
self.assertGVCF(
gvcfs[0],
ref=ref,
gq=1.0,
start=100,
end=101,
min_dp=0,
chrom='chr1',
gls=[-0.47712125472] * 3,
sample_name=options.sample_name)
@parameterized.parameters('N', 'R', 'W', 'B')
def test_gvcf_basic_skips_iupac_ref_base(self, ref):
caller = PlaceholderVariantCaller(0.01, 100)
allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])
self.assertEmpty(list(caller.make_gvcfs(allele_counter.summary_counts())))
@parameterized.parameters('X', '>', '!')
def test_gvcf_basic_raises_with_bad_ref_base(self, ref):
caller = PlaceholderVariantCaller(0.01, 100)
allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])
with self.assertRaisesRegexp(ValueError,
'Invalid reference base={}'.format(ref)):
list(caller.make_gvcfs(allele_counter.summary_counts()))
def assertGVCF(self,
gvcf,
ref,
gq,
start,
end,
min_dp,
chrom='chr1',
gls=None,
sample_name=None,
gts=None):
if chrom:
self.assertEqual(gvcf.reference_name, chrom)
call = variant_utils.only_call(gvcf)
self.assertNotEmpty(gvcf.reference_name)
self.assertEqual(gvcf.reference_bases, ref)
self.assertEqual(gvcf.alternate_bases, ['<*>'])
self.assertEqual(gvcf.start, start)
self.assertEqual(gvcf.end, end if end else start + 1)
self.assertEqual(variantcall_utils.get_gq(call), gq)
self.assertNotEmpty(call.genotype_likelihood)
self.assertIn('MIN_DP', call.info)
self.assertLen(call.info['MIN_DP'].values, 1)
self.assertEqual(variantcall_utils.get_min_dp(call), min_dp)
if gls is not None:
npt.assert_allclose(list(gvcf.calls[0].genotype_likelihood), gls)
if sample_name:
self.assertEqual(gvcf.calls[0].call_set_name, sample_name)
if gts is not None:
self.assertEqual(list(gvcf.calls[0].genotype), gts)
@parameterized.parameters(
# Check some basics.
([(0, 0, 'A')], [dict(start=1, end=2, ref='A', gq=1, min_dp=0)]),
# Two equal records are merged, and the reference base is the first one.
([(0, 0, 'A'),
(0, 0, 'C')], [dict(start=1, end=3, ref='A', gq=1, min_dp=0)]),
([(0, 0, 'C'),
(0, 0, 'A')], [dict(start=1, end=3, ref='C', gq=1, min_dp=0)]),
# Three equal records are merged into a single block.
([(0, 0, 'A'), (0, 0, 'C'),
(0, 0, 'T')], [dict(start=1, end=4, ref='A', gq=1, min_dp=0)]),
# We don't merge together different GQ value blocks:
([(0, 0, 'A'), (0, 100, 'C')], [
dict(start=1, end=2, ref='A', gq=1, min_dp=0),
dict(start=2, end=3, ref='C', gq=100, min_dp=100),
]),
([(0, 100, 'A'), (0, 0, 'C')], [
dict(start=1, end=2, ref='A', gq=100, min_dp=100),
dict(start=2, end=3, ref='C', gq=1, min_dp=0),
]),
([(0, 0, 'A'), (0, 20, 'C'), (0, 100, 'T')], [
dict(start=1, end=2, ref='A', gq=1, min_dp=0),
dict(start=2, end=3, ref='C', gq=59, min_dp=20),
dict(start=3, end=4, ref='T', gq=100, min_dp=100),
]),
)
def test_make_gvcfs(self, counts, expecteds):
allele_counts = self.fake_allele_counter(1, counts).summary_counts()
caller = PlaceholderVariantCaller(0.01, 100)
gvcfs = list(caller.make_gvcfs(allele_counts))
self.assertLen(gvcfs, len(expecteds))
for actual, expected in zip(gvcfs, expecteds):
self.assertGVCF(actual, **expected)
@parameterized.parameters(
dict(
gq_resolution=1,
expecteds=[
dict(start=1, end=2, ref='A', gq=53, min_dp=18),
dict(start=2, end=3, ref='C', gq=56, min_dp=19),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=7, ref='A', gq=72, min_dp=31),
dict(start=7, end=8, ref='C', gq=83, min_dp=35),
dict(start=8, end=9, ref='T', gq=59, min_dp=20),
dict(start=9, end=10, ref='G', gq=56, min_dp=19),
]),
# Binning by 3 does not cause any records to be merged.
dict(
gq_resolution=3,
expecteds=[
dict(start=1, end=2, ref='A', gq=53, min_dp=18),
dict(start=2, end=3, ref='C', gq=56, min_dp=19),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=7, ref='A', gq=72, min_dp=31),
dict(start=7, end=8, ref='C', gq=83, min_dp=35),
dict(start=8, end=9, ref='T', gq=59, min_dp=20),
dict(start=9, end=10, ref='G', gq=56, min_dp=19),
]),
# Binning by 4 causes the first merge, of the first two records.
dict(
gq_resolution=4,
expecteds=[
dict(start=1, end=3, ref='A', gq=53, min_dp=18),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=7, ref='A', gq=72, min_dp=31),
dict(start=7, end=8, ref='C', gq=83, min_dp=35),
dict(start=8, end=9, ref='T', gq=59, min_dp=20),
dict(start=9, end=10, ref='G', gq=56, min_dp=19),
]),
dict(
gq_resolution=10,
expecteds=[
dict(start=1, end=3, ref='A', gq=53, min_dp=18),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=7, ref='A', gq=72, min_dp=31),
dict(start=7, end=8, ref='C', gq=83, min_dp=35),
dict(start=8, end=10, ref='T', gq=56, min_dp=19),
]),
dict(
gq_resolution=45,
expecteds=[
dict(start=1, end=3, ref='A', gq=53, min_dp=18),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=10, ref='A', gq=56, min_dp=19),
]),
)
def test_quantize_gvcfs(self, gq_resolution, expecteds):
# Each count tuple is n_alt, n_ref, ref_base.
    # The third, fourth, and fifth tuples should never be merged, since for
    # each of them either het or hom_alt has a bigger GL than hom_ref.
counts = [(0, 18, 'A'), (0, 19, 'C'), (35, 0, 'A'), (10, 10, 'T'),
(4, 12, 'A'), (1, 30, 'A'), (1, 34, 'C'), (0, 20, 'T'),
(0, 19, 'G')]
allele_counts = self.fake_allele_counter(1, counts).summary_counts()
caller = PlaceholderVariantCaller(0.01, 100, gq_resolution)
gvcfs = list(caller.make_gvcfs(allele_counts))
self.assertLen(gvcfs, len(expecteds))
for actual, expected in zip(gvcfs, expecteds):
self.assertGVCF(actual, **expected)
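  # Hedged reading of the expecteds above: quantization appears to place GQs
  # into fixed-width bins of size gq_resolution starting at GQ 1 (GQ 0 keeps
  # its own bin). With gq_resolution=4 the bins are 1-4, 5-8, ..., 53-56, so
  # GQ 53 and GQ 56 land in the same bin and the first two records merge,
  # while with gq_resolution=3 they fall in 52-54 and 55-57 and stay apart.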
@parameterized.parameters(True, False)
def test_gvcfs_counts(self, include_gvcfs):
# Only tests the 'gvcfs' creation part of calls_and_gvcfs. The `calls`
# portion of this method needs to be tested in subclasses, which have
# implemented the get_candidates method.
counts = [(0, 0, 'A'), (10, 10, 'G'), (0, 0, 'G'), (0, 0, 'G'),
(10, 10, 'T')]
caller = PlaceholderVariantCaller(0.01, 100)
allele_counter = self.fake_allele_counter(10, counts)
allele_counters = {}
allele_counters['sample_id'] = allele_counter
_, gvcfs = caller.calls_and_gvcfs(allele_counters, include_gvcfs,
'sample_id')
# We expect our gvcfs to occur at the 10 position and that 12 and 13 have
# been merged into a 2 bp block, if enabled. Otherwise should be empty.
if include_gvcfs:
self.assertLen(gvcfs, 4)
# Expected diploid genotype likelihoods when there's no coverage. The
# chance of having each genotype is 1/3, in log10 space.
flat_gls = np.log10([1.0 / 3] * 3)
self.assertGVCF(
gvcfs[0], ref='A', start=10, end=11, gq=1, min_dp=0, gls=flat_gls)
self.assertGVCF(
gvcfs[1],
ref='G',
start=11,
end=12,
gq=0,
min_dp=20,
gls=np.array([-14.0230482368, -7.993606e-15, -14.0230482368]),
# The genotype should NOT be called here ("./.") as the likelihood
# for het is greater than hom_ref.
gts=[-1, -1])
self.assertGVCF(
gvcfs[2], ref='G', start=12, end=14, gq=1, min_dp=0, gls=flat_gls)
else:
self.assertEmpty(gvcfs)
_CACHE_COVERAGE = 20  # Outside the class so we can refer to it in @parameterized.parameters.
class VariantCallerCacheTests(parameterized.TestCase):
@classmethod
def setUpClass(cls):
super(VariantCallerCacheTests, cls).setUpClass()
cls.raw_caller = PlaceholderVariantCaller(0.1, 50, use_cache_table=False)
cls.cache_caller = PlaceholderVariantCaller(
0.1, 50, use_cache_table=True, max_cache_coverage=_CACHE_COVERAGE)
# pylint: disable=g-complex-comprehension
@parameterized.parameters((n_alt, n_total)
for n_total in range(_CACHE_COVERAGE + 1)
for n_alt in range(n_total + 1))
# pylint: enable=g-complex-comprehension
def test_caching(self, n_alt, n_total):
# Note that we only expect the gq and gls to be close if we are not
# rescaling the counts, so we are only looping over values that should be
# cached. In practice the cache is set to values sufficiently large that
# these differences don't matter, but for this test we are limiting the
# cache size to a small value in _CACHE_COVERAGE so we can test that the
# cache lookups are correct.
raw_gq, raw_gls = self.raw_caller.reference_confidence(n_alt, n_total)
cache_gq, cache_gls = self.cache_caller.reference_confidence(n_alt, n_total)
self.assertEqual(raw_gq, cache_gq)
npt.assert_allclose(raw_gls, cache_gls)
if __name__ == '__main__':
absltest.main()
| 43.432049
| 80
| 0.600738
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
del sys.modules['google']
from absl.testing import absltest
from absl.testing import parameterized
import mock
import numpy as np
import numpy.testing as npt
from deeptrio import testdata
from deeptrio import variant_caller
from deepvariant.protos import deepvariant_pb2
from third_party.nucleus.util import variant_utils
from third_party.nucleus.util import variantcall_utils
def setUpModule():
testdata.init()
def _reference_model_options(p_error, max_gq, gq_resolution=1):
return deepvariant_pb2.VariantCallerOptions(
sample_name='UNKNOWN',
p_error=p_error,
max_gq=max_gq,
gq_resolution=gq_resolution,
ploidy=2)
class PlaceholderVariantCaller(variant_caller.VariantCaller):
def __init__(self,
p_error,
max_gq,
gq_resolution=1,
use_cache_table=False,
max_cache_coverage=100):
super(PlaceholderVariantCaller, self).__init__(
options=_reference_model_options(p_error, max_gq, gq_resolution),
use_cache_table=use_cache_table,
max_cache_coverage=max_cache_coverage)
def get_candidates(self, allele_counter, target_sample):
return None
class VariantCallerTests(parameterized.TestCase):
def fake_allele_counter(self, start_pos, counts):
allele_counter = mock.Mock()
allele_counter.summary_counts.return_value = [
deepvariant_pb2.AlleleCountSummary(
ref_supporting_read_count=n_ref,
total_read_count=n_ref + n_alt,
ref_base=ref,
reference_name='chr1',
position=start_pos + i)
for i, (n_alt, n_ref, ref) in enumerate(counts)
]
allele_counter.counts.return_value = counts
return allele_counter
@parameterized.parameters(
[0, 0, 0.01, 100, [-0.477121, -0.477121, -0.477121], 1],
[10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],
[10, 1, 0.01, 100, [-0.044109, -1.015126, -16.009190], 10],
[10, 2, 0.01, 100, [-1.063830, -0.039211, -13.037641], 0],
[10, 3, 0.01, 100, [-3.020668, -0.000414, -11.003209], 0],
[10, 4, 0.01, 100, [-5.015893, -0.000004, -9.007163], 0],
[10, 5, 0.01, 100, [-7.011524, -0.000000, -7.011524], 0],
[10, 6, 0.01, 100, [-9.007163, -0.000004, -5.015893], 0],
[10, 7, 0.01, 100, [-11.003209, -0.000414, -3.020668], 0],
[10, 8, 0.01, 100, [-13.037641, -0.039211, -1.063830], 0],
[10, 9, 0.01, 100, [-16.009190, -1.015126, -0.044109], 0],
[10, 10, 0.01, 100, [-19.956821, -2.967121, -0.000469], 0],
[20, 0, 0.01, 100, [-0.000001, -5.933304, -39.912704], 59],
[20, 1, 0.01, 100, [-0.000050, -3.937719, -35.921484], 39],
[20, 2, 0.01, 100, [-0.004935, -1.946968, -31.935098], 19],
[20, 3, 0.01, 100, [-0.328657, -0.275056, -28.267550], 2],
[20, 4, 0.01, 100, [-2.053097, -0.003860, -26.000720], 0],
[20, 5, 0.01, 100, [-4.044911, -0.000039, -24.001263], 0],
[20, 6, 0.01, 100, [-6.040508, -0.000000, -22.005589], 0],
[20, 7, 0.01, 100, [-8.036143, -0.000000, -20.009954], 0],
[20, 8, 0.01, 100, [-10.031778, -0.000000, -18.014319], 0],
[20, 9, 0.01, 100, [-12.027413, -0.000000, -16.018683], 0],
[20, 10, 0.01, 100, [-14.023048, -0.000000, -14.023048], 0],
[20, 11, 0.01, 100, [-16.018683, -0.000000, -12.027413], 0],
[20, 12, 0.01, 100, [-18.014319, -0.000000, -10.031778], 0],
[20, 13, 0.01, 100, [-20.009954, -0.000000, -8.036143], 0],
[20, 14, 0.01, 100, [-22.005589, -0.000000, -6.040508], 0],
[20, 15, 0.01, 100, [-24.001263, -0.000039, -4.044911], 0],
[20, 16, 0.01, 100, [-26.000720, -0.003860, -2.053097], 0],
[20, 17, 0.01, 100, [-28.267550, -0.275056, -0.328657], 0],
[20, 18, 0.01, 100, [-31.935098, -1.946968, -0.004935], 0],
[20, 19, 0.01, 100, [-35.921484, -3.937719, -0.000050], 0],
[20, 20, 0.01, 100, [-39.912704, -5.933304, -0.000001], 0],
[10, 0, 0.1, 100, [-0.001215, -2.553940, -9.543640], 25],
[10, 1, 0.1, 100, [-0.010811, -1.609294, -7.644752], 16],
[10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],
[10, 1, 0.01, 100, [-0.044109, -1.015126, -16.009190], 10],
[10, 0, 0.001, 100, [-0.000428, -3.006383, -29.996083], 30],
[10, 1, 0.001, 100, [-0.297847, -0.304236, -24.294371], 3],
[10, 0, 1e-04, 100, [-0.000424, -3.010290, -39.999990], 30],
[10, 1, 1e-04, 100, [-1.032394, -0.042303, -33.032046], 0],
[10, 0, 0.01, 100, [-0.000469, -2.967121, -19.956821], 29],
[20, 0, 0.01, 100, [-0.000001, -5.933304, -39.912704], 59],
[30, 0, 0.01, 100, [-0.000000, -8.899956, -59.869056], 88],
[40, 0, 0.01, 100, [-0.000000, -11.866608, -79.825408], 100],
[50, 0, 0.01, 100, [-0.000000, -14.833260, -99.781760], 100],
[60, 0, 0.01, 100, [0.000000, -17.799911, -119.738112], 100],
[70, 0, 0.01, 100, [0.000000, -20.766563, -139.694464], 100],
[80, 0, 0.01, 100, [0.000000, -23.733215, -159.650816], 100],
[90, 0, 0.01, 100, [0.000000, -26.699867, -179.607168], 100],
[100, 0, 0.01, 100, [0.000000, -29.666519, -199.563519], 100],
)
def test_ref_calc(self, total_n, alt_n, p_error, max_gq, expected_likelihoods,
expected_gq):
caller = PlaceholderVariantCaller(p_error, max_gq)
gq, likelihoods = caller.reference_confidence(total_n - alt_n, total_n)
npt.assert_allclose(expected_likelihoods, likelihoods, atol=1e-6)
self.assertEqual(expected_gq, gq)
@parameterized.parameters(
[0, 10, 100, (0, 10)],
[5, 10, 100, (5, 10)],
[10, 10, 100, (10, 10)],
[10, 100, 100, (10, 100)],
[100, 100, 100, (100, 100)],
[0, 200, 100, (0, 100)],
[0, 200, 100, (0, 100)],
[0, 1000, 100, (0, 100)],
[0, 10000, 100, (0, 100)],
[1, 200, 100, (1, 100)],
[1, 1000, 100, (1, 100)],
[1, 10000, 100, (1, 100)],
[1, 100000, 100, (1, 100)],
[2, 200, 100, (1, 100)],
[3, 200, 100, (2, 100)],
[4, 200, 100, (2, 100)],
[10, 200, 100, (5, 100)],
[50, 200, 100, (25, 100)],
[100, 200, 100, (50, 100)],
[200, 200, 100, (100, 100)],
[99, 100, 100, (99, 100)],
)
def test_rescale_read_counts(self, n_ref, n_total, max_allowed_reads,
expected):
actual = variant_caller._rescale_read_counts_if_necessary(
n_ref, n_total, max_allowed_reads)
self.assertEqual(actual, expected)
@parameterized.parameters((n_ref, n_alt_fraction)
for n_ref in [1000, 10000, 100000, 1000000]
for n_alt_fraction in [0.0, 0.01, 0.02])
def test_handles_large_reference_counts(self, n_ref, n_alt_fraction):
caller = PlaceholderVariantCaller(0.01, 100)
n_alt = int(n_alt_fraction * n_ref)
gq, likelihoods = caller._calc_reference_confidence(n_ref, n_ref + n_alt)
self.assertTrue(
np.isfinite(likelihoods).all(),
'Non-finite likelihoods {}'.format(likelihoods))
self.assertEqual(100, gq)
@parameterized.parameters(*variant_caller.CANONICAL_DNA_BASES)
def test_gvcf_basic(self, ref):
options = _reference_model_options(0.01, 100)
caller = PlaceholderVariantCaller(0.01, 100)
allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])
gvcfs = list(caller.make_gvcfs(allele_counter.summary_counts()))
self.assertLen(gvcfs, 1)
self.assertGVCF(
gvcfs[0],
ref=ref,
gq=1.0,
start=100,
end=101,
min_dp=0,
chrom='chr1',
gls=[-0.47712125472] * 3,
sample_name=options.sample_name)
@parameterized.parameters('N', 'R', 'W', 'B')
def test_gvcf_basic_skips_iupac_ref_base(self, ref):
caller = PlaceholderVariantCaller(0.01, 100)
allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])
self.assertEmpty(list(caller.make_gvcfs(allele_counter.summary_counts())))
@parameterized.parameters('X', '>', '!')
def test_gvcf_basic_raises_with_bad_ref_base(self, ref):
caller = PlaceholderVariantCaller(0.01, 100)
allele_counter = self.fake_allele_counter(100, [(0, 0, ref)])
with self.assertRaisesRegexp(ValueError,
'Invalid reference base={}'.format(ref)):
list(caller.make_gvcfs(allele_counter.summary_counts()))
def assertGVCF(self,
gvcf,
ref,
gq,
start,
end,
min_dp,
chrom='chr1',
gls=None,
sample_name=None,
gts=None):
if chrom:
self.assertEqual(gvcf.reference_name, chrom)
call = variant_utils.only_call(gvcf)
self.assertNotEmpty(gvcf.reference_name)
self.assertEqual(gvcf.reference_bases, ref)
self.assertEqual(gvcf.alternate_bases, ['<*>'])
self.assertEqual(gvcf.start, start)
self.assertEqual(gvcf.end, end if end else start + 1)
self.assertEqual(variantcall_utils.get_gq(call), gq)
self.assertNotEmpty(call.genotype_likelihood)
self.assertIn('MIN_DP', call.info)
self.assertLen(call.info['MIN_DP'].values, 1)
self.assertEqual(variantcall_utils.get_min_dp(call), min_dp)
if gls is not None:
npt.assert_allclose(list(gvcf.calls[0].genotype_likelihood), gls)
if sample_name:
self.assertEqual(gvcf.calls[0].call_set_name, sample_name)
if gts is not None:
self.assertEqual(list(gvcf.calls[0].genotype), gts)
@parameterized.parameters(
([(0, 0, 'A')], [dict(start=1, end=2, ref='A', gq=1, min_dp=0)]),
([(0, 0, 'A'),
(0, 0, 'C')], [dict(start=1, end=3, ref='A', gq=1, min_dp=0)]),
([(0, 0, 'C'),
(0, 0, 'A')], [dict(start=1, end=3, ref='C', gq=1, min_dp=0)]),
([(0, 0, 'A'), (0, 0, 'C'),
(0, 0, 'T')], [dict(start=1, end=4, ref='A', gq=1, min_dp=0)]),
([(0, 0, 'A'), (0, 100, 'C')], [
dict(start=1, end=2, ref='A', gq=1, min_dp=0),
dict(start=2, end=3, ref='C', gq=100, min_dp=100),
]),
([(0, 100, 'A'), (0, 0, 'C')], [
dict(start=1, end=2, ref='A', gq=100, min_dp=100),
dict(start=2, end=3, ref='C', gq=1, min_dp=0),
]),
([(0, 0, 'A'), (0, 20, 'C'), (0, 100, 'T')], [
dict(start=1, end=2, ref='A', gq=1, min_dp=0),
dict(start=2, end=3, ref='C', gq=59, min_dp=20),
dict(start=3, end=4, ref='T', gq=100, min_dp=100),
]),
)
def test_make_gvcfs(self, counts, expecteds):
allele_counts = self.fake_allele_counter(1, counts).summary_counts()
caller = PlaceholderVariantCaller(0.01, 100)
gvcfs = list(caller.make_gvcfs(allele_counts))
self.assertLen(gvcfs, len(expecteds))
for actual, expected in zip(gvcfs, expecteds):
self.assertGVCF(actual, **expected)
@parameterized.parameters(
dict(
gq_resolution=1,
expecteds=[
dict(start=1, end=2, ref='A', gq=53, min_dp=18),
dict(start=2, end=3, ref='C', gq=56, min_dp=19),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=7, ref='A', gq=72, min_dp=31),
dict(start=7, end=8, ref='C', gq=83, min_dp=35),
dict(start=8, end=9, ref='T', gq=59, min_dp=20),
dict(start=9, end=10, ref='G', gq=56, min_dp=19),
]),
# Binning by 3 does not cause any records to be merged.
dict(
gq_resolution=3,
expecteds=[
dict(start=1, end=2, ref='A', gq=53, min_dp=18),
dict(start=2, end=3, ref='C', gq=56, min_dp=19),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=7, ref='A', gq=72, min_dp=31),
dict(start=7, end=8, ref='C', gq=83, min_dp=35),
dict(start=8, end=9, ref='T', gq=59, min_dp=20),
dict(start=9, end=10, ref='G', gq=56, min_dp=19),
]),
# Binning by 4 causes the first merge, of the first two records.
dict(
gq_resolution=4,
expecteds=[
dict(start=1, end=3, ref='A', gq=53, min_dp=18),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=7, ref='A', gq=72, min_dp=31),
dict(start=7, end=8, ref='C', gq=83, min_dp=35),
dict(start=8, end=9, ref='T', gq=59, min_dp=20),
dict(start=9, end=10, ref='G', gq=56, min_dp=19),
]),
dict(
gq_resolution=10,
expecteds=[
dict(start=1, end=3, ref='A', gq=53, min_dp=18),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=7, ref='A', gq=72, min_dp=31),
dict(start=7, end=8, ref='C', gq=83, min_dp=35),
dict(start=8, end=10, ref='T', gq=56, min_dp=19),
]),
dict(
gq_resolution=45,
expecteds=[
dict(start=1, end=3, ref='A', gq=53, min_dp=18),
dict(start=3, end=4, ref='A', gq=0, min_dp=35),
dict(start=4, end=5, ref='T', gq=0, min_dp=20),
dict(start=5, end=6, ref='A', gq=0, min_dp=16),
dict(start=6, end=10, ref='A', gq=56, min_dp=19),
]),
)
def test_quantize_gvcfs(self, gq_resolution, expecteds):
# Each count tuple is n_alt, n_ref, ref_base.
    # The third, fourth, and fifth tuples should never be merged, since for
    # each of them either het or hom_alt has a bigger GL than hom_ref.
counts = [(0, 18, 'A'), (0, 19, 'C'), (35, 0, 'A'), (10, 10, 'T'),
(4, 12, 'A'), (1, 30, 'A'), (1, 34, 'C'), (0, 20, 'T'),
(0, 19, 'G')]
allele_counts = self.fake_allele_counter(1, counts).summary_counts()
caller = PlaceholderVariantCaller(0.01, 100, gq_resolution)
gvcfs = list(caller.make_gvcfs(allele_counts))
self.assertLen(gvcfs, len(expecteds))
for actual, expected in zip(gvcfs, expecteds):
self.assertGVCF(actual, **expected)
@parameterized.parameters(True, False)
def test_gvcfs_counts(self, include_gvcfs):
# Only tests the 'gvcfs' creation part of calls_and_gvcfs. The `calls`
# portion of this method needs to be tested in subclasses, which have
# implemented the get_candidates method.
counts = [(0, 0, 'A'), (10, 10, 'G'), (0, 0, 'G'), (0, 0, 'G'),
(10, 10, 'T')]
caller = PlaceholderVariantCaller(0.01, 100)
allele_counter = self.fake_allele_counter(10, counts)
allele_counters = {}
allele_counters['sample_id'] = allele_counter
_, gvcfs = caller.calls_and_gvcfs(allele_counters, include_gvcfs,
'sample_id')
# We expect our gvcfs to occur at the 10 position and that 12 and 13 have
# been merged into a 2 bp block, if enabled. Otherwise should be empty.
if include_gvcfs:
self.assertLen(gvcfs, 4)
      # Expected diploid genotype likelihoods when there's no coverage: the
      # chance of each genotype is 1/3, in log10 space.
flat_gls = np.log10([1.0 / 3] * 3)
self.assertGVCF(
gvcfs[0], ref='A', start=10, end=11, gq=1, min_dp=0, gls=flat_gls)
self.assertGVCF(
gvcfs[1],
ref='G',
start=11,
end=12,
gq=0,
min_dp=20,
gls=np.array([-14.0230482368, -7.993606e-15, -14.0230482368]),
gts=[-1, -1])
self.assertGVCF(
gvcfs[2], ref='G', start=12, end=14, gq=1, min_dp=0, gls=flat_gls)
else:
self.assertEmpty(gvcfs)
_CACHE_COVERAGE = 20
class VariantCallerCacheTests(parameterized.TestCase):
@classmethod
def setUpClass(cls):
super(VariantCallerCacheTests, cls).setUpClass()
cls.raw_caller = PlaceholderVariantCaller(0.1, 50, use_cache_table=False)
cls.cache_caller = PlaceholderVariantCaller(
0.1, 50, use_cache_table=True, max_cache_coverage=_CACHE_COVERAGE)
@parameterized.parameters((n_alt, n_total)
for n_total in range(_CACHE_COVERAGE + 1)
for n_alt in range(n_total + 1))
def test_caching(self, n_alt, n_total):
    # The cache is capped at a small value (_CACHE_COVERAGE) so we can test
    # that the cache lookups are correct.
raw_gq, raw_gls = self.raw_caller.reference_confidence(n_alt, n_total)
cache_gq, cache_gls = self.cache_caller.reference_confidence(n_alt, n_total)
self.assertEqual(raw_gq, cache_gq)
npt.assert_allclose(raw_gls, cache_gls)
if __name__ == '__main__':
absltest.main()
| true
| true
|
790ebd8bc6c0ac06a99896b061a2c625e1fdb2c4
| 14,433
|
py
|
Python
|
tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py
|
topsun888/tensorflow
|
bad7c50b9dc9789ad7dd0a62daca40b7269841ed
|
[
"Apache-2.0"
] | 1
|
2017-03-24T12:08:25.000Z
|
2017-03-24T12:08:25.000Z
|
tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py
|
topsun888/tensorflow
|
bad7c50b9dc9789ad7dd0a62daca40b7269841ed
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py
|
topsun888/tensorflow
|
bad7c50b9dc9789ad7dd0a62daca40b7269841ed
|
[
"Apache-2.0"
] | 1
|
2020-07-09T22:02:18.000Z
|
2020-07-09T22:02:18.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some common SessionRunHook classes.
@@
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import six
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.session_run_hook import SessionRunArgs
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
class LoggingTensorHook(session_run_hook.SessionRunHook):
"""Prints given tensors every N iteration.
The tensors will be printed to the log, with `INFO` severity.
"""
def __init__(self, tensors, every_n_iter=100):
"""Initializes a LoggingHook monitor.
Args:
tensors: `dict` of tag to tensors/names or
`iterable` of tensors/names.
      every_n_iter: `int`, print the tensor values once every N iterations.
Raises:
ValueError: if `every_n_iter` is non-positive.
"""
if every_n_iter <= 0:
raise ValueError("Invalid every_n_iter=%s." % every_n_iter)
if not isinstance(tensors, dict):
tensors = {item: item for item in tensors}
self._tensors = tensors
self._every_n_iter = every_n_iter
def begin(self):
self._iter_count = 0
# Convert names to tensors if given
self._current_tensors = {tag: _as_graph_element(tensor)
for (tag, tensor) in self._tensors.items()}
def before_run(self, run_context): # pylint: disable=unused-argument
if self._iter_count % self._every_n_iter == 0:
return SessionRunArgs(self._current_tensors)
else:
return None
def after_run(self, run_context, run_values):
_ = run_context
if self._iter_count % self._every_n_iter == 0:
stats = []
for tag in sorted(self._current_tensors.keys()):
stats.append("%s = %s" % (tag, run_values.results[tag]))
logging.info("%s", ", ".join(stats))
self._iter_count += 1
class StopAtStepHook(session_run_hook.SessionRunHook):
"""Monitor to request stop at a specified step."""
def __init__(self, num_steps=None, last_step=None):
"""Create a StopAtStep Hook.
    This hook requests stop after either a number of steps have been
    executed or a last step has been reached. Only one of the two options
    can be specified.
    If `num_steps` is specified, it indicates the number of steps to execute
    after `begin()` is called. If instead `last_step` is specified, it
    indicates the last step we want to execute, as passed to the `after_run()`
    call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
def begin(self):
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results
if self._last_step is None:
self._last_step = global_step + self._num_steps - 1
if global_step >= self._last_step:
run_context.request_stop()
class CheckpointSaverHook(session_run_hook.SessionRunHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None):
"""Initialize CheckpointSaverHook monitor.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
Raises:
      ValueError: Exactly one of `save_steps` or `save_secs` should be set.
"""
logging.info("Create CheckpointSaverHook.")
self._saver = saver
self._checkpoint_dir = checkpoint_dir
self._summary_writer = SummaryWriterCache.get(checkpoint_dir)
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._save_secs = save_secs
self._save_steps = save_steps
self._last_saved_time = None
self._last_saved_step = None
if save_steps is None and save_secs is None:
raise ValueError("Either save_steps or save_secs should be provided")
if (save_steps is not None) and (save_secs is not None):
raise ValueError("Can not provide both save_steps and save_secs.")
def begin(self):
self._last_saved_time = None
self._last_saved_step = None
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
if self._last_saved_time is None:
# Write graph in the first call.
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir,
"graph.pbtxt")
self._summary_writer.add_graph(ops.get_default_graph())
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results
if self._last_saved_time is None:
self._save(global_step, run_context.session)
if self._save_steps is not None:
if global_step >= self._last_saved_step + self._save_steps:
self._save(global_step, run_context.session)
if self._save_secs is not None:
if time.time() >= self._last_saved_time + self._save_secs:
self._save(global_step, run_context.session)
def end(self, session):
last_step = session.run(contrib_variables.get_global_step())
self._save(last_step, session)
def _save(self, step, session):
"""Saves the latest checkpoint."""
if step == self._last_saved_step:
return
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
self._last_saved_time = time.time()
self._last_saved_step = step
if self._saver is None:
self._scaffold.saver.save(session, self._save_path, global_step=step)
else:
self._saver.save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
class StepCounterHook(session_run_hook.SessionRunHook):
"""Steps per second monitor."""
def __init__(self, every_n_steps=100, output_dir=None, summary_writer=None):
self._summary_tag = "global_step/sec"
self._every_n_steps = every_n_steps
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = SummaryWriterCache.get(output_dir)
def begin(self):
self._last_reported_time = None
self._last_reported_step = None
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use StepCounterHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
_ = run_context
if not self._summary_writer:
return
global_step = run_values.results
current_time = time.time()
if self._last_reported_time is None:
self._last_reported_step = global_step
self._last_reported_time = current_time
else:
if global_step >= self._every_n_steps + self._last_reported_step:
added_steps = global_step - self._last_reported_step
elapsed_time = current_time - self._last_reported_time
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(
tag=self._summary_tag, simple_value=steps_per_sec)])
self._summary_writer.add_summary(summary, global_step)
self._last_reported_step = global_step
self._last_reported_time = current_time
class NanLossDuringTrainingError(RuntimeError):
def __str__(self):
return "NaN loss during training."
class NanTensorHook(session_run_hook.SessionRunHook):
"""NaN Loss monitor.
Monitors loss and stops training if loss is NaN.
Can either fail with exception or just stop training.
"""
def __init__(self, loss_tensor, fail_on_nan_loss=True):
"""Initializes NanLoss monitor.
Args:
loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
if np.isnan(run_values.results):
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
# We don't raise an error but we request stop without an exception.
run_context.request_stop()
class SummarySaverHook(session_run_hook.SessionRunHook):
"""Saves summaries every N steps."""
def __init__(self,
save_steps=100,
output_dir=None,
summary_writer=None,
scaffold=None,
summary_op=None):
"""Initializes a `SummarySaver` monitor.
Args:
save_steps: `int`, save summaries every N steps. See `EveryN`.
output_dir: `string`, the directory to save the summaries to. Only used
if no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
buffer, as output by TF summary methods like `scalar_summary` or
`merge_all_summaries`.
"""
# TODO(ipolosukhin): Implement every N seconds.
self._summary_op = summary_op
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = SummaryWriterCache.get(output_dir)
self._scaffold = scaffold
self._save_steps = save_steps
# TODO(mdan): Throw an error if output_dir and summary_writer are None.
def begin(self):
self._last_saved_step = None
self._request_summary = True
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use SummarySaverHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._summary_op is not None:
requests["summary"] = self._summary_op
elif self._scaffold.summary_op is not None:
requests["summary"] = self._scaffold.summary_op
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
_ = run_context
if not self._summary_writer:
return
global_step = run_values.results["global_step"]
if self._last_saved_step is None:
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START), global_step)
if self._request_summary:
self._last_saved_step = global_step
if "summary" in run_values.results:
self._summary_writer.add_summary(run_values.results["summary"],
global_step)
self._request_summary = (
global_step >= self._last_saved_step + self._save_steps - 1)
def end(self, session=None):
if self._summary_writer:
self._summary_writer.flush()
def _as_graph_element(obj):
"""Retrieves Graph element."""
graph = ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, "graph") or obj.graph != graph:
raise ValueError("Passed %s should have graph attribute that is equal "
"to current graph %s." % (obj, graph))
return obj
if ":" in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ":0")
# Check that there is no :1 (e.g. it's single output).
try:
graph.as_graph_element(obj + ":1")
except (KeyError, ValueError):
pass
else:
raise ValueError("Name %s is ambiguous, "
"as this `Operation` has multiple outputs "
"(at least 2)." % obj)
return element
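# Hedged usage sketch (illustrative wiring only; which training driver these
# contrib hooks plug into varies by release, so treat this as an assumption
# rather than the documented entry point):
#   hooks = [LoggingTensorHook({'loss': loss_op}, every_n_iter=10),
#            StopAtStepHook(num_steps=1000),
#            SummarySaverHook(save_steps=100, output_dir='/tmp/logdir',
#                             summary_op=summary_op)]
#   Each hook's begin() runs once before training, before_run()/after_run()
#   bracket every session.run() call, and end() fires when training stops.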
| 35.992519
| 89
| 0.701933
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import six
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.session_run_hook import SessionRunArgs
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
class LoggingTensorHook(session_run_hook.SessionRunHook):
def __init__(self, tensors, every_n_iter=100):
if every_n_iter <= 0:
raise ValueError("Invalid every_n_iter=%s." % every_n_iter)
if not isinstance(tensors, dict):
tensors = {item: item for item in tensors}
self._tensors = tensors
self._every_n_iter = every_n_iter
def begin(self):
self._iter_count = 0
self._current_tensors = {tag: _as_graph_element(tensor)
for (tag, tensor) in self._tensors.items()}
def before_run(self, run_context):
if self._iter_count % self._every_n_iter == 0:
return SessionRunArgs(self._current_tensors)
else:
return None
def after_run(self, run_context, run_values):
_ = run_context
if self._iter_count % self._every_n_iter == 0:
stats = []
for tag in sorted(self._current_tensors.keys()):
stats.append("%s = %s" % (tag, run_values.results[tag]))
logging.info("%s", ", ".join(stats))
self._iter_count += 1
class StopAtStepHook(session_run_hook.SessionRunHook):
def __init__(self, num_steps=None, last_step=None):
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
def begin(self):
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
def before_run(self, run_context):
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results
if self._last_step is None:
self._last_step = global_step + self._num_steps - 1
if global_step >= self._last_step:
run_context.request_stop()
class CheckpointSaverHook(session_run_hook.SessionRunHook):
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None):
logging.info("Create CheckpointSaverHook.")
self._saver = saver
self._checkpoint_dir = checkpoint_dir
self._summary_writer = SummaryWriterCache.get(checkpoint_dir)
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._save_secs = save_secs
self._save_steps = save_steps
self._last_saved_time = None
self._last_saved_step = None
if save_steps is None and save_secs is None:
raise ValueError("Either save_steps or save_secs should be provided")
if (save_steps is not None) and (save_secs is not None):
raise ValueError("Can not provide both save_steps and save_secs.")
def begin(self):
self._last_saved_time = None
self._last_saved_step = None
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
def before_run(self, run_context):
if self._last_saved_time is None:
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir,
"graph.pbtxt")
self._summary_writer.add_graph(ops.get_default_graph())
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results
if self._last_saved_time is None:
self._save(global_step, run_context.session)
if self._save_steps is not None:
if global_step >= self._last_saved_step + self._save_steps:
self._save(global_step, run_context.session)
if self._save_secs is not None:
if time.time() >= self._last_saved_time + self._save_secs:
self._save(global_step, run_context.session)
def end(self, session):
last_step = session.run(contrib_variables.get_global_step())
self._save(last_step, session)
def _save(self, step, session):
if step == self._last_saved_step:
return
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
self._last_saved_time = time.time()
self._last_saved_step = step
if self._saver is None:
self._scaffold.saver.save(session, self._save_path, global_step=step)
else:
self._saver.save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
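# --- Editor's note: illustrative sketch, not part of the original file.
# The two cadence arguments are mutually exclusive, so a caller picks one;
# "/tmp/ckpts" and `my_saver` are placeholders supplied by the caller.
def _example_checkpoint_saver(my_saver):
  # Save every 500 global steps ...
  by_steps = CheckpointSaverHook("/tmp/ckpts", save_steps=500, saver=my_saver)
  # ... or save every 10 minutes of wall-clock time.
  by_secs = CheckpointSaverHook("/tmp/ckpts", save_secs=600, saver=my_saver)
  return by_steps, by_secs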
class StepCounterHook(session_run_hook.SessionRunHook):
  """Steps per second monitor."""
def __init__(self, every_n_steps=100, output_dir=None, summary_writer=None):
self._summary_tag = "global_step/sec"
self._every_n_steps = every_n_steps
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = SummaryWriterCache.get(output_dir)
def begin(self):
self._last_reported_time = None
self._last_reported_step = None
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use StepCounterHook.")
def before_run(self, run_context):
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
_ = run_context
if not self._summary_writer:
return
global_step = run_values.results
current_time = time.time()
if self._last_reported_time is None:
self._last_reported_step = global_step
self._last_reported_time = current_time
else:
if global_step >= self._every_n_steps + self._last_reported_step:
added_steps = global_step - self._last_reported_step
elapsed_time = current_time - self._last_reported_time
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(
tag=self._summary_tag, simple_value=steps_per_sec)])
self._summary_writer.add_summary(summary, global_step)
self._last_reported_step = global_step
self._last_reported_time = current_time
class NanLossDuringTrainingError(RuntimeError):
  """Raised when the training loss becomes NaN."""
def __str__(self):
return "NaN loss during training."
class NanTensorHook(session_run_hook.SessionRunHook):
  """Monitors the loss tensor and stops training if loss is NaN."""
def __init__(self, loss_tensor, fail_on_nan_loss=True):
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
def before_run(self, run_context):
return SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
if np.isnan(run_values.results):
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
run_context.request_stop()
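# --- Editor's note: illustrative sketch, not part of the original file.
# `loss_tensor` is assumed to be the training loss Tensor from the caller's
# graph; the hook fetches it each step via before_run.
def _example_nan_hooks(loss_tensor):
  strict = NanTensorHook(loss_tensor)  # default: raise NanLossDuringTrainingError
  lenient = NanTensorHook(loss_tensor, fail_on_nan_loss=False)  # log and stop
  return strict, lenient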
class SummarySaverHook(session_run_hook.SessionRunHook):
  """Saves summaries every N steps."""
def __init__(self,
save_steps=100,
output_dir=None,
summary_writer=None,
scaffold=None,
summary_op=None):
# TODO(ipolosukhin): Implement every N seconds.
self._summary_op = summary_op
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = SummaryWriterCache.get(output_dir)
self._scaffold = scaffold
self._save_steps = save_steps
# TODO(mdan): Throw an error if output_dir and summary_writer are None.
def begin(self):
self._last_saved_step = None
self._request_summary = True
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use SummarySaverHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._summary_op is not None:
requests["summary"] = self._summary_op
elif self._scaffold.summary_op is not None:
requests["summary"] = self._scaffold.summary_op
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
_ = run_context
if not self._summary_writer:
return
global_step = run_values.results["global_step"]
if self._last_saved_step is None:
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START), global_step)
if self._request_summary:
self._last_saved_step = global_step
if "summary" in run_values.results:
self._summary_writer.add_summary(run_values.results["summary"],
global_step)
self._request_summary = (
global_step >= self._last_saved_step + self._save_steps - 1)
def end(self, session=None):
if self._summary_writer:
self._summary_writer.flush()
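# --- Editor's note: illustrative sketch, not part of the original file.
# Either a concrete summary_op or a scaffold exposing one must be supplied;
# "/tmp/summaries" and `merged_summary_op` are caller-provided placeholders.
def _example_summary_saver(merged_summary_op):
  return SummarySaverHook(save_steps=200,
                          output_dir="/tmp/summaries",
                          summary_op=merged_summary_op)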
def _as_graph_element(obj):
  """Retrieves a Graph element (Tensor or Operation) by name, or passes a
  graph object from the current default graph through unchanged."""
graph = ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, "graph") or obj.graph != graph:
raise ValueError("Passed %s should have graph attribute that is equal "
"to current graph %s." % (obj, graph))
return obj
if ":" in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ":0")
# Check that there is no :1 (e.g. it's single output).
try:
graph.as_graph_element(obj + ":1")
except (KeyError, ValueError):
pass
else:
raise ValueError("Name %s is ambiguous, "
"as this `Operation` has multiple outputs "
"(at least 2)." % obj)
return element
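# --- Editor's note: illustrative sketch, not part of the original file.
# Demonstrates the name-resolution behaviour of _as_graph_element: a bare op
# name such as "my_const" resolves to its single output tensor "my_const:0",
# while Tensors from the current default graph pass through unchanged.
# `constant_op` stands for TensorFlow's constant factory module, an import
# this excerpt does not show.
def _example_as_graph_element(constant_op):
  tensor = constant_op.constant(1.0, name="my_const")
  assert _as_graph_element(tensor) is tensor
  assert _as_graph_element("my_const") is tensor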
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: 790ebe6901ce2c482c9ba637f3d89943c6472b08
size: 6294
ext: py
lang: Python
max_stars_repo_path: elastalert/alerter/slack_alerter.py
max_stars_repo_name: JasperJuergensen/elastalert
max_stars_repo_head_hexsha: 8033361083b5edad1845ad9b307b8280ef278da7
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2020-06-19T13:02:19.000Z
max_stars_repo_stars_event_max_datetime: 2021-02-11T19:35:46.000Z
max_issues_repo_path: elastalert/alerter/slack_alerter.py
max_issues_repo_name: JasperJuergensen/elastalert
max_issues_repo_head_hexsha: 8033361083b5edad1845ad9b307b8280ef278da7
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 9
max_issues_repo_issues_event_min_datetime: 2020-04-09T15:40:37.000Z
max_issues_repo_issues_event_max_datetime: 2022-01-19T17:49:22.000Z
max_forks_repo_path: elastalert/alerter/slack_alerter.py
max_forks_repo_name: JasperJuergensen/elastalert
max_forks_repo_head_hexsha: 8033361083b5edad1845ad9b307b8280ef278da7
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null

content:
import copy
import json
import logging
import warnings
import requests
from elastalert.alerter import Alerter
from elastalert.exceptions import EAException
from elastalert.utils.time import DateTimeEncoder
from elastalert.utils.util import lookup_es_key
from requests import RequestException
log = logging.getLogger(__name__)
class SlackAlerter(Alerter):
""" Creates a Slack room message for each alert """
required_options = frozenset(["slack_webhook_url"])
def __init__(self, rule):
super(SlackAlerter, self).__init__(rule)
self.slack_webhook_url = self.rule["slack_webhook_url"]
if isinstance(self.slack_webhook_url, str):
self.slack_webhook_url = [self.slack_webhook_url]
self.slack_proxy = self.rule.get("slack_proxy", None)
self.slack_username_override = self.rule.get(
"slack_username_override", "elastalert"
)
self.slack_channel_override = self.rule.get("slack_channel_override", "")
if isinstance(self.slack_channel_override, str):
self.slack_channel_override = [self.slack_channel_override]
self.slack_title_link = self.rule.get("slack_title_link", "")
self.slack_title = self.rule.get("slack_title", "")
self.slack_emoji_override = self.rule.get("slack_emoji_override", ":ghost:")
self.slack_icon_url_override = self.rule.get("slack_icon_url_override", "")
self.slack_msg_color = self.rule.get("slack_msg_color", "danger")
self.slack_parse_override = self.rule.get("slack_parse_override", "none")
self.slack_text_string = self.rule.get("slack_text_string", "")
self.slack_alert_fields = self.rule.get("slack_alert_fields", "")
self.slack_ignore_ssl_errors = self.rule.get("slack_ignore_ssl_errors", False)
self.slack_timeout = self.rule.get("slack_timeout", 10)
self.slack_ca_certs = self.rule.get("slack_ca_certs")
self.slack_attach_kibana_discover_url = self.rule.get(
"slack_attach_kibana_discover_url", False
)
self.slack_kibana_discover_color = self.rule.get(
"slack_kibana_discover_color", "#ec4b98"
)
self.slack_kibana_discover_title = self.rule.get(
"slack_kibana_discover_title", "Discover in Kibana"
)
def format_body(self, body):
# https://api.slack.com/docs/formatting
return body
def get_aggregation_summary_text__maximum_width(self):
width = super(SlackAlerter, self).get_aggregation_summary_text__maximum_width()
# Reduced maximum width for prettier Slack display.
return min(width, 75)
def get_aggregation_summary_text(self, matches):
text = super(SlackAlerter, self).get_aggregation_summary_text(matches)
if text:
text = "```\n{0}```\n".format(text)
return text
def populate_fields(self, matches):
alert_fields = []
for arg in self.slack_alert_fields:
arg = copy.copy(arg)
arg["value"] = lookup_es_key(matches[0], arg["value"])
alert_fields.append(arg)
return alert_fields
def alert(self, matches):
body = self.create_alert_body(matches)
body = self.format_body(body)
# post to slack
headers = {"content-type": "application/json"}
# set https proxy, if it was provided
proxies = {"https": self.slack_proxy} if self.slack_proxy else None
payload = {
"username": self.slack_username_override,
"parse": self.slack_parse_override,
"text": self.slack_text_string,
"attachments": [
{
"color": self.slack_msg_color,
"title": self.create_title(matches),
"text": body,
"mrkdwn_in": ["text", "pretext"],
"fields": [],
}
],
}
        # if we have defined fields, populate notable fields for the alert
if self.slack_alert_fields != "":
payload["attachments"][0]["fields"] = self.populate_fields(matches)
if self.slack_icon_url_override != "":
payload["icon_url"] = self.slack_icon_url_override
else:
payload["icon_emoji"] = self.slack_emoji_override
if self.slack_title != "":
payload["attachments"][0]["title"] = self.slack_title
if self.slack_title_link != "":
payload["attachments"][0]["title_link"] = self.slack_title_link
if self.slack_attach_kibana_discover_url:
kibana_discover_url = lookup_es_key(matches[0], "kibana_discover_url")
if kibana_discover_url:
payload["attachments"].append(
{
"color": self.slack_kibana_discover_color,
"title": self.slack_kibana_discover_title,
"title_link": kibana_discover_url,
}
)
for url in self.slack_webhook_url:
for channel_override in self.slack_channel_override:
try:
if self.slack_ca_certs:
verify = self.slack_ca_certs
else:
verify = not self.slack_ignore_ssl_errors
if self.slack_ignore_ssl_errors:
requests.packages.urllib3.disable_warnings()
payload["channel"] = channel_override
response = requests.post(
url,
data=json.dumps(payload, cls=DateTimeEncoder),
headers=headers,
verify=verify,
proxies=proxies,
timeout=self.slack_timeout,
)
warnings.resetwarnings()
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to slack: %s" % e)
log.info("Alert '%s' sent to Slack" % self.rule["name"])
def get_info(self):
return {
"type": "slack",
"slack_username_override": self.slack_username_override,
}
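# --- Editor's note: illustrative sketch, not part of the original file.
# A rule needs at least slack_webhook_url; the URL, channel and match below
# are placeholders, and the sketch assumes the Alerter base class accepts a
# rule dict this minimal.
def _example_slack_alert():
    rule = {
        "name": "example-rule",
        "slack_webhook_url": "https://hooks.slack.com/services/T000/B000/XXX",
        "slack_channel_override": "#alerts",
    }
    alerter = SlackAlerter(rule)
    # Each match is a dict shaped like an Elasticsearch hit's _source.
    alerter.alert([{"@timestamp": "2021-01-01T00:00:00Z", "message": "boom"}])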
avg_line_length: 40.606452
max_line_length: 87
alphanum_fraction: 0.601525

content_no_comment:
import copy
import json
import logging
import warnings
import requests
from elastalert.alerter import Alerter
from elastalert.exceptions import EAException
from elastalert.utils.time import DateTimeEncoder
from elastalert.utils.util import lookup_es_key
from requests import RequestException
log = logging.getLogger(__name__)
class SlackAlerter(Alerter):
required_options = frozenset(["slack_webhook_url"])
def __init__(self, rule):
super(SlackAlerter, self).__init__(rule)
self.slack_webhook_url = self.rule["slack_webhook_url"]
if isinstance(self.slack_webhook_url, str):
self.slack_webhook_url = [self.slack_webhook_url]
self.slack_proxy = self.rule.get("slack_proxy", None)
self.slack_username_override = self.rule.get(
"slack_username_override", "elastalert"
)
self.slack_channel_override = self.rule.get("slack_channel_override", "")
if isinstance(self.slack_channel_override, str):
self.slack_channel_override = [self.slack_channel_override]
self.slack_title_link = self.rule.get("slack_title_link", "")
self.slack_title = self.rule.get("slack_title", "")
self.slack_emoji_override = self.rule.get("slack_emoji_override", ":ghost:")
self.slack_icon_url_override = self.rule.get("slack_icon_url_override", "")
self.slack_msg_color = self.rule.get("slack_msg_color", "danger")
self.slack_parse_override = self.rule.get("slack_parse_override", "none")
self.slack_text_string = self.rule.get("slack_text_string", "")
self.slack_alert_fields = self.rule.get("slack_alert_fields", "")
self.slack_ignore_ssl_errors = self.rule.get("slack_ignore_ssl_errors", False)
self.slack_timeout = self.rule.get("slack_timeout", 10)
self.slack_ca_certs = self.rule.get("slack_ca_certs")
self.slack_attach_kibana_discover_url = self.rule.get(
"slack_attach_kibana_discover_url", False
)
self.slack_kibana_discover_color = self.rule.get(
"slack_kibana_discover_color", "#ec4b98"
)
self.slack_kibana_discover_title = self.rule.get(
"slack_kibana_discover_title", "Discover in Kibana"
)
def format_body(self, body):
return body
def get_aggregation_summary_text__maximum_width(self):
width = super(SlackAlerter, self).get_aggregation_summary_text__maximum_width()
return min(width, 75)
def get_aggregation_summary_text(self, matches):
text = super(SlackAlerter, self).get_aggregation_summary_text(matches)
if text:
text = "```\n{0}```\n".format(text)
return text
def populate_fields(self, matches):
alert_fields = []
for arg in self.slack_alert_fields:
arg = copy.copy(arg)
arg["value"] = lookup_es_key(matches[0], arg["value"])
alert_fields.append(arg)
return alert_fields
def alert(self, matches):
body = self.create_alert_body(matches)
body = self.format_body(body)
headers = {"content-type": "application/json"}
proxies = {"https": self.slack_proxy} if self.slack_proxy else None
payload = {
"username": self.slack_username_override,
"parse": self.slack_parse_override,
"text": self.slack_text_string,
"attachments": [
{
"color": self.slack_msg_color,
"title": self.create_title(matches),
"text": body,
"mrkdwn_in": ["text", "pretext"],
"fields": [],
}
],
}
if self.slack_alert_fields != "":
payload["attachments"][0]["fields"] = self.populate_fields(matches)
if self.slack_icon_url_override != "":
payload["icon_url"] = self.slack_icon_url_override
else:
payload["icon_emoji"] = self.slack_emoji_override
if self.slack_title != "":
payload["attachments"][0]["title"] = self.slack_title
if self.slack_title_link != "":
payload["attachments"][0]["title_link"] = self.slack_title_link
if self.slack_attach_kibana_discover_url:
kibana_discover_url = lookup_es_key(matches[0], "kibana_discover_url")
if kibana_discover_url:
payload["attachments"].append(
{
"color": self.slack_kibana_discover_color,
"title": self.slack_kibana_discover_title,
"title_link": kibana_discover_url,
}
)
for url in self.slack_webhook_url:
for channel_override in self.slack_channel_override:
try:
if self.slack_ca_certs:
verify = self.slack_ca_certs
else:
verify = not self.slack_ignore_ssl_errors
if self.slack_ignore_ssl_errors:
requests.packages.urllib3.disable_warnings()
payload["channel"] = channel_override
response = requests.post(
url,
data=json.dumps(payload, cls=DateTimeEncoder),
headers=headers,
verify=verify,
proxies=proxies,
timeout=self.slack_timeout,
)
warnings.resetwarnings()
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to slack: %s" % e)
log.info("Alert '%s' sent to Slack" % self.rule["name"])
def get_info(self):
return {
"type": "slack",
"slack_username_override": self.slack_username_override,
}
is_comment_constant_removed: true
is_sharp_comment_removed: true