# NOTE: an extraction artifact (dataset table header) was removed here.
"""
Get metric definitions for a worker pool of a hostingEnvironment (App
Service Environment).
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of hostingEnvironment (App Service Environment)
:type name: str
:param worker_pool_name: Name of worker pool
:type worker_pool_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`MetricDefinitionCollection
<azure.mgmt.web.models.MetricDefinitionCollection>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/metricdefinitions'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('MetricDefinitionCollection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_hosting_environment_multi_role_usages(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get usages for a multiRole pool of a hostingEnvironment (App Service
    Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`UsageCollection
     <azure.mgmt.web.models.UsageCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Resolve the URL template against the serialized path arguments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    request_url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/usages',
        **path_args)

    # Only the api-version travels in the query string.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str')
    }

    # Assemble headers; caller-supplied headers may override the defaults,
    # while accept-language is applied last.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and translate non-200 responses into CloudError.
    request = self._client.get(request_url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = self._deserialize('UsageCollection', response) if response.status_code == 200 else None
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_hosting_environment_web_worker_usages(
        self, resource_group_name, name, worker_pool_name, custom_headers=None, raw=False, **operation_config):
    """Get usages for a worker pool of a hostingEnvironment (App Service
    Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param worker_pool_name: Name of worker pool
    :type worker_pool_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`UsageCollection
     <azure.mgmt.web.models.UsageCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Resolve the URL template against the serialized path arguments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    request_url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/usages',
        **path_args)

    # Only the api-version travels in the query string.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str')
    }

    # Assemble headers; caller-supplied headers may override the defaults,
    # while accept-language is applied last.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and translate non-200 responses into CloudError.
    request = self._client.get(request_url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = self._deserialize('UsageCollection', response) if response.status_code == 200 else None
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_hosting_environment_sites(
        self, resource_group_name, name, properties_to_include=None, custom_headers=None, raw=False, **operation_config):
    """Get all sites on the hostingEnvironment (App Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param properties_to_include: Comma separated list of site properties
     to include
    :type properties_to_include: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`SiteCollection <azure.mgmt.web.models.SiteCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Resolve the URL template against the serialized path arguments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    request_url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/sites',
        **path_args)

    # Query string: the optional property filter plus the api-version.
    query = {}
    if properties_to_include is not None:
        query['propertiesToInclude'] = self._serialize.query("properties_to_include", properties_to_include, 'str')
    query['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Assemble headers; caller-supplied headers may override the defaults,
    # while accept-language is applied last.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and translate non-200 responses into CloudError.
    request = self._client.get(request_url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = self._deserialize('SiteCollection', response) if response.status_code == 200 else None
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_hosting_environment_web_hosting_plans(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get all serverfarms (App Service Plans) on the hostingEnvironment (App
    Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`ServerFarmCollection
     <azure.mgmt.web.models.ServerFarmCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Resolve the URL template against the serialized path arguments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    request_url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/webhostingplans',
        **path_args)

    # Only the api-version travels in the query string.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str')
    }

    # Assemble headers; caller-supplied headers may override the defaults,
    # while accept-language is applied last.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and translate non-200 responses into CloudError.
    request = self._client.get(request_url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = self._deserialize('ServerFarmCollection', response) if response.status_code == 200 else None
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_hosting_environment_server_farms(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get all serverfarms (App Service Plans) on the hostingEnvironment (App
    Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`ServerFarmCollection
     <azure.mgmt.web.models.ServerFarmCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Resolve the URL template against the serialized path arguments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    request_url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/serverfarms',
        **path_args)

    # Only the api-version travels in the query string.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str')
    }

    # Assemble headers; caller-supplied headers may override the defaults,
    # while accept-language is applied last.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and translate non-200 responses into CloudError.
    request = self._client.get(request_url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = self._deserialize('ServerFarmCollection', response) if response.status_code == 200 else None
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_multi_role_pools(
self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
"""
Get all multi role pools
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of hostingEnvironment (App Service Environment)
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`WorkerPoolCollection
<azure.mgmt.web.models.WorkerPoolCollection>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, | |
import matplotlib.pyplot as plt
from pathlib import Path
import pandas as pd
import os
import numpy as np
def get_file_paths(file_directory):
    """Return the names of all entries in *file_directory* that are directories.

    *file_directory* must be a ``pathlib.Path`` (the entries are joined with
    the ``/`` operator); plain files are filtered out, listing order is kept.
    """
    entries = os.listdir(file_directory)
    return [entry for entry in entries if os.path.isdir(file_directory / entry)]
def plot_day(plot_directory, df_phases_day, sdp_name, start_time, df_comparison_values, plot_method, comparison_label):
    """Plot one day of per-phase data for one station and save it if relevant.

    A day is "relevant" when *plot_method* reports at least one transgression
    for at least one phase; only relevant days are written to disk.

    :param plot_directory: base directory (``pathlib.Path``) for the plots
    :param df_phases_day: iterable of per-phase DataFrames, already sliced to one day
    :param sdp_name: station (service delivery point) name; used as subdirectory
    :param start_time: day label (string) used as the file name
    :param df_comparison_values: reference series drawn in grey, may be empty
    :param plot_method: callable(df_phase, phase_number) that plots one phase
        and returns the number of transgressions found
    :param comparison_label: legend label for the comparison series
    :return: total number of transgressions over all phases of this day
    """
    # Ensure the per-station output directory exists.
    sdp_directory = plot_directory / sdp_name
    if not os.path.exists(sdp_directory):
        os.makedirs(sdp_directory)
    plt.figure(1)
    plt.ylabel('Phases')
    p_counter = 1
    relevant_plot = False
    transgressions_sum = 0
    for df_p_day in df_phases_day:
        if not df_p_day.empty:
            transgressions = plot_method(df_p_day, p_counter)
            transgressions_sum += transgressions
            relevant_plot = relevant_plot or transgressions > 0
        # NOTE(review): the phase counter advances even for empty phases so
        # labels stay aligned with phase numbers — confirm against original.
        p_counter = p_counter + 1
    # Draw the grey comparison curve only when the day will be saved.
    if relevant_plot and not df_comparison_values.empty:
        df_comparison_values.plot(figsize=(24, 6), linewidth=0.5, color='grey', label=comparison_label)
    if relevant_plot:
        legend = plt.legend(fontsize='x-large', loc='lower left')
        # Thicken legend line samples for readability on the wide figure.
        for line in legend.get_lines():
            line.set_linewidth(4.0)
    plot_path = plot_directory / sdp_name / start_time
    if relevant_plot:
        plt.savefig(plot_path)
    plt.close(1)
    if transgressions_sum > 0:
        print(start_time)
        print(transgressions_sum)
    return transgressions_sum
def plot_pickle_daywise(pickle_directory, plot_directory, plot_method, comparison_series_func):
    """Plot every station's data day by day and count transgressions.

    For each station subdirectory of *pickle_directory*, loads the three
    per-phase pickles (``h_phase1``..``h_phase3``), iterates over the covered
    date range one day at a time and delegates plotting to :func:`plot_day`.

    :param pickle_directory: ``pathlib.Path`` containing one subdirectory per station
    :param plot_directory: base output directory for the plots
    :param plot_method: per-phase plotting callable, forwarded to plot_day
    :param comparison_series_func: callable(station_name) returning
        ``(label, comparison_series)`` for the grey reference curve
    :return: tuple ``(total_transgressions, total_number_of_samples)``
    """
    transgression_sum = 0
    nmbr_elements_sum = 0
    file_paths = get_file_paths(pickle_directory)
    print(file_paths)
    for path in file_paths:
        print(path)
        # Comparison data is resolved from the station name, before the
        # name is turned into a full path.
        comparison_label, df_comparison_values = comparison_series_func(path)
        # df_mean_values = pd.read_pickle(pickle_directory/(path+'season_aggregation')).sort_index()
        path = pickle_directory / Path(path)
        df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
        nmbr_elements_sum += sum(map(lambda df: df.shape[0], df_phases))
        day = pd.Timedelta('1d')
        # Overall date range covered by any of the three phases.
        min_date = min(list(map(lambda df: df.index.min(), df_phases))).date()
        max_date = max(list(map(lambda df: df.index.max(), df_phases))).date()
        print(min_date)
        print(max_date)
        for start_time in pd.date_range(min_date, max_date, freq='d'):
            end_time = start_time + day
            # df_day = df.loc[df.index>start_time and df.index<end_time, :]
            df_phases_day = list(map(lambda df: df.loc[start_time:end_time], df_phases))
            df_comparison_values_day = df_comparison_values.loc[start_time:end_time]
            # print(start_time.date())
            transgression_sum += plot_day(plot_directory, df_phases_day, path.name, str(start_time.date()),
                                          df_comparison_values_day, plot_method, comparison_label)
    return transgression_sum, nmbr_elements_sum
def plot_station_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
    """Plot days where |StationDif| exceeds *anomaly_threshold* and record the ratio.

    Plots are written below a threshold-specific subdirectory of
    *base_plot_directory*; the overall anomaly ratio is recorded as an empty
    marker file named after the ratio.
    """
    plot_directory = base_plot_directory / ("StationDif_" + str(anomaly_threshold).replace(".", "_"))

    def mark_station_dif(df_p_day, p_counter):
        # Sample positions whose station difference exceeds the threshold.
        exceeded = list(np.where(abs(df_p_day.StationDif) > anomaly_threshold)[0])
        df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=exceeded, marker='o',
                            markerfacecolor='black', label="phase" + str(p_counter))
        return len(exceeded)

    def comparison_series(station_name):
        # The same station-average pickle is used for every station.
        return "meanStationAverage", pd.read_pickle(pickle_directory / 'meanStationValues')

    total, element_count = plot_pickle_daywise(pickle_directory, plot_directory,
                                               mark_station_dif, comparison_series)
    print(total)
    print(element_count)
    ratio = total / element_count
    print(ratio)
    with open(plot_directory / str(ratio), "w+"):
        pass
def plot_phase_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
    """Plot days where |phase_dif| exceeds *anomaly_threshold* and record the ratio.

    No comparison series is drawn; the anomaly ratio is recorded as an empty
    marker file named after the ratio.
    """
    plot_directory = base_plot_directory / ("PhaseDif_" + str(anomaly_threshold).replace(".", "_"))

    def mark_phase_dif(df_p_day, p_counter):
        # Sample positions whose phase difference exceeds the threshold.
        exceeded = list(np.where(abs(df_p_day.phase_dif) > anomaly_threshold)[0])
        df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=exceeded, marker='o',
                            markerfacecolor='black', label="phase" + str(p_counter))
        return len(exceeded)

    def comparison_series(station_name):
        # No reference curve for this anomaly type.
        return "", pd.DataFrame()

    total, element_count = plot_pickle_daywise(pickle_directory, plot_directory,
                                               mark_phase_dif, comparison_series)
    print(total)
    print(element_count)
    ratio = total / element_count
    print(ratio)
    with open(plot_directory / str(ratio), "w+"):
        pass
def plot_season_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
    """Plot days where |SeasDif| exceeds *anomaly_threshold* and record the ratio.

    The per-station seasonal aggregation pickle is drawn as comparison curve;
    the anomaly ratio is recorded as an empty marker file named after it.
    """
    # anomaly_threshold = 3.2270145810536146
    plot_directory = base_plot_directory / ("SeasDif_" + str(anomaly_threshold).replace(".", "_"))

    def mark_season_dif(df_p_day, p_counter):
        # Sample positions whose seasonal difference exceeds the threshold.
        exceeded = list(np.where(abs(df_p_day.SeasDif) > anomaly_threshold)[0])
        df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=exceeded, marker='o',
                            markerfacecolor='black', label="phase" + str(p_counter))
        return len(exceeded)

    def comparison_series(station_name):
        # Seasonal reference curve is station specific.
        return "meanSeasonalAverage", pd.read_pickle(
            pickle_directory / (station_name + 'season_aggregation')).sort_index()

    total, element_count = plot_pickle_daywise(pickle_directory, plot_directory,
                                               mark_season_dif, comparison_series)
    print(total)
    print(element_count)
    ratio = total / element_count
    print(ratio)
    with open(plot_directory / str(ratio), "w+"):
        pass
def plot_trafo_dif_anomalies(pickle_directory, base_plot_directory):
    """Plot days where the sample-to-sample voltage jump exceeds 1.5 and record the ratio.

    Uses a fixed threshold of 1.5 on ``Value.diff()``; the anomaly ratio is
    recorded as an empty marker file named after it.
    """
    anomaly_threshold = 1.5
    plot_directory = base_plot_directory / ("TrafoDif_" + str(anomaly_threshold).replace(".", "_"))

    def mark_value_jumps(df_p_day, p_counter):
        # Sample positions where the voltage jumps more than the threshold.
        exceeded = list(np.where(abs(df_p_day.Value.diff()) > anomaly_threshold)[0])
        df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=exceeded, marker='o',
                            markerfacecolor='black', label="phase" + str(p_counter))
        return len(exceeded)

    def comparison_series(station_name):
        # No reference curve for this anomaly type.
        return "", pd.DataFrame()

    total, element_count = plot_pickle_daywise(pickle_directory, plot_directory,
                                               mark_value_jumps, comparison_series)
    print(total)
    print(element_count)
    ratio = total / element_count
    print(ratio)
    with open(plot_directory / str(ratio), "w+"):
        pass
def plot_trafo_dif_anomalies_v2(pickle_directory, base_plot_directory, anomaly_threshold):
    """Plot days where |trafo| exceeds *anomaly_threshold* and record the ratio.

    Variant of :func:`plot_trafo_dif_anomalies` that uses the precomputed
    ``trafo`` column and a caller-supplied threshold.
    """
    plot_directory = base_plot_directory / ("TrafoDif_v2_" + str(anomaly_threshold).replace(".", "_"))

    def mark_trafo(df_p_day, p_counter):
        # Sample positions whose trafo indicator exceeds the threshold.
        exceeded = list(np.where(abs(df_p_day.trafo) > anomaly_threshold)[0])
        df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=exceeded, marker='o',
                            markerfacecolor='black', label="phase" + str(p_counter))
        return len(exceeded)

    def comparison_series(station_name):
        # No reference curve for this anomaly type.
        return "", pd.DataFrame()

    total, element_count = plot_pickle_daywise(pickle_directory, plot_directory,
                                               mark_trafo, comparison_series)
    print(total)
    print(element_count)
    ratio = total / element_count
    print(ratio)
    with open(plot_directory / str(ratio), "w+"):
        pass
def plot_time_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
    """Plot days where |time_passed| exceeds *anomaly_threshold* and record the ratio.

    Marks samples whose measurement gap is anomalously large; the anomaly
    ratio is recorded as an empty marker file named after it.
    """
    plot_directory = base_plot_directory / ("TimeDif_" + str(anomaly_threshold).replace(".", "_"))

    def mark_time_gaps(df_p_day, p_counter):
        # Sample positions whose measurement gap exceeds the threshold.
        exceeded = list(np.where(abs(df_p_day.time_passed) > anomaly_threshold)[0])
        df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=exceeded, marker='o',
                            markerfacecolor='black', label="phase" + str(p_counter))
        return len(exceeded)

    def comparison_series(station_name):
        # No reference curve for this anomaly type.
        return "", pd.DataFrame()

    total, element_count = plot_pickle_daywise(pickle_directory, plot_directory,
                                               mark_time_gaps, comparison_series)
    print(total)
    print(element_count)
    ratio = total / element_count
    print(ratio)
    with open(plot_directory / str(ratio), "w+"):
        pass
def get_quintiles(pickle_directory, quantile):
    """Return the *quantile* of |time_passed| pooled over all stations and phases.

    Walks every station subdirectory of *pickle_directory*, concatenates the
    absolute ``time_passed`` values of all three phase pickles and computes
    the requested quantile (used as an anomaly threshold elsewhere).

    :param pickle_directory: ``pathlib.Path`` with one subdirectory per station
    :param quantile: quantile in [0, 1] passed to ``Series.quantile``
    :return: the quantile value (float; NaN when no data was found)
    """
    file_paths = get_file_paths(pickle_directory)
    print(file_paths)
    # Collect the per-phase series and concatenate once at the end:
    # Series.append was removed in pandas 2.0 and was O(n^2) anyway.
    parts = []
    for path in file_paths:
        print(path)
        path = pickle_directory / Path(path)
        df_phases = [pd.read_pickle(path / ("h_phase" + p)) for p in ['1', '2', '3']]
        for df_p in df_phases:
            parts.append(df_p.time_passed.reset_index(drop=True).abs())
    aggregated_series = pd.concat(parts, ignore_index=True) if parts else pd.Series(dtype=float)
    threshold = aggregated_series.quantile(q=quantile)
    print(threshold)
    return threshold
def show_df2(pickle_name, pickle_dir=Path('pickles')):
    """Print 2017-04-16 phase voltages, trafo flags and simultaneous jumps.

    Loads the three per-phase pickles for *pickle_name*, restricts them to
    the window 2017-04-16..2017-04-17, derives each phase's voltage gradient
    in V/s and prints both the rows where all three phases move by at least
    0.15 V/s at once and the full window.
    """
    # pd.datetime was a deprecated alias removed in pandas 1.0; use the
    # stdlib datetime instead (local import keeps this module's header intact).
    from datetime import datetime
    start, end = datetime(2017, 4, 16), datetime(2017, 4, 17)
    path = pickle_dir / pickle_name
    df_phases_h = [pd.read_pickle(path / ("h_phase" + p)) for p in ['1', '2', '3']]
    # df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p)), ['1', '2', '3']))
    # Voltage per phase (p1..p3) plus the trafo indicators (t1..t3).
    df_p_h = df_phases_h[0][['Value']].rename(columns={'Value': 'p1'}).loc[start:end]
    df_p_h['p2'] = df_phases_h[1][['Value']].loc[start:end]
    df_p_h['p3'] = df_phases_h[2][['Value']].loc[start:end]
    df_p_h['t1'] = df_phases_h[0][['trafo']].loc[start:end]
    df_p_h['t2'] = df_phases_h[1][['trafo']].loc[start:end]
    df_p_h['t3'] = df_phases_h[2][['trafo']].loc[start:end]
    # Gradient in volts per second between consecutive samples.
    df_p_dif = pd.DataFrame()
    for col in ('p1', 'p2', 'p3'):
        df_p_dif[col] = df_p_h[col].diff() / df_p_h[col].index.to_series().diff().dt.total_seconds()
    # Rows where every phase jumps by >= 0.15 V/s simultaneously.
    df_p_dif_a = df_p_dif.loc[abs(df_p_dif['p1']) >= 0.15].loc[abs(df_p_dif['p2']) >= 0.15].loc[
        abs(df_p_dif['p3']) >= 0.15]
    with pd.option_context('display.max_rows', None, 'display.max_columns', None):
        print(df_p_dif_a)
        print(df_p_h)
def show_df(pickle_name, pickle_dir=Path('pickles')):
    """Print the 2017-08-07 phase voltages for station *pickle_name*.

    Loads the three per-phase pickles, restricts them to the window
    2017-08-07..2017-08-08 and prints the three voltage columns without
    row/column truncation. (A gradient computation whose output was never
    printed has been removed; see :func:`show_df2` for that analysis.)
    """
    # pd.datetime was a deprecated alias removed in pandas 1.0; use the
    # stdlib datetime instead (local import keeps this module's header intact).
    from datetime import datetime
    start, end = datetime(2017, 8, 7), datetime(2017, 8, 8)
    path = pickle_dir / pickle_name
    df_phases_h = [pd.read_pickle(path / ("h_phase" + p)) for p in ['1', '2', '3']]
    # df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p)), ['1', '2', '3']))
    # Voltage per phase (p1..p3) plus the trafo indicators (t1..t3).
    df_p_h = df_phases_h[0][['Value']].rename(columns={'Value': 'p1'}).loc[start:end]
    df_p_h['p2'] = df_phases_h[1][['Value']].loc[start:end]
    df_p_h['p3'] = df_phases_h[2][['Value']].loc[start:end]
    df_p_h['t1'] = df_phases_h[0][['trafo']].loc[start:end]
    df_p_h['t2'] = df_phases_h[1][['trafo']].loc[start:end]
    df_p_h['t3'] = df_phases_h[2][['trafo']].loc[start:end]
    with pd.option_context('display.max_rows', None, 'display.max_columns', None):
        print(df_p_h[['p1', 'p2', 'p3']])
def construct_overview2():
    """Build a per-station measurement overview table (for interactive inspection).

    For every station directory under ``./../pickles``, pools the three phase
    DataFrames and produces one row per station: measurement span in days,
    median/mean sampling interval and min/max/mean voltage. Uses list
    collection plus ``pd.concat``/``pd.DataFrame`` because ``Series.append``
    and ``DataFrame.append`` were removed in pandas 2.0.
    """
    file_paths = os.listdir("./../pickles")
    df_ps = []
    for fp in file_paths:
        path = Path("./../pickles") / fp
        df_ps.append([pd.read_pickle(path / ("h_phase" + p)) for p in ['1', '2', '3']])
    columns = ["Messungszeitraum [d]", "MA Median [s]", "MA Mean [s]", "Max U [V]",
               "Min U [V]", "Average U [V]"]
    rows = []
    for df_phases in df_ps:
        # Pool sampling intervals and voltages across the three phases.
        time_dif = pd.concat([df_p['time_passed'] for df_p in df_phases], ignore_index=True)
        voltage = pd.concat([df_p["Value"] for df_p in df_phases], ignore_index=True)
        # Measurement span in whole days, taken from phase 1's index.
        length = (df_phases[0].index[-1] - df_phases[0].index[0]).days
        # Station label: last four characters of the service delivery point id.
        name = df_phases[0]["ServiceDeliveryPoint"][0][-4:]
        rows.append(pd.Series(name=name,
                              data={"MA Median [s]": time_dif.median(),
                                    "MA Mean [s]": time_dif.mean(),
                                    "Messungszeitraum [d]": length,
                                    "Max U [V]": max(voltage),
                                    "Min U [V]": min(voltage),
                                    "Average U [V]": voltage.mean()}))
    df_table = pd.DataFrame(rows).reindex(columns=columns)
    df_table1 = df_table.copy()  # unmodified snapshot, kept for debugger inspection
    df_table.index.name = "Station"
    # df_t = df_table.astype("object").copy()
    print("x")
def construct_overview():
    """Build a per-station anomaly-count overview table (for interactive inspection).

    Counts, per station, how many samples exceed the fixed thresholds for
    each anomaly type (jump/trafo, phase, station, season, time gap), then
    adds totals ("gesamt") and ratios ("anteil") over all stations. Uses
    list collection plus ``pd.concat``/``pd.DataFrame`` because
    ``Series.append`` and ``DataFrame.append`` were removed in pandas 2.0.
    """
    file_paths = os.listdir("./../pickles")
    df_ps = []
    for fp in file_paths:
        path = Path("./../pickles") / fp
        df_ps.append([pd.read_pickle(path / ("h_phase" + p)) for p in ['1', '2', '3']])
    n_p = 0
    columns = ["Datenpunkte", "Sprunga.", "Zeita.", "Phasena.", "Saisona.",
               "Stationsa.", "Messungszeitraum [d]", "Messungsabstand [s]"]
    rows = []
    tr, ph, st, se, ti = 0, 0, 0, 0, 0
    for df_phases in df_ps:
        n_tr = n_ph = n_st = n_se = n_ti = 0
        n_p_h = 0
        time_difs = []
        for df_p in df_phases:
            # Per-anomaly-type counts; thresholds stem from the quantile
            # analysis elsewhere in this module — TODO confirm provenance.
            n_tr += df_p[abs(df_p['trafo']) > 0.1].shape[0]
            n_ph += df_p[abs(df_p['phase_dif']) > 7.34].shape[0]
            n_st += df_p[abs(df_p['StationDif']) > 8.772].shape[0]
            n_se += df_p[abs(df_p['SeasDif']) > 5.87].shape[0]
            n_ti += df_p[abs(df_p['time_passed']) > 179].shape[0]
            n_p += df_p.shape[0]
            n_p_h += df_p.shape[0]
            print(n_tr)
            time_difs.append(df_p['time_passed'])
        med_time_dif = pd.concat(time_difs, ignore_index=True).median()
        length = (df_phases[0].index[-1] - df_phases[0].index[0]).days
        # Station label: last four characters of the service delivery point id.
        name = df_phases[0]["ServiceDeliveryPoint"][0][-4:]
        rows.append(pd.Series(name=name,
                              data={"Datenpunkte": n_p_h, "Sprunga.": n_tr, "Zeita.": n_ti,
                                    "Phasena.": n_ph, "Saisona.": n_se,
                                    "Stationsa.": n_st, "Messungsabstand [s]": med_time_dif,
                                    "Messungszeitraum [d]": length}))
        tr, ti, ph, se, st = tr + n_tr, ti + n_ti, ph + n_ph, se + n_se, st + n_st
    df_table = pd.DataFrame(rows).reindex(columns=columns)
    df_table1 = df_table.copy()  # snapshot including the span/interval columns
    df_table.drop(columns=["Messungszeitraum [d]", "Messungsabstand [s]"], inplace=True)
    df_table.index.name = "Station"
    totals = pd.Series(name="gesamt", data={"Datenpunkte": n_p, "Sprunga.": tr, "Zeita.": ti,
                                            "Phasena.": ph, "Saisona.": se, "Stationsa.": st})
    shares = pd.Series(name="anteil", data={"Datenpunkte": n_p / n_p, "Sprunga.": tr / n_p,
                                            "Zeita.": ti / n_p, "Phasena.": ph / n_p,
                                            "Saisona.": se / n_p, "Stationsa.": st / n_p})
    df_table = pd.concat([df_table, pd.DataFrame([totals, shares])])
    df_t = df_table.astype("object").copy()
    df_t.Datenpunkte = df_t.Datenpunkte.astype("int")
    print("x")
def main():
    """Entry point: build the second overview table.

    The commented-out calls below are the alternative analyses this script
    supports; they are kept for reference.
    """
    construct_overview2()
    # pickle_directory = Path("pickles")
    # base_plot_dir = Path("plots")
    # quantile = .999
    # anomaly_threshold = get_quintiles(pickle_directory, quantile)
    # plot_time_dif_anomalies(pickle_directory, base_plot_dir, anomaly_threshold)
    # plot_trafo_dif_anomalies_v2(pickle_directory, base_plot_dir, 0.15)
    # plot_trafo_dif_anomalies_v2(pickle_directory, base_plot_dir, 0.1)
    # show_df('NW00000000000BISMARKSTRASSNV04609', pickle_directory)
    # NOTE(review): a garbled extraction artifact (stray scratch notes about
    # df_table / n_p) was removed from the end of this function.
# repository: Anton-Latukha/wakatime
# -*- coding: utf-8 -*-
"""
wakatime.arguments
~~~~~~~~~~~~~~~~~~
Command-line arguments.
:copyright: (c) 2016 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import os
import re
import time
import traceback
from .__about__ import __version__
from .compat import basestring
from .configs import getConfigFile, parseConfigFile
from .constants import AUTH_ERROR, DEFAULT_SYNC_OFFLINE_ACTIVITY, SUCCESS
from .packages import argparse
class FileAction(argparse.Action):
    """argparse action that unquotes its value and resolves it to a real path."""

    def __call__(self, parser, namespace, values, option_string=None):
        cleaned = values
        if isinstance(cleaned, basestring) and cleaned.startswith('"'):
            # Strip surrounding double quotes, then unescape embedded \" pairs.
            cleaned = cleaned.strip('"').replace('\\"', '"')
        try:
            if os.path.isfile(cleaned):
                cleaned = os.path.realpath(cleaned)
        except:  # pragma: nocover
            pass
        setattr(namespace, self.dest, cleaned)
class StoreWithoutQuotes(argparse.Action):
    """argparse action that stores the value with surrounding quotes removed."""

    def __call__(self, parser, namespace, values, option_string=None):
        cleaned = values
        if isinstance(cleaned, basestring) and cleaned.startswith('"'):
            # Strip surrounding double quotes, then unescape embedded \" pairs.
            cleaned = cleaned.strip('"').replace('\\"', '"')
        setattr(namespace, self.dest, cleaned)
def parse_arguments():
"""Parse command line arguments and configs from ~/.wakatime.cfg.
Command line arguments take precedence over config file settings.
Returns instances of ArgumentParser and SafeConfigParser.
"""
# define supported command line arguments
parser = argparse.ArgumentParser(description='Common interface for the ' +
'WakaTime api.')
parser.add_argument('--entity', dest='entity', metavar='FILE',
action=FileAction,
help='Absolute path to file for the heartbeat. Can ' +
'also be a url, domain or app when ' +
'--entity-type is not file.')
parser.add_argument('--file', dest='file', action=FileAction,
help=argparse.SUPPRESS)
parser.add_argument('--key', dest='key', action=StoreWithoutQuotes,
metavar='API_KEY',
help='Your wakatime api key; uses api_key from ' +
'~/.wakatime.cfg by default.')
parser.add_argument('--write', dest='is_write', action='store_true',
help='When set, tells api this heartbeat was ' +
'triggered from writing to a file.')
parser.add_argument('--plugin', dest='plugin', action=StoreWithoutQuotes,
help='Optional text editor plugin name and version ' +
'for User-Agent header.')
parser.add_argument('--time', dest='timestamp', metavar='time',
type=float, action=StoreWithoutQuotes,
help='Optional floating-point unix epoch timestamp. ' +
'Uses current time by default.')
parser.add_argument('--lineno', dest='lineno', action=StoreWithoutQuotes,
metavar='INT',
help='Optional line number. This is the current ' +
'line being edited.')
parser.add_argument('--cursorpos', dest='cursorpos',
metavar='INT', action=StoreWithoutQuotes,
help='Optional cursor position in the current file.')
parser.add_argument('--entity-type', dest='entity_type',
action=StoreWithoutQuotes,
help='Entity type for this heartbeat. Can be ' +
'"file", "domain" or "app". Defaults to "file".')
parser.add_argument('--category', dest='category',
action=StoreWithoutQuotes,
help='Category of this heartbeat activity. Can be ' +
'"coding", "building", "indexing", ' +
'"debugging", "running tests", ' +
'"writing tests", "manual testing", ' +
'"code reviewing", "browsing", or "designing". ' +
'Defaults to "coding".')
parser.add_argument('--proxy', dest='proxy', action=StoreWithoutQuotes,
help='Optional proxy configuration. Supports HTTPS '+
'and SOCKS proxies. For example: '+
'https://user:pass@host:port or '+
'socks5://user:pass@host:port or ' +
'domain\\user:pass')
parser.add_argument('--no-ssl-verify', dest='nosslverify',
action='store_true',
help='Disables SSL certificate verification for HTTPS '+
'requests. By default, SSL certificates are ' +
'verified.')
parser.add_argument('--ssl-certs-file', dest='ssl_certs_file',
metavar='FILE', action=StoreWithoutQuotes,
help='Override the bundled Python Requests CA certs ' +
'file. By default, uses certifi for ca certs.')
parser.add_argument('--project', dest='project', action=StoreWithoutQuotes,
help='Optional project name.')
parser.add_argument('--alternate-project', dest='alternate_project',
metavar='PROJECT', action=StoreWithoutQuotes,
help='Optional alternate project name. ' +
'Auto-discovered project takes priority.')
parser.add_argument('--alternate-language', dest='alternate_language',
action=StoreWithoutQuotes,
help=argparse.SUPPRESS)
parser.add_argument('--language', dest='language',
action=StoreWithoutQuotes,
help='Optional language name. If valid, takes ' +
'priority over auto-detected language.')
parser.add_argument('--local-file', dest='local_file', metavar='FILE',
action=FileAction,
help='Absolute path to local file for the ' +
'heartbeat. When --entity is a remote file, ' +
'this local file will be used for stats and ' +
'just the value of --entity sent with heartbeat.')
parser.add_argument('--hostname', dest='hostname',
action=StoreWithoutQuotes,
help='Hostname of current machine.')
parser.add_argument('--disable-offline', dest='offline',
action='store_false',
help='Disables offline time logging instead of ' +
'queuing logged time.')
parser.add_argument('--disableoffline', dest='offline_deprecated',
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--hide-file-names', dest='hide_file_names',
action='store_true',
help='Obfuscate filenames. Will not send file names ' +
'to api.')
parser.add_argument('--hide-filenames', dest='hide_filenames',
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--hidefilenames', dest='hidefilenames',
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--hide-project-names', dest='hide_project_names',
action='store_true',
help='Obfuscate project names. When a project ' +
'folder is detected instead of using the ' +
'folder name as the project, a ' +
'.wakatime-project file is created with a ' +
'random project name.')
parser.add_argument('--hide-branch-names', dest='hide_branch_names',
action='store_true',
help='Obfuscate branch names. Will not send revision ' +
'control branch names to api.')
parser.add_argument('--exclude', dest='exclude', action='append',
metavar='PATH',
help='Filename patterns to exclude from logging. ' +
'POSIX regex syntax. Can be used more than once.')
parser.add_argument('--exclude-unknown-project',
dest='exclude_unknown_project', action='store_true',
help='When set, any activity where the project ' +
'cannot be detected will be ignored.')
parser.add_argument('--include', dest='include', action='append',
metavar='PATH',
help='Filename patterns to log. When used in ' +
'combination with --exclude, files matching ' +
'include will still be logged. POSIX regex ' +
'syntax. Can be used more than once.')
parser.add_argument('--include-only-with-project-file',
dest='include_only_with_project_file',
action='store_true',
help='Disables tracking folders unless they contain ' +
'a .wakatime-project file. Defaults to false.')
parser.add_argument('--ignore', dest='ignore', action='append',
help=argparse.SUPPRESS)
parser.add_argument('--extra-heartbeats', dest='extra_heartbeats',
action='store_true',
help='Reads extra heartbeats from STDIN as a JSON ' +
'array until EOF.')
parser.add_argument('--log-file', dest='log_file',
metavar='FILE', action=StoreWithoutQuotes,
help='Defaults to ~/.wakatime.log.')
parser.add_argument('--logfile', dest='logfile', action=StoreWithoutQuotes,
help=argparse.SUPPRESS)
parser.add_argument('--api-url', dest='api_url', action=StoreWithoutQuotes,
metavar='URL',
help='Heartbeats api url. For debugging with a ' +
'local server.')
parser.add_argument('--apiurl', dest='apiurl', action=StoreWithoutQuotes,
help=argparse.SUPPRESS)
parser.add_argument('--timeout', dest='timeout', type=int,
metavar='SECONDS', action=StoreWithoutQuotes,
help='Number of seconds to wait when sending ' +
'heartbeats to api. Defaults to 60 seconds.')
parser.add_argument('--sync-offline-activity', metavar='AMOUNT',
dest='sync_offline_activity',
action=StoreWithoutQuotes,
help='Amount of offline activity to sync from your ' +
'local ~/.wakatime.db sqlite3 file to your ' +
'WakaTime Dashboard before exiting. Can be ' +
'"none" or a positive integer number. Defaults ' +
'to 100, meaning for every heartbeat sent ' +
'while online, 100 offline heartbeats are ' +
'synced. Can be used without --entity to only ' +
'sync offline activity without generating new ' +
'heartbeats.')
parser.add_argument('--today', dest='today',
action='store_true',
help='Prints dashboard time for Today, then exits.')
parser.add_argument('--config', dest='config', action=StoreWithoutQuotes,
metavar='FILE', help='Defaults to ~/.wakatime.cfg.')
parser.add_argument('--config-section', dest='config_section',
metavar='SECTION', action=StoreWithoutQuotes,
help='Optional config section when reading or ' +
'writing a config key. Defaults to [settings].')
parser.add_argument('--config-read', dest='config_read',
metavar='KEY', action=StoreWithoutQuotes,
help='Prints value for the given config key, then ' +
'exits.')
parser.add_argument('--config-write', dest='config_write',
nargs=2, metavar=('KEY', 'VALUE'),
action=StoreWithoutQuotes,
help='Writes value to a config key, then exits. ' +
'Expects two arguments, key and value.')
parser.add_argument('--verbose', dest='verbose', action='store_true',
help='Turns on debug messages in log file.')
parser.add_argument('--version', action='version', version=__version__)
# parse command line arguments
args = parser.parse_args()
# parse ~/.wakatime.cfg file
configs = parseConfigFile(args.config)
if args.config_read:
section = args.config_section or 'settings'
key = args.config_read
print(configs.get(section, key))
raise SystemExit(SUCCESS)
if args.config_write:
section = args.config_section or 'settings'
key = args.config_write[0]
val = args.config_write[1]
if not configs.has_section(section):
configs.add_section(section)
configs.set(section, key, val)
with open(args.config or getConfigFile(), 'w', encoding='utf-8') as fh:
configs.write(fh)
raise SystemExit(SUCCESS)
# use current unix epoch timestamp by default
if not args.timestamp:
args.timestamp = time.time()
# update args from configs
if not args.hostname:
if configs.has_option('settings', 'hostname'):
args.hostname = configs.get('settings', 'hostname')
if not args.key:
default_key = None
if configs.has_option('settings', 'api_key'):
default_key = configs.get('settings', 'api_key')
elif configs.has_option('settings', 'apikey'):
default_key = configs.get('settings', 'apikey')
if default_key:
args.key = default_key
else:
try:
parser.error('Missing api key. Find your api key from wakatime.com/settings/api-key.')
except SystemExit:
raise SystemExit(AUTH_ERROR)
is_valid = not not re.match(r'^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}$', args.key, re.I)
if not is_valid:
try:
parser.error('Invalid api key. Find your api key from wakatime.com/settings/api-key.')
except SystemExit:
raise SystemExit(AUTH_ERROR)
if not args.entity:
if args.file:
args.entity = args.file
elif (not args.sync_offline_activity or args.sync_offline_activity == 'none') and not args.today:
parser.error('argument --entity is required')
if not args.sync_offline_activity:
args.sync_offline_activity = DEFAULT_SYNC_OFFLINE_ACTIVITY
if args.sync_offline_activity == 'none':
args.sync_offline_activity = 0
try:
args.sync_offline_activity = int(args.sync_offline_activity)
if args.sync_offline_activity < 0:
raise Exception('Error')
except:
parser.error('argument --sync-offline-activity must be "none" or an integer number')
if not args.language and args.alternate_language:
args.language = args.alternate_language
if not args.exclude:
args.exclude = []
if configs.has_option('settings', 'ignore'):
try:
for pattern in configs.get('settings', 'ignore').split("\n"):
if pattern.strip() != '':
args.exclude.append(pattern)
except TypeError: # pragma: nocover
pass
if configs.has_option('settings', 'exclude'):
try:
for pattern in configs.get('settings', 'exclude').split("\n"):
if pattern.strip() != '':
args.exclude.append(pattern)
except TypeError: # pragma: nocover
pass
if not args.include_only_with_project_file and configs.has_option('settings', 'include_only_with_project_file'):
args.include_only_with_project_file = configs.get('settings', 'include_only_with_project_file') == 'true'
if not args.include:
args.include = []
if configs.has_option('settings', 'include'):
try:
for pattern in configs.get('settings', 'include').split("\n"):
if pattern.strip() != '':
args.include.append(pattern)
except TypeError: # pragma: nocover
pass
if not args.exclude_unknown_project and configs.has_option('settings', 'exclude_unknown_project'):
args.exclude_unknown_project = configs.getboolean('settings', 'exclude_unknown_project')
_boolean_or_list('hide_file_names', args, configs, alternative_names=['hide_filenames', 'hidefilenames'])
_boolean_or_list('hide_project_names', args, configs, alternative_names=['hide_projectnames', 'hideprojectnames'])
_boolean_or_list('hide_branch_names', args, configs, alternative_names=['hide_branchnames', 'hidebranchnames'], default=None)
if args.offline_deprecated:
args.offline = False
if args.offline and configs.has_option('settings', 'offline'):
args.offline = configs.getboolean('settings', 'offline')
if not args.proxy and configs.has_option('settings', 'proxy'):
args.proxy = configs.get('settings', 'proxy')
if args.proxy:
pattern = r'^((https?|socks5)://)?([^:@]+(:([^:@])+)?@)?[^:]+(:\d+)?$'
if '\\' in args.proxy:
pattern = r'^.*\\.+$'
is_valid = not not re.match(pattern, args.proxy, re.I)
if not is_valid:
parser.error('Invalid proxy. Must be in format ' +
'https://user:pass@host:port or ' +
'socks5://user:pass@host:port or ' +
'domain\\user:pass.')
if configs.has_option('settings', | |
# Source: cajal/inception_loop2019 -- staticnet_analyses/utils.py
is_cuda = lambda m: next(m.parameters()).is_cuda
from datajoint.expression import QueryExpression
import numpy as np
import torch
import torch.nn as nn
import pandas as pd
from contextlib import contextmanager
import hashlib
from scipy import ndimage
from scipy import signal
import sys
from os import path
import copy
from neuro_data.static_images import transforms
from neuro_data.static_images.datasets import StaticImageSet
import cv2
import imageio
import matplotlib.pyplot as plt
from itertools import product, zip_longest
import seaborn as sns
# Utility functions to save and load datasets
def pack_transform(t):
    """Serialize a neuro_data transform into a plain config dict.

    The dict carries a 'type' tag plus the constructor arguments needed by
    unpack_transform to rebuild the transform.
    """
    if isinstance(t, transforms.Normalizer):
        config = {f: getattr(t, f)
                  for f in ('buggy', 'normalize_per_image', 'exclude', 'stats_source')}
        config['type'] = 'Normalizer'
    elif isinstance(t, transforms.Subsample):
        config = {'idx': t.idx, 'type': 'Subsample'}
    elif isinstance(t, transforms.ToTensor):
        config = {'cuda': t.cuda, 'type': 'ToTensor'}
    else:
        raise ValueError('Packing transform type {} is not supported yet'.format(t.__class__.__name__))
    return config
def unpack_transform(dataset, t_config):
    """Rebuild a transform from a config dict produced by pack_transform.

    Mutates *t_config* by popping the 'type' tag (matches original behavior).

    Raises:
        ValueError: for an unknown 'type' tag. Previously this fell through
        and returned None silently, inconsistent with pack_transform.
    """
    t_type = t_config.pop('type')
    if t_type == 'Normalizer':
        return transforms.Normalizer(dataset, **t_config)
    elif t_type == 'Subsample':
        return transforms.Subsample(**t_config)
    elif t_type == 'ToTensor':
        return transforms.ToTensor(**t_config)
    else:
        raise ValueError('Unpacking transform type {} is not supported yet'.format(t_type))
def pack_dataset(ds, filename=None):
    """Serialize a StaticImageSet into a plain config dict.

    *filename* overrides the dataset's backing HDF5 file path; by default the
    path is read from the open file handle.
    """
    if filename is None:
        filename = ds._fid.filename
    config = dict(filename=filename,
                  data_keys=ds.data_keys,
                  cache_raw=ds.cache_raw,
                  stats_source=ds.stats_source)
    config['transforms'] = [pack_transform(t) for t in ds.transforms]
    return config
def unpack_dataset(ds_config, filename=None):
    """Rebuild a StaticImageSet from a config produced by pack_dataset.

    The config is deep-copied so the caller's dict is not mutated.
    *filename* overrides the filename stored in the config.
    """
    ds_config = copy.deepcopy(ds_config)
    t_configs = ds_config.pop('transforms')
    data_keys = ds_config.pop('data_keys')
    saved_filename = ds_config.pop('filename')
    if filename is None:
        filename = saved_filename
    new_ds = StaticImageSet(filename, *data_keys, **ds_config)
    # Renamed from `transforms`: that local shadowed the imported
    # `neuro_data.static_images.transforms` module.
    unpacked = [unpack_transform(new_ds, t) for t in t_configs]
    new_ds.transforms.extend(unpacked)
    return new_ds
def load_dataset(filename, ds_path=None):
    """Load a pickled dataset config and unpack it against *filename*.

    When *ds_path* is not given, 'dataset_config.pt' is looked up next to
    *filename*.
    """
    if ds_path is None:
        directory = path.split(filename)[0]
        ds_path = path.join(directory, 'dataset_config.pt')
    config = torch.load(ds_path)
    return unpack_dataset(config, filename)
def list_hash(values):
    """Return the hex MD5 digest over the string forms of *values*.

    Each value is str()-ed and UTF-8 encoded before being folded in, so the
    hash depends on order and string representation, not on types.
    """
    digest = hashlib.md5()
    for item in values:
        digest.update(str(item).encode())
    return digest.hexdigest()
def key_hash(key):
    """32-char MD5 over the values of dict *key*, sorted by key name.

    Used for lookup of primary keys of jobs; only values are hashed, so two
    dicts with the same values under different (same-sorting) keys collide.
    """
    digest = hashlib.md5()
    for _, value in sorted(key.items()):
        digest.update(str(value).encode())
    return digest.hexdigest()
class silence:
    """Context manager that suppresses stdout inside the block.

    Sets ``sys.stdout = None`` (CPython's print silently no-ops on a None
    stdout) and restores the previous stream on exit.
    """

    def __enter__(self):
        self._saved_stdout = sys.stdout
        sys.stdout = None

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout = self._saved_stdout
def safe_key(self):
    """Fetch only non-blob columns and return the primary-key columns.

    Intended to be monkey-patched onto datajoint QueryExpression (see below).
    """
    fetched = self.proj(*self.heading.non_blobs).fetch()
    return fetched[self.heading.primary_key]
def fetchdf(self, *args, **kwargs):
    """Fetch query results and wrap them in a pandas DataFrame.

    All arguments are forwarded unchanged to ``self.fetch``.
    """
    rows = self.fetch(*args, **kwargs)
    return pd.DataFrame(rows)
# Monkey-patch the convenience helpers onto every datajoint query expression.
QueryExpression.safe_key = safe_key
QueryExpression.fetchdf = fetchdf
from attorch.layers import SpatialTransformerPyramid2d, SpatialTransformerPooled2d
from attorch.constraints import positive
import torch.nn.functional as F
def disc_center_forward(self, x, shift=None):
    """Pyramid readout pooling only the center pixel of every pyramid scale.

    NOTE(review): an identical ``disc_center_forward`` is defined again
    below; at module load the later definition wins. Consider removing one.
    The ``shift`` argument is accepted but ignored here.
    """
    if self.positive:
        positive(self.features)  # attorch constraint; presumably projects weights to >= 0 -- confirm
    self.grid.data = torch.clamp(self.grid.data, -1, 1)
    N, c, h, w = x.size()
    m = self.gauss_pyramid.scale_n + 1  # number of pyramid levels
    feat = self.features.view(1, m * c, self.outdims)
    pools = []
    for xx in self.gauss_pyramid(x):
        N, ch, img_h, img_w = xx.size()
        ctr_h, ctr_w = img_h // 2, img_w // 2  # center pixel of this scale
        pools.append(xx[..., ctr_h, ctr_w].unsqueeze(-1).expand(N, ch, self.outdims))
    y = torch.cat(pools, dim=1)
    y = (y * feat).sum(1).view(N, self.outdims)
    if self.bias is not None:
        y = y + self.bias
    return y
def disc_center_forward(self, x, shift=None):
    """Pyramid readout pooling only the center pixel of every pyramid scale.

    NOTE(review): this is a byte-identical duplicate of the
    ``disc_center_forward`` defined directly above; this second definition
    is the one that survives. One of the two should be deleted.
    """
    if self.positive:
        positive(self.features)  # attorch constraint; presumably projects weights to >= 0 -- confirm
    self.grid.data = torch.clamp(self.grid.data, -1, 1)
    N, c, h, w = x.size()
    m = self.gauss_pyramid.scale_n + 1  # number of pyramid levels
    feat = self.features.view(1, m * c, self.outdims)
    pools = []
    for xx in self.gauss_pyramid(x):
        N, ch, img_h, img_w = xx.size()
        ctr_h, ctr_w = img_h // 2, img_w // 2  # center pixel of this scale
        pools.append(xx[..., ctr_h, ctr_w].unsqueeze(-1).expand(N, ch, self.outdims))
    y = torch.cat(pools, dim=1)
    y = (y * feat).sum(1).view(N, self.outdims)
    if self.bias is not None:
        y = y + self.bias
    return y
def simple_forward(self, x, shift=None):
    """Single-scale readout of just the center pixel of ``x``.

    Uses only the first ``ch`` rows of the feature weights (no pyramid).
    ``shift`` is accepted but ignored.
    """
    if self.positive:
        positive(self.features)
    batch, channels, height, width = x.size()
    # Keep only the weights matching the input's channel count.
    weights = self.features.view(1, -1, self.outdims)[:, :channels, :]
    center = x[..., height // 2, width // 2]
    expanded = center.unsqueeze(-1).expand(batch, channels, self.outdims)
    out = (expanded * weights).sum(1).view(batch, self.outdims)
    return out if self.bias is None else out + self.bias
def fixed_forward(self, x, shift=None):
    """Center-pixel readout with a single scalar weight.

    Uses the mean of all feature weights as one shared scalar, so every
    output unit is the (scaled) sum over channels of the center pixel.
    ``shift`` is accepted but ignored.
    """
    if self.positive:
        positive(self.features)  # attorch constraint; presumably projects weights to >= 0 -- confirm
    N, ch, h, w = x.size()
    feat = self.features.view(1, -1, self.outdims)
    feat_scale = feat.mean()  # collapse all weights into one scalar
    ctr_h, ctr_w = h // 2, w // 2
    y = x[..., ctr_h, ctr_w].unsqueeze(-1).expand(N, ch, self.outdims)
    y = (y * feat_scale).sum(1).view(N, self.outdims)
    if self.bias is not None:
        y = y + self.bias
    return y
def trunc_center_forward(self, x, shift=None):
    """Center-pixel readout restricted to a single pyramid scale.

    Only the scale at ``self.scale_idx`` contributes; ``pools`` therefore
    holds exactly one entry. ``self.scale_idx`` is attached by the
    ``trunc_center_readout`` context manager. ``shift`` is ignored.
    """
    if self.positive:
        positive(self.features)  # attorch constraint; presumably projects weights to >= 0 -- confirm
    self.grid.data = torch.clamp(self.grid.data, -1, 1)
    N, c, h, w = x.size()
    m = self.gauss_pyramid.scale_n + 1
    feat = self.features.view(1, m * c, self.outdims)
    # Slice out just the weights belonging to the selected scale.
    start = self.scale_idx * c
    feat = feat[:, start:(start+c), :]
    pools = []
    for i, xx in enumerate(self.gauss_pyramid(x)):
        if i == self.scale_idx:
            N, ch, img_h, img_w = xx.size()
            ctr_h, ctr_w = img_h // 2, img_w // 2
            pools.append(xx[..., ctr_h, ctr_w].unsqueeze(-1).expand(N, ch, self.outdims))
    y = torch.cat(pools, dim=1)
    y = (y * feat).sum(1).view(N, self.outdims)
    if self.bias is not None:
        y = y + self.bias
    return y
def discretized_forward(self, x, shift=None):
    """Pyramid readout with grid positions snapped to the pixel lattice.

    Like the normal pyramid forward, but each normalized grid coordinate is
    rounded to the nearest pixel center of the current scale before
    ``grid_sample``, removing sub-pixel interpolation.
    """
    if self.positive:
        positive(self.features)  # attorch constraint; presumably projects weights to >= 0 -- confirm
    self.grid.data = torch.clamp(self.grid.data, -1, 1)
    N, c, h, w = x.size()
    m = self.gauss_pyramid.scale_n + 1
    feat = self.features.view(1, m * c, self.outdims)
    if shift is None:
        grid = self.grid.expand(N, self.outdims, 1, 2)
    else:
        # Per-sample (x, y) shift added to every readout location.
        grid = self.grid.expand(N, self.outdims, 1, 2) + shift[:, None, None, :]
    pools = []
    for xx in self.gauss_pyramid(x):
        _, _, img_h, img_w = xx.size()
        img_shape = torch.tensor([img_w, img_h]).type_as(grid)
        # Map [-1, 1] -> pixel index, round, map back to [-1, 1].
        adj_grid = torch.round((grid + 1) / 2 * (img_shape - 1)) * 2 / (img_shape - 1) - 1
        pools.append(F.grid_sample(xx, adj_grid))
    y = torch.cat(pools, dim=1).squeeze(-1)
    y = (y * feat).sum(1).view(N, self.outdims)
    if self.bias is not None:
        y = y + self.bias
    return y
def indexed_forward(self, x, shift=None):
    """Readout variant that appears unfinished.

    NOTE(review): this function cannot run as written:
      * ``img_shape`` (used for ``img_index``) is never defined -- only
        ``img_indexer`` is, and it is never used;
      * ``adj_grid`` passed to ``F.grid_sample`` is never defined;
      * the bare ``(grid + 1) / 2`` expression has no effect;
      * ``img_index`` is computed but never used.
    It raises NameError on first call. Compare ``discretized_forward``
    above for what the completed version presumably intended.
    """
    if self.positive:
        positive(self.features)
    self.grid.data = torch.clamp(self.grid.data, -1, 1)
    N, c, w, h = x.size()
    m = self.gauss_pyramid.scale_n + 1
    feat = self.features.view(1, m * c, self.outdims)
    if shift is None:
        grid = self.grid.expand(N, self.outdims, 1, 2)
    else:
        grid = self.grid.expand(N, self.outdims, 1, 2) + shift[:, None, None, :]
    pools = []
    for xx in self.gauss_pyramid(x):
        _, _, img_w, img_h = xx.size()
        img_indexer = torch.tensor([(img_w -1) * img_h, img_h - 1]).type_as(grid)  # unused
        (grid + 1) / 2  # no-op expression (see NOTE above)
        img_index = torch.round((grid + 1) / 2 * (img_shape - 1))  # NameError: img_shape undefined
        pools.append(F.grid_sample(xx, adj_grid))  # NameError: adj_grid undefined
    y = torch.cat(pools, dim=1).squeeze(-1)
    y = (y * feat).sum(1).view(N, self.outdims)
    if self.bias is not None:
        y = y + self.bias
    return y
# define alternate forward on SpatialTransformerPyramid2d
def all_forward(self, x, shift=None):
    """Readout evaluated at every spatial position via 1x1 convolution.

    Concatenates all pyramid scales along channels and convolves with the
    feature weights, returning a response map per output unit instead of a
    single sampled value. ``shift`` is accepted but ignored.
    """
    if self.positive:
        positive(self.features)  # attorch constraint; presumably projects weights to >= 0 -- confirm
    self.grid.data = torch.clamp(self.grid.data, -1, 1)
    N, c, w, h = x.size()
    m = self.gauss_pyramid.scale_n + 1
    # (outdims, m*c, 1, 1) kernel: one 1x1 filter per output unit.
    feat = self.features.view(m * c, self.outdims, 1, 1).permute([1, 0, 2, 3]).contiguous()
    stacked = torch.cat(self.gauss_pyramid(x), dim=1)
    y = F.conv2d(stacked, feat, self.bias)
    _, _, yw, yh = y.size()  # NOTE(review): yw/yh are unused
    return y
def center_forward(self, x, shift=None):
    """Run the saved forward with all grid positions forced to the center.

    Temporarily zeroes ``self.grid`` (normalized coords, so 0 == center),
    delegates to ``self.old_forward`` (installed by ``center_readout``),
    then restores the original grid values.
    """
    grid_backup = self.grid.data.clone()
    self.grid.data.zero_()
    output = self.old_forward(x, shift=shift)
    self.grid.data = grid_backup
    return output
@contextmanager
def discrete_readout():
    """Install ``discretized_forward`` on SpatialTransformerPyramid2d for the block."""
    print('Using discretized forward on SpatialTransformerPyramid2d')
    saved_forward = SpatialTransformerPyramid2d.forward
    SpatialTransformerPyramid2d.forward = discretized_forward
    yield
    # NOTE: deliberately no try/finally -- the original forward is NOT
    # restored if the with-body raises (preserves original behavior).
    print('Restoring normal forward on SpatialTransformerPyramid2d')
    SpatialTransformerPyramid2d.forward = saved_forward
@contextmanager
def simple_readout():
    """Install ``simple_forward`` on SpatialTransformerPyramid2d for the block."""
    print('Using simplified forward on SpatialTransformerPyramid2d')
    saved_forward = SpatialTransformerPyramid2d.forward
    SpatialTransformerPyramid2d.forward = simple_forward
    yield
    # NOTE: no try/finally, so the swap is not undone if the body raises.
    print('Restoring normal forward on SpatialTransformerPyramid2d')
    SpatialTransformerPyramid2d.forward = saved_forward
@contextmanager
def fixed_readout():
    """Install ``fixed_forward`` on SpatialTransformerPyramid2d for the block."""
    print('Using fixed forward on SpatialTransformerPyramid2d')
    saved_forward = SpatialTransformerPyramid2d.forward
    SpatialTransformerPyramid2d.forward = fixed_forward
    yield
    # NOTE: no try/finally, so the swap is not undone if the body raises.
    print('Restoring normal forward on SpatialTransformerPyramid2d')
    SpatialTransformerPyramid2d.forward = saved_forward
@contextmanager
def disc_center_readout():
    """Install ``disc_center_forward`` on SpatialTransformerPyramid2d for the block."""
    print('Using discrete centered forward on SpatialTransformerPyramid2d')
    # Stash the original on the class itself (matches center_readout, whose
    # replacement forward reads self.old_forward).
    SpatialTransformerPyramid2d.old_forward = SpatialTransformerPyramid2d.forward
    SpatialTransformerPyramid2d.forward = disc_center_forward
    yield
    print('Restoring normal forward on SpatialTransformerPyramid2d')
    SpatialTransformerPyramid2d.forward = SpatialTransformerPyramid2d.old_forward
@contextmanager
def trunc_center_readout(scale_idx=0):
    """Install ``trunc_center_forward`` limited to pyramid scale *scale_idx*."""
    print('Using truncated centered forward with scale={} on SpatialTransformerPyramid2d'.format(scale_idx))
    # Stash original forward on the class and expose the chosen scale as a
    # class attribute read by trunc_center_forward.
    SpatialTransformerPyramid2d.old_forward = SpatialTransformerPyramid2d.forward
    SpatialTransformerPyramid2d.forward = trunc_center_forward
    SpatialTransformerPyramid2d.scale_idx = scale_idx
    yield
    print('Restoring normal forward on SpatialTransformerPyramid2d')
    SpatialTransformerPyramid2d.forward = SpatialTransformerPyramid2d.old_forward
@contextmanager
def alt_forward():
    """Install ``all_forward`` (full response maps) on SpatialTransformerPyramid2d."""
    print('Using alternate forward on SpatialTransformerPyramid2d')
    saved_forward = SpatialTransformerPyramid2d.forward
    SpatialTransformerPyramid2d.forward = all_forward
    yield
    # NOTE: no try/finally, so the swap is not undone if the body raises.
    print('Restoring normal forward on SpatialTransformerPyramid2d')
    SpatialTransformerPyramid2d.forward = saved_forward
@contextmanager
def center_readout():
    """Install ``center_forward`` on SpatialTransformerPyramid2d for the block."""
    print('Using centeralized forward on SpatialTransformerPyramid2d')
    # center_forward delegates to self.old_forward, so the original must be
    # stashed on the class rather than in a local.
    SpatialTransformerPyramid2d.old_forward = SpatialTransformerPyramid2d.forward
    SpatialTransformerPyramid2d.forward = center_forward
    yield
    print('Restoring normal forward on SpatialTransformerPyramid2d')
    SpatialTransformerPyramid2d.forward = SpatialTransformerPyramid2d.old_forward
# Attach the readout context managers (and the alternate forward) as class
# attributes so call sites can write e.g.
#   with SpatialTransformerPyramid2d.center_readout(): ...
SpatialTransformerPyramid2d.alt_forward = alt_forward
SpatialTransformerPyramid2d.center_readout = center_readout
SpatialTransformerPyramid2d.disc_center_readout = disc_center_readout
SpatialTransformerPyramid2d.discrete_readout = discrete_readout
SpatialTransformerPyramid2d.trunc_center_readout = trunc_center_readout
SpatialTransformerPyramid2d.simple_readout = simple_readout
SpatialTransformerPyramid2d.fixed_readout = fixed_readout
# # define alternate forward on SpatialTransformerPooled2d
#
# def forward(self, x, shift=None):
# if self.positive:
# positive(self.features)
# self.grid.data = torch.clamp(self.grid.data, -1, 1)
# N, c, w, h = x.size()
# m = self.pool_steps + 1
# feat = self.features.view(1, m * c, self.outdims)
#
# if shift is None:
# grid = self.grid.expand(N, self.outdims, 1, 2)
# else:
# grid = self.grid.expand(N, self.outdims, 1, 2) + shift[:, None, None, :]
#
# pools = [F.grid_sample(x, grid)]
# for _ in range(self.pool_steps):
# x = self.avg(x)
# pools.append(F.grid_sample(x, grid))
# y = torch.cat(pools, dim=1)
# y = (y.squeeze(-1) * feat).sum(1).view(N, self.outdims)
#
# if self.bias is not None:
# y = y + self.bias
# return y
def fft_smooth(grad, factor=1/4):
"""
Tones down the gradient with 1/sqrt(f) filter in the Fourier domain.
Equivalent to low-pass filtering in the spatial domain.
"""
if factor == 0:
return grad
#h, w = grad.size()[-2:]
# grad = tf.transpose(grad, [0, 3, 1, 2])
# grad_fft = tf.fft2d(tf.cast(grad, tf.complex64))
h, w = grad.size()[-2:]
# grad = tf.transpose(grad, [0, 3, 1, 2])
# grad_fft = tf.fft2d(tf.cast(grad, tf.complex64))
tw | |
# (dataset extraction artifact removed)
#!/usr/bin/python
import numpy as np
import timeit
from enum import Enum
import logging
__author__ = 'Reem'
_log = logging.getLogger(__name__)
# Diff direction flags: which axes a diff applies to.
D_ROWS = 0        # rows only
D_COLS = 1        # columns only
D_ROWS_COLS = 2   # both rows and columns
class Levels(Enum):
    """Granularity levels for diff presentation (higher value = more detail)."""
    detail = 4
    middle = 2
    overview = 0
# reads a csv file (using full file path) and returns the data table with the IDs
def get_full_table(file):
    """Read a CSV file into a Table.

    The first row supplies column ids, the first column supplies row ids and
    the remaining cells form the content (raw object dtype).
    """
    raw = np.genfromtxt(file, dtype='object', delimiter=',')
    return Table(raw[1:, 0], raw[0, 1:], raw[1:, 1:])
# Helper functions #
# get the IDs available only in the first one
def get_deleted_ids(ids1, ids2):
    """Ids present in *ids1* but missing from *ids2* (unordered list)."""
    # Plain set difference; result order is unspecified.
    return list(set(ids1).difference(ids2))
# get the IDs available only in the second one
def get_added_ids(ids1, ids2):
    """Ids present in *ids2* but missing from *ids1* (unordered list)."""
    return list(set(ids2).difference(ids1))
def get_intersection(ids1, ids2):
    """Sorted unique ids common to both inputs (numpy intersect1d)."""
    return np.intersect1d(ids1, ids2)
def get_union_ids(ids1, ids2):
    """Order-preserving union of two id arrays.

    Starts from the shorter array's order and inserts each id that exists
    only in the longer array directly after its predecessor (from the longer
    array's ordering). Expects numpy arrays (uses ``.shape``); returns a
    numpy object array.
    """
    # Iterate the longer array's exclusive ids; keep the shorter as the base.
    if ids1.shape[0] < ids2.shape[0]:
        first = ids2
        second = ids1
    else:
        first = ids1
        second = ids2
    u = np.array(second, dtype='object')
    # u = list(second)
    deleted = get_deleted_ids(first, second)
    for i in deleted:
        index1 = np.where(first == i)[0][0]
        if index1 == 0:
            # it's deleted from the first position
            # add it at position index1
            u = np.insert(u, 0, i)
        else:
            # it's somewhere in the middle, find the position of the one before
            index1 -= 1
            pre_element = first[index1]
            # Walk backwards until a predecessor that already exists in u.
            while pre_element not in u:
                index1 -= 1
                if index1 >= 0:
                    pre_element = first[index1]
                else:
                    print("CORNER CASE: there's no element before that exists in the list then just add it at 0!")
                    pre_element = None
                    break
            if pre_element is not None:
                pre_index = np.where(u == pre_element)[0][0]
                # insert the new element after the pre_element
                u = np.insert(u, pre_index + 1, i)
            else:
                u = np.insert(u, 0, i)
    # todo if the index is not available
    return u
# this returns values between [0,1]
def normalize_float01(diff_matrix):
    """Min-max normalize *diff_matrix* into [0, 1].

    (Renamed locals that shadowed the builtins ``min``/``max``.)

    NOTE(review): a constant matrix makes hi == lo and divides by zero
    (inf/nan under numpy) -- confirm callers never pass constant input.
    """
    lo = diff_matrix.min()
    hi = diff_matrix.max()
    # Multiply by the float reciprocal instead of dividing (original intent:
    # force float arithmetic).
    return (diff_matrix - lo) * (float(1) / (hi - lo))
# this returns values between [-1,1]
def normalize_float_11(diff_matrix):
    """Scale *diff_matrix* by its largest absolute value into [-1, 1].

    Returns None when the matrix is all zeros (peak == 0) -- callers must
    handle that case. (Renamed locals that shadowed builtins ``min``/``max``.)
    """
    lo = diff_matrix.min()
    hi = diff_matrix.max()
    peak = abs(lo) if abs(lo) > abs(hi) else abs(hi)
    # Multiply by the float reciprocal instead of dividing (original intent:
    # force float arithmetic).
    if peak > 0:
        return diff_matrix * (float(1) / peak)
    return None
# helping functions from caleydo
def assign_ids(ids, idtype):
    """Map *ids* of kind *idtype* through the phovea 'idmanager' plugin.

    Imports phovea lazily so the module stays usable without the server
    runtime. Returns a numpy array of the manager's results.
    """
    import phovea_server.plugin
    manager = phovea_server.plugin.lookup('idmanager')
    return np.array(manager(ids, idtype))
# calcuate the percentage of changed cells regarding the intersection table
def calc_ch_percentage(chc, rc, cc):
    """Ratio of changed cells: *chc* / (*rc* rows * *cc* cols) of the intersection."""
    total_cells = rc * cc
    return float(chc) / total_cells
def generate_diff_from_files(file1, file2):
    """Placeholder: diff two CSV files directly (not implemented).

    The intended implementation (kept below, disabled) would load both files
    with get_full_table and call generate_diff on the class-based tables.
    """
    # full_table1 = get_full_table(file1)
    # full_table2 = get_full_table(file2)
    # todo use the classes
    # return generate_diff(full_table1, full_table2, None, None, 2)
    pass
# Table data structure
class Table:
    """Container pairing a 2-D content array with row/column id vectors."""

    @staticmethod
    def _as_str_ids(ids):
        # Normalize any id sequence to a numpy array of strings
        # (via object dtype, exactly as before).
        return np.asarray(ids, 'object').astype(str)

    def __init__(self, rows, cols, content):
        self.row_ids = self._as_str_ids(rows)
        self.col_ids = self._as_str_ids(cols)
        self.content = content
# Diff object data structure
class Diff:
    def __init__(self, content=None, structure=None, merge=None, reorder=None, union=None, direction=D_ROWS_COLS):
        """Create a diff result; every part defaults to an empty container."""
        self._direction = direction  # one of D_ROWS / D_COLS / D_ROWS_COLS
        self.content = [] if content is None else content        # changed cells
        self.structure = {} if structure is None else structure  # added/deleted rows+cols
        self.merge = {} if merge is None else merge
        self.reorder = {'rows': [], 'cols': []} if reorder is None else reorder
        self.union = {} if union is None else union
# todo decide if the union should be here or in the diff finder
    def add_union(self, union):
        """Store the union id mapping; numpy id arrays are converted to plain lists."""
        self.union = {}
        self.union["uc_ids"] = union["uc_ids"].tolist()  # union column ids (numpy -> list)
        self.union["c_ids"] = union["c_ids"]
        self.union["ur_ids"] = union["ur_ids"].tolist()  # union row ids (numpy -> list)
        self.union["r_ids"] = union["r_ids"]
    def serialize(self):
        """Return a JSON-ready dict of all diff parts (direction is not included)."""
        return {
            "content": self.content,
            "structure": self.structure,
            "merge": self.merge,
            "reorder": self.reorder,
            "union": self.union
        }
def unserialize(self, json_obj):
self.content = json_obj['content'] if 'content' in list(json_obj.keys()) else []
self.structure = json_obj['structure'] if 'structure' in list(json_obj.keys()) else {}
self.merge = json_obj['merge'] if 'merge' in list(json_obj.keys()) else {}
self.reorder = json_obj['reorder'] if 'reorder' in list(json_obj.keys()) else {'rows': [], 'cols': []}
self.union = json_obj['union'] if 'union' in list(json_obj.keys()) else {}
return self
    def content_counts_percell(self):
        """Number of changed cells, as a float."""
        return float(len(self.content))
    def content_ratio_percell(self, ucells, counts=None):
        """Changed-cell count divided by *ucells* (total union cell count)."""
        if counts is None:
            counts = self.content_counts_percell()
        return counts / ucells
# rowAdd rowDel colAdd colDel
    def struct_counts(self, urows, ucols, dir, st_op):
        """Count one kind of structure change.

        *dir* is "row"/"col"; *st_op* is "add"/"del", mapped onto the
        "added_rows"/"deleted_cols"-style keys of ``self.structure``.
        *urows*/*ucols* are unused here; they keep the signature parallel
        to ``struct_ratio``.
        """
        # "del" -> "deleted", "add" -> "added"
        operation = st_op + "eted" if st_op == "del" else st_op + "ed"
        return float(len(self.structure[operation + "_" + dir + "s"]))
# rowAdd rowDel colAdd colDel
    def struct_ratio(self, urows, ucols, dir, st_op, counts=None):
        """Structure-change count normalized by the union row (or column) count."""
        # Named `rows` but holds ucols when dir != "row".
        rows = urows if dir == "row" else ucols
        if counts is None:
            counts = self.struct_counts(urows, ucols, dir, st_op)
        return counts / rows
    def struct_add_counts(self, width, height):
        """Number of cells introduced by added rows/columns.

        *width*/*height* are the union table dimensions. Deleted rows/cols
        are subtracted first so added cells are counted against the
        surviving grid; added rows are removed from the height before added
        columns are counted, to avoid double-counting corner cells.
        """
        addc = 0
        h = height
        w = width
        if "deleted_rows" in self.structure:
            h -= len(self.structure["deleted_rows"])
        if "deleted_cols" in self.structure:
            w -= len(self.structure["deleted_cols"])
        if "added_rows" in self.structure:
            addc += len(self.structure["added_rows"]) * w
            h -= len(self.structure["added_rows"])
        if "added_cols" in self.structure:
            addc += len(self.structure["added_cols"]) * h
            w -= len(self.structure["added_cols"])  # we might need this later!
        # the type here should be just add but i'm using row-add for css
        return float(addc)
    def struct_add_ratio(self, width, height, counts=None):
        """Added-cell count divided by the total cell count (width * height)."""
        cells = width * height
        if counts is None:
            counts = self.struct_add_counts(width, height)
        return counts / cells
    def struct_del_counts(self, width, height):
        """Number of cells removed by deleted rows/columns.

        Deleted rows are counted against the full width; deleted columns are
        then counted against the height minus the deleted rows, so corner
        cells are not double-counted.
        """
        delc = 0
        h = height
        w = width
        if "deleted_rows" in self.structure:
            delc += len(self.structure["deleted_rows"]) * w
            h -= len(self.structure["deleted_rows"])
        if "deleted_cols" in self.structure:
            delc += len(self.structure["deleted_cols"]) * h
        # the type here should be just add and del but i'm using row-add and row-del for css
        return float(delc)
def struct_del_ratio(self, width, height, counts=None):
    """Deleted-cell count as a fraction of the full width*height grid."""
    if counts is None:
        counts = self.struct_del_counts(width, height)
    return counts / (width * height)
def nochange_counts(self, width, height):
    """Cells untouched by any change.

    Grid cells remaining after added and deleted rows/columns are
    discounted, minus the content-changed cells.
    """
    struct = self.structure
    rows_left = height - len(struct.get("deleted_rows", [])) \
        - len(struct.get("added_rows", []))
    cols_left = width - len(struct.get("deleted_cols", [])) \
        - len(struct.get("added_cols", []))
    return float(rows_left * cols_left - len(self.content))
def nochange_ratio(self, width, height, counts=None):
    """Unchanged-cell count as a fraction of the full width*height grid."""
    if counts is None:
        counts = self.nochange_counts(width, height)
    return counts / (width * height)
def nochange_rows_counts(self, width, height):
    """Unchanged-cell count (same computation as nochange_counts; kept
    separate so the row-oriented ratio has its own entry point)."""
    remaining_w = width
    for key in ("deleted_cols", "added_cols"):
        if key in self.structure:
            remaining_w -= len(self.structure[key])
    remaining_h = height
    for key in ("deleted_rows", "added_rows"):
        if key in self.structure:
            remaining_h -= len(self.structure[key])
    return float(remaining_h * remaining_w - len(self.content))
def nochange_rows_ratio(self, width, height, counts=None):
    """Unchanged cells relative to the full-height grid of surviving
    columns (original columns minus added and deleted ones)."""
    surviving_cols = width
    for key in ("deleted_cols", "added_cols"):
        if key in self.structure:
            surviving_cols -= len(self.structure[key])
    if counts is None:
        counts = self.nochange_rows_counts(width, height)
    return counts / (surviving_cols * height)
def nochange_cols_counts(self, width, height):
    """Unchanged-cell count (same computation as nochange_counts; kept
    separate so the column-oriented ratio has its own entry point)."""
    struct = self.structure
    rows = height - sum(len(struct[k])
                        for k in ("deleted_rows", "added_rows") if k in struct)
    cols = width - sum(len(struct[k])
                       for k in ("deleted_cols", "added_cols") if k in struct)
    return float(rows * cols - len(self.content))
def nochange_cols_ratio(self, width, height, counts=None):
h = height
w = width
# the height without the removed or added rows
if "deleted_rows" in self.structure:
h -= len(self.structure["deleted_rows"])
if "added_rows" in self.structure:
h -= len(self.structure["added_rows"])
# if "rows" in self.reorder:
# h -= len(self.reorder["rows"])
cells = w * | |
the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({ATP_SLP_EEO: -1.0,
ATP_SLP: EEO_Abnd})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
reaction = Reaction('EEO_ATP_HYDRO')
reaction.name = 'ATP (excess) consumed via hydrolysis'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({ATP_HYDR_EEO: -1.0,
ATP_HYDR: EEO_Abnd})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
reaction = Reaction('EEO_ATP_IMF')
reaction.name = 'ATP produced via ion motive force '
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({ATP_IMF_EEO: -1.0,
ATP_IMF: EEO_Abnd})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
reaction = Reaction('EEO_ATP_TRANS')
reaction.name = 'ATP consumed for transport'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({ATP_TRANS_EEO: -1.0,
ATP_TRANS: EEO_Abnd})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
reaction = Reaction('EEO_ATP_BIOMASS')
reaction.name = 'ATP consumed via biomass equation'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({ATP_BIOMASS_EEO: -1.0,
ATP_BIOMASS: EEO_Abnd})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
if TransEnergetics == True:
##Acetate Transport Energy
deltaG_trans_grad_Acetate = R*(T+273.15)*(math.log(S_Acetate/C_in_Acetate))
ATP_trans_Acetate = -1*(deltaG_trans_grad_Acetate + deltaG_pH)/ deltaG_ATP_Hydrolysis
if ATP_trans_Acetate > 0:
Constraint_trans_Acetate = model.problem.Constraint(model.reactions.EEO_Acetate_Transport_ATP.flux_expression - ATP_trans_Acetate * model.reactions.EEO_Acetate_export.flux_expression, lb=0, ub=0)
model.add_cons_vars(Constraint_trans_Acetate)
##Propionate Transport Energy
deltaG_trans_grad_Propionate = R*(T+273.15)*(math.log(S_Propionate/C_in_Propionate))
ATP_trans_Propionate = -1*(deltaG_trans_grad_Propionate + deltaG_pH)/ deltaG_ATP_Hydrolysis
if ATP_trans_Propionate > 0:
Constraint_trans_Propionate = model.problem.Constraint(model.reactions.EEO_Propionate_Transport_ATP.flux_expression - ATP_trans_Propionate* model.reactions.EEO_Propionate_export.flux_expression, lb=0, ub=0)
model.add_cons_vars(Constraint_trans_Propionate)
##Butyrate Transport Energy
deltaG_trans_grad_Butyrate = R*(T+273.15)*(math.log(S_Butyrate/C_in_Butyrate))
ATP_trans_Butyrate = -1*(deltaG_trans_grad_Butyrate + deltaG_pH)/ deltaG_ATP_Hydrolysis
if ATP_trans_Butyrate > 0:
Constraint_trans_Butyrate = model.problem.Constraint(model.reactions.EEO_Butyrate_Transport_ATP.flux_expression - ATP_trans_Butyrate* model.reactions.EEO_Butyrate_export.flux_expression, lb=0, ub=0)
model.add_cons_vars(Constraint_trans_Butyrate)
##Valerate Transport Energy
deltaG_trans_grad_Valerate = R*(T+273.15)*(math.log(S_Valerate/C_in_Valerate))
ATP_trans_Valerate = -1*(deltaG_trans_grad_Valerate + deltaG_pH)/ deltaG_ATP_Hydrolysis
if ATP_trans_Valerate > 0:
Constraint_trans_Valerate = model.problem.Constraint(model.reactions.EEO_Valerate_Transport_ATP.flux_expression - ATP_trans_Valerate* model.reactions.EEO_Valerate_export.flux_expression, lb=0, ub=0)
model.add_cons_vars(Constraint_trans_Valerate)
##Hexanoate Transport Energy
deltaG_trans_grad_Hexanoate = R*(T+273.15)*(math.log(S_Hexanoate/C_in_Hexanoate))
ATP_trans_Hexanoate = -1*(deltaG_trans_grad_Hexanoate + deltaG_pH)/ deltaG_ATP_Hydrolysis
if ATP_trans_Hexanoate > 0:
Constraint_trans_Hexanoate = model.problem.Constraint(model.reactions.EEO_Hexanoate_Transport_ATP.flux_expression - ATP_trans_Hexanoate* model.reactions.EEO_Hexanoate_export.flux_expression, lb=0, ub=0)
model.add_cons_vars(Constraint_trans_Hexanoate)
##Heptanoate Transport Energy
deltaG_trans_grad_Heptanoate = R*(T+273.15)*(math.log(S_Heptanoate/C_in_Heptanoate))
ATP_trans_Heptanoate = -1*(deltaG_trans_grad_Heptanoate + deltaG_pH)/ deltaG_ATP_Hydrolysis
if ATP_trans_Heptanoate > 0:
Constraint_trans_Heptanoate = model.problem.Constraint(model.reactions.EEO_Heptanoate_Transport_ATP.flux_expression - ATP_trans_Heptanoate* model.reactions.EEO_Heptanoate_export.flux_expression, lb=0, ub=0)
model.add_cons_vars(Constraint_trans_Heptanoate)
##Octanoate Transport Energy
deltaG_trans_grad_Octanoate = R*(T+273.15)*(math.log(S_Octanoate/C_in_Octanoate))
ATP_trans_Octanoate = -1*(deltaG_trans_grad_Octanoate + deltaG_pH)/ deltaG_ATP_Hydrolysis
if ATP_trans_Octanoate > 0:
Constraint_trans_Octanoate = model.problem.Constraint(model.reactions.EEO_Octanoate_Transport_ATP.flux_expression - ATP_trans_Octanoate* model.reactions.EEO_Octanoate_export.flux_expression, lb=0, ub=0)
model.add_cons_vars(Constraint_trans_Octanoate)
##Lactate Transport Energy
deltaG_trans_grad_Lactate = R*(T+273.15)*(math.log(S_Lactate/C_in_Lactate))
ATP_trans_Lactate = -1*(deltaG_trans_grad_Lactate + deltaG_pH)/ deltaG_ATP_Hydrolysis
if ATP_trans_Lactate > 0:
Constraint_trans_Lactate = model.problem.Constraint(model.reactions.EEO_Lactate_Transport_ATP.flux_expression - ATP_trans_Lactate* model.reactions.EEO_Lactate_export.flux_expression, lb=0, ub=0)
model.add_cons_vars(Constraint_trans_Lactate)
##Proton Transport Energy
S_H = 10*math.exp(-pH_out)
C_in_H = 10*math.exp(-pH_in)
deltaG_trans_grad_Proton = R*(T+273.15)*(math.log(S_H/C_in_H))
ATP_trans_Proton = 1*(deltaG_trans_grad_Proton + deltaG_Sai)/ deltaG_ATP_Hydrolysis
if ATP_trans_Proton > 0:
Constraint_trans_Proton = model.problem.Constraint(model.reactions.EEO_Proton_Transport_ATP.flux_expression - ATP_trans_Proton* model.reactions.EEO_H_export.flux_expression, lb=0, ub=0)
model.add_cons_vars(Constraint_trans_Proton)
##Ethanol Transport Energy
deltaG_trans_grad_Ethanol = R*(T+273.15)*(math.log(S_Ethanol/C_in_Ethanol))
ATP_trans_Ethanol = -1*(deltaG_trans_grad_Ethanol)/ deltaG_ATP_Hydrolysis
if ATP_trans_Ethanol > 0:
Constraint_trans_Ethanol = model.problem.Constraint(model.reactions.EEO_Ethanol_Transport_ATP.flux_expression - ATP_trans_Ethanol* model.reactions.EEO_Ethanol_export.flux_expression, lb=0, ub=0)
model.add_cons_vars(Constraint_trans_Ethanol)
if HAO_Rel_Abnd > 0:
    ####Homoacetogenic Organisms (HAO)#####
    # The HAO guild is only added when its relative abundance is positive.
    # ATP bookkeeping metabolites (no formula, zero charge): they exist
    # purely to tally ATP by mechanism in the HAO extracellular
    # compartment 'HAOe'.
    ATP_SLP_HAO = Metabolite('ATP_SLP_HAO', formula='', name='', compartment='HAOe', charge=0)
    ATP_IMF_HAO = Metabolite('ATP_IMF_HAO', formula='', name='', compartment='HAOe', charge=0)
    ATP_BIOMASS_HAO = Metabolite('ATP_BIOMASS_HAO', formula='', name='', compartment='HAOe', charge=0)
    ATP_HYDR_HAO = Metabolite('ATP_HYDR_HAO', formula='', name='', compartment='HAOe', charge=0)
    ATP_TRANS_HAO = Metabolite('ATP_TRANS_HAO', formula='', name='', compartment='HAOe', charge=0)
    # Core cytosolic ('HAOc') metabolites.
    h2o_HAOc = Metabolite('h2o_HAOc', formula='H2O', name='H2O', compartment='HAOc', charge=0)
    atp_HAOc = Metabolite('atp_HAOc', formula='C10H12N5O13P3', name='ATP', compartment='HAOc', charge=-4)
    adp_HAOc = Metabolite('adp_HAOc', formula='C10H12N5O10P2', name='ADP', compartment='HAOc', charge=-3)
    h_HAOc = Metabolite('h_HAOc', formula='H', name='H+', compartment='HAOc', charge=1)
    # NOTE(review): the display name 'xylose-D' looks like a copy/paste
    # slip -- the formula HO4P is inorganic phosphate; confirm and fix.
    pi_HAOc = Metabolite('pi_HAOc', formula='HO4P', name='xylose-D', compartment='HAOc', charge=-2)
    actp_HAOc = Metabolite('actp_HAOc', formula='C2H3O5P', name='Acetyl phosphate', compartment='HAOc', charge=-2)
    ##Formate metabolism
    # coa_HAOc + pyr_HAOc <-> accoa_HAOc + for_HAOc
    for_HAOc = Metabolite('for_HAOc', formula='CHO2', name='Formate', compartment='HAOc', charge=-1)
    accoa_HAOc = Metabolite('accoa_HAOc', formula='C23H34N7O17P3S', name='Acetyl-CoA', compartment='HAOc', charge=-4)
    coa_HAOc = Metabolite('coa_HAOc', formula='C21H32N7O16P3S', name='Coenzyme A', compartment='HAOc', charge=-4)
    pyr_HAOc = Metabolite('pyr_HAOc', formula='C3H3O3', name='Pyruvate', compartment='HAOc', charge=-1)
    reaction = Reaction('HAO_PFL')
    reaction.name = 'Pyruvate formate lyase'
    # NOTE(review): subsystem 'Transport' looks inconsistent for a lyase;
    # other guilds may use a metabolic subsystem here -- confirm.
    reaction.subsystem = 'Transport'
    reaction.lower_bound = 0.  # This is the default
    reaction.upper_bound = 1000.  # This is the default
    reaction.add_metabolites({pyr_HAOc: -1.0,
                              coa_HAOc: -1.0,
                              accoa_HAOc: 1.0,
                              for_HAOc: 1.0})
    model.add_reactions([reaction])
    print(reaction.name + ": " + str(reaction.check_mass_balance()))
    ##Acetate Metabolism
    #ac_HAOc + atp_HAOc <-> actp_HAOc + adp_HAOc
    ac_HAOc = Metabolite('ac_HAOc', formula='C2H3O2', name='Acetate', compartment='HAOc', charge=-1)
    reaction = Reaction('HAO_ACKr')
    reaction.name = 'Acetate kinase'
    reaction.subsystem = 'Acetate Metabolism'
    reaction.lower_bound = -1000.  # This is the default
    reaction.upper_bound = 1000.  # This is the default
    # ATP_SLP_HAO tallies substrate-level phosphorylation ATP turnover.
    reaction.add_metabolites({ac_HAOc: -1.0,
                              atp_HAOc: -1.0,
                              actp_HAOc: 1.0,
                              adp_HAOc: 1.0,
                              ATP_SLP_HAO: -1.0})
    model.add_reactions([reaction])
    print(reaction.name + ": " + str(reaction.check_mass_balance()))
    #accoa_HAOc + pi_HAOc <-> actp_HAOc + coa_HAOc
    reaction = Reaction('HAO_PTAr')
    reaction.name = 'Phosphotransacetylase'
    reaction.subsystem = 'Acetate Metabolism'
    reaction.lower_bound = -1000.  # This is the default
    reaction.upper_bound = 1000.  # This is the default
    reaction.add_metabolites({accoa_HAOc: -1.0,
                              pi_HAOc: -1.0,
                              actp_HAOc: 1.0,
                              coa_HAOc: 1.0})
    model.add_reactions([reaction])
    print(reaction.name + ": " + str(reaction.check_mass_balance()))
    #accoa_HAOc + h2o_HAOc -> ac_HAOc + coa_HAOc + h_HAOc
    # NOTE(review): 'Acteyl' below is a typo for 'Acetyl' in the display
    # name string.
    reaction = Reaction('HAO_ACOAH')
    reaction.name = 'Acteyl-CoA hydrolase'
    reaction.subsystem = 'Acetate Metabolism'
    reaction.lower_bound = 0.  # This is the default
    reaction.upper_bound = 1000.  # This is the default
    reaction.add_metabolites({accoa_HAOc: -1.0,
                              h2o_HAOc: -1.0,
                              ac_HAOc: 1.0,
                              coa_HAOc: 1.0,
                              h_HAOc: 1.0})
    model.add_reactions([reaction])
    print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Pyruvate Oxidation
#coa_HAOc + pyr_HAOc + fdox_HAOc <-> accoa_HAOc + co2_HAOc + fdred_HAOc + h_HAOc
fdred_HAOc = Metabolite('fdred_HAOc', formula='Fe8S8X', name='Ferredoxin (reduced) 2[4Fe-4S]', compartment='HAOc',
charge=-2)
fdox_HAOc = Metabolite('fdox_HAOc', formula='Fe8S8X', name='Ferredoxin (oxidized) 2[4Fe-4S]', compartment='HAOc',
charge=0)
co2_HAOc = Metabolite('co2_HAOc', formula='CO2', name='CO2', compartment='HAOc', charge=0)
reaction = Reaction('HAO_PFOR')
# This reaction differs from BiGG database because a different ferredoxin is used and H+ is a product for mass and charge balance
reaction.name = '*Pyruvate flavodoxin oxidoreductase'
reaction.subsystem = 'Pyruvate Oxidation'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({coa_HAOc: -1.0,
pyr_HAOc: -1.0,
fdox_HAOc: -1.0,
accoa_HAOc: 1.0,
co2_HAOc: 1.0,
fdred_HAOc: 1.0,
h_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#coa_HAOc + nad_HAOc + pyr_HAOc <-> accoa_HAOc + co2_HAOc + nadh_HAOc
nad_HAOc = Metabolite('nad_HAOc', formula='C21H26N7O14P2', name='Nicotinamide adenine dinucleotide', compartment='HAOc',
charge=-1)
nadh_HAOc = Metabolite('nadh_HAOc', formula='C21H27N7O14P2', name='Nicotinamide adenine dinucleotide - reduced',
compartment='HAOc', charge=-2)
reaction = Reaction('HAO_PDH')
#This reaction differs from BiGG database because a different ferredoxin is used and H+ is a product for mass and charge balance
reaction.name = 'Pyruvate dehdyrogenase'
reaction.subsystem = 'Pyruvate Oxidation'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({coa_HAOc: -1.0,
pyr_HAOc: -1.0,
nad_HAOc: -1.0,
accoa_HAOc: 1.0,
co2_HAOc: 1.0,
nadh_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
##Hydrogen Metabolism
h2_HAOc = Metabolite('h2_HAOc', formula='H2', name='Hydrogen', compartment='HAOc', charge=0)
#fdred_HAOc + 2.0 h_HAOc <-> h2_HAOc + fdox_HAOc
reaction = Reaction('HAO_HYD1')
#The reaction in BiGG uses a different ferredoxin
#BiGG reaction is not balanced for H
reaction.name = '(FeFe)-hydrogenase, cytoplasm'
reaction.subsystem = 'Hydrogen Generation'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({fdred_HAOc: -1.0,
h_HAOc: -2.0,
h2_HAOc: 1.0,
fdox_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#fdred_HAOc + 3.0 h_HAOc <-> h2_HAOc + fdox_HAOc + h_HAOi
h_HAOi = Metabolite('h_HAOi', formula='H', name='H+', compartment='HAOi', charge=1)
reaction = Reaction('HAO_ECH')
#The reaction in BiGG uses a different ferredoxin
#BiGG reaction is not balanced for H
reaction.name = 'Energy conserving hydrogenase'
reaction.subsystem = 'Hydrogen Generation'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({fdred_HAOc: -1.0,
h_HAOc: -3.0,
h2_HAOc: 1.0,
fdox_HAOc: 1.0,
h_HAOi: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#fdred_HAOc + nadh_HAOc + 3.0 h_HAOc <-> 2.0 h2_HAOc + fdox_HAOc + nad_HAOc
reaction = Reaction('HAO_HYDABC')
#The reaction in BiGG uses a different ferredoxin
#BiGG reaction is not balanced for H
reaction.name = 'Electron confurcating hydrogenase'
reaction.subsystem = 'Hydrogen Generation'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({fdred_HAOc: -1.0,
nadh_HAOc: -1.0,
h_HAOc: -3.0,
h2_HAOc: 2.0,
fdox_HAOc: 1.0,
nad_HAOc: 1.0})
model.add_reactions([reaction])
#Adding this reaction with the ferredoxin hydrogenase reaction creates a loop in the model
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#h2_HAOe <-> h2_HAOc
h2_HAOe = Metabolite('h2_HAOe', formula='H2', name='Hydrogen', compartment='HAOe', charge=0)
reaction = Reaction('HAO_H2t')
reaction.name = 'Hydrogen transport'
reaction.subsystem = 'Transport'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h2_HAOe: -1.0,
h2_HAOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#h2_HAOe <-> h2_e
reaction = Reaction('HAO_EX_h2')
reaction.name = 'HAO h2 exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound | |
\text{cond} \cdot \sigma_1`.
If :attr:`cond`\ `= None` (default), :attr:`cond` is set to the machine precision of the dtype of :attr:`A`.
This function returns the solution to the problem and some extra information in a named tuple of
four tensors `(solution, residuals, rank, singular_values)`. For inputs :attr:`A`, :attr:`B`
of shape `(*, m, n)`, `(*, m, k)` respectively, it contains
- `solution`: the least squares solution. It has shape `(*, n, k)`.
- `residuals`: the squared residuals of the solutions, that is, :math:`\|AX - B\|_F^2`.
It has shape equal to the batch dimensions of :attr:`A`.
It is computed when `m > n` and every matrix in :attr:`A` is full-rank,
otherwise, it is an empty tensor.
If :attr:`A` is a batch of matrices and any matrix in the batch is not full rank,
then an empty tensor is returned. This behavior may change in a future PyTorch release.
- `rank`: tensor of ranks of the matrices in :attr:`A`.
It has shape equal to the batch dimensions of :attr:`A`.
It is computed when :attr:`driver` is one of (`'gelsy'`, `'gelsd'`, `'gelss'`),
otherwise it is an empty tensor.
- `singular_values`: tensor of singular values of the matrices in :attr:`A`.
It has shape `(*, min(m, n))`.
It is computed when :attr:`driver` is one of (`'gelsd'`, `'gelss'`),
otherwise it is an empty tensor.
.. note::
    This function computes `X = \ `:attr:`A`\ `.pinv() @ \ `:attr:`B` in a faster and
    more numerically stable way than performing the
    computations separately.
.. warning::
The default value of :attr:`rcond` may change in a future PyTorch release.
It is therefore recommended to use a fixed value to avoid potential
breaking changes.
Args:
A (Tensor): lhs tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
B (Tensor): rhs tensor of shape `(*, m, k)` where `*` is zero or more batch dimensions.
rcond (float, optional): used to determine the effective rank of :attr:`A`.
If :attr:`rcond`\ `= None`, :attr:`rcond` is set to the machine
precision of the dtype of :attr:`A` times `max(m, n)`. Default: `None`.
Keyword args:
driver (str, optional): name of the LAPACK/MAGMA method to be used.
If `None`, `'gelsy'` is used for CPU inputs and `'gels'` for CUDA inputs.
Default: `None`.
Returns:
A named tuple `(solution, residuals, rank, singular_values)`.
Examples::
>>> a = torch.tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12]], dtype=torch.float)
>>> a.unsqueeze_(0)
>>> b = torch.tensor([[[2, 5, 1], [3, 2, 1], [5, 1, 9]],
[[4, 2, 9], [2, 0, 3], [2, 5, 3]]], dtype=torch.float)
>>> x = torch.linalg.lstsq(a, b).solution
>>> torch.dist(x, a.pinverse() @ b)
tensor(2.0862e-07)
    >>> sv = torch.linalg.lstsq(a, b, driver='gelsd').singular_values
>>> torch.dist(sv, a.svd().S)
tensor(5.7220e-06)
>>> a[:, 0].zero_()
    >>> _, _, rank, _ = torch.linalg.lstsq(a, b)
>>> rank
tensor([2])
.. _condition number:
https://pytorch.org/docs/master/linalg.html#torch.linalg.cond
.. _full description of these drivers:
https://www.netlib.org/lapack/lug/node27.html
""")
# Docstring registration for torch.linalg.matrix_power.
# Fix: user-facing typo "prefered" -> "preferred".
matrix_power = _add_docstr(_linalg.linalg_matrix_power, r"""
matrix_power(A, n, *, out=None) -> Tensor
Computes the `n`-th power of a square matrix for an integer `n`.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
If :attr:`n`\ `= 0`, it returns the identity matrix (or batch) of the same shape
as :attr:`A`. If :attr:`n` is negative, it returns the inverse of each matrix
(if invertible) raised to the power of `abs(n)`.
.. note::
    Consider using :func:`torch.linalg.solve` if possible for multiplying a matrix on the left by
    a negative power as, if :attr:`n`\ `> 0`::
        matrix_power(torch.linalg.solve(A, B), n) == matrix_power(A, -n) @ B
    It is always preferred to use :func:`~solve` when possible, as it is faster and more
    numerically stable than computing :math:`A^{-n}` explicitly.
.. seealso::
        :func:`torch.linalg.solve` computes :attr:`A`\ `.inv() @ \ `:attr:`B` with a
        numerically stable algorithm.
Args:
    A (Tensor): tensor of shape `(*, m, m)` where `*` is zero or more batch dimensions.
    n (int): the exponent.
Keyword args:
    out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
    RuntimeError: if :attr:`n`\ `< 0` and the matrix :attr:`A` or any matrix in the
    batch of matrices :attr:`A` is not invertible.
Examples::
    >>> a = torch.randn(3, 3)
    >>> a
    tensor([[-0.2270,  0.6663, -1.3515],
            [-0.9838, -0.4002, -1.9313],
            [-0.7886, -0.0450,  0.0528]])
    >>> torch.linalg.matrix_power(a, 0)
    tensor([[1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.]])
    >>> torch.linalg.matrix_power(a, 3)
    tensor([[ 1.0756,  0.4980,  0.0100],
            [-1.6617,  1.4994, -1.9980],
            [-0.4509,  0.2731,  0.8001]])
    >>> torch.linalg.matrix_power(a.expand(2, -1, -1), -2)
    tensor([[[ 0.2640,  0.4571, -0.5511],
            [-1.0163,  0.3491, -1.5292],
            [-0.4899,  0.0822,  0.2773]],
            [[ 0.2640,  0.4571, -0.5511],
            [-1.0163,  0.3491, -1.5292],
            [-0.4899,  0.0822,  0.2773]]])
""")
matrix_rank = _add_docstr(_linalg.linalg_matrix_rank, r"""
matrix_rank(A, tol=None, hermitian=False, *, out=None) -> Tensor
Computes the numerical rank of a matrix.
The matrix rank is computed as the number of singular values
(or eigenvalues in absolute value when :attr:`hermitian`\ `= True`)
that are greater than the specified :attr:`tol` threshold.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
If :attr:`hermitian`\ `= True`, :attr:`A` is assumed to be Hermitian if complex or
symmetric if real, but this is not checked internally. Instead, just the lower
triangular part of the matrix is used in the computations.
If :attr:`tol` is not specified and :attr:`A` is a matrix of dimensions `(m, n)`,
the tolerance is set to be
.. math::
\text{tol} = \sigma_1 \max(m, n) \varepsilon
where :math:`\sigma_1` is the largest singular value
(or eigenvalue in absolute value when :attr:`hermitian`\ `= True`), and
:math:`\varepsilon` is the epsilon value for the dtype of :attr:`A` (see :class:`torch.finfo`).
If :attr:`A` is a batch of matrices, :attr:`tol` is computed this way for every element of
the batch.
""" + fr"""
.. note:: The matrix rank is computed using singular value decomposition
:func:`torch.linalg.svd` if :attr:`hermitian`\ `= False` (default) and the eigenvalue
decomposition :func:`torch.linalg.eigvalsh` when :attr:`hermitian`\ `= True`.
{common_notes["sync_note"]}
""" + r"""
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
tol (float, Tensor, optional): the tolerance value. See above for the value it takes when `None`.
Default: `None`.
hermitian(bool, optional): indicates whether :attr:`A` is Hermitian if complex
or symmetric if real. Default: `False`.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Examples::
>>> a = torch.eye(10)
>>> torch.linalg.matrix_rank(a)
tensor(10)
>>> b = torch.eye(10)
>>> b[0, 0] = 0
>>> torch.linalg.matrix_rank(b)
tensor(9)
>>> a = torch.randn(4, 3, 2)
>>> torch.linalg.matrix_rank(a)
tensor([2, 2, 2, 2])
>>> a = torch.randn(2, 4, 2, 3)
>>> torch.linalg.matrix_rank(a)
tensor([[2, 2, 2, 2],
[2, 2, 2, 2]])
>>> a = torch.randn(2, 4, 3, 3, dtype=torch.complex64)
>>> torch.linalg.matrix_rank(a)
tensor([[3, 3, 3, 3],
[3, 3, 3, 3]])
>>> torch.linalg.matrix_rank(a, hermitian=True)
tensor([[3, 3, 3, 3],
[3, 3, 3, 3]])
>>> torch.linalg.matrix_rank(a, tol=1.0)
tensor([[3, 2, 2, 2],
[1, 2, 1, 2]])
>>> torch.linalg.matrix_rank(a, tol=1.0, hermitian=True)
tensor([[2, 2, 2, 1],
[1, 2, 2, 2]])
""")
norm = _add_docstr(_linalg.linalg_norm, r"""
linalg.norm(A, ord=None, dim=None, keepdim=False, *, out=None, dtype=None) -> Tensor
Computes a vector or matrix norm.
If :attr:`A` is complex valued, it computes the norm of :attr:`A`\ `.abs()`
Supports input of float, double, cfloat and cdouble dtypes.
Whether this function computes a vector or matrix norm is determined as follows:
- If :attr:`dim` is an `int`, the vector norm will be computed.
- If :attr:`dim` is a `2`-`tuple`, the matrix norm will be computed.
- If :attr:`dim`\ `= None` and :attr:`ord`\ `= None`,
:attr:`A` will be flattened to 1D and the `2`-norm of the resulting vector will be computed.
- If :attr:`dim`\ `= None` and :attr:`ord` `!= None`, :attr:`A` must be 1D or 2D.
:attr:`ord` defines the norm that is computed. The following norms are supported:
====================== ========================= ========================================================
:attr:`ord` norm for matrices norm for vectors
====================== ========================= ========================================================
`None` (default) Frobenius norm `2`-norm (see below)
`'fro'` Frobenius norm -- not supported --
`'nuc'` nuclear norm -- not supported --
`inf` `max(sum(abs(x), dim=1))` `max(abs(x))`
`-inf` `min(sum(abs(x), dim=1))` `min(abs(x))`
`0` -- not supported -- `sum(x != 0)`
`1` `max(sum(abs(x), dim=0))` as below
`-1` `min(sum(abs(x), dim=0))` as below
`2` largest singular value as below
`-2` smallest singular value as below
other `int` or `float` -- not supported -- `sum(abs(x)^{ord})^{(1 / ord)}`
====================== ========================= ========================================================
where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
.. seealso::
:func:`torch.linalg.vector_norm` computes a vector norm.
:func:`torch.linalg.matrix_norm` computes a matrix norm.
The above functions are often clearer and more flexible than | |
= V_{i+1}(:,k)'*VV_{i+1}(:,j)
R[k, j] = np.dot(mat[:, indspk], A[:, j])
# UU_{i+1}(:,j) = UU_{i+1}(:,j) - U_{i+1}(:,k)B_{i+1}(k,j)
# VV_{i+1}(:,j) = VV_{i+1}(:,j) - V_{i+1}(:,k)A_{i+1}(k,j)
# prod[0:m, j] = prod[0:m, j] - T[s*i+k, s*(i-1)+j]*U[0:m, s*i+k]
# prod[0:n, j] = prod[0:n, j] - T[inds+j, indspk]*V[0:n, indspk]
return mat, R, reorth
# TODO move this to tests? how?
# def debug_bidiag(i, s, inds, A, B, U, V, T):
# print("\n ********* DEBUGGING BLOCKBIDIAG: ************\n")
# # We will check the recurrence relations listed in Karimi, Toutounian
# print("\n Iteration i = {}, inds = {}\n".format(i, inds))
# E1 = np.zeros((inds+s, s))
# E1[0:s, :] = np.eye(s,s)
# errorRecurrence1 = sp.norm(B-np.dot(U[:,0:inds+s], np.dot(E1, B1)))
# print("\n B - UU(i+1)*E1*B1 = {}\n".format(errorRecurrence1))
# #
# # AVk = Ukp1Tk
# errorRecurrence2 = sp.norm(np.dot(A, V[:, 0:inds])
# - np.dot(U[:, 0:inds+s], T[0:inds+s, 0:inds]))
# print("\n A*VV(i) - UU(i+1)T(i) = {}\n".format(errorRecurrence2))
# #
# # ATUkp1 = VkTkT + Vkp1Akp1Ekp1T
# Eip1 = np.zeros((inds+s, s))
# Eip1[inds:inds+s, :] = np.eye(s,s)
# errorRecurrence3 = sp.norm(np.dot(A.T, U[:, 0:inds+s])
# - np.dot(V[:, 0:inds], T[0:inds+s, 0:inds].T)
# - np.dot(V[:, inds:inds+s],
# np.dot(Aip1, Eip1.T)))
# print("\n A.T*UU(i+1)-VV(i)*T(i).T-V(i+1)*A(i+1)*E(i+1).T = {}\n"
# .format(errorRecurrence3))
def polardecomp(W, options):
    """Return the orthogonal polar factor U of W.

    Only the Newton-Schulz iteration (``options["polar"] == "ns"``) is
    implemented so far.

    Args:
        W: matrix to decompose.
        options: dict whose "polar" key selects the algorithm.

    Returns:
        The orthogonal factor U of the polar decomposition W = U*H.

    Raises:
        NotImplementedError: if options["polar"] is not "ns".
    """
    if options["polar"] == "ns":
        # This is the Newton-Schultz iteration
        return polar_newton_schultz(W, 1e-8)
    # BUG FIX: this used to print a message and then return an unbound
    # local, crashing the caller with UnboundLocalError; fail loudly with
    # the actual reason instead.
    raise NotImplementedError(
        "**** POLAR OPTION NOT YET IMPLEMENTED: {}".format(options["polar"]))
def polar_newton_schultz(A, tol_cgce):
    """Orthogonal polar factor of A via the Newton-Schulz iteration.

    A tall (m > n) input is first reduced with an economy QR so the
    iteration runs on the small square factor.  Iterates
    ``X <- 1.5*X - 0.5*X*(X.T*X)`` until successive iterates differ by
    less than ``tol_cgce`` in the Frobenius norm; the scaling
    ``A/||A||_2`` puts every singular value in (0, 1], inside the
    iteration's convergence region.

    Args:
        A: (m, n) array with m >= n.
        tol_cgce: convergence tolerance on ||X_{k+1} - X_k||_F.

    Returns:
        The (m, n) matrix with orthonormal columns from A = U*H.

    Raises:
        ValueError: if m < n.
    """
    m, n = A.shape
    if m < n:
        # BUG FIX: this was `raise("...")`, which raises
        # "TypeError: exceptions must derive from BaseException" and
        # hides the real message.
        raise ValueError("m must be greater or equal to n")
    if m > n:
        Q, R = sp.qr(A, mode='economic')
        A = R.copy()
    X = A / sp.norm(A, 2)
    k = 0  # iteration counter, kept for debugging
    normdif = 1.0
    # main loop
    while normdif > tol_cgce:
        Xnew = 1.5 * X - 0.5 * np.dot(X, np.dot(X.T, X))
        normdif = sp.norm(Xnew - X, 'fro')
        X = Xnew.copy()
        k = k + 1
    if m > n:
        # Map the square factor back to the original tall shape.
        X = np.dot(Q, X)
    return X
def eb_solver(problem, options, fileobj):
    """
    Expansion-Balance solver
    Here we consider always :math:`m=n`, :math:`p=q`, :math:`C=I`.
    Thus the problem has to be
    .. math::
        \\min \\lVert A_{n\\times n}X_{n\\times p}-B_{n\\times p}\\rVert_F^2
        \\qquad s.t. X^TX=I_{p\\times p}
    References: :cite:`ZhanDu06` and :cite:`BergKnol84`.

    ``problem`` supplies ``A``, ``B``, ``sizes = (m, n, p, q)`` and a
    ``stats`` dict that is updated in place; ``options`` needs the keys
    ``maxiter``, ``tol``, ``verbose`` and ``full_results``; ``fileobj``
    receives the iteration log.  Returns ``(X, f, exitcode, msg)`` with
    ``exitcode`` 0 on convergence and 1 on hitting ``maxiter``.
    """
    problem.stats["nbiter"] = 0
    problem.stats["fev"] = 0
    problem.stats["svd"] = 0
    if options["full_results"]:
        problem.stats["total_fun"] = []
        problem.stats["total_crit"] = []
    m, n, p, q = problem.sizes
    exitcode = 0
    msg = ''
    # Initialization (X = G)
    # From [1], p. 973:
    # "The initial guess G(0) can be a solution to the balance problem
    #      min norm(AG-[B, Bhat], 'fro') s. t. G'*G=I
    # with an expansion [B, Bhat] of B. In ref. [BergKnol84], Bhat was simply
    # set to be zero or a randomly chosen matrix. A better initial guess
    #      Bhat = AE
    # was suggested in ref. [ZhanDu06] with E the eigenvector matrix of A.T*A
    # corresponding to its n-k smallest eigenvalues."
    # G(n,n) = [X(n,p), H(n,n-p)]
    Bhat = np.zeros((n, n-p))
    B = np.concatenate((problem.B, Bhat), axis=1)
    # Find the SVD of A.T*B = USV.T, and define G(0) = U*V.T
    U, S, VT = sp.svd(np.dot(problem.A.T, B))
    problem.stats["svd"] += 1
    G = np.dot(U, VT)
    # NOTE(review): X is initialized to zeros rather than the computed
    # G[0:n, 0:p] (commented out below), so the first objective value is
    # ||B||_F^2.  Confirm this is intentional before changing it.
    # X = np.copy(G[0:n, 0:p])
    X = np.zeros((n, p))
    f = sp.norm(np.dot(problem.A, X) - problem.B, 'fro')**2
    problem.stats["fev"] += 1
    if options["full_results"]:
        problem.stats["total_fun"].append(f)
        problem.stats["total_crit"].append(f)
    if options["verbose"] > 0:
        print("=========================================", file=fileobj)
        # BUG FIX: fileobj was passed as a positional print argument
        # (printing the file object to stdout) instead of as file=.
        print("                 EB Solver", file=fileobj)
        print("=========================================", file=fileobj)
        print("Options: {}".format(options), file=fileobj)
        print("Execution date: {}; {}\n"
              .format(datetime.datetime.now().date(),
                      datetime.datetime.now().time()), file=fileobj)
        print("  nbiter         f              fold-f         tol*fold",
              file=fileobj)
        print("===========================================================",
              file=fileobj)
        print(" {0:>4} {1:>16.4e}".format(0, f), file=fileobj)
    criticality = False
    nbiter = 0
    while not criticality and nbiter < options["maxiter"]:
        # Solve the expansion problem
        #      min norm(AG-[B, AH], 'fro') s.t. G'G=I
        # by finding the svd of A.T[B, AH].
        H = G[0:n, p:n]
        AH = np.dot(problem.A, H)
        B = np.concatenate((problem.B, AH), axis=1)
        # Find the SVD of A.T*B = USV.T, and define G = U*V.T
        U, S, VT = sp.svd(np.dot(problem.A.T, B))
        problem.stats["svd"] += 1
        G = np.dot(U, VT)
        X = np.copy(G[0:n, 0:p])
        fold = f
        f = sp.norm(np.dot(problem.A, X) - problem.B, 'fro')**2
        problem.stats["fev"] += 1
        # Check for convergence: relative decrease below tol, or f itself
        # negligible.
        criticality = (np.abs(fold - f) < options["tol"]*fold) or \
                      (np.abs(f) < options["tol"])
        if options["full_results"]:
            problem.stats["total_fun"].append(f)
            problem.stats["total_crit"].append(min(np.abs(fold-f)/np.abs(fold),
                                                   np.abs(fold)))
        # Print and loop back
        nbiter += 1
        if options["verbose"] > 0:
            print(" {0:>4} {1:>16.4e} {2:>16.4e} {3:>16.4e}"
                  .format(nbiter, f, fold-f, options["tol"]*fold),
                  file=fileobj)
    # ===================================================== end while
    if nbiter >= options["maxiter"]:
        msg = _status_message["maxiter"]
        exitcode = 1
        print('Warning: ' + msg, file=fileobj)
    else:
        exitcode = 0
        msg = _status_message["success"]
    problem.stats["nbiter"] = nbiter
    return X, f, exitcode, msg
def gpi_solver(problem, options, fileobj):
    """
    Generalized Power Iteration solver.

    Here we consider always C=I. Thus the problem has to be

    .. math::
        \\min \\lVert A_{m\\times n}X_{n\\times p}-B_{m\\times p}\\rVert_F^2
        \\qquad s.t. X^TX=I_{p\\times p}

    References: :cite:`NieZhanLi17`

    Parameters
    ----------
    problem : object
        Problem data with attributes ``A``, ``B``, ``sizes`` and ``stats``;
        iteration statistics are accumulated into ``problem.stats``.
    options : dict
        Uses keys ``maxiter``, ``tol``, ``verbose`` and ``full_results``.
    fileobj : file-like
        Stream for verbose output.

    Returns
    -------
    tuple
        ``(X, f, crit, exitcode, msg)`` — solution, objective value, last
        criticality measure, 0/1 exit code and status message.
    """
    problem.stats["nbiter"] = 0
    problem.stats["fev"] = 0
    problem.stats["svd"] = 0
    if options["full_results"]:
        problem.stats["total_fun"] = []
        problem.stats["total_crit"] = []
    m, n, p, q = problem.sizes
    exitcode = 0
    msg = ''
    # Initialization (X = 0).
    X = np.zeros((n, p))
    E = np.dot(problem.A.T, problem.A)
    # gamma is a constant times the largest eigenvalue of E, such that
    # gamma*I - E is positive definite.
    vals = spl.eigs(E, k=1, return_eigenvectors=False)
    gamma = vals[0]
    H = 2*(gamma*np.eye(n, n) - E)
    ATB = 2*np.dot(problem.A.T, problem.B)
    f = sp.norm(np.dot(problem.A, X) - problem.B, 'fro')**2
    problem.stats["fev"] = problem.stats["fev"]+1
    if options["full_results"]:
        problem.stats["total_fun"].append(f)
        problem.stats["total_crit"].append(f)
    if options["verbose"] > 0:
        print("=========================================", file=fileobj)
        print(" GPI Solver", file=fileobj)
        print("=========================================", file=fileobj)
        print("Options: {}".format(options), file=fileobj)
        print("Execution date: {}; {}\n"
              .format(datetime.datetime.now().date(),
                      datetime.datetime.now().time()), file=fileobj)
        # BUGFIX: the next two prints previously went to stdout instead of
        # fileobj, unlike every other verbose print in this solver.
        print(" nbiter f fold-f ", file=fileobj)
        print("===================================================",
              file=fileobj)
        print(" {0:>4} {1:>16.4e}".format(0, f), file=fileobj)
    criticality = False
    nbiter = 0
    # BUGFIX: define crit before the loop so the return statement cannot
    # raise NameError when options["maxiter"] == 0.
    crit = f
    while not criticality and nbiter < options["maxiter"]:
        M = np.dot(H, X) + ATB
        # Find the SVD of M; the orthonormal factor U*V.T is the update.
        U, S, VT = sp.svd(M, full_matrices=False)
        problem.stats["svd"] = problem.stats["svd"]+1
        X = np.dot(U, VT)
        fold = f
        f = sp.norm(np.dot(problem.A, X) - problem.B, 'fro')**2
        problem.stats["fev"] = problem.stats["fev"]+1
        # Check for convergence
        criticality = ((np.abs(fold - f) < options["tol"])
                       or (np.abs(f) < options["tol"]))
        crit = min(np.abs(fold-f), np.abs(fold))
        if options["full_results"]:
            problem.stats["total_fun"].append(f)
            problem.stats["total_crit"].append(crit)
        # Print and loop back
        nbiter = nbiter + 1
        if options["verbose"] > 0:
            print(" {0:>4} {1:>16.4e} {2:>16.4e}".format(nbiter, f, fold-f),
                  file=fileobj)
    # ===================================================== end while
    if nbiter >= options["maxiter"]:
        msg = _status_message["maxiter"]
        exitcode = 1
        print('Warning: ' + msg, file=fileobj)
    else:
        exitcode = 0
        msg = _status_message["success"]
    problem.stats["nbiter"] = nbiter
    # Sometimes X picks up imaginary garbage (gamma comes from a complex
    # eigensolver), so return only the real part.
    return X.real, f, crit, exitcode, msg
def gbb_solver(problem, largedim, smalldim, X, A, B, options, inner, fileobj,
B1=None, blobopprod=0.0):
"""
Curvilinear search solver for problems of the type
.. math::
\\min \\lVert AXC - B\\rVert_F^2 \\qquad s.t. X^TX = I
References: :cite:`WenYin13`.
"""
# Original implementation: OptStiefelGBB.m, by <NAME>, <NAME>
# (Version 1.0, 2010/10)
# Original documentation:
# min F(X), S.t., X'*X = I_k, where X \in R^{n,k}
#
# H = [G, X]*[X -G]'
# U = 0.5*tau*[G, X]; V = [X -G]
# X(tau) = X - 2*U * inv( I + V'*U ) * V'*X
# -------------------------------------
# U = -[G,X]; V = [X -G]; VU = V'*U;
# X(tau) = X - tau*U * inv( I + 0.5*tau*VU ) * V'*X
# -------------------------------------
# OBS. Since in our case X is always real, we do not consider the complex
# case here.
# OBS 2. The code below has been removed
# parameters for the nonmontone line search by Raydan
# if ~isfield(opts, 'STPEPS')
# opts.STPEPS = 1e-10;
# end
problem.stats["nbiter"] = 0
problem.stats["fev"] = 0
problem.stats["grad"] = 0
problem.stats["linear_solver"] = 0
problem.stats["feasi"] = 0
# "total_fun" and "total_grad" store the criticality info
# for each iteration
if options["full_results"]:
problem.stats["total_fun"] = []
problem.stats["total_grad"] = []
# A(largedim, smalldim)
# B(largedim, q)
# problem.C(p, q)
# X(smalldim, p)
m, n, p, q = problem.sizes # original sizes, not reduced
exitcode = 0
msg = ''
cost = []
tau = options["tau"]
xtol = options["xtol"]
ftol | |
<reponame>mahs4d/tsetmc-webservice<gh_stars>1-10
from datetime import date
from decimal import Decimal
from enum import Enum
from typing import List
import zeep
class Flow(Enum):
    """Market "flow" selector passed as the ``Flow`` argument to many of
    the SOAP operations below.

    NOTE(review): member semantics are taken from the TSETMC flow codes
    implied by the names — confirm against the service documentation.
    """
    GENERAL = 0
    BOURSE = 1
    FARABOURSE = 2
    ATI = 3
    PAYE_BOURSE = 4
    PAYE_FARABOURSE = 5
class WebserviceClient:
    def __init__(self, username: str, password: str):
        """Create a client bound to the TsePublicV2 SOAP 1.2 endpoint.

        :param username: TSETMC webservice account user name
        :param password: TSETMC webservice account password

        Note: constructing the client fetches the WSDL over HTTP.
        """
        self._username = username
        self._password = password
        # Bind explicitly to the SOAP 1.2 port of the TsePublicV2 service.
        self._soap_service = zeep.Client(wsdl='http://service.tsetmc.com/webservice/TsePublicV2.asmx?WSDL') \
            .bind('TsePublicV2', 'TsePublicV2Soap12')
def client_type(self) -> List[dict]:
"""
ClientType
اطلاعات معاملات به تفکیک حقیقی و حقوقی
"""
result = self._soap_service.ClientType(UserName=self._username, Password=self._password)
ret = []
for val in result['_value_1'][1]['_value_1']:
resco = val['Data']
ret.append(_element_to_dict(resco))
return ret
def option(self) -> List[dict]:
"""
option
اختیار معامله
"""
result = self._soap_service.Option(UserName=self._username, Password=self._password)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['Option']
ret.append(_element_to_dict(resco))
return ret
def nsc_start(self) -> List[dict]:
"""
NSCStart
اطلاعات 20 روز آخری که بازار باز بوده است را ارائه می کند.
"""
result = self._soap_service.NSCStart(UserName=self._username, Password=self._password)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['TseNSCStart']
ret.append(_element_to_dict(resco))
return ret
def inst_affect(self) -> List[dict]:
"""
InstAffect
اطلاعات تاثیر نمادها در شاخص را ارائه مي کند.
"""
result = self._soap_service.InstAffect(UserName=self._username, Password=self._password)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['InstAffectList']
ret.append(_element_to_dict(resco))
return ret
def power_instrument(self) -> List[dict]:
"""
PowerInstrument
فهرست نمادهای فعال بازار برق را ارائه مي کند.
"""
result = self._soap_service.PowerInstrument(UserName=self._username, Password=self._password)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['Instrument']
ret.append(_element_to_dict(resco))
return ret
def msg(self) -> List[dict]:
"""
Msg
پیغامهای ناظر بازار را ارائه مي کند.
"""
result = self._soap_service.Msg(UserName=self._username, Password=self._password)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['TseMsg']
ret.append(_element_to_dict(resco))
return ret
def sub_sector(self) -> List[dict]:
"""
SubSector
اطلاعات زیر گروه هاي صنعت را ارائه مي کند.
"""
result = self._soap_service.SubSector(UserName=self._username, Password=self._password)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['SubSector']
ret.append(_element_to_dict(resco))
return ret
def sector(self) -> List[dict]:
"""
Sector
اطلاعات گروه هاي صنعت را ارائه مي کند.
"""
result = self._soap_service.Sector(UserName=self._username, Password=self._password)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['TseSectorList']
ret.append(_element_to_dict(resco))
return ret
def board(self) -> List[dict]:
"""
Board
ليست تابلوها را ارائه مي کند.
"""
result = self._soap_service.Board(UserName=self._username, Password=self._password)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['TseBoardList']
ret.append(_element_to_dict(resco))
return ret
def market_value(self) -> Decimal:
"""
MarketValue
آخرین ارزش بازار را ارائه مي کند.
"""
result = self._soap_service.MarketValue(UserName=self._username, Password=self._password)
return result
def company(self, flow: Flow) -> List[dict]:
"""
Company
اطلاعات مربوط به شرکتها را ارائه مي کند.
"""
result = self._soap_service.Company(UserName=self._username, Password=self._password, Flow=flow.value)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['TseCompanyName']
ret.append(_element_to_dict(resco))
return ret
def instrument(self, flow: Flow) -> List[dict]:
"""
Instrument
اطلاعات مربوط به نمادها را ارائه مي کند.
"""
result = self._soap_service.Instrument(UserName=self._username, Password=self._password, Flow=flow.value)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['TseInstruments']
ret.append(_element_to_dict(resco))
return ret
def share_change(self, flow: Flow) -> List[dict]:
"""
ShareChange
فهرست افزایش سرمایه ها
"""
result = self._soap_service.ShareChange(UserName=self._username, Password=self._password, Flow=flow.value)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['TseShare']
ret.append(_element_to_dict(resco))
return ret
def adj_price(self, flow: Flow) -> List[dict]:
"""
AdjPrice
اطلاعات تعدیل قیمت نمادها را ارائه مي کند.
"""
result = self._soap_service.AdjPrice(UserName=self._username, Password=self._password, Flow=flow.value)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['TseAdjPrice']
ret.append(_element_to_dict(resco))
return ret
def market_activity_last(self, flow: Flow) -> List[dict]:
"""
MarketActivityLast
اطلاعات آمار معاملات را آخرین روز را ارائه مي کند.
"""
result = self._soap_service.MarketActivityLast(UserName=self._username, Password=self._password,
Flow=flow.value)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['MarketOverview']
ret.append(_element_to_dict(resco))
return ret
def instruments_state(self, flow: Flow) -> List[dict]:
"""
InstrumentsState
اطلاعات وضعیت نمادها را ارائه می کند. در صورتی که نمادی در این متود وجود نداشته باشد به معنی باز بودن نماد است.
"""
result = self._soap_service.InstrumentsState(UserName=self._username, Password=self._password, Flow=flow.value)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['InstrumentsSTates']
ret.append(_element_to_dict(resco))
return ret
def top(self, flow: Flow) -> List[dict]:
"""
TOP
اطلاعات قیمت تئوریک گشایش
"""
result = self._soap_service.TOP(UserName=self._username, Password=self._password, Flow=flow.value)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['TseTop']
ret.append(_element_to_dict(resco))
return ret
def static_thresholds(self, flow: Flow) -> List[dict]:
"""
StaticThresholds
کمینه و بیشینه قیمت مجاز نمادها
"""
result = self._soap_service.StaticThresholds(UserName=self._username, Password=self._password, Flow=flow.value)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['StaticThresholds']
ret.append(_element_to_dict(resco))
return ret
def best_limits_all_ins(self, flow: Flow) -> List[dict]:
"""
BestLimitsAllIns
تقاضاهاي برتر خريد و فروش همه نمادها را ارائه مي کند.
"""
result = self._soap_service.BestLimitsAllIns(UserName=self._username, Password=self._password, Flow=flow.value)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['InstBestLimit']
ret.append(_element_to_dict(resco))
return ret
def inst_with_best_limit(self, flow: Flow) -> List[dict]:
"""
InstWithBestLimit
اطلاعات آمار معاملات را در يک بازه زماني در تابلويي خاص ارائه مي کند.
"""
result = self._soap_service.InstWithBestLimit(UserName=self._username, Password=self._password,
Flow=flow.value)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['InstNames']
ret.append(_element_to_dict(resco))
return ret
def index_b1_last_day_last_data(self, flow: Flow) -> List[dict]:
"""
IndexB1LastDayLastData
اطلاعات آخرين روز شاخص ها را ارائه مي کند.
"""
result = self._soap_service.IndexB1LastDayLastData(UserName=self._username, Password=self._password,
Flow=flow.value)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['IndexB1LastDayLastData']
ret.append(_element_to_dict(resco))
return ret
def trade_last_day(self, flow: Flow) -> List[dict]:
"""
TradeLastDay
اطلاعات معاملات آخرين روز را ارائه مي کند.
"""
result = self._soap_service.TradeLastDay(UserName=self._username, Password=self._password, Flow=flow.value)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['TradeLastDay']
ret.append(_element_to_dict(resco))
return ret
def market_value_by_date(self, date: date) -> Decimal:
"""
MarketValueByDate
ارزش بازار در یک تاریخ مشخص را ارائه مي کند.
"""
result = self._soap_service.MarketValueByDate(UserName=self._username, Password=self._password,
DEven=_date_to_int(date))
return result
def future_information(self, date: date) -> List[dict]:
"""
FutureInformation
اطلاعات بازار آتی را ارائه مي کند.
"""
result = self._soap_service.FutureInformation(UserName=self._username, Password=self._password,
DEven=_date_to_int(date))
ret = []
for val in result['_value_1'][1]['_value_1']:
resco = val['TseAlert']
ret.append(_element_to_dict(resco))
return ret
def sector_state(self, date: date) -> List[dict]:
"""
SectorState
اطلاعات وضعیت گروه هاي صنعت را ارائه مي کند.
"""
result = self._soap_service.SectorState(UserName=self._username, Password=self._password,
DEven=_date_to_int(date))
ret = []
for val in result['_value_1']['_value_1']:
resco = val['SectorState']
ret.append(_element_to_dict(resco))
return ret
def index_b2(self, date: date) -> List[dict]:
"""
IndexB2
اطلاعات سابقه شاخص ها را ارائه مي کند.
"""
result = self._soap_service.IndexB2(UserName=self._username, Password=self._password, DEven=_date_to_int(date))
ret = []
for val in result['_value_1']['_value_1']:
resco = val['TseIndexB2']
ret.append(_element_to_dict(resco))
return ret
def trade_one_day(self, flow: Flow, date: date) -> List[dict]:
"""
TradeOneDay
5اطلاعات معاملات روزانه را ارائه مي کند.
"""
result = self._soap_service.TradeOneDay(UserName=self._username, Password=self._password, Flow=flow.value,
SelDate=_date_to_int(date))
ret = []
for val in result['_value_1']['_value_1']:
resco = val['TradeSelectedDate']
ret.append(_element_to_dict(resco))
return ret
def trade_one_day_all(self, flow: Flow, date: date) -> List[dict]:
"""
TradeOneDayAll
اطلاعات معاملات روزانه را ارائه مي کند.
"""
result = self._soap_service.TradeOneDayAll(UserName=self._username, Password=self._password, Flow=flow.value,
SelDate=_date_to_int(date))
ret = []
for val in result['_value_1']['_value_1']:
resco = val['TradeSelectedDateAll']
ret.append(_element_to_dict(resco))
return ret
def instrument_filter_by_date(self, flow: Flow, date: date) -> List[dict]:
"""
InstrumentFilterByDate
اطلاعات مربوط به نمادهای جدید و یا تغییر یافته از یک تاریخ به بعد را ارائه مي کند.
"""
result = self._soap_service.InstrumentFilterByDate(UserName=self._username, Password=self._password,
DEven=_date_to_int(date),
Flow=flow.value)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['Instrument']
ret.append(_element_to_dict(resco))
return ret
def market_activity_daily(self, from_date: date, to_date: date) -> List[dict]:
"""
MarketActivityDaily
اطلاعات آمار معاملات را در يک بازه زماني ارائه مي کند.
"""
result = self._soap_service.MarketActivityDaily(UserName=self._username, Password=self._password,
DateFrom=_date_to_int(from_date), DateTo=_date_to_int(to_date))
ret = []
for val in result['_value_1']['_value_1']:
resco = val['LastDayTrade']
ret.append(_element_to_dict(resco))
return ret
def best_limit_one_ins(self, instrument_code: int) -> List[dict]:
"""
BestLimitOneIns
5 تقاضاي برتر خريد و فروش يک نماد را ارائه مي کند.
"""
result = self._soap_service.BestLimitOneIns(UserName=self._username, Password=self._password,
InsCode=instrument_code)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['InstBestLimit']
ret.append(_element_to_dict(resco))
return ret
def inst_trade(self, instrument_code: int, from_date: date, to_date: date) -> List[dict]:
"""
InstTrade
اطلاعات آمار معاملات يک نماد را در يک بازه زماني را ارائه مي کند.
"""
result = self._soap_service.InstTrade(UserName=self._username, Password=self._password, Inscode=instrument_code,
DateFrom=_date_to_int(from_date), DateTo=_date_to_int(to_date))
ret = []
for val in result['_value_1']['_value_1']:
resco = val['TradeSelectedDate']
ret.append(_element_to_dict(resco))
return ret
def instruments_state_change(self, instrument_code: int, date: date) -> List[dict]:
"""
InstrumentsStateChange
تغییر وضعیت نماد
"""
result = self._soap_service.InstrumentsStateChange(UserName=self._username, Password=<PASSWORD>,
Inscode=instrument_code, DEven=_date_to_int(date))
ret = []
for val in result['_value_1']['_value_1']:
resco = val['InstrumentsStateChange']
ret.append(_element_to_dict(resco))
return ret
def index_b1_last_day_one_inst(self, index_code: int, flow: Flow) -> List[dict]:
"""
IndexB1LastDayOneInst
اطلاعات آخرين روز شاخص ها را ارائه مي کند.
"""
result = self._soap_service.IndexB1LastDayOneInst(UserName=self._username, Password=self._password,
Flow=flow.value, IdxCode=index_code)
ret = []
for val in result['_value_1']['_value_1']:
resco = val['IndexB1LastDay']
ret.append(_element_to_dict(resco))
return ret
def index_instrument(self, index_code: int, flow: Flow) -> List[dict]:
"""
IndexInstrument
اطلاعات نمادهاي هر شاخص را ارائه مي کند.
"""
result = self._soap_service.IndexInstrument(UserName=self._username, Password=self._password,
Flow=flow.value, IdxCode=index_code)
ret = []
for val in result['_value_1']['_value_1']:
resco | |
# Author: <NAME> <<EMAIL>>
# License: Python Software Foundation License
"""
A Python wrapper to access Amazon Web Service (AWS) E-Commerce Service APIs,
based upon pyamazon (http://www.josephson.org/projects/pyamazon/), enhanced
to meet the latest AWS specification(http://www.amazon.com/webservices).
This module defines the following classes:
- `Bag`, a generic container for the python objects
- `ListIterator`, a forward iterator adapter
- `PaginatedIterator`, a page-based iterator using lazy evaluation
Exception classes:
- `AWSException`
- `NoLicenseKey`
- `BadLocale`
- `BadOption`
- `ExactParameterRequirement`
- `ExceededMaximumParameterValues`
- `InsufficientParameterValues`
- `InternalError`
- `InvalidEnumeratedParameter`
- `InvalidISO8601Time`
- `InvalidOperationForMarketplace`
- `InvalidOperationParameter`
- `InvalidParameterCombination`
- `InvalidParameterValue`
- `InvalidResponseGroup`
- `InvalidServiceParameter`
- `InvalidSubscriptionId`
- `InvalidXSLTAddress`
- `MaximumParameterRequirement`
- `MinimumParameterRequirement`
- `MissingOperationParameter`
- `MissingParameterCombination`
- `MissingParameters`
- `MissingParameterValueCombination`
- `MissingServiceParameter`
- `ParameterOutOfRange`
- `ParameterRepeatedInRequest`
- `RestrictedParameterValueCombination`
- `XSLTTransformationError`
Functions:
- `setLocale`
- `getLocale`
- `setLicenseKey`
- `getLicenseKey`
- `getVersion`
- `setOptions`
- `getOptions`
- `buildRequest`
- `buildException`
- `query`
- `SimpleObject`
- `Collection`
- `Pagination`
- `unmarshal`
- `ItemLookup`
- `XMLItemLookup`
- `ItemSearch`
- `XMLItemSearch`
- `SimilarityLookup`
- `XMLSimilarityLookup`
- `ListLookup`
- `XMLListLookup`
- `ListSearch`
- `XMLListSearch`
- `CartCreate`
- `XMLCartCreate`
- `CartAdd`
- `XMLCartAdd`
- `CartGet`
- `XMLCartGet`
- `CartModify`
- `XMLCartModify`
- `CartClear`
- `XMLCartClear`
- `SellerLookup`
- `XMLSellerLookup`
- `SellerListingLookup`
- `XMLSellerListingLookup`
- `SellerListingSearch`
- `XMLSellerListingSearch`
- `CustomerContentSearch`
- `XMLCustomerContentSearch`
- `CustomerContentLookup`
- `XMLCustomerContentLookup`
- `BrowseNodeLookup`
- `XMLBrowseNodeLookup`
- `Help`
- `XMLHelp`
- `TransactionLookup`
- `XMLTransactionLookup`
According to the ECS specification, there are two implementations, foo and XMLfoo, for example, `ItemLookup` and `XMLItemLookup`. foo returns a Python object, XMLfoo returns the raw XML file.
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Apply for a Amazon Web Service API key from Amazon Web Service:
https://aws-portal.amazon.com/gp/aws/developer/registration/index.html
2. Import it: ``import pyaws.ecs``
3. Setup the license key: ``ecs.setLicenseKey('YOUR-KEY-FROM-AWS')``
   or you could use the environment variable AMAZON_LICENSE_KEY
Optional:
a) setup other options, like AssociateTag, MerchantID, Validate
b) export the http_proxy environment variable if you want to use proxy
c) setup the locale if your locale is not ``us``
4. Send queries to the AWS, and manipulate the returned python object.
"""
__author__ = "<NAME> <<EMAIL>>"
__version__ = "0.3.0"
__license__ = "Python Software Foundation"
__docformat__ = 'restructuredtext'
import os, urllib, string
from xml.dom import minidom
import hmac
import hashlib
import base64
from time import strftime
class Meta:
    """Module-level configuration shared by the query helpers."""
    license_key = None      # AWS license (access) key; set via setLicenseKey
    secret_key = None       # AWS secret key used to sign requests
    locale = "us"           # current locale; must be a key of `locales`
    version = "2007-04-04"  # ECS API version sent with every request
    options = {}            # extra request options; set via setOptions
    # Locale -> ECS endpoint host.
    # NOTE(review): the "de"/"jp"/"fr"/"ca" hosts look suspicious
    # (e.g. "ecs.amazonaws.co.de"); the historical endpoints were
    # ecs.amazonaws.de / .jp / .fr / .ca — confirm before relying on
    # non-us/uk locales.
    locales = {
        None : "ecs.amazonaws.com",
        "us" : "ecs.amazonaws.com",
        "uk" : "ecs.amazonaws.co.uk",
        "de" : "ecs.amazonaws.co.de",
        "jp" : "ecs.amazonaws.co.jp",
        "fr" : "ecs.amazonaws.co.fr",
        "ca" : "ecs.amazonaws.co.ca",
    }
def __buildPlugins():
    """
    Build the plugin table used by `unmarshal`.

    Returns a dict of the form::

        Operation => { 'isBypassed': set(...), 'isPivoted': set(...),
                       'isCollective': set(...), 'isCollected': set(...),
                       'isPaged': { key1: (...), key2: (...), ... } }
    """
    # ResponseGroups hierarchy: Parent => children.  Keeping the hierarchy
    # here reduces redundancy: when a child ResponseGroup changes, the
    # change propagates to every parent automatically.
    rgh = {
        'CustomerFull': ('CustomerInfo', 'CustomerLists', 'CustomerReviews'),
        'Large': ('Accessories', 'BrowseNodes', 'ListmaniaLists', 'Medium', 'Offers', 'Reviews', 'Similarities', 'Tracks'),
        'ListFull': ('ListInfo', 'ListItems'),
        'ListInfo': ('ListMinimum', ),
        'ListItems': ('ListMinimum', ),
        'Medium': ('EditorialReview', 'Images', 'ItemAttributes', 'OfferSummary', 'Request', 'SalesRank', 'Small'),
        'OfferFull': ('Offers',),
        'Offers': ('OfferSummary',),
        'Variations': ('VariationMinimum', 'VariationSummary')
    }
    # ResponseGroup and corresponding plugins:
    #   ResponseGroup => (isBypassed, isPivoted, isCollective, isCollected, isPaged)
    # isPaged is defined as { kwItems: (kwPage, kwTotalResults, pageSize) }
    #   - kwItems: string, the tagname of the collection
    #   - kwPage: string, the tagname of the page
    #   - kwTotalResults: string, the tagname of the length
    #   - pageSize: constant integer, the size of each page
    # CODE DEBT: do we need to remove the ResponseGroups in rgh? At least
    # Medium does not introduce any new attributes.
    rgps = {
        'Accessories': ((), (), ('Accessories',), ('Accessory',), {}),
        'AlternateVersions': ((), (), (), (), {}),
        'BrowseNodeInfo': ((), (), ('Children', 'Ancestors'), ('BrowseNode',), {}),
        'BrowseNodes': ((), (), ('Children', 'Ancestors', 'BrowseNodes'), ('BrowseNode',), {}),
        'Cart': ((), (), (), (), {}),
        'CartNewReleases': ((), (), (), (), {}),
        'CartTopSellers': ((), (), (), (), {}),
        'CartSimilarities': ((), (), (), (), {}),
        'Collections': ((), (), (), (), {}),
        'CustomerFull': ((), (), (), (), {}),
        'CustomerInfo': ((), (), ('Customers',), ('Customer',), {}),
        'CustomerLists': ((), (), ('Customers',), ('Customer',), {}),
        'CustomerReviews': ((), (), ('Customers', 'CustomerReviews',),('Customer', 'Review'),{}),
        'EditorialReview': ((), (), ('EditorialReviews',), ('EditorialReview',), {}),
        'Help': ((), (), ('RequiredParameters', 'AvailableParameters',
                          'DefaultResponseGroups', 'AvailableResponseGroups'),
                 ('Parameter', 'ResponseGroup'), {}),
        'Images': ((), (), ('ImageSets',), ('ImageSet',), {}),
        'ItemAttributes': ((), ('ItemAttributes',), (), (), {}),
        'ItemIds': ((), (), (), (), {}),
        'ItemLookup.Small': ((), ('ItemAttributes',), (), ('Item',),
                             {'Items': ('OfferPage', 'OfferPages', 10) }),
        'ItemSearch.Small': ((), ('ItemAttributes',), (), ('Item',),
                             {'Items': ('ItemPage', 'TotalPages', 10) }),
        'Large': ((), (), (), (), {}),
        'ListFull': ((), (), (), ('ListItem', ), {}),
        'ListInfo': ((), (), (), (), {}),
        'ListItems': ((), (), ('Lists',), ('ListItem', 'List'), {'List': ('ProductPage',
                                                                          'TotalPages', 10)}),
        'ListmaniaLists': ((), (), ('ListmaniaLists', ), ('ListmaniaList',), {}),
        'ListMinimum': ((), (), (), (), {}),
        'Medium': ((), (), (), (), {}),
        'MerchantItemAttributes': ((), (), (), (), {}),
        'NewReleases': ((), (), ('NewReleases',), ('NewRelease',), {}),
        'OfferFull': ((), (), (), (), {}),
        'OfferListings': ((), (), (), (), {}),
        'Offers': ((), (), (), ('Offer',), {'Offers': ('OfferPage', 'TotalOfferPages', 10)}),
        'OfferSummary': ((), (), (), (), {}),
        'Request': (('Request',), (), (), (), {}),
        'Reviews': ((), (), ('CustomerReviews', ),('Review',), {}),
        'SalesRank': ((), (), (), (), {}),
        'SearchBins': ((), (), ('SearchBinSets',), ('SearchBinSet',), {}),
        'SimilarityLookup.Small': ((), ('ItemAttributes',), ('Items',), ('Item',), {}),
        'Seller': ((), (), (), (), {}),
        'SellerListing': ((), (), (), (), {}),
        'Similarities': ((), (), ('SimilarProducts',), ('SimilarProduct',), {}),
        'Small': ((), (), (), (), {}),
        'Subjects': ((), (), ('Subjects',), ('Subject',), {}),
        'TopSellers': ((), (), ('TopSellers',), ('TopSeller',), {}),
        'Tracks': ((), ('Tracks',), (), (), {}),
        'TransactionDetails': ((), (), ('Transactions', 'TransactionItems', 'Shipments'),
                               ('Transaction', 'TransactionItem', 'Shipment'), {}),
        'Variations': ((), (), (), (), {}),
        'VariationMinimum': ((), (), ('Variations',), ('Variation',), {}),
        'VariationImages': ((), (), (), (), {}),
        'VariationSummary':((), (), (), (), {})
    }
    # Operation => ResponseGroups
    orgs = {
        'BrowseNodeLookup': ('Request', 'BrowseNodeInfo', 'NewReleases', 'TopSellers'),
        'CartAdd': ('Cart', 'Request', 'CartSimilarities', 'CartTopSellers', 'NewReleases'),
        'CartClear': ('Cart', 'Request'),
        'CartCreate': ('Cart', 'Request', 'CartSimilarities', 'CartTopSellers', 'CartNewReleases'),
        'CartGet': ('Cart', 'Request', 'CartSimilarities', 'CartTopSellers', 'CartNewReleases'),
        'CartModify': ('Cart', 'Request', 'CartSimilarities', 'CartTopSellers', 'CartNewReleases'),
        'CustomerContentLookup': ('Request', 'CustomerInfo', 'CustomerReviews', 'CustomerLists', 'CustomerFull'),
        'CustomerContentSearch': ('Request', 'CustomerInfo'),
        'Help': ('Request', 'Help'),
        'ItemLookup': ('Request', 'ItemLookup.Small', 'Accessories', 'BrowseNodes', 'EditorialReview', 'Images', 'ItemAttributes', 'ItemIds', 'Large', 'ListmaniaLists', 'Medium', 'MerchantItemAttributes', 'OfferFull', 'Offers', 'OfferSummary', 'Reviews', 'SalesRank', 'Similarities', 'Subjects', 'Tracks', 'VariationImages', 'VariationMinimum', 'Variations', 'VariationSummary'),
        'ItemSearch': ('Request', 'ItemSearch.Small', 'Accessories', 'BrowseNodes', 'EditorialReview', 'ItemAttributes', 'ItemIds', 'Large', 'ListmaniaLists', 'Medium', 'MerchantItemAttributes', 'OfferFull', 'Offers', 'OfferSummary', 'Reviews', 'SalesRank', 'SearchBins', 'Similarities', 'Subjects', 'Tracks', 'VariationMinimum', 'Variations', 'VariationSummary'),
        'ListLookup': ('Request', 'ListInfo', 'Accessories', 'BrowseNodes', 'EditorialReview', 'Images', 'ItemAttributes', 'ItemIds', 'Large', 'ListFull', 'ListItems', 'ListmaniaLists', 'Medium', 'Offers', 'OfferSummary', 'Reviews', 'SalesRank', 'Similarities', 'Subjects', 'Tracks', 'VariationMinimum', 'Variations', 'VariationSummary'),
        'ListSearch': ('Request', 'ListInfo', 'ListMinimum'),
        'SellerListingLookup': ('Request', 'SellerListing'),
        'SellerListingSearch': ('Request', 'SellerListing'),
        'SellerLookup': ('Request', 'Seller'),
        'SimilarityLookup': ('Request', 'SimilarityLookup.Small', 'Accessories', 'BrowseNodes', 'EditorialReview', 'Images', 'ItemAttributes', 'ItemIds', 'Large', 'ListmaniaLists', 'Medium', 'Offers', 'OfferSummary', 'Reviews', 'SalesRank', 'Similarities', 'Tracks', 'VariationMinimum', 'Variations', 'VariationSummary'),
        'TransactionLookup':('Request', 'TransactionDetails')
    }
    def collapse(responseGroups):
        # Flatten a ResponseGroup list, expanding parents (rgh keys) into
        # their children, recursively.
        l = []
        for x in responseGroups:
            l.append(x)
            if x in rgh:
                l.extend( collapse(rgh[x]) )
        return l
    def mergePlugins(responseGroups, index):
        # Merge the index-th plugin slot of every ResponseGroup.
        # CODE DEBT: magic number! Slot 4 (isPaged) is a dict; the other
        # slots are sets.
        if index == 4:
            s = dict()
        else:
            s = set()
        # BUGFIX: the original used map() purely for its side effect; on
        # Python 3 map() is lazy, so nothing was ever merged.  An explicit
        # loop behaves identically on Python 2 and correctly on Python 3.
        for x in responseGroups:
            s.update(rgps[x][index])
        return s
    def unionPlugins(responseGroups):
        return dict( [ (key, mergePlugins(collapse(responseGroups), index)) for index, key in enumerate(['isBypassed', 'isPivoted', 'isCollective', 'isCollected', 'isPaged']) ])
    return dict( [ (k, unionPlugins(v)) for k, v in orgs.items() ] )
__plugins = __buildPlugins()
# Basic class for ECS
class Bag:
    """A generic container for python objects; attributes are set freely."""
    def __repr__(self):
        return '<Bag instance: %r>' % (self.__dict__,)
class ListIterator(list):
    # Forward iterator adapter (per the module docstring): a plain list
    # subclass used as the common result type for collections.
    pass
class PaginatedIterator(ListIterator):
    """A page-based iterator using lazy evaluation: only the current page
    of results is held; further pages are fetched on demand."""

    def __init__(self, XMLSearch, arguments, keywords, element, plugins):
        """
        Initialize a `PaginatedIterator` object.

        Parameters:

        - `XMLSearch`: a function, the query to get the DOM
        - `arguments`: a dictionary, `XMLSearch`'s arguments
        - `keywords`: a tuple, (kwItems, (kwPage, kwTotalPages, pageSize) )
        - `element`: a DOM element, the root of the collection
        - `plugins`: a dictionary, collection of plugged objects
        """
        kwItems, (kwPage, kwTotalPages, pageSize) = keywords
        self.search = XMLSearch
        self.arguments = arguments
        self.plugins = plugins
        self.keywords ={'Items':kwItems, 'Page':kwPage}
        self.total_page = int(element.getElementsByTagName(kwTotalPages).item(0).firstChild.data)
        self.page = 1
        # Eagerly unmarshal the first page; later pages are fetched lazily.
        self.cache = unmarshal(XMLSearch, arguments, element, plugins, ListIterator())

    def __iter__(self):
        """Yield items page by page, fetching each next page on demand."""
        while True:
            for x in self.cache:
                yield x
            self.page += 1
            if self.page > self.total_page:
                # BUGFIX: a bare return ends the generator.  The original
                # raised StopIteration explicitly, which Python 3.7+
                # converts into a RuntimeError (PEP 479).
                return
            self.arguments[self.keywords['Page']] = self.page
            dom = self.search(** self.arguments)
            self.cache = unmarshal(self.search, self.arguments, dom.getElementsByTagName(self.keywords['Items']).item(0), self.plugins, ListIterator())
def SimpleObject(XMLSearch, arguments, kwItem, plugins=None):
    """Run the query and unmarshal the first `kwItem` element into a python object."""
    dom = XMLSearch(**arguments)
    root = dom.getElementsByTagName(kwItem).item(0)
    return unmarshal(XMLSearch, arguments, root, plugins)
def Collection(XMLSearch, arguments, kwItems, plugins=None):
    """Run the query and unmarshal the `kwItems` element into a ListIterator."""
    dom = XMLSearch(**arguments)
    root = dom.getElementsByTagName(kwItems).item(0)
    return unmarshal(XMLSearch, arguments, root, plugins, ListIterator())
def Pagination(XMLSearch, arguments, keywords, plugins):
    """Run the query and wrap the `keywords[0]` element in a lazy PaginatedIterator."""
    dom = XMLSearch(**arguments)
    element = dom.getElementsByTagName(keywords[0]).item(0)
    return PaginatedIterator(XMLSearch, arguments, keywords, element, plugins)
# Exception classes
# AWSException is the root of this module's exception hierarchy; every
# exception below derives from it so callers can catch the whole family
# with a single except clause.
class AWSException(Exception) : pass
class NoLicenseKey(AWSException) : pass
class BadLocale(AWSException) : pass
class BadOption(AWSException): pass
# Runtime exception
class ExactParameterRequirement(AWSException): pass
class ExceededMaximumParameterValues(AWSException): | |
<filename>agent0/common/atari_wrappers.py
import copy
from collections import deque, defaultdict
import cv2
import gym
import numpy as np
from gym import spaces
from lz4.block import compress
cv2.ocl.setUseOpenCL(False)
class ClipActionsWrapper(gym.Wrapper):
    """Sanitize actions: replace NaNs with zeros and clip into the action space."""

    def step(self, action):
        # BUGFIX: the original annotated `action: int`, but the body uses
        # np.nan_to_num and clips against action_space.low/high — i.e. it
        # expects an array-like continuous (Box) action, not an int.
        action = np.nan_to_num(action)
        action = np.clip(action, self.action_space.low, self.action_space.high)
        return self.env.step(action)

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
class NoopResetEnv(gym.Wrapper):
    """On reset, advance the environment by a random number of no-op
    (action 0) steps so that episodes start from varied initial states."""

    def __init__(self, env, noop_max=30, noop_num=None):
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        self.override_num_noops = noop_num
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'

    def reset(self, **kwargs):
        """Reset, then take between 1 and noop_max no-ops (or exactly
        `noop_num` if one was given at construction)."""
        self.env.reset(**kwargs)
        noops = self.override_num_noops
        if noops is None:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)  # pylint: disable=E1101
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                obs = self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        # Action 1 must be FIRE and at least one further action must exist;
        # both are checked against the env's own action meanings.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def reset(self, **kwargs):
        # After every reset, press FIRE (action 1) and then action 2,
        # re-resetting if either step happens to end the episode.
        self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        # NOTE(review): if the second step ended the episode, the returned
        # obs predates the re-reset — confirm this is intended (the code
        # matches the widespread OpenAI-baselines wrapper).
        return obs

    def step(self, ac):
        return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        # Lives remaining as of the previous step (0 until first step).
        self.lives = 0
        # True when the underlying emulator reported a real game over.
        self.was_real_done = True
    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if self.lives > lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info
    def reset(self, force_reset=False, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done or force_reset:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        # Resync the life counter after either kind of reset.
        self.lives = self.env.unwrapped.ale.lives()
        return obs
# noinspection PyArgumentList
class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame"""
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
        self._skip = skip
    def step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        info = {}
        done = None
        for i in range(self._skip):
            obs_, reward, done, info = self.env.step(action)
            # Only the last two raw frames are kept for the pixel-wise max below.
            if i == self._skip - 2:
                self._obs_buffer[0] = obs_
            if i == self._skip - 1:
                self._obs_buffer[1] = obs_
            total_reward += reward
            if done:
                break
        # Note that the observation on the done=True frame
        # doesn't matter
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, total_reward, done, info
    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
class NormReward(gym.RewardWrapper):
    """Compress rewards with a signed log1p transform."""

    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)

    def reward(self, reward):
        # log1p(|r|) carrying the sign of r.
        if reward >= 0:
            return np.log1p(reward)
        return -np.log1p(-reward)
class GaussianReward(gym.RewardWrapper):
    """Perturb each reward with Gaussian noise (std 0.25) around its value."""

    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)

    def reward(self, reward):
        # Sample around the true reward with a fixed standard deviation.
        return np.random.normal(float(reward), 0.25)
class ClipRewardEnv(gym.RewardWrapper):
    """Reduce every reward to its sign, as in the DQN setup."""

    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)

    def reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
        """
        Warp frames to 84x84 as done in the Nature paper and later work.
        If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
        observation should be warped.
        """
        super().__init__(env)
        self._width = width
        self._height = height
        self._grayscale = grayscale
        self._key = dict_space_key
        # Grayscale collapses the image to a single channel.
        if self._grayscale:
            num_colors = 1
        else:
            num_colors = 3
        new_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(self._height, self._width, num_colors),
            dtype=np.uint8,
        )
        # For dict observations, only the selected sub-space is replaced.
        if self._key is None:
            original_space = self.observation_space
            self.observation_space = new_space
        else:
            original_space = self.observation_space.spaces[self._key]
            self.observation_space.spaces[self._key] = new_space
        # The wrapped space must be a 3-channel uint8 image.
        assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
    def observation(self, obs):
        if self._key is None:
            frame = obs
        else:
            frame = obs[self._key]
        if self._grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(
            frame, (self._width, self._height), interpolation=cv2.INTER_AREA
        )
        if self._grayscale:
            # cvtColor dropped the channel axis; restore it as a trailing dim.
            frame = np.expand_dims(frame, -1)
        if self._key is None:
            obs = frame
        else:
            # Copy so the caller's dict is not mutated in place.
            obs = obs.copy()
            obs[self._key] = frame
        return obs
class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack the k last frames along the first (channel) axis.
        Note: frames are concatenated eagerly in _get_ob(); see
        baselines.common.atari_wrappers.LazyFrames for the lazy,
        memory-efficient variant this docstring originally referred to.
        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)
        # Snapshot of `frames` captured by clone() and replayed by restore().
        self.frames_ = self.frames.copy()
        shp = env.observation_space.shape
        self.observation_space = gym.spaces.Box(low=0, high=255, shape=((shp[0] * k,) + shp[1:]),
                                                dtype=env.observation_space.dtype)
    def reset(self, **kwargs):
        ob = self.env.reset(**kwargs)
        # Fill the buffer with k copies of the first frame.
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()
    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info
    def _get_ob(self):
        assert len(self.frames) == self.k
        return np.concatenate(self.frames, axis=0)
    def restore(self, state):
        # NOTE(review): `self.ale` is not defined on this wrapper; presumably
        # resolved via gym.Wrapper attribute delegation to the ALE env — verify.
        self.ale.restoreSystemState(state)
        self.frames = self.frames_.copy()
        return np.concatenate(self.frames_, axis=0)
    def clone(self):
        self.frames_ = self.frames.copy()
        return self.ale.cloneSystemState()
class NStepEnv(gym.Wrapper):
    """Attach n-step discounted-return data for the oldest buffered transition to `info`."""
    def __init__(self, env, n, discount):
        gym.Wrapper.__init__(self, env)
        self.n = n                      # horizon of the n-step return
        self.discount = discount        # per-step discount factor
        self.tracker = deque(maxlen=n)  # last n (obs, action, reward, done, info) tuples
        self.last_obs = None            # observation that preceded the next action
    def reset(self, **kwargs):
        # NOTE(review): `tracker` is not cleared here, so transitions from the
        # previous episode can appear in the first n-step windows — confirm intended.
        ob = self.env.reset(**kwargs)
        self.last_obs = ob
        return ob
    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.tracker.append((self.last_obs, action, reward, done, info))
        self.last_obs = ob
        r_discounted = 0
        done_discounted = False
        bad_transit = False
        # Accumulate the discounted return backwards over the window;
        # the (1 - d) factor truncates the return at episode boundaries.
        for _, _, r, d, inf in reversed(self.tracker):
            r_discounted = r_discounted * self.discount * (1 - d) + r
            if d:
                done_discounted = True
            if 'counter' in inf:
                # 'counter' is set by StateCountEnv when it cuts an episode short.
                bad_transit = True
        info.update(
            prev_obs=self.tracker[0][0],
            prev_action=self.tracker[0][1],
            prev_reward=r_discounted,
            prev_done=done_discounted,
            prev_bad_transit=bad_transit,
        )
        return ob, reward, done, info
class StateCountEnv(gym.Wrapper):
    """End episodes that repeat the same (observation, action) pair too often."""

    def __init__(self, env):
        gym.Wrapper.__init__(self, env)
        self.ep_counter = defaultdict(int)
        self.ep_len = 0
        self.obs_ = None
        self.max_count = 10

    def reset(self, **kwargs):
        # Force a full reset when the previous episode was stuck in a loop.
        if self.ep_counter and max(self.ep_counter.values()) > self.max_count:
            kwargs.update(force_reset=True)
        ob = self.env.reset(**kwargs)
        self.ep_counter.clear()
        self.obs_ = ob
        self.ep_len = 0
        return ob

    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        # Count visits keyed on the pre-action observation bytes plus the action.
        key = hash((self.obs_.tobytes(), action))
        self.ep_counter[key] += 1
        self.ep_len += 1
        if self.ep_len > 100 and self.ep_counter[key] > self.max_count:
            info.update(counter=(self.max_count, self.ep_len))
            done = True
        self.obs_ = ob
        return ob, reward, done, info
class RewardStatEnv(gym.Wrapper):
    """Accumulate per-episode step count and raw reward, reported via `info`."""
    def __init__(self, env):
        gym.Wrapper.__init__(self, env)
        self.steps = 0        # steps taken in the current real episode
        self.real_reward = 0  # reward accumulated this episode (pre-clipping wrappers)
    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.steps += 1
        self.real_reward += reward
        if done:
            # NOTE(review): `was_real_done` is not set by this wrapper; it is
            # presumably delegated to an inner EpisodicLifeEnv — verify wrapper order.
            if self.was_real_done or 'counter' in info:
                info.update(real_reward=self.real_reward, steps=self.steps, real_done=self.was_real_done)
                self.steps = 0
                self.real_reward = 0
        return ob, reward, done, info
class EpRecordEnv(gym.Wrapper):
    """Record (compressed observation, action) pairs and expose the best episode.

    When an episode finishes with a new best `real_reward`, the recorded
    trajectory is attached to `info` under the key ``best_ep``.
    """

    def __init__(self, env):
        gym.Wrapper.__init__(self, env)
        self.cur_ep = []
        self.best_return = float('-inf')
        self.ob = None

    def reset(self, **kwargs):
        ob = self.env.reset(**kwargs)
        self.ob = ob
        return ob

    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        # Store the observation that preceded the action, compressed to save memory.
        self.cur_ep.append((compress(self.ob), action))
        # Bug fix: previously `self.ob` was never updated here, so every
        # recorded pair stored the episode's reset frame instead of the
        # frame the action was taken from.
        self.ob = ob
        if 'real_reward' in info and info['real_reward'] > self.best_return:
            self.best_return = info['real_reward']
            info.update(best_ep=copy.deepcopy(self.cur_ep))
            # NOTE(review): cur_ep is only cleared on a new best episode,
            # as in the original — confirm whether it should also clear on
            # every episode end.
            self.cur_ep = []
        return ob, reward, done, info
class ScaledFloatFrame(gym.ObservationWrapper):
    """Rescale uint8 observations to float32 values in [0, 1]."""

    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        self.observation_space = gym.spaces.Box(
            low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)

    def observation(self, observation):
        # careful! This undoes the memory optimization, use
        # with smaller replay buffers only.
        return np.asarray(observation, dtype=np.float32) / 255.0
class TransposeImage(gym.ObservationWrapper):
    """Move the trailing channel axis of image observations to the front."""

    def __init__(self, env=None):
        super(TransposeImage, self).__init__(env)
        d0, d1, d2 = self.observation_space.shape
        # Declared shape mirrors the original wrapper: axes reported reversed.
        self.observation_space = spaces.Box(
            self.observation_space.low[0, 0, 0],
            self.observation_space.high[0, 0, 0],
            [d2, d1, d0],
            dtype=self.observation_space.dtype)

    def observation(self, observation):
        # HWC -> CHW
        return observation.transpose(2, 0, 1)
def _process_frame42(frame):
    """Crop, downscale to 42x42, grayscale, and rescale an Atari frame to [0, 1].

    Returns a float32 array with a leading channel axis of size 1.
    """
    cropped = frame[34:34 + 160, :160]
    # Resize by half, then down to 42x42 (essentially mipmapping). If
    # we resize directly we lose pixels that, when mapped to 42x42,
    # aren't close enough to the pixel boundary.
    halved = cv2.resize(cropped, (80, 80))
    small = cv2.resize(halved, (42, 42))
    gray = small.mean(2, keepdims=True).astype(np.float32)
    gray *= (1.0 / 255.0)
    return np.moveaxis(gray, -1, 0)
class AtariRescale42x42(gym.ObservationWrapper):
    """Observation wrapper that applies :func:`_process_frame42` to each frame."""

    def __init__(self, env=None):
        super(AtariRescale42x42, self).__init__(env)
        self.observation_space = spaces.Box(0.0, 1.0, [1, 42, 42])

    def observation(self, observation):
        return _process_frame42(observation)
class NormalizedEnv(gym.ObservationWrapper):
def __init__(self, env=None):
super(NormalizedEnv, self).__init__(env)
self.state_mean = 0
self.state_std = 0
self.alpha = 0.9999
self.num_steps = 0
def observation(self, observation):
self.num_steps += 1
self.state_mean = self.state_mean * self.alpha + observation.mean() * (1 - self.alpha)
self.state_std = self.state_std * self.alpha + observation.std() * (1 - self.alpha)
unbiased_mean = self.state_mean / (1 - pow(self.alpha, self.num_steps))
unbiased_std = self.state_std / (1 | |
<gh_stars>10-100
import json
import os
import time
from csep.core import forecasts
from csep.core import catalogs
from csep.core import poisson_evaluations
from csep.core import catalog_evaluations
from csep.core import regions
from csep.core.repositories import (
load_json,
write_json
)
from csep.core.exceptions import CSEPCatalogException
from csep.utils import datasets
from csep.utils import readers
from csep.core.forecasts import GriddedForecast, CatalogForecast
from csep.models import (
EvaluationResult,
CatalogNumberTestResult,
CatalogSpatialTestResult,
CatalogMagnitudeTestResult,
CatalogPseudolikelihoodTestResult,
CalibrationTestResult
)
from csep.utils.time_utils import (
utc_now_datetime,
strptime_to_utc_datetime,
datetime_to_utc_epoch,
epoch_time_to_utc_datetime,
utc_now_epoch
)
# this defines what is imported on a `from csep import *`
__all__ = [
'load_json',
'write_json',
'catalogs',
'datasets',
'regions',
'poisson_evaluations',
'catalog_evaluations',
'forecasts',
'load_stochastic_event_sets',
'load_catalog',
'query_comcat',
'load_evaluation_result',
'load_gridded_forecast',
'load_catalog_forecast',
'utc_now_datetime',
'strptime_to_utc_datetime',
'datetime_to_utc_epoch',
'epoch_time_to_utc_datetime',
'utc_now_epoch'
]
def load_stochastic_event_sets(filename, type='csv', format='native', **kwargs):
    """ General function to load stochastic event sets
    This function returns a generator to iterate through a collection of catalogs.
    To load a forecast and include metadata use :func:`csep.load_catalog_forecast`.
    Args:
        filename (str): name of file or directory where stochastic event sets live.
        type (str): either 'ucerf3' or 'csv' depending on the type of observed_catalog to load
        format (str): ('csep' or 'native') if native catalogs are not converted to csep format.
        kwargs (dict): see the documentation of that class corresponding to the type you selected
                       for the kwargs options
    Returns:
        (generator): :class:`~csep.core.catalogs.AbstractBaseCatalog`
    Raises:
        ValueError: if ``type`` or ``format`` is not one of the supported values.
    """
    if type not in ('ucerf3', 'csv'):
        # Bug fix: the message previously only mentioned 'ucerf3' even though
        # 'csv' is also accepted (and is the default).
        raise ValueError("type must be one of the following: ('ucerf3', 'csv')")
    # use mapping to dispatch to correct function
    # in general, stochastic event sets are loaded with classmethods and single catalogs use the
    # constructor
    mapping = {'ucerf3': catalogs.UCERF3Catalog.load_catalogs,
               'csv': catalogs.CSEPCatalog.load_ascii_catalogs}
    # dispatch to proper loading function and convert on the fly
    for catalog in mapping[type](filename, **kwargs):
        if format == 'native':
            yield catalog
        elif format == 'csep':
            yield catalog.get_csep_format()
        else:
            # Bug fix: the closing quote was missing in the original message.
            raise ValueError('format must be either "native" or "csep"')
def load_catalog(filename, type='csep-csv', format='native', loader=None, apply_filters=False, **kwargs):
    """ General function to load single catalog
    See corresponding class documentation for additional parameters.
    Args:
        filename (str): path to the catalog file
        type (str): ('ucerf3', 'csep-csv', 'zmap', 'jma-csv', 'ndk', 'ingv_horus',
                     'ingv_emrcmt') default is 'csep-csv'
        format (str): ('native', 'csep') determines whether the catalog should be
                      converted into the csep formatted catalog or kept as native.
        loader (func): custom loader; overrides the default loader for `type`
        apply_filters (bool): if true, will apply filters and spatial filter to catalog.
                              time-varying magnitude completeness will still need to be
                              applied. filters kwarg should be included. see catalog
                              documentation for more details.
    Returns (:class:`~csep.core.catalogs.AbstractBaseCatalog`)
    """
    supported_types = ('ucerf3', 'csep-csv', 'zmap', 'jma-csv', 'ingv_horus', 'ingv_emrcmt', 'ndk')
    if type not in supported_types and loader is None:
        raise ValueError("type must be one of the following: ('ucerf3', 'csep-csv', 'zmap', 'jma-csv', 'ndk', 'ingv_horus', 'ingv_emrcmt').")
    # human-readable type -> catalog class; at some point these could be
    # abstracted into a configuration file
    class_mapping = {
        'ucerf3': catalogs.UCERF3Catalog,
        'csep-csv': catalogs.CSEPCatalog,
        'zmap': catalogs.CSEPCatalog,
        'jma-csv': catalogs.CSEPCatalog,
        'ndk': catalogs.CSEPCatalog,
        'ingv_horus': catalogs.CSEPCatalog,
        'ingv_emrcmt': catalogs.CSEPCatalog,
    }
    # human-readable type -> default loader function
    loader_mapping = {
        'ucerf3': None,
        'csep-csv': readers.csep_ascii,
        'zmap': readers.zmap_ascii,
        'jma-csv': readers.jma_csv,
        'ndk': readers.ndk,
        'ingv_horus': readers.ingv_horus,
        'ingv_emrcmt': readers.ingv_emrcmt,
    }
    catalog_class = class_mapping[type]
    # json files are handled via the class' json reader instead of a loader
    if os.path.splitext(filename)[-1][1:] == 'json':
        catalog = catalog_class.load_json(filename, **kwargs)
    else:
        if loader is None:
            loader = loader_mapping[type]
        catalog = catalog_class.load_catalog(filename=filename, loader=loader, **kwargs)
    # convert to csep format if requested
    if format == 'native':
        return_val = catalog
    elif format == 'csep':
        return_val = catalog.get_csep_format()
    else:
        raise ValueError('format must be either "native" or "csep"')
    if apply_filters:
        try:
            return_val = return_val.filter().filter_spatial()
        except CSEPCatalogException:
            # no spatial information available; apply the plain filters only
            return_val = return_val.filter()
    return return_val
def query_comcat(start_time, end_time, min_magnitude=2.50,
                 min_latitude=31.50, max_latitude=43.00,
                 min_longitude=-125.40, max_longitude=-113.10, verbose=True,
                 apply_filters=False, **kwargs):
    """
    Access Comcat catalog through web service
    Args:
        start_time: datetime object of start of catalog
        end_time: datetime object for end of catalog
        min_magnitude: minimum magnitude to query
        min_latitude: min latitude of bounding box
        max_latitude: max latitude of bounding box
        min_longitude: min longitude of bounding box
        max_longitude: max longitude of bounding box
        verbose (bool): print catalog summary statistics
        apply_filters (bool): if true, apply catalog filters and the spatial filter
        **kwargs: passed to the CSEPCatalog constructor
    Returns:
        :class:`csep.core.catalogs.CSEPCatalog`
    """
    # Timezone should be in UTC
    t0 = time.time()
    eventlist = readers._query_comcat(start_time=start_time, end_time=end_time,
                                      min_magnitude=min_magnitude,
                                      min_latitude=min_latitude, max_latitude=max_latitude,
                                      min_longitude=min_longitude, max_longitude=max_longitude)
    t1 = time.time()
    # record the access time so downloads are attributable/reproducible
    comcat = catalogs.CSEPCatalog(data=eventlist, date_accessed=utc_now_datetime(), **kwargs)
    print("Fetched ComCat catalog in {} seconds.\n".format(t1 - t0))
    if apply_filters:
        try:
            comcat = comcat.filter().filter_spatial()
        except CSEPCatalogException:
            # spatial filtering not possible; fall back to plain filters
            comcat = comcat.filter()
    if verbose:
        print("Downloaded catalog from ComCat with following parameters")
        print("Start Date: {}\nEnd Date: {}".format(str(comcat.start_time), str(comcat.end_time)))
        print("Min Latitude: {} and Max Latitude: {}".format(comcat.min_latitude, comcat.max_latitude))
        print("Min Longitude: {} and Max Longitude: {}".format(comcat.min_longitude, comcat.max_longitude))
        print("Min Magnitude: {}".format(comcat.min_magnitude))
        print(f"Found {comcat.event_count} events in the ComCat catalog.")
    return comcat
def load_evaluation_result(fname):
    """ Load evaluation result stored as json file
    Args:
        fname (str): path to a json file written by an evaluation.
    Returns:
        :class:`csep.core.evaluations.EvaluationResult`
    """
    # tries to return the correct class for the evaluation result. if it cannot find the type simply returns the basic result.
    evaluation_result_factory = {
        'default': EvaluationResult,
        'EvaluationResult': EvaluationResult,
        'CatalogNumberTestResult': CatalogNumberTestResult,
        'CatalogSpatialTestResult': CatalogSpatialTestResult,
        'CatalogMagnitudeTestResult': CatalogMagnitudeTestResult,
        'CatalogPseudoLikelihoodTestResult': CatalogPseudolikelihoodTestResult,
        'CalibrationTestResult': CalibrationTestResult
    }
    with open(fname, 'r') as json_file:
        json_dict = json.load(json_file)
    # Bug fix: the original used a bare `except:` around the key lookup, which
    # also swallowed KeyboardInterrupt/SystemExit; only a missing 'type' key
    # should fall back to the default result class.
    evaluation_type = json_dict.get('type', 'default')
    eval_result = evaluation_result_factory[evaluation_type].from_dict(json_dict)
    return eval_result
def load_gridded_forecast(fname, loader=None, **kwargs):
    """ Loads grid based forecast from hard-disk.
    The forecast at ``fname`` is loaded either with a built-in loader chosen
    from the file extension, or with the custom ``loader`` callable if one is
    provided. A custom loader must accept the filename as its first argument
    and return a :class:`csep.core.forecasts.GriddedForecast` with the region
    and magnitude members correctly assigned.
    File extensions:
        .dat -> CSEP ascii format
        .xml -> CSEP xml format (not yet implemented)
        .h5 -> CSEP hdf5 format (not yet implemented)
        .bin -> CSEP binary format (not yet implemented)
    Args:
        fname (str): path of grid based forecast
        loader (func): function to load forecast in bespoke format, called as
                       loader(fname, **kwargs)
        **kwargs: passed into loader function
    Throws:
        FileNotFoundError: when ``fname`` does not exist.
        AttributeError: when the file extension is not known and a loader is
            not provided, or when the provided loader is not callable.
    Returns:
        :class:`csep.core.forecasts.GriddedForecast`
    """
    # sanity checks
    if not os.path.exists(fname):
        raise FileNotFoundError(f"Could not locate file {fname}. Unable to load forecast.")
    if loader is not None and not callable(loader):
        raise AttributeError("Loader must be callable. Unable to load forecast.")
    extension = os.path.splitext(fname)[-1][1:]
    if extension not in ('dat', 'xml', 'h5', 'bin') and loader is None:
        raise AttributeError("File extension should be in ('dat','xml','h5','bin') if loader not provided.")
    if extension in ('xml', 'h5', 'bin'):
        raise NotImplementedError
    # only the CSEP ascii reader is built in; new formats get added here
    if loader is None:
        loader = GriddedForecast.load_ascii
    forecast = loader(fname, **kwargs)
    # final sanity check
    if not isinstance(forecast, GriddedForecast):
        raise ValueError("Forecast not instance of GriddedForecast")
    return forecast
def load_catalog_forecast(fname, catalog_loader=None, format='native', type='ascii', **kwargs):
""" General function to handle loading catalog forecasts.
Currently, just a simple wrapper, but can contain more complex logic in the future.
Args:
fname (str): pathname to the forecast file or directory containing the forecast files
catalog_loader (func): callable that can load catalogs, see load_stochastic_event_sets above.
format (str): either 'native' or 'csep'. if 'csep', will attempt to be returned into csep catalog format. used to convert between
observed_catalog type.
type (str): either 'ucerf3' or 'csep', determines the catalog format of the forecast. if loader is provided, then
this parameter is ignored.
**kwargs: other keyword arguments passed to the :class:`csep.core.forecasts.CatalogForecast`.
Returns:
:class:`csep.core.forecasts.CatalogForecast`
"""
# sanity checks
if not os.path.exists(fname):
raise FileNotFoundError(f"Could not locate file {fname}. Unable to load forecast.")
# sanity checks
if catalog_loader is not None and not callable(catalog_loader):
raise AttributeError("Loader must be callable. Unable to load forecast.")
# factory methods for loading different types of catalogs
catalog_loader_mapping = {
'ascii': catalogs.CSEPCatalog.load_ascii_catalogs,
'ucerf3': catalogs.UCERF3Catalog.load_catalogs
}
if catalog_loader is None:
catalog_loader = catalog_loader_mapping[type]
# try and parse information from filename and send to forecast constructor
if format == 'native' and type=='ascii':
try:
basename = str(os.path.basename(fname.rstrip('/')).split('.')[0])
split_fname = | |
from collections import namedtuple
import operator
import typing
import warnings
import numpy as np
import audeer
from audmetric.core.utils import (
assert_equal_length,
infer_labels,
scores_per_subgroup_and_class,
)
def accuracy(
        truth: typing.Sequence[typing.Any],
        prediction: typing.Sequence[typing.Any],
        labels: typing.Sequence[typing.Union[str, int]] = None
) -> float:
    r"""Classification accuracy.
    .. math::
        \text{accuracy} = \frac{\text{number of correct predictions}}
        {\text{number of incorrect predictions}}
    Args:
        truth: ground truth values/classes
        prediction: predicted values/classes
        labels: included labels in preferred ordering.
            A sample enters the computation if either its prediction or its
            ground truth (logical OR) is contained in ``labels``.
            If no labels are supplied,
            they are inferred from :math:`\{\text{prediction}, \text{truth}\}`
            and ordered alphabetically.
    Returns:
        accuracy of prediction :math:`\in [0, 1]`
    Raises:
        ValueError: if ``truth`` and ``prediction`` differ in length
    Example:
        >>> accuracy([0, 0], [0, 1])
        0.5
    """
    assert_equal_length(truth, prediction)
    if labels is None:
        labels = infer_labels(truth, prediction)
    truth = np.array(truth)
    prediction = np.array(prediction)
    # keep samples where either the prediction or the truth is a known label
    keep = np.isin(truth, labels) | np.isin(prediction, labels)
    truth = truth[keep]
    prediction = prediction[keep]
    if len(prediction) == 0:
        return np.nan
    return float((prediction == truth).sum() / len(prediction))
def concordance_cc(
        truth: typing.Sequence[float],
        prediction: typing.Sequence[float],
) -> float:
    r"""Concordance correlation coefficient.
    .. math::
        \rho_c = \frac{2\rho\sigma_\text{prediction}\sigma_\text{truth}}
        {\sigma_\text{prediction}^2 + \sigma_\text{truth}^2 + (
        \mu_\text{prediction}-\mu_\text{truth})^2}
    where :math:`\rho` is the Pearson correlation coefficient,
    :math:`\mu` the mean and :math:`\sigma^2` the variance.
    Args:
        truth: ground truth values
        prediction: predicted values
    Returns:
        concordance correlation coefficient :math:`\in [-1, 1]`
    Raises:
        ValueError: if ``truth`` and ``prediction`` differ in length
    Example:
        >>> concordance_cc([0, 1, 2], [0, 1, 1])
        0.6666666666666666
    """
    assert_equal_length(truth, prediction)
    prediction = np.array(prediction)
    truth = np.array(truth)
    if len(prediction) < 2:
        # CCC is undefined for fewer than two samples.
        # Bug fix: np.NaN -> np.nan; the capitalized alias was removed in
        # NumPy 2.0 and the lowercase spelling is used elsewhere in this module.
        return np.nan
    r = pearson_cc(prediction, truth)
    x_mean = prediction.mean()
    y_mean = truth.mean()
    x_std = prediction.std()
    y_std = truth.std()
    denominator = (
        x_std * x_std
        + y_std * y_std
        + (x_mean - y_mean) * (x_mean - y_mean)
    )
    if denominator == 0:
        # both sequences are constant with equal means: undefined
        ccc = np.nan
    else:
        ccc = 2 * r * x_std * y_std / denominator
    return float(ccc)
def confusion_matrix(
        truth: typing.Sequence[typing.Any],
        prediction: typing.Sequence[typing.Any],
        labels: typing.Sequence[typing.Any] = None,
        *,
        normalize: bool = False,
) -> typing.List[typing.List[typing.Union[int, float]]]:
    r"""Confusion matrix.
    Args:
        truth: ground truth values/classes
        prediction: predicted values/classes
        labels: included labels in preferred ordering.
            If no labels are supplied,
            they are inferred from :math:`\{\text{prediction}, \text{truth}\}`
            and ordered alphabetically.
        normalize: normalize confusion matrix over the rows
    Returns:
        confusion matrix
    Raises:
        ValueError: if ``truth`` and ``prediction`` differ in length
    Example:
        >>> truth = [0, 1, 2]
        >>> prediction = [0, 2, 0]
        >>> confusion_matrix(truth, prediction)
        [[1, 0, 0], [0, 0, 1], [1, 0, 0]]
    """
    assert_equal_length(truth, prediction)
    if labels is None:
        labels = infer_labels(truth, prediction)
    truth = np.array(truth)
    prediction = np.array(prediction)
    # one row per true label, one column per predicted label
    matrix = []
    for true_label in labels:
        predicted = prediction[truth == true_label]
        matrix.append(
            [int((predicted == predicted_label).sum()) for predicted_label in labels]
        )
    if normalize:
        for idx, row in enumerate(matrix):
            total = float(np.sum(row))
            if total != 0:
                matrix[idx] = [count / total for count in row]
    return matrix
def detection_error_tradeoff(
        truth: typing.Union[
            typing.Union[bool, int],
            typing.Sequence[typing.Union[bool, int]]
        ],
        prediction: typing.Union[
            typing.Union[bool, int, float],
            typing.Sequence[typing.Union[bool, int, float]]
        ],
) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:
    r"""Detection error tradeoff for verification experiments.
    The `detection error tradeoff (DET)`_ is a graph showing
    the false non-match rate (FNMR) against the false match rate (FMR).
    The FNMR indicates how often an enrolled speaker was missed;
    the FMR indicates how often an impostor was verified as the
    enrolled speaker.
    Instead of a figure this function returns the FMR and FNMR,
    together with the corresponding verification thresholds at which a
    similarity value was regarded to belong to the enrolled speaker.
    ``truth`` may only contain entries like ``[1, 0, True, False...]``,
    whereas prediction values
    can also contain similarity scores, e.g. ``[0.8, 0.1, ...]``.
    The implementation is identical with the one provided
    by the pyeer_ package.
    .. _detection error tradeoff (DET): https://en.wikipedia.org/wiki/Detection_error_tradeoff
    .. _pyeer: https://github.com/manuelaguadomtz/pyeer
    Args:
        truth: ground truth classes
        prediction: predicted classes or similarity scores
    Returns:
        * false match rate (FMR)
        * false non-match rate (FNMR)
        * verification thresholds
    Raises:
        ValueError: if ``truth`` contains values
            different from ``1, 0, True, False``
    Example:
        >>> truth = [1, 0]
        >>> prediction = [0.9, 0.1]
        >>> detection_error_tradeoff(truth, prediction)
        (array([1., 0.]), array([0., 0.]), array([0.1, 0.9]))
    """  # noqa: E501
    truth = np.array(truth)
    allowed_truth_values = set([1, 0, True, False])
    if not set(truth).issubset(allowed_truth_values):
        raise ValueError(
            "'truth' is only allowed to contain "
            "[1, 0, True, False], "
            'yours contains:\n'
            f"[{', '.join([str(t) for t in set(truth)])}]"
        )
    truth = truth.astype(bool)
    prediction = np.array(prediction).astype(np.float64)
    genuine_scores = prediction[truth]      # enrolled-speaker (genuine) trials
    impostor_scores = prediction[~truth]    # impostor trials
    n_genuine = len(genuine_scores)
    n_impostor = len(impostor_scores)
    # Tag genuine scores with 1 and impostor scores with 0, then sort by
    # score; the stable sort keeps genuine entries first on ties, which
    # matches the pyeer reference implementation.
    tagged = [(score, 1) for score in genuine_scores]
    tagged += [(score, 0) for score in impostor_scores]
    tagged.sort(key=lambda pair: pair[0])
    scores = np.array(tagged)
    # Running count of genuine scores seen so far.
    cumul = np.cumsum(scores[:, 1])
    # One entry per distinct threshold (index of its first occurrence).
    thresholds, u_indices = np.unique(scores[:, 0], return_index=True)
    # False non-matches: genuine scores rejected at each threshold (s < t).
    fnm = cumul[u_indices] - scores[u_indices][:, 1]
    # False matches: impostor scores accepted at each threshold.
    fm = n_impostor - (u_indices - fnm)
    fnmr = fnm / n_genuine
    fmr = fm / n_impostor
    return fmr, fnmr, thresholds
def edit_distance(
        truth: typing.Union[str, typing.Sequence[int]],
        prediction: typing.Union[str, typing.Sequence[int]]
) -> int:
    r"""Edit distance between two strings of characters or sequences of ints.
    Implemented as the two-row variant of the `Wagner-Fischer algorithm`_.
    .. _Wagner-Fischer algorithm:
        https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm
    Args:
        truth: ground truth sequence
        prediction: predicted sequence
    Returns:
        edit distance
    Example:
        >>> truth = 'lorem'
        >>> prediction = 'lorm'
        >>> edit_distance(truth, prediction)
        1
        >>> truth = [0, 1, 2]
        >>> prediction = [0, 1]
        >>> edit_distance(truth, prediction)
        1
    """
    # trivial cases: identical sequences, or one of them empty
    if truth == prediction:
        return 0
    if len(prediction) == 0:
        return len(truth)
    if len(truth) == 0:
        return len(prediction)
    # previous DP row: distance from the empty prefix of `prediction`
    # to every prefix of `truth`
    previous = list(range(len(truth) + 1))
    for row, pred_symbol in enumerate(prediction, start=1):
        current = [row]
        for col, true_symbol in enumerate(truth, start=1):
            substitution_cost = 0 if pred_symbol == true_symbol else 1
            current.append(min(
                current[col - 1] + 1,                    # deletion
                previous[col] + 1,                       # insertion
                previous[col - 1] + substitution_cost,   # substitution
            ))
        previous = current
    return previous[len(truth)]
def equal_error_rate(
truth: typing.Union[
typing.Union[bool, int],
typing.Sequence[typing.Union[bool, int]]
],
prediction: typing.Union[
typing.Union[bool, int, float],
typing.Sequence[typing.Union[bool, int, float]]
],
) -> typing.Tuple[float, namedtuple]:
r"""Equal error rate for verification tasks.
The equal error rate (EER) is the point
where false non-match rate (FNMR)
and the impostors or false match rate (FMR)
are identical.
The FNMR indicates
how often an enrolled speaker was missed.
The FMR indicates
how often an impostor was verified as the enrolled speaker.
In practice the score distribution is not continuous
and an interval is returned instead.
The EER value will be set as the midpoint
of this interval::footcite:`Maio2002`
.. math::
\text{EER} = \frac{
\min(\text{FNMR}[t], \text{FMR}[t])
+ \max(\text{FNMR}[t], \text{FMR}[t])
}{2}
with :math:`t = \text{argmin}(|\text{FNMR} - \text{FMR}|)`.
``truth`` may only contain entries like ``[1, 0, True, False...]``,
whereas prediction values
can also contain similarity scores, e.g. ``[0.8, 0.1, ...]``.
The implementation is identical with the one provided
by the pyeer_ package.
.. footbibliography::
.. _pyeer: https://github.com/manuelaguadomtz/pyeer
Args:
truth: ground truth classes
prediction: predicted classes or similarity scores
Returns:
* equal error rate (EER)
* namedtuple containing
``fmr``,
``fnmr``,
``thresholds``,
``threshold``
whereas the last one corresponds to the threshold
corresponding to the returned EER
Raises:
ValueError: if ``truth`` contains values
different from ``1, 0, True, False``
Example:
>>> truth = [0, 1, 0, 1, 0]
>>> prediction = [0.2, 0.8, 0.4, 0.5, 0.5]
>>> eer, stats = equal_error_rate(truth, prediction)
>>> eer
0.16666666666666666
>>> stats.threshold
0.5
"""
Stats = namedtuple(
'stats',
[
'fmr', # False match rates (FMR)
'fnmr', # False non-match rates (FNMR)
'thresholds', # Thresholds
'threshold', # verification threshold for EER
],
)
fmr, fnmr, thresholds = detection_error_tradeoff(truth, prediction)
diff = fmr - fnmr
# t1 and t2 are our time indices
t2 = np.where(diff <= 0)[0]
if len(t2) > 0:
t2 = t2[0]
else:
warnings.warn(
'The false | |
    def source_defines_messageID(self):
        """True if this message is :attr:`CON<Type_CON>` or :attr:`NON<Type_NON>`.
        :attr:`CON<Type_CON>` and :attr:`NON<Type_NON>` messages are
        responsible for selecting a :attr:`messageID` at the
        :attr:`source_endpoint`.
        :attr:`ACK<Type_ACK>` and :attr:`RST<Type_RST>` messages are
        message-level responses to a :attr:`messageID` that was
        selected by their :attr:`destination_endpoint`.
        """
        # Delegate to the type-level predicate using this message's type code.
        return self.source_originates_type(self.__type)
def is_confirmable(self):
    """True for a :coapsect:`confirmable<2.1>` message.

    Such a message is :coapsect:`retransmitted<4.2>` for
    reliability, and an acknowledgement or reset is expected.
    """
    return self.__type == self.Type_CON
def is_non_confirmable(self):
    """True for a :coapsect:`non-confirmable<2.1>` message.

    The CoAP layer :coapsect:`will not retransmit<4.3>` such a
    message, and no acknowledgement is expected.
    """
    return self.__type == self.Type_NON
def is_acknowledgement(self):
    """True if this message is an :coapsect:`acknowledgement<1.2>`
    of receipt of a particular confirmable message identified by
    :attr:`messageID`.
    """
    return self.__type == self.Type_ACK
def is_reset(self):
    """True if this message signals that a particular message with
    :attr:`messageID` arrived but the receiver could not process it.
    """
    return self.__type == self.Type_RST
def _get_type(self):
    """The message type: one of :attr:`Type_CON`, :attr:`Type_NON`,
    :attr:`Type_ACK`, or :attr:`Type_RST`.  This is a read-only
    attribute.
    """
    return self.__type

messageType = property(_get_type)
def _get_type_name(self):
    """The message type as a three-letter descriptive name
    (:attr:`CON<Type_CON>`, :attr:`NON<Type_NON>`,
    :attr:`ACK<Type_ACK>`, :attr:`RST<Type_RST>`).  This is a
    read-only attribute.
    """
    # The numeric type codes index this table directly.
    names = ('CON', 'NON', 'ACK', 'RST')
    return names[self.__type]

messageTypeName = property(_get_type_name)
@staticmethod
def code_as_tuple(code):
"""Validate *code* and return it as a ``(class, detail)`` tuple."""
if isinstance(code, tuple):
if 2 != len(code):
raise ValueError(code)
(clazz, detail) = code
if not (0 <= clazz and clazz <= 7):
raise ValueError(code)
if not (0 <= detail and detail <= 31):
raise ValueError(code)
elif isinstance(code, int):
if (0 > code) or (255 < code):
raise ValueError(code)
code = (code >> 5, code & 0x1F)
else:
raise TypeError(code)
return code
@staticmethod
def code_as_integer(code):
"""Validate *code* and return it as an integer.
The packed encoding of ``(class, detail)`` has the 3-bit code
class combined with the 5-bit code detail, as: ``(class << 5)
| detail``.
"""
(clazz, detail) = Message.code_as_tuple(code)
return (clazz << 5) | detail
def _get_code(self):
    """The message code as a tuple ``(class, detail)``, where
    *class* is an integer in 0..7 and *detail* an integer in 0..31.

    A code of ``None`` is permitted only on a raw :class:`Message`;
    a valid code must be assigned before the message may be
    transmitted.

    For convenience the code may also be assigned in its packed
    form ``(class << 5) | detail``.  Decimal representations such
    as ``4.03`` are not supported.
    """
    return self.__code

def _set_code(self, code):
    # Normalize packed-integer input to the canonical tuple form.
    self.__code = self.code_as_tuple(code)

code = property(_get_code, _set_code)
def _get_packed_code(self):
    """:attr:`code` in its packed form, as an unsigned 8-bit integer.

    Raises :exc:`ValueError<python:exceptions.ValueError>` if
    :attr:`code` has not been assigned.
    """
    code = self.__code
    if code is None:
        raise ValueError(None)
    return self.code_as_integer(code)

packed_code = property(_get_packed_code)
def _get_messageID(self):
    """An integer in 0..65535 uniquely identifying a confirmable or
    non-confirmable message among those recently transmitted by its
    sender.

    The value correlates confirmable and non-confirmable messages
    with acknowledgement and reset messages; it plays no role in
    request/response correlation.
    """
    return self.__messageID

def _set_messageID(self, message_id):
    if not isinstance(message_id, int):
        raise TypeError(message_id)
    if not (0 <= message_id <= 65535):
        raise ValueError(message_id)
    self.__messageID = message_id

messageID = property(_get_messageID, _set_messageID)
def _get_token(self):
    """The :coapsect:`token<5.3.1>` associated with the message,
    used to :coapsect:`match<5.3.2>` requests with responses.

    Must be a :class:`bytes` instance of at most 8 octets.
    """
    return self.__token

def _set_token(self, token):
    if not isinstance(token, bytes):
        raise TypeError(token)
    if 8 < len(token):
        raise ValueError(token)
    self.__token = token

token = property(_get_token, _set_token)
def _get_options(self):
    """The list of :coapsect:`options<5.10>` associated with the
    message.

    An empty list denotes the absence of options.  Elements must be
    :class:`coapy.option.UrOption` (subclass) instances.  The list
    object is owned by this message: assignment replaces its
    contents, and operations performed on the message may stably
    re-sort it by option
    :attr:`number<coapy.option.UrOption.number>` as needed.
    """
    return self.__options

def _set_options(self, value):
    # Slice-assign so the list identity (owned by the message) is kept.
    self.__options[:] = coapy.option.sorted_options(value)

def _sort_options(self):
    """Sort the :attr:`options` list in place and return it."""
    self.__options[:] = coapy.option.sorted_options(self.__options)
    return self.__options

options = property(_get_options, _set_options)
def maxAge(self):
    """Return the :coapsect:`Max-Age<5.6.1>` value for the message.

    This is the value of the :meth:`coapy.option.MaxAge` option when
    present, or its default of 60 (seconds) when absent.  ``None``
    is returned when the message is not a :class:`Response`, the
    only message kind in which :meth:`coapy.option.MaxAge` may
    appear.
    """
    if not isinstance(self, Response):
        return None
    opt = coapy.option.MaxAge.first_match(self.options)
    return 60 if opt is None else opt.value
def _get_payload(self):
    """The payload (content) of the message.

    This is either ``None`` (no payload) or a non-empty
    :class:`bytes` instance.  As a convenience, assigning an empty
    :class:`bytes` string is equivalent to assigning ``None``.

    The representation of the payload should be conveyed by a
    :class:`ContentFormat<coapy.option.ContentFormat>` option.
    """
    return self.__payload

def _set_payload(self, payload):
    if payload is not None:
        if not isinstance(payload, bytes):
            raise TypeError(payload)
        if len(payload) == 0:
            # Canonical representation of "no payload" is None.
            payload = None
    self.__payload = payload

payload = property(_get_payload, _set_payload)
def __init__(self, confirmable=False, acknowledgement=False, reset=False,
             code=None, messageID=None, token=None, options=None, payload=None):
    """Initialize a new message.

    The message type is derived from the first true value among
    *confirmable*, *acknowledgement*, and *reset* (checked in that
    order); when none is set the message is non-confirmable.  The
    remaining keywords, when not ``None``, are assigned through the
    corresponding validating properties.
    """
    if confirmable:
        message_type = self.Type_CON
    elif acknowledgement:
        message_type = self.Type_ACK
    elif reset:
        message_type = self.Type_RST
    else:
        message_type = self.Type_NON
    self.__type = message_type
    # Start each attribute in its unset state, then route any
    # supplied value through the property so validation runs.
    self.__code = None
    if code is not None:
        self.code = code
    self.__messageID = None
    if messageID is not None:
        self.messageID = messageID
    self.__token = b''
    if token is not None:
        self.token = token
    self.__options = []
    if options is not None:
        self.options = options
    self.payload = payload
def to_packed(self):
    """Serialize the message per :coapsect:`3` and return the packed
    representation as a :class:`bytes` instance.
    """
    # First octet: version 1 in bits 7-6, type in bits 5-4,
    # token length in bits 3-0.
    vttkl = (1 << 6) | (self.__type << 4) | (0x0F & len(self.__token))
    pieces = [struct.pack(str('!BBH'), vttkl, self.packed_code, self.messageID)]
    pieces.append(self.__token)
    if self.options:
        pieces.append(coapy.option.encode_options(self.options))
    if self.__payload:
        # A non-empty payload is preceded by the payload marker octet.
        pieces.append(b'\xFF')
        pieces.append(self.__payload)
    return b''.join(pieces)
@classmethod
def from_packed(cls, packed_message):
"""Create a :class:`Message` (or subclass) instance from the
packed representation of a message, per :coapsect:`3`.
This will return ``None`` if the first four octets cannot be
successfully decoded; such messages should be silently ignored.
It will raise a :exc:`MessageFormatError` when
:attr:`type<messageType>`, :attr:`code` and :attr:`messageID`
information can be extracted but the message as a whole is
malformatted. :coapsect:`4` specifies the receiver MUST
(:attr:`CON<Type_CON>`) or may (:attr:`NON<Type_NON>`) or MUST
NOT (:attr:`ACK<Type_ACK>`, :attr:`RST<Type_RST>`) reply with
a Reset message, and otherwise the message is ignored (from a
protocol perspective; the receiver may use the failure as a
cue to perform some other action; see :coapsect:`5.7.1` for
example).
Otherwise it will return an instance of :class:`Message` or a
refined subclass based on the :attr:`code` within the packed
representation.
"""
if not isinstance(packed_message, bytes):
raise TypeError(packed_message)
data = bytearray(packed_message)
vttkl = data.pop(0)
ver = (vttkl >> 6)
if ver != cls.Ver:
# 3: Unknown version number: silently ignore
return None
message_type = 0x03 & (vttkl >> 4)
tkl = 0x0F & vttkl
code = cls.code_as_tuple(data.pop(0))
message_id = data.pop(0)
message_id = (message_id << 8) | data.pop(0)
dkw = {'type': message_type,
'code': code,
'messageID': message_id}
if 9 <= tkl:
raise MessageFormatError(MessageFormatError.TOKEN_TOO_LONG, dkw)
if ((cls.Empty == code) and ((0 != tkl) or (0 < len(data)))):
raise MessageFormatError(MessageFormatError.EMPTY_MESSAGE_NOT_EMPTY, dkw)
token = bytes(data[:tkl])
if 0 < tkl:
data[:tkl] = b''
try:
(options, remainder) = coapy.option.decode_options(bytes(data))
except coapy.option.OptionDecodeError as e:
# This can be an invalid delta or length in the first byte,
# or a value field that does not conform to the requirements.
# @todo@ refine this
raise MessageFormatError(MessageFormatError.INVALID_OPTION, dkw)
payload = None
if 0 < len(remainder):
data = bytearray(remainder)
if 0xFF != data[0]:
# This should have been interpreted as an option decode error
raise MessageFormatError(MessageFormatError.INVALID_OPTION, dkw)
payload = remainder[1:]
if 0 == len(payload):
raise MessageFormatError(MessageFormatError.ZERO_LENGTH_PAYLOAD, dkw)
kw = {'confirmable': (cls.Type_CON == message_type),
'acknowledgement': (cls.Type_ACK == message_type),
'reset': | |
- 3.92092028715294E-11*m.x623 - 5.55124997142466E-9*m.x624
- 6.71129331537853E-8*m.x625 - 5.55124997142466E-9*m.x626 - 3.92092028715291E-11*m.x627
- 2.7693971587199E-13*m.x628 - 3.92092028715291E-11*m.x629 - 4.74027403715494E-10*m.x630
- 3.92092028715291E-11*m.x631 - 2.76939715871989E-13*m.x632 - 3.16502532425133E-13*m.x633
- 4.4810517567462E-11*m.x634 - 5.41745604246281E-10*m.x635 - 4.4810517567462E-11*m.x636
- 3.16502532425131E-13*m.x637 - 4.48105175674623E-11*m.x638 - 6.3442856816282E-9*m.x639
- 7.67004950328977E-8*m.x640 - 6.3442856816282E-9*m.x641 - 4.48105175674622E-11*m.x642
- 5.41745604246284E-10*m.x643 - 7.67004950328977E-8*m.x644 - 9.27285786535036E-7*m.x645
- 7.67004950328977E-8*m.x646 - 5.41745604246283E-10*m.x647 - 4.48105175674623E-11*m.x648
- 6.3442856816282E-9*m.x649 - 7.67004950328977E-8*m.x650 - 6.3442856816282E-9*m.x651
- 4.48105175674622E-11*m.x652 - 3.16502532425133E-13*m.x653 - 4.4810517567462E-11*m.x654
- 5.41745604246281E-10*m.x655 - 4.4810517567462E-11*m.x656 - 3.16502532425131E-13*m.x657
- 4.48105175674618E-11*m.x658 - 6.34428568162813E-9*m.x659 - 7.67004950328969E-8*m.x660
- 6.34428568162813E-9*m.x661 - 4.48105175674615E-11*m.x662 - 6.34428568162818E-9*m.x663
- 8.98225751343221E-7*m.x664 - 1.0859277661286E-5*m.x665 - 8.98225751343221E-7*m.x666
- 6.34428568162815E-9*m.x667 - 7.67004950328975E-8*m.x668 - 1.0859277661286E-5*m.x669
- 0.000131285382487152*m.x670 - 1.0859277661286E-5*m.x671 - 7.6700495032897E-8*m.x672
- 6.34428568162818E-9*m.x673 - 8.98225751343221E-7*m.x674 - 1.0859277661286E-5*m.x675
- 8.98225751343221E-7*m.x676 - 6.34428568162815E-9*m.x677 - 4.48105175674618E-11*m.x678
- 6.34428568162813E-9*m.x679 - 7.67004950328969E-8*m.x680 - 6.34428568162813E-9*m.x681
- 4.48105175674615E-11*m.x682 - 5.41745604246279E-10*m.x683 - 7.6700495032897E-8*m.x684
- 9.27285786535027E-7*m.x685 - 7.6700495032897E-8*m.x686 - 5.41745604246277E-10*m.x687
- 7.67004950328975E-8*m.x688 - 1.0859277661286E-5*m.x689 - 0.000131285382487152*m.x690
- 1.0859277661286E-5*m.x691 - 7.67004950328972E-8*m.x692 - 9.27285786535034E-7*m.x693
- 0.000131285382487153*m.x694 - 0.00158720056641013*m.x695 - 0.000131285382487153*m.x696
- 9.27285786535029E-7*m.x697 - 7.67004950328975E-8*m.x698 - 1.0859277661286E-5*m.x699
- 0.000131285382487152*m.x700 - 1.0859277661286E-5*m.x701 - 7.67004950328972E-8*m.x702
- 5.41745604246279E-10*m.x703 - 7.6700495032897E-8*m.x704 - 9.27285786535027E-7*m.x705
- 7.6700495032897E-8*m.x706 - 5.41745604246277E-10*m.x707 - 4.48105175674618E-11*m.x708
- 6.34428568162813E-9*m.x709 - 7.67004950328969E-8*m.x710 - 6.34428568162813E-9*m.x711
- 4.48105175674615E-11*m.x712 - 6.34428568162818E-9*m.x713 - 8.98225751343221E-7*m.x714
- 1.0859277661286E-5*m.x715 - 8.98225751343221E-7*m.x716 - 6.34428568162815E-9*m.x717
- 7.67004950328975E-8*m.x718 - 1.0859277661286E-5*m.x719 - 0.000131285382487152*m.x720
- 1.0859277661286E-5*m.x721 - 7.6700495032897E-8*m.x722 - 6.34428568162818E-9*m.x723
- 8.98225751343221E-7*m.x724 - 1.0859277661286E-5*m.x725 - 8.98225751343221E-7*m.x726
- 6.34428568162815E-9*m.x727 - 4.48105175674618E-11*m.x728 - 6.34428568162813E-9*m.x729
- 7.67004950328969E-8*m.x730 - 6.34428568162813E-9*m.x731 - 4.48105175674615E-11*m.x732
- 3.16502532425133E-13*m.x733 - 4.4810517567462E-11*m.x734 - 5.41745604246281E-10*m.x735
- 4.4810517567462E-11*m.x736 - 3.16502532425131E-13*m.x737 - 4.48105175674623E-11*m.x738
- 6.3442856816282E-9*m.x739 - 7.67004950328977E-8*m.x740 - 6.3442856816282E-9*m.x741
- 4.48105175674622E-11*m.x742 - 5.41745604246284E-10*m.x743 - 7.67004950328977E-8*m.x744
- 9.27285786535036E-7*m.x745 - 7.67004950328977E-8*m.x746 - 5.41745604246283E-10*m.x747
- 4.48105175674623E-11*m.x748 - 6.3442856816282E-9*m.x749 - 7.67004950328977E-8*m.x750
- 6.3442856816282E-9*m.x751 - 4.48105175674622E-11*m.x752 - 3.16502532425133E-13*m.x753
- 4.4810517567462E-11*m.x754 - 5.41745604246281E-10*m.x755 - 4.4810517567462E-11*m.x756
- 3.16502532425131E-13*m.x757 - 4.4810517567462E-11*m.x758 - 6.34428568162816E-9*m.x759
- 7.67004950328972E-8*m.x760 - 6.34428568162816E-9*m.x761 - 4.48105175674618E-11*m.x762
- 6.3442856816282E-9*m.x763 - 8.98225751343224E-7*m.x764 - 1.0859277661286E-5*m.x765
- 8.98225751343224E-7*m.x766 - 6.34428568162816E-9*m.x767 - 7.67004950328977E-8*m.x768
- 1.0859277661286E-5*m.x769 - 0.000131285382487153*m.x770 - 1.0859277661286E-5*m.x771
- 7.67004950328975E-8*m.x772 - 6.3442856816282E-9*m.x773 - 8.98225751343224E-7*m.x774
- 1.0859277661286E-5*m.x775 - 8.98225751343224E-7*m.x776 - 6.34428568162816E-9*m.x777
- 4.4810517567462E-11*m.x778 - 6.34428568162816E-9*m.x779 - 7.67004950328972E-8*m.x780
- 6.34428568162816E-9*m.x781 - 4.48105175674618E-11*m.x782 - 6.34428568162813E-9*m.x783
- 8.98225751343215E-7*m.x784 - 1.08592776612859E-5*m.x785 - 8.98225751343215E-7*m.x786
- 6.34428568162811E-9*m.x787 - 8.98225751343221E-7*m.x788 - 0.000127171054530608*m.x789
- 0.00153745958581266*m.x790 - 0.000127171054530608*m.x791 - 8.98225751343218E-7*m.x792
- 1.0859277661286E-5*m.x793 - 0.00153745958581266*m.x794 - 0.0185874213808482*m.x795
- 0.00153745958581266*m.x796 - 1.08592776612859E-5*m.x797 - 8.98225751343221E-7*m.x798
- 0.000127171054530608*m.x799 - 0.00153745958581266*m.x800 - 0.000127171054530608*m.x801
- 8.98225751343218E-7*m.x802 - 6.34428568162813E-9*m.x803 - 8.98225751343215E-7*m.x804
- 1.08592776612859E-5*m.x805 - 8.98225751343215E-7*m.x806 - 6.34428568162811E-9*m.x807
- 7.67004950328972E-8*m.x808 - 1.08592776612859E-5*m.x809 - 0.000131285382487152*m.x810
- 1.08592776612859E-5*m.x811 - 7.67004950328968E-8*m.x812 - 1.0859277661286E-5*m.x813
- 0.00153745958581267*m.x814 - 0.0185874213808482*m.x815 - 0.00153745958581267*m.x816
- 1.0859277661286E-5*m.x817 - 0.000131285382487153*m.x818 - 0.0185874213808483*m.x819
- 0.224716302644531*m.x820 - 0.0185874213808483*m.x821 - 0.000131285382487152*m.x822
- 1.0859277661286E-5*m.x823 - 0.00153745958581267*m.x824 - 0.0185874213808482*m.x825
- 0.00153745958581267*m.x826 - 1.0859277661286E-5*m.x827 - 7.67004950328972E-8*m.x828
- 1.08592776612859E-5*m.x829 - 0.000131285382487152*m.x830 - 1.08592776612859E-5*m.x831
- 7.67004950328968E-8*m.x832 - 6.34428568162813E-9*m.x833 - 8.98225751343215E-7*m.x834
- 1.08592776612859E-5*m.x835 - 8.98225751343215E-7*m.x836 - 6.34428568162811E-9*m.x837
- 8.98225751343221E-7*m.x838 - 0.000127171054530608*m.x839 - 0.00153745958581266*m.x840
- 0.000127171054530608*m.x841 - 8.98225751343218E-7*m.x842 - 1.0859277661286E-5*m.x843
- 0.00153745958581266*m.x844 - 0.0185874213808482*m.x845 - 0.00153745958581266*m.x846
- 1.08592776612859E-5*m.x847 - 8.98225751343221E-7*m.x848 - 0.000127171054530608*m.x849
- 0.00153745958581266*m.x850 - 0.000127171054530608*m.x851 - 8.98225751343218E-7*m.x852
- 6.34428568162813E-9*m.x853 - 8.98225751343215E-7*m.x854 - 1.08592776612859E-5*m.x855
- 8.98225751343215E-7*m.x856 - 6.34428568162811E-9*m.x857 - 4.4810517567462E-11*m.x858
- 6.34428568162816E-9*m.x859 - 7.67004950328972E-8*m.x860 - 6.34428568162816E-9*m.x861
- 4.48105175674618E-11*m.x862 - 6.3442856816282E-9*m.x863 - 8.98225751343224E-7*m.x864
- 1.0859277661286E-5*m.x865 - 8.98225751343224E-7*m.x866 - 6.34428568162816E-9*m.x867
- 7.67004950328977E-8*m.x868 - 1.0859277661286E-5*m.x869 - 0.000131285382487153*m.x870
- 1.0859277661286E-5*m.x871 - 7.67004950328975E-8*m.x872 - 6.3442856816282E-9*m.x873
- 8.98225751343224E-7*m.x874 - 1.0859277661286E-5*m.x875 - 8.98225751343224E-7*m.x876
- 6.34428568162816E-9*m.x877 - 4.4810517567462E-11*m.x878 - 6.34428568162816E-9*m.x879
- 7.67004950328972E-8*m.x880 - 6.34428568162816E-9*m.x881 - 4.48105175674618E-11*m.x882
- 5.41745604246281E-10*m.x883 - 7.67004950328972E-8*m.x884 - 9.27285786535029E-7*m.x885
- 7.67004950328972E-8*m.x886 - 5.41745604246279E-10*m.x887 - 7.67004950328977E-8*m.x888
- 1.0859277661286E-5*m.x889 - 0.000131285382487153*m.x890 - 1.0859277661286E-5*m.x891
- 7.67004950328973E-8*m.x892 - 9.27285786535036E-7*m.x893 - 0.000131285382487153*m.x894
- 0.00158720056641013*m.x895 - 0.000131285382487153*m.x896 - 9.27285786535032E-7*m.x897
- 7.67004950328977E-8*m.x898 - 1.0859277661286E-5*m.x899 - 0.000131285382487153*m.x900
- 1.0859277661286E-5*m.x901 - 7.67004950328973E-8*m.x902 - 5.41745604246281E-10*m.x903
- 7.67004950328972E-8*m.x904 - 9.27285786535029E-7*m.x905 - 7.67004950328972E-8*m.x906
- 5.41745604246279E-10*m.x907 - 7.67004950328969E-8*m.x908 - 1.08592776612859E-5*m.x909
- 0.000131285382487151*m.x910 - 1.08592776612859E-5*m.x911 - 7.67004950328965E-8*m.x912
- 1.0859277661286E-5*m.x913 - 0.00153745958581266*m.x914 - 0.0185874213808482*m.x915
- 0.00153745958581266*m.x916 - 1.08592776612859E-5*m.x917 - 0.000131285382487152*m.x918
- 0.0185874213808482*m.x919 - 0.22471630264453*m.x920 - 0.0185874213808482*m.x921
- 0.000131285382487152*m.x922 - 1.0859277661286E-5*m.x923 - 0.00153745958581266*m.x924
- 0.0185874213808482*m.x925 - 0.00153745958581266*m.x926 - 1.08592776612859E-5*m.x927
- 7.67004950328969E-8*m.x928 - 1.08592776612859E-5*m.x929 - 0.000131285382487151*m.x930
- 1.08592776612859E-5*m.x931 - 7.67004950328965E-8*m.x932 - 9.27285786535027E-7*m.x933
- 0.000131285382487152*m.x934 - 0.00158720056641011*m.x935 - 0.000131285382487152*m.x936
- 9.27285786535022E-7*m.x937 - 0.000131285382487152*m.x938 - 0.0185874213808482*m.x939
- 0.224716302644531*m.x940 - 0.0185874213808482*m.x941 - 0.000131285382487152*m.x942
- 0.00158720056641013*m.x943 - 0.224716302644531*m.x944 - 2.71675213250715*m.x945
- 0.224716302644531*m.x946 - 0.00158720056641012*m.x947 - 0.000131285382487152*m.x948
- 0.0185874213808482*m.x949 - 0.224716302644531*m.x950 - 0.0185874213808482*m.x951
- 0.000131285382487152*m.x952 - 9.27285786535027E-7*m.x953 - 0.000131285382487152*m.x954
- 0.00158720056641011*m.x955 - 0.000131285382487152*m.x956 - 9.27285786535022E-7*m.x957
- 7.67004950328969E-8*m.x958 - 1.08592776612859E-5*m.x959 - 0.000131285382487151*m.x960
- 1.08592776612859E-5*m.x961 - 7.67004950328965E-8*m.x962 - 1.0859277661286E-5*m.x963
- 0.00153745958581266*m.x964 - 0.0185874213808482*m.x965 - 0.00153745958581266*m.x966
- 1.08592776612859E-5*m.x967 - 0.000131285382487152*m.x968 - 0.0185874213808482*m.x969
- 0.22471630264453*m.x970 - 0.0185874213808482*m.x971 - 0.000131285382487152*m.x972
- 1.0859277661286E-5*m.x973 - 0.00153745958581266*m.x974 - 0.0185874213808482*m.x975
- 0.00153745958581266*m.x976 - 1.08592776612859E-5*m.x977 - 7.67004950328969E-8*m.x978
- 1.08592776612859E-5*m.x979 - 0.000131285382487151*m.x980 - 1.08592776612859E-5*m.x981
- 7.67004950328965E-8*m.x982 - 5.41745604246281E-10*m.x983 - 7.67004950328972E-8*m.x984
- 9.27285786535029E-7*m.x985 - 7.67004950328972E-8*m.x986 - 5.41745604246279E-10*m.x987
- 7.67004950328977E-8*m.x988 - 1.0859277661286E-5*m.x989 - 0.000131285382487153*m.x990
- 1.0859277661286E-5*m.x991 - 7.67004950328973E-8*m.x992 - 9.27285786535036E-7*m.x993
- 0.000131285382487153*m.x994 - 0.00158720056641013*m.x995 - 0.000131285382487153*m.x996
- 9.27285786535032E-7*m.x997 - 7.67004950328977E-8*m.x998 - 1.0859277661286E-5*m.x999
- 0.000131285382487153*m.x1000 - 1.0859277661286E-5*m.x1001 - 7.67004950328973E-8*m.x1002
- 5.41745604246281E-10*m.x1003 - 7.67004950328972E-8*m.x1004 - 9.27285786535029E-7*m.x1005
- 7.67004950328972E-8*m.x1006 - 5.41745604246279E-10*m.x1007 - 4.4810517567462E-11*m.x1008
- 6.34428568162816E-9*m.x1009 - 7.67004950328972E-8*m.x1010 - 6.34428568162816E-9*m.x1011
- 4.48105175674618E-11*m.x1012 - 6.3442856816282E-9*m.x1013 - 8.98225751343224E-7*m.x1014
- 1.0859277661286E-5*m.x1015 - 8.98225751343224E-7*m.x1016 - 6.34428568162816E-9*m.x1017
- 7.67004950328977E-8*m.x1018 - 1.0859277661286E-5*m.x1019 - 0.000131285382487153*m.x1020
- 1.0859277661286E-5*m.x1021 - 7.67004950328975E-8*m.x1022 - 6.3442856816282E-9*m.x1023
- 8.98225751343224E-7*m.x1024 - 1.0859277661286E-5*m.x1025 - 8.98225751343224E-7*m.x1026
- 6.34428568162816E-9*m.x1027 - 4.4810517567462E-11*m.x1028 - 6.34428568162816E-9*m.x1029
- 7.67004950328972E-8*m.x1030 - 6.34428568162816E-9*m.x1031 - 4.48105175674618E-11*m.x1032
- 6.34428568162813E-9*m.x1033 - 8.98225751343215E-7*m.x1034 - 1.08592776612859E-5*m.x1035
- 8.98225751343215E-7*m.x1036 - 6.34428568162811E-9*m.x1037 - 8.98225751343221E-7*m.x1038
- 0.000127171054530608*m.x1039 - 0.00153745958581266*m.x1040 - 0.000127171054530608*m.x1041
- 8.98225751343218E-7*m.x1042 - 1.0859277661286E-5*m.x1043 - 0.00153745958581266*m.x1044
- 0.0185874213808482*m.x1045 - 0.00153745958581266*m.x1046 - 1.08592776612859E-5*m.x1047
- 8.98225751343221E-7*m.x1048 - 0.000127171054530608*m.x1049 - 0.00153745958581266*m.x1050
- 0.000127171054530608*m.x1051 - 8.98225751343218E-7*m.x1052 - 6.34428568162813E-9*m.x1053
- 8.98225751343215E-7*m.x1054 - 1.08592776612859E-5*m.x1055 - 8.98225751343215E-7*m.x1056
- 6.34428568162811E-9*m.x1057 - 7.67004950328972E-8*m.x1058 - 1.08592776612859E-5*m.x1059
- 0.000131285382487152*m.x1060 - 1.08592776612859E-5*m.x1061 - 7.67004950328968E-8*m.x1062
- 1.0859277661286E-5*m.x1063 - 0.00153745958581267*m.x1064 - 0.0185874213808482*m.x1065
- 0.00153745958581267*m.x1066 - 1.0859277661286E-5*m.x1067 - 0.000131285382487153*m.x1068
- 0.0185874213808483*m.x1069 - 0.224716302644531*m.x1070 - 0.0185874213808483*m.x1071
- 0.000131285382487152*m.x1072 - 1.0859277661286E-5*m.x1073 - 0.00153745958581267*m.x1074
- 0.0185874213808482*m.x1075 - 0.00153745958581267*m.x1076 - 1.0859277661286E-5*m.x1077
- 7.67004950328972E-8*m.x1078 - 1.08592776612859E-5*m.x1079 - 0.000131285382487152*m.x1080
- 1.08592776612859E-5*m.x1081 - 7.67004950328968E-8*m.x1082 - 6.34428568162813E-9*m.x1083
- 8.98225751343215E-7*m.x1084 - 1.08592776612859E-5*m.x1085 - 8.98225751343215E-7*m.x1086
- 6.34428568162811E-9*m.x1087 - 8.98225751343221E-7*m.x1088 - 0.000127171054530608*m.x1089
- 0.00153745958581266*m.x1090 - 0.000127171054530608*m.x1091 - 8.98225751343218E-7*m.x1092
- 1.0859277661286E-5*m.x1093 - 0.00153745958581266*m.x1094 - 0.0185874213808482*m.x1095
- 0.00153745958581266*m.x1096 - 1.08592776612859E-5*m.x1097 - 8.98225751343221E-7*m.x1098
- 0.000127171054530608*m.x1099 - 0.00153745958581266*m.x1100 - 0.000127171054530608*m.x1101
- 8.98225751343218E-7*m.x1102 - 6.34428568162813E-9*m.x1103 - 8.98225751343215E-7*m.x1104
- 1.08592776612859E-5*m.x1105 - 8.98225751343215E-7*m.x1106 - 6.34428568162811E-9*m.x1107
- 4.4810517567462E-11*m.x1108 - 6.34428568162816E-9*m.x1109 - 7.67004950328972E-8*m.x1110
- 6.34428568162816E-9*m.x1111 - 4.48105175674618E-11*m.x1112 - 6.3442856816282E-9*m.x1113
- 8.98225751343224E-7*m.x1114 - 1.0859277661286E-5*m.x1115 - 8.98225751343224E-7*m.x1116
- 6.34428568162816E-9*m.x1117 - 7.67004950328977E-8*m.x1118 - 1.0859277661286E-5*m.x1119
- 0.000131285382487153*m.x1120 - 1.0859277661286E-5*m.x1121 - 7.67004950328975E-8*m.x1122
- 6.3442856816282E-9*m.x1123 - 8.98225751343224E-7*m.x1124 - 1.0859277661286E-5*m.x1125
- 8.98225751343224E-7*m.x1126 - 6.34428568162816E-9*m.x1127 - 4.4810517567462E-11*m.x1128
- 6.34428568162816E-9*m.x1129 - 7.67004950328972E-8*m.x1130 - 6.34428568162816E-9*m.x1131
- 4.48105175674618E-11*m.x1132 - 3.16502532425132E-13*m.x1133 - 4.48105175674618E-11*m.x1134
- 5.41745604246279E-10*m.x1135 - 4.48105175674618E-11*m.x1136 - 3.16502532425131E-13*m.x1137
- 4.48105175674622E-11*m.x1138 - 6.34428568162818E-9*m.x1139 - 7.67004950328975E-8*m.x1140
- 6.34428568162818E-9*m.x1141 - 4.48105175674618E-11*m.x1142 - 5.41745604246283E-10*m.x1143
- 7.67004950328975E-8*m.x1144 - 9.27285786535032E-7*m.x1145 - 7.67004950328975E-8*m.x1146
- 5.41745604246279E-10*m.x1147 - 4.48105175674622E-11*m.x1148 - 6.34428568162818E-9*m.x1149
- 7.67004950328975E-8*m.x1150 - 6.34428568162818E-9*m.x1151 - 4.48105175674618E-11*m.x1152
- 3.16502532425132E-13*m.x1153 - 4.48105175674618E-11*m.x1154 - 5.41745604246279E-10*m.x1155
- 4.48105175674618E-11*m.x1156 - 3.16502532425131E-13*m.x1157 - 4.48105175674617E-11*m.x1158
- 6.34428568162811E-9*m.x1159 - 7.67004950328966E-8*m.x1160 - 6.34428568162811E-9*m.x1161
- 4.48105175674615E-11*m.x1162 - 6.34428568162815E-9*m.x1163 - 8.98225751343218E-7*m.x1164
- 1.08592776612859E-5*m.x1165 - 8.98225751343218E-7*m.x1166 - 6.34428568162811E-9*m.x1167
- 7.67004950328972E-8*m.x1168 - 1.08592776612859E-5*m.x1169 - 0.000131285382487152*m.x1170
- 1.08592776612859E-5*m.x1171 - 7.67004950328968E-8*m.x1172 - 6.34428568162815E-9*m.x1173
- 8.98225751343218E-7*m.x1174 - 1.08592776612859E-5*m.x1175 - 8.98225751343218E-7*m.x1176
- 6.34428568162811E-9*m.x1177 - 4.48105175674617E-11*m.x1178 - 6.34428568162811E-9*m.x1179
- 7.67004950328966E-8*m.x1180 - 6.34428568162811E-9*m.x1181 - 4.48105175674615E-11*m.x1182
- 5.41745604246277E-10*m.x1183 - 7.67004950328966E-8*m.x1184 - 9.27285786535022E-7*m.x1185
- 7.67004950328966E-8*m.x1186 - 5.41745604246275E-10*m.x1187 - 7.67004950328972E-8*m.x1188
- 1.08592776612859E-5*m.x1189 - 0.000131285382487152*m.x1190 - 1.08592776612859E-5*m.x1191
- 7.67004950328968E-8*m.x1192 - 9.27285786535029E-7*m.x1193 - 0.000131285382487152*m.x1194
- 0.00158720056641012*m.x1195 - 0.000131285382487152*m.x1196 - 9.27285786535026E-7*m.x1197
- 7.67004950328972E-8*m.x1198 - 1.08592776612859E-5*m.x1199 - 0.000131285382487152*m.x1200
- 1.08592776612859E-5*m.x1201 - 7.67004950328968E-8*m.x1202 - 5.41745604246277E-10*m.x1203
- 7.67004950328966E-8*m.x1204 - 9.27285786535022E-7*m.x1205 - 7.67004950328966E-8*m.x1206
- 5.41745604246275E-10*m.x1207 - 4.48105175674617E-11*m.x1208 - 6.34428568162811E-9*m.x1209
- 7.67004950328966E-8*m.x1210 - 6.34428568162811E-9*m.x1211 - 4.48105175674615E-11*m.x1212
- 6.34428568162815E-9*m.x1213 - 8.98225751343218E-7*m.x1214 - 1.08592776612859E-5*m.x1215
- 8.98225751343218E-7*m.x1216 - 6.34428568162811E-9*m.x1217 - 7.67004950328972E-8*m.x1218
- 1.08592776612859E-5*m.x1219 - 0.000131285382487152*m.x1220 - 1.08592776612859E-5*m.x1221
- 7.67004950328968E-8*m.x1222 - 6.34428568162815E-9*m.x1223 - 8.98225751343218E-7*m.x1224
- 1.08592776612859E-5*m.x1225 - 8.98225751343218E-7*m.x1226 - 6.34428568162811E-9*m.x1227
- 4.48105175674617E-11*m.x1228 - 6.34428568162811E-9*m.x1229 - 7.67004950328966E-8*m.x1230
- 6.34428568162811E-9*m.x1231 - 4.48105175674615E-11*m.x1232 - 3.16502532425132E-13*m.x1233
- 4.48105175674618E-11*m.x1234 - 5.41745604246279E-10*m.x1235 - 4.48105175674618E-11*m.x1236
- 3.16502532425131E-13*m.x1237 - 4.48105175674622E-11*m.x1238 - 6.34428568162818E-9*m.x1239
- 7.67004950328975E-8*m.x1240 - 6.34428568162818E-9*m.x1241 - 4.48105175674618E-11*m.x1242
- 5.41745604246283E-10*m.x1243 - 7.67004950328975E-8*m.x1244 - 9.27285786535032E-7*m.x1245
- 7.67004950328975E-8*m.x1246 - 5.41745604246279E-10*m.x1247 - 4.48105175674622E-11*m.x1248
- 6.34428568162818E-9*m.x1249 - 7.67004950328975E-8*m.x1250 - 6.34428568162818E-9*m.x1251
- 4.48105175674618E-11*m.x1252 - 3.16502532425132E-13*m.x1253 - 4.48105175674618E-11*m.x1254
- 5.41745604246279E-10*m.x1255 - 4.48105175674618E-11*m.x1256 - 3.16502532425131E-13*m.x1257
- 2.3737689931885E-13*m.x1258 - 3.36078881755965E-11*m.x1259 - 4.06309203184711E-10*m.x1260
- 3.36078881755965E-11*m.x1261 - 2.37376899318848E-13*m.x1262 | |
of (+) -> TTTG_24_58847416_24_58847448_137M10S_147M_fwd_R1
NB500964:12:HTTG2BGXX:4:22601:26270:1144|TTTG 99 24 58847416 17 137M10S 24 58847448 137
R2 of (+) -> TTTG_24_58847448_24_58847416_137M10S_147M_rev_R2
NB500964:12:HTTG2BGXX:4:22601:26270:1144|TTTG 147 24 58847448 17 147M 24 58847416 147
R1 of (-) -> TGTT_24_58847448_24_58847416_137M10S_147M_rev_R1
R2 of (-) -> TGTT_24_58847416_24_58847448_137M10S_147M_fwd_R2
"""
orientation = 'fwd'
if read.is_reverse:
orientation = 'rev'
readNum = which_read(read.flag)
# Unique identifier for strand of individual molecules
tag = '{}_{}_{}_{}_{}_{}_{}_{}'.format(barcode, # mol barcode
read.reference_id, # chr
read.reference_start, # start (0-based)
read.next_reference_id, # mate chr
read.next_reference_start, # mate start
cigar,
orientation, # strand direction
readNum
)
return tag
def read_bam(bamfile, pair_dict, read_dict, csn_pair_dict, tag_dict, badRead_bam, duplex,
             read_chr=None, read_start=None, read_end=None, barcode_delim=None):
    """(bamfile, dict, dict, dict, dict, bamfile, bool, str, int, int) ->
    dict, dict, dict, dict, int, int, int, int
    Iterate over reads of an uncollapsed BAM, filter out bad reads, pair mates
    by query name, and bucket each pair under a unique molecular tag.
    === Input ===
    - bamfile (pysam.AlignmentFile object): uncollapsed BAM file
    - pair_dict: dictionary of paired reads based on query name to process data in pairs
    - read_dict: dictionary of bamfile reads grouped by unique molecular tags
    - csn_pair_dict: dictionary of paired tags sharing the same consensus tag to track pairing
    - tag_dict: integer dictionary indicating number of reads in each read family
    {read_tag: 2, ..etc}
    - badRead_bam (pysam.AlignmentFile object): BAM file of "bad" reads (unmapped, multiple mapping)
    -- Optional --
    # For large bamfiles that are split into regions
    - read_chr (str): chromosome region to fetch reads
    - read_start (int): starting position to fetch reads
    - read_end (int): stopping position to fetch reads
    # For duplex consensus making
    - duplex: any string or bool [that is not None] specifying duplex consensus making [e.g. TRUE], necessary for
    parsing barcode as query name for Uncollapsed and SSCS differ
    # For bams with barcodes extracted by other software and placed into read name with different delimiters
    - barcode_delim (str): sequence before barcode (e.g. '|' for 'HWI-D00331:196:C900FANXX:7:1110:14056:43945|TTTT')
    === Output ===
    1) read_dict: dictionary of bamfile reads grouped by unique molecular tags
    Example: {read_tag: [<pysam.calignedsegment.AlignedSegment>, <pysam.calignedsegment.AlignedSegment>]}
    - Key: [Barcode]_[Read Chr]_[Read Start]_[Mate Chr]_[Mate Start]_[Cigar String]_[Strand]_[Orientation]_[ReadNum]
    - Value: List of reads (pysam object)
    2) tag_dict: integer dictionary indicating number of reads in each read family
    {read_tag: 2, ..etc}
    3) pair_dict: dictionary of paired reads based on query name to process data in pairs
    (note: this is a tmp dict as values are removed from dict once pair assigned to other dicts, this is
    important for retaining data from translocations or reads crossing bam division regions)
    Example: {query name: [read 1, read 2]}
    4) csn_pair_dict: dictionary of paired tags sharing the same consensus tag to track pairing (paired reads share the
    same query name/header)
    Example: {consensus_tag: [R1_tag, R2_tag]}
    5) counter: total number of reads (unmapped reads are subtracted back out below)
    6) unmapped_mate: number of reads whose mate is unmapped
    7) multiple_mapping: number of reads that not properly mapped
    - secondary reads: same sequence aligns to multiple locations
    - supplementary reads: multiple parts of sequence align to multiple locations
    8) bad_spacer: reads whose query name lacks the expected barcode delimiter
    NOTE(review): `unmapped` is counted internally but NOT returned, although the
    original docstring listed it -- confirm callers only expect the 8 values below.
    """
    # Fetch data given genome coordinates; until_eof walks the whole file when
    # no region was requested.
    if read_chr is None:
        bamLines = bamfile.fetch(until_eof=True)
    else:
        bamLines = bamfile.fetch(read_chr, read_start, read_end)
    # Initialize counters
    unmapped = 0
    unmapped_mate = 0
    multiple_mapping = 0  # secondary/supplementary reads
    counter = 0
    bad_spacer = 0
    for line in bamLines:
        # Parse out reads that don't fall within region
        if read_chr is not None:
            # pysam fetch will retrieve reads that fall outside region due to pairing (we filter out to prevent double
            # counting as we'll be fetching those reads again when we iterate through the next region)
            if line.reference_start < read_start or line.reference_start > read_end:
                continue
        counter += 1
        ######################
        #    Filter Reads    #
        ######################
        # === 1) FILTER OUT UNMAPPED / MULTIPLE MAPPING READS ===
        # SAM flag values with the mate-unmapped bit (0x8) set while the read
        # itself is mapped.
        mate_unmapped = [73, 89, 121, 153, 185, 137]
        badRead = True
        # Check if delimiter is found in read
        if barcode_delim is not None and barcode_delim not in line.qname:
            bad_spacer += 1
        elif line.is_unmapped:
            unmapped += 1
            counter -= 1  # unmapped reads are excluded from the total read count
        elif line.flag in mate_unmapped:
            unmapped_mate += 1
        elif line.is_secondary:
            multiple_mapping += 1
        elif line.is_supplementary:
            multiple_mapping += 1
        else:
            badRead = False
        # Write bad reads to file; good reads accumulate in pair_dict until
        # their mate arrives.
        if badRead and badRead_bam is not None:
            badRead_bam.write(line)
        else:
            pair_dict[line.qname].append(line)
            ######################
            #     Unique ID      #
            ######################
            # === 2) ASSIGN UNIQUE IDENTIFIER TO READ PAIRS ===
            # Both mates of the pair are now present; tag and file them.
            if len(pair_dict[line.qname]) == 2:
                read = pair_dict[line.qname][0]
                mate = pair_dict[line.qname][1]
                # === Create consensus identifier ===
                # Extract molecular barcode, barcodes in diff position for SSCS vs DCS generation
                if duplex == None or duplex == False:
                    # Uncollapsed/SSCS input: barcode is appended after a delimiter
                    if barcode_delim is None:
                        # SSCS query name: H1080:278:C8RE3ACXX:6:1308:18882:18072|CACT
                        barcode = read.qname.split("|")[1]
                    else:
                        barcode = read.qname.split(barcode_delim)[1]
                else:
                    # DCS query name: CCTG_12_25398000_12_25398118_neg:5
                    barcode = read.qname.split("_")[0]
                # Consensus_tag cigar (ordered by strand and read)
                cigar = cigar_order(read, mate)
                # Assign consensus tag as new query name for paired consensus reads
                consensus_tag = sscs_qname(read, mate, barcode, cigar)
                for i in range(2):
                    read_i = pair_dict[line.qname][i]
                    # Molecular identifier for grouping reads belonging to the same read of a strand of a molecule
                    tag = unique_tag(read_i, barcode, cigar)
                    ######################
                    #   Assign to Dict   #
                    ######################
                    # === 3) ADD READ PAIRS TO DICTIONARIES ===
                    if tag not in read_dict and tag not in tag_dict:
                        read_dict[tag] = [read_i]
                        # assumes tag_dict is a collections.defaultdict(int) -- TODO confirm
                        tag_dict[tag] += 1
                        # Group paired unique tags using consensus tag
                        if consensus_tag not in csn_pair_dict:
                            csn_pair_dict[consensus_tag] = [tag]
                        elif len(csn_pair_dict[consensus_tag]) == 2:
                            # Honestly this shouldn't happen anymore with these identifiers
                            print("Consensus tag NOT UNIQUE -> multiple tags (4) share same consensus tag [due to poor "
                                  "strand differentiation as a result of identifiers lacking complexity]")
                            print(consensus_tag)
                            print(tag)
                            print(read_i)
                            print(csn_pair_dict[consensus_tag])
                            print(read_dict[csn_pair_dict[consensus_tag][0]][0])
                            print(read_dict[csn_pair_dict[consensus_tag][1]][0])
                            # Manual inspection should be done on these reads
                        else:
                            csn_pair_dict[consensus_tag].append(tag)
                    elif tag in tag_dict and read not in read_dict[tag]:
                        # NOTE(review): membership test uses `read` (first mate of the
                        # pair) rather than `read_i` -- confirm this is intended.
                        # Append reads sharing the same unique tag together (PCR dupes)
                        read_dict[tag].append(read_i)
                        tag_dict[tag] += 1
                    else:
                        # Data fetch error - line read twice (if its found in tag_dict and read_dict)
                        print('Pair already written: line read twice - check to see if read overlapping / near cytoband'
                              ' region (point of data division)')
                # remove read pair qname from pair_dict once reads added to read_dict
                pair_dict.pop(line.qname)
    return read_dict, tag_dict, pair_dict, csn_pair_dict, counter, unmapped_mate, multiple_mapping, bad_spacer
def read_mode(field, bam_reads):
    """(str, lst) -> str
    Return mode (most common occurrence) of a specified read attribute.
    Field e.g. cigarstring, flag, mapping quality, template_length
    """
    # BUGFIX: previously built the string 'i.<field>' and eval()'d it per read.
    # getattr has identical semantics for plain attribute names without the
    # code-injection hazard or the per-read eval overhead.
    field_lst = collections.Counter(getattr(i, field) for i in bam_reads).most_common()
    # Keep every value tied for the maximum number of occurrences
    common_field_lst = [i for i, j in field_lst if j == field_lst[0][1]]
    # Randomly select one value if there's a tie for the max
    common_field = common_field_lst[randint(0, len(common_field_lst)-1)]
    return common_field
def consensus_flag(bam_reads):
    """(list) -> int
    Return the consensus SAM flag for a family of reads from the same molecule.
    The most frequent flag wins outright. When several flags tie for the
    maximum count, prefer flags that indicate proper mapping/pairing
    (99, 83, 147, 163, in that order); if none of those are among the tied
    flags, pick one of them at random.
    e.g.
    H1080:278:C8RE3ACXX:6:2211:10900:88094|TGCT	99	chr7	55221737	60	98M	=	55222033	394
    H1080:278:C8RE3ACXX:6:2213:20942:84732|TGCT	97	chr7	55221737	60	98M	=	55222033	394
    H1080:278:C8RE3ACXX:6:2211:10900:88094|TGCT	147	chr7	55222033	60	98M	=	55221737	-394
    H1080:278:C8RE3ACXX:6:2213:20942:84732|TGCT	145	chr7	55222033	60	98M	=	55221737	-394
    Here location and insert size are identical: 99 becomes the consensus flag
    for the first two reads, 147 for the second pair.
    """
    # Tally flag occurrences, most frequent first, e.g. [(97, 1), (99, 1)]
    flag_counts = collections.Counter(r.flag for r in bam_reads).most_common()
    top_count = flag_counts[0][1]
    # All flags tied for the maximum count
    tied = [f for f, c in flag_counts if c == top_count]
    if len(tied) == 1:
        return tied[0]
    # Break ties in favor of properly paired/mapped flags
    for preferred in (99, 83, 147, 163):
        if preferred in tied:
            return preferred
    # No properly paired/mapped flag among the tie -- choose randomly
    return tied[randint(0, len(tied) - 1)]
def create_aligned_segment(bam_reads, sscs, sscs_qual, query_name):
"""(list, str, list, list, str) -> pysam object
Return consensus read representing list of reads from the same molecule.
Bam file characteristics:
1) Query name -> new 'consensus' query name (e.g. TTTG_24_58847448_24_58847416_137M10S_147M_pos_99_147)
2) Flag -> | |
= 0.0
Hprime[n:, 0:n] = 0.0
Hprime[n:, n:] = 0.0
# Step3: transform back to original coordinate system w -> v = invB @ w
projected_hessian = B.T @ Hprime @ B
return projected_hessian
def _opt_projection_for_operation_cis(self, method="L-BFGS-B", maxiter=10000,
maxfev=10000, tol=1e-6, verbosity=0):
printer = _VerbosityPrinter.create_printer(verbosity)
model = self.parent.models[self.model_lbl]
base_hessian = self.hessian
level = 95 # or 50, or whatever - the scale factory doesn't matter for the optimization
printer.log('', 3)
printer.log("--- Hessian Projector Optimization for gate CIs (%s) ---" % method, 2, indent_offset=-1)
def _objective_func(vector_m):
matM = vector_m.reshape((self.nNonGaugeParams, self.nGaugeParams))
nongauge_space, gauge_space = model.compute_nongauge_and_gauge_spaces(non_gauge_mix_mx=matM)
projected_hessian_ex = self._project_hessian(base_hessian, nongauge_space, gauge_space)
sub_crf = ConfidenceRegionFactory(self.parent, self.model_lbl, self.circuit_list_lbl,
projected_hessian_ex, 0.0)
sub_crf.project_hessian('none')
crfv = sub_crf.view(level)
operationCIs = _np.concatenate([crfv.retrieve_profile_likelihood_confidence_intervals(gl).flatten()
for gl in model.operations])
return _np.sqrt(_np.sum(operationCIs**2))
#Run Minimization Algorithm
startM = _np.zeros((self.nNonGaugeParams, self.nGaugeParams), 'd')
x0 = startM.flatten()
print_obj_func = _opt.create_objfn_printer(_objective_func)
minSol = _opt.minimize(_objective_func, x0,
method=method, maxiter=maxiter,
maxfev=maxfev, tol=tol,
callback=print_obj_func if verbosity > 2 else None)
mixMx = minSol.x.reshape((self.nNonGaugeParams, self.nGaugeParams))
nongauge_space, gauge_space = model.compute_nongauge_and_gauge_spaces(non_gauge_mix_mx=mixMx)
projected_hessian_ex = self._project_hessian(base_hessian, nongauge_space, gauge_space)
printer.log('The resulting min sqrt(sum(operationCIs**2)): %g' % minSol.fun, 2)
return projected_hessian_ex
def _opt_projection_from_split(self, verbosity=0):
printer = _VerbosityPrinter.create_printer(verbosity)
model = self.parent.models[self.model_lbl]
base_hessian = self.hessian
level = 95 # or 50, or whatever - the scale factory doesn't matter for the optimization
printer.log('', 3)
printer.log("--- Hessian Projector Optimization from separate SPAM and Gate weighting ---", 2, indent_offset=-1)
#get gate-intrinsic-error
nongauge_space, gauge_space = model.compute_nongauge_and_gauge_spaces(item_weights={'gates': 1.0, 'spam': 1e-4})
projected_hessian = self._project_hessian(base_hessian, nongauge_space, gauge_space)
sub_crf = ConfidenceRegionFactory(self.parent, self.model_lbl,
self.circuit_list_lbl, projected_hessian, 0.0)
sub_crf.project_hessian('none')
crfv = sub_crf.view(level)
operationCIs = _np.concatenate([crfv.retrieve_profile_likelihood_confidence_intervals(gl).flatten()
for gl in model.operations])
op_intrinsic_err = _np.sqrt(_np.mean(operationCIs**2))
#get spam-intrinsic-error
nongauge_space, gauge_space = model.compute_nongauge_and_gauge_spaces(item_weights={'gates': 1e-4, 'spam': 1.0})
projected_hessian = self._project_hessian(base_hessian, nongauge_space, gauge_space)
sub_crf = ConfidenceRegionFactory(self.parent, self.model_lbl,
self.circuit_list_lbl, projected_hessian, 0.0)
sub_crf.project_hessian('none')
crfv = sub_crf.view(level)
spamCIs = _np.concatenate([crfv.retrieve_profile_likelihood_confidence_intervals(sl).flatten()
for sl in _itertools.chain(iter(model.preps),
iter(model.povms))])
spam_intrinsic_err = _np.sqrt(_np.mean(spamCIs**2))
ratio = op_intrinsic_err / spam_intrinsic_err
nongauge_space, gauge_space = model.compute_nongauge_and_gauge_spaces(
item_weights={'gates': 1.0, 'spam': ratio})
projected_hessian = self._project_hessian(base_hessian, nongauge_space, gauge_space)
if printer.verbosity >= 2:
#Create crfv here just to extract #'s for print stmts
sub_crf = ConfidenceRegionFactory(self.parent, self.model_lbl,
self.circuit_list_lbl, projected_hessian, 0.0)
sub_crf.project_hessian('none')
crfv = sub_crf.view(level)
operationCIs = _np.concatenate([crfv.retrieve_profile_likelihood_confidence_intervals(gl).flatten()
for gl in model.operations])
spamCIs = _np.concatenate([crfv.retrieve_profile_likelihood_confidence_intervals(sl).flatten()
for sl in _itertools.chain(iter(model.preps),
iter(model.povms))])
op_err = _np.sqrt(_np.mean(operationCIs**2))
spam_err = _np.sqrt(_np.mean(spamCIs**2))
printer.log('Resulting intrinsic errors: %g (gates), %g (spam)' %
(op_intrinsic_err, spam_intrinsic_err), 2)
printer.log('Resulting sqrt(mean(operationCIs**2)): %g' % op_err, 2)
printer.log('Resulting sqrt(mean(spamCIs**2)): %g' % spam_err, 2)
return projected_hessian
class ConfidenceRegionFactoryView(object):
"""
Encapsulates a lightweight "view" of a ConfidenceRegionFactory.
A view object is principally defined by it's having a fixed confidence-level.
Thus, a "view" is like a factory that generates confidence intervals for
just a single confidence level. As such, it is a useful object to pass
around to routines which compute and display error bars, as these routines
typically don't depend on what confidence-level is being used.
Parameters
----------
model : Model
The model at the center of this confidence region.
inv_projected_hessian : numpy.ndarray
The computed inverse of the non-gauge-projected Hessian.
mlgst_params : dict
A dictionary of ML-GST parameters only used for linear-response
error bars.
confidence_level : float
the confidence level (between 0 and 100) used in
the computation of confidence regions/intervals.
non_mark_radius_sq : float, optional
When non-zero, "a non-Markovian error region" is constructed using
this value as the squared "non-markovian radius". This specifies the
portion of 2*(max-log-likelihood - model-log-likelihood) that we
attribute to non-Markovian errors (typically the previous
difference minus it's expected value, the difference in number of
parameters between the maximal and model models). If set to
zero (the default), a standard and thereby statistically rigorous
conficence region is created. Non-zero values should only be
supplied if you really know what you're doing.
n_non_gauge_params : int
The numbers of non-gauge parameters. This could be computed from `model`
but can be passed in to save compuational time.
n_gauge_params : int
The numbers of gauge parameters. This could be computed from `model`
but can be passed in to save compuational time.
"""
def __init__(self, model, inv_projected_hessian, mlgst_params, confidence_level,
non_mark_radius_sq, n_non_gauge_params, n_gauge_params):
"""
Creates a new ConfidenceRegionFactoryView.
Usually this constructor is not called directly, and objects of
this type are obtained by calling the :method:`view` method of
a `ConfidenceRegionFactory` object.
Parameters
----------
model : Model
The model at the center of this confidence region.
inv_projected_hessian : numpy.ndarray
The computed inverse of the non-gauge-projected Hessian.
mlgst_params : dict
A dictionary of ML-GST parameters only used for linear-response
error bars.
confidence_level : float
the confidence level (between 0 and 100) used in
the computation of confidence regions/intervals.
non_mark_radius_sq : float, optional
When non-zero, "a non-Markovian error region" is constructed using
this value as the squared "non-markovian radius". This specifies the
portion of 2*(max-log-likelihood - model-log-likelihood) that we
attribute to non-Markovian errors (typically the previous
difference minus it's expected value, the difference in number of
parameters between the maximal and model models). If set to
zero (the default), a standard and thereby statistically rigorous
conficence region is created. Non-zero values should only be
supplied if you really know what you're doing.
n_non_gauge_params, n_gauge_params : int
The numbers of non-gauge and gauge parameters, respectively. These could be
computed from `model` but they're passed in to save compuational time.
"""
# Scale projected Hessian for desired confidence level => quadratic form for confidence region assume hessian
# gives Fisher info, so asymptotically normal => confidence interval = +/- seScaleFctr * 1/sqrt(hessian) where
# seScaleFctr gives the scaling factor for a normal distribution, i.e. integrating the std normal distribution
# between -seScaleFctr and seScaleFctr == confidence_level/100 (as a percentage)
assert(confidence_level > 0.0 and confidence_level < 100.0)
if confidence_level < 1.0:
_warnings.warn("You've specified a %f%% confidence interval, " % confidence_level
+ "which is usually small. Be sure to specify this"
+ "number as a percentage in (0,100) and not a fraction in (0,1).")
# Get constants C such that xT*Hessian*x = C gives contour for the desired confidence region.
# C1 == Single DOF case: constant for a single-DOF likelihood, (or a profile likelihood in our case)
# Ck == Total DOF case: constant for a region of the likelihood as a function of *all non-gauge* model
# parameters
self.nonMarkRadiusSq = non_mark_radius_sq
if non_mark_radius_sq == 0.0: # use == to test for *exact* zero floating pt value as herald
C1 = _stats.chi2.ppf(confidence_level / 100.0, 1)
Ck = _stats.chi2.ppf(confidence_level / 100.0, n_non_gauge_params)
# Alt. method to get C1: square the result of a single gaussian (normal distribution)
#Note: scipy's ppf gives inverse of cdf, so want to know where cdf == the leftover probability on left side
# std error scaling factor for desired confidence region
seScaleFctr = -_stats.norm.ppf((1.0 - confidence_level / 100.0) / 2.0)
assert(_np.isclose(C1, seScaleFctr**2))
# save quadratic form Q s.t. xT*Q*x = 1 gives confidence region using C1, i.e. a
# region appropriate for generating 1-D confidence intervals.
if inv_projected_hessian is not None:
self.invRegionQuadcForm = inv_projected_hessian * C1
else:
self.invRegionQuadcForm = None
self.intervalScaling = _np.sqrt(Ck / C1) # multiplicative scaling required to convert intervals
# to those obtained using a full (using Ck) confidence region.
self.stdIntervalScaling = 1.0 # multiplicative scaling required to convert intervals
# to *standard* (e.g. not non-Mark.) intervals.
self.stdRegionScaling = self.intervalScaling # multiplicative scaling required to convert intervals
# to those obtained using a full *standard* confidence region.
else:
C1 = _stats.ncx2.ppf(confidence_level / 100.0, 1, non_mark_radius_sq)
Ck = _stats.ncx2.ppf(confidence_level / 100.0, n_non_gauge_params, non_mark_radius_sq)
# save quadratic form Q s.t. xT*Q*x = 1 gives confidence region using C1, i.e. a
# region appropriate for generating 1-D confidence intervals.
if inv_projected_hessian is not None:
self.invRegionQuadcForm = inv_projected_hessian * C1
self.invRegionQuadcForm /= _np.sqrt(n_non_gauge_params) # make a *worst case* non-mark. region...
else:
self.invRegionQuadcForm = None
self.intervalScaling = _np.sqrt(Ck / C1) # multiplicative scaling required to convert intervals
# to those obtained using a full (using Ck) confidence region.
stdC1 = _stats.chi2.ppf(confidence_level / 100.0, 1)
stdCk = _stats.chi2.ppf(confidence_level / 100.0, n_non_gauge_params)
self.stdIntervalScaling = _np.sqrt(stdC1 / C1) # see above description
self.stdRegionScaling = _np.sqrt(stdCk | |
<gh_stars>1-10
# Working on: EOL Reached.
# Finished: FINAL RELEASE v2.0
# Update Description: Finishes up all features. Everything will be completed after 2.0 gets released. This will be the last update to S.A.N.E.
# Future Ideas: NEOL Reached.
# Imports
from tkinter.constants import END
import speech_recognition as sr
import pyttsx3
import webbrowser
import datetime
from asyncio import get_event_loop
import threading
import time
import wikipedia
import os
import random
from PyDictionary import PyDictionary
import pandas as pd
import pandas.errors as e
import json
import tkinter as tk
from googletrans import Translator, constants
from sys import exit
# Storage initialization, works along with the jawbreaker and predictor to gather user data
try:
    # NOTE(review): the frames below are converted to lists and then immediately
    # cleared, so only empty module-level lists survive -- presumably they are
    # repopulated at runtime and re-saved elsewhere; confirm that intent.
    data_storing = pd.read_csv("S.A.N.E. Data Files\\data_storing.csv", delimiter=', ', engine='python')
    network_storage = pd.read_csv("S.A.N.E. Data Files\\network_storage.csv", delimiter=', ', engine='python')
    common_values = pd.read_csv("S.A.N.E. Data Files\\common_values.csv", delimiter=', ', engine='python')
    secondary_storage = pd.read_csv("S.A.N.E. Data Files\\secondary_storage.csv", delimiter=', ', engine='python')
    data_storing = data_storing.values.tolist()
    network_storage = network_storage.values.tolist()
    common_values = common_values.values.tolist()
    secondary_storage = secondary_storage.values.tolist()
    data_storing.clear()
    network_storage.clear()
    common_values.clear()
    secondary_storage.clear()
except e.EmptyDataError:
    # Empty CSVs are tolerated on a fresh install.
    # NOTE(review): on this path the four names above may be left undefined,
    # which would raise NameError later -- confirm the first-run behavior.
    pass
# Settings data loading
settings_file = open('S.A.N.E. Data Files\\settings.json', 'r')
settings_file_d = json.load(settings_file)
settings_file.close()
# Map the stored GENDER to a pyttsx3 voice index (0 and 1 select the two SAPI voices).
# NOTE(review): any other GENDER value leaves `gender` undefined, so the
# engine.setProperty call below would raise NameError -- confirm valid values.
if settings_file_d["GENDER"] == 'MALE':
    gender = 0
elif settings_file_d["GENDER"] == 'FEMALE':
    gender = 1
# Choose the honorific used when addressing the user, based on preferred pronouns.
# NOTE(review): 'he' is a substring of 'she', so she/her pronouns match the
# first branch and get 'sir' -- likely a bug; confirm and reorder if so.
if 'he' in settings_file_d["PRONOUNS"]:
    usr_gender = 'sir'
elif 'she' in settings_file_d["PRONOUNS"]:
    usr_gender = "ma'am"
elif 'they' in settings_file_d["PRONOUNS"]:
    usr_gender = ''
elif '' in settings_file_d["PRONOUNS"]:
    # '' is a substring of every string, so this branch acts as a catch-all else.
    usr_gender = ''
# Start-up for the voice engine, used for the pyttsx3 voice API (microsoft based API)
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[gender].id)
# More data dumps/storage
primary_storing, search_list, daily_donations = [], [], []
# Login initialization: stored credentials as a list-of-rows from login.csv
logins = pd.read_csv("S.A.N.E. Data Files\\login.csv", engine='python')
logins = logins.values.tolist()
login_okay = False
username = ''
password = ''
confirm_pass = False
# Functions
def speak(speech):
    """Speak the given text aloud via the pyttsx3 engine (blocks until done)."""
    engine.say(speech)
    engine.runAndWait()
# Popup Username and Password Window
def popup_window():
    """Show a modal login window that checks the entered credentials against
    the module-level ``username``/``password``.

    On a successful match, sets the global ``confirm_pass`` to True and closes
    the window; otherwise speaks an error and leaves the window open for
    another attempt.
    """
    global confirm_pass

    def confirm_b():
        # Validate the typed credentials against the stored globals.
        global confirm_pass
        username_e = p_user.get()
        # BUGFIX: was `p_user.get()`, which compared the *username* entry to the
        # stored password, so a correct password could never match.
        password_e = p_pass.get()
        if username_e == username:
            if password_e == password:
                confirm_pass = True
                popup.destroy()
            else:
                speak('Incorrect password')
        else:
            speak("Incorrect username")

    popup = tk.Tk()
    popup.title('Credentials Checkpoint')
    # NOTE(review): absolute, machine-specific icon path -- breaks on other installs
    popup.wm_iconbitmap('C:\\Users\\Reinier\\PycharmProjects\\GitHub Projects\\S.A.N.EProject\\S.A.N.E. Distribution\\S.A.N.E. Icon.ico')
    popup.wm_attributes('-alpha', 0.95)
    popup.configure(bg='gray20')
    # Size the window to a quarter of the screen in each dimension.
    popwidth, popheight = (popup.winfo_screenwidth()/4), (popup.winfo_screenheight()/4)
    popup.geometry('%dx%d+0+0' % (popwidth, popheight))
    p_user = tk.Entry(bg='gray20', fg='white')
    p_user.pack()
    p_user.place(x=(popwidth/2.5), y=0)
    p_usrlabel = tk.Label(popup, text="Username:", bg='gray20', fg='white')
    p_usrlabel.pack()
    p_usrlabel.place(x=(popwidth/4), y=0)
    p_pass = tk.Entry(bg='gray20', fg='white')
    p_pass.pack()
    p_pass.place(x=(popwidth/2.5), y=(50))
    p_passlabel = tk.Label(popup, text="Password:", bg='gray20', fg='white')
    p_passlabel.pack()
    p_passlabel.place(x=(popwidth/4), y=50)
    p_confirm = tk.Button(popup, text="Login", bg='gray20', fg='white', command=confirm_b)
    p_confirm.pack()
    p_confirm.place(x=(popwidth/2.05), y=75)
    popup.mainloop()
# Set Login Dialog
def login_dialog():
    """Show the first-time registration window: collect a username and a
    password, append them to the module-level ``logins`` list, then hand off
    to the confirmation popup."""
    def confirm_b():
        # Record the typed credentials and move on to login confirmation.
        global username
        global password
        username = set_user.get()
        password = set_pass.get()
        logins.append(username)
        logins.append(password)
        # BUGFIX: spoken message said "Thank your for registering".
        speak("Thank you for registering, please confirm your login to access the AI.")
        set_login.destroy()
        popup_window()

    set_login = tk.Tk()
    set_login.title('Credentials Checkpoint')
    # NOTE(review): absolute, machine-specific icon path -- breaks on other installs
    set_login.wm_iconbitmap('C:\\Users\\Reinier\\PycharmProjects\\GitHub Projects\\S.A.N.EProject\\S.A.N.E. Distribution\\S.A.N.E. Icon.ico')
    set_login.wm_attributes('-alpha', 0.95)
    set_login.configure(bg='gray20')
    # Size the window to a quarter of the screen in each dimension.
    setlwidth, setlheight = (set_login.winfo_screenwidth()/4), (set_login.winfo_screenheight()/4)
    set_login.geometry('%dx%d+0+0' % (setlwidth, setlheight))
    set_user = tk.Entry(bg='gray20', fg='white')
    set_user.pack()
    set_user.place(x=(setlwidth/2.5), y=0)
    set_usrlabel = tk.Label(set_login, text="Username:", bg='gray20', fg='white')
    set_usrlabel.pack()
    set_usrlabel.place(x=(setlwidth/4), y=0)
    set_pass = tk.Entry(bg='gray20', fg='white')
    set_pass.pack()
    set_pass.place(x=(setlwidth/2.5), y=(50))
    set_passlabel = tk.Label(set_login, text="Password:", bg='gray20', fg='white')
    set_passlabel.pack()
    set_passlabel.place(x=(setlwidth/4), y=50)
    # NOTE(review): button label says "Login" on a registration dialog --
    # left unchanged to avoid altering the UI contract.
    set_confirm = tk.Button(set_login, text="Login", bg='gray20', fg='white', command=confirm_b)
    set_confirm.pack()
    set_confirm.place(x=(setlwidth/2.05), y=75)
    set_login.mainloop()
# Next three functions initialize the entire program with a username and password
def login__init__():
    """Authentication entry point: ask (by voice) whether the user already has
    a login, then route to returning-user login or first-time registration."""
    speak('Do you have a login yet? Please say yes or no.')
    while 1:
        yes_or_no = r.listen(source)
        yes_or_no = r.recognize_google(yes_or_no)
        if 'yes' in yes_or_no:
            speak("Thank you for supporting, please input your login now.")
            returning_user_login()
        elif 'no' in yes_or_no:
            # BUGFIX: spoken message said "please seta login".
            speak("Welcome, please set a login in the window provided.")
            set_password()
        else:
            # BUGFIX (latent): the original `elif 'yes' and 'no' not in yes_or_no:`
            # evaluated as `'no' not in yes_or_no` ('yes' is always truthy).
            # At this point neither branch above matched, so a plain else is
            # equivalent and states the intent correctly.
            speak("Please only say either yes or no")
def returning_user_login():  # Login for returning users
    """Load the stored credentials from the login.csv dump, strip the CSV/list
    framing characters, authenticate via the popup window, and -- only on
    success -- start the assistant loop on a worker thread."""
    global username
    global password
    global confirm_pass

    def _clean(raw, junk_token):
        # Strip the record-specific junk token plus the list/CSV framing
        # characters ([, ', ], ,) left over from the pandas round-trip.
        cleaned = str(raw).replace(junk_token, '')
        for ch in ("[", "'", "]", ","):
            cleaned = cleaned.replace(ch, '')
        return cleaned

    username = _clean(logins[0], ' nan nan nan nan')
    password = _clean(logins[1], ' <PASSWORD>')
    popup_window()
    # BUGFIX: `confirm_pass = True` was previously forced here unconditionally,
    # which bypassed the credential check entirely; now the popup's verdict is
    # trusted.
    if confirm_pass:
        speak("Credentials Authorized, Welcome.")
        # BUGFIX: `threading.Thread(do_stuff())` called do_stuff() synchronously
        # and passed its return value as the Thread's `group` argument; the
        # callable must be handed over via `target=`.
        t1 = threading.Thread(target=do_stuff)
        t1.start()
def set_password():
    """First-time setup: collect a username/password through the registration
    dialog, persist the credentials to login.csv, then run the normal
    returning-user login flow."""
    login_dialog()
    # Persist the freshly appended username/password rows.
    pd.DataFrame(logins, columns=None).to_csv('S.A.N.E. Data Files\\login.csv', index=False, columns=None, sep=',')
    returning_user_login()
def the_jawbreaker():
    """Background "learning" loop: fold captured user phrases (data_storing)
    into secondary_storage, split each sample into words, track every word in
    network_storage and first-seen words in common_values, then print a
    synonym suggestion for the tracked words.  Runs forever.
    """
    while True:
        # Pull any not-yet-processed samples into the working pool.
        for sample in data_storing:
            if sample not in secondary_storage:
                secondary_storage.append(sample)

        # Work on one randomly chosen sample per pass.
        value = random.choice(secondary_storage)
        if ' ' in value:
            # Multi-word sample: handle the first two words individually (the
            # original code only ever used the first two tokens).
            # BUGFIX: the old test `if word_1 or word_2 in network_storage:` was a
            # truthiness/precedence bug (word_1 is always truthy), so the branch
            # that records new words in common_values was dead code.
            for word in value.split(' ')[:2]:
                if word not in network_storage:
                    common_values.append(word)
                network_storage.append(word)
        else:
            # Single-word sample: same bookkeeping.
            if value not in network_storage:
                common_values.append(value)
            network_storage.append(value)

        # Prediction: suggest a synonym for each word seen so far.
        for word in network_storage:
            print(network_storage)
            # NOTE(review): PyDictionary.synonym is called on the class, not an
            # instance -- confirm this is supported by the installed version.
            suggestion = PyDictionary.synonym(word)
            # ROBUSTNESS: PyDictionary returns None on lookup failure, which
            # would crash random.choice; skip such words.
            if suggestion:
                print(random.choice(suggestion))
def confirm_settings():
    """Announce that new settings require a restart, then terminate the app."""
    speak("This will shutdown, reopen to have your settings applied.")
    exit()
def settings_change():
    """Open the settings window (voice-gender toggle and preferred pronouns)
    after re-authenticating the user.

    Changes are written to settings.json and take effect on the next launch.
    """
    speak("Please enter your password to access the settings")

    def set_voice_gender():
        # Flip GENDER between MALE and FEMALE in settings.json; the new voice
        # is picked up on the next launch.
        print(settings_file_d["GENDER"])
        if settings_file_d["GENDER"] not in ('MALE', 'FEMALE'):
            return  # unknown stored value -- leave the file untouched, as before
        new_gender = 'FEMALE' if settings_file_d['GENDER'] == 'MALE' else 'MALE'
        settings_data = {
            "GENDER": new_gender,
            "PRONOUNS": settings_file_d["PRONOUNS"]
        }
        with open('S.A.N.E. Data Files\\settings.json', 'w') as settings_file:
            json.dump(settings_data, settings_file)
        speak('Gender changed')
        # CONSISTENCY: the restart reminder was previously spoken only on the
        # FEMALE->MALE path; it applies to both directions.
        speak("Please restart the program to utilize changes.")
        exit()

    def set_pronouns():
        # Persist the pronouns typed into the text box (strip the trailing
        # newline that tkinter's Text widget appends).
        pronouns = PRONOUN_SELECT.get('1.0', END).replace('\n', '')
        settings_data = {
            "GENDER": settings_file_d["GENDER"],
            "PRONOUNS": pronouns
        }
        with open('S.A.N.E. Data Files\\settings.json', 'w') as settings_file:
            json.dump(settings_data, settings_file)
        # BUGFIX: console message was a no-op f-string with a typo ("Prefered").
        print('Preferred pronouns changed')

    # BUGFIX: the old version called set_win.mainloop() inside a `while 1:`
    # loop *before* any widget was created, so an empty window appeared, the
    # loop never exited, and all of the configuration code below was
    # unreachable.  popup_window() blocks until the user authenticates (it
    # sets the global confirm_pass on success).
    popup_window()
    if not confirm_pass:
        return

    # Tkinter settings window
    set_win = tk.Tk()
    set_win.title('S.A.N.E. Settings Configuration')
    set_win.wm_attributes('-alpha', 0.95)
    set_win.configure(bg='gray20')
    # NOTE(review): absolute, machine-specific icon path -- breaks on other installs
    set_win.wm_iconbitmap('C:\\Users\\Reinier\\PycharmProjects\\GitHub Projects\\S.A.N.EProject\\S.A.N.E. Distribution\\S.A.N.E. Icon.ico')
    root_h = (set_win.winfo_screenheight()/3)
    root_w = (set_win.winfo_screenwidth()/3)
    x = (root_w/1)
    y = (root_h/1.2)
    set_win.geometry('%dx%d+%d+%d' % (root_w, root_h, x, y))
    set_win.resizable(False, False)
    VOICE_GENDER_SELECT = tk.Button(set_win, text='Toggle Gender', bg='gray20', fg='white', command=set_voice_gender)
    VOICE_GENDER_SELECT.pack()
    PRONOUN_SELECT = tk.Text(set_win, bg='gray20', fg='white', width=100, height=1)
    PRONOUN_SELECT.pack()
    PRONOUN_SELECT_B = tk.Button(set_win, text='Confirm Pronouns', bg='gray20', fg='white', command=set_pronouns)
    PRONOUN_SELECT_B.pack()
    CONFIRM_SETTINGS = tk.Button(set_win, text='Confirm Settings', bg='gray20', fg='white', command=confirm_settings)
    CONFIRM_SETTINGS.pack()
    CONFIRM_SETTINGS.place(x=(root_w/2.35), y=(root_h-25))
    set_win.mainloop()
def speak_with_user(): # Speak with user is the conversational algorithm, with limited functions, although it
# eventually re-roots itself into the main algorithm
speak("Sane friendly mode is in beta, if any features should be included or any bugs are prominent please report "
"to the developer of Sane.")
speak("How is your day today?")
response = r.listen(source)
response = r.recognize_google(response)
response = response.lower()
while 1:
if 'good' or 'great' or 'amazing' or 'really good' in response:
responses = ['What makes your day good?', 'Did you do anything cool?', 'What fun activities did you do '
'today?']
speak(random.choice(responses))
response = r.listen(source)
response = r.recognize_google(response)
primary_storing.append(response)
speak("That's great, do you have any other plans today?")
response = r.listen(source)
response = r.recognize_google(response)
if 'no' or 'nah' in response:
speak("Fair enough I'll be here in case you need me.")
while 1:
yes_or_no = r.listen(source)
yes_or_no = r.recognize_google(yes_or_no)
if 'yes' in yes_or_no:
exit(do_stuff())
else:
continue
elif 'yes' in response:
speak("Nice, although if you said what you are doing I'm not sure I heard")
response = r.listen(source)
response = r.recognize_google(response)
primary_storing.append(response)
time.sleep(0.5)
speak("Nice do you want any help with that?")
response = r.listen(source)
response = r.recognize_google(response)
if 'yes' or 'ok' in response:
speak("would you like me to define a word, Wiki search or anything?")
response = r.listen(source)
response = r.recognize_google(response)
if 'wiki' in response:
speak('What would you like to search?')
audio = r.listen(source)
audio = r.recognize_google(audio)
speak("Your results are being processed now.")
answer = wikipedia.summary(audio, sentences=5)
speak(answer)
while 1:
yes_or_no = r.listen(source)
yes_or_no = r.recognize_google(yes_or_no)
if 'yes' in yes_or_no:
exit(do_stuff())
else:
continue
elif 'define' in response:
speak("What is your word?")
audio = r.listen(source)
audio = r.recognize_google(audio)
word_def = PyDictionary.meaning(f"{audio}")
speak(word_def)
while 1:
yes_or_no = r.listen(source)
yes_or_no = r.recognize_google(yes_or_no)
if 'yes' in yes_or_no:
exit(do_stuff())
else:
continue
elif 'no' in response:
speak("Okay, I'll be here if you need any help later.")
elif 'bad' or 'not great' or 'pretty bad' or 'not that good' in response:
responses = ['Why is your day | |
<filename>venv/Lib/site-packages/matplotlib/image.py
"""
The image module supports basic image loading, rescaling and display
operations.
"""
import math
import os
import logging
from pathlib import Path
import numpy as np
import PIL.PngImagePlugin
import matplotlib as mpl
from matplotlib import _api
import matplotlib.artist as martist
from matplotlib.backend_bases import FigureCanvasBase
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import matplotlib.cbook as cbook
# For clarity, names from _image are given explicitly in this module:
import matplotlib._image as _image
# For user convenience, the names from _image are also imported into
# the image namespace:
from matplotlib._image import *
from matplotlib.transforms import (
Affine2D, BboxBase, Bbox, BboxTransform, BboxTransformTo,
IdentityTransform, TransformedBbox)
_log = logging.getLogger(__name__)  # module-level logger for matplotlib.image

# Map interpolation strings to module constants.  The actual resampling is
# performed by the C++ extension matplotlib._image; these constants select
# the kernel it applies.
_interpd_ = {
    'antialiased': _image.NEAREST,  # this will use nearest or Hanning...
    'none': _image.NEAREST,  # fall back to nearest when not supported
    'nearest': _image.NEAREST,
    'bilinear': _image.BILINEAR,
    'bicubic': _image.BICUBIC,
    'spline16': _image.SPLINE16,
    'spline36': _image.SPLINE36,
    'hanning': _image.HANNING,
    'hamming': _image.HAMMING,
    'hermite': _image.HERMITE,
    'kaiser': _image.KAISER,
    'quadric': _image.QUADRIC,
    'catrom': _image.CATROM,
    'gaussian': _image.GAUSSIAN,
    'bessel': _image.BESSEL,
    'mitchell': _image.MITCHELL,
    'sinc': _image.SINC,
    'lanczos': _image.LANCZOS,
    'blackman': _image.BLACKMAN,
}

# Public set of every recognized interpolation name (keys of _interpd_).
interpolations_names = set(_interpd_)
def composite_images(images, renderer, magnification=1.0):
    """
    Composite a number of RGBA images into one.  The images are
    composited in the order in which they appear in the *images* list.

    Parameters
    ----------
    images : list of Images
        Each must have a `make_image` method.  For each image,
        `can_composite` should return `True`, though this is not
        enforced by this function.  Each image must have a purely
        affine transformation with no shear.
    renderer : `.RendererBase`
    magnification : float, default: 1
        The additional magnification to apply for the renderer in use.

    Returns
    -------
    image : uint8 array (M, N, 4)
        The composited RGBA image.
    offset_x, offset_y : float
        The (left, bottom) offset where the composited image should be placed
        in the output figure.
    """
    if len(images) == 0:
        # Nothing to composite: empty image at the origin.
        return np.empty((0, 0, 4), dtype=np.uint8), 0, 0
    parts = []
    bboxes = []
    for image in images:
        data, x, y, trans = image.make_image(renderer, magnification)
        if data is not None:
            # Positions returned by make_image are pre-magnification.
            x *= magnification
            y *= magnification
            parts.append((data, x, y, image._get_scalar_alpha()))
            bboxes.append(
                Bbox([[x, y], [x + data.shape[1], y + data.shape[0]]]))
    if len(parts) == 0:
        # All make_image calls returned no data.
        return np.empty((0, 0, 4), dtype=np.uint8), 0, 0
    bbox = Bbox.union(bboxes)
    # Output raster covering the union of all image bounding boxes.
    output = np.zeros(
        (int(bbox.height), int(bbox.width), 4), dtype=np.uint8)
    for data, x, y, alpha in parts:
        # Translate each image into the union box; no rescaling (NEAREST,
        # resample=False) — only placement and alpha blending.
        trans = Affine2D().translate(x - bbox.x0, y - bbox.y0)
        _image.resample(data, output, trans, _image.NEAREST,
                        resample=False, alpha=alpha)
    # Offsets are returned in pre-magnification coordinates.
    return output, bbox.x0 / magnification, bbox.y0 / magnification
def _draw_list_compositing_images(
        renderer, parent, artists, suppress_composite=None):
    """
    Draw a sorted list of artists, compositing images into a single
    image where possible.

    For internal Matplotlib use only: It is here to reduce duplication
    between `Figure.draw` and `Axes.draw`, but otherwise should not be
    generally useful.
    """
    has_images = any(isinstance(x, _ImageBase) for x in artists)
    # override the renderer default if suppressComposite is not None
    not_composite = (suppress_composite if suppress_composite is not None
                     else renderer.option_image_nocomposite())
    if not_composite or not has_images:
        # Simple path: draw every artist individually.
        for a in artists:
            a.draw(renderer)
    else:
        # Composite any adjacent images together
        image_group = []
        mag = renderer.get_image_magnification()

        def flush_images():
            # Draw a lone image directly; merge 2+ adjacent images into a
            # single raster before handing it to the renderer.
            if len(image_group) == 1:
                image_group[0].draw(renderer)
            elif len(image_group) > 1:
                data, l, b = composite_images(image_group, renderer, mag)
                if data.size != 0:
                    gc = renderer.new_gc()
                    gc.set_clip_rectangle(parent.bbox)
                    gc.set_clip_path(parent.get_clip_path())
                    renderer.draw_image(gc, round(l), round(b), data)
                    gc.restore()
            del image_group[:]

        for a in artists:
            # Only unclipped, composable images join the current group; any
            # other artist flushes the group so z-order is preserved.
            if (isinstance(a, _ImageBase) and a.can_composite() and
                    a.get_clip_on() and not a.get_clip_path()):
                image_group.append(a)
            else:
                flush_images()
                a.draw(renderer)
        flush_images()
def _resample(
        image_obj, data, out_shape, transform, *, resample=None, alpha=1):
    """
    Convenience wrapper around `._image.resample` to resample *data* to
    *out_shape* (with a third dimension if *data* is RGBA) that takes care of
    allocating the output array and fetching the relevant properties from the
    Image object *image_obj*.
    """
    # decide if we need to apply anti-aliasing if the data is upsampled:
    # compare the number of displayed pixels to the number of
    # the data pixels.
    interpolation = image_obj.get_interpolation()
    if interpolation == 'antialiased':
        # don't antialias if upsampling by an integer number or
        # if zooming in more than a factor of 3
        # Transform the data-corner positions to display space to measure the
        # on-screen extent of the image.
        pos = np.array([[0, 0], [data.shape[1], data.shape[0]]])
        disp = transform.transform(pos)
        dispx = np.abs(np.diff(disp[:, 0]))
        dispy = np.abs(np.diff(disp[:, 1]))
        # NOTE: dispx/dispy are size-1 numpy arrays, so the `or`/`and`
        # chains evaluate their truthiness elementwise-as-scalar; this relies
        # on the arrays having exactly one element.
        if ((dispx > 3 * data.shape[1] or
                dispx == data.shape[1] or
                dispx == 2 * data.shape[1]) and
                (dispy > 3 * data.shape[0] or
                dispy == data.shape[0] or
                dispy == 2 * data.shape[0])):
            interpolation = 'nearest'
        else:
            interpolation = 'hanning'
    out = np.zeros(out_shape + data.shape[2:], data.dtype)  # 2D->2D, 3D->3D.
    if resample is None:
        resample = image_obj.get_resample()
    _image.resample(data, out, transform,
                    _interpd_[interpolation],
                    resample,
                    alpha,
                    image_obj.get_filternorm(),
                    image_obj.get_filterrad())
    return out
def _rgb_to_rgba(A):
"""
Convert an RGB image to RGBA, as required by the image resample C++
extension.
"""
rgba = np.zeros((A.shape[0], A.shape[1], 4), dtype=A.dtype)
rgba[:, :, :3] = A
if rgba.dtype == np.uint8:
rgba[:, :, 3] = 255
else:
rgba[:, :, 3] = 1.0
return rgba
class _ImageBase(martist.Artist, cm.ScalarMappable):
    """
    Base class for images.

    *interpolation* and *cmap* default to their rc settings.

    *cmap* is a colors.Colormap instance; *norm* is a colors.Normalize
    instance to map luminance to 0-1.

    *extent* is data axes (left, right, bottom, top) for making image plots
    registered with data plots.  Default is to label the pixel
    centers with the zero-based row and column indices.

    Additional kwargs are matplotlib.artist properties.
    """
    # Images are drawn below most other artists by default.
    zorder = 0
    def __init__(self, ax,
                 cmap=None,
                 norm=None,
                 interpolation=None,
                 origin=None,
                 filternorm=True,
                 filterrad=4.0,
                 resample=False,
                 *,
                 interpolation_stage=None,
                 **kwargs
                 ):
        # Initialize both bases explicitly; this hierarchy does not use
        # cooperative super().
        martist.Artist.__init__(self)
        cm.ScalarMappable.__init__(self, norm, cmap)
        if origin is None:
            # Fall back to the rcParams default ('upper' or 'lower').
            origin = mpl.rcParams['image.origin']
        _api.check_in_list(["upper", "lower"], origin=origin)
        self.origin = origin
        self.set_filternorm(filternorm)
        self.set_filterrad(filterrad)
        self.set_interpolation(interpolation)
        self.set_interpolation_stage(interpolation_stage)
        self.set_resample(resample)
        self.axes = ax
        # Cache for the resampled raster; invalidated whenever the data,
        # alpha, or colormapping changes.
        self._imcache = None
        # Apply any remaining artist properties (alpha, zorder, ...).
        self.update(kwargs)
def __getstate__(self):
state = super().__getstate__()
# We can't pickle the C Image cached object.
state['_imcache'] = None
return state
def get_size(self):
"""Return the size of the image as tuple (numrows, numcols)."""
if self._A is None:
raise RuntimeError('You must first set the image array')
return self._A.shape[:2]
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on all backends.
Parameters
----------
alpha : float or 2D array-like or None
"""
martist.Artist._set_alpha_for_array(self, alpha)
if np.ndim(alpha) not in (0, 2):
raise TypeError('alpha must be a float, two-dimensional '
'array, or None')
self._imcache = None
def _get_scalar_alpha(self):
"""
Get a scalar alpha value to be applied to the artist as a whole.
If the alpha value is a matrix, the method returns 1.0 because pixels
have individual alpha values (see `~._ImageBase._make_image` for
details). If the alpha value is a scalar, the method returns said value
to be applied to the artist as a whole because pixels do not have
individual alpha values.
"""
return 1.0 if self._alpha is None or np.ndim(self._alpha) > 0 \
else self._alpha
    def changed(self):
        """
        Call this whenever the mappable is changed so observers can update.
        """
        # Invalidate the cached rasters before notifying observers, so the
        # next draw regenerates them with the new mapping.
        self._imcache = None
        self._rgbacache = None
        cm.ScalarMappable.changed(self)
def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
unsampled=False, round_to_pixel_border=True):
"""
Normalize, rescale, and colormap the image *A* from the given *in_bbox*
(in data space), to the given *out_bbox* (in pixel space) clipped to
the given *clip_bbox* (also in pixel space), and magnified by the
*magnification* factor.
*A* may be a greyscale image (M, N) with a dtype of float32, float64,
float128, uint16 or uint8, or an (M, N, 4) RGBA image with a dtype of
float32, float64, float128, or uint8.
If *unsampled* is True, the image will not be scaled, but an
appropriate affine transformation will be returned instead.
If *round_to_pixel_border* is True, the output image size will be
rounded to the nearest pixel boundary. This makes the images align
correctly with the axes. It should not be used if exact scaling is
needed, such as for `FigureImage`.
Returns
-------
image : (M, N, 4) uint8 array
The RGBA image, resampled unless *unsampled* is True.
x, y : float
The upper left corner where the image should be drawn, in pixel
space.
trans : Affine2D
The affine transformation from image to pixel space.
"""
if A is None:
raise RuntimeError('You must first set the image '
'array or the image attribute')
if A.size == 0:
raise RuntimeError("_make_image must get a non-empty image. "
"Your Artist's draw method must filter before "
"this method is called.")
clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)
if | |
<filename>xxurl/xxurl.py
import os
import re
import elist.elist as elel
import edict.edict as eded
import tlist.tlist as tltl
import estring.estring as eses
import urllib.parse
import ipaddress
import posixpath
from efdir import fs
# https://url.spec.whatwg.org/#concept-url-origin
# https://docs.python.org/3/library/urllib.parse.html?highlight=urlparse#urllib.parse.urlparse
# scheme 0 URL scheme specifier scheme parameter
# netloc 1 Network location part empty string
# path 2 Hierarchical path empty string
# params 3 Parameters for last path element empty string
# query 4 Query component empty string
# fragment 5 Fragment identifier empty string
# username User name None
# password Password <PASSWORD>
# hostname Host name (lower case) None
# port Port number as integer, if present
# six-element tuple (sextuple) naming conventions used below:
#   u    href    urlstr   url-string
#   sixt urlsixt url-six-elements-tuple
#   sixd urlsixd url-six-elements-dict
# Canonical order of urlparse's six components.
SIXL = ['scheme', 'netloc', 'path', 'params', 'query', 'fragment']
# Bidirectional mapping: component name -> index and index -> name.
SIXMD = {
    'path': 2,
    'netloc': 1,
    'fragment': 5,
    'params': 3,
    'scheme': 0,
    'query': 4,
    0:'scheme',
    1:'netloc',
    2:'path',
    3:'params',
    4:'query',
    5:'fragment'
}
def six_to_attrname(attr):
    '''
    Normalize *attr* (an index 0-5 or a case-insensitive component name)
    to one of the six urlparse element names; print a notice and return
    None when it is unknown.
    '''
    name = SIXMD[attr] if type(attr) is int else str.lower(attr)
    if(name in SIXL):
        return(name)
    print('attribute {0} not exist in url-six-elements-tuple'.format(name))
    return(None)
def six_u2d(url):
    '''
    Parse *url* into a dict keyed by the six urlparse component names.

    url = 'http://www.baidu.com/index.php;params?username=query#frag'
    pobj(six_u2d(url))
    '''
    parsed = urllib.parse.urlparse(url)
    return({name: getattr(parsed, name) for name in SIXL})
def six_d2u(d):
    '''
    Build a URL string from a six-element dict (inverse of six_u2d).

    d = {
        'path': '/index.php',
        'netloc': 'www.baidu.com',
        'fragment': 'frag',
        'params': 'params',
        'scheme': 'http',
        'query': 'username=query'
    }
    url = six_d2u(d)
    '''
    return(urllib.parse.urlunparse(six_d2t(d)))
def six_u2t(url):
    '''
    Parse *url* into the six-element tuple
    (scheme, netloc, path, params, query, fragment).

    url = 'http://www.baidu.com/index.php;params?username=query#frag'
    pobj(six_u2t(url))
    '''
    # urlparse already returns a six-field namedtuple in canonical order.
    return(tuple(urllib.parse.urlparse(url)))
def six_t2u(t):
    '''
    Join a six-element tuple back into a URL string.

    Note: params is rendered as part of the path segment (";params").

    t = ('http', 'www.baidu.com', '/index.php', 'params', 'username=query', 'frag')
    url = six_t2u(t)
    '''
    return(urllib.parse.urlunparse(t))
def six_d2t(d):
    '''
    Convert a six-element dict to the urlparse-ordered tuple.

    d = {
        'path': '/index.php',
        'netloc': 'www.baidu.com',
        'fragment': 'frag',
        'params': 'params',
        'scheme': 'http',
        'query': 'username=query'
    }
    t = six_d2t(d)
    '''
    return(tuple(
        d[name]
        for name in ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')))
def six_t2d(t):
    '''
    Convert a urlparse-ordered six-element tuple to a dict.

    t = ('http', 'www.baidu.com', '/index.php', 'params', 'username=query', 'frag')
    pobj(six_t2d(t))
    '''
    names = ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')
    return({names[i]: t[i] for i in range(6)})
def six_set(url,*args,**kwargs):
    '''
    Return *url* with some of its six urlparse elements replaced.

    Elements may be given as alternating positional key/value pairs
    (names or indices) or as keyword arguments:

    url = 'http://www.baidu.com/index.php;params?username=query#frag'
    six_set(url,netloc='www.google.com',fragment='newfrag')
    six_set(url,'netloc','www.google.com','fragment','newfrag')
    six_set(url,1,'www.google.com',5,'newfrag')
    '''
    d = six_u2d(url)
    args = list(args)
    lngth = args.__len__()
    if(lngth >= 2):
        # Positional form: treat args as key/value pairs; a trailing odd
        # argument is silently dropped.
        lngth = lngth - (lngth%2)
        args = args[:lngth]
        # elel.divide(args, 2) chunks the list into pairs.
        tmp = elel.divide(args,2)
        for kv in tmp:
            k = kv[0]
            # Accepts indices (0-5) or case-insensitive names; None if bad.
            k = six_to_attrname(k)
            v = kv[1]
            cond = (k in SIXL)
            if(cond):
                d[k] = v
            else:
                # Unknown keys are ignored (notice already printed).
                pass
    else:
        # Keyword form: only recognized element names (case-insensitive)
        # are applied; everything else is ignored.
        for k in kwargs:
            rk = str.lower(k)
            cond = (rk in SIXL)
            if(cond):
                d[rk] = kwargs[k]
            else:
                pass
    url = six_d2u(d)
    return(url)
def six_get(url,*args,**kwargs):
    '''
    Return one of the six urlparse elements of *url*.

    url = 'http://www.baidu.com/index.php;params?username=query#frag'
    six_get(url,attr='netloc')
    six_get(url,'netloc')
    six_get(url,0)
    '''
    d = six_u2d(url)
    args = list(args)
    if(args.__len__() == 0):
        attr = kwargs['attr']
    else:
        attr = args[0]
    # six_to_attrname already lower-cases, validates against SIXL and prints
    # a notice for unknown attributes.  The old code then called
    # str.lower(None) and crashed with TypeError instead of returning None.
    attr = six_to_attrname(attr)
    if(attr is None):
        return(None)
    return(d[attr])
# Sub-components of the netloc portion of a URL.
NLOCL = ['username','password','hostname','port']
# Separator literals used when re-assembling URL strings.
NETLOCSP = '://'
PASSWDSP = ':'
HOSTSP = '@'
PORTSP = ":"
PATHSP = ""   # NOTE: intentionally empty — path needs no leading separator here
PARAMSP = ";"
QUERYSP = "?"
FRAGSP = "#"
#
def unpack_host(host):
    '''
    Split a "hostname[:port]" string into its parts.

    unpack_host('www.baidu.com')     -> {'hostname': 'www.baidu.com', 'port': ''}
    unpack_host('www.baidu.com:443') -> {'hostname': 'www.baidu.com', 'port': '443'}
    '''
    tmp = host.split(':')
    hostname = tmp[0]
    # The old try/except/else both duplicated the assignment and used a bare
    # except that would hide unrelated errors; a length check is explicit.
    port = tmp[1] if len(tmp) > 1 else ''
    d = {
        'hostname':hostname,
        'port':port
    }
    return(d)
def packup_host(d):
    '''
    Inverse of unpack_host: join hostname and optional port.

    packup_host({'hostname': 'www.baidu.com', 'port': ''})    -> 'www.baidu.com'
    packup_host({'hostname': 'www.baidu.com', 'port': '443'}) -> 'www.baidu.com:443'
    '''
    hostname = d['hostname']
    port = d['port']
    return(hostname if port == '' else hostname + ':' + port)
# unpw: a "username[:password]" credential pair
def unpack_unpw(unpw):
    '''
    Split a "username[:password]" string into its parts.

    unpack_unpw('admin:secret') -> {'username': 'admin', 'password': 'secret'}
    unpack_unpw('admin')        -> {'username': 'admin', 'password': ''}
    '''
    tmp = unpw.split(':')
    username = tmp[0]
    # The old try/except/else both duplicated the assignment and used a bare
    # except that would hide unrelated errors; a length check is explicit.
    password = tmp[1] if len(tmp) > 1 else ''
    d = {
        'username':username,
        'password':password
    }
    return(d)
def packup_unpw(d):
    '''
    Inverse of unpack_unpw: join username and optional password.

    packup_unpw({'username': 'admin', 'password': 'secret'}) -> 'admin:secret'
    packup_unpw({'username': 'admin', 'password': ''})       -> 'admin'
    '''
    # Use .get with defaults instead of inserting keys: the old code mutated
    # the caller's dict as a side effect.
    username = d.get('username', '')
    password = d.get('password', '')
    if(password == ''):
        return(username)
    return(username + ':' + password)
#
def unpack_netloc(netloc,**kwargs):
    '''
    Split a netloc string "user[:pass]@host[:port]" into a dict.

    With dehost=True (default) the host is further split into hostname/port;
    with dehost=False it is kept whole under the 'host' key.

    netloc = 'admin:secret@local-domain.com:8000'
    unpack_netloc(netloc)
    unpack_netloc(netloc,dehost=False)
    netloc = 'local-domain.com'
    unpack_netloc(netloc)
    '''
    creds = ''
    host = netloc
    if('@' in netloc):
        parts = netloc.split('@')
        creds = parts[0]
        host = parts[1]
    username = ''
    password = ''
    if(creds != ''):
        if(':' in creds):
            cparts = creds.split(':')
            username = cparts[0]
            password = cparts[1]
        else:
            username = creds
    d = {}
    d['username'] = username
    d['password'] = password
    dehost = kwargs['dehost'] if('dehost' in kwargs) else True
    if(dehost):
        hp = unpack_host(host)
        d['hostname'] = hp['hostname']
        d['port'] = hp['port']
    else:
        d['host'] = host
    return(d)
#hidden
def _cond_packup_netloc(d,attr):
'''
'''
cond1 = (attr in d)
if(cond1):
cond2 = (d[attr] != '')
if(cond2):
return(True)
else:
return(False)
else:
return(False)
def packup_netloc(d,**kwargs):
    '''
    Inverse of unpack_netloc: assemble "user[:pass]@host[:port]".

    Accepts either 'hostname'/'port' keys or a pre-joined 'host' key.

    d = {'username': 'admin', 'password': '<PASSWORD>', 'hostname': 'local-domain.com', 'port': '8000'}
    packup_netloc(d)
    d = {'username': 'admin', 'password': '<PASSWORD>', 'host': 'local-domain.com:8000'}
    packup_netloc(d)
    d = {'hostname': 'local-domain.com'}
    packup_netloc(d)
    d = {'username': '', 'password': '', 'hostname': '', 'port': '8000'}
    packup_netloc(d)
    '''
    # keep_portsp: keep the leading ':' when only a port (no hostname) is
    # present, e.g. ':8000' instead of '8000'.
    if('keep_portsp' in kwargs):
        keep_portsp = kwargs['keep_portsp']
    else:
        keep_portsp = False
    if(_cond_packup_netloc(d,'username')):
        username = d['username']
    else:
        username = ''
    if(_cond_packup_netloc(d,'password')):
        password = d['password']
    else:
        password = ''
    unpw = packup_unpw({'username':username,'password':password})
    #for onlyport info secarino
    # cond_hostname_port becomes True in the "port only, no hostname" case.
    cond_hostname_port = False
    # NOTE: '|' is a bitwise-or of two bools here; both sides are always
    # evaluated (no short-circuit), which is intentional-looking but unusual.
    if(_cond_packup_netloc(d,'hostname') | _cond_packup_netloc(d,'port')):
        # NOTE(review): if only 'port' is set and the 'hostname' key is
        # entirely absent, this raises KeyError — confirm callers always
        # provide a 'hostname' key (possibly empty) alongside 'port'.
        hostname = d['hostname']
        if(_cond_packup_netloc(d,'port')):
            host = hostname + PORTSP + d['port']
        else:
            host = hostname
        if(_cond_packup_netloc(d,'hostname')):
            pass
        else:
            cond_hostname_port = True
    elif(_cond_packup_netloc(d,'host')):
        host = d['host']
    else:
        print("notice!: maybe either hostname or host needed")
        host = ''
    if(host == ''):
        host_sp = ''
    elif(cond_hostname_port):
        #strip PORTSP
        # Only a port was given: host is ':8000'; drop the leading ':'
        # unless keep_portsp asks to preserve it.
        if(keep_portsp):
            host = host
        else:
            host = host[1:]
        host_sp = HOSTSP
    else:
        host_sp = HOSTSP
    # The '@' separator only makes sense when credentials are present.
    if(unpw == ''):
        host_sp = ''
    else:
        pass
    netloc = unpw + host_sp + host
    return(netloc)
# nine-element tuple conventions:
#   nint urlnint url-nine-elements-tuple
#   nind urlnind url-nine-elements-dict
# Canonical order of the nine URL components (netloc expanded into
# username/password/hostname/port).
NINL = ['scheme', 'username', 'password','hostname','port','path', 'params', 'query', 'fragment']
# Bidirectional mapping: component name <-> index.
NINMD = {
    0:"scheme",
    1:"username",
    2:"password",
    3:"hostname",
    4:"port",
    5:"path",
    6:"params",
    7:"query",
    8:"fragment",
    "scheme":0,
    "username":1,
    "password":2,
    "hostname":3,
    "port":4,
    "path":5,
    "params":6,
    "query":7,
    "fragment":8
}
# Separators that precede each component when re-assembling a URL string.
NINSPL = [NETLOCSP,PASSWDSP,HOSTSP,PORTSP,PATHSP,PARAMSP,QUERYSP,FRAGSP]
def nin_to_attrname(attr):
    '''
    Normalize *attr* (an index 0-8 or a case-insensitive component name)
    to one of the nine URL component names; print a notice and return
    None when it is unknown.
    '''
    name = NINMD[attr] if type(attr) is int else str.lower(attr)
    if(name in NINL):
        return(name)
    print('attribute {0} not exist in url-nine-elements-tuple'.format(name))
    return(None)
def nin_u2d(url):
    '''
    Parse *url* into a nine-element dict (netloc expanded).

    url = 'http://www.baidu.com/index.php;params?username=query#frag'
    nin_u2d(url)
    url = 'http://admin:secret@local-domain.com:8000/path?q=123#anchor'
    nin_u2d(url)
    '''
    d = six_u2d(url)
    netloc = d['netloc']
    # Expand netloc into username/password/hostname/port.
    nld = unpack_netloc(netloc,dehost=True)
    # eded._update merges nld into d; assumed to behave like dict.update —
    # TODO confirm against the edict package.
    rslt = eded._update(d,nld)
    del rslt['netloc']
    # Re-order keys into the canonical NINL order.
    rslt = eded._reorder_via_klist(rslt,NINL)
    return(rslt)
def nin_d2u(d,**kwargs):
    '''
    Build a URL string from a nine-element dict (inverse of nin_u2d).
    Extra kwargs (e.g. keep_portsp) are forwarded to packup_netloc.

    d = {'scheme': 'http', 'path': '/index.php', 'params': 'params', 'query': 'username=query', 'fragment': 'frag', 'username': '', 'password': '', 'hostname': 'www.baidu.com', 'port': ''}
    nin_d2u(d)
    '''
    # Pull out the four netloc sub-components and re-assemble them.
    nlocd = eded._select_norecur(d,'username','password','hostname','port')
    netloc = packup_netloc(nlocd,**kwargs)
    # eded._complement presumably returns the remaining (six-style) entries
    # of d not present in nlocd — TODO confirm against the edict package.
    sixd = eded._complement(nlocd,d)
    sixd['netloc'] = netloc
    url = six_d2u(sixd)
    return(url)
def nin_u2t(url):
    '''
    Parse *url* into a nine-element tuple in NINL order.

    url = 'http://www.baidu.com/index.php;params?username=query#frag'
    nin_u2t(url)
    url = 'http://admin:secret@local-domain.com:8000/path?q=123#anchor'
    nin_u2t(url)
    '''
    d = nin_u2d(url)
    # Look up each NINL name in d; eded._getitem2 is assumed to be a
    # (key, dict) -> value accessor used with array_map.
    l = elel.array_map(NINL,eded._getitem2,d)
    t = tuple(l)
    return(t)
def nin_t2u(t,**kwargs):
    '''
    Join a nine-element tuple back into a URL string.

    t = ('http', '', '', 'www.baidu.com', '', '/index.php', 'params', 'username=query', 'frag')
    nin_t2u(t)
    t = ('http', 'admin', 'secret', 'local-domain.com', '8000', '/path', '', 'q=123', 'anchor')
    nin_t2u(t)
    '''
    # tuple -> dict -> url; kwargs (e.g. keep_portsp) flow to packup_netloc.
    return(nin_d2u(nin_t2d(t),**kwargs))
def nin_d2t(d):
    '''
    Convert a nine-element dict to a tuple in NINL order.

    d = {'scheme': 'http', 'path': '/index.php', 'params': 'params', 'query': 'username=query', 'fragment': 'frag', 'username': '', 'password': '', 'hostname': 'www.baidu.com', 'port': ''}
    nin_d2t(d)
    '''
    # Map each NINL name to its value in d (eded._getitem2 as accessor).
    arr = elel.array_map(NINL,eded._getitem2,d)
    t = tuple(arr)
    return(t)
def nin_t2d(t):
    '''
    Convert a nine-element tuple (NINL order) to a dict.

    t = ('http', '', '', 'www.baidu.com', '', '/index.php', 'params', 'username=query', 'frag')
    nin_t2d(t)
    '''
    # eded.kvlist2d presumably zips the NINL key list with the value tuple
    # into a dict — TODO confirm against the edict package.
    d = eded.kvlist2d(NINL,t)
    return(d)
def nin_set(url,*args,**kwargs):
'''
url = 'http://www.baidu.com/index.php;params?username=query#frag'
nin_set(url,hostname='www.google.com',fragment='newfrag')
nin_set(url,hostname='www.google.com',port='443',fragment='newfrag')
nin_set(url,username='admin',hostname='www.google.com',port='443',fragment='newfrag')
url = 'http://www.baidu.com/index.php;params?username=query#frag'
nin_set(url,'hostname','www.google.com','fragment','newfrag')
url = 'http://www.baidu.com/index.php;params?username=query#frag'
nin_set(url,3,'www.google.com',8,'newfrag')
'''
d = nin_u2d(url)
args = list(args)
lngth = args.__len__()
if(lngth >= 2):
lngth = lngth - (lngth%2)
args = args[:lngth]
tmp = elel.divide(args,2)
for kv in tmp:
| |
self).__init__(**kwargs)
self.value = value
self.location = None
self._type = 'News'
class NewsArticle(Article):
    """Defines a news article.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar name: The name of the thing represented by this object.
    :vartype name: str
    :ivar url: The URL to get more information about the thing represented by
     this object.
    :vartype url: str
    :ivar image:
    :vartype image:
     ~azure.cognitiveservices.search.websearch.models.ImageObject
    :ivar description: A short description of the item.
    :vartype description: str
    :ivar bing_id: An ID that uniquely identifies this item.
    :vartype bing_id: str
    :ivar thumbnail_url: The URL to a thumbnail of the item.
    :vartype thumbnail_url: str
    :ivar provider: The source of the creative work.
    :vartype provider:
     list[~azure.cognitiveservices.search.websearch.models.Thing]
    :ivar text:
    :vartype text: str
    :ivar word_count: The number of words in the text of the Article.
    :vartype word_count: int
    """

    # Constraints enforced by the msrest (de)serializer; readonly fields are
    # populated by the service and stripped from outgoing requests.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'web_search_url': {'readonly': True},
        'name': {'readonly': True},
        'url': {'readonly': True},
        'image': {'readonly': True},
        'description': {'readonly': True},
        'bing_id': {'readonly': True},
        'thumbnail_url': {'readonly': True},
        'provider': {'readonly': True},
        'text': {'readonly': True},
        'word_count': {'readonly': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type tags.
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'image': {'key': 'image', 'type': 'ImageObject'},
        'description': {'key': 'description', 'type': 'str'},
        'bing_id': {'key': 'bingId', 'type': 'str'},
        'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
        'provider': {'key': 'provider', 'type': '[Thing]'},
        'text': {'key': 'text', 'type': 'str'},
        'word_count': {'key': 'wordCount', 'type': 'int'},
    }

    def __init__(self, **kwargs) -> None:
        super(NewsArticle, self).__init__(**kwargs)
        # Polymorphic discriminator used by the service.
        self._type = 'NewsArticle'
class Places(SearchResultsAnswer):
    """Defines a local entity answer.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar follow_up_queries:
    :vartype follow_up_queries:
     list[~azure.cognitiveservices.search.websearch.models.Query]
    :ivar query_context:
    :vartype query_context:
     ~azure.cognitiveservices.search.websearch.models.QueryContext
    :ivar total_estimated_matches: The estimated number of webpages that are
     relevant to the query. Use this number along with the count and offset
     query parameters to page the results.
    :vartype total_estimated_matches: long
    :ivar is_family_friendly:
    :vartype is_family_friendly: bool
    :param value: Required. A list of local entities, such as restaurants or
     hotels.
    :type value: list[~azure.cognitiveservices.search.websearch.models.Thing]
    """

    # Constraints enforced by the msrest (de)serializer.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'web_search_url': {'readonly': True},
        'follow_up_queries': {'readonly': True},
        'query_context': {'readonly': True},
        'total_estimated_matches': {'readonly': True},
        'is_family_friendly': {'readonly': True},
        'value': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type tags.
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
        'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
        'total_estimated_matches': {'key': 'totalEstimatedMatches', 'type': 'long'},
        'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
        'value': {'key': 'value', 'type': '[Thing]'},
    }

    def __init__(self, *, value, **kwargs) -> None:
        super(Places, self).__init__(**kwargs)
        self.value = value
        # Polymorphic discriminator used by the service.
        self._type = 'Places'
class Query(Model):
    """Defines a search query.

    Server-populated variables (everything except *text*) are ignored when
    sending a request.

    :param text: Required. The query string to use as the query term in a
     new search request.
    :type text: str
    :ivar display_text: Display version of the query term; may contain
     hit-highlighting characters when highlighting is enabled.
    :vartype display_text: str
    :ivar web_search_url: URL that takes the user to the Bing search results
     page for the query. Only related search results include this field.
    :vartype web_search_url: str
    :ivar search_link:
    :vartype search_link: str
    :ivar thumbnail:
    :vartype thumbnail:
     ~azure.cognitiveservices.search.websearch.models.ImageObject
    """

    _validation = {
        'text': {'required': True},
        'display_text': {'readonly': True},
        'web_search_url': {'readonly': True},
        'search_link': {'readonly': True},
        'thumbnail': {'readonly': True},
    }

    _attribute_map = {
        'text': {'key': 'text', 'type': 'str'},
        'display_text': {'key': 'displayText', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'search_link': {'key': 'searchLink', 'type': 'str'},
        'thumbnail': {'key': 'thumbnail', 'type': 'ImageObject'},
    }

    def __init__(self, *, text: str, **kwargs) -> None:
        super(Query, self).__init__(**kwargs)
        self.text = text
        # Server-populated fields start out unset.
        for attr in ('display_text', 'web_search_url', 'search_link',
                     'thumbnail'):
            setattr(self, attr, None)
class QueryContext(Model):
    """Defines the query context that Bing used for the request.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param original_query: Required. The query string as specified in the
     request.
    :type original_query: str
    :ivar altered_query: The query string used by Bing to perform the query.
     Bing uses the altered query string if the original query string contained
     spelling mistakes. For example, if the query string is "saling downwind",
     the altered query string will be "sailing downwind". This field is
     included only if the original query string contains a spelling mistake.
    :vartype altered_query: str
    :ivar alteration_override_query: The query string to use to force Bing to
     use the original string. For example, if the query string is "saling
     downwind", the override query string will be "+saling downwind". Remember
     to encode the query string which results in "%2Bsaling+downwind". This
     field is included only if the original query string contains a spelling
     mistake.
    :vartype alteration_override_query: str
    :ivar adult_intent: A Boolean value that indicates whether the specified
     query has adult intent. The value is true if the query has adult intent;
     otherwise, false.
    :vartype adult_intent: bool
    :ivar ask_user_for_location: A Boolean value that indicates whether Bing
     requires the user's location to provide accurate results. If you specified
     the user's location by using the X-MSEdge-ClientIP and X-Search-Location
     headers, you can ignore this field. For location aware queries, such as
     "today's weather" or "restaurants near me" that need the user's location
     to provide accurate results, this field is set to true. For location aware
     queries that include the location (for example, "Seattle weather"), this
     field is set to false. This field is also set to false for queries that
     are not location aware, such as "best sellers".
    :vartype ask_user_for_location: bool
    :ivar is_transactional:
    :vartype is_transactional: bool
    """

    # Constraints enforced by the msrest (de)serializer.
    _validation = {
        'original_query': {'required': True},
        'altered_query': {'readonly': True},
        'alteration_override_query': {'readonly': True},
        'adult_intent': {'readonly': True},
        'ask_user_for_location': {'readonly': True},
        'is_transactional': {'readonly': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type tags.
    _attribute_map = {
        'original_query': {'key': 'originalQuery', 'type': 'str'},
        'altered_query': {'key': 'alteredQuery', 'type': 'str'},
        'alteration_override_query': {'key': 'alterationOverrideQuery', 'type': 'str'},
        'adult_intent': {'key': 'adultIntent', 'type': 'bool'},
        'ask_user_for_location': {'key': 'askUserForLocation', 'type': 'bool'},
        'is_transactional': {'key': 'isTransactional', 'type': 'bool'},
    }

    def __init__(self, *, original_query: str, **kwargs) -> None:
        super(QueryContext, self).__init__(**kwargs)
        self.original_query = original_query
        # Server-populated fields start out unset.
        self.altered_query = None
        self.alteration_override_query = None
        self.adult_intent = None
        self.ask_user_for_location = None
        self.is_transactional = None
class RankingRankingGroup(Model):
    """Defines a search results group, such as mainline.

    All required parameters must be populated in order to send to Azure.

    :param items: Required. A list of search result items to display in the
     group.
    :type items:
     list[~azure.cognitiveservices.search.websearch.models.RankingRankingItem]
    """

    _validation = {'items': {'required': True}}

    _attribute_map = {
        'items': {'key': 'items', 'type': '[RankingRankingItem]'},
    }

    def __init__(self, *, items, **kwargs) -> None:
        super(RankingRankingGroup, self).__init__(**kwargs)
        self.items = items
class RankingRankingItem(Model):
"""Defines a search result item to display.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param answer_type: Required. The answer that contains the item to
display. Use the type to find the answer in the SearchResponse object. The
type is the name of a SearchResponse field. Possible values include:
'WebPages', 'Images', 'SpellSuggestions', 'News', 'RelatedSearches',
'Videos', 'Computation', 'TimeZone'. Default value: "WebPages" .
:type answer_type: str or
~azure.cognitiveservices.search.websearch.models.AnswerType
:ivar result_index: A zero-based index of the item in the answer.If the
item does not include this field, display all | |
<reponame>avilella/modPhred<gh_stars>1-10
#!/usr/bin/env python3
desc="""Generate plots based on var.tsv.gz (mod_report.py output).
More info at: https://github.com/lpryszcz/modPhred
Dependencies: numpy, pandas, matplotlib
"""
epilog="""Author: <EMAIL>
Barcelona, 23/06/2019
"""
import gzip, os, pickle, sys
from datetime import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend
import matplotlib.pyplot as plt
import seaborn as sns
from guppy_encode import VERSION, HEADER, load_info, logger
def get_positions_from_bed(fname):
    """Return the list of modified positions ("chrom:end" strings) from a BED file.

    Comment lines (starting with '#') and empty lines are skipped.
    """
    positions = []
    with open(fname) as handle:
        for line in handle:
            body = line[:-1]
            if not body or body.startswith('#'):
                continue
            fields = body.split('\t')
            positions.append("%s:%s" % (fields[0], fields[2]))
    return positions
def plot_venn(outfn, beds, names=[], title=""):
    """Plot a Venn diagram comparing modified positions of 2-6 BED files.

    Saves the figure to outfn when given, otherwise shows it interactively.
    """
    import venn
    # pick the renderer matching the number of BED files
    renderers = {2: venn.venn2, 3: venn.venn3, 4: venn.venn4,
                 5: venn.venn5, 6: venn.venn6}
    if len(beds) not in renderers:
        logger("[ERROR] Please provide between 2 and 6 BED files\n")
        sys.exit(1)
    func = renderers[len(beds)]
    # fall back to file names when no (or mismatched) names were provided
    if len(names) != len(beds):
        names = beds
    # load positions & plot
    labels = venn.get_labels([get_positions_from_bed(bed) for bed in beds])
    fig, ax = func(labels, names=names)
    # add title
    if title:
        plt.title(title)
    # and save or visualise plot
    if outfn:
        plt.savefig(outfn)
    else:
        plt.show()
def load_bed(fname):
    """Return regions from BED file. If not a file, try to unload region(s) from a string"""
    regions = []
    if os.path.isfile(fname) or os.path.islink(fname):
        # BED-like file: either >=3 tab-separated columns (chrom start end)
        # or a single "chrom:start-end" column; commas (thousand separators) dropped
        for line in open(fname):
            body = line[:-1]
            if body.startswith('#') or not body:
                continue
            fields = body.replace(',', '').split('\t')
            if len(fields) >= 3:
                ref, start, end = fields[:3]
            else:
                ref, span = fields[0].split(':')
                start, end = span.split('-')
            regions.append((ref, int(start), int(end)))
    else:
        # whitespace-separated "chrom:start-end" region string(s)
        for token in fname.split():
            if not token:
                continue
            ref, span = token.replace(',', '').split(':')
            start, end = span.split('-')
            regions.append((ref, int(start), int(end)))
    return regions
def plot_scatter(infn, ext="png", logger=logger, data=False, region="", samples=[],
                 features=["depth", "basecall_accuracy", "mod_frequency", "median_mod_prob"]):
    """Plot pairwise scatter plots (seaborn pairplot) of the given features.

    Parameters
    ----------
    infn : str
        mod_report.py output table (var.tsv.gz style).
    ext : str
        Output image format/extension.
    logger : callable
        Logging function.
    data : bool or DataFrame
        Pre-loaded table; when False, infn is loaded.
    region : str
        Optional "chrom" or "chrom:start-end" restriction.
    samples : list
        Optional sample names used to subset plotted columns.
    features : list
        Feature column suffixes, one pairplot per feature.
    """
    # make sure input file exists
    if not os.path.isfile(infn):
        logger("[mod_plot][ERROR] File %s does not exists! Have you run mod_report.py?\n"%infn)
        sys.exit(1)
    # get outdir
    outdir = os.path.join(os.path.dirname(infn), "plots")
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    logger("Saving plots for %s to %s ...\n"%(", ".join(features), outdir))
    # parse data
    if isinstance(data, bool):
        logger("Loading %s ...\n"%infn)
        data = pd.read_csv(infn, sep="\t", header=len(HEADER.split('\n'))-2, index_col=False,
                           dtype={"chr": object, "pos": int})
    # limit by region
    if region:
        logger(" limiting to %s ...\n"%region)
        chrom, s, e = region, 0, 0
        # FIX: require ':' (and '-') before splitting; the old `"-" in region`
        # guard crashed on chromosome names containing dashes
        if ":" in region and "-" in region:
            chrom, se = region.split(':')
            s, e = map(int, se.split("-"))
        data = data[data.chr==chrom]
        if e:
            # FIX: `data[s<=data.pos<=e]` was a chained comparison on a pandas
            # Series, which raises ValueError (ambiguous truth value)
            data = data[(data.pos>=s)&(data.pos<=e)]
    if data.shape[0]<1:
        logger("[mod_plot][ERROR] %s row(s) found in %s\n"%(data.shape[0], infn))
        return
    # rename .bam columns to basename and split it at _ with \n
    data.columns = [os.path.basename(c).replace("_","\n") if ".bam" in c else c for c in data.columns]
    # plot features
    for feature in features:
        # match feature by replacing _ with \n (column names were reformatted above)
        cols = list(filter(lambda c: feature.replace("_","\n") in c, data.columns))
        # limit by sample
        if samples:
            scols = set(c for c in cols for name in samples if name.replace("_","\n") in c)
            cols = list(sorted(scols))
        g = sns.pairplot(data, vars=cols, height=4, hue='mod', diag_kind='kde',
                         plot_kws={'alpha': 0.1, 's': 3, })
        outfn = os.path.join(outdir, "%s.%s"%(feature, ext))
        # add figure title
        g.fig.suptitle("%s\n%s"%(feature, infn), size=16)
        g.fig.subplots_adjust(top=.90, right=0.95)
        # make legend in top right corner and increase marker size
        g._legend.set_bbox_to_anchor((0.10, 0.95))
        g._legend.set_title("")
        for lh in g._legend.legendHandles: lh._sizes = [20]
        # all features except depth are fractions, so fix axes to 0-1
        if feature!="depth":
            for r in g.axes:
                for ax in r:
                    ax.set_xlim((0, 1))
                    ax.set_ylim((0, 1))
        # save
        g.fig.savefig(outfn)
def plot_regions(infn, bed, ext="svg", logger=logger, data=False, colors="brcmyg"):
    """Generate frequency plots for given regions
    If data is given, it won't be loaded again.

    :param infn: mod_report.py output table (var.tsv.gz style)
    :param bed: BED file name or whitespace-separated "chrom:start-end" string(s)
    :param ext: output image format extension
    :param data: pre-loaded DataFrame, or False to load infn
    :param colors: one colour character per modification type
    """
    # make sure input file exists
    if not os.path.isfile(infn):
        logger("[mod_plot][ERROR] File %s does not exists! Have you run mod_report.py?\n"%infn)
        sys.exit(1)
    # get outdir
    outdir = os.path.join(os.path.dirname(infn), "plots")
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    # load regions to plot
    regions = load_bed(bed)#; print(regions)
    logger("Saving plots for %s region(s) to %s ...\n"%(len(regions), outdir))
    # parse data
    if isinstance(data, bool):
        logger("Loading %s ...\n"%infn)
        data = pd.read_csv(infn, sep="\t", header=len(HEADER.split('\n'))-2, index_col=False,
                           dtype={"chr": object, "pos": int})
    if data.shape[0]<1:
        logger("[mod_plot][ERROR] %s row(s) found in %s\n"%(data.shape[0], infn))
        return
    # get uniq mods
    mods = data["mod"].unique()
    metrics = ['depth', 'basecall_accuracy', 'mod_frequency', 'median_mod_prob']
    # one column per sample ends with each metric suffix; sample names come
    # from the column prefixes (get_sample_name is defined elsewhere in this file)
    sample_names = [get_sample_name(n) for n in data.columns if n.endswith(metrics[0])]
    metric2cols = {m: [c for c in data.columns if c.endswith(m)] for m in ['mod_frequency', ]}#; print(metric2cols)
    logger(" %s samples and %s modifications: %s\n"%(len(sample_names), len(mods), ", ".join(mods)))
    metric = 'mod_frequency'
    # plot regions
    for ref, s, e in regions:
        #df = data[(data.chr==ref)&(data.pos>=s)&(data.pos<=e)]
        # row mask: all three conditions must hold (chromosome + position range)
        df = data[np.all((data.chr==ref, data.pos>=s, data.pos<=e), axis=0)]
        if df.shape[0]<1:
            logger("[mod_plot][ERROR] No modifications in %s:%s-%s\n"%(ref, s, e))
            continue
        mods = df["mod"].unique()
        logger(" %s:%s-%s with %s modifications: %s\n"%(ref, s, e, len(mods), ", ".join(mods)))
        #return
        # one axes row per sample; assumes len(sample_names) > 1 so that
        # plt.subplots returns an iterable of axes -- TODO confirm for 1 sample
        fig, axes = plt.subplots(nrows=len(sample_names), ncols=1, sharex="all", sharey="all",
                                 figsize=(20, 2+1*len(sample_names))) #20, 12 for 2 samples
        fig.suptitle("%s:%s-%s"%(ref, s, e), fontsize=12)
        labels = []
        # '+' strand plotted upward (norm=1), '-' strand mirrored downward (norm=-1)
        for strand, norm in zip("+-", (1, -1)):
            for ax, col, name in zip(axes, metric2cols[metric], sample_names):
                for color, mod in zip(colors, mods):
                    selection = (df.strand==strand)&(df["mod"]==mod)
                    ax.bar(df[selection].pos, norm*df[selection][col], color=color, label=mod)
                if strand:
                    # NOTE(review): "+" and "-" are both truthy, so this always
                    # runs; presumably meant to set the title only once -- confirm
                    ax.set_title(name.replace('\n', '_')) #col)
                ax.set_ylabel("%s\n[on +/- strand]"%metric)
        # set limits (axes are shared, so setting the last axis applies to all)
        ax.set_xlim(s, e+1)
        ax.set_ylim(-1, 1)
        ax.set_xlabel("%s position"%ref)
        #https://stackoverflow.com/a/13589144/632242
        # de-duplicate legend entries collected across strands/axes
        handles, labels = plt.gca().get_legend_handles_labels()
        by_label = OrderedDict(zip(labels, handles))
        #plt.legend(by_label.values(), by_label.keys())
        fig.legend(handles=by_label.values(), labels=by_label.keys())
        #fig.tight_layout()
        #plt.show()
        outfn = os.path.join(outdir, "%s:%s-%s.%s"%(ref, s, e, ext))
        fig.savefig(outfn)
def plot_flipflop(sig, trace, read, seq, s=0, n=20, BASE="ACGTZ",
                  colours={'A':'green', 'C':'blue', 'G':'orange', 'T':'red', 'Z':'purple', 'N': 'grey'},
                  move=None):
    """Plot flipflop trace from Fast5.

    Parameters
    ----------
    sig : array
        Raw signal.
    trace : array
        Flip-flop trace table (values scaled 0-255), one row per trace step.
    read : str
        Read name used in the plot title.
    seq : str
        Basecalled sequence.
    s, n : int
        First base and number of bases to plot.
    BASE : str
        Base alphabet matching the trace columns.
    colours : dict
        Base -> matplotlib colour.
    move : array
        Per-trace-step move table (1 where a new base starts). FIX: `move`
        was referenced but absent from the original signature, so every call
        failed with NameError; it is now an explicit (trailing) parameter.
    """
    if move is None:
        raise ValueError("plot_flipflop() requires the `move` table from the Fast5 file")
    # trace-step indices where a new base starts
    pos = np.argwhere(move==1).flatten()
    # raw-signal samples per trace step
    down_sample_factor = round(len(sig) / float(len(trace)))
    # get subset of trace & normalise to 0-1
    trace = trace[pos[s]:pos[s+n]] / 255.
    # get number of bases (columns hold flip states followed by flop states)
    nbase = trace.shape[1] // 2
    # start figure
    fig = plt.figure(figsize=(20, 5))
    # plot squiggle
    ax1 = fig.add_subplot(211)
    plt.title("%s (bases: %s-%s)"%(read, s, s+n))
    plt.ylabel('Normalised signal')
    plt.plot(np.arange(pos[s]*down_sample_factor, pos[s+n]*down_sample_factor),
             sig[pos[s]*down_sample_factor:pos[s+n]*down_sample_factor], color='grey')
    # plot flipflop probabilities on a subplot sharing the time axis
    fig.add_subplot(212, sharex=ax1)
    plt.xlabel('time (samples)')
    plt.ylabel('State probability')
    x2 = down_sample_factor * np.arange(pos[s], pos[s+n])
    for i in range(nbase):
        # flip state drawn solid, flop state dashed, same colour per base
        plt.fill_between(x2, trace[:, i], color=colours[BASE[i]], alpha=0.3)
        plt.fill_between(x2, trace[:, i + nbase], color=colours[BASE[i]], alpha=0.3)
        plt.plot(x2, trace[:, i], color=colours[BASE[i]], label=BASE[i])
        plt.plot(x2, trace[:, i + nbase], color=colours[BASE[i]], linestyle='dashed')
    # add basecalled letters under the signal
    for pi, p in enumerate(pos[s:s+n], s):
        plt.text(p*down_sample_factor, -0.1, seq[pi])
    # add legend
    plt.legend()
    plt.grid()
    plt.show()
def violin_plot(data, title="", axis="", names=[]):
    """Draw a violin plot of *data* onto *axis* (or onto a fresh subplot).

    When no axis is supplied a new subplot is created and shown at the end;
    when an axis is supplied, tick labels are blanked and nothing is shown.
    Returns the axis drawn on.
    """
    standalone = not axis
    ax = plt.subplot() if standalone else axis
    # violin plot
    ax.violinplot(data, range(len(data)), points=20, widths=0.7, bw_method=0.5,
                  showmeans=True, showextrema=True, showmedians=True)
    ax.set_xticks(range(len(data)))
    if not standalone:
        ax.set_xticklabels([" " for x in range(len(data))])
    elif names:
        ax.set_xticklabels(names)
    if title:
        ax.set_title(title)
    if standalone:
        plt.show()
    return ax
def mod_plot(infn, ext="svg", logger=logger, data=False, colors="brcmyg"): #png
"""Generate violin plots
If data is given, it won't be loaded again.
"""
# make sure outfn exists
if not os.path.isfile(infn):
logger("[mod_plot][ERROR] File %s does not exists! Have you run mod_report.py?\n"%infn)
sys.exit(1)
# parse data
if isinstance(data, bool):
logger("Loading %s ...\n"%infn)
data = pd.read_csv(infn, sep="\t", header=len(HEADER.split('\n'))-2, index_col=False)
if data.shape[0]<10:
logger("[mod_plot][ERROR] %s row(s) found in %s\n"%(data.shape[0], infn))
return
# plot
bases = data["mod"].unique()#; print(bases)
metrics = ['depth', 'basecall_accuracy', 'mod_frequency', 'median_mod_prob']
metrics_names = ["Number of reads", "Agreement with reference",
"Frequency of modification", "Median modification probability"]
sample_names = [get_sample_name(n) for n in data.columns if n.endswith(metrics[0])]
fig, axes = plt.subplots(nrows=len(metrics), ncols=len(bases), sharex="col", sharey="row",
figsize=(1.5*len(bases)*len(sample_names), 2+3*len(metrics))) #6, 20
fig.suptitle(infn, fontsize=12)
nans = [float('nan'), float('nan')]
# get max median depth
maxYdepth = 0
for bi, b in enumerate(bases):
# get mask for only median_mod_prob
cols = list(filter(lambda x: x.endswith(metrics[-1]), data.columns))
_data = data[data["mod"]==b].loc[:, cols].to_numpy()
# mask nan before plotting https://stackoverflow.com/a/44306965/632242
mask = ~np.isnan(_data)
for mi, m in enumerate(metrics):
cols = list(filter(lambda x: x.endswith(m), data.columns))
ax = axes[mi, bi] if len(bases)>1 else axes[mi]
_data = data[data["mod"]==b].loc[:, cols].to_numpy()#; print(bi, b, mi, m, _data.shape)
#if _data.sum():
a = ax.violinplot([d[m] if d[m].any() else nans for d, m in zip(_data.T, mask.T)], points=20, widths=0.7,
bw_method=0.5, showextrema=True, showmedians=True) #showmeans=True,
# color samples differently
for pci, pc in enumerate(a['bodies']): pc.set_facecolor(colors[pci%len(colors)])
#pc.set_edgecolor('black')
#pc.set_alpha(1)
ax.set_xticks(range(1, len(cols)+1))
ax.set_xticklabels([" " for x in range(len(cols))])
if not mi:
ax.set_title("%s\n%s positions"%(b, data[data["mod"]==b].shape[0]))
# set depth Y range | |
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a | |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_stats_utils.ipynb (unless otherwise specified).
__all__ = ['cPreProcessing', 'cStationary', 'cErrorMetrics']
# Cell
import numpy as np
import pandas as pd
from scipy.stats import boxcox, pearsonr
from scipy.special import inv_boxcox
from pandas.tseries.frequencies import to_offset
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tools.eval_measures import aic, bic
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
# Cell
class cPreProcessing():
    """
    Parent class.
    Methods for dealing with irregularly spaced or missing data.
    """
    def __init__(self):
        pass

    def fget_regular_times(self, df, timestep):
        """
        Generate dataframe of regularly spaced times (to impute to)
        (From fbprophet/forecaster/make_future_dataframe)

        Parameters
        ----------
        df = [pandas dataframe] with datestamp column "ds"
        timestep = [datetime timedelta object]

        Returns
        -------
        regtimes = [pandas DatetimeIndex] of datetimes regularly spaced at timestep
        """
        # normalize start date to midnight
        start_date = df.ds.min().normalize()
        # round up end date by one extra timestep
        end_date = (df.ds.max() + timestep).normalize()
        # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.date_range.html
        regtimes = pd.date_range(start=start_date, end=end_date, freq=to_offset(timestep))
        return regtimes

    def finterleaf(self, df, impute_times):
        """
        Interleaf dataframe with new prediction times.
        Set values at prediction dates as NaN so can use imputer.

        Parameters
        ----------
        df = [pandas dataframe] with datestamp column "ds"
        impute_times = [pandas DatetimeIndex or dataframe] (format of regtimes)

        Returns
        -------
        dft = pandas dataframe (format for use in fimpute)
        """
        # if impute_times taken from fmake_regular_times()
        if type(impute_times) == pd.core.indexes.datetimes.DatetimeIndex:
            impute_times = pd.DataFrame(impute_times)
            impute_times.columns = ["ds"]
        # set date index (NOTE: mutates the caller's frames in place)
        df.set_index('ds', inplace=True)
        impute_times.set_index('ds', inplace=True)
        # combine (interleaf)
        dft = pd.concat([df, impute_times], sort=True)
        dft.sort_values(by=["ds"], inplace=True)
        # remove duplicate entries, keeping the first (original) value
        dft = dft[~dft.index.duplicated()]
        return dft

    def fimpute(self, df, method="time"):
        """
        Imputation of data to new timestamps with NaN value.

        Parameters
        ----------
        df = dataframe containing original data and NaNs at timestamps for imputation
             (timestamps are the df index)
        method = one of "interp", "mean", "time", "akima"

        Returns
        -------
        dfi = imputed dataframe

        Raises
        ------
        ValueError for an unknown method. (FIX: previously an unrecognised
        method fell through to `return dfi` with dfi unbound -> UnboundLocalError.)
        """
        if method == "interp":
            # interpolates halfway, doesn't account for weighting towards closer time
            dfi = df.interpolate()
        elif method == "mean":
            # missing values given mean value over whole time series
            imp = SimpleImputer(missing_values=np.nan, strategy='mean')
            imp.fit(df)
            dfi = imp.transform(df)
        elif method == "time":
            # linear interpolation weighted by timestamp proximity
            dfi = df.interpolate(method='time')
        elif method == "akima":
            # smoothing
            dfi = df.interpolate(method='akima')
        else:
            raise ValueError("Unknown imputation method: %r" % method)
        return dfi

    def fmake_regular_freq(self, df, timestep, method="time"):
        """
        Interpolate data so regular update frequency throughout dataset.
        (Deal with missing values)
        Some python functions (e.g. seasonal_decompose, AutoArima) require a data "freq" argument
        to determine seasonality. (Can be inferred from df.index.freq, df.index.inferred_freq)
        Such functions require a constant data frequency.

        Parameters
        ----------
        df = irregularly spaced dataframe (with datestamp name "ds")
        timestep = desired update frequency of data (timedelta object)
        method = imputation method (see fimpute)

        Returns
        -------
        dff = imputed regularly spaced [pandas dataframe]
        """
        # 0. preprocess: if dataframe already has time as index: reset and add as column
        if df.index.name == "ds":
            df.reset_index(level=0, inplace=True)
        # 1. include in dataset times where you want to impute (and set to NaN values)
        impute_times = self.fget_regular_times(df, timestep)
        dft = self.finterleaf(df, impute_times)
        # 2. impute with chosen method
        dfi = self.fimpute(dft, method=method)
        # 3. remove original data not at correct timestep
        dff = dfi[dfi.index.isin(impute_times)]
        if dff.index.freq is None:
            dff.index.freq = to_offset(timestep)
        return dff
# Cell
class cStationary(cPreProcessing):
"""
methods for checking whether time series is stationary
methods for transforming the time series into a stationary time series
methods for obtaining (p,q,d) ARIMA parameters
https://towardsdatascience.com/detecting-stationarity-in-time-series-data-d29e0a21e638
"""
    def __init__(self):
        # no state needed; the class only groups related stationarity methods
        pass
def fadf_verbose(self, adftest):
"""
CHECK STATIONARITY.
Print explanation of output of Augmented Dickey-Fuller test.
The Augmented Dickey-Fuller test is a type of statistical test called a unit root test.
The intuition behind a unit root test is that it determines how strongly a time series is defined by a trend.
Parameters
----------
adftest = adfuller(data.y, regression="ct")
Returns
-------
"""
print("""
Augmented Dickey-Fuller:
Null hypothesis: the time series can be represented by a unit root, thus not stationary (has some time-dependent structure)
""")
output = pd.Series(adftest[0:4], index=['Test Statistic','pvalue','#Lags Used','Number of Observations Used'])
for key,value in adftest[4].items():
output['Critical Value ({})'.format(key)] = value
print(output)
if output.pvalue <= 0.05:
print("\nReject the null hypothesis (H0), the data does not have a unit root and IS STATIONARY.")
return True
else:
print("\nFail to reject the null hypothesis (H0), the data has a unit root and is NON-STATIONARY.")
return False
def fkpss_verbose(self, kpsstest):
"""
CHECK STATIONARITY.
Print explanation of output of Kwiatkowski-Phillips-Schmidt-Shin test.
Another test for checking the stationarity of a time series (reversed null hypothesis to ADF).
In KPSS test, to turn ON the stationarity testing around a trend, you need to explicitly pass the regression='ct'.
A major difference between KPSS and ADF tests:
the capability of the KPSS test to check for stationarity in the ‘presence of a deterministic trend’.
Parameters
----------
kpsstest = kpss(data.y, regression="ct")
Returns
-------
"""
print("""
Kwiatkowski-Phillips-Schmidt-Shin:
Null hypothesis: the process is trend stationary
""")
output = pd.Series(kpsstest[0:3], index=['Test Statistic','pvalue','Lags Used'])
for key,value in kpsstest[3].items():
output['Critical Value ({})'.format(key)] = value
print (output)
if output.pvalue <= 0.05:
print("\nReject the null hypothesis (H0), the data has a unit root and is NON-STATIONARY.")
return False
else:
print("\nFail to reject the null hypothesis (H0),the data does not have a unit root and IS STATIONARY. ")
return True
def fstationary_verbose(self, stat_adf, stat_kpss):
"""
CHECK STATIONARITY.
Compare results of adf and kpss tests and advise how to make stationary.
Returns
-------
"""
if (stat_adf is False) and (stat_kpss is False):
print("\nBoth tests conclude that the series is not stationary -> series is not stationary")
return False
elif (stat_adf is True) and (stat_kpss is True):
print("\nBoth tests conclude that the series is stationary -> series is stationary")
return True
elif (stat_adf is False) and (stat_kpss is True):
print("\nKPSS = stationary and ADF = not stationary -> trend stationary, use power tranform to make stationary")
return False
elif (stat_adf is True) and (stat_kpss is False):
print("\nKPSS = not stationary and ADF = stationary -> difference stationary, use differencing transform to make stationary")
return False
def fcheck_stationary(self, y, verbose=True):
"""
CHECK STATIONARITY.
Parameters
----------
y = time series variable, data.y
Returns
-------
stationary status [bool]
"""
#df.dropna()
adftest = adfuller(y, regression="ct")
kpsstest = kpss(y, regression="ct")
if verbose:
stat_adf = self.fadf_verbose(adftest)
stat_kpss = self.fkpss_verbose(kpsstest)
stat = self.fstationary_verbose(stat_adf, stat_kpss)
return stat
    def fdecompose(self, df, model="additive"):
        """
        CHECK STATIONARITY.
        Seasonal decomposition using moving averages
        https://www.statsmodels.org/stable/generated/statsmodels.tsa.seasonal.seasonal_decompose.html
        Time series must be regularly spaced (have constant frequency, dff.index.freq or dff.index.inferred_freq)

        Parameters
        ----------
        df = data frame with date index (to infer frequency)
        model = "additive" or "multiplicative" decomposition
        """
        s = seasonal_decompose(df, model=model)
        # plot each decomposition component in its own figure
        trend = s.trend
        plt.plot(trend)
        plt.title("Trend")
        plt.show()
        seasonal = s.seasonal
        plt.plot(seasonal)
        plt.title("Seasonal component")
        plt.show()
        resid = s.resid
        plt.plot(resid)
        plt.title("Residuals")
        plt.show()
    def fcheck_density(self, y):
        """
        CHECK STATIONARITY.
        Plot histogram and density trend (check gaussianity)

        Parameters
        ----------
        y = time series values (pandas Series; .plot(kind='kde') is used)
        """
        plt.figure(1)
        # top panel: histogram of the raw values
        plt.subplot(211)
        plt.hist(y)
        plt.title("Data Distribution")
        # bottom panel: kernel density estimate of the same data
        plt.subplot(212)
        y.plot(kind='kde')
        plt.show()
    def fcheck_lag(self, y):
        """
        CHECK STATIONARITY.
        Plot lag scatter, autocorrelation and partial autocorrelation functions
        For differencing and establishing (p,q,d) values for ARIMA

        Parameters
        ----------
        y = time series values (pandas Series)
        """
        # lag-1 scatter: strong diagonal structure suggests autocorrelation
        plt.figure()
        pd.plotting.lag_plot(y)
        plt.title("Lag-1 plot")
        plt.plot()
        plt.figure()
        pd.plotting.autocorrelation_plot(y)
        plt.title("Autocorrelation")
        plt.plot()
        # contains confidence interval:
        # correlation values outside of this code are very likely a correlation and not a statistical fluke
        plot_acf(y)
        plot_pacf(y)
def fdifferencing(self, df, interval=1):
"""
MAKE STATIONARY. (difference stationary)
adf and kpss can give the d value required by ARIMA
Make series stationary: In order to satisfy the assumption, it is necessary to make the series stationary.
This would include checking the stationarity of the series and performing required transformations
Determine d value: For making the series stationary, the number of times the difference operation was
performed will be taken as the d value.
The auro_arima function works by conducting differencing tests
(i.e., Kwiatkowski–Phillips–Schmidt–Shin, Augmented Dickey-Fuller or Phillips–Perron)
to determine the order of differencing, d. Canova-Hansen test for seasonal stability.
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.diff.html
"""
# df.dropna()
return df.diff(periods=interval)
def fboxcox(self, df):
"""
MAKE STATIONARY. (trend stationary)
[https://mode.com/example-gallery/forecasting_prophet_python_cookbook/]
Often in forecasting, you’ll explicitly choose a specific type of power transform to apply to the data
to remove noise before feeding the data into a forecasting model (e.g. a log | |
<reponame>bgraedel/arcos4py
"""Module to track and detect collective events.
Example:
>>> from arcos4py.tools import detectCollev
>>> ts = detectCollev(data)
>>> events_df = ts.run()
"""
from typing import Union
import numpy as np
import pandas as pd
from scipy.spatial import KDTree
from sklearn.cluster import DBSCAN
from ._errors import columnError, epsError, minClSzError, noDataError, nPrevError
class detectCollev:
"""Identifies and tracks collective signalling events.
Requires binarized measurement column.
Makes use of the dbscan algorithm,
applies this to every timeframe and subsequently connects
collective events between frames located within eps distance of each other.
Attributes:
input_data (DataFrame): Input data to be processed. Must contain a binarized measurement column.
eps (float): The maximum distance between two samples for one to be considered as in
the neighbourhood of the other.
This is not a maximum bound on the distances of points within a cluster.
Value is also used to connect collective events across multiple frames.
minClSz (int): Minimum size for a cluster to be identified as a collective event.
nPrev (int): Number of previous frames the tracking
algorithm looks back to connect collective events.
posCols (list): List of position columns contained in the data.
Must at least contain one
frame_column (str): Indicating the frame column in input_data.
id_column (str): Indicating the track id/id column in input_data.
bin_meas_column (str): Indicating the bin_meas_column in input_data or None.
clid_column (str): Indicating the column name containing the ids of collective events.
"""
def __init__(
self,
input_data: pd.DataFrame,
eps: float = 1,
minClSz: int = 1,
nPrev: int = 1,
posCols: list = ["x"],
frame_column: str = 'time',
id_column: Union[str, None] = None,
bin_meas_column: Union[str, None] = 'meas',
clid_column: str = 'clTrackID',
) -> None:
"""Constructs class with input parameters.
Arguments:
input_data (DataFrame): Input data to be processed. Must contain a binarized measurement column.
eps (float): The maximum distance between two samples for one to be considered as in
the neighbourhood of the other.
This is not a maximum bound on the distances of points within a cluster.
Value is also used to connect collective events across multiple frames.
minClSz (int): Minimum size for a cluster to be identified as a collective event.
nPrev (int): Number of previous frames the tracking
algorithm looks back to connect collective events.
posCols (list): List of position columns contained in the data.
Must at least contain one
frame_column (str): Indicating the frame column in input_data.
id_column (str | None): Indicating the track id/id column in input_data, optional.
bin_meas_column (str): Indicating the bin_meas_column in input_data or None.
clid_column (str): Indicating the column name containing the ids of collective events.
"""
# assign some variables passed in as arguments to the object
self.input_data = input_data
self.eps = eps
self.minClSz = minClSz
self.nPrev = nPrev
self.frame_column = frame_column
self.id_column = id_column
self.bin_meas_column = bin_meas_column
self.clid_column = clid_column
self.posCols = posCols
self.columns_input = self.input_data.columns
self.clidFrame = f'{clid_column}.frame'
self.pos_cols_inputdata = [col for col in self.posCols if col in self.columns_input]
# run input checks
self._run_input_checks()
def _check_input_data(self):
"""Checks if input contains data\
raises error if not."""
if self.input_data is None:
raise noDataError("Input is None")
elif self.input_data.empty:
raise noDataError("Input is empty")
def _check_pos_columns(self):
"""Checks if Input contains correct columns\
raises Exception if not."""
if not all(item in self.columns_input for item in self.posCols):
raise columnError("Input data does not have the indicated position columns!")
def _check_frame_column(self):
if self.frame_column not in self.columns_input:
raise columnError("Input data does not have the indicated frame column!")
def _check_eps(self):
"""Checks if eps is greater than 0."""
if self.eps <= 0:
raise epsError("eps has to be greater than 0")
def _check_minClSz(self):
"""Checks if minClSiz is greater than 0."""
if self.minClSz <= 0:
raise minClSzError("Parameter minClSiz has to be greater than 0!")
def _check_nPrev(self):
"""Checks if nPrev is greater than 0."""
if self.nPrev <= 0 and isinstance(self.nPrev, int):
raise nPrevError("Parameter nPrev has to be an integer greater than 0 and an integer!")
def _run_input_checks(self):
"""Run input checks."""
self._check_input_data()
self._check_pos_columns()
self._check_eps()
self._check_minClSz()
self._check_nPrev()
self._check_frame_column()
def _select_necessary_columns(
self, data: pd.DataFrame, frame_col: str, id_col: Union[str, None], pos_col: list, bin_col: Union[str, None]
) -> pd.DataFrame:
"""Select necessary input colums from input data into dataframe.
Arguments:
data (DataFrame): Containing necessary columns.
frame_col (str): Frame column in data.
id_col (str): Id column in data.
pos_col (list): string representation of position columns in data.
bin_col (str): Name of binary column.
Returns:
DataFrame: Filtered columns necessary for calculation.
"""
columns = [frame_col, id_col, bin_col]
columns = [col for col in columns if col]
columns.extend(pos_col)
neccessary_data = data[columns].copy(deep=True)
return neccessary_data
def _filter_active(self, data: pd.DataFrame, bin_meas_col: Union[str, None]) -> pd.DataFrame:
"""Selects rows with binary value of greater than 0.
Arguments:
data (DataFrame): Dataframe containing necessary columns.
bin_meas_col (str|None): Either name of the binary column or None if no such column exists.
Returns:
DataFrame: Filtered pandas DataFrame.
"""
if bin_meas_col is not None:
data = data[data[bin_meas_col] > 0]
return data
def _dbscan(self, x: np.ndarray) -> list:
"""Dbscan method to run and merge the cluster id labels to the original dataframe.
Arguments:
x (np.ndarray): With unique frame and position columns.
collid_col (str): Column to be created containing cluster-id labels.
Returns:
list[np.ndarray]: list with added collective id column detected by DBSCAN.
"""
if x.size:
db_array = DBSCAN(eps=self.eps, min_samples=self.minClSz, algorithm="kd_tree").fit(x[:, 1:])
cluster_labels = db_array.labels_
cluster_list = [id + 1 if id > -1 else np.nan for id in cluster_labels]
return cluster_list
return np.array([])
    def _run_dbscan(self, data: pd.DataFrame, frame: str, clid_frame: str, id_column: Union[str, None]) -> pd.DataFrame:
        """Apply the dbscan method to every group, i.e. every frame.

        Arguments:
            data (DataFrame): Must contain position columns and the frame column.
            frame (str): Name of the frame column in data.
            clid_frame (str): Column to be created with the per-frame cluster ids.
            id_column (str | None): Track-id column used as a secondary sort key.

        Returns:
            DataFrame: Input with the per-frame cluster-id column added; rows
            labelled as noise (NaN id) are dropped.
        """
        # NOTE(review): the guard reads self.id_column while the sort uses the
        # id_column argument -- confirm the two always refer to the same column.
        if self.id_column:
            data = data.sort_values([frame, id_column]).reset_index(drop=True)
        else:
            data = data.sort_values([frame]).reset_index(drop=True)
        # Frame number first, then position columns, as _dbscan expects.
        subset = [frame] + self.pos_cols_inputdata
        data_np = data[subset].to_numpy(dtype=np.float64)
        # Split into one array per frame: split points are the first row index
        # of each new frame value (data is sorted by frame above).
        grouped_array = np.split(data_np, np.unique(data_np[:, 0], axis=0, return_index=True)[1][1:])
        # map dbscan to grouped_array
        out = [self._dbscan(i) for i in grouped_array]
        out_list = [item for sublist in out for item in sublist]
        data[clid_frame] = out_list
        # _dbscan marks noise points with NaN; dropna discards those rows.
        data = data.dropna()
        return data
def _make_db_id_unique(self, db_data: pd.DataFrame, frame: str, clid_frame, clid) -> pd.DataFrame:
"""Make db_scan cluster id labels unique by adding the\
cummulative sum of previous group to next group.
Arguments:
db_data (DataFrame): Returned by _run_dbscan function with non-unique cluster ids.
frame (str): Frame column.
clid_frame (str): Column name of cluster-id per frame.
clid (str): Column name of unique cluster ids to be returned.
Returns:
DataFrame: Dataframe with unique collective events.
"""
db_data_np = db_data[[frame, clid_frame]].to_numpy()
grouped_array = np.split(db_data_np[:, 1], np.unique(db_data_np[:, 0], axis=0, return_index=True)[1][1:])
max_array = [0] + [np.max(i) for i in grouped_array if i.size != 0]
out = [np.add(value, np.cumsum(max_array)[i]) for i, value in enumerate(grouped_array)]
db_gp = np.concatenate(out)
db_data[clid] = db_gp.astype(np.int64)
return db_data
def _nearest_neighbour(
self,
data_a: np.ndarray,
data_b: np.ndarray,
nbr_nearest_neighbours: int = 1,
):
"""Calculates nearest neighbour in from data_a\
to data_b nearest_neighbours in data_b.
Arguments:
data_a (DataFrame): containing position values.
data_b (DataFrame): containing position values.
nbr_nearest_neighbours (int): of the number of nearest neighbours to be calculated.
Returns:
tuple(np.ndarray, np.ndarray): Returns tuple of 2 arrays containing nearest neighbour indices and distances.
"""
kdB = KDTree(data=data_a)
nearest_neighbours = kdB.query(data_b, k=nbr_nearest_neighbours)
return nearest_neighbours
def _link_clusters_between_frames(self, data: pd.DataFrame, frame: str, colid: str) -> pd.DataFrame:
"""Tracks clusters detected with DBSCAN along a frame axis,\
returns tracked collective events as a pandas dataframe.
Arguments:
data (DataFrame): Output from dbscan.
frame (str): Frame column.
colid (str): Colid column.
Returns:
DataFrame: Pandas dataframe with tracked collective ids.
"""
essential_cols = [frame, colid] + self.posCols
data_essential = data[essential_cols]
data_np = data_essential.to_numpy()
data_np_frame = data_np[:, 0]
# loop over all frames to link detected clusters iteratively
for t in np.unique(data_np_frame, return_index=False)[1:]:
prev_frame = data_np[(data_np_frame >= (t - self.nPrev)) & (data_np_frame < t)]
current_frame = data_np[data_np_frame == t]
# only continue if objects were detected in previous frame
if prev_frame.size:
colid_current = current_frame[:, 1]
# loop over | |
<gh_stars>1-10
#!/usr/bin/env python3
"""
does distributed load testing using Locust on NCS instances
"""
# standard library modules
import argparse
import contextlib
import getpass
import json
import logging
import os
import socket
import signal
import subprocess
import sys
import threading
import time
import uuid
# third-party module(s)
import requests
# neocortix modules
import analyzeLtStats
import extractAnsibleRecap
try:
import ncscli.ncs as ncs
except ImportError:
# set system and python paths for default places, since path seems to be not set properly
ncscliPath = os.path.expanduser('~/ncscli')
sys.path.append( ncscliPath )
os.environ["PATH"] += os.pathsep + ncscliPath + os.sep + 'ncscli'
import ncscli.ncs as ncs
logger = logging.getLogger(__name__)
# possible place for globals is this class's attributes
class g_:
    """Namespace class holding process-wide mutable state."""

    # Set to True by sigtermHandler when SIGTERM is received.
    signaled = False
class SigTerm(BaseException):
    """Exception for SIGTERM; currently unused (see sigtermHandler, which sets a flag instead)."""

    #logger.warning( 'unsupported SigTerm exception created')
    pass
def sigtermHandler( sig, frame ):
    """Signal handler: record that SIGTERM arrived so loops can exit gracefully.

    Arguments:
        sig: signal number (unused).
        frame: current stack frame (unused).
    """
    g_.signaled = True
    logger.warning( 'SIGTERM received; will try to shut down gracefully' )
    #raise SigTerm()
def sigtermSignaled():
    """Return True if a SIGTERM has been received (set by sigtermHandler)."""
    return g_.signaled
def boolArg( v ):
    '''use with ArgumentParser add_argument for (case-insensitive) boolean arg'''
    lowered = v.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def scriptDirPath():
    '''returns the absolute path to the directory containing this script'''
    scriptPath = os.path.realpath(__file__)
    return os.path.dirname(scriptPath)
def loadSshPubKey():
    '''return the contents of the user's default public ssh key file'''
    keyPath = os.path.expanduser( '~/.ssh/id_rsa.pub' )
    with open( keyPath ) as keyFile:
        return keyFile.read()
# some port-reservation code adapted from https://github.com/Yelp/ephemeral-port-reserve
def preopen(ip, port):
    ''' open socket with SO_REUSEADDR and listen on it'''
    sock = socket.socket()
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    #logger.info( 'binding ip %s port %d', ip, port )
    sock.bind((ip, int(port)))
    # the connect below deadlocks on kernel >= 4.4.0 unless this arg is greater than zero
    sock.listen(1)
    return sock
def preclose(s):
    '''connect to and close a preopened listening socket'''
    sockname = s.getsockname()
    # get the port into a TIME_WAIT state
    with contextlib.closing(socket.socket()) as peer:
        peer.connect(sockname)
        s.accept()
    s.close()
    # return sockname[1]
def preopenPorts( startPort, maxPort, nPorts ):
    '''preopen a contiguous range of nPorts ports, scanning upward from startPort

    Returns a dict with 'ports' (list of port numbers) and 'sockets' (listening
    sockets, to be preclosed by the caller); an empty dict on failure.
    '''
    sockets = []
    gotPorts = False
    while not gotPorts:
        try:
            for port in range( startPort, startPort+nPorts ):
                logger.info( 'preopening port %d', port )
                sock = preopen( '127.0.0.1', port )
                sockets.append( sock )
            gotPorts = True
        except OSError as exc:
            logger.warning( 'got exception (%s) %s', type(exc), exc, exc_info=False )
            # close any sockets opened during this failed attempt
            # (they were previously abandoned, leaking file descriptors)
            for sock in sockets:
                sock.close()
            startPort += nPorts
            sockets = []
            if startPort >= maxPort:
                break
    results = {}
    if not gotPorts:
        logger.error( 'search for available ports exceeded maxPort (%d)', maxPort )
        return results
    results['ports'] = list( range( startPort, startPort+nPorts ) )
    results['sockets'] = sockets
    return results
def launchInstances_old( authToken, nInstances, sshClientKeyName, filtersJson=None ):
    '''launch NCS instances via the ncs.py command line (deprecated shell-based variant)

    Writes the launch report to launched.json. Returns {} on success; on a
    CalledProcessError returns a dict with 'cloudServerErrorCode' and an empty
    'instancesAllocated' list.
    '''
    results = {}
    # call ncs launch via command-line
    filtersArg = "--filter '" + filtersJson + "'" if filtersJson else " "
    cmd = 'ncs.py sc --authToken %s launch --count %d %s --sshClientKeyName %s --json > launched.json' % \
        (authToken, nInstances, filtersArg, sshClientKeyName )
    try:
        subprocess.check_call( cmd, shell=True, stdout=sys.stderr )
    except subprocess.CalledProcessError as exc:
        logger.error( 'CalledProcessError %s', exc.output )
        #raise # TODO raise a more helpful specific type of error
        results['cloudServerErrorCode'] = exc.returncode
        results['instancesAllocated'] = []
    return results
def launchInstances( authToken, nInstances, sshClientKeyName,
    filtersJson=None, encryptFiles=True ):
    '''launch NCS instances via ncs.py, writing the launch report to launched.json

    Polls the child process and forwards SIGTERM to it if this process was
    signaled, so the launch can shut down gracefully.

    Returns the ncs.py exit code (0 on success), or 99 on unexpected exceptions.
    '''
    returnCode = 13  # fallback in case the polling loop is never completed
    cmd = [
        'ncs.py', 'sc', '--authToken', authToken, 'launch',
        '--encryptFiles', str(encryptFiles),
        '--count', str(nInstances),
        '--sshClientKeyName', sshClientKeyName, '--json'
    ]
    if filtersJson:
        cmd.extend( ['--filter', filtersJson] )
    try:
        # 'with' guarantees the output file is closed even if polling raises
        # (the original code leaked the handle on exceptions)
        with open( 'launched.json', 'w' ) as outFile:
            proc = subprocess.Popen( cmd, stdout=outFile )
            while True:
                proc.poll()  # sets proc.returncode when the child exits
                if proc.returncode is not None:
                    break
                if sigtermSignaled():
                    logger.info( 'signaling ncs')
                    proc.send_signal( signal.SIGTERM )
                    try:
                        logger.info( 'waiting ncs')
                        proc.wait(timeout=60)
                        if proc.returncode:
                            logger.warning( 'ncs return code %d', proc.returncode )
                    except subprocess.TimeoutExpired:
                        logger.warning( 'ncs launch did not terminate in time' )
                time.sleep( 1 )
            returnCode = proc.returncode
    except Exception as exc:
        logger.error( 'exception while launching instances (%s) %s', type(exc), exc, exc_info=True )
        returnCode = 99
    return returnCode
def terminateThese( authToken, inRecs ):
    '''terminate the given instances (inRecs: list of dicts, each with an 'instanceId')'''
    logger.info( 'to terminate %d instances', len(inRecs) )
    iids = [inRec['instanceId'] for inRec in inRecs]
    ncs.terminateInstances( authToken, iids )
def jsonToInv():
    '''convert launched.json into an ansible inventory file launched.inv (via jsonToInv.py)'''
    cmd = 'cat launched.json | jsonToInv.py > launched.inv'
    try:
        subprocess.check_call( cmd, shell=True, stdout=sys.stderr )
    except subprocess.CalledProcessError as exc:
        logger.error( '%s', exc.output )
        raise # TODO raise a more helpful specific type of error
def installPrereqs():
    '''install prerequisites on launched instances via ansible-playbook

    Returns the list of instances on which installation succeeded, as parsed
    from the ansible recap by extractAnsibleRecap.
    '''
    invFilePath = 'launched.inv'
    tempFilePath = 'data/installPrereqsDeb.temp'
    jsonToInv()
    logger.info( 'calling installPrereqsQuicker.yml' )
    # use the module-level scriptDirPath() helper; the original bound a local
    # variable of the same name, shadowing the function
    cmd = 'ANSIBLE_HOST_KEY_CHECKING=False ANSIBLE_DISPLAY_FAILED_STDERR=yes ansible-playbook %s/installPrereqsQuicker.yml -i %s | tee data/installPrereqsDeb.temp; wc installed.inv' \
        % (scriptDirPath(), invFilePath)
    try:
        exitCode = subprocess.call( cmd, shell=True, stdout=subprocess.DEVNULL )
        if exitCode:
            logger.warning( 'ansible-playbook installPrereqs returned exit code %d', exitCode )
    except subprocess.CalledProcessError as exc:
        logger.error( '%s', exc.output )
        raise # TODO raise a more helpful specific type of error
    installerRecap = extractAnsibleRecap.extractRecap( tempFilePath )
    wellInstalled = extractAnsibleRecap.getGoodInstances( installerRecap )
    sys.stderr.flush()
    return wellInstalled
def startWorkers( victimUrl, masterHost, dataPorts ):
    '''start locust worker processes on the installed instances via ansible

    Arguments:
        victimUrl: URL of the host under test.
        masterHost: address of the locust master.
        dataPorts: list of ports; dataPorts[0] is used as the master port.
    '''
    cmd = 'ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook %s/startWorkers.yml -e "victimUrl=%s masterHost=%s masterPort=%s" -i installed.inv |tee data/startWorkers.out' \
        % (scriptDirPath(), victimUrl, masterHost, dataPorts[0])
    try:
        subprocess.check_call( cmd, shell=True, stdout=subprocess.DEVNULL )
    except subprocess.CalledProcessError as exc:
        logger.warning( 'startWorkers returnCode %d (%s)', exc.returncode, exc.output )
def killWorkerProcs():
    '''kill locust worker processes on the installed instances via ansible (best-effort)'''
    logger.info( 'calling killWorkerProcs.yml' )
    cmd = 'ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook %s/killWorkerProcs.yml -i installed.inv' \
        % (scriptDirPath())
    try:
        subprocess.check_call( cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL )
    except subprocess.CalledProcessError as exc:
        # best-effort cleanup: a nonzero exit here is logged, not raised
        logger.info( 'exception from killWorkerProcs (return code %d)', exc.returncode )
def output_reader(proc):
    '''relay a subprocess's stdout lines to stderr, prefixed with "Locust:"'''
    for rawLine in iter(proc.stdout.readline, b''):
        print('Locust: {0}'.format(rawLine.decode('utf-8')), end='', file=sys.stderr)
def startMaster( victimHostUrl, dataPorts, webPort ):
    '''start a local locust master process via runLocust.py

    Returns a dict with 'proc' (the Popen object) and 'thread' (the stdout
    relay thread); the dict may be empty or partial if startup failed.
    '''
    logger.info( 'calling runLocust.py' )
    result = {}
    cmd = [
        'python3', '-u', scriptDirPath()+'/runLocust.py', '--host='+victimHostUrl,
        '--heartbeat-liveness=30',
        '--master-bind-port', str(dataPorts[0]), '--web-port', str(webPort),
        '--master', '--loglevel', 'INFO', '-f', scriptDirPath()+'/master_locust.py'
    ]
    try:
        proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
        result['proc'] = proc
        # relay the master's output to stderr on a background thread
        t = threading.Thread(target=output_reader, args=(proc,))
        result['thread'] = t
        t.start()
    except Exception as exc:
        # the original `return` inside a finally block silently swallowed every
        # exception (e.g. FileNotFoundError from Popen); at least log it now,
        # while preserving the contract of always returning the result dict
        logger.error( 'could not start locust master (%s) %s', type(exc), exc )
    return result
def stopMaster( specs ):
    '''terminate the locust master process and join its reader thread

    Arguments:
        specs: dict as returned by startMaster, with optional 'proc' and 'thread'.
    '''
    logger.info( 'specs %s', specs )
    proc = specs.get('proc')
    if proc:
        proc.terminate()
        try:
            proc.wait(timeout=5)
            if proc.returncode:
                logger.warning( 'runLocust return code %d', proc.returncode )
        except subprocess.TimeoutExpired:
            logger.warning( 'runLocust did not terminate in time' )
    # join the output_reader thread so it does not outlive the process
    thread = specs.get('thread')
    if thread:
        thread.join()
def genXmlReport( wasGood ):
    '''preliminary version generates "fake" junit-style xml'''
    prolog = '''<?xml version="1.0" ?>
<testsuites>
<testsuite tests="1" errors="0" failures="%d" name="loadtests" >
<testcase classname="com.neocortix.loadtest" name="loadtest" time="1.0">
'''
    failureBody = '''
<failure message="response time too high">Assertion failed</failure>
'''
    epilog = '''
</testcase>
</testsuite>
</testsuites>
'''
    if wasGood:
        return (prolog % 0) + epilog
    return (prolog % 1) + failureBody + epilog
def testsPass( args, loadTestStats ):
if loadTestStats.get('nReqsSatisfied', 0) <= 0:
return False
return loadTestStats.get('meanResponseTimeMs30', 99999) <= args.reqMsprMean
def conductLoadtest( masterUrl, nWorkersWanted, usersPerWorker,
startTimeLimit, susTime, stopWanted, nReqInstances, rampUpRate
):
logger.info( 'locals %s', locals() )
hatch_rate = rampUpRate if rampUpRate else nWorkersWanted # force it non-zero
if not masterUrl.endswith( '/' ):
masterUrl = masterUrl + '/'
if stopWanted:
logger.info( 'requesting stop via %s', masterUrl+'stop' )
resp = requests.get( masterUrl+'stop' )
logger.info( '%s', resp.json() )
startTime = time.time()
deadline = startTime + startTimeLimit
workersFound = False
while True:
if g_.signaled:
break
try:
reqUrl = masterUrl+'stats/requests'
resp = requests.get( reqUrl )
respJson = resp.json()
if 'slaves' in respJson:
workerData = respJson['slaves']
workersFound = len(workerData)
logger.info( '%d workers found', workersFound )
if workersFound >= nWorkersWanted:
break
if time.time() > deadline:
break
except Exception as exc:
logger.warning( 'exception (%s) %s', type(exc), exc )
time.sleep(1)
nGoodWorkers = 0
maxRps = 0
if workersFound:
url = masterUrl+'swarm'
nUsersWanted = nWorkersWanted * usersPerWorker
reqParams = {'locust_count': nUsersWanted,'hatch_rate': hatch_rate }
logger.info( 'swarming, count: %d, rate %.1f', nUsersWanted, hatch_rate )
resp = requests.post( url, data=reqParams )
if (resp.status_code < 200) or (resp.status_code >= 300):
logger.warning( 'error code from server (%s) %s', resp.status_code, resp.text )
logger.info( 'error url "%s"', url )
logger.info( 'monitoring for %d seconds', susTime )
deadline = time.time() + susTime
while time.time() <= deadline:
if g_.signaled:
break
try:
resp = requests.get( masterUrl+'stats/requests' )
respJson = resp.json()
rps = respJson['total_rps']
maxRps = max( maxRps, rps )
if 'slaves' in respJson:
workerData = respJson['slaves']
workersFound = len(workerData)
#logger.info( '%d workers found', workersFound )
nGoodWorkers = 0
nUsers | |
while len(result)!=k+1:
result=result+a+b
a=b
b=result
if len(result)>=k:
break
return result[k]
#Rversing the Reversed
def reverse_reversed(items):
    """Return a copy of items reversed at every level of list nesting."""
    result = []
    for element in reversed(items):
        # recurse into sublists; copy every other element as-is
        result.append(reverse_reversed(element) if isinstance(element, list) else element)
    return result
#count word dominators
# definition of the function.
def dominates(w1, w2):
    """Return True if w1 wins the character-wise comparison against w2 in more than half the positions."""
    wins = sum(1 for c1, c2 in zip(w1, w2) if c1 > c2)
    return wins > len(w1) / 2
# definition of the function.
def count_word_dominator(words):
    """Count words that dominate every word appearing after them in the list.

    A word counts if dominates(word, later) holds for all later words; the
    last word counts vacuously.
    """
    cnt = 0
    for i in range(len(words)):
        dominateStatus = True
        for j in range(i + 1, len(words)):
            if not dominates(words[i], words[j]):
                dominateStatus = False
                # one failure settles it; the original kept scanning needlessly
                break
        if dominateStatus:
            cnt += 1
    return cnt
#Duplicates digit bonus
def rpt(n):
    """Score runs of repeated digits in the digit string n, scanning from the end.

    NOTE(review): the scoring rules (10 per extra repeated digit, doubled for
    the run that includes the final digit) are inferred from the code below;
    confirm against the original problem statement before changing anything.
    """
    x=list(n)
    x.reverse()
    # f flags whether the doubled "trailing run" bonus has been applied
    f=0
    cost=0
    k=-1
    # last holds the final digit of the original string (first after reversal)
    last=x[0]
    #looks for repeating digits
    for i in range(len(x)):
        k=-1
        if x[i]=='#':
            continue
        c=x[i]
        # '#' marks digits already consumed by an earlier run
        x[i]='#'
        #Calculate the value for the repeating digits
        for j in range(i+1,len(x)):
            if x[j]==c:
                k=k+1
                x[j]='#'
            else:
                break
        if k>=0:
            if f==0 and c==last:
                # run containing the last digit scores double
                cost+=(2*(10)*k)
                f=1
            else:
                cost=cost+((10)*k)
    return cost
#Nearest Smaller element
def nearest_smaller(items):
    """For each position, return the nearest element strictly smaller than it.

    Scans left and right from each position for the closest smaller element;
    a distance tie takes the smaller of the two candidates. Positions with no
    smaller element anywhere keep their own value.
    """
    n = len(items)
    result = []
    for i in range(n):
        left = next((j for j in range(i - 1, -1, -1) if items[j] < items[i]), None)
        right = next((j for j in range(i + 1, n) if items[j] < items[i]), None)
        if left is None and right is None:
            result.append(items[i])
        elif right is None:
            result.append(items[left])
        elif left is None:
            result.append(items[right])
        else:
            dist_left, dist_right = i - left, right - i
            if dist_left < dist_right:
                result.append(items[left])
            elif dist_right < dist_left:
                result.append(items[right])
            else:
                result.append(min(items[left], items[right]))
    return result
def collatzy_distance(start, end):
    """BFS depth at which end first appears when branching c -> 3c+1 and c -> c//2 from start."""
    frontier = [start]
    steps = 0
    while end not in frontier:
        next_frontier = []
        for value in frontier:
            next_frontier.append(3 * value + 1)
            next_frontier.append(value // 2)
        frontier = next_frontier
        steps += 1
    return steps
#Interesting Intersecting
def squares_intersect(s1, s2):
    """Return True if the axis-aligned squares s1 and s2 (each (x, y, side)) intersect."""
    x1, y1, r1 = s1[0], s1[1], s1[2]
    x2, y2, r2 = s2[0], s2[1], s2[2]
    # disjoint iff one square ends strictly before the other begins on some axis
    if (x1 < x2 and x1 + r1 < x2) or (y1 < y2 and y1 + r1 < y2):
        return False
    if (x2 < x1 and x2 + r2 < x1) or (y2 < y1 and y2 + r2 < y1):
        return False
    return True
def give_change(amount, coins):
    """Greedy change-making: take the largest usable coin repeatedly.

    Assumes coins is sorted in descending order and that amount is
    representable (e.g. the smallest coin is 1), as the original did.
    """
    change = []
    idx = 0  # index of the coin currently being tried
    while amount > 0:
        coin = coins[idx]
        if coin <= amount:
            amount -= coin
            change.append(coin)
        else:
            idx += 1
    return change
#Keep doubling
def double_untill_all_digits(n, giveup = 1000):
    """Number of doublings until n's decimal digits include all of 0-9; -1 if not reached within giveup checks."""
    all_digits = set('0123456789')
    for count in range(giveup):
        if set(str(n)) >= all_digits:
            return count
        n *= 2
    return -1
#that's enough of you
def remove_kth(items, k):
    """Keep only the first k occurrences of each distinct value, preserving order.

    Runs in O(n) with a per-value counter; the original rebuilt lists with
    `+` and located counts with list.index, which was quadratic. Results are
    identical (k <= 0 removes everything, as before).
    """
    seen = {}
    kept = []
    for item in items:
        seen[item] = seen.get(item, 0) + 1
        if seen[item] <= k:
            kept.append(item)
    return kept
#Longest palindrome substring
def longest_palindrome(text):
    """Return the longest palindromic substring of text (earliest on ties).

    Expands around each center in O(n^2) time instead of the original
    O(n^3) all-substrings scan. Results are identical, including the
    tie-break: only a strictly longer palindrome replaces the current best,
    so the earliest longest substring is kept.
    """
    best = ''
    for center in range(len(text)):
        # odd-length palindromes centered at `center`,
        # even-length palindromes centered between `center` and `center + 1`
        for lo, hi in ((center, center), (center, center + 1)):
            while lo >= 0 and hi < len(text) and text[lo] == text[hi]:
                lo -= 1
                hi += 1
            # after the loop [lo+1, hi) is the maximal palindrome for this center
            if hi - lo - 1 > len(best):
                best = text[lo + 1:hi]
    return best
#All your fractions are belong to base
def group_and_skip(n,out,inp):
    """Repeatedly trade groups of `out` coins for `inp` coins, collecting leftovers.

    NOTE(review): this prints the leftover list and returns None; the classic
    exercise of this name usually *returns* the list of leftovers per round.
    Confirm the intended interface (and the loop condition n > out, which
    skips a final exchange when n == out) before relying on this.
    """
    remainingCoin=[]
    while(n>out):
        n=n-out
        remainingCoin.append(n%out)
        n=int(n/out)+inp
    remainingCoin.append(n)
    print(remainingCoin)
#Count Consective summers
def count_consecutive_summers(n):
    """Count ways to write n as a sum of one or more consecutive positive integers."""
    count = 0
    for start in range(1, n + 1):
        total = 0
        for j in range(start, n + 1):
            total += j
            if total == n:
                count += 1
            if total >= n:
                # consecutive sums only grow, so no later j can match;
                # the original scanned to n every time (same results, slower)
                break
    return count
#Pulldonw Your Neighbours
def eliminate_neighbours( items ):
    """Count rounds of removing the smallest remaining item and its larger neighbour
    until the maximum element has been removed.

    NOTE(review): `smaller` starts as items[0] and `small_ind` is only updated
    when a strictly smaller value is found, so the index can go stale; the
    early exit when larger > size also assumes items is a permutation of
    1..len(items). Verify against the problem spec before modifying.
    """
    items_list = items.copy( )
    size = len( items_list )
    larger = 0; smaller = items[0]
    neighbour = 0
    small_ind = 0;
    count = 0
    # find the overall maximum; the process ends once it gets removed
    for i in items_list:
        if i > larger:
            larger = i
    if ( larger <= size ):
        while ( larger in items_list ):
            size = len( items_list )
            # locate the current smallest element and its index
            for j in items_list:
                if j < smaller:
                    smaller = j
                    small_ind = items_list.index( j )
            # the neighbour to remove is the larger of the two adjacent items
            if ( small_ind == 0 ):
                neighbour = items_list[ small_ind +1 ]
            elif ( small_ind == size-1 ):
                neighbour = items_list [ small_ind -1 ]
            else:
                if ( items_list[ small_ind -1 ] > items_list [ small_ind +1 ] ):
                    neighbour = items_list[ small_ind -1 ]
                else:
                    neighbour = items_list[ small_ind +1 ]
            items_list.remove( smaller )
            items_list.remove( neighbour )
            count += 1
            # reset the search sentinel to the known maximum for the next round
            smaller = larger
    return ( count )
#What do you hear, what do you say
def count_and_say(digits):
    """Run-length encode digits: each maximal run becomes '<count><digit>'."""
    if not digits:
        return ""
    pieces = []
    run_char = digits[0]
    run_len = 0
    for ch in digits:
        if ch == run_char:
            run_len += 1
        else:
            # run ended: emit it and start counting the new character
            pieces.append(str(run_len) + run_char)
            run_char = ch
            run_len = 1
    pieces.append(str(run_len) + run_char)
    return "".join(pieces)
def safe_squares_rooks(n, rooks):
    """Number of squares on an n*n board attacked by no rook."""
    # a square is safe iff its row and its column are both rook-free;
    # sets deduplicate rooks sharing a row or column
    occupied_rows = {rook[0] for rook in rooks}
    occupied_cols = {rook[1] for rook in rooks}
    return (n - len(occupied_rows)) * (n - len(occupied_cols))
#Bishops on a binge
def safe_squares_bishops(n, bishops):
    """Number of squares on an n*n board attacked by no bishop (bishop squares are unsafe too)."""
    safe_count = 0
    for row in range(n):
        for col in range(n):
            # a bishop attacks along diagonals: equal row and column distance
            if all(abs(row - b[0]) != abs(col - b[1]) for b in bishops):
                safe_count += 1
    return safe_count
#Up for the count
def counting_series(n):
    """Return the n-th digit (1-indexed) of the series '1234567891011121314...'.

    Fixes the original's float division ('/') when locating the containing
    number: it produced values like 103.33 whose str() happened to index
    correctly for small n but breaks once floats lose integer precision.
    Integer '//' is exact for any n. Also stops shadowing the builtin `len`.
    """
    digit_count = 1   # digits per number in the current range
    range_size = 9    # how many numbers have digit_count digits
    range_start = 1   # first number with digit_count digits
    # skip whole ranges (1-9, 10-99, ...) until n falls inside one
    while n > digit_count * range_size:
        n -= digit_count * range_size
        digit_count += 1
        range_size *= 10
        range_start *= 10
    # locate the number containing the digit, then index into it
    number = range_start + (n - 1) // digit_count
    return str(number)[(n - 1) % digit_count]
#Reverse the vowels
def reverse_vowels(text):
    """Return text with its vowels in reversed order.

    Each replacing vowel adopts the capitalization of the vowel it replaces,
    so the upper/lower pattern of the original text is preserved.

    NOTE(review): the original implementation crashed (assigned to a typo
    variable `st` and called `str.text[j]`, an AttributeError) and shadowed
    the builtin `str`; this is a rewrite of the apparent intent -- confirm
    the capitalization rule against the problem statement.
    """
    vowels = [c for c in text if c.lower() in 'aeiou']
    result = []
    for ch in text:
        if ch.lower() in 'aeiou':
            replacement = vowels.pop()  # popping from the end yields reversed order
            result.append(replacement.upper() if ch.isupper() else replacement.lower())
        else:
            result.append(ch)
    return ''.join(result)
#Everybody do a scrooge shuffle
def spread_the_coins(pile,left,right):
    """Spread piles of coins: any pile with at least left+right coins gives
    k*left coins to its left neighbour and k*right to its right neighbour,
    until all piles are stable. Returns (start, pile) where start is the
    (non-positive) index of the leftmost created pile.

    NOTE(review): the loop mutates `pile` and the index `i` in place; after
    inserting a new leftmost pile, i stays 0 and re-processes the inserted
    element. Verify termination and index bookkeeping against the spec
    before restructuring.
    """
    start=i=0
    while i <(len(pile)):
        # k = number of full left+right payouts this pile can make
        k = pile[i]//(left+right)
        if k>0:
            pile[i]-=k*(left+right)
            if i!=len(pile)-1:
                pile[i+1] += k*right
            else:
                # rightmost pile: grow the list to receive the payout
                pile.append(k*right)
            if i!=0:
                pile[i-1] += k*left
                # step back: the left neighbour may now overflow
                i-=1
                continue
            else:
                # leftmost pile: create a new pile at a negative position
                start-=1
                pile.insert(0,k*left)
                continue
        i+=1
    return(start,pile)
#Calking-wilf sequence
def calkin_wilf(n):
    """Return the n-th fraction (1-based) of the breadth-first Calkin-Wilf enumeration."""
    pending = [Fraction(1, 1)]
    current = Fraction(1, 1)
    while n > 0:
        current = pending.pop(0)
        p, q = current.numerator, current.denominator
        # each node p/q has children p/(p+q) and (p+q)/q
        pending.append(Fraction(p, p + q))
        pending.append(Fraction(p + q, q))
        n -= 1
    return current
#Hippity hoppity, abolish loopity
def frog_collision(frog1, frog2):
    """Time t at which two frogs hopping from (x, y) with step (dx, dy) coincide, else None.

    Fixes the original bare `except:` (which would also have swallowed
    programming errors) to catch only the ZeroDivisionError raised when the
    frogs' velocities agree along an axis.
    """
    s1_x, s1_y, d1_x, d1_y = frog1[0], frog1[1], frog1[2], frog1[3]
    s2_x, s2_y, d2_x, d2_y = frog2[0], frog2[1], frog2[2], frog2[3]
    try:
        t1 = (s1_x - s2_x) / (d2_x - d1_x)
        t2 = (s1_y - s2_y) / (d2_y - d1_y)
        # both axes must coincide at the same time
        return t1 if t1 == t2 else None
    except ZeroDivisionError:
        # equal velocity on one axis: collision possible only if already aligned on it
        if d2_x - d1_x == 0 and s1_x - s2_x == 0 and d2_y - d1_y != 0:
            return (s1_y - s2_y) / (d2_y - d1_y)
        elif d2_y - d1_y == 0 and s1_y - s2_y == 0 and d2_x - d1_x != 0:
            return (s1_x - s2_x) / (d2_x - d1_x)
        else:
            return None
#Double Trouble
def double_trouble(items, n):
    """Deal values 1, 1, ..., doubling after each full cycle over items, until the
    running total reaches n; return the item the last value landed on."""
    dealt = []
    pos, amount = -1, 1
    while True:
        pos = (pos + 1) % len(items)
        dealt.append(amount)
        if sum(dealt) >= n:
            return items[pos]
        # double the amount after completing a pass over all items
        if pos == len(items) - 1:
            amount *= 2
#Nearest Polygonal Number
def fun(s, i):
    """i-th s-gonal number: ((s-2)*i^2 - (s-4)*i) / 2."""
    quadratic = (s - 2) * i * i
    linear = (s - 4) * i
    return (quadratic - linear) // 2
def nearest_poligonal_number(n, s):
a = 1
b = 2
while fun(s, b) < n:
b = b*b
m = (a+b)//2
while b-a >= 2:
at_m = fun(s,m)
if at_m > n:
b = m
elif at_m < n:
a = m
else:
return at_m
m = (a+b)//2
at_a = fun(s,a)
at_b = fun(s,b)
if abs(at_a-n) <= abs(at_b-n):
return at_a
else:
return at_b
#Postfix Interpreter
from collections import deque
def postfix_evaluate(items):
    """Evaluate a postfix (RPN) expression given as a list of numbers and operator strings."""
    operands = deque()
    for token in items:
        if token == '+' or token == '-' or token == '*' or token == '/':
            rhs = operands.pop()
            lhs = operands.pop()  # second pop is the left operand
            operands.append(calculate(lhs, rhs, token))
        else:
            operands.append(token)
    return operands.pop()
def calculate(op2, op1, x):
    """Apply binary operator x to op2 (left operand) and op1 (right operand).

    Uses '==' instead of the original 'is' comparisons: identity tests on
    string literals only worked by the accident of CPython interning small
    strings and are not guaranteed. Division is integer division, with
    division by zero defined as 0; unknown operators return None (as before).
    """
    if x == '*':
        return int(op2) * int(op1)
    elif x == '/':
        if op1 == 0:
            return 0
        else:
            return int(op2) // int(op1)
    elif x == '+':
        return int(op2) + int(op1)
    elif x == '-':
        return int(op2) - int(op1)
#Fractran Interpreter
import sys
def fractran(n:int, prog:list, giveup:int=1000):
# Intialize result list with n
res = [n]
# Run the logic, | |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Assemble common TF Dockerfiles from many parts.
- Assembles Dockerfiles
- Builds images (and optionally runs image tests)
- Pushes images to Docker Hub (provided with credentials)
Logs are written to stderr; the list of successfully built images is
written to stdout.
Read README.md (in this directory) for instructions!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import errno
import os
import platform
import re
import shutil
import textwrap
from absl import app
from absl import flags
import cerberus
import yaml
FLAGS = flags.FLAGS
flags.DEFINE_string('hub_username', None,
'Dockerhub username, only used with --upload_to_hub')
flags.DEFINE_string(
'hub_password', None,
('Dockerhub password, only used with --upload_to_hub. Use from an env param'
' so your password isn\'t in your history.'))
flags.DEFINE_integer('hub_timeout', 3600,
'Abort Hub upload if it takes longer than this.')
flags.DEFINE_string(
'repository', 'tensorflow',
'Tag local images as {repository}:tag (in addition to the '
'hub_repository, if uploading to hub)')
flags.DEFINE_string(
'hub_repository', None,
'Push tags to this Docker Hub repository, e.g. tensorflow/tensorflow')
flags.DEFINE_boolean(
'upload_to_hub',
False,
('Push built images to Docker Hub (you must also provide --hub_username, '
'--hub_password, and --hub_repository)'),
short_name='u',
)
flags.DEFINE_boolean(
'construct_dockerfiles', False, 'Do not build images', short_name='d')
flags.DEFINE_boolean(
'keep_temp_dockerfiles',
False,
'Retain .temp.Dockerfiles created while building images.',
short_name='k')
flags.DEFINE_boolean(
'dry_run', False, 'Do not actually generate Dockerfiles', short_name='n')
flags.DEFINE_string(
'spec_file',
'./spec.yml',
'Path to a YAML specification file',
short_name='s')
flags.DEFINE_string(
'output_dir',
'./dockerfiles', ('Path to an output directory for Dockerfiles. '
'Will be created if it doesn\'t exist.'),
short_name='o')
flags.DEFINE_string(
'partial_dir',
'./partials',
'Path to a directory containing foo.partial.Dockerfile partial files.',
short_name='p')
flags.DEFINE_boolean(
'quiet_dry_run',
True,
'Do not print contents of dry run Dockerfiles.',
short_name='q')
flags.DEFINE_boolean(
'nocache', False,
'Disable the Docker build cache; identical to "docker build --no-cache"')
# NOTE: a second flags.DEFINE_string('spec_file', ...) used to live here. It
# duplicated the 'spec_file' flag defined above, and absl raises
# DuplicateFlagError when the same flag name is registered twice, so the
# module failed at import time. The earlier definition is the one kept.
# Schema to verify the contents of spec.yml with Cerberus.
# Must be converted to a dict from yaml to work.
# Note: can add python references with e.g.
# !!python/name:builtins.str
# !!python/name:__main__.funcname
SCHEMA_TEXT = """
header:
type: string
partials:
type: dict
keyschema:
type: string
valueschema:
type: dict
schema:
desc:
type: string
args:
type: dict
keyschema:
type: string
valueschema:
anyof:
- type: [ boolean, number, string ]
- type: dict
schema:
default:
type: [ boolean, number, string ]
desc:
type: string
options:
type: list
schema:
type: string
images:
keyschema:
type: string
valueschema:
type: dict
schema:
desc:
type: string
arg-defaults:
type: list
schema:
anyof:
- type: dict
keyschema:
type: string
arg_in_use: true
valueschema:
type: string
- type: string
isimage: true
create-dockerfile:
type: boolean
partials:
type: list
schema:
anyof:
- type: dict
keyschema:
type: string
regex: image
valueschema:
type: string
isimage: true
- type: string
ispartial: true
"""
class TfDockerValidator(cerberus.Validator):
  """Custom Cerberus validator for TF dockerfile spec.

  Note: Each _validate_foo function's docstring must end with a segment
  describing its own validation schema, e.g. "The rule's arguments are...". If
  you add a new validator, you can copy/paste that section.
  """

  # IMPORTANT: cerberus parses the trailing schema section of each
  # _validate_* docstring below; do not edit those docstrings casually.

  def _validate_ispartial(self, ispartial, field, value):
    """Validate that a partial references an existing partial spec.

    Args:
      ispartial: Value of the rule, a bool
      field: The field being validated
      value: The field's value

    The rule's arguments are validated against this schema:
    {'type': 'boolean'}
    """
    # root_document is the whole spec.yml dict; unknown names are errors
    if ispartial and value not in self.root_document.get('partials', dict()):
      self._error(field, '{} is not an existing partial.'.format(value))

  def _validate_isimage(self, isimage, field, value):
    """Validate that an image references an existing partial spec.

    Args:
      isimage: Value of the rule, a bool
      field: The field being validated
      value: The field's value

    The rule's arguments are validated against this schema:
    {'type': 'boolean'}
    """
    if isimage and value not in self.root_document.get('images', dict()):
      self._error(field, '{} is not an existing image.'.format(value))

  def _validate_arg_in_use(self, arg_in_use, field, value):
    """Validate that an arg references an existing partial spec's args.

    Args:
      arg_in_use: Value of the rule, a bool
      field: The field being validated
      value: The field's value

    The rule's arguments are validated against this schema:
    {'type': 'boolean'}
    """
    # accept the arg if any partial declares it; otherwise report an error
    if arg_in_use:
      for partial in self.root_document.get('partials', dict()).values():
        if value in partial.get('args', tuple()):
          return
      self._error(field, '{} is not an arg used in any partial.'.format(value))
def build_partial_description(partial_spec):
  """Create the documentation lines for a specific partial.

  Generates something like this:

    # This is the partial's description, from spec.yml.
    # --build-arg ARG_NAME=argdefault
    #    this is one of the args.

  Args:
    partial_spec: A dict representing one of the partials from spec.yml,
      without the partial's name; looks like { desc: ..., args: ... }.

  Returns:
    A commented string describing this partial.
  """
  wrapper = textwrap.TextWrapper(
      initial_indent='# ', subsequent_indent='# ', width=80)
  # leading '#' separator, then the line-wrapped partial description
  lines = ['#', wrapper.fill(partial_spec.get('desc', '( no comments )'))]
  for arg_name, arg_data in partial_spec.get('args', dict()).items():
    wrapped_desc = textwrap.fill(
        arg_data.get('desc', '( no description )'),
        initial_indent='# ',
        subsequent_indent='# ',
        width=80,
        drop_whitespace=False)
    # show the allowed choices as (a|b|c) when the arg declares options
    if 'options' in arg_data:
      choices = ' ({})'.format('|'.join(arg_data['options']))
    else:
      choices = ''
    usage = '# --build-arg {}={}{}'.format(
        arg_name, arg_data.get('default', '(unset)'), choices)
    lines.extend([usage, wrapped_desc])
  return '\n'.join(lines)
def construct_contents(partial_specs, image_spec):
  """Assemble the dockerfile contents for an image spec.

  It assembles a concrete list of partial references into a single, large
  string.
  Also expands argument defaults, so that the resulting Dockerfile doesn't have
  to be configured with --build-arg=... every time. That is, any ARG directive
  will be updated with a new default value.

  Args:
    partial_specs: The dict from spec.yml["partials"].
    image_spec: One of the dict values from spec.yml["images"].

  Returns:
    A string containing a valid Dockerfile based on the partials listed in
    image_spec.
  """
  processed_partial_strings = []
  for partial_name in image_spec['partials']:
    # Apply image arg-defaults to existing arg defaults; deepcopy so the
    # shared partial spec is not mutated across images
    partial_spec = copy.deepcopy(partial_specs[partial_name])
    args = partial_spec.get('args', dict())
    for k_v in image_spec.get('arg-defaults', []):
      # each arg-defaults entry is a single-item {arg: value} dict
      arg, value = list(k_v.items())[0]
      if arg in args:
        args[arg]['default'] = value
    # Read partial file contents; 'file' overrides the partial's own name
    filename = partial_spec.get('file', partial_name)
    partial_path = os.path.join(FLAGS.partial_dir,
                                '{}.partial.Dockerfile'.format(filename))
    with open(partial_path, 'r') as f_partial:
      partial_contents = f_partial.read()
    # Replace ARG FOO=BAR with ARG FOO=[new-default]
    for arg, arg_data in args.items():
      if 'default' in arg_data and arg_data['default']:
        default = '={}'.format(arg_data['default'])
      else:
        default = ''
      # rewrites the whole ARG line; assumes arg names are plain identifiers
      partial_contents = re.sub(r'ARG {}.*'.format(arg), 'ARG {}{}'.format(
          arg, default), partial_contents)
    # Store updated partial contents
    processed_partial_strings.append(partial_contents)
  # Join everything together
  return '\n'.join(processed_partial_strings)
def mkdir_p(path):
    """Recursively create *path* and its parents; a pre-existing path is OK."""
    try:
        os.makedirs(path)
    except OSError as err:
        # "Already exists" is the one benign failure; anything else bubbles up.
        if err.errno == errno.EEXIST:
            return
        raise
def gather_tag_args(slices, cli_input_args, required_args):
    """Build a dictionary of all the CLI and slice-specified args for a tag."""
    # NOTE(review): this body looks truncated -- it only creates the empty
    # accumulator and neither populates nor returns it, so callers currently
    # get None. Confirm against the upstream assembler source.
    args = {}
def construct_documentation(header, partial_specs, image_spec):
    """Assemble all of the documentation for a single dockerfile.

    Builds explanations of included partials and available build args.

    Args:
        header: The string from spec.yml["header"]; will be commented and wrapped.
        partial_specs: The dict from spec.yml["partials"].
        image_spec: The spec for the dockerfile being built.

    Returns:
        A string containing a commented header that documents the contents of
        the dockerfile.
    """
    # Comment-prefix the global header and the image description.
    header_block = '\n'.join(
        ('# ' + line).rstrip() for line in header.splitlines())
    desc_block = '\n'.join(
        '# ' + line for line in image_spec.get('desc', '').splitlines())
    docs = [header_block, '#', desc_block]
    # Append one documentation section per partial in the image.
    for partial_name in image_spec['partials']:
        # Deep-copy so per-image default overrides never leak back into the
        # shared partial specs.
        spec_copy = copy.deepcopy(partial_specs[partial_name])
        partial_args = spec_copy.get('args', dict())
        for override in image_spec.get('arg-defaults', []):
            arg_name, arg_value = list(override.items())[0]
            if arg_name in partial_args:
                partial_args[arg_name]['default'] = arg_value
        docs.append(build_partial_description(spec_copy))
    return '\n'.join(docs) + '\n'
def normalize_partial_args(partial_specs):
"""Normalize the shorthand form of a partial's args specification.
Turns this:
partial:
args:
SOME_ARG: arg_value
Into this:
partial:
args:
SOME_ARG:
default: arg_value
Args:
partial_specs: The dict | |
<reponame>c-nuro/airflow
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import logging
import multiprocessing
import os
import signal
import threading
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Iterator, List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import func, or_
from sqlalchemy.orm import eagerload
from sqlalchemy.orm.session import Session
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.models import SlaMiss, errors
from airflow.models.dag import DAG, DagModel
from airflow.models.dagbag import DagBag
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.callback_requests import (
CallbackRequest,
DagCallbackRequest,
SlaCallbackRequest,
TaskCallbackRequest,
)
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import provide_session
from airflow.utils.state import State
# Short module-level aliases for the ORM models used throughout this module.
DR = models.DagRun
TI = models.TaskInstance
class DagFileProcessorProcess(LoggingMixin, MultiprocessingStartMethodMixin):
    """Runs DAG processing in a separate process using DagFileProcessor

    :param file_path: a Python file containing Airflow DAG definitions
    :type file_path: str
    :param pickle_dags: whether to serialize the DAG objects to the DB
    :type pickle_dags: bool
    :param dag_ids: If specified, only look at these DAG ID's
    :type dag_ids: List[str]
    :param callback_requests: failure callback to execute
    :type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
    """

    # Counter that increments every time an instance of this class is created
    class_creation_counter = 0

    def __init__(
        self,
        file_path: str,
        pickle_dags: bool,
        dag_ids: Optional[List[str]],
        callback_requests: List[CallbackRequest],
    ):
        super().__init__()
        self._file_path = file_path
        self._pickle_dags = pickle_dags
        self._dag_ids = dag_ids
        self._callback_requests = callback_requests

        # The process that was launched to process the given file.
        self._process: Optional[multiprocessing.process.BaseProcess] = None
        # The result of DagFileProcessor.process_file(file_path).
        self._result: Optional[Tuple[int, int]] = None
        # Whether the process is done running.
        self._done = False
        # When the process started.
        self._start_time: Optional[datetime.datetime] = None
        # This ID is used to uniquely name the process / thread that's launched
        # by this processor instance
        self._instance_id = DagFileProcessorProcess.class_creation_counter

        # Parent end of the result pipe; set by start().
        self._parent_channel: Optional[MultiprocessingConnection] = None
        DagFileProcessorProcess.class_creation_counter += 1

    @property
    def file_path(self) -> str:
        """Path of the DAG file this processor was created for."""
        return self._file_path

    @staticmethod
    def _run_file_processor(
        result_channel: MultiprocessingConnection,
        parent_channel: MultiprocessingConnection,
        file_path: str,
        pickle_dags: bool,
        dag_ids: Optional[List[str]],
        thread_name: str,
        callback_requests: List[CallbackRequest],
    ) -> None:
        """
        Process the given file.

        :param result_channel: the connection to use for passing back the result
        :type result_channel: multiprocessing.Connection
        :param parent_channel: the parent end of the channel to close in the child
        :type parent_channel: multiprocessing.Connection
        :param file_path: the file to process
        :type file_path: str
        :param pickle_dags: whether to pickle the DAGs found in the file and
            save them to the DB
        :type pickle_dags: bool
        :param dag_ids: if specified, only examine DAG ID's that are
            in this list
        :type dag_ids: list[str]
        :param thread_name: the name to use for the process that is launched
        :type thread_name: str
        :param callback_requests: failure callback to execute
        :type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
        :return: the process that was launched
        :rtype: multiprocessing.Process
        """
        # This helper runs in the newly created process
        log: logging.Logger = logging.getLogger("airflow.processor")

        # Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
        # the child, else it won't get closed properly until we exit.
        log.info("Closing parent pipe")
        parent_channel.close()
        del parent_channel

        set_context(log, file_path)
        setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")

        try:
            # redirect stdout/stderr to log
            with redirect_stdout(StreamLogWriter(log, logging.INFO)), redirect_stderr(
                StreamLogWriter(log, logging.WARN)
            ), Stats.timer() as timer:
                # Re-configure the ORM engine as there are issues with multiple processes
                settings.configure_orm()

                # Change the thread name to differentiate log lines. This is
                # really a separate process, but changing the name of the
                # process doesn't work, so changing the thread name instead.
                threading.current_thread().name = thread_name

                log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
                dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
                result: Tuple[int, int] = dag_file_processor.process_file(
                    file_path=file_path,
                    pickle_dags=pickle_dags,
                    callback_requests=callback_requests,
                )
                result_channel.send(result)
            log.info("Processing %s took %.3f seconds", file_path, timer.duration)
        except Exception:
            # Log exceptions through the logging framework.
            log.exception("Got an exception! Propagating...")
            raise
        finally:
            # We re-initialized the ORM within this Process above so we need to
            # tear it down manually here
            settings.dispose_orm()

            result_channel.close()

    def start(self) -> None:
        """Launch the process and start processing the DAG."""
        start_method = self._get_multiprocessing_start_method()
        context = multiprocessing.get_context(start_method)

        # duplex=False: the child only ever sends; the parent only receives.
        _parent_channel, _child_channel = context.Pipe(duplex=False)
        process = context.Process(
            target=type(self)._run_file_processor,
            args=(
                _child_channel,
                _parent_channel,
                self.file_path,
                self._pickle_dags,
                self._dag_ids,
                f"DagFileProcessor{self._instance_id}",
                self._callback_requests,
            ),
            name=f"DagFileProcessor{self._instance_id}-Process",
        )
        self._process = process
        self._start_time = timezone.utcnow()
        process.start()

        # Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
        # from closing in some cases
        _child_channel.close()
        del _child_channel

        # Don't store it on self until after we've started the child process - we don't want to keep it from
        # getting GCd/closed
        self._parent_channel = _parent_channel

    def kill(self) -> None:
        """Kill the process launched to process the file, and ensure consistent state."""
        if self._process is None:
            raise AirflowException("Tried to kill before starting!")
        self._kill_process()

    def terminate(self, sigkill: bool = False) -> None:
        """
        Terminate (and then kill) the process launched to process the file.

        :param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
        :type sigkill: bool
        """
        if self._process is None or self._parent_channel is None:
            raise AirflowException("Tried to call terminate before starting!")

        self._process.terminate()
        # Arbitrarily wait 5s for the process to die
        # NOTE(review): relies on the private _popen attribute of the process
        # object -- confirm this still exists on the supported Python versions.
        with suppress(TimeoutError):
            self._process._popen.wait(5)  # type: ignore
        if sigkill:
            self._kill_process()
        self._parent_channel.close()

    def _kill_process(self) -> None:
        """SIGKILL the child process if still alive, then close the parent pipe end."""
        if self._process is None:
            raise AirflowException("Tried to kill process before starting!")

        if self._process.is_alive() and self._process.pid:
            self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
            os.kill(self._process.pid, signal.SIGKILL)
        if self._parent_channel:
            self._parent_channel.close()

    @property
    def pid(self) -> int:
        """
        :return: the PID of the process launched to process the given file
        :rtype: int
        """
        if self._process is None or self._process.pid is None:
            raise AirflowException("Tried to get PID before starting!")
        return self._process.pid

    @property
    def exit_code(self) -> Optional[int]:
        """
        After the process is finished, this can be called to get the return code

        :return: the exit code of the process
        :rtype: int
        """
        if self._process is None:
            raise AirflowException("Tried to get exit code before starting!")
        if not self._done:
            raise AirflowException("Tried to call retcode before process was finished!")
        return self._process.exitcode

    @property
    def done(self) -> bool:
        """
        Check if the process launched to process this file is done.

        :return: whether the process is finished running
        :rtype: bool
        """
        if self._process is None or self._parent_channel is None:
            raise AirflowException("Tried to see if it's done before starting!")

        if self._done:
            return True

        # A pending message on the pipe means the child finished and sent its
        # result before exiting.
        if self._parent_channel.poll():
            try:
                self._result = self._parent_channel.recv()
                self._done = True
                self.log.debug("Waiting for %s", self._process)
                self._process.join()
                self._parent_channel.close()
                return True
            except EOFError:
                # If we get an EOFError, it means the child end of the pipe has been closed. This only happens
                # in the finally block. But due to a possible race condition, the process may have not yet
                # terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
                # "suitable" timeout.
                self._done = True
                # Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
                self._process.join(timeout=5)
                if self._process.is_alive():
                    # Didn't shut down cleanly - kill it
                    self._kill_process()

        # Child exited without sending a result (e.g. crashed before send).
        if not self._process.is_alive():
            self._done = True
            self.log.debug("Waiting for %s", self._process)
            self._process.join()
            self._parent_channel.close()
            return True

        return False

    @property
    def result(self) -> Optional[Tuple[int, int]]:
        """
        :return: result of running DagFileProcessor.process_file()
        :rtype: tuple[int, int] or None
        """
        if not self.done:
            raise AirflowException("Tried to get the result before it's done!")
        return self._result

    @property
    def start_time(self) -> datetime.datetime:
        """
        :return: when this started to process the file
        :rtype: datetime
        """
        if self._start_time is None:
            raise AirflowException("Tried to get start time before it started!")
        return self._start_time

    @property
    def waitable_handle(self):
        """OS-level handle of the child process, suitable for multiprocessing.connection.wait()."""
        return self._process.sentinel
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
| |
8, 9, 6],
[7, 2, 9, 5, 4, 1],
[6, 3, 9, 2, 5, 2],
[3, 7, 5, 8, 9, 3],
],
dtype=np.float32,
),
)
)
def _test_eager_boxing_with_overlapping_placement_s1_to_b(
    test_case, in_device, out_device
):
    """Eager boxing: split(1) on ranks {0, 1, 3} -> broadcast on ranks {2, 3}."""
    # Per-rank local shards; the consistent tensor under split(0) on ranks
    # {0, 1, 3} is the row-concatenation of those three shards.
    shard_rows = {
        0: [[4, 6, 5, 20, 8, 9], [6, 8, 9, 0, 4, 6],
            [3, 7, 5, 0, 3, 5], [6, 8, 9, 0, 8, 7]],
        1: [[2, 10, 10, 7, 10, 3], [3, 9, 10, 5, 5, 6],
            [4, 6, 6, 9, 8, 6], [6, 8, 6, 4, 5, 3]],
        2: [[9, 6, 5, 8, 3, 6], [4, 9, 7, 0, 2, 1],
            [2, 5, 7, 9, 4, 8], [6, 8, 10, 0, 4, 9]],
        3: [[9, 4, 5, 8, 9, 6], [7, 2, 9, 5, 4, 1],
            [6, 3, 9, 2, 5, 2], [3, 7, 5, 8, 9, 3]],
    }
    rank = flow.env.get_rank()
    np_arr = np.array(shard_rows[rank], dtype=np.float32)
    device = flow.device(in_device)
    tensor = flow.tensor(np_arr, device=device, dtype=flow.float32)
    placement = flow.placement(in_device, {0: [0, 1, 3]})
    x = tensor.to_consistent(placement, flow.sbp.split(0))
    y = x.to_consistent(placement, flow.sbp.split(1))
    new_placement = flow.placement(out_device, {0: [2, 3]})
    z = y.to_consistent(new_placement, flow.sbp.broadcast)
    test_case.assertEqual(z.placement, new_placement)
    # Under broadcast, every destination rank holds the full consistent tensor.
    expected = np.array(
        shard_rows[0] + shard_rows[1] + shard_rows[3], dtype=np.float32
    )
    if rank in (2, 3):
        test_case.assertTrue(np.array_equal(z.to_local().numpy(), expected))
def _test_eager_boxing_with_overlapping_placement_s1_to_p(
    test_case, in_device, out_device
):
    """Eager boxing: split(1) on ranks {0, 1, 3} -> partial_sum on ranks {2, 3}."""
    # Per-rank local shards; the consistent tensor under split(0) on ranks
    # {0, 1, 3} is the row-concatenation of those three shards.
    shard_rows = {
        0: [[4, 6, 5, 20, 8, 9], [6, 8, 9, 0, 4, 6],
            [3, 7, 5, 0, 3, 5], [6, 8, 9, 0, 8, 7]],
        1: [[2, 10, 10, 7, 10, 3], [3, 9, 10, 5, 5, 6],
            [4, 6, 6, 9, 8, 6], [6, 8, 6, 4, 5, 3]],
        2: [[9, 6, 5, 8, 3, 6], [4, 9, 7, 0, 2, 1],
            [2, 5, 7, 9, 4, 8], [6, 8, 10, 0, 4, 9]],
        3: [[9, 4, 5, 8, 9, 6], [7, 2, 9, 5, 4, 1],
            [6, 3, 9, 2, 5, 2], [3, 7, 5, 8, 9, 3]],
    }
    rank = flow.env.get_rank()
    np_arr = np.array(shard_rows[rank], dtype=np.float32)
    device = flow.device(in_device)
    tensor = flow.tensor(np_arr, device=device, dtype=flow.float32)
    placement = flow.placement(in_device, {0: [0, 1, 3]})
    x = tensor.to_consistent(placement, flow.sbp.split(0))
    y = x.to_consistent(placement, flow.sbp.split(1))
    new_placement = flow.placement(out_device, {0: [2, 3]})
    z = y.to_consistent(new_placement, flow.sbp.partial_sum)
    test_case.assertEqual(z.placement, new_placement)
    full = np.array(
        shard_rows[0] + shard_rows[1] + shard_rows[3], dtype=np.float32
    )
    # The partial terms split by column: rank 2 carries columns 0-1 of the
    # consistent tensor, rank 3 the remaining columns; untouched slots are 0.
    if rank == 2:
        expected = full.copy()
        expected[:, 2:] = 0
        test_case.assertTrue(np.array_equal(z.to_local().numpy(), expected))
    if rank == 3:
        expected = full.copy()
        expected[:, :2] = 0
        test_case.assertTrue(np.array_equal(z.to_local().numpy(), expected))
def _test_eager_boxing_with_in_placement_contain_out_placement_p_to_s1(
    test_case, in_device, out_device
):
    """Eager boxing: partial_sum on ranks {0, 1, 3} -> split(1) on ranks {1, 3}."""
    shard_rows = {
        0: [[4, 6, 5, 20], [6, 8, 9, 0], [3, 7, 5, 0], [6, 8, 9, 0]],
        1: [[2, 10, 10, 7], [3, 9, 10, 5], [4, 6, 6, 9], [6, 8, 6, 4]],
        2: [[9, 6, 5, 8], [4, 9, 7, 0], [2, 5, 7, 9], [6, 8, 10, 0]],
        3: [[9, 4, 5, 8], [7, 2, 9, 5], [6, 3, 9, 2], [3, 7, 5, 8]],
    }
    rank = flow.env.get_rank()
    np_arr = np.array(shard_rows[rank], dtype=np.float32)
    device = flow.device(in_device)
    tensor = flow.tensor(np_arr, device=device, dtype=flow.float32)
    placement = flow.placement(in_device, {0: [0, 1, 3]})
    x = tensor.to_consistent(placement, flow.sbp.partial_sum)
    new_placement = flow.placement(out_device, {0: [1, 3]})
    y = x.to_consistent(new_placement, flow.sbp.split(1))
    test_case.assertEqual(y.placement, new_placement)
    # partial_sum adds the shards of ranks 0, 1 and 3 elementwise; split(1)
    # then hands rank 1 the left column half and rank 3 the right.
    total = sum(np.array(shard_rows[r], dtype=np.float32) for r in (0, 1, 3))
    if rank == 1:
        test_case.assertTrue(np.array_equal(y.to_local().numpy(), total[:, :2]))
    if rank == 3:
        test_case.assertTrue(np.array_equal(y.to_local().numpy(), total[:, 2:]))
def _test_eager_boxing_with_in_placement_contain_out_placement_b_to_s1(
    test_case, in_device, out_device
):
    """Eager boxing: broadcast on ranks {0, 1, 3} -> split(1) on ranks {1, 3}."""
    shard_rows = {
        0: [[4, 6, 5, 20], [6, 8, 9, 0], [3, 7, 5, 0], [6, 8, 9, 0]],
        1: [[2, 10, 10, 7], [3, 9, 10, 5], [4, 6, 6, 9], [6, 8, 6, 4]],
        2: [[9, 6, 5, 8], [4, 9, 7, 0], [2, 5, 7, 9], [6, 8, 10, 0]],
        3: [[9, 4, 5, 8], [7, 2, 9, 5], [6, 3, 9, 2], [3, 7, 5, 8]],
    }
    rank = flow.env.get_rank()
    np_arr = np.array(shard_rows[rank], dtype=np.float32)
    device = flow.device(in_device)
    tensor = flow.tensor(np_arr, device=device, dtype=flow.float32)
    placement = flow.placement(in_device, {0: [0, 1, 3]})
    x = tensor.to_consistent(placement, flow.sbp.broadcast)
    new_placement = flow.placement(out_device, {0: [1, 3]})
    y = x.to_consistent(new_placement, flow.sbp.split(1))
    test_case.assertEqual(y.placement, new_placement)
    # The broadcast value matches the first rank (0) of the source placement;
    # split(1) then gives rank 1 the left column half and rank 3 the right.
    source = np.array(shard_rows[0], dtype=np.float32)
    if rank == 1:
        test_case.assertTrue(np.array_equal(y.to_local().numpy(), source[:, :2]))
    if rank == 3:
        test_case.assertTrue(np.array_equal(y.to_local().numpy(), source[:, 2:]))
def _test_eager_boxing_with_in_placement_contain_out_placement_s0_to_s1(
    test_case, in_device, out_device
):
    """Eager boxing: split(0) on ranks {0, 1, 3} -> split(1) on ranks {1, 3}."""
    shard_rows = {
        0: [[4, 6, 5, 20], [6, 8, 9, 0], [3, 7, 5, 0], [6, 8, 9, 0]],
        1: [[2, 10, 10, 7], [3, 9, 10, 5], [4, 6, 6, 9], [6, 8, 6, 4]],
        2: [[9, 6, 5, 8], [4, 9, 7, 0], [2, 5, 7, 9], [6, 8, 10, 0]],
        3: [[9, 4, 5, 8], [7, 2, 9, 5], [6, 3, 9, 2], [3, 7, 5, 8]],
    }
    rank = flow.env.get_rank()
    np_arr = np.array(shard_rows[rank], dtype=np.float32)
    device = flow.device(in_device)
    tensor = flow.tensor(np_arr, device=device, dtype=flow.float32)
    placement = flow.placement(in_device, {0: [0, 1, 3]})
    x = tensor.to_consistent(placement, flow.sbp.split(0))
    new_placement = flow.placement(out_device, {0: [1, 3]})
    y = x.to_consistent(new_placement, flow.sbp.split(1))
    test_case.assertEqual(y.placement, new_placement)
    # split(0) concatenates the shards of ranks 0, 1 and 3 along rows; the
    # re-split along columns gives rank 1 the left half and rank 3 the right.
    full = np.array(
        shard_rows[0] + shard_rows[1] + shard_rows[3], dtype=np.float32
    )
    if rank == 1:
        test_case.assertTrue(np.array_equal(y.to_local().numpy(), full[:, :2]))
    if rank == 3:
        test_case.assertTrue(np.array_equal(y.to_local().numpy(), full[:, 2:]))
self._find_key.delete(key)
self._match_doc_id.delete(doc_id)
self._find_key_in_leaf.delete(containing_leaf_start, key)
return True
def delete(self, doc_id, key, start=0, size=0):
    """Mark the record for (key, doc_id) as deleted and evict stale cache entries.

    *start* and *size* are accepted for interface compatibility and unused.
    Always returns True.
    """
    leaf_start, elem_index = self._find_key_to_update(key, doc_id)[:2]
    self._delete_element(leaf_start, elem_index)
    # Drop any cached lookups that could still resolve to the dead record.
    self._find_key.delete(key)
    self._match_doc_id.delete(doc_id)
    self._find_key_in_leaf.delete(leaf_start, key)
    return True
def _find_key_many(self, key, limit=1, offset=0):
    """Yield up to *limit* (doc_id, start, size, status) tuples for records
    whose key equals *key*, after skipping the first *offset* live matches.

    Records flagged 'd' (deleted) are skipped. The scan walks the linked
    list of leaves left-to-right from the first occurrence of *key* and
    stops as soon as a different key is seen (keys are stored in order).
    """
    leaf_with_key = self._find_leaf_with_first_key_occurence(key)
    nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
    try:
        leaf_with_key, key_index = self._find_index_of_first_key_equal(
            key, leaf_with_key, nr_of_elements)
        # The match may sit in a later leaf than the one we started from,
        # so re-read that leaf's metadata.
        nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
    except ElemNotFound:
        # No equal key in this leaf: the first occurrence, if any, starts
        # at the beginning of the right neighbour.
        leaf_with_key = next_leaf
        key_index = 0
        nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
    # First pass: consume *offset* non-deleted records matching the key.
    while offset:
        if key_index < nr_of_elements:
            curr_key, doc_id, start, size, status = self._read_single_leaf_record(
                leaf_with_key, key_index)
            if key == curr_key:
                if status != 'd':
                    offset -= 1
                key_index += 1
            else:
                return
        else:
            # Leaf exhausted; continue in the right neighbour if any.
            key_index = 0
            if next_leaf:
                leaf_with_key = next_leaf
                nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
            else:
                return
    # Second pass: yield up to *limit* matching, non-deleted records.
    while limit:
        if key_index < nr_of_elements:
            curr_key, doc_id, start, size, status = self._read_single_leaf_record(
                leaf_with_key, key_index)
            if key == curr_key:
                if status != 'd':
                    yield doc_id, start, size, status
                    limit -= 1
                key_index += 1
            else:
                return
        else:
            key_index = 0
            if next_leaf:
                leaf_with_key = next_leaf
                nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
            else:
                return
def _find_key_smaller(self, key, limit=1, offset=0):
    """Yield up to *limit* (doc_id, key, start, size, status) tuples for
    records with keys strictly smaller than *key*, scanning right-to-left,
    after skipping *offset* live records.

    Deleted records (status 'd') are skipped. Note the loops deliberately
    rebind the *key* parameter to the current record's key -- the original
    bound is no longer needed once the start position is fixed.
    """
    leaf_with_key = self._find_leaf_with_first_key_occurence(key)
    nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
    leaf_with_key, key_index = self._find_index_of_first_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements)
    nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
    curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0]
    # Step left past any element equal to the bound (strict inequality).
    if curr_key >= key:
        key_index -= 1
    # First pass: consume *offset* live records.
    while offset:
        if key_index >= 0:
            key, doc_id, start, size, status = self._read_single_leaf_record(
                leaf_with_key, key_index)
            if status != 'd':
                offset -= 1
            key_index -= 1
        else:
            # Leaf exhausted; continue at the end of the left neighbour.
            if prev_leaf:
                leaf_with_key = prev_leaf
                nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf)
                key_index = nr_of_elements - 1
            else:
                return
    # Second pass: yield up to *limit* live records.
    while limit:
        if key_index >= 0:
            key, doc_id, start, size, status = self._read_single_leaf_record(
                leaf_with_key, key_index)
            if status != 'd':
                yield doc_id, key, start, size, status
                limit -= 1
            key_index -= 1
        else:
            if prev_leaf:
                leaf_with_key = prev_leaf
                nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf)
                key_index = nr_of_elements - 1
            else:
                return
def _find_key_equal_and_smaller(self, key, limit=1, offset=0):
    """Yield up to *limit* (doc_id, key, start, size, status) tuples for
    records with keys smaller than or equal to *key*, scanning
    right-to-left, after skipping *offset* live records. Deleted records
    (status 'd') are skipped.
    """
    leaf_with_key = self._find_leaf_with_last_key_occurence(key)
    nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
    try:
        leaf_with_key, key_index = self._find_index_of_last_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements)
        nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
    except ElemNotFound:
        # Fall back to the left neighbour.
        # NOTE(review): key_index is set to that leaf's element COUNT, i.e.
        # one past the last 0-based index, and nr_of_elements/prev/next are
        # not re-read for the new leaf -- confirm whether the subsequent
        # record read at this index is intentional or an off-by-one.
        leaf_with_key = prev_leaf
        key_index = self._read_leaf_nr_of_elements_and_neighbours(
            leaf_with_key)[0]
    curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0]
    # Step left past any element strictly greater than the bound.
    if curr_key > key:
        key_index -= 1
    # First pass: consume *offset* live records (the loops rebind *key* to
    # the current record's key on purpose).
    while offset:
        if key_index >= 0:
            key, doc_id, start, size, status = self._read_single_leaf_record(
                leaf_with_key, key_index)
            if status != 'd':
                offset -= 1
            key_index -= 1
        else:
            if prev_leaf:
                leaf_with_key = prev_leaf
                nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf)
                key_index = nr_of_elements - 1
            else:
                return
    # Second pass: yield up to *limit* live records.
    while limit:
        if key_index >= 0:
            key, doc_id, start, size, status = self._read_single_leaf_record(
                leaf_with_key, key_index)
            if status != 'd':
                yield doc_id, key, start, size, status
                limit -= 1
            key_index -= 1
        else:
            if prev_leaf:
                leaf_with_key = prev_leaf
                nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf)
                key_index = nr_of_elements - 1
            else:
                return
def _find_key_bigger(self, key, limit=1, offset=0):
    """Yield up to *limit* (doc_id, key, start, size, status) tuples for
    records with keys strictly greater than *key*, scanning left-to-right,
    after skipping *offset* live records. Deleted records (status 'd') are
    skipped.
    """
    leaf_with_key = self._find_leaf_with_last_key_occurence(key)
    nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
    try:
        leaf_with_key, key_index = self._find_index_of_last_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements)
        nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
    except ElemNotFound:
        # Everything in this leaf is greater than the bound: start at 0.
        key_index = 0
    curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0]
    # Step right past any element <= the bound (strict inequality).
    if curr_key <= key:
        key_index += 1
    # First pass: consume *offset* live records.
    while offset:
        if key_index < nr_of_elements:
            curr_key, doc_id, start, size, status = self._read_single_leaf_record(
                leaf_with_key, key_index)
            if status != 'd':
                offset -= 1
            key_index += 1
        else:
            # Leaf exhausted; continue at the start of the right neighbour.
            key_index = 0
            if next_leaf:
                leaf_with_key = next_leaf
                nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
            else:
                return
    # Second pass: yield up to *limit* live records.
    while limit:
        if key_index < nr_of_elements:
            curr_key, doc_id, start, size, status = self._read_single_leaf_record(
                leaf_with_key, key_index)
            if status != 'd':
                yield doc_id, curr_key, start, size, status
                limit -= 1
            key_index += 1
        else:
            key_index = 0
            if next_leaf:
                leaf_with_key = next_leaf
                nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
            else:
                return
def _find_key_equal_and_bigger(self, key, limit=1, offset=0):
    """Yield up to *limit* (doc_id, key, start, size, status) tuples for
    records with keys greater than or equal to *key*, scanning
    left-to-right, after skipping *offset* live records. Deleted records
    (status 'd') are skipped.
    """
    leaf_with_key = self._find_leaf_with_first_key_occurence(key)
    nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
    leaf_with_key, key_index = self._find_index_of_first_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements)
    nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
    curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0]
    # Step right past any element strictly smaller than the bound.
    if curr_key < key:
        key_index += 1
    # First pass: consume *offset* live records.
    while offset:
        if key_index < nr_of_elements:
            curr_key, doc_id, start, size, status = self._read_single_leaf_record(
                leaf_with_key, key_index)
            if status != 'd':
                offset -= 1
            key_index += 1
        else:
            # Leaf exhausted; continue at the start of the right neighbour.
            key_index = 0
            if next_leaf:
                leaf_with_key = next_leaf
                nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
            else:
                return
    # Second pass: yield up to *limit* live records.
    while limit:
        if key_index < nr_of_elements:
            curr_key, doc_id, start, size, status = self._read_single_leaf_record(
                leaf_with_key, key_index)
            if status != 'd':
                yield doc_id, curr_key, start, size, status
                limit -= 1
            key_index += 1
        else:
            key_index = 0
            if next_leaf:
                leaf_with_key = next_leaf
                nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
            else:
                return
def _find_key_between(self, start, end, limit, offset, inclusive_start, inclusive_end):
    """
    Return a generator over all records whose keys lie within the given
    interval, in ascending key order.

    Yields up to *limit* (doc_id, key, start, size, status) tuples after
    skipping *offset* live records; records flagged 'd' (deleted) are
    skipped. *inclusive_start* / *inclusive_end* control whether the
    interval's bounds themselves are included.
    """
    if inclusive_start:
        # Position on the first element >= start.
        leaf_with_key = self._find_leaf_with_first_key_occurence(start)
        nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
        leaf_with_key, key_index = self._find_index_of_first_key_equal_or_smaller_key(start, leaf_with_key, nr_of_elements)
        nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
        curr_key = self._read_single_leaf_record(
            leaf_with_key, key_index)[0]
        if curr_key < start:
            key_index += 1
    else:
        # Position on the first element strictly > start.
        # NOTE(review): unlike the inclusive branch, leaf metadata is not
        # re-read after the index search here -- confirm the searched leaf
        # cannot differ from the one whose metadata was read above.
        leaf_with_key = self._find_leaf_with_last_key_occurence(start)
        nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
        leaf_with_key, key_index = self._find_index_of_last_key_equal_or_smaller_key(start, leaf_with_key, nr_of_elements)
        curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_with_key, key_index)
        if curr_key <= start:
            key_index += 1
    # First pass: consume *offset* live records.
    while offset:
        if key_index < nr_of_elements:
            curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_with_key, key_index)
            if curr_status != 'd':
                offset -= 1
            key_index += 1
        else:
            # Leaf exhausted; continue at the start of the right neighbour.
            key_index = 0
            if next_leaf:
                leaf_with_key = next_leaf
                nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
            else:
                return
    # Second pass: yield live records until the end bound is crossed or
    # *limit* is reached.
    while limit:
        if key_index < nr_of_elements:
            curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_with_key, key_index)
            if curr_key > end or (curr_key == end and not inclusive_end):
                return
            elif curr_status != 'd':
                yield curr_doc_id, curr_key, curr_start, curr_size, curr_status
                limit -= 1
            key_index += 1
        else:
            key_index = 0
            if next_leaf:
                leaf_with_key = next_leaf
                nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
            else:
                return
def get(self, key):
    """Return the first live record stored under *key*."""
    normalized = self.make_key(key)
    return self._find_key(normalized)
def get_many(self, key, limit=1, offset=0):
    """Return a generator over up to *limit* records equal to *key*, after *offset* skips."""
    normalized = self.make_key(key)
    return self._find_key_many(normalized, limit, offset)
def get_between(self, start, end, limit=1, offset=0, inclusive_start=True, inclusive_end=True):
    """Return a generator over records whose keys fall in [start, end].

    Either bound may be None to leave that side of the interval open; the
    inclusivity flags control whether the bounds themselves are included.
    """
    if start is not None and end is not None:
        # Both bounds present: full interval scan.
        return self._find_key_between(
            self.make_key(start), self.make_key(end),
            limit, offset, inclusive_start, inclusive_end)
    if start is None:
        # Open on the left: everything up to (or below) *end*.
        finder = (self._find_key_equal_and_smaller if inclusive_end
                  else self._find_key_smaller)
        return finder(self.make_key(end), limit, offset)
    # Open on the right: everything from (or above) *start*.
    finder = (self._find_key_equal_and_bigger if inclusive_start
              else self._find_key_bigger)
    return finder(self.make_key(start), limit, offset)
def all(self, limit=-1, offset=0):
    """
    Traverse the linked list of all tree leaves and yield every live
    element stored in the index, in key order.

    Yields (doc_id, key, start, size, status) tuples, skipping records
    flagged 'd' (deleted). *offset* live records are consumed first. A
    negative *limit* (default -1) never decrements to zero, so it means
    "no limit".
    """
    # root_flag 'n' means the root is an internal node, so the first leaf
    # sits immediately after it; otherwise the root itself is the leaf.
    if self.root_flag == 'n':
        leaf_start = self.data_start + self.node_size
    else:
        leaf_start = self.data_start
    nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_start)
    key_index = 0
    # First pass: consume *offset* live records.
    while offset:
        if key_index < nr_of_elements:
            curr_key, doc_id, start, size, status = self._read_single_leaf_record(
                leaf_start, key_index)
            if status != 'd':
                offset -= 1
            key_index += 1
        else:
            # Leaf exhausted; continue in the right neighbour.
            key_index = 0
            if next_leaf:
                leaf_start = next_leaf
                nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
            else:
                return
    # Second pass: yield live records until *limit* hits zero (never, when
    # it started negative) or the leaf chain ends.
    while limit:
        if key_index < nr_of_elements:
            curr_key, doc_id, start, size, status = self._read_single_leaf_record(
                leaf_start, key_index)
            if status != 'd':
                yield doc_id, curr_key, start, size, status
                limit -= 1
            key_index += 1
        else:
            key_index = 0
            if next_leaf:
                leaf_start = next_leaf
                nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
            else:
                return
def make_key(self, key):
    """Normalize *key* into its stored form (abstract; subclasses must override)."""
    raise NotImplementedError()
def make_key_value(self, data):
raise NotImplementedError()
def _open_storage(self):
s = globals()[self.storage_class]
if not self.storage:
self.storage = s(self.db_path, self.name)
self.storage.open()
def _create_storage(self):
s = globals()[self.storage_class]
if not self.storage:
self.storage = s(self.db_path, self.name)
self.storage.create()
def compact(self, node_capacity=0):
if not node_capacity:
node_capacity = self.node_capacity
compact_ind = self.__class__(
self.db_path, self.name + '_compact', node_capacity=node_capacity)
compact_ind.create_index()
gen = self.all()
while True:
try:
doc_id, key, start, size, status = gen.next()
except StopIteration:
break
self.storage._f.seek(start)
value = self.storage._f.read(size)
start_ = compact_ind.storage._f.tell()
compact_ind.storage._f.write(value)
compact_ind.insert(doc_id, key, start_, size, status)
compact_ind.close_index()
original_name = self.name
# os.unlink(os.path.join(self.db_path, self.name + "_buck"))
self.close_index()
shutil.move(os.path.join(compact_ind.db_path, compact_ind.
name + "_buck"), os.path.join(self.db_path, self.name + "_buck"))
shutil.move(os.path.join(compact_ind.db_path, compact_ind.
name + "_stor"), os.path.join(self.db_path, self.name + "_stor"))
# self.name = original_name
self.open_index() # reload...
self.name = original_name
self._save_params(dict(name=original_name))
self._fix_params()
self._clear_cache()
return True
    def _fix_params(self):
        # Re-derive the computed size/offset properties after the base class
        # restores the persisted index parameters from disk.
        super(IU_TreeBasedIndex, self)._fix_params()
        self._count_props()
def _clear_cache(self):
self._find_key.clear()
self._match_doc_id.clear()
# self._read_single_leaf_record.clear()
self._find_key_in_leaf.clear()
self._read_single_node_key.clear()
self._find_first_key_occurence_in_node.clear()
self._find_last_key_occurence_in_node.clear()
self._read_leaf_nr_of_elements.clear()
self._read_leaf_neighbours.clear()
self._read_leaf_nr_of_elements_and_neighbours.clear()
self._read_node_nr_of_elements_and_children_flag.clear()
    def close_index(self):
        # Close files via the base class, then drop memoized offsets so a
        # later reopen cannot serve data from the old file layout.
        super(IU_TreeBasedIndex, self).close_index()
        self._clear_cache()
class IU_MultiTreeBasedIndex(IU_TreeBasedIndex):
    """
    Class that allows to index more than one key per database record.

    It operates very well on GET/INSERT. It's not optimized for
    UPDATE operations (it will always re-add everything).
    """
    def __init__(self, *args, **kwargs):
        # No extra state of its own: identical setup to IU_TreeBasedIndex;
        # only insert/update behavior differs (one entry per key).
        super(IU_MultiTreeBasedIndex, self).__init__(*args, **kwargs)
def insert(self, doc_id, key, start, size, status='o'):
if isinstance(key, (list, tuple)):
key = set(key)
elif not isinstance(key, set):
key = set([key])
ins = super(IU_MultiTreeBasedIndex, self).insert
for curr_key in key:
ins(doc_id, curr_key, | |
# repo: XilinJia/Negociant
'''
Project: Negociant
Copyright (c) 2017 <NAME> <https://github.com/XilinJia>
This software is released under the MIT license
https://opensource.org/licenses/MIT
'''
# encoding: utf-8
'''
本文件包含了CTA引擎中的策略开发用模板,开发策略时需要继承CtaTemplate类。
'''
from datetime import datetime, timedelta, time
import numpy as np
import math
from negociant.trader.vtObject import VtBarData
from negociant.trader.vtFunction import getTempPath
from negociant.trader.vtConstant import *
from negociant.trader.lkTrades import TradeRecord
from negociant.trader.markets.lkMarketHours import MarketsOpHours
from negociant.trader.lkBarsEngine import BarGenerator
from .lkTechnicals import Technicals
from .ctaArrayManager import ArrayManager
from .ctaBase import *
########################################################################
class LKCtaTemplate(object):
    """Base template for CTA strategies.

    Subclasses implement the on* callbacks; the engine (``self.ctaEngine``)
    handles order routing, persistence and event publication.
    """
    # Strategy class name and author
    className = 'LKCtaTemplate'
    author = 'XJia'
    # MongoDB database names; the bar database defaults to 1-minute bars
    tickDbName = TICK_DB_NAME
    barDbName = MINUTE_DB_NAME
    # Basic strategy parameters
    name = EMPTY_UNICODE          # strategy instance name
    vtSymbol = EMPTY_STRING       # vt system symbol of the traded contract
    hotSymbol = EMPTY_STRING
    productClass = EMPTY_STRING   # product class (IB interface only)
    currency = EMPTY_STRING       # currency (IB interface only)
    # Basic strategy state, managed by the engine
    inited = False    # whether the strategy has been initialized
    trading = False   # whether trading is enabled, managed by the engine
    pos = 0           # current position
    # Names of the configurable parameters.
    # FIX: 'useCapital' was listed twice, making the engine apply/display the
    # same parameter twice; the duplicate entry has been removed.
    paramList = ['name',
                 'className',
                 'author',
                 'useCapital',
                 'vtSymbol',
                 'hotSymbol',
                 'contractSize',
                 'margin']
    # Names of the state variables
    varList = ['inited',
               'trading',
               'pos']
    # Names of the variables synchronized to the database
    syncList = ['pos']
    #----------------------------------------------------------------------
    def __init__(self, ctaEngine, setting):
        """Constructor.

        :param ctaEngine: engine that routes orders and events for this strategy
        :param setting: dict of parameter overrides; only keys present in
            ``paramList`` are applied.
        """
        self.ctaEngine = ctaEngine
        self.barGen = BarGenerator()
        # Market trading hours, used as a time filter for order submission.
        self.marketHours = MarketsOpHours()
        self.useCapital = 100000.
        self.margin = 1.
        self.contractSize = 1
        # Apply the configured strategy parameters.
        if setting:
            d = self.__dict__
            for key in self.paramList:
                if key in setting:
                    d[key] = setting[key]
                    print("read CTA_Setting: ", key, setting[key])
            # Normalize the numeric parameters (settings may arrive as strings).
            if "useCapital" in d:
                self.useCapital = float(d["useCapital"])
            if "margin" in d:
                self.margin = float(d["margin"])
            if "contractSize" in d:
                self.contractSize = int(d["contractSize"])
            if 'hotSymbol' in d:
                self.hotSymbol = d['hotSymbol']
        # Capital usable per contract after margin/contract-size scaling.
        self.effCapital = self.useCapital / self.contractSize / self.margin
        self.tradeRec = TradeRecord(self.vtSymbol)
        logFilePath = getTempPath(self.name + '-' + self.vtSymbol + '-' + str(datetime.today().date()) + '.log')
        self.logFile = open(logFilePath, 'a')
        self.logFile.write(self.name + ' Capital: ' + str(self.useCapital) + ' ' + self.vtSymbol + ' ' + self.hotSymbol +
                           ' contractSize: ' + str(self.contractSize) + ' margin: ' + str(self.margin) + ' effCapital: ' +
                           str(int(self.effCapital)) + '\n')
    #----------------------------------------------------------------------
    def onInit(self):
        """Initialize the strategy (must be implemented by the subclass)."""
        raise NotImplementedError
    #----------------------------------------------------------------------
    def onStart(self):
        """Start the strategy (must be implemented by the subclass)."""
        raise NotImplementedError
    #----------------------------------------------------------------------
    def onStop(self):
        """Stop the strategy (must be implemented by the subclass)."""
        raise NotImplementedError
    def getLots(self, price):
        """Number of lots affordable with the effective capital at *price*."""
        return int(self.effCapital / price)
    def getLotsPermission(self, price, newLots, dumpLots):
        """Cap *newLots* by affordable capital and current exposure.

        This is only suitable for a single strategy, not for a portfolio;
        class LKSCC implements a finer-grained version.
        """
        affordLots = int(self.effCapital / price)
        availLots = affordLots - abs(self.pos) + dumpLots
        # Opening from flat, or reversing direction: only affordability matters.
        if self.pos == 0 or newLots != math.copysign(newLots, self.pos):
            return min(abs(newLots), affordLots)
        return min(abs(newLots), availLots)
    #----------------------------------------------------------------------
    def onTick(self, tick):
        """Handle a tick update (must be implemented by the subclass)."""
        raise NotImplementedError
    #----------------------------------------------------------------------
    def onOrder(self, order):
        """Handle an order status update (must be implemented by the subclass)."""
        raise NotImplementedError
    #----------------------------------------------------------------------
    def onTrade(self, trade):
        """Handle a trade (fill) update (must be implemented by the subclass)."""
        raise NotImplementedError
    #----------------------------------------------------------------------
    def onBar(self, bar):
        """Handle a new bar (must be implemented by the subclass)."""
        raise NotImplementedError
    #----------------------------------------------------------------------
    def onStopOrder(self, so):
        """Handle a stop-order update (must be implemented by the subclass)."""
        raise NotImplementedError
    def goLong(self, price):
        """Move to a long position at *price*: cover any short first, then buy."""
        lots = self.getLots(price)
        self.logFile.write("Going long " + self.vtSymbol + " lots:" + str(lots) + " " + str(self.pos) + " at price: " +
                           str(price) + '\n')
        if self.pos < 0:
            self.cover(price, abs(self.pos))
        if lots > 0:
            self.buy(price, lots)
    def goShort(self, price):
        """Move to a short position at *price*: sell any long first, then short."""
        lots = self.getLots(price)
        self.logFile.write("Going short " + self.vtSymbol + " lots:" + str(lots) + " " + str(self.pos) + " at price: " +
                           str(price) + '\n')
        if self.pos > 0:
            self.sell(price, self.pos)
        if lots > 0:
            self.short(price, lots)
    #----------------------------------------------------------------------
    def buy(self, price, volume, stop=False):
        """Buy to open a long position."""
        return self.sendOrder(CTAORDER_BUY, price, volume, stop)
    #----------------------------------------------------------------------
    def sell(self, price, volume, stop=False):
        """Sell to close a long position."""
        return self.sendOrder(CTAORDER_SELL, price, volume, stop)
    #----------------------------------------------------------------------
    def short(self, price, volume, stop=False):
        """Sell to open a short position."""
        return self.sendOrder(CTAORDER_SHORT, price, volume, stop)
    #----------------------------------------------------------------------
    def cover(self, price, volume, stop=False):
        """Buy to close a short position."""
        return self.sendOrder(CTAORDER_COVER, price, volume, stop)
    #----------------------------------------------------------------------
    def sendOrder(self, orderType, price, volume, stop=False):
        """Send an order through the engine; returns a list of vt order IDs.

        Orders are sent only while trading is enabled and the market is
        open; otherwise an empty list is returned.
        """
        if self.trading and self.marketHours.isMarketOpen(self.vtSymbol):
            # stop=True means a local stop order managed by the engine.
            if stop:
                vtOrderIDList = self.ctaEngine.sendStopOrder(self.vtSymbol, orderType, price, volume, self)
            else:
                vtOrderIDList = self.ctaEngine.sendOrder(self.vtSymbol, orderType, price, volume, self)
            return vtOrderIDList
        else:
            return []
    #----------------------------------------------------------------------
    def cancelOrder(self, vtOrderID):
        """Cancel an order by its vt order ID (no-op for an empty ID)."""
        if not vtOrderID:
            return
        # Stop orders are held locally by the engine and cancelled separately.
        if STOPORDERPREFIX in vtOrderID:
            self.ctaEngine.cancelStopOrder(vtOrderID)
        else:
            self.ctaEngine.cancelOrder(vtOrderID)
    #----------------------------------------------------------------------
    def cancelAll(self):
        """Cancel all outstanding orders of this strategy."""
        self.ctaEngine.cancelAll(self.name)
    #----------------------------------------------------------------------
    def insertTick(self, tick):
        """Insert tick data into the tick database."""
        self.ctaEngine.insertData(self.tickDbName, self.vtSymbol, tick)
    #----------------------------------------------------------------------
    def insertBar(self, bar):
        """Insert bar data into the bar database."""
        self.ctaEngine.insertData(self.barDbName, self.vtSymbol, bar)
    #----------------------------------------------------------------------
    def loadTick(self, days):
        """Load *days* worth of tick data for the traded symbol."""
        return self.ctaEngine.loadTick(self.tickDbName, self.vtSymbol, days)
    #----------------------------------------------------------------------
    def loadBar(self, days):
        """Load *days* worth of bar data for the hot (continuous) symbol."""
        return self.ctaEngine.loadBar(self.barDbName, self.hotSymbol, days)
    #----------------------------------------------------------------------
    def writeCtaLog(self, content):
        """Write a CTA log line prefixed with the strategy name."""
        content = self.name + ':' + content
        self.ctaEngine.writeCtaLog(content)
    #----------------------------------------------------------------------
    def putEvent(self):
        """Publish a strategy-state-changed event."""
        self.ctaEngine.putStrategyEvent(self.name)
    #----------------------------------------------------------------------
    def getEngineType(self):
        """Return the type of the running engine (live vs. backtest)."""
        return self.ctaEngine.engineType
    #----------------------------------------------------------------------
    def saveSyncData(self):
        """Persist the synchronized variables (syncList) to the database."""
        if self.trading:
            self.ctaEngine.saveSyncData(self)
    #----------------------------------------------------------------------
    def getPriceTick(self):
        """Return the minimum price increment for the traded contract."""
        return self.ctaEngine.getPriceTick(self)
########################################################################
class LKTargetPos(LKCtaTemplate):
    """Strategy template driven by a target position.

    Instead of issuing buy/sell/cover/short directly, a subclass runs its
    logic and then calls ``setTargetPos``; this template works out the
    orders needed to move the actual position toward the target, which
    suits users who prefer not to manage order/cancel details themselves.

    Subclasses must forward these callbacks to this base class first:
        onTick, onBar, onOrder
    e.g. for a strategy named TestStrategy, onTick should start with
        super(TestStrategy, self).onTick(tick)
    """
    className = 'LKTargetPos'
    author = 'XJia'
    # Template defaults (per-instance values are set in __init__ / callbacks)
    tickAdd = 1               # price offset added past the reference price
    lastTick = None           # most recent tick
    lastBar = None            # most recent bar
    targetPos = EMPTY_INT     # target position
    orderList = []            # pending vt order IDs (shadowed per instance below)
    # Names of the state variables shown/persisted by the engine
    varList = LKCtaTemplate.varList + ['targetPos']
    #----------------------------------------------------------------------
    def __init__(self, ctaEngine, setting):
        """Constructor."""
        super(LKTargetPos, self).__init__(ctaEngine, setting)
        # FIX: orderList used to exist only as a class attribute, so every
        # instance mutated the same shared list; give each instance its own.
        self.orderList = []
    #----------------------------------------------------------------------
    def onTick(self, tick):
        """Record the latest tick as the pricing reference."""
        self.lastTick = tick
    #----------------------------------------------------------------------
    def onBar(self, bar):
        """Record the latest bar as the fallback pricing reference."""
        self.lastBar = bar
    #----------------------------------------------------------------------
    def onOrder(self, order):
        """Drop finished orders (fully traded or cancelled) from the pending list."""
        if order.status == STATUS_ALLTRADED or order.status == STATUS_CANCELLED:
            if order.vtOrderID in self.orderList:
                self.orderList.remove(order.vtOrderID)
    #----------------------------------------------------------------------
    def setTargetPos(self, targetPos):
        """Set the target position and immediately start trading toward it."""
        self.targetPos = targetPos
        self.trade()
    #----------------------------------------------------------------------
    def trade(self):
        """Issue the orders needed to move the actual position to the target."""
        # Cancel all previously outstanding orders first.
        self.cancelAll()
        # Nothing to do when the target equals the current position.
        posChange = self.targetPos - self.pos
        if not posChange:
            return
        # Determine reference prices: prefer tick data, fall back to bars.
        longPrice = 0
        shortPrice = 0
        if self.lastTick:
            if posChange > 0:
                longPrice = self.lastTick.askPrice1 + self.tickAdd
                if self.lastTick.upperLimit:
                    longPrice = min(longPrice, self.lastTick.upperLimit)  # clamp to limit-up
            else:
                shortPrice = self.lastTick.bidPrice1 - self.tickAdd
                if self.lastTick.lowerLimit:
                    shortPrice = max(shortPrice, self.lastTick.lowerLimit)  # clamp to limit-down
        else:
            if posChange > 0:
                longPrice = self.lastBar.close + self.tickAdd
            else:
                shortPrice = self.lastBar.close - self.tickAdd
        # Backtesting: merge the close and the reverse open into one net order.
        if self.getEngineType() == ENGINETYPE_BACKTESTING:
            if posChange > 0:
                l = self.buy(longPrice, abs(posChange))
            else:
                l = self.short(shortPrice, abs(posChange))
            self.orderList.extend(l)
        # Live trading: first make sure all previous orders have finished
        # (filled/cancelled), then close existing exposure before opening new.
        else:
            if self.orderList:
                return
            # Buying
            if posChange > 0:
                # Currently short: cover before (possibly) opening long.
                if self.pos < 0:
                    # Buy quantity smaller than the short position: just cover it.
                    if posChange < abs(self.pos):
                        l = self.cover(longPrice, posChange)
                    # Otherwise cover the entire short position first.
                    else:
                        l = self.cover(longPrice, abs(self.pos))
                # No short position: open a long.
                else:
                    l = self.buy(longPrice, abs(posChange))
            # Selling: mirror image of the above.
            else:
                if self.pos > 0:
                    if abs(posChange) < self.pos:
                        l = self.sell(shortPrice, abs(posChange))
                    else:
                        l = self.sell(shortPrice, abs(self.pos))
                else:
                    l = self.short(shortPrice, abs(posChange))
            self.orderList.extend(l)
class LKSCC(LKTargetPos):
    """Multi-signal target-position strategy; subclasses populate self.signals."""
    className = 'LKSCC'
    author = 'XJia'
    # Strategy parameters
    initDays = 200  # number of days of history used for initialization
    #----------------------------------------------------------------------
    def __init__(self, ctaEngine, setting):
        """Constructor."""
        super(LKSCC, self).__init__(ctaEngine, setting)
        self.doneInit = False
        # Per-run CSV log for technical-indicator output.
        techLogPath = getTempPath(self.name + '-' + self.vtSymbol + '-' + str(datetime.today().date()) + '.csv')
        self.techLog = open(techLogPath, 'a')
        # Subscribe to both minute and daily bars from the bar generator.
        self.barGen.requireMinBar(self.onBar)
        self.barGen.requireDayBar(self.onDayBar)
        self.tech = Technicals(size=40)
        self.signals = []
        # subclass needs to populate self.signals
# subclass needs to populate self.signals
def setSignalCapitals(self, boostR=1.) :
capitalAlloc = self.effCapital / len(self.signals) * boostR
for i in range(len(self.signals)) :
self.signals[i].setCapital(capitalAlloc)
def getLotsPermission(self, price, newLots, dumpLots) :
affordLots = int(self.effCapital / price)
availLots = affordLots - abs(self.pos) + dumpLots
if self.pos == 0 or newLots != math.copysign(newLots, self.pos) :
return min(abs(newLots), affordLots)
# print(self.vtSymbol, "affordLots: ", affordLots, " curLots: ", abs(self.pos), " dumpLots: ", dumpLots, " availLots: ", availLots)
if availLots < abs(newLots) :
print("availLots not enough for newLots: ", newLots, affordLots, self.pos, dumpLots, availLots, " reallocating")
effStrats = 0
for i in range(len(self.signals)) :
lotsi = self.signals[i].getSignalPos()
if lotsi != 0 and lotsi == math.copysign(lotsi, self.pos) :
effStrats += 1
lotsRealloc = int(affordLots / (effStrats+1))
for i in range(len(self.signals)) :
lotsi = self.signals[i].getSignalPos()
if lotsi != 0 :
self.signals[i].setSignalPos(math.copysign(lotsRealloc, lotsi), price)
# print("Reset signal position: ", self.vtSymbol, self.signals[i].name,
# str(datetime.now().replace(second=0, microsecond=0)), lotsi, self.signals[i].getSignalPos())
self.logFile.write("Reset signal position: " + self.vtSymbol + ' ' + self.signals[i].name + ' ' | |
if_none_match, 'str')
if if_tags is not None:
header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise models.StorageErrorException(response, self._deserialize)
if cls:
response_headers = {
'ETag': self._deserialize('str', response.headers.get('ETag')),
'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
}
return cls(response, None, response_headers)
acquire_lease.metadata = {'url': '/{containerName}/{blob}'}
    async def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
        """[Update] The Lease Blob operation establishes and manages a lock on a
        blob for write and delete operations.
        :param lease_id: Specifies the current lease ID on the resource.
        :type lease_id: str
        :param timeout: The timeout parameter is expressed in seconds. For
        more information, see <a
        href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
        Timeouts for Blob Service Operations.</a>
        :type timeout: int
        :param request_id: Provides a client-generated, opaque value with a 1
        KB character limit that is recorded in the analytics logs when storage
        analytics logging is enabled.
        :type request_id: str
        :param modified_access_conditions: Additional parameters for the
        operation
        :type modified_access_conditions:
        ~azure.storage.blob.models.ModifiedAccessConditions
        :param callable cls: A custom type or function that will be passed the
        direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises:
        :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
        """
        error_map = kwargs.pop('error_map', None)
        # Flatten the optional ModifiedAccessConditions object into the
        # individual conditional-header values expected by the service.
        if_modified_since = None
        if modified_access_conditions is not None:
            if_modified_since = modified_access_conditions.if_modified_since
        if_unmodified_since = None
        if modified_access_conditions is not None:
            if_unmodified_since = modified_access_conditions.if_unmodified_since
        if_match = None
        if modified_access_conditions is not None:
            if_match = modified_access_conditions.if_match
        if_none_match = None
        if modified_access_conditions is not None:
            if_none_match = modified_access_conditions.if_none_match
        if_tags = None
        if modified_access_conditions is not None:
            if_tags = modified_access_conditions.if_tags
        # Fixed query/header values selecting the lease "release" action.
        comp = "lease"
        action = "release"
        # Construct URL
        url = self.release_lease.metadata['url']
        path_format_arguments = {
            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
        if request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
        if if_modified_since is not None:
            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
        if if_unmodified_since is not None:
            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
        if if_tags is not None:
            header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 is the only success status for releasing a lease.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise models.StorageErrorException(response, self._deserialize)
        if cls:
            response_headers = {
                'ETag': self._deserialize('str', response.headers.get('ETag')),
                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
            }
            return cls(response, None, response_headers)
    release_lease.metadata = {'url': '/{containerName}/{blob}'}
    async def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
        """[Update] The Lease Blob operation establishes and manages a lock on a
        blob for write and delete operations.
        :param lease_id: Specifies the current lease ID on the resource.
        :type lease_id: str
        :param timeout: The timeout parameter is expressed in seconds. For
        more information, see <a
        href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
        Timeouts for Blob Service Operations.</a>
        :type timeout: int
        :param request_id: Provides a client-generated, opaque value with a 1
        KB character limit that is recorded in the analytics logs when storage
        analytics logging is enabled.
        :type request_id: str
        :param modified_access_conditions: Additional parameters for the
        operation
        :type modified_access_conditions:
        ~azure.storage.blob.models.ModifiedAccessConditions
        :param callable cls: A custom type or function that will be passed the
        direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises:
        :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
        """
        error_map = kwargs.pop('error_map', None)
        # Flatten the optional ModifiedAccessConditions object into the
        # individual conditional-header values expected by the service.
        if_modified_since = None
        if modified_access_conditions is not None:
            if_modified_since = modified_access_conditions.if_modified_since
        if_unmodified_since = None
        if modified_access_conditions is not None:
            if_unmodified_since = modified_access_conditions.if_unmodified_since
        if_match = None
        if modified_access_conditions is not None:
            if_match = modified_access_conditions.if_match
        if_none_match = None
        if modified_access_conditions is not None:
            if_none_match = modified_access_conditions.if_none_match
        if_tags = None
        if modified_access_conditions is not None:
            if_tags = modified_access_conditions.if_tags
        # Fixed query/header values selecting the lease "renew" action.
        comp = "lease"
        action = "renew"
        # Construct URL
        url = self.renew_lease.metadata['url']
        path_format_arguments = {
            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
        if request_id is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
        header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
        if if_modified_since is not None:
            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
        if if_unmodified_since is not None:
            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
        if if_tags is not None:
            header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 is the only success status for renewing a lease.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise models.StorageErrorException(response, self._deserialize)
        if cls:
            response_headers = {
                'ETag': self._deserialize('str', response.headers.get('ETag')),
                'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
                'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
                'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
                'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
                'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
                'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
                'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
            }
            return cls(response, None, response_headers)
    renew_lease.metadata = {'url': '/{containerName}/{blob}'}
async def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
    """[Update] The Lease Blob operation establishes and manages a lock on a
    blob for write and delete operations.

    :param lease_id: Specifies the current lease ID on the resource.
    :type lease_id: str
    :param proposed_lease_id: Proposed lease ID, in a GUID string format.
        The Blob service returns 400 (Invalid request) if the proposed lease
        ID is not in the correct format. See Guid Constructor (String) for a
        list of valid GUID string formats.
    :type proposed_lease_id: str
    :param timeout: The timeout parameter is expressed in seconds. For
        more information, see <a
        href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
        Timeouts for Blob Service Operations.</a>
    :type timeout: int
    :param request_id: Provides a client-generated, opaque value with a 1
        KB character limit that is recorded in the analytics logs when storage
        analytics logging is enabled.
    :type request_id: str
    :param modified_access_conditions: Additional parameters for the
        operation
    :type modified_access_conditions:
        ~azure.storage.blob.models.ModifiedAccessConditions
    :param callable cls: A custom type or function that will be passed the
        direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises:
        :class:`StorageErrorException<azure.storage.blob.models.StorageErrorException>`
    """
    # Optional caller-supplied mapping of HTTP status code -> exception type,
    # consumed by map_error() below.
    error_map = kwargs.pop('error_map', None)
    # Flatten the optional ModifiedAccessConditions parameter group into the
    # individual conditional values used to build request headers.
    if_modified_since = None
    if modified_access_conditions is not None:
        if_modified_since = modified_access_conditions.if_modified_since
    if_unmodified_since = None
    if modified_access_conditions is not None:
        if_unmodified_since = modified_access_conditions.if_unmodified_since
    if_match = None
    if modified_access_conditions is not None:
        if_match = modified_access_conditions.if_match
    if_none_match = None
    if modified_access_conditions is not None:
        if_none_match = modified_access_conditions.if_none_match
    if_tags = None
    if modified_access_conditions is not None:
        if_tags = modified_access_conditions.if_tags
    # Wire-level constants identifying this operation ("change lease").
    comp = "lease"
    action = "change"

    # Construct URL
    url = self.change_lease.metadata['url']
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')

    # Construct headers (conditional headers only sent when set).
    header_parameters = {}
    header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
    header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id is not None:
        header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
    header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
    if if_modified_since is not None:
        header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
    if if_unmodified_since is not None:
        header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    if if_none_match is not None:
        header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
    if if_tags is not None:
        header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')

    # Construct and send request
    request = self._client.put(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 is the only documented success status for a lease change.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise models.StorageErrorException(response, self._deserialize)

    if cls:
        # Deserialize the response headers of interest and hand everything to
        # the caller-supplied callback; otherwise the method returns None.
        response_headers = {
            'ETag': self._deserialize('str', response.headers.get('ETag')),
            'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
            'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
            'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
            'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
            'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
            'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
            'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
        }
        return cls(response, None, response_headers)
change_lease.metadata = {'url': | |
date when you are installing new packages.
# ### Managing `conda` environments
#
# #### What is a conda environment and why is it so useful?
#
# Using `conda`, you can create an isolated R or Python virtual environment for your project.
# The default environment is the `base` environment,
# which contains only the essential packages from Miniconda
# (and anything else you have installed in it since installing Miniconda).
# You can see that your shell's prompt string is prefaced with `(base)`
# when you are inside this environment:
#
# ```{bash}
# (base) Helps-MacBook-Pro:~ tiffany$
# ```
#
# In the computer setup guide,
# we asked you to follow instructions so that this environment
# will be activated by default every time you open your terminal.
#
# To create another environment on your computer,
# that is isolated from the `(base)` environment
# you can either do this through:
#
# 1. Manual specifications of packages.
# 2. An environment file in YAML format (`environment.yml`).
#
# We will now discuss both, as they are both relevant workflows for data science.
# When do you use one versus the other?
# I typically use the manual specifications of packages when I am creating
# a new data science project.
# From that I generate an environment file in YAML format
# that I can share with collaborators (or anyone else who wants to reproduce my work).
# Thus, I use an environment file in YAML format when I join a project as a collaborator
# and I need to use the same environment that has been previously used for that project,
# or when I want to reproduce someone else's work.
# ### Creating environment by manually specifying packages
#
# We can create the `test_env` conda environment by typing `conda create -n <name-of-env>`.
# However,
# it is often useful to specify more than just the name of the environment,
# e.g. the channel from which to install packages, the Python version,
# and a list of packages to install into the new env.
# In the example below,
# I am creating the `test_env` environment
# that uses python 3.7 and a list of libraries: `jupyterlab` and `pandas`.
#
# ```
# conda create -n test_env -c conda-forge python=3.7 jupyterlab pandas=1.0.2
# ```
#
# conda will solve any dependencies between the packages like before
# and create a new environment with those packages.
# Usually,
# we don't need to specify the channel,
# but in this case I want to get the very latest version of these packages,
# and they are made available in `conda-forge`
# before they reach the default conda channel.
#
# To activate this new environment,
# you can type `conda activate test_env`
# (and `conda deactivate` for deactivating).
# Since you will do this often,
# we created an alias shortcut `ca`
# that you can use to activate environments.
# To know the current environment that you're in you can look at the prefix
# of the prompt string in your shell which now changed to (`test_env`).
# And to see all your environments,
# you can type `conda env list`.
# ### Seeing what packages are available in an environment
#
# We will now check packages that are available to us.
# The command below will list all the packages in an environment, in this case `test_env`.
# The list will include versions of each package, the specific build,
# and the channel that the package was downloaded from.
# `conda list` is also useful to ensure that you have installed the packages that you desire.
#
# ```
# conda list
# ```
#
# ```
# # packages in environment at //miniconda/envs/test_env:
# #
# Using Anaconda Cloud api site https://api.anaconda.org
# blas 1.1 openblas conda-forge
# ca-certificates 2016.9.26 0 conda-forge
# certifi 2016.9.26 py27_0 conda-forge
# cycler 0.10.0 py27_0 conda-forge
# freetype 2.6.3 1 conda-forge
# functools32 3.2.3.2 py27_1 conda-forge
# libgfortran 3.0.0 0 conda-forge
# ```
# ### Installing conda package
#
# Under the name column of the result in the terminal or the package column in the Anaconda Cloud listing,
# you can find the necessary information to install the package,
# e.g. conda-forge/rasterio.
# The first part lists the channel that this package is from and the second part shows the name of the package.
#
# To install the latest version available within the channel, do not specify a version in the install command. We will install version 0.35 of `rasterio` from conda-forge into `test_env` in this example. Conda will also automatically install the dependencies for this package.
#
# ```
# conda install -c conda-forge rasterio=0.35
# ```
#
# If you have a few trusted channels that you prefer to use, you can pre-configure these so that every time you are creating an environment, you won't need to explicitly declare the channel.
#
# ```
# conda config --add channels conda-forge
# ```
# ### Removing a conda package
#
# We decided that rasterio is not needed in this tutorial, so we will remove it from `test_env`.
# Note that this will remove the main package rasterio and its dependencies (unless a dependency was installed explicitly at an earlier point in time or is required by another package).
#
# ```
# conda remove -n test_env rasterio
# ```
#
# ```
# Using Anaconda Cloud api site https://api.anaconda.org
# Fetching package metadata .........
# Solving package specifications: ..........
#
# Package plan for package removal in environment //anaconda/envs/test_env:
#
# The following packages will be REMOVED:
#
# rasterio: 0.35.1-np111py27_1 conda-forge
#
# Proceed ([y]/n)? y
#
# Unlinking packages ...
# [ COMPLETE ]|#######################################################################################################| 100%
# ```
# ### Sharing Environments with others
#
# To share an environment, you can export your conda environment to an environment file,
# which will list each package and its version
# in the format `package=version=build`.
#
# Exporting your environment to a file called `environment.yaml`
# (it could be called anything,
# but this is the conventional name
# and using it makes it easy for others
# to recognize that this is a conda env file,
# the extension can be either `.yaml` or `.yml`):
#
# ```
# conda env export --from-history -f environment.yml
# ```
#
# Remember that `.yaml` files are plain text,
# so you can use a text editor such as VS Code to open them.
# If you do,
# you will realize that this environment file has A LOT more packages
# than `jupyterlab` and `pandas`.
# This is because the default behavior is to also list the dependencies
# that were installed together with these packages,
# e.g. `numpy`.
# This is good in the sense that it gives an exact copy of *everything*
# in your environment.
#
# We use the `--from-history` flag/option above as
# some dependencies might differ between operating systems,
# so this file *might* not work with someone from a different OS.
# The `--from-history` flag,
# looks at the history of the packages you explicitly told `conda` to install
# and only list those in the export.
# The required dependencies will then be handled in an OS-specific manner during the installation,
# which guarantees that they will work across OSes.
# This `environment.yaml` file would be much shorter and look something like this:
#
# ```yaml
# name: test_env
# channels:
# - conda-forge
# - defaults
# dependencies:
# - conda
# - python=3.7
# - pandas==1.0.2
# - jupyterlab
# ```
#
# Importantly,
# this will not include the package version
# unless you included it when you installed
# with the `package==version` syntax.
# For an environment to be reproducible,
# you **NEED** to add the version string manually.
# ### Creating environment from an environment file
#
# Now, let's install `environment.yml` environment file above so that we can create a conda environment called `test_env`.
#
# ```
# $ conda env create --file environment.yml
# ```
#
# #### Exercise
#
# Create an environment on your laptop with an older version of Python!
#
# 1. Clone [this GitHub repository](https://github.com/ttimbers/conda_env_practice/blob/main/README.md).
#
# 2. Try to run some antiquated (incompatible with Python 3.0.0 and higher) Python code, such as `python -c "print 'Back from the Future'"`. This should fail.
#
# 3. In the terminal, navigate to the root of the repository and run: `conda env create --file environment.yml`
#
# 4. Activate the environment by typing `conda activate oldie_but_a_goodie`
#
# 5. Try | |
by"))
notes = models.TextField(_("Notes"), max_length=765, blank=True, null=True)
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
objects = StakeholderManager()
class Meta:
    # Default queryset ordering and the plural label shown in the Django admin.
    ordering = ('country','name','type')
    verbose_name_plural = _("Stakeholders")
# on save add create date or update edit date
def save(self, *args, **kwargs):
    """Stamp ``create_date`` on first save, refresh ``edit_date``, then save.

    Fix: forward ``*args``/``**kwargs`` to ``super().save()`` so caller
    options such as ``force_insert``, ``using`` or ``update_fields`` are no
    longer silently dropped (the old code called ``save()`` bare).
    """
    if self.create_date is None:
        self.create_date = timezone.now()
    self.edit_date = timezone.now()
    super(Stakeholder, self).save(*args, **kwargs)
# displayed in admin templates
def __unicode__(self):
    # Python 2 text representation: the stakeholder's name.
    return unicode(self.name)
class StakeholderAdmin(admin.ModelAdmin):
    """Django admin configuration for the Stakeholder model."""

    # Columns rendered on the change-list page.
    list_display = ('name', 'type', 'country', 'create_date')
    display = 'Stakeholders'
    # Right-hand sidebar filters.
    list_filter = ('country', 'type', 'sector')
class ProjectAgreementManager(models.Manager):
    """Manager exposing approval-status shortcuts for ProjectAgreement."""

    def get_queryset(self):
        """Base queryset with frequently-used FK relations pre-joined."""
        return super(ProjectAgreementManager, self).get_queryset().select_related('office','approved_by','approval_submitted_by')

    def get_new(self):
        """Agreements whose approval status was never set (NULL or empty)."""
        return self.filter(Q(approval=None) | Q(approval=""))

    def get_open(self):
        """Agreements with an empty approval status."""
        return self.filter(approval="")

    def get_inprogress(self):
        """Agreements currently being worked on."""
        return self.filter(approval="in progress")

    def get_awaiting_approval(self):
        """Agreements submitted and waiting for an approver."""
        return self.filter(approval="awaiting approval")

    def get_approved(self):
        """Agreements that have been approved."""
        return self.filter(approval="approved")

    def get_rejected(self):
        """Agreements that have been rejected."""
        return self.filter(approval="rejected")
# Project Initiation, admin is handled in the admin.py
# TODO: Clean up unused fields and rename model with manual migration file
"""
https://docs.djangoproject.com/en/dev/ref/migration-operations/#renamemodel
class Migration(migrations.Migration):
dependencies = [
('workflow', '0001_initial'),
]
operations = [
operations.RenameModel("ProjectAgreement", "WorkflowLevelOne")
]
"""
class ProjectAgreement(models.Model):
    """Workflow level one: a Project Initiation ("agreement") record.

    Fixes applied:
    * ``agreement_key`` previously ended with a trailing comma, which bound
      the attribute to a one-element *tuple* containing the field rather
      than the field itself, so Django never created the column.
    * ``save()`` now forwards ``*args``/``**kwargs`` to ``super().save()``
      so caller options such as ``using`` or ``update_fields`` are honored.
    """
    # Trailing comma removed: it made this a tuple, not a model field.
    agreement_key = models.UUIDField(default=uuid.uuid4, unique=True)
    short = models.BooleanField(default=True,verbose_name=_("Short Form (recommended)"))
    program = models.ForeignKey(Program, verbose_name=_("Program"), related_name="agreement")
    date_of_request = models.DateTimeField(_("Date of Request"), blank=True, null=True)
    # Rename to more generic "nonproject" names
    project_name = models.CharField(
        _("Project Name"),
        help_text=_('Please be specific in your name. Consider that your Project Name includes WHO, WHAT, WHERE, HOW'),
        max_length=255)
    project_type = models.ForeignKey(
        ProjectType, verbose_name=_("Project Type"), help_text='', max_length=255,
        blank=True, null=True, on_delete=models.SET_NULL)
    project_activity = models.CharField(
        _("Project Activity"), help_text=_('This should come directly from the activities listed in the Logframe'),
        max_length=255, blank=True, null=True)
    project_description = models.TextField(_("Project Description"), help_text='', blank=True, null=True)
    site = models.ManyToManyField(SiteProfile, blank=True)
    has_rej_letter = models.BooleanField(
        _("If Rejected: Rejection Letter Sent?"), help_text=_('If yes attach copy'), default=False)
    activity_code = models.CharField(_("Project Code"), help_text='', max_length=255, blank=True, null=True)
    office = models.ForeignKey(Office, verbose_name=_("Office"), null=True, blank=True, on_delete=models.SET_NULL)
    cod_num = models.CharField(_("Project COD #"), max_length=255, blank=True, null=True)
    sector = models.ForeignKey("Sector", verbose_name=_("Sector"), blank=True, null=True, on_delete=models.SET_NULL)
    project_design = models.CharField(_("Activity design for"), max_length=255, blank=True, null=True)
    account_code = models.CharField(_("Fund Code"), help_text='', max_length=255, blank=True, null=True)
    lin_code = models.CharField(_("LIN Code"), help_text='', max_length=255, blank=True, null=True)
    staff_responsible = models.CharField(_("Staff Responsible"), max_length=255, blank=True, null=True)
    partners = models.BooleanField(_("Are there partners involved?"), default=0)
    name_of_partners = models.CharField(_("Name of Partners"), max_length=255, blank=True, null=True)
    stakeholder = models.ManyToManyField(Stakeholder,verbose_name=_("Stakeholders"), blank=True)
    effect_or_impact = models.TextField(_("What is the anticipated Outcome or Goal?"), blank=True, null=True)
    expected_start_date = models.DateTimeField(_("Expected starting date"), blank=True, null=True)
    expected_end_date = models.DateTimeField(_("Expected ending date"),blank=True, null=True)
    expected_duration = models.CharField(
        _("Expected duration"), help_text=_("[MONTHS]/[DAYS]"), blank=True, null=True, max_length=255)
    beneficiary_type = models.CharField(
        _("Type of direct beneficiaries"), help_text=_("i.e. Farmer, Association, Student, Govt, etc."),
        max_length=255, blank=True, null=True)
    estimated_num_direct_beneficiaries = models.CharField(
        _("Estimated number of direct beneficiaries"),
        help_text=_("Please provide achievable estimates as we will use these as our 'Targets'"),
        max_length=255, blank=True, null=True)
    average_household_size = models.CharField(
        _("Average Household Size"), help_text=_("Refer to Form 01 - Community Profile"),
        max_length=255, blank=True, null=True)
    estimated_num_indirect_beneficiaries = models.CharField(
        _("Estimated Number of indirect beneficiaries"),
        help_text=_("This is a calculation - multiply direct beneficiaries by average household size"),
        max_length=255, blank=True, null=True)
    total_estimated_budget = models.DecimalField(
        _("Total Project Budget"), decimal_places=2, max_digits=12,
        help_text=_("In USD"), default=Decimal("0.00"),blank=True)
    mc_estimated_budget = models.DecimalField(
        _("Organizations portion of Project Budget"), decimal_places=2, max_digits=12,
        help_text=_("In USD"), default=Decimal("0.00"),blank=True)
    local_total_estimated_budget = models.DecimalField(
        _("Estimated Total in Local Currency"), decimal_places=2, max_digits=12,
        help_text=_("In Local Currency"), default=Decimal("0.00"),blank=True)
    local_mc_estimated_budget = models.DecimalField(
        _("Estimated Organization Total in Local Currency"), decimal_places=2,max_digits=12,
        help_text=_("Total portion of estimate for your agency"), default=Decimal("0.00"), blank=True)
    exchange_rate = models.CharField(
        help_text=_("Local Currency exchange rate to USD"), max_length=255, blank=True, null=True)
    exchange_rate_date = models.DateField(help_text=_("Date of exchange rate"), blank=True, null=True)
    """
    Start Clean Up - These can be removed
    """
    community_rep = models.CharField(_("Community Representative"), max_length=255, blank=True, null=True)
    community_rep_contact = models.CharField(
        _("Community Representative Contact"), help_text='Can have mulitple contact numbers',
        max_length=255, blank=True, null=True)
    community_mobilizer = models.CharField(_("Community Mobilizer"), max_length=255, blank=True, null=True)
    community_mobilizer_contact = models.CharField(
        _("Community Mobilizer Contact Number"), max_length=255, blank=True, null=True)
    community_proposal = models.FileField(_("Community Proposal"), upload_to='uploads', blank=True, null=True)
    estimate_male_trained = models.IntegerField(_("Estimated # of Male Trained"),blank=True,null=True)
    estimate_female_trained = models.IntegerField(_("Estimated # of Female Trained"),blank=True,null=True)
    estimate_total_trained = models.IntegerField(_("Estimated Total # Trained"),blank=True,null=True)
    estimate_trainings = models.IntegerField(_("Estimated # of Trainings Conducted"),blank=True,null=True)
    distribution_type = models.CharField(_("Type of Items Distributed"),max_length=255,null=True,blank=True)
    distribution_uom = models.CharField(_("Unit of Measure"),max_length=255,null=True,blank=True)
    distribution_estimate = models.CharField(_("Estimated # of Items Distributed"),max_length=255,null=True,blank=True)
    cfw_estimate_male = models.IntegerField(_("Estimated # of Male Laborers"),blank=True,null=True)
    cfw_estimate_female = models.IntegerField(_("Estimated # of Female Laborers"),blank=True,null=True)
    cfw_estimate_total = models.IntegerField(_("Estimated Total # of Laborers"),blank=True,null=True)
    cfw_estimate_project_days = models.IntegerField(_("Estimated # of Project Days"),blank=True,null=True)
    cfw_estimate_person_days = models.IntegerField(_("Estimated # of Person Days"),blank=True,null=True)
    cfw_estimate_cost_materials = models.CharField(_("Estimated Total Cost of Materials"),max_length=255,blank=True,null=True)
    cfw_estimate_wages_budgeted= models.CharField(_("Estimated Wages Budgeted"),max_length=255,blank=True,null=True)
    """
    End Clean Up
    """
    estimation_date = models.DateTimeField(_("Estimation date"), blank=True, null=True)
    estimated_by = models.ForeignKey(
        TolaUser, blank=True, null=True, on_delete=models.SET_NULL,
        verbose_name="Originated By", related_name="estimating")
    estimated_by_date = models.DateTimeField("Date Originated", null=True, blank=True)
    checked_by = models.ForeignKey(
        TolaUser, blank=True, null=True, on_delete=models.SET_NULL,
        related_name="checking",verbose_name=_("Checked by"))
    checked_by_date = models.DateTimeField("Date Checked", null=True, blank=True)
    reviewed_by = models.ForeignKey(
        TolaUser, verbose_name="Request review", blank=True, null=True, on_delete=models.SET_NULL,
        related_name="reviewing" )
    reviewed_by_date = models.DateTimeField("Date Verified", null=True, blank=True)
    finance_reviewed_by = models.ForeignKey(
        TolaUser, blank=True, null=True, on_delete=models.SET_NULL,
        related_name="finance_reviewing", verbose_name=_("Finance reviewed by"))
    finance_reviewed_by_date = models.DateTimeField(_("Date Reviewed by Finance"), null=True, blank=True)
    me_reviewed_by = models.ForeignKey(
        TolaUser, blank=True, null=True, on_delete=models.SET_NULL,
        verbose_name=_("M&E Reviewed by"), related_name="reviewing_me")
    me_reviewed_by_date = models.DateTimeField(_("Date Reviewed by M&E"), null=True, blank=True)
    capacity = models.ManyToManyField(Capacity,verbose_name=_("Sustainability Plan"), blank=True)
    evaluate = models.ManyToManyField(Evaluate, blank=True, verbose_name=_("Evaluate"))
    approval = models.CharField(_("Approval Status"), default="in progress", max_length=255, blank=True, null=True)
    approved_by = models.ForeignKey(
        TolaUser, blank=True, null=True, on_delete=models.SET_NULL,
        related_name="approving_agreement", verbose_name="Request approval")
    approved_by_date = models.DateTimeField(_("Date Approved"), null=True, blank=True)
    approval_submitted_by = models.ForeignKey(
        TolaUser, blank=True, null=True, on_delete=models.SET_NULL,
        related_name="submitted_by_agreement", verbose_name=_("Approval submitted by"))
    approval_remarks = models.CharField(_("Approval Remarks"), max_length=255, blank=True, null=True)
    justification_background = models.TextField(_("General Background and Problem Statement"), blank=True, null=True)
    risks_assumptions = models.TextField(_("Risks and Assumptions"), blank=True, null=True)
    justification_description_community_selection = models.TextField(
        _("Description of Stakeholder Selection Criteria"), blank=True, null=True)
    description_of_project_activities = models.TextField(_("Description of project activities"), blank=True, null=True)
    description_of_government_involvement = models.TextField(
        _("Description of government involvement"),blank=True, null=True)
    description_of_community_involvement = models.TextField(
        _("Description of community involvement"), blank=True, null=True)
    community_project_description = models.TextField(
        _("Describe the project you would like the program to consider"),
        help_text="Description must describe how the Community Proposal meets the project criteria",
        blank = True, null = True)
    create_date = models.DateTimeField(_("Date Created"), null=True, blank=True)
    edit_date = models.DateTimeField(_("Last Edit Date"), null=True, blank=True)
    history = HistoricalRecords()
    #optimize base query for all classbasedviews
    objects = ProjectAgreementManager()

    class Meta:
        ordering = ('project_name',)
        verbose_name_plural = _("Project Initiation")
        permissions = (
            ("can_approve", "Can approve initiation"),
        )

    # on save add create date or update edit date
    def save(self, *args, **kwargs):
        """Stamp audit dates, backfill budget defaults, then save.

        Fix: forward ``*args``/``**kwargs`` to ``super().save()`` so caller
        options (``using``, ``update_fields``, ...) are not silently dropped.
        """
        if self.create_date is None:
            self.create_date = timezone.now()
        # defaults don't work if they aren't in the form so preset these to 0
        if self.total_estimated_budget is None:
            self.total_estimated_budget = Decimal("0.00")
        if self.mc_estimated_budget is None:
            self.mc_estimated_budget = Decimal("0.00")
        if self.local_total_estimated_budget is None:
            self.local_total_estimated_budget = Decimal("0.00")
        if self.local_mc_estimated_budget is None:
            self.local_mc_estimated_budget = Decimal("0.00")
        self.edit_date = timezone.now()
        super(ProjectAgreement, self).save(*args, **kwargs)

    @property
    def project_name_clean(self):
        # ASCII-only project name (Python 2 era helper for exports).
        return self.project_name.encode('ascii', 'ignore')

    @property
    def sites(self):
        return ', '.join([x.name for x in self.site.all()])

    @property
    def stakeholders(self):
        return ', '.join([x.name for x in self.stakeholder.all()])

    @property
    def capacities(self):
        return ', '.join([x.capacity for x in self.capacity.all()])

    @property
    def evaluations(self):
        return ', '.join([x.evaluate for x in self.evaluate.all()])

    # displayed in admin templates
    def __unicode__(self):
        new_name = unicode(self.office) + unicode(" - ") + unicode(self.project_name)
        return new_name
# Project Tracking, admin is handled in the admin.py
# TODO: Clean up unused fields and rename model with manual migration file
"""
https://docs.djangoproject.com/en/dev/ref/migration-operations/#renamemodel
class Migration(migrations.Migration):
dependencies = [
('workflow', '0001_initial'),
]
operations = [
operations.RenameModel("ProjectComplete", "WorkflowLevelTwo")
]
"""
class ProjectComplete(models.Model):
short = models.BooleanField(default=True,verbose_name="Short Form (recommended)")
program = models.ForeignKey(Program, null=True, blank=True, related_name="complete", verbose_name=_("Program"))
project_agreement = models.OneToOneField(ProjectAgreement, verbose_name=_("Project Initiation"))
# Rename to more generic "nonproject" names
activity_code = models.CharField(_("Project Code"), max_length=255, blank=True, null=True)
project_name = models.CharField(_("Project Name"), max_length=255, blank=True, null=True)
project_activity = models.CharField(_("Project Activity"), max_length=255, blank=True, null=True)
project_type = models.ForeignKey(
ProjectType, max_length=255, blank=True, null=True, on_delete=models.SET_NULL, verbose_name=_("Project Type"))
office = models.ForeignKey(Office, null=True, blank=True, on_delete=models.SET_NULL, verbose_name=_("Office"))
sector = models.ForeignKey("Sector", blank=True, null=True, on_delete=models.SET_NULL, verbose_name=_("Sector"))
expected_start_date = models.DateTimeField(
_("Expected start date"), help_text=_("Imported from Project Initiation"), blank=True, null=True)
expected_end_date = models.DateTimeField(
_("Expected end date"), help_text=_("Imported Project Initiation"), blank=True, null=True)
expected_duration = models.CharField(
_("Expected Duration"), max_length=255, help_text=_("Imported from Project Initiation"), blank=True, null=True)
actual_start_date = models.DateTimeField(
_("Actual start date"), help_text=_("Imported from Project Initiation"), blank=True, null=True)
actual_end_date = models.DateTimeField(_("Actual end date"), blank=True, null=True)
actual_duration = models.CharField(_("Actual duaration"), max_length=255, blank=True, null=True)
on_time = models.BooleanField(default=None)
stakeholder = models.ManyToManyField(Stakeholder, blank=True, verbose_name=_("Stakeholder"))
no_explanation = models.TextField(_("If not on time explain delay"), blank=True, null=True)
account_code = models.CharField(_("Fund Code"), help_text='', max_length=255, blank=True, null=True)
lin_code = models.CharField(_("LIN Code"), help_text='', max_length=255, blank=True, null=True)
estimated_budget = models.DecimalField(
_("Estimated Budget"), decimal_places=2, max_digits=12,help_text="", default=Decimal("0.00") ,blank=True)
actual_budget = models.DecimalField(
_("Actual Cost"), decimal_places=2, max_digits=20, | |
other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'AssetSystemMetadataUsage') -> bool:
    """Return `true` when self and other are not equal, false otherwise."""
    # Defined as the logical negation of __eq__ (Python 2-style explicit __ne__).
    return not self == other
class DataFlowPagedCollection():
"""
A page from a collection of DataStage flows.
:attr List[DataIntgFlow] data_flows: (optional) A page from a collection of
DataStage flows.
:attr HrefModel first: (optional) URI of a resource.
:attr HrefModel last: (optional) URI of a resource.
:attr int limit: (optional) The number of data flows requested to be returned.
:attr HrefModel next: (optional) URI of a resource.
:attr HrefModel prev: (optional) URI of a resource.
:attr int total_count: (optional) The total number of DataStage flows available.
"""
def __init__(self,
*,
data_flows: List['DataIntgFlow'] = None,
first: 'HrefModel' = None,
last: 'HrefModel' = None,
limit: int = None,
next: 'HrefModel' = None,
prev: 'HrefModel' = None,
total_count: int = None) -> None:
"""
Initialize a DataFlowPagedCollection object.
:param List[DataIntgFlow] data_flows: (optional) A page from a collection
of DataStage flows.
:param HrefModel first: (optional) URI of a resource.
:param HrefModel last: (optional) URI of a resource.
:param int limit: (optional) The number of data flows requested to be
returned.
:param HrefModel next: (optional) URI of a resource.
:param HrefModel prev: (optional) URI of a resource.
:param int total_count: (optional) The total number of DataStage flows
available.
"""
self.data_flows = data_flows
self.first = first
self.last = last
self.limit = limit
self.next = next
self.prev = prev
self.total_count = total_count
@classmethod
def from_dict(cls, _dict: Dict) -> 'DataFlowPagedCollection':
"""Initialize a DataFlowPagedCollection object from a json dictionary."""
args = {}
if 'data_flows' in _dict:
args['data_flows'] = [DataIntgFlow.from_dict(x) for x in _dict.get('data_flows')]
if 'first' in _dict:
args['first'] = HrefModel.from_dict(_dict.get('first'))
if 'last' in _dict:
args['last'] = HrefModel.from_dict(_dict.get('last'))
if 'limit' in _dict:
args['limit'] = _dict.get('limit')
if 'next' in _dict:
args['next'] = HrefModel.from_dict(_dict.get('next'))
if 'prev' in _dict:
args['prev'] = HrefModel.from_dict(_dict.get('prev'))
if 'total_count' in _dict:
args['total_count'] = _dict.get('total_count')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DataFlowPagedCollection object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
    """Return a json dictionary representing this model."""
    _dict = {}
    # Keys whose values are nested models that must be serialized recursively.
    model_keys = ('first', 'last', 'next', 'prev')
    # Iterate in the serialization order the service expects; unset (None or
    # missing) attributes are omitted from the output entirely.
    for key in ('data_flows', 'first', 'last', 'limit', 'next', 'prev', 'total_count'):
        value = getattr(self, key, None)
        if value is None:
            continue
        if key == 'data_flows':
            _dict[key] = [flow.to_dict() for flow in value]
        elif key in model_keys:
            _dict[key] = value.to_dict()
        else:
            _dict[key] = value
    return _dict
def _to_dict(self):
    """Return a json dictionary representing this model.

    Internal alias that simply delegates to :meth:`to_dict`.
    """
    return self.to_dict()
def __str__(self) -> str:
    """Return a `str` version of this DataFlowPagedCollection object."""
    # Pretty-printed JSON of the serialized model.
    serialized = self.to_dict()
    return json.dumps(serialized, indent=2)
def __eq__(self, other: 'DataFlowPagedCollection') -> bool:
    """Return `true` when self and other are equal, false otherwise."""
    # Only instances of the same class compare equal; equality is plain
    # attribute-dict comparison.
    if isinstance(other, self.__class__):
        return self.__dict__ == other.__dict__
    return False
def __ne__(self, other: 'DataFlowPagedCollection') -> bool:
    """Return `true` when self and other are not equal, false otherwise."""
    # Defined as the strict negation of __eq__.
    return not (self == other)
class DataImportError():
    """
    An import error object describe an import problem specific to a particular data flow.

    :attr str description: (optional) additional error text.
    :attr str name: error object name.
    :attr str stage_type: (optional) error stage type.
    :attr str type: error type.
    """

    def __init__(self,
                 name: str,
                 type: str,
                 *,
                 description: str = None,
                 stage_type: str = None) -> None:
        """
        Initialize a DataImportError object.

        :param str name: error object name.
        :param str type: error type.
        :param str description: (optional) additional error text.
        :param str stage_type: (optional) error stage type.
        """
        self.description = description
        self.name = name
        self.stage_type = stage_type
        self.type = type

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'DataImportError':
        """Initialize a DataImportError object from a json dictionary."""
        args = {}
        # Optional properties are copied over only when present.
        for optional_key in ('description', 'stage_type'):
            if optional_key in _dict:
                args[optional_key] = _dict.get(optional_key)
        # Required properties must be present in the payload; 'name' is
        # validated before 'type', matching the service SDK convention.
        for required_key in ('name', 'type'):
            if required_key not in _dict:
                raise ValueError('Required property \'%s\' not present in DataImportError JSON' % required_key)
            args[required_key] = _dict.get(required_key)
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DataImportError object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        # Serialize attributes in a fixed order, skipping unset ones.
        for key in ('description', 'name', 'stage_type', 'type'):
            value = getattr(self, key, None)
            if value is not None:
                _dict[key] = value
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this DataImportError object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'DataImportError') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'DataImportError') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not (self == other)
class TypeEnum(str, Enum):
    """
    error type.

    String-valued enum of import error categories; each member's value is
    the exact wire-format string used by the service (presumably matching
    DataImportError.type -- the surrounding class context is not fully
    visible here, so confirm against the service API docs).
    """
    UNSUPPORTED_STAGE_TYPE = 'unsupported_stage_type'
    UNSUPPORTED_FEATURE = 'unsupported_feature'
    EMPTY_JSON = 'empty_json'
    ISX_CONVERSION_ERROR = 'isx_conversion_error'
    MODEL_CONVERSION_ERROR = 'model_conversion_error'
    INVALID_INPUT_TYPE = 'invalid_input_type'
    INVALID_JSON_FORMAT = 'invalid_json_format'
    JSON_CONVERSION_ERROR = 'json_conversion_error'
    FLOW_DELETION_ERROR = 'flow_deletion_error'
    FLOW_CREATION_ERROR = 'flow_creation_error'
    FLOW_RESPONSE_PARSING_ERROR = 'flow_response_parsing_error'
    AUTH_TOKEN_ERROR = 'auth_token_error'
    FLOW_COMPILATION_ERROR = 'flow_compilation_error'
    EMPTY_STAGE_LIST = 'empty_stage_list'
    EMPTY_STAGE_NODE = 'empty_stage_node'
    MISSING_STAGE_TYPE_CLASS_NAME = 'missing_stage_type_class_name'
    DUMMY_STAGE = 'dummy_stage'
    MISSING_STAGE_TYPE = 'missing_stage_type'
    MISSING_REPOS_ID = 'missing_repos_id'
    STAGE_CONVERSION_ERROR = 'stage_conversion_error'
    UNIMPLEMENTED_STAGE_TYPE = 'unimplemented_stage_type'
    JOB_CREATION_ERROR = 'job_creation_error'
    JOB_RUN_ERROR = 'job_run_error'
    FLOW_SEARCH_ERROR = 'flow_search_error'
    UNSUPPORTED_JOB_TYPE = 'unsupported_job_type'
    INTERNAL_ERROR = 'internal_error'
    CONNECTION_CREATION_ERROR = 'connection_creation_error'
    FLOW_RENAME_ERROR = 'flow_rename_error'
    DUPLICATE_JOB_ERROR = 'duplicate_job_error'
    PARAMETER_SET_CREATION_ERROR = 'parameter_set_creation_error'
    DISTRIBUTED_LOCK_ERROR = 'distributed_lock_error'
    DUPLICATE_OBJECT_ERROR = 'duplicate_object_error'
    UNBOUND_OBJECT_REFERENCE = 'unbound_object_reference'
    TABLE_DEF_CREATION_ERROR = 'table_def_creation_error'
    CONNECTION_CREATION_API_ERROR = 'connection_creation_api_error'
    CONNECTION_PATCH_API_ERROR = 'connection_patch_api_error'
    CONNECTION_DELETION_API_ERROR = 'connection_deletion_api_error'
    SEQUENCE_JOB_CREATION_ERROR = 'sequence_job_creation_error'
    UNSUPPORTED_STAGE_TYPE_IN_SUBFLOW = 'unsupported_stage_type_in_subflow'
class DataIntgFlow():
    """
    A DataStage flow model that defines physical source(s), physical target(s) and an
    optional pipeline containing operations to apply to source(s).

    :attr List[object] attachments: (optional) Metadata information for datastage
          flow.
    :attr DataIntgFlowEntity entity: (optional) The underlying DataStage flow
          definition.
    :attr AssetSystemMetadata metadata: (optional) System metadata about an asset.
    """

    def __init__(self,
                 *,
                 attachments: List[object] = None,
                 entity: 'DataIntgFlowEntity' = None,
                 metadata: 'AssetSystemMetadata' = None) -> None:
        """
        Initialize a DataIntgFlow object.

        :param List[object] attachments: (optional) Metadata information for
               datastage flow.
        :param DataIntgFlowEntity entity: (optional) The underlying DataStage
               flow definition.
        :param AssetSystemMetadata metadata: (optional) System metadata about
               an asset.
        """
        self.attachments = attachments
        self.entity = entity
        self.metadata = metadata

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'DataIntgFlow':
        """Initialize a DataIntgFlow object from a json dictionary."""
        args = {}
        # Attachments are treated as opaque objects -- copied through as-is.
        if 'attachments' in _dict:
            args['attachments'] = _dict.get('attachments')
        # The remaining properties deserialize via their own model classes.
        if 'entity' in _dict:
            args['entity'] = DataIntgFlowEntity.from_dict(_dict.get('entity'))
        if 'metadata' in _dict:
            args['metadata'] = AssetSystemMetadata.from_dict(_dict.get('metadata'))
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DataIntgFlow object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        # Unset (None or missing) attributes are omitted from the output.
        if getattr(self, 'attachments', None) is not None:
            _dict['attachments'] = self.attachments
        if getattr(self, 'entity', None) is not None:
            _dict['entity'] = self.entity.to_dict()
        if getattr(self, 'metadata', None) is not None:
            _dict['metadata'] = self.metadata.to_dict()
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this DataIntgFlow object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'DataIntgFlow') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'DataIntgFlow') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not (self == other)
class DataIntgFlowEntity():
"""
The underlying DataStage flow definition.
:attr object data_intg_flow: (optional) Asset type object.
:attr object data_intg_subflow: (optional) Asset type object.
:attr str description: (optional) The description of the DataStage flow.
:attr DataIntgFlowLock lock: (optional) Lock information for a DataStage flow
asset.
:attr str name: (optional) The name of | |
(SELECT name FROM temp.temp_not_used_terms);\n' % (table, term))
out.write('DROP TABLE temp.temp_not_used_terms;\n')
out.write('DROP TABLE temp.temp_all_terms;\n')
out.write('DROP TABLE temp.temp_used_terms;\n')
out.write('\n')
def update_mapped_terms(out):
"""
Update the terms with the values from the existing ontologies.
"""
out.write('alter table isa.dataset_experiment_type drop constraint dataset_experiment_type_experiment_type_fkey;\n' )
out.write('alter table isa.dataset_experiment_type add constraint dataset_experiment_type_experiment_type_fkey FOREIGN KEY (experiment_type) REFERENCES isa.experiment_type(term) ON UPDATE CASCADE ON DELETE CASCADE;\n' )
#AB. Need to add logic to update term in referencing tables when transformed term collides w/ existing one
#AB. Do a first pass to update terms to new values if they don't collide w/ existing ones
for table in vocabulary_tables:
if table not in vocabulary_orphans['vocabulary']:
term = vocabulary_term_name['vocabulary'][table]
print 'MAPPING: table=%s term=%s' % (table,term)
for name,value in mapped_terms.iteritems():
out.write('UPDATE vocabulary.%s SET %s = \'%s\' WHERE %s = \'%s\' AND \'%s\' NOT IN (SELECT %s FROM vocabulary.%s);\n' % (table, term, value, term, name, value, term, table))
out.write('\n')
for table in isa_tables:
if table not in vocabulary_orphans['isa']:
term = vocabulary_term_name['isa'][table]
print 'MAPPING: table=%s term=%s' % (table,term)
for name,value in mapped_terms.iteritems():
out.write('UPDATE isa.%s SET %s = \'%s\' WHERE %s = \'%s\' AND \'%s\' NOT IN (SELECT %s FROM isa.%s);\n' % (table, term, value, term, name, value, term, table))
out.write('\n')
#AB. Now remap to new terms in tables that reference the old term ....
for table in isa_tables:
if table not in vocabulary_orphans['isa']:
term = vocabulary_term_name['isa'][table]
for name,value in mapped_terms.iteritems():
print '---> MAPPING: table=%s term=%s name=%s value=%s' % (table,term,name,value)
references = vocabulary_relations['isa'][table].get('references', None)
if len(references) > 0:
queries = []
for reference in references:
schema, constraint = reference['constraint_name']
foreign_key = reference['foreign_key']
schema_name = foreign_key['schema_name']
table_name = foreign_key['table_name']
column_name = foreign_key['column_name']
fk_column = get_term_reference(goal, '%s' % schema_name, '%s' % table_name, '%s' % constraint)
if table_name == 'dataset_'+table:
print ' Schema=%s | Table=%s | Column=%s | Constraint=%s | fk_column=%s' % (schema_name,table_name,column_name,constraint,fk_column)
out.write('alter table %s.%s drop constraint if exists %s_pkey;\n' % (schema_name,table_name,table_name))
out.write('update %s.%s tt set %s =%s from isa.%s where %s=\'%s\' AND tt.%s=\'%s\' ;\n' % (schema_name,table_name,column_name,term,table,term,value,column_name,name))
out.write('delete from %s.%s where "RID" in (SELECT "RID" FROM (SELECT "RID",row_number() over (partition by dataset_id,%s order by "RID") as row_num from %s.%s) T where T.row_num>1) ;\n' % (schema_name,table_name,column_name,schema_name,table_name))
out.write('alter table %s.%s add constraint %s_pkey primary key (dataset_id,%s);\n' % (schema_name,table_name,table_name,column_name))
out.write('\n')
for table in experiment_tables:
for name,value in mapped_terms.iteritems():
print '---> EXPERIMENT TABLE MAPPINGS: table=%s name=%s value=%s' % (table,name,value)
out.write('update isa.experiment tt set experiment_type = (SELECT "RID" from isa.experiment_type where term=\'%s\') WHERE tt.experiment_type=(SELECT "RID" FROM isa.experiment_type WHERE term=\'%s\') ;\n' % (value,name))
out.write('\n')
def make_temp_schema():
    """
    Generate the "temp" schema.

    Writes uberon.sql (one transaction) that: prunes unused terms, applies
    the ontology term mappings, collects every distinct in-use term into
    temp.terms, snapshots each term table into a temp.* table, builds the
    union-vocabulary domain tables, resolves each term's controlled
    vocabulary (cv) from data_commons.cvterm, and finally registers the
    temp tables/annotations and IRIs with ERMrest.

    Side effect: fills the module-level temporary_tables registry.
    Relies on module-level globals (output, make_temp_functions,
    vocabulary_tables, isa_tables, vocabulary_orphans, vocabulary_term_name,
    union_vocabularies, data_commons_ontologies, temporary_tables).
    """
    out = file('%s/uberon.sql' % output, 'w')
    out.write('BEGIN;\n')
    out.write('%s\n' % make_temp_functions)
    delete_not_used_terms(out)
    update_mapped_terms(out)
    # Accumulates temp.set_iri(...) calls; emitted at the very end so all
    # temp tables exist before IRIs are registered.
    iri = []
    # Gather every distinct term still in use into temp.terms via one big
    # UNION across all non-orphan vocabulary/isa term tables.
    out.write('INSERT INTO temp.terms(name)\n')
    queries = []
    for table in vocabulary_tables:
        if table not in vocabulary_orphans['vocabulary']:
            column = vocabulary_term_name['vocabulary'][table]
            queries.append('SELECT DISTINCT %s AS name FROM vocabulary.%s' % (column, table))
            iri.append('SELECT temp.set_iri(\'%s\', \'%s\', \'%s\');\n' %('vocabulary', table, column))
    for table in isa_tables:
        if table not in vocabulary_orphans['isa']:
            column = vocabulary_term_name['isa'][table]
            queries.append('SELECT DISTINCT %s AS name FROM isa.%s' % (column, table))
            iri.append('SELECT temp.set_iri(\'%s\', \'%s\', \'%s\');\n' %('isa', table, column))
    out.write('\nUNION\n'.join(queries))
    out.write('\n;\n\n')
    # Snapshot each term table into temp.<name>; tables that participate in
    # a union vocabulary get a schema-prefixed name to avoid collisions.
    for table in vocabulary_tables:
        if table not in vocabulary_orphans['vocabulary']:
            column = vocabulary_term_name['vocabulary'][table]
            table_name = table
            if table in union_vocabularies.keys():
                table_name = 'vocabulary_%s' % table
            out.write('CREATE TABLE temp.%s AS SELECT DISTINCT %s AS name FROM vocabulary.%s;\n' % (table_name, column, table))
            out.write('ALTER TABLE temp.%s OWNER TO ermrest;\n\n' % table_name)
            temporary_tables['vocabulary'].append({
                'original_name': '%s' % table,
                'name': '%s' % table_name})
    for table in isa_tables:
        if table not in vocabulary_orphans['isa']:
            column = vocabulary_term_name['isa'][table]
            table_name = table
            if table in union_vocabularies.keys():
                table_name = 'isa_%s' % table
            out.write('CREATE TABLE temp.%s AS SELECT DISTINCT %s AS name FROM isa.%s;\n' % (table_name, column, table))
            out.write('ALTER TABLE temp.%s OWNER TO ermrest;\n\n' % table_name)
            temporary_tables['isa'].append({
                'original_name': '%s' % table,
                'name': '%s' % table_name})
    out.write('\n')
    # Build each union-vocabulary domain table as the UNION of its members.
    for domain in union_vocabularies.keys():
        queries = []
        for schema in union_vocabularies[domain].keys():
            for table in union_vocabularies[domain][schema]:
                table_name = table
                if table in union_vocabularies.keys():
                    table_name = '%s_%s' % (schema, table)
                queries.append('SELECT DISTINCT name AS name FROM temp.%s' % table_name)
                column = vocabulary_term_name[schema][table]
                iri.append('SELECT temp.set_iri(\'%s\', \'%s\', \'%s\');\n' %(schema, table, column))
        out.write('CREATE TABLE temp.%s AS %s;\n' % (domain, ' UNION '.join(queries)))
    out.write('\n')
    # Pull the known (name, cv) pairs for the in-use terms out of the
    # data commons ontology store.
    out.write('CREATE TABLE temp.uberon AS\n')
    out.write('SELECT DISTINCT name,cv FROM data_commons.cvterm WHERE name IN (SELECT name FROM temp.terms);\n')
    out.write('ALTER TABLE temp.uberon OWNER TO ermrest;\n')
    out.write('\n')
    # Resolve each term's cv: prefer a cv from the preferred ontology list,
    # then fall back to any available cv (second UPDATE fills in NULLs).
    data_commons_ontologies_enclosed = []
    for ontology in data_commons_ontologies:
        data_commons_ontologies_enclosed.append('\'%s\'' % ontology)
    out.write('UPDATE temp.terms T1 SET cv = (SELECT cv FROM temp.uberon T2 WHERE T2.cv IN (%s) AND T1.name = T2.name LIMIT 1) WHERE name IN (SELECT name FROM temp.uberon);\n' % ','.join(data_commons_ontologies_enclosed))
    out.write('UPDATE temp.terms T1 SET cv = (SELECT cv FROM temp.uberon T2 WHERE T1.name = T2.name LIMIT 1) WHERE cv is NULL AND name IN (SELECT name FROM temp.uberon);\n')
    out.write('\n')
    # Register the fixed and per-term temp tables with the helper functions.
    for table in ['owl_terms', 'owl_predicates', 'ocdm', 'facebase', 'terms', 'uberon']:
        out.write('SELECT temp.make_facebase_temp_tables(\'temp\', \'%s\');\n' % table)
    out.write('\n')
    for table in vocabulary_tables:
        if table not in vocabulary_orphans['vocabulary']:
            table_name = table
            if table in union_vocabularies.keys():
                table_name = 'vocabulary_%s' % table
            out.write('SELECT temp.make_facebase_temp_tables(\'temp\', \'%s\');\n' % table_name)
    for table in isa_tables:
        if table not in vocabulary_orphans['isa']:
            table_name = table
            if table in union_vocabularies.keys():
                table_name = 'isa_%s' % table
            out.write('SELECT temp.make_facebase_temp_tables(\'temp\', \'%s\');\n' % table_name)
    out.write('\n')
    for table in union_vocabularies.keys():
        out.write('SELECT temp.make_facebase_temp_tables(\'temp\', \'%s\');\n' % table)
    out.write('\n')
    for table in ['ocdm', 'facebase', 'terms', 'uberon']:
        out.write('SELECT temp.make_temp_tables_annotations(\'temp\', \'%s\');\n' % table)
    out.write('\n')
    # Emit the accumulated IRI registrations.
    for line in iri:
        out.write('%s' % line)
    out.write('\n')
    out.write('\nSELECT _ermrest.model_change_event();\n')
    out.write('\n')
    out.write('COMMIT;\n')
    out.close()
def get_domain_table(schema, table):
    """
    Get the "vocab" table.

    Returns the union-vocabulary domain that contains (schema, table),
    or the table name itself when no domain covers it.
    """
    for domain in union_vocabularies.keys():
        member_tables = union_vocabularies[domain].get(schema, None)
        if member_tables is not None and table in member_tables:
            return domain
    # Not part of any union vocabulary: the table is its own domain.
    return table
def get_vocabulary_domains():
    """
    Generate the "vocab" domain tables.

    Fills the module-level vocabulary_domains list with every union
    vocabulary plus every non-orphan term table that is not already
    covered by a union.
    """
    # Union vocabularies come first, in dictionary order.
    vocabulary_domains.extend(union_vocabularies.keys())
    # Then standalone term tables from each schema.
    # (is_in_unions() is compared to False explicitly, matching the
    # original semantics in case it returns a non-boolean.)
    for schema, candidate_tables in (('vocabulary', vocabulary_tables), ('isa', isa_tables)):
        for table in candidate_tables:
            if is_in_unions(schema, table) == False and table not in vocabulary_orphans[schema]:
                vocabulary_domains.append(table)
def trace():
"""
Trace the vocabulary orphans and domain tables.
"""
print 'vocabulary orphans:'
for orphan in vocabulary_orphans['vocabulary']:
print '\t%s' % orphan
print 'isa orphans:'
for orphan in vocabulary_orphans['isa']:
print '\t%s' % orphan
print 'Domain Tables:'
for domain in vocabulary_domains:
print '\t%s' % domain
def make_domain_script():
    """
    Generate the domain.sql script.

    Writes one transaction that: installs the domain/dataset helper
    functions, creates and loads a vocab.* domain table for each entry in
    vocabulary_domains, then builds the dbxref-based domain tables from the
    gene_symbol column of their vocabulary.* sources, and notifies ERMrest.

    Relies on module-level globals (output, domain_functions,
    dataset_functions, vocabulary_domains, vocabulary_dbxref_tables).
    """
    out = file('%s/domain.sql' % output, 'w')
    out.write('BEGIN;\n')
    out.write('%s\n' % domain_functions)
    out.write('\n')
    out.write('%s\n' % dataset_functions)
    out.write('\n')
    # Create all domain tables first, then load them in a second pass.
    for domain in vocabulary_domains:
        out.write('SELECT data_commons.make_facebase_domain_tables(\'vocab\', \'%s\');\n' % domain)
    out.write('\n')
    for domain in vocabulary_domains:
        out.write('SELECT data_commons.load_facebase_domain_tables(\'vocab\', \'%s\');\n' % domain)
    out.write('\n')
    for domain in vocabulary_dbxref_tables:
        out.write('SELECT data_commons.make_facebase_domain_tables(\'vocab\', \'%s\');\n' % domain)
    out.write('\n')
    # dbxref tables load through a temp staging table keyed on gene_symbol;
    # the table's own name doubles as its cv value.
    for table in vocabulary_dbxref_tables:
        print 'Calling data_commons.load_facebase_domain_tables_cv for table= "%s"' % (table)
        out.write('CREATE TABLE temp.%s (name text PRIMARY KEY,cv text);\n' % table)
        out.write('ALTER TABLE temp.%s OWNER TO ermrest;\n' % table)
        out.write('INSERT INTO temp.%s (name,cv) SELECT DISTINCT gene_symbol,\'%s\' FROM vocabulary.%s ON CONFLICT DO NOTHING;\n' % (table, table,table))
        out.write('SELECT data_commons.load_facebase_domain_tables_cv(\'vocab\', \'%s\',\'%s\');\n' % (table,table))
        out.write('\n')
    out.write('\n')
    out.write('\nSELECT _ermrest.model_change_event();\n')
    out.write('\n')
    out.write('COMMIT;\n')
    out.close()
def make_references_script(goal):
"""
Generate the vocabulary vocabulary_references.sql script.
"""
out = file('%s/vocabulary_references.sql' % output, 'w')
out.write('BEGIN;\n')
out.write('%s\n' % references_functions)
out.write('\n')
for schema in views.keys():
for view in views[schema].keys():
out.write('DROP VIEW %s.%s;\n' % (schema, view))
out.write('\n')
for schema in triggers.keys():
for table in triggers[schema].keys():
for trigger in triggers[schema][table]:
for trigger_name in trigger.keys():
out.write('DROP TRIGGER %s ON %s.%s;\n' % (trigger_name, schema, table))
out.write('\n')
for schema in duplicate_constraints.keys():
for table in duplicate_constraints[schema].keys():
for constraint in duplicate_constraints[schema][table]:
out.write('ALTER TABLE %s.%s DROP CONSTRAINT IF EXISTS %s;\n' % (schema, table, constraint))
out.write('\n')
for table in vocabulary_tables:
if table not in vocabulary_orphans['vocabulary']:
foreign_keys = vocabulary_relations['vocabulary'][table].get('foreign_keys', None)
if foreign_keys != None:
for foreign_key in foreign_keys:
schema, constraint = foreign_key['constraint_name']
out.write('ALTER TABLE vocabulary.%s DROP CONSTRAINT IF EXISTS %s;\n' % (table, constraint))
out.write('\n')
for table in vocabulary_ref_tables:
table_model = goal.table('vocabulary', table)
foreign_keys_model = table_model.foreign_keys
for foreign_key_model in foreign_keys_model:
referenced_column = foreign_key_model.referenced_columns[0]
if referenced_column['schema_name'] == 'vocabulary':
foreign_keys = vocabulary_relations['vocabulary'][table].get('foreign_keys', | |
1213
"https://play.google.com/store/apps/details?id=com.globalegrow.app.gearbest",
# 1214
"http://amazon.com/gp/bestsellers/electronics/281052",
# 1215
"http://amazon.ca/",
# 1216
"https://www.bukalapak.com/",
# 1217
"http://www.olx.in/",
# 1218
"http://photo.gmw.cn/2016-01/04/content_18342869.htm",
# 1219
"http://amazon.de/",
# 1220
"http://www.cnn.com/",
# 1221
"https://www.capitalone.com/",
# 1222
"http://www.hp.com/",
# 1223
"http://photobucket.com/auth/twitter/start?callback_type=login&display=popup&mobile=",
# 1224
"http://www.dailymotion.com/ca-en",
# 1225
"http://www.asos.com/infopages/pgeshiptocountry.aspx?CTARef=HP|gen|middle|freeship",
# 1226
"http://extratorrent.cc/category/101/Naruto+Torrents.html",
# 1227
"http://uae.souq.com/ae-en/",
# 1228
"http://www.nicovideo.jp/",
# 1229
"https://www.youtube.com/",
# 1230
"http://www.milliyet.com.tr/ask-yeniden-38-yeni-bolum-gundem-2178176/",
# 1231
"http://mst.zol.com.cn/563/5635847.html",
# 1232
"http://weather.yahoo.co.jp/weather/",
# 1233
"http://goo.ne.jp/",
# 1234
"http://military.china.com.cn/node_7207693.htm",
# 1235
"http://www.google.com.tr/",
# 1236
"http://vimeo.com/ondemand/bunnythemovie",
# 1237
"http://sports.news.naver.com/videoCenter/index.nhn?uCategory=kbasketball&category=&listType=total&date=20160114&gameId=&teamCode=&playerId=&keyword=&id=171963&page=1",
# 1238
"http://ettoday.net/",
# 1239
"http://wsj.com/",
# 1240
"http://www.cnet.com/topics/tablets/how-to/",
# 1241
"http://www.reimageplus.com/",
# 1242
"http://sp.oshiete.goo.ne.jp/",
# 1243
"https://www.mystart.com/",
# 1244
"http://car.bitauto.com/jincouxingche/",
# 1245
"http://channel.jd.com/food.html",
# 1246
"http://amazon.de/Gardena-3101-20-cs-Rechenbesen-43-breit/dp/B0001E3VFQ",
# 1247
"https://commons.era.nih.gov/commons/",
# 1248
"http://www.amazon.in/Fastrack-Watches/b?ie=UTF8&node=4371849031",
# 1249
"http://www.macys.com/catalog/index.ognc?CategoryID=28001&cm_sp=intl_hdr-_-flytrackingbreadcrumb-_-28001_wear-to-work_COL1",
# 1250
"http://www.asd.tv/",
# 1251
"http://www.twitch.tv/",
# 1252
"http://jx.ifeng.com/a/20160114/4192621_0.shtml",
# 1253
"http://www.ebay.in/cln/sukaina_7389/Lap-it-up/266105110013",
# 1254
"http://bbs.lady8844.com/thread-1744166-1-1.html",
# 1255
"http://www.kinopoisk.ru/",
# 1256
"http://mobile.zol.com.cn/549/5496564.html",
# 1257
"http://ent.cntv.cn/special/zgmgqz/",
# 1258
"https://www.wikimedia.org//species.wikimedia.org/",
# 1259
"http://bbs.yaolan.com/",
# 1260
"http://www.wp.pl/",
# 1261
"http://sports.sina.com.cn/g/laliga/2016-01-14/doc-ifxnrahr8292867.shtml",
# 1262
"http://city.china.com/pic/11146172/20160114/21138608.html",
# 1263
"http://culture.gmw.cn/2016-01/14/content_18487603.htm",
# 1264
"http://gfycat.com/",
# 1265
"http://shop.nordstrom.com/c/bucket-bags?dept=8000001&origin=topnav",
# 1266
"http://oshiete.goo.ne.jp/qa/9153610.html",
# 1267
"http://so.lianmeng.360.cn/",
# 1268
"http://www.snapdeal.com/products/lifestyle-watches",
# 1269
"http://www.lady8844.com/",
# 1270
"http://www.google.com.pk/intl/en/ads/",
# 1271
"http://man.39.net/a/160112/4756532.html",
# 1272
"http://www.google.cl/",
# 1273
"http://www.amazon.cn/careers",
# 1274
"http://tv.yesky.com/276/99912776.shtml",
# 1275
"http://www.google.fr/",
# 1276
"http://fk.39.net/lc/",
# 1277
"http://i.baike.com/myRole.do?action=apply",
# 1278
"http://store.steampowered.com/",
# 1279
"http://cafeyte.about.com",
# 1280
"http://www.goodreads.com/",
# 1281
"http://onedio.com/haber/88-oscar-adaylari-aciklandi-mustang-yabanci-dilde-en-iyi-film-dalinda-aday--659045",
# 1282
"http://auto.sina.com.cn/service/2016-01-13/detail-ifxnkkux1265305.shtml?c=spr_web_sina_cnxh_auto_t0002",
# 1283
"http://www.ebay.com.au/",
# 1284
"http://www.libero.it/",
# 1285
"http://www.liveinternet.ru/",
# 1286
"http://www.ifeng.com/",
# 1287
"http://uae.souq.com/ae-en/lighting-or-lamps-or-chandeliers/garden-lighting-122%7Chome-decor-137%7Clamps---and---lightings-503/koopman%7Cbaby-zoo%7Cnsgt%7Cevidea/a-t-7/s/?sortby=cp_desc&page=2",
# 1288
"http://menshair.about.com",
# 1289
"http://www.goal.com/",
# 1290
"http://www.google.az/",
# 1291
"http://www.huffingtonpost.ca/news/home-decor-canada",
# 1292
"https://www.shopify.ca/",
# 1293
"https://www.whatsapp.com/press/",
# 1294
"http://www.dailymail.co.uk/news/article-3399833/Hatton-Garden-masterminds-caught-police-talking-raid.html",
# 1295
"http://amazon.co.uk/gp/bestsellers/electronics/560836",
# 1296
"http://ask.zol.com.cn/q/1556394.html",
# 1297
"http://www.snapdeal.com/products/food-noodles-soups-pasta",
# 1298
"http://military.china.com/",
# 1299
"http://very8879576.pixnet.net/blog/post/424682411",
# 1300
"http://sourceforge.net/",
# 1301
"http://cm.39.net/a/160113/4756490.html",
# 1302
"http://www.nytimes.com/content/help/rights/sale/terms-of-sale.html",
# 1303
"http://www.olx.in/item/eleven-months-old-balckberry-q5-for-seven-thousand-only-ID11r57p.html",
# 1304
"http://abbotsford.craigslist.ca/",
# 1305
"http://www.bloomberg.com/",
# 1306
"http://combate.globo.com/",
# 1307
"http://amazon.es/cookiesypublicidadeninternet",
# 1308
"http://www.impress.co.jp/",
# 1309
"http://www.google.be/",
# 1310
"http://www.booking.com/index.bg.html?bb_ltbi=0;sb_price_type=total&;lang=bg",
# 1311
"http://www.webmd.com/click?url=http://www.emedicinehealth.com/script/main/hp.asp",
# 1312
"http://adf.ly/",
# 1313
"http://www.adobe.com/",
# 1314
"http://car.bitauto.com/changanxiaoka/",
# 1315
"http://cctv.cntv.cn/lm/xinwendiaocha/index.shtml",
# 1316
"http://www.elpais.com/especial/clasificacion-colegios-madrid/",
# 1317
"http://www.t-online.de/wirtschaft/unternehmen/id_76644348/wirtschaft-aufsichtsratschef-mirow-hsh-nordbank-ist-eine-wettbewerbsfaehige-bank.html",
# 1318
"https://www.spotify.com/ca-en/",
# 1319
"http://detail.zol.com.cn/cell_phone/index1100891.shtml",
# 1320
"https://www.quora.com/",
# 1321
"http://redir.xuite.net/redir/xuite/www/index/log/daFunShowP^http://photo.xuite.net/_pic/ichiro0910/19743330/1111339391.jpg/redir",
# 1322
"https://ca.yahoo.com/?p=us",
# 1323
"https://www.facebook.com/",
# 1324
"http://www.milliyet.com.tr/mola-galeri-detay/misir-piramitlerinin-en-ilginc-9-ozelligi---/715/",
# 1325
"https://instagram.com/jabongindia/",
# 1326
"http://detail.zol.com.cn/digital_camera_index/subcate15_657_list_1.html",
# 1327
"http://www.dailymail.co.uk/video/news/video-1229387/Khloe-Kardashian-strips-raunchy-photo-shoot.html",
# 1328
"http://bbs.lady8844.com/zt/gkk14/",
# 1329
"http://news.nicovideo.jp/watch/nw1986410?news_ref=nicotop_topics_soft",
# 1330
"http://customer.xfinity.com/help-and-support",
# 1331
"http://amazon.it/",
# 1332
"http://vid.me/",
# 1333
"http://www.hatena.ne.jp/",
# 1334
"http://activity.bilibili.com",
# 1335
"http://www.ameba.jp/",
# 1336
"http://www.hurriyet.com.tr/yoldurumu/istanbul/",
# 1337
"http://yuedu.163.com/book_reader/f42523f43ba34722b2cf64ec99e9dd9d_4/e9bae95f834349b1a1041b24dc1c980f_4?utm_campaign=163ad&utm_source=163home&utm_medium=tab_0_3_6",
# 1338
"http://arte.about.com",
# 1339
"http://finance.oeeee.com/html/201601/12/356584.html",
# 1340
"http://www.mercadolivre.com.br/",
# 1341
"https://pinpoint.microsoft.com/en-ca",
# 1342
"http://www.facebook.com/dialog/share?app_id=132746074315&display=popup&href=http%3A%2F%2Fwww.engadget.com%2F2015%2F10%2F08%2Fnasa-finds-that-pluto-has-blue-skies-and-surface-water-ice%2F",
# 1343
"http://www.naukri.com/tieups/tieups.php?othersrcp=17636",
# 1344
"http://www.lenovo.com//shop.lenovo.com/ca/en/landingpage/announcements/",
# 1345
"http://www.fandango.com/",
# 1346
"http://agency.reuters.com",
# 1347
"http://www.alibaba.com/",
# 1348
"http://iservice.ltn.com.tw/Service/english/english.php?engno=931351&day=2015-11-11",
# 1349
"http://www.tudou.com/albumplay/TpIXNUU7Z5s/qD26a3lr7Fw.html",
# 1350
"http://sh.eastday.com/m/20160114/u1ai9179812.html",
# 1351
"http://www.hurriyet.com.tr/rusya-sultanahmet-saldirisiyla-baglantili-o-ismi-acikladi-haydar-suleymanov-40040530",
# 1352
"http://news.xywy.com/news/jrzd/20131211/733051.html",
# 1353
"http://www.bild.de/news/standards/nikolaus-blome/zu-frueh-fuer-grenz-schliessung-44155912.bild.html",
# 1354
"http://www.yaolan.com/zhishi/ertongjinshi/",
# 1355
"https://chaseonline.chase.com/",
# 1356
"https://developer.vimeo.com",
# 1357
"http://uae.souq.com/ae-en/hasbro/toys-24%7Cbaby-toys-and-accessories-335/hasbro/new/a-t-7-c/s/?sortby=ir_desc&page=1",
# 1358
"http://www.chip.de/preisvergleich/438425/LG-Electronics-49UF7787.html",
# 1359
"https://www.wellsfargo.com/goals-going-to-college/",
# 1360
"http://www.yesky.com/sitemap.shtml",
# 1361
"http://collection.sina.com.cn/yjjj/2016-01-14/doc-ifxnqriy2842198.shtml",
# 1362
"http://www.independent.co.uk/",
# 1363
"https://www.sogou.com/",
# 1364
"https://www.seznam.cz/",
# 1365
"https://www.linkedin.com/",
# 1366
"https://www.etsy.com/c/toys-and-games/toys/baby-and-toddler-toys?ref=catnav-2941",
# 1367
"http://lo.ameba.jp/v1/OrcVXglfogniYXHRWOEJ",
# 1368
"http://passport.china.com/jsp/user/findpassword.jsp",
# 1369
"http://www.dailymail.co.uk/tvshowbiz/article-3396305/Casey-Batchelor-showcases-legs-breasts-high-fashion-photoshoot.html",
# 1370
"http://www.slideshare.net/",
# 1371
"http://www.outbrain.com/",
# 1372
"http://www.ign.com/boards",
# 1373
"http://news.livedoor.com/topics/detail/11062813/",
# 1374
"http://conservativetribune.com/feds-schools-protect-students/",
# 1375
"http://www.microsoft.com/surface/en-ca/support/browse/surface-2?category=getting-started",
# 1376
"http://wp.tv",
# 1377
"http://www.lenovo.com/ca/en/",
# 1378
"http://esporte.uol.com.br/futebol/times/fluminense/",
# 1379
"http://www.microsoft.com/en-ca/",
# 1380
"http://www.bjcankao.com/",
# 1381
"http://www.163.com/",
# 1382
"http://store.steampowered.com/tag/en/Photo%20Editing/?snr=1_4_4__12",
# 1383
"http://www.pixnet.net",
# 1384
"http://www.sears.com/clothing-shoes-jewelry-shoes/b-1325051825",
# 1385
"http://ypk.39.net/yaopin/jsjnfmyy/jisulei/7bc1e.html",
# 1386
"http://www.ebay.co.uk/sch/Manicure-and-Pedicure/47945/bn_552182/i.html",
# 1387
"http://www.dailymail.co.uk/tvshowbiz/article-3396065/Shia-LaBeouf-girlfriend-three-years-Mia-Goth-nearly-identical-grey-sweatsuits.html",
# 1388
"http://shop.nordstrom.com/c/housewarming-gifts?dept=8000001&origin=topnav",
# 1389
"http://fxn.ws/1Ro7HfP",
# 1390
"http://rd.nicovideo.jp/cc/nicotop_blomaga/mediaarlist",
# 1391
"http://news.xywy.com/news/jrzd/20140113/733212.html",
# 1392
"http://www.cheyisou.com/",
# 1393
"http://amazon.ca/Dell-13-3-inch-Ultrabook-Computer-Processor/dp/B00RY4X8A4",
# 1394
"http://car.auto.caijing.com.cn/xuanchegongju/?p=5-8",
# 1395
"http://baby.39.net/yeqz/",
# 1396
"http://www.babytree.com/rd/rd.php?refcode=bbox1017syl&sid=bbox1017syl&url=http://babybox.babytree.com/",
# 1397
"http://www.olx.in/posting/",
# 1398
"http://ent.china.com/star/news/11052670/20160114/21137801.html",
# 1399
"http://detail.zol.com.cn/gpswatch/smartq/",
# 1400
"http://www.ebay.it",
# 1401
"http://s.kouclo.com/search.php?c=1902,DIY%E9%A5%B0%E5%93%81%E9%85%8D%E4%BB%B6",
# 1402
"https://hootsuite.com/",
# 1403
"http://www.ebay.de/cln/inner-circle-outlet/Naked-NO-it-s-just-NUDE/264988527014",
# 1404
"http://plus.baike.com/ihangkonghangtian",
# 1405
"https://www.etsy.com/c/craft-supplies-and-tools/fiber-and-textile-art-supplies/knitting-and-crocheting?ref=catnav-562",
# 1406
"http://kompas.com/",
# 1407
"http://www.ce.cn/cysc/",
# 1408
"http://car.bitauto.com/kaidilake/",
# 1409
"http://yyk.39.net/doctors/gaoxueya/",
# 1410
"http://auto.china.com/zhuanzai/hangye/11162373/20160114/21138859.html",
# 1411
"http://www.aol.ca/?r=www.aol.com",
# 1412
"http://blogs.wsj.com/japanrealtime/",
# 1413
"http://www.yapo.cl/",
# 1414
"http://ent.qq.com/a/20160114/019261.htm",
# 1415
"http://www.zillow.com/browse/homes/ky/",
# 1416
"http://sn.ifeng.com/zixun/jinrishanxi/detail_2016_01/14/4740220_0.shtml",
# 1417
"http://www.buzzfeed.com/",
# 1418
"http://photo.cntv.cn/index.shtml",
# 1419
"http://fitness.39.net/special/fms/",
# 1420
"http://shop.nordstrom.com/c/rag-bone?dept=8000001&origin=topnav",
# 1421
"http://www.snapdeal.com/products/mobiles-power-banks/?q=Mah_s:5001-7000^%207001-8000^%208001-9000",
# 1422
"http://www.teepr.com/category/%e6%b8%ac%e9%a9%97/",
# 1423
"http://tv.tudou.com/hanju/",
# 1424
"http://www.google.nl/chrome/browser/?hl=nl&brand=CHNG&utm_source=nl-hpp&utm_medium=hpp&utm_campaign=nl",
# 1425
"http://www.wsj.com/articles/how-the-indonesia-terror-attack-unfolded-1452788046",
# 1426
"http://www.bbc.com/guidance/",
# 1427
"http://www.kohls.com/catalog/clearance-toys.jsp?CN=4294736457+4294720971&cc=toys-TN2.0-S-clearancetoys",
# 1428
"http://ypk.39.net/zcy/qry/zcy-1.shtml",
# 1429
"http://kouclo.com/",
# 1430
"http://edmonton.craigslist.ca/search/sks",
# 1431
"http://wp.tv/i,przelomowe-wyniki-badan-wloskich-uczonych,mid,1857150,cid,4051,klip.html",
# 1432
"http://themeforest.net/category/muse-templates/corporate",
# 1433
"https://kat.cr/search/the%20revenant/",
# 1434
"http://sports.sina.com.cn/g/laliga/2016-01-14/doc-ifxnqriy2876302.shtml",
# 1435
"https://www.sway.com?WT.mc_id=O16_BingHP&utm_source=O16Bing&utm_medium=Nav&utm_campaign=HP",
# 1436
"https://www.wikimedia.org/",
# 1437
"http://www.jabong.com/beauty/",
# 1438
"http://en.savefrom.net/",
# 1439
"http://www.reuters.com/article/us-usa-coal-idUSKCN0US2WB20160114",
# 1440
"http://www.sf49ers.com/",
# 1441
"http://www.rediff.com/",
# 1442
"http://www.amazon.in/Fragrances/b?ie=UTF8&node=1374298031",
# 1443
"http://tradeadexchange.com/",
# 1444
"http://shutterstock.com/pic-54411157/stock-vector.html",
# 1445
"http://www.google.co.th/",
# 1446
"http://ppomppu.co.kr/zboard/view.php?id=mobile_gallery&no=47133",
# 1447
"http://www.google.com.au/",
# 1448
"http://www.kohls.com/catalog/powered-riding-toys-outdoor-play-toys-toys.jsp?CN=4294719493+4294719494+4294719592&cc=toys-TN2.0-S-poweredriding",
# 1449
"http://www.nyaa.se/?page=download&tid=774814",
# 1450
"http://maps.google.com.sa/maps?hl=ar&tab=wl",
# 1451
"http://www.enet.com.cn/instrument",
# 1452
"http://www.tudou.com/",
# 1453
"http://teleshow.wp.pl/wiadomosc.html?wid=18095151&title=Bilety-na-Kraftwerk-juz-w-sprzedazy&tpl=3",
# 1454
"http://publictransport.about.com",
# 1455
"http://www.xda-developers.com/",
# 1456
"http://gzly.cnnic.cn/gzly/index_new.jsp",
# 1457
"http://www.tmall.com/wow/chaoshi/act/tmcskhfc-lf",
# 1458
"http://dory.kr/2182",
# 1459
"http://en.gameforge.com/games/gamedetails/bitefight",
# 1460
"http://www.google.co.il/intl/iw/about.html",
# 1461
"http://product.yesky.com/gshijie/bizhi/362/70434862.shtml",
# 1462
"http://www.retailmenot.com/",
# 1463
"http://www.ettoday.net/news/focus/%E7%B6%B2%E6%90%9C/",
# 1464
"http://finance.sina.com.cn/stock/jsy/2016-01-14/doc-ifxnqriy2875826.shtml",
# 1465
"http://www.booking.com/destinationfinder/cities/it/rome.html?dsf_source=2&",
# 1466
"http://www.stumbleupon.com/",
# 1467
"http://www.fedex.com/ky/",
# 1468
"http://www.cnet.com/videos/cnet-news/",
# 1469
"http://japanpost.jp/locate/b15071301/index.php?tu=jpbank",
# 1470
"http://shutterstock.com/pic-233671087/stock-photo-budapest-hungary-nov-illustrative-editorial-photo-of-credit-cards-with-touch-free-paypass.html",
# 1471
"https://careers.americanexpress.com/?intlink=US-Homepage-Career-NOJS",
# 1472
"http://onedio.com/",
# 1473
"http://www.google.it/",
# 1474
"http://www.snapdeal.com/products/furniture-bedroom/filters/Type_s~Double%20Beds",
# 1475
"http://ppomppu.co.kr/",
# 1476
"https://ca.godaddy.com/",
# 1477
"http://www.hroot.com/default/index.html",
# 1478
"https://twitter.com/privacy",
# 1479
"http://baidu.com/",
# 1480
"http://bbs.hupu.com/15227696.html",
# 1481
"http://amazon.com/",
# 1482
"http://www.qulishi.com/",
# 1483
"http://www.sears.com/search=tech%20toys",
# 1484
"http://maps.yandex.ru",
# 1485
"http://www.tudou.com/home/weibusi/",
# 1486
"http://redir.xuite.net/redir/xuite/www/index/log/photoBlock^http://yo.xuite.net/info/edition_detail.php?id=58",
# 1487
"http://www.indiatimes.com/aboutus/",
# 1488
"http://community.newegg.com/",
# 1489
"http://jbk.39.net/jiancha/fbys/",
# 1490
"http://travel.kompas.com/hotel-story",
# 1491
"http://www.google.hu/",
# 1492
"http://amazon.de/Canon-Digitalkamera-Megapixel-ZoomPlus-LCD-Display/dp/B00RYV9P3Q",
# 1493
"http://sports.sina.com.cn/g/championsleague/",
# 1494
"http://vimeo.com/",
# 1495
"http://mailpec.libero.it/?o=hp",
# 1496
"https://eksisozluk.com/basliklar/kanal/oyun",
# 1497
"http://ettoday.net/news/20160114/630711.htm",
# 1498
"http://www.dailymail.co.uk/video/news/video-1234977/Family-perform-amazing-festive-dance-routine-Justin-Bieber.html",
# 1499
"http://www.appying.com/",
# 1500
"http://www.huffingtonpost.com/news/world-elections-2012/",
# 1501
"http://ent.china.com/star/news/11052670/20160114/21138724.html",
# 1502
"http://detail.zol.com.cn/369/368535/param.shtml",
# 1503
"http://rutracker.org/forum/index.php",
# 1504
"http://avito.ru/rossiya/ptitsy",
# 1505
"http://jn.house.ifeng.com/column/loupankong/qiuyinong",
# 1506
"https://mail.ru",
# 1507
"http://video.sina.com.cn/m/201512310544265_65165305.html",
# 1508
"http://www.att.com",
# 1509
"http://businessinsider.com/",
# 1510
"http://www.google.it/imghp?hl=it&tab=wi",
# 1511
"https://www.facebook.com/amazonwebservices?nc1=f_so_fb",
# 1512
"http://www.livedoor.com/",
# 1513
"https://eksisozluk.com/uzay-caginda-kagida-basilmis-kitap-okuyan-tip--5014601?a=nice",
# 1514
"http://product.yesky.com/product/862/862340/price.shtml",
# 1515
"http://customer.xfinity.com/overview",
# 1516
"https://mura.goo.ne.jp/",
# 1517
"http://www.taoche.com/qida/",
# 1518
"https://www.exoclick.com/",
# 1519
"http://blogfa.com/",
# 1520
"http://www.livedoor.com/",
# 1521
"http://www.intuit.com//global.intuit.com/choose-country.jsp",
# 1522
"http://www.addthis.com/",
# 1523
"http://espn.go.com/",
# 1524
"http://q.mama.cn/topic/24117274/",
# 1525
"http://www.ebay.in/sch/Portable-Audio-Video-/15052/i.html?_udlo=&_sop=12&_from=R40%7CR40%7CR40&_nkw=&_udhi=",
# 1526
"http://www.icicibank.com/Personal-Banking/account-deposit/iwish/index.page?",
# 1527
"http://www.imdb.com/title/tt1594972?pf_rd_m=A2FGELUUNOQJNL&pf_rd_p=2381712442&pf_rd_r=1JY4J1T5VXD1VBEY3CS8&pf_rd_s=right-3&pf_rd_t=15061&pf_rd_i=homepage&ref_=hm_otw_t2",
# 1528
"http://www.weebly.com/",
# 1529
"http://web.de/",
# 1530
"http://go.com/",
# 1531
"http://user.gearbest.com/my-orders.html",
# 1532
"http://theguardian.com/",
# 1533
"https://www.airbnb.com/",
# 1534
"http://gotexas.about.com",
# 1535
"http://www.espncricinfo.com/bangladesh/content/team/25.html",
# 1536
"http://www.bbc.co.uk/programmes/p03ffzrw",
# 1537
"http://foodnetwork.com/healthy/packages/healthy-every-week.html",
# 1538
"http://www.clevelandbrowns.com/?icampaign=nflcom-footer-clublogos-CLE",
# 1539
"http://jbk.39.net/zizhen?bw=%E8%87%80%E9%83%A8",
# 1540
"http://www.milliyet.com.tr/konut/",
# 1541
"http://yandex.ru//www.yandex.ru/all",
# 1542
"https://adidas.world.tmall.com//adidas.tmall.com/p/rose6.htm?scene=taobao_shop",
# 1543
"http://www.enet.com.cn/hospital",
# 1544
"http://blog.milliyet.com.tr/ekranin-cicegi-burnunda-yenileri-/Blog/?BlogNo=519678&ref=milliyet_anasayfa",
# 1545
"http://z.xywy.com/jiahaozhuanjia.htm",
# 1546
"http://xunlei.com/",
# 1547
"https://www.kickstarter.com/",
# 1548
"http://plus.google.com/+NDTV/posts?pfrom=home-footer2015",
# 1549
"http://cctv5.cntv.cn/",
# 1550
"http://game.china.com/maoerduo/manzhan/news/11152946/20160114/21140492.html",
# 1551
"http://www.popads.net/blog",
# 1552
"http://www.bettermoneyhabits.com",
# 1553
"http://www.skype.com/en/",
# 1554
"http://en.gameforge.com/user/search",
# 1555
"http://redir.xuite.net/redir/xuite/www/index/log/photoBlock^http://photo.xuite.net/pzkw3489/19735668/24.jpg",
# 1556
"http://en.gameforge.com/home/index",
# 1557
"http://r.mail.ru/clb1406988/agent.mail.ru/?from=odnoklassniki",
# 1558
"http://www.google.cn/",
# 1559
"http://www.asos.com/asos-as-seen-on-screen/cat/pgehtml.aspx?cid=18967",
# 1560
"http://travel.detik.com/indeksfokus/1273/resolusi-traveling-2016/berita",
# 1561
"http://dictionary.reference.com/browse/dignified",
# 1562
"http://www.snapdeal.com/products/men-apparel-jeans",
# 1563
"https://aws.amazon.com/codecommit/?nc2=h_l3_dm",
# 1564
"http://www.kohls.com/catalog/bath-toys-bath-baby-gear.jsp?CN=4294719283+4294719613+4294719566&cc=toys-TN2.0-S-bathtoys",
# 1565
"https://www.groupon.com/coupons/stores/barnesandnoble",
# 1566
"http://services.amazon.co.uk/services/sponsored-products/how-it-works.html/ref=AMZfooter_UK_SP?ld=AMZfooterUKsp",
# 1567
"http://nk.39.net/a/160111/4756072.html",
# 1568
"http://product.yesky.com/product/862/862264/",
# 1569
"http://dcdv.zol.com.cn/topic/4859123.html",
# 1570
"http://car.bitauto.com/yingfeinidiqx56/",
# 1571
"http://hechi.bitauto.com/",
# 1572
"http://www.adventuresbydisney.com/?CMP=AFC-DPFY13Q1DIENT1376D",
# 1573
"http://www.kaskus.co.id/forum/671/?ref=homelanding&med=forum_categories",
# 1574
"http://www.kohls.com/catalog/womens-evening-pumps-heels-shoes.jsp?CN=4294720878+4294737256+4294719627+4294719777&cc=shoes-TN2.0-S-WomensEvening",
# 1575
"http://home.focus.cn/news/2016-01-06/10634257.html",
# 1576
"http://t.co/",
# 1577
"http://amazon.co.uk/Sony-DSCHX60-Digital-Compact-Optical/dp/B00IGL9PSS",
# 1578
"http://stackoverflow.com/questions/34801430/is-there-an-online-host-for-r-that-allows-it-to-work-continuously",
# 1579
"http://www.kinopoisk.ru/name/26537/",
# 1580
"http://www.kaskus.co.id/forum/13/?ref=homelanding&med=forum_categories",
# 1581
"http://www.buzzfeed.com/dashboard/nathanwpyle/how-empathy-takes-us-out-on-a-limb",
# 1582
"http://zt.xywy.com/rczy/",
# 1583
"http://edmonton.craigslist.ca//www.craigslist.org/about/help/",
# 1584
"http://www.tudou.com/programs/view/-KTYjmHaZME/",
# 1585
"http://www.alibaba.com/Rubber-Plastics_p80",
# 1586
"http://esf.focus.cn/search/",
# 1587
"http://www.samsung.com/ca/home/",
# 1588
"http://netflix.com/",
# 1589
"http://my.ebay.in/wishlistsearch/",
# 1590
"http://www.eastdaybar.com/",
# 1591
"http://car.bitauto.com/kuncheng/",
# 1592
"http://www.51.la/",
# 1593
"http://www.quantcast.com/p-4b4gl_1fWISuU",
# 1594
"http://bbs.lady8844.com/thread-1461595-1-1.html",
# 1595
"http://weibo.com/login.php",
# 1596
"http://fortune.goo.ne.jp/destiny/aries.html",
# 1597
"http://www.urdupoint.com/books/baab/novel/aas-paas-hai-khuda-115-desc",
# 1598
"http://amazon.it/Sony-Fotocamera-Obiettivo-Intercambiabile-Megapixel/dp/B00HH8A60C",
# 1599
"http://www.shopclues.com/wholesale/super-deals-of-wholesale/fashion-307.html",
# 1600
"http://www.shopclues.com/car-and-bike-accessories/car-accessories/car-interiors-and-comfort/car-perfumes-and-freshners.html",
# 1601
"http://videojuegos.about.com",
# 1602
"https://www.chase.com/",
# 1603
"https://www.etsy.com/c/craft-supplies-and-tools/patterns-and-tutorials/sewing-and-needlecraft/needlepoint?ref=catnav-562",
# 1604
"https://plus.google.com/u/0/+%D0%A0%D0%91%D0%9A/posts",
# 1605
"http://www.bilibili.com/",
# 1606
"http://gizmodo.com//kinja.com/katharinetrendacosta",
# 1607
"http://www.intuit.com/",
# 1608
"http://adplxmd.com/",
# 1609
"http://www.kohls.com/catalog/lc-lauren-conrad-shoes.jsp?CN=4294873305+4294719777&cc=shoes-TN2.0-S-LCLaurenConrad",
# 1610
"http://amazon.ca/FIT-20-5200-32-Pound-Dumbbell-Rack/dp/B00B4RVYPS",
# 1611
"http://www.tudou.com/home/discoveryvideo/",
# 1612
"https://www.whatsapp.com/dl/",
# 1613
"http://conservativetribune.com/powerball-jackpot-do-not-do/",
# 1614
"http://114.1688.com/newbie/course/36.htm",
# 1615
"http://club.xywy.com/doc_card/63691392",
# 1616
"https://watsons.world.tmall.com//watsons.tmall.com/category-1135919138.htm?search=y&parentCatId=1122785379&parentCatName=%C3%C0%D7%B1%B9%A4%BE%DF&catName=%BB%AF%D7%B1%C3%DE%B0%F4",
# 1617
"https://www262.americanexpress.com/business-card-application/mgm/200002-CCSG?inav=footer_refer_friend",
# 1618
"http://www.webmd.com/alzheimers/features/mind-diet-alzheimers-disease",
# 1619
"http://www.washingtonpost.com",
# 1620
"http://news.ifeng.com/a/20160114/47070310_0.shtml",
# 1621
"http://www.nike.com/th/th_th/",
# 1622
"http://www.ebay.co.uk/sch/Home-Audio-HiFi-Separates/14969/bn_1838773/i.html",
# 1623
"http://www.douyutv.com/411433",
# 1624
"http://fc2.com/",
# 1625
"http://www.mama.cn/ask/jingxuan/1365/",
# 1626
"http://jbk.39.net/zhengzhuang/syx/",
# 1627
"http://www.amazon.in/",
# 1628
"http://huabao.qzone.qq.com/",
# 1629
"https://www.whatsapp.com/",
# 1630
"http://car.bitauto.com/volvo/",
# 1631
"http://amazon.co.jp/",
# 1632
"http://www.asos.com/women/marketplace-edits/cat/pgehtml.aspx?cid=19024&via=top",
# 1633
"http://bola.liputan6.com/kategori/jadwal-live-streaming-bola",
# 1634
"http://kinogo.co/",
# 1635
"http://www.ebay.co.uk/sch/Vehicle-Parts-Accessories-/131090/i.html",
# 1636
"http://mamibox.yaolan.com/",
# 1637
"http://espn.go.com/mens-college-basketball/insider/story/_/id/14568141/keys-michigan-state-spartans-iowa-hawkeyes-matchup",
# 1638
"http://www.dailymail.co.uk/tvshowbiz/article-3398926/Jonathan-Rhys-Meyers-picture-health-shows-eccentric-style-oversized-hooded-jacket-outing-fianc-e-Mara-Lane.html",
# 1639
"http://www.yodobashi.com/ec/feature/174116_000000000000024018/",
| |
sequence per batch.
window_per_building : dict
Keys are Building instance integers.
Must have one for each building.
Values are (<start>, <end>) dates (or whatever
nilmtk.DataSet.set_window() accepts)
"""
self._set_logger(logger)
self.dataset = DataSet(filename)
self.appliances = appliances
if max_input_power is None and max_appliance_powers is not None:
self.max_input_power = np.sum(max_appliance_powers)
else:
self.max_input_power = max_input_power
self.clip_input = clip_input
self.divide_input_by_max_input_power = divide_input_by_max_input_power
if max_appliance_powers is None:
max_appliance_powers = [None] * len(appliances)
self.max_appliance_powers = OrderedDict()
for i, appliance in enumerate(appliances):
if isinstance(appliance, list):
appliance_name = appliance[0]
else:
appliance_name = appliance
self.max_appliance_powers[appliance_name] = max_appliance_powers[i]
self.dataset.set_window(*window)
if train_buildings is None:
train_buildings = [1]
if validation_buildings is None:
validation_buildings = [1]
self.output_one_appliance = output_one_appliance
self.sample_period = sample_period
self.boolean_targets = boolean_targets
self.skip_probability = skip_probability
if skip_probability_for_first_appliance is None:
self.skip_probability_for_first_appliance = skip_probability
else:
self.skip_probability_for_first_appliance = (
skip_probability_for_first_appliance)
self._tz = self.dataset.metadata['timezone']
self.include_diff = include_diff
self.include_power = include_power
self.max_diff = max_diff
self.clip_appliance_power = clip_appliance_power
if lag is None:
lag = -1 if target_is_prediction else 0
elif lag == 0 and target_is_prediction:
warn("lag is 0 and target_is_prediction==True."
" Hence output will be identical to input.")
self.lag = lag
self.target_is_prediction = target_is_prediction
self.target_is_diff = target_is_diff
self.one_target_per_seq = one_target_per_seq
self.ensure_all_appliances_represented = (
ensure_all_appliances_represented)
self.border = border
if window_per_building and window != (None, None):
raise ValueError(
"Cannot set both `window_per_building` and `window`")
self.window_per_building = window_per_building
self.logger.info("Loading training activations...")
if on_power_thresholds is None:
on_power_thresholds = [None] * len(self.appliances)
self.on_power_thresholds = on_power_thresholds
if min_on_durations is None:
min_on_durations = [0] * len(self.appliances)
self.train_activations = self._load_activations(
train_buildings, min_on_durations, min_off_durations,
on_power_thresholds)
if train_buildings == validation_buildings:
self.validation_activations = {}
# validation activations will be removed from train_activations
self.remove_used_activations = True
else:
self.logger.info("Loading validation activations...")
self.remove_used_activations = False
self.validation_activations = self._load_activations(
validation_buildings, min_on_durations, min_off_durations,
on_power_thresholds)
self.dataset.store.close()
super(RealApplianceSource, self).__init__(
seq_length=seq_length,
n_seq_per_batch=n_seq_per_batch,
n_inputs=sum([include_diff, include_power]),
n_outputs=(1 if output_one_appliance or target_is_prediction
else len(appliances)),
**kwargs
)
assert not (self.input_padding and self.random_window)
self.logger.info("Done loading activations.")
def get_labels(self):
    """Return the appliance labels (the keys of the training activations)."""
    labels = self.train_activations.keys()
    return labels
def _load_activations(self, buildings, min_on_durations, min_off_durations,
                      on_power_thresholds):
    """Load per-appliance on-power 'activations' from the given buildings.

    Parameters
    ----------
    buildings : list of building ints
    min_on_durations, min_off_durations, on_power_thresholds : lists
        One entry per appliance, forwarded to `meter.activation_series()`.

    Returns
    -------
    OrderedDict mapping canonical appliance name -> preprocessed
    activation series (output of `_preprocess_activations`).
    """
    activations = OrderedDict()
    for building_i in buildings:
        # Optionally restrict the dataset to a per-building date window
        # before reading this building's data.
        if self.window_per_building:
            window = self.window_per_building[building_i]
            self.logger.info(
                "Setting window for building {} to (start={}, end={})"
                .format(building_i, *window))
            self.dataset.set_window(*window)
        elec = self.dataset.buildings[building_i].elec
        meters = get_meters_for_appliances(
            elec, self.appliances, self.logger)
        for appliance_i, meter in enumerate(meters):
            appliance = self.appliances[appliance_i]
            # An appliance may be given as a list of synonyms; the first
            # entry is the canonical label used as the dict key.
            if isinstance(appliance, list):
                appliance = appliance[0]
            self.logger.info(
                " Loading activations for {} from building {}..."
                .format(appliance, building_i))
            activation_series = meter.activation_series(
                on_power_threshold=on_power_thresholds[appliance_i],
                min_on_duration=min_on_durations[appliance_i],
                min_off_duration=min_off_durations[appliance_i],
                resample=True)
            # NOTE(review): with multiple buildings, a later building
            # OVERWRITES an earlier building's entry for the same
            # appliance — confirm this is intended when len(buildings) > 1.
            activations[appliance] = _preprocess_activations(
                activation_series,
                max_power=self.max_appliance_powers[appliance],
                sample_period=self.sample_period,
                clip_appliance_power=self.clip_appliance_power)
            self.logger.info(
                " Loaded {:d} activations."
                .format(len(activation_series)))
    return activations
def _gen_single_example(self, validation=False, appliances=None):
    """Synthesise a single (X, y) sequence from appliance activations.

    A random subset of appliances is chosen (subject to the skip
    probabilities); one random activation per chosen appliance is pasted
    at a random offset into the aggregate input X, and y is filled for
    whichever appliances are being predicted.

    Parameters
    ----------
    validation : bool
        If True (and validation activations exist), draw from the
        validation activations instead of the training activations.
    appliances : list of (appliance_i, appliance_name) tuples, or None
        Appliances which *must* appear in this sequence (supplied by
        `_appliances_for_sequence`).

    Returns
    -------
    X, y : np.float32 arrays, initialised with shapes
        (seq_length, n_inputs) and (seq_length, n_outputs).
    """
    if appliances is None:
        appliances = []
    X = np.zeros(shape=(self.seq_length, self.n_inputs), dtype=np.float32)
    y = np.zeros(shape=(self.seq_length, self.n_outputs), dtype=np.float32)
    # Watts; at or below this an appliance counts as 'off' for boolean targets.
    POWER_THRESHOLD = 1
    if validation and self.validation_activations:
        activations = self.validation_activations
    else:
        activations = self.train_activations
    if not self.one_target_per_seq:
        # Randomly choose which appliances appear, retrying until at
        # least one appliance is selected.
        random_appliances = []
        # NOTE(review): indexing/slicing .keys() relies on Python 2
        # semantics (dict.keys() returning a list).
        appliance_names = activations.keys()
        while not random_appliances:
            if not self.rng.binomial(
                    n=1, p=self.skip_probability_for_first_appliance):
                appliance_i = 0
                appliance = appliance_names[0]
                random_appliances.append((appliance_i, appliance))
            for appliance_i, appliance in enumerate(appliance_names[1:]):
                if not self.rng.binomial(n=1, p=self.skip_probability):
                    random_appliances.append((appliance_i+1, appliance))
        appliances.extend(random_appliances)
    appliances = list(set(appliances))  # make unique
    for appliance_i, appliance in appliances:
        n_activations = len(activations[appliance])
        if n_activations == 0:
            continue
        activation_i = self.rng.randint(0, n_activations)
        # During validation, optionally consume the activation so it can
        # never also be used for training.
        if self.remove_used_activations and validation:
            activation = activations[appliance].pop(activation_i)
        else:
            activation = activations[appliance][activation_i]
        if self.output_one_appliance and appliance_i > 0:
            # Distractor appliance: allow it to start before the start of
            # the target seq and end after the end of the target seq.
            latest_start_i = self.seq_length - (self.border + self.lag)
            earliest_start_i = self.border - len(activation)
        else:
            # Target appliance: try to fit it entirely into the seq.
            latest_start_i = ((self.seq_length - len(activation)) -
                              (self.border + self.lag))
            latest_start_i = max(latest_start_i, self.border)
            earliest_start_i = 0
        start_i = self.rng.randint(earliest_start_i, latest_start_i)
        # A negative start_i means the activation is clipped on the left.
        X_start_i = max(0, start_i)
        activation_start_i = -min(0, start_i)
        activation_end_i = (
            activation_start_i + self.seq_length - X_start_i - self.lag)
        target = activation.values[activation_start_i:activation_end_i]
        X_end_i = X_start_i + len(target)
        assert X_end_i <= self.seq_length
        # The aggregate input is the sum of all pasted activations.
        X[X_start_i:X_end_i, 0] += target
        if (not self.target_is_prediction and
                (appliance_i == 0 or not self.output_one_appliance)):
            target = np.copy(target)
            if self.boolean_targets:
                # Binarise: on/off rather than power.
                target[target <= POWER_THRESHOLD] = 0
                target[target > POWER_THRESHOLD] = 1
            else:
                # Scale by the appliance's max power, when known.
                max_appliance_power = self.max_appliance_powers[appliance]
                if max_appliance_power is not None:
                    target /= max_appliance_power
            if self.target_is_diff:
                y[(X_start_i+self.lag):(X_end_i+self.lag-1),
                  appliance_i] = np.diff(target)
            else:
                y[(X_start_i+self.lag):(X_end_i+self.lag),
                  appliance_i] = target
    if self.clip_input:
        np.clip(X, 0, self.max_input_power, out=X)
    fdiff = np.diff(X[:, 0]) / self.max_diff
    if (self.divide_input_by_max_input_power and
            self.max_input_power is not None):
        X[:, 0] /= self.max_input_power
    if self.target_is_prediction:
        # The target is the (possibly lagged) input itself.
        if self.target_is_diff:
            data = np.concatenate([fdiff, [0]]).reshape(self.seq_length, 1)
        else:
            data = np.copy(X)
        if self.lag > 0:
            y[self.lag:, :] = data[:-self.lag, :]
        elif self.lag == 0:
            y = data
        else:
            y[:self.lag, :] = data[-self.lag:, :]
    if self.include_diff:
        # Optional second input feature: first differences of the aggregate.
        feature_i = int(self.include_power)
        X[:-1, feature_i] = fdiff
    return X, y
def _appliances_for_sequence(self):
"""Returns a dict which maps from seq_i to a list of appliances which
must be included in that sequence. This is used to ensure that,
if `skip_probability` > 0 then every appliance must be represented in
at least one sequence.
"""
if not self.ensure_all_appliances_represented:
return {}
all_appliances = list(enumerate(self.get_labels()))
if self.one_target_per_seq:
return {i: [all_appliances[i % len(all_appliances)]]
for i in range(self.n_seq_per_batch)}
if self.skip_probability == 0:
return {i: [] for i in range(self.n_seq_per_batch)}
n_appliances = len(self.appliances)
n_appliances_per_seq = n_appliances // self.n_seq_per_batch
remainder = n_appliances % self.n_seq_per_batch
appliances_for_sequence = {}
for i in range(self.n_seq_per_batch):
start = n_appliances_per_seq * i
end = start + n_appliances_per_seq
if remainder:
end += 1
remainder -= 1
appliances = all_appliances[start:end]
appliances_for_sequence[i] = appliances
return appliances_for_sequence
def _gen_data(self, validation=False):
    """Build one full batch of (X, y) by generating one sequence at a time."""
    batch_X = np.zeros(self.input_shape(), dtype=np.float32)
    batch_y = np.zeros(self.output_shape(), dtype=np.float32)
    # Appliances that must be present in specific sequences (may be {}).
    forced_appliances = self._appliances_for_sequence()
    for seq_i in range(self.n_seq_per_batch):
        seq_X, seq_y = self._gen_single_example(
            validation, forced_appliances.get(seq_i))
        batch_X[seq_i, :, :] = seq_X
        batch_y[seq_i, :, :] = seq_y
    # When validation consumes activations, report how many remain.
    if self.remove_used_activations and validation:
        for appliance in self.appliances:
            if isinstance(appliance, list):
                appliance = appliance[0]
            self.logger.info(
                "{}: {:d} train activations".format(
                    appliance, len(self.train_activations[appliance])))
    return batch_X, batch_y
class NILMTKSource(Source):
    """Draws sequences straight from a NILMTK dataset on disk.

    Sequences are sampled from contiguous 'good sections' of real mains
    data, rather than synthesised from individual appliance activations.
    """
    def __init__(self, filename, appliances,
                 train_buildings, validation_buildings,
                 window=(None, None),
                 sample_period=6,
                 **kwargs):
        """
        Parameters
        ----------
        filename : str
            Path of the NILMTK dataset file.
        appliances : list
            One output channel per appliance.
        train_buildings, validation_buildings : lists of building ints
        window : (start, end) pair accepted by nilmtk.DataSet.set_window
        sample_period : int
            Seconds between samples after resampling.
        """
        super(NILMTKSource, self).__init__(
            n_outputs=len(appliances),
            n_inputs=1,
            **kwargs)
        self.window = window
        self.dataset = DataSet(filename)
        self.dataset.set_window(*window)
        self.tz = self.dataset.metadata['timezone']
        # Re-express the window as timezone-aware Timestamps.
        self.window = [pd.Timestamp(ts, tz=self.tz) for ts in self.window]
        self.appliances = appliances
        self.train_buildings = train_buildings
        self.validation_buildings = validation_buildings
        self.sample_period = sample_period
        self._init_meter_groups()
        self._init_good_sections()
    def _init_meter_groups(self):
        # One MeterGroup per building, holding only the requested
        # appliances' meters.
        self.metergroups = {}
        for building_i in self._all_buildings():
            elec = self.dataset.buildings[building_i].elec
            meters = get_meters_for_appliances(
                elec, self.appliances, self.logger)
            self.metergroups[building_i] = MeterGroup(meters)
    def _all_buildings(self):
        # Union of train and validation buildings, de-duplicated.
        buildings = self.train_buildings + self.validation_buildings
        buildings = list(set(buildings))
        return buildings
    def _init_good_sections(self):
        # Cache, per building, the mains 'good sections' long enough to
        # supply at least one full sequence.
        self.good_sections = {}
        min_duration_secs = self.sample_period * self.seq_length
        min_duration = timedelta(seconds=min_duration_secs)
        for building_i in self._all_buildings():
            self.logger.info(
                "init good sections for building {}".format(building_i))
            mains = self.dataset.buildings[building_i].elec.mains()
            self.good_sections[building_i] = [
                section for section in mains.good_sections()
                if section.timedelta > min_duration
            ]
    def _gen_single_example(self, validation=False):
        """Sample one random sequence from a random good section.

        Returns (appliances_power, mains_power, time_of_day), where
        time_of_day is scaled into [-1, 1).
        """
        buildings = (self.validation_buildings if validation
                     else self.train_buildings)
        building_i = self.rng.choice(buildings)
        elec = self.dataset.buildings[building_i].elec
        # Choose a random seq_length-sample window inside a random
        # good section of this building's mains.
        section = self.rng.choice(self.good_sections[building_i])
        section_duration = section.timedelta.total_seconds()
        max_duration = self.sample_period * self.seq_length
        latest_start = section_duration - max_duration
        relative_start = self.rng.randint(0, latest_start)
        start = section.start + timedelta(seconds=relative_start)
        end = start + timedelta(seconds=max_duration)
        sections = [TimeFrame(start, end)]
        # NOTE(review): `.next()` is Python 2 generator syntax; use
        # next(...) if this is ever ported to Python 3.
        mains_power = elec.mains().power_series(
            sample_period=self.sample_period, sections=sections).next()
        appliances_power = self.metergroups[building_i].dataframe_of_meters(
            sample_period=self.sample_period, sections=sections)
        def truncate(data):
            # Resampling can produce an extra row; trim to seq_length.
            n = len(data)
            assert n >= self.seq_length
            if n > self.seq_length:
                data = data[:self.seq_length]
            return data
        mains_power = truncate(mains_power)
        appliances_power = truncate(appliances_power)
        appliances_power.columns = elec.get_labels(appliances_power.columns)
        # Time-of-day feature: seconds into the (timezone-naive) day,
        # rescaled from [0, SECS_PER_DAY) to [-1, 1).
        index = mains_power.index.tz_localize(None)
        secs_into_day = (index.astype(int) / 1E9) % SECS_PER_DAY
        time_of_day = ((secs_into_day / SECS_PER_DAY) * 2.) - 1.
        return appliances_power, mains_power, time_of_day
def timestamp_to_int(ts):
    """Convert anything `pd.Timestamp` accepts into integer seconds since
    the Unix epoch (truncating sub-second precision)."""
    stamp = pd.Timestamp(ts)
    as_seconds = stamp.asm8.astype('datetime64[s]')
    return as_seconds.astype(int)
class NILMTKSourceOld(Source):
def __init__(self, filename, appliances, building=1):
    """
    Parameters
    ----------
    filename : str
    appliances : list of strings
        The first one is the target appliance
    building : int
    """
    # BUG FIX: the original called super(NILMTKSource, self).__init__,
    # naming the *wrong* class.  NILMTKSource is not in NILMTKSourceOld's
    # MRO, so that call raises TypeError on every instantiation
    # (likely a leftover from renaming the class to NILMTKSourceOld).
    super(NILMTKSourceOld, self).__init__(
        seq_length=14400,
        n_seq_per_batch=5,
        n_inputs=1000,
        n_outputs=1)
    self.sample_period = 6    # seconds between samples
    self.min_power = 20       # watts; days with target below this are skipped
    self.max_power = 200      # watts; power is clipped/normalised to this
    self.dataset = DataSet(filename)
    self.appliances = appliances
    self._tz = self.dataset.metadata['timezone']
    self.metergroup = self.dataset.buildings[building].elec
def _get_data_for_single_day(self, start):
start = pd.Timestamp(start).date()
end = start + timedelta(days=1)
timeframe = TimeFrame(start, end, tz=self._tz)
load_kwargs = dict(sample_period=self.sample_period,
sections=[timeframe])
# Load output (target) data
app = self.metergroup[self.appliances[0]]
y = app.power_series_all_data(**load_kwargs)
if y is None or y.max() < self.min_power:
return None, None
# Load input (aggregate) data
app = y + self.metergroup[self.appliances[1]]
X = app.power_series_all_data(**load_kwargs)
for appliance in self.appliances[2:]:
app = self.metergroup[appliance]
X += app.power_series_all_data(**load_kwargs)
freq = "{:d}S".format(self.sample_period)
index = pd.date_range(start, end, freq=freq, tz=self._tz)
def preprocess(data):
data = data.fillna(0)
data = data.clip(upper=self.max_power)
data[data < self.min_power] = 0
data = data.reindex(index, fill_value=0)
data /= self.max_power
return data
def index_as_minus_one_to_plus_one(data):
index | |
True])
assert result.equals(expected)
def test_fill_null():
    """fill_null: bad array-length fill, null type, strings and large binary."""
    values = pa.array([1, 2, None, 4], type=pa.int8())
    bad_fill = pa.array([5], type=pa.int8())
    # An array-valued fill of a different length is rejected.
    with pytest.raises(pa.ArrowInvalid,
                       match="Array arguments must all be the same length"):
        values.fill_null(bad_fill)
    # Filling a null-typed array with a null scalar is a no-op.
    all_nulls = pa.array([None, None, None, None], type=pa.null())
    null_scalar = pa.scalar(None, type=pa.null())
    assert all_nulls.fill_null(null_scalar).equals(
        pa.array([None, None, None, None]))
    # Strings accept a bare Python string as the fill value.
    strings = pa.array(['a', 'bb', None])
    assert strings.fill_null('ccc').equals(pa.array(['a', 'bb', 'ccc']))
    # Large binary converts a string fill value.
    big_binary = pa.array([b'a', b'bb', None], type=pa.large_binary())
    assert big_binary.fill_null('ccc').equals(
        pa.array([b'a', b'bb', b'ccc'], type=pa.large_binary()))
    # Filling with None leaves the nulls in place.
    strings = pa.array(['a', 'bb', None])
    assert strings.fill_null(None).equals(pa.array(['a', 'bb', None]))
@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_fill_null_array(arrow_type):
    """fill_null on plain arrays: exact scalar fill plus implicit conversions."""
    values = pa.array([1, 2, None, 4], type=arrow_type)
    expected = pa.array([1, 2, 5, 4], type=arrow_type)
    # Scalar of the matching type.
    assert values.fill_null(pa.scalar(5, type=arrow_type)).equals(expected)
    # A bare Python int is converted implicitly.
    assert values.fill_null(5).equals(expected)
    # ARROW-9451: Unsigned integers allow this for some reason
    if not pa.types.is_unsigned_integer(values.type):
        with pytest.raises((ValueError, TypeError)):
            values.fill_null('5')
    # A scalar of a different (castable) numeric type also works.
    assert values.fill_null(pa.scalar(5, type='int8')).equals(expected)
@pytest.mark.parametrize('arrow_type', numerical_arrow_types)
def test_fill_null_chunked_array(arrow_type):
    """fill_null on chunked arrays, including empty chunks and conversions."""
    fill_scalar = pa.scalar(5, type=arrow_type)
    # Single-chunk case.
    single = pa.chunked_array([pa.array([None, 2, 3, 4], type=arrow_type)])
    assert single.fill_null(fill_scalar).equals(
        pa.chunked_array([pa.array([5, 2, 3, 4], type=arrow_type)]))
    # Multi-chunk case with an empty chunk in the middle.
    chunked = pa.chunked_array([
        pa.array([1, 2], type=arrow_type),
        pa.array([], type=arrow_type),
        pa.array([None, 4], type=arrow_type),
    ])
    expected = pa.chunked_array([
        pa.array([1, 2], type=arrow_type),
        pa.array([], type=arrow_type),
        pa.array([5, 4], type=arrow_type),
    ])
    assert chunked.fill_null(fill_scalar).equals(expected)
    # Implicit conversions behave the same as the exact-type scalar.
    assert chunked.fill_null(5).equals(expected)
    assert chunked.fill_null(pa.scalar(5, type='int8')).equals(expected)
def test_logical():
    """Boolean kernels: regular vs Kleene logic on null-bearing arrays."""
    left = pa.array([True, False, False, None])
    right = pa.array([True, True, False, True])
    assert pc.and_(left, right) == pa.array([True, False, False, None])
    assert pc.and_kleene(left, right) == pa.array([True, False, False, None])
    assert pc.or_(left, right) == pa.array([True, True, False, None])
    # Kleene OR: True | null is True, not null.
    assert pc.or_kleene(left, right) == pa.array([True, True, False, True])
    assert pc.xor(left, right) == pa.array([False, True, False, None])
    assert pc.invert(left) == pa.array([False, True, True, None])
def test_cast():
    """Casts: overflow safety, datetime -> ms timestamps, nested lists."""
    big = pa.array([2 ** 63 - 1], type='int64')
    # Narrowing overflow raises unless safe=False is requested.
    with pytest.raises(pa.ArrowInvalid):
        pc.cast(big, 'int32')
    assert pc.cast(big, 'int32', safe=False) == pa.array([-1], type='int32')
    # Python datetimes cast to millisecond timestamps.
    dates = pa.array([datetime(2010, 1, 1), datetime(2015, 1, 1)])
    millis = pa.array([1262304000000, 1420070400000], type='timestamp[ms]')
    assert pc.cast(dates, 'timestamp[ms]') == millis
    # Nested cast: large_list<int8> -> list<utf8>.
    nested = pa.array([[1, 2], [3, 4, 5]], type=pa.large_list(pa.int8()))
    as_strings = pa.array([["1", "2"], ["3", "4", "5"]],
                          type=pa.list_(pa.utf8()))
    assert pc.cast(nested, as_strings.type) == as_strings
def test_strptime():
    """strptime parses strings into timestamps; nulls pass through."""
    raw = pa.array(["5/1/2020", None, "12/13/1900"])
    parsed = pc.strptime(raw, format='%m/%d/%Y', unit='s')
    expected = pa.array(
        [datetime(2020, 5, 1), None, datetime(1900, 12, 13)],
        type=pa.timestamp('s'))
    assert parsed == expected
    # The format may also be passed positionally.
    assert pc.strptime(raw, '%m/%d/%Y', unit='s') == parsed
# TODO: We should test on windows once ARROW-13168 is resolved.
@pytest.mark.pandas
@pytest.mark.skipif(sys.platform == 'win32',
                    reason="Timezone database is not available on Windows yet")
def test_strftime():
    """Compare pc.strftime output with pandas' Series.dt.strftime.

    Covers many format codes across several timezones and units, the
    default format, and the error cases for %Z/%z on naive timestamps.
    """
    from pyarrow.vendored.version import Version
    # Pandas < 1.0 renders missing values as the literal string "NaT";
    # map those back to real NaT so the comparison with Arrow works.
    def _fix_timestamp(s):
        if Version(pd.__version__) < Version("1.0.0"):
            return s.to_series().replace("NaT", pd.NaT)
        else:
            return s
    times = ["2018-03-10 09:00", "2038-01-31 12:23", None]
    timezones = ["CET", "UTC", "Europe/Ljubljana"]
    formats = ["%a", "%A", "%w", "%d", "%b", "%B", "%m", "%y", "%Y", "%H",
               "%I", "%p", "%M", "%z", "%Z", "%j", "%U", "%W", "%c", "%x",
               "%X", "%%", "%G", "%V", "%u"]
    # Every (timezone, unit, format) combination must round-trip identically.
    for timezone in timezones:
        ts = pd.to_datetime(times).tz_localize(timezone)
        for unit in ["s", "ms", "us", "ns"]:
            tsa = pa.array(ts, type=pa.timestamp(unit, timezone))
            for fmt in formats:
                options = pc.StrftimeOptions(fmt)
                result = pc.strftime(tsa, options=options)
                expected = pa.array(_fix_timestamp(ts.strftime(fmt)))
                assert result.equals(expected)
        fmt = "%Y-%m-%dT%H:%M:%S"
        # Default format
        tsa = pa.array(ts, type=pa.timestamp("s", timezone))
        result = pc.strftime(tsa, options=pc.StrftimeOptions())
        expected = pa.array(_fix_timestamp(ts.strftime(fmt)))
        assert result.equals(expected)
        # Default format plus timezone
        tsa = pa.array(ts, type=pa.timestamp("s", timezone))
        result = pc.strftime(tsa, options=pc.StrftimeOptions(fmt + "%Z"))
        expected = pa.array(_fix_timestamp(ts.strftime(fmt + "%Z")))
        assert result.equals(expected)
        # Pandas %S is equivalent to %S in arrow for unit="s"
        tsa = pa.array(ts, type=pa.timestamp("s", timezone))
        options = pc.StrftimeOptions("%S")
        result = pc.strftime(tsa, options=options)
        expected = pa.array(_fix_timestamp(ts.strftime("%S")))
        assert result.equals(expected)
        # Pandas %S.%f is equivalent to %S in arrow for unit="us"
        tsa = pa.array(ts, type=pa.timestamp("us", timezone))
        options = pc.StrftimeOptions("%S")
        result = pc.strftime(tsa, options=options)
        expected = pa.array(_fix_timestamp(ts.strftime("%S.%f")))
        assert result.equals(expected)
        # Test setting locale
        tsa = pa.array(ts, type=pa.timestamp("s", timezone))
        options = pc.StrftimeOptions(fmt, locale="C")
        result = pc.strftime(tsa, options=options)
        expected = pa.array(_fix_timestamp(ts.strftime(fmt)))
        assert result.equals(expected)
    # Test timestamps without timezone
    fmt = "%Y-%m-%dT%H:%M:%S"
    ts = pd.to_datetime(times)
    tsa = pa.array(ts, type=pa.timestamp("s"))
    result = pc.strftime(tsa, options=pc.StrftimeOptions(fmt))
    expected = pa.array(_fix_timestamp(ts.strftime(fmt)))
    # Positional format
    assert pc.strftime(tsa, fmt) == result
    assert result.equals(expected)
    # %Z / %z require a timezone and must fail on naive input.
    with pytest.raises(pa.ArrowInvalid,
                       match="Timezone not present, cannot convert to string"):
        pc.strftime(tsa, options=pc.StrftimeOptions(fmt + "%Z"))
    with pytest.raises(pa.ArrowInvalid,
                       match="Timezone not present, cannot convert to string"):
        pc.strftime(tsa, options=pc.StrftimeOptions(fmt + "%z"))
def _check_datetime_components(timestamps, timezone=None):
    """Assert that each Arrow temporal-component kernel matches pandas.

    *timestamps* is a list of timestamp strings; *timezone* (optional) is
    the zone the values are converted to before comparison.  Covers year
    through nanosecond, ISO calendar fields, and the week/day_of_week
    option variants.
    """
    from pyarrow.vendored.version import Version
    # Localize to UTC then convert; tz_convert(None) yields naive UTC values.
    ts = pd.to_datetime(timestamps).tz_localize(
        "UTC").tz_convert(timezone).to_series()
    tsa = pa.array(ts, pa.timestamp("ns", tz=timezone))
    # Fractional seconds as a float, rounded to nanosecond precision.
    subseconds = ((ts.dt.microsecond * 10 ** 3 +
                   ts.dt.nanosecond) * 10 ** -9).round(9)
    iso_calendar_fields = [
        pa.field('iso_year', pa.int64()),
        pa.field('iso_week', pa.int64()),
        pa.field('iso_day_of_week', pa.int64())
    ]
    if Version(pd.__version__) < Version("1.1.0"):
        # https://github.com/pandas-dev/pandas/issues/33206
        iso_year = ts.map(lambda x: x.isocalendar()[0]).astype("int64")
        iso_week = ts.map(lambda x: x.isocalendar()[1]).astype("int64")
        iso_day = ts.map(lambda x: x.isocalendar()[2]).astype("int64")
    else:
        # Casting is required because pandas isocalendar returns int32
        # while arrow isocalendar returns int64.
        iso_year = ts.dt.isocalendar()["year"].astype("int64")
        iso_week = ts.dt.isocalendar()["week"].astype("int64")
        iso_day = ts.dt.isocalendar()["day"].astype("int64")
    iso_calendar = pa.StructArray.from_arrays(
        [iso_year, iso_week, iso_day],
        fields=iso_calendar_fields)
    assert pc.year(tsa).equals(pa.array(ts.dt.year))
    assert pc.month(tsa).equals(pa.array(ts.dt.month))
    assert pc.day(tsa).equals(pa.array(ts.dt.day))
    assert pc.day_of_week(tsa).equals(pa.array(ts.dt.dayofweek))
    assert pc.day_of_year(tsa).equals(pa.array(ts.dt.dayofyear))
    assert pc.iso_year(tsa).equals(pa.array(iso_year))
    assert pc.iso_week(tsa).equals(pa.array(iso_week))
    assert pc.iso_calendar(tsa).equals(iso_calendar)
    assert pc.quarter(tsa).equals(pa.array(ts.dt.quarter))
    assert pc.hour(tsa).equals(pa.array(ts.dt.hour))
    assert pc.minute(tsa).equals(pa.array(ts.dt.minute))
    assert pc.second(tsa).equals(pa.array(ts.dt.second.values))
    assert pc.millisecond(tsa).equals(pa.array(ts.dt.microsecond // 10 ** 3))
    assert pc.microsecond(tsa).equals(pa.array(ts.dt.microsecond % 10 ** 3))
    assert pc.nanosecond(tsa).equals(pa.array(ts.dt.nanosecond))
    assert pc.subsecond(tsa).equals(pa.array(subseconds))
    # One-based day_of_week starting on Monday must match pandas + 1.
    day_of_week_options = pc.DayOfWeekOptions(
        count_from_zero=False, week_start=1)
    assert pc.day_of_week(tsa, options=day_of_week_options).equals(
        pa.array(ts.dt.dayofweek + 1))
    # These week options reproduce the ISO week number.
    week_options = pc.WeekOptions(
        week_starts_monday=True, count_from_zero=False,
        first_week_is_fully_in_year=False)
    assert pc.week(tsa, options=week_options).equals(pa.array(iso_week))
@pytest.mark.pandas
def test_extract_datetime_components():
    """Run the datetime-component checks on naive and zone-aware inputs."""
    from pyarrow.vendored.version import Version
    sample_times = ["1970-01-01T00:00:59.123456789",
                    "2000-02-29T23:23:23.999999999",
                    "2033-05-18T03:33:20.000000000",
                    "2020-01-01T01:05:05.001",
                    "2019-12-31T02:10:10.002",
                    "2019-12-30T03:15:15.003",
                    "2009-12-31T04:20:20.004132",
                    "2010-01-01T05:25:25.005321",
                    "2010-01-03T06:30:30.006163",
                    "2010-01-04T07:35:35",
                    "2006-01-01T08:40:40",
                    "2005-12-31T09:45:45",
                    "2008-12-28",
                    "2008-12-29",
                    "2012-01-01 01:02:03"]
    zones = ["UTC", "US/Central", "Asia/Kolkata",
             "Etc/GMT-4", "Etc/GMT+4", "Australia/Broken_Hill"]
    # Timezone-naive array first.
    _check_datetime_components(sample_times)
    # Zone-aware arrays need a working tz database and a modern pandas;
    # guard clauses skip otherwise.
    if sys.platform == 'win32':
        # TODO: We should test on windows once ARROW-13168 is resolved.
        pytest.skip('Timezone database is not available on Windows yet')
    if Version(pd.__version__) < Version('1.0.0'):
        pytest.skip('Pandas < 1.0 extracts time components incorrectly.')
    for zone in zones:
        _check_datetime_components(sample_times, zone)
# TODO: We should test on windows once ARROW-13168 is resolved.
@pytest.mark.pandas
@pytest.mark.skipif(sys.platform == 'win32',
                    reason="Timezone database is not available on Windows yet")
def test_assume_timezone():
    """Check pc.assume_timezone against pandas tz_localize.

    Covers plain localization in several zones, rejection of already-zoned
    input and unknown zones, and the ambiguous/nonexistent resolution
    policies around DST transitions.

    BUG FIX: the four ``equals`` comparisons for the nonexistent/ambiguous
    policies were previously bare expressions whose results were discarded,
    so those checks could never fail; they are now asserted.
    """
    from pyarrow.vendored.version import Version
    ts_type = pa.timestamp("ns")
    timestamps = pd.to_datetime(["1970-01-01T00:00:59.123456789",
                                 "2000-02-29T23:23:23.999999999",
                                 "2033-05-18T03:33:20.000000000",
                                 "2020-01-01T01:05:05.001",
                                 "2019-12-31T02:10:10.002",
                                 "2019-12-30T03:15:15.003",
                                 "2009-12-31T04:20:20.004132",
                                 "2010-01-01T05:25:25.005321",
                                 "2010-01-03T06:30:30.006163",
                                 "2010-01-04T07:35:35",
                                 "2006-01-01T08:40:40",
                                 "2005-12-31T09:45:45",
                                 "2008-12-28",
                                 "2008-12-29",
                                 "2012-01-01 01:02:03"])
    # Times inside the Europe/Brussels spring-forward gap...
    nonexistent = pd.to_datetime(["2015-03-29 02:30:00",
                                  "2015-03-29 03:30:00"])
    # ...and inside the fall-back overlap.
    ambiguous = pd.to_datetime(["2018-10-28 01:20:00",
                                "2018-10-28 02:36:00",
                                "2018-10-28 03:46:00"])
    ambiguous_array = pa.array(ambiguous, type=ts_type)
    nonexistent_array = pa.array(nonexistent, type=ts_type)
    for timezone in ["UTC", "US/Central", "Asia/Kolkata"]:
        options = pc.AssumeTimezoneOptions(timezone)
        ta = pa.array(timestamps, type=ts_type)
        expected = timestamps.tz_localize(timezone)
        result = pc.assume_timezone(ta, options=options)
        assert result.equals(pa.array(expected))
        result = pc.assume_timezone(ta, timezone)  # Positional option
        assert result.equals(pa.array(expected))
        # Already-zoned input must be rejected.
        ta_zoned = pa.array(timestamps, type=pa.timestamp("ns", timezone))
        with pytest.raises(pa.ArrowInvalid, match="already have a timezone:"):
            pc.assume_timezone(ta_zoned, options=options)
    invalid_options = pc.AssumeTimezoneOptions("Europe/Brusselsss")
    with pytest.raises(ValueError, match="not found in timezone database"):
        pc.assume_timezone(ta, options=invalid_options)
    timezone = "Europe/Brussels"
    # nonexistent parameter was introduced in Pandas 0.24.0
    if Version(pd.__version__) >= Version("0.24.0"):
        options_nonexistent_raise = pc.AssumeTimezoneOptions(timezone)
        options_nonexistent_earliest = pc.AssumeTimezoneOptions(
            timezone, ambiguous="raise", nonexistent="earliest")
        options_nonexistent_latest = pc.AssumeTimezoneOptions(
            timezone, ambiguous="raise", nonexistent="latest")
        with pytest.raises(ValueError,
                           match="Timestamp doesn't exist in "
                                 f"timezone '{timezone}'"):
            pc.assume_timezone(nonexistent_array,
                               options=options_nonexistent_raise)
        # "latest" resolves a nonexistent time forward past the gap.
        expected = pa.array(nonexistent.tz_localize(
            timezone, nonexistent="shift_forward"))
        result = pc.assume_timezone(
            nonexistent_array, options=options_nonexistent_latest)
        assert expected.equals(result)
        # "earliest" resolves it backward to before the gap.
        expected = pa.array(nonexistent.tz_localize(
            timezone, nonexistent="shift_backward"))
        result = pc.assume_timezone(
            nonexistent_array, options=options_nonexistent_earliest)
        assert expected.equals(result)
    options_ambiguous_raise = pc.AssumeTimezoneOptions(timezone)
    options_ambiguous_latest = pc.AssumeTimezoneOptions(
        timezone, ambiguous="latest", nonexistent="raise")
    options_ambiguous_earliest = pc.AssumeTimezoneOptions(
        timezone, ambiguous="earliest", nonexistent="raise")
    with pytest.raises(ValueError,
                       match="Timestamp is ambiguous in "
                             f"timezone '{timezone}'"):
        pc.assume_timezone(ambiguous_array, options=options_ambiguous_raise)
    # Pandas: ambiguous=True means DST (the earlier of the two instants).
    expected = ambiguous.tz_localize(timezone, ambiguous=[True, True, True])
    result = pc.assume_timezone(
        ambiguous_array, options=options_ambiguous_earliest)
    assert result.equals(pa.array(expected))
    expected = ambiguous.tz_localize(timezone, ambiguous=[False, False, False])
    result = pc.assume_timezone(
        ambiguous_array, options=options_ambiguous_latest)
    assert result.equals(pa.array(expected))
def test_count():
    """Count valid, null and all values, including the invalid-mode error."""
    data = pa.array([1, 2, 3, None, None])
    # Default mode counts only valid (non-null) values.
    assert pc.count(data).as_py() == 3
    assert pc.count(data, mode='only_valid').as_py() == 3
    assert pc.count(data, mode='only_null').as_py() == 2
    assert pc.count(data, mode='all').as_py() == 5
    # The mode is also accepted positionally.
    assert pc.count(data, 'all').as_py() == 5
    with pytest.raises(ValueError,
                       match='"something else" is not a valid count mode'):
        pc.count(data, 'something else')
def test_index():
    """Locate values in plain and chunked arrays via pc.index / .index()."""
    values = pa.array([0, 1, None, 3, 4], type=pa.int64())
    assert pc.index(values, pa.scalar(0)).as_py() == 0
    # 2 is absent, so the result is -1 even with a narrower scalar type.
    assert pc.index(values, pa.scalar(2, type=pa.int8())).as_py() == -1
    assert pc.index(values, 4).as_py() == 4
    assert values.index(3, start=2).as_py() == 3
    assert values.index(None).as_py() == -1
    # Chunked arrays use logical (cross-chunk) indices.
    chunked = pa.chunked_array([[1, 2], [1, 3]], type=pa.int64())
    assert chunked.index(1).as_py() == 0
    assert chunked.index(1, start=2).as_py() == 2
    assert chunked.index(1, start=1, end=2).as_py() == -1
def check_partition_nth(data, indices, pivot, null_placement):
indices = indices.to_pylist()
assert len(indices) == len(data)
assert sorted(indices) == list(range(len(data)))
until_pivot = [data[indices[i]] for i in range(pivot)]
after_pivot = [data[indices[i]] for i in range(pivot, len(data))]
p = data[indices[pivot]]
if p is None:
if null_placement == "at_start":
assert all(v is None | |
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.utils.translation import gettext as _
from django.core.exceptions import ObjectDoesNotExist
from sales.models import Sales, SalesDetails
# from purchases.models import PurchasesProductsDetails
from datetime import datetime, timedelta
from customers.models import Customers
from users.models import Users
#from stores.models import Stores
from shops.models import Shops
from products.models import Products
from brands.models import Brands
import dateutil.parser
import json
# https://www.chartjs.org/samples/latest/scales/time/financial.html
# view-source:https://www.chartjs.org/samples/latest/scales/time/financial.html
# The assignments below exist only so that Django's `makemessages` sees
# these literals and adds them to the translation catalog; `dummy` itself
# is never read (each assignment overwrites the previous one).
dummy=_('Starting and ending dates are wrong')
dummy=_('Starting and ending times are wrong')
#dummy=_('Please select at least one store')
dummy=_('Please select at least one shop')
dummy=_('Please select at least one product')
dummy=_('Please select at least one brand')
dummy=_('Please select at least one customer')
dummy=_('Please select at least one cashier')
def __sales_of_the_day__(request, itm_menu):
    """Build the JavaScript for the "sales of the day" Chart.js chart.

    Returns a string of raw JavaScript (the template embeds it; no
    ``<script>`` wrapper here) that draws today's sales as a time-series
    line chart in #sales_of_the_day and injects an HTML details table
    into #secSalesOfTheDay #secDetails.

    :param request: current HttpRequest; its user scopes the query.
    :param itm_menu: active menu-item id, forwarded to the client-side
        detail handler ``showDetailsSalesAnalytics``.
    """
    try:
        today=datetime.today()
        # NOTE(review): sold_when is compared against a full datetime here;
        # confirm the model field is a DateField so equality matches today.
        sales=Sales.objects.filter(sold_when=today,dropped=False).order_by('sold_at')
        if len(sales) < 1:
            msg=_('There are not any sale today')
            html_='<p class="label label-default">'
            html_+=msg + '</p> '
            html='$("#secSalesOfTheDay").html(\''+html_+'\');'
            html+='$("#secSalesOfTheDay").parent().css("height", "375.6px");'
            html+='$("#secSalesOfTheDay #sales_of_the_day").remove();'
            # html+='$("#menuSalesOfTheDay").hide();'
            return html
        # Visibility scope: the requesting user plus every user they created.
        my_users=[]
        user=Users.objects.get(pk=request.user)
        users=Users.objects.filter(created_by_user=user)
        my_users.extend(users)
        my_users.append(user)
        shops=Shops.objects.filter(dropped=False,created_by_user__in=my_users)
        products=Products.objects.filter(dropped=False,created_by_user__in=my_users)
        brands=Brands.objects.filter(dropped=False,created_by_user__in=my_users)
        sales_id_array=[]
        sales_array=[]
        # Details table: one row per sale whose shop, product and brand all
        # fall inside the visibility scope computed above.
        details='<div class="table-responsive">'
        details+='<table class="table table-bordered table-striped table-hover dataTable js-exportable"><thead><tr><th>'
        details+=_('Sale ID')
        details+='</th><th>'
        details+=_('Saling when')
        details+='</th><th>'
        details+=_('Saling at')
        details+='</th><th>'
        details+=_('Options')
        details+='</th></tr></thead><tbody>'
        for sale in sales:
            if sale.id not in sales_id_array:
                sds=SalesDetails.objects.filter(sale=sale)
                for sd in sds:
                    ppd=sd.product
                    store=ppd.in_store
                    # pd has data about product and brand
                    pd=ppd.purchase_detail
                    shop=store.shop
                    if shop in shops and \
                        pd.product in products and \
                        pd.brand in brands and \
                        sale.id not in sales_id_array:
                        sales_array.append(sale)
                        sales_id_array.append(sale.id)
                        details+='<tr><td>'
                        details+=sale.identifier+'</td><td>'
                        details+=sale.sold_when_fmt_mx+'</td><td>'
                        details+=str(sale.sold_at)+'</td><td>'
                        details+='<a href="#" data-placement="bottom" '
                        details+='data-toggle="tooltip" '
                        details+='title="' + _('Details') + '" '
                        details+='data-original-title="' + _('Details') + '" '
                        details+='onclick="showDetailsSalesAnalytics(' + str(sale.id)
                        details+=', itm_menu);'
                        details+=' return false;">'
                        # details+='onclick="alert(tst);">'
                        details+='<i class="material-icons">zoom_in</i>'
                        details+='</a></td></tr>'
        details += '</table></div>'
        data='var itm_menu = "' + itm_menu + '"; '
        data+='var module = "sales"; '
        # BUG FIX: this line previously used '=' and silently discarded the
        # two JS variable declarations above; the onclick handler emitted in
        # the details table references itm_menu, so they must be kept (the
        # sibling views by_range/by_customer already use '+=' here).
        data+='var data = ['
        # NOTE(review): the chart plots every sale of the day while the
        # table above only lists the filtered sales_array — confirm this
        # asymmetry is intended.
        for sale in sales:
            data+='{t: new Date('
            # NOTE(review): the JS Date constructor takes a 0-based month;
            # today.month is 1-based — confirm the intended display.
            data+=str(today.year)+','+str(today.month)+','+str(today.day)
            data+=','+str(sale.sold_at.hour)+','
            data+=str(sale.sold_at.minute)+','
            data+=str(sale.sold_at.second)
            data+='), y: ' + str(sale.number_of_sold_products) + '},'
        # Drop the trailing comma produced by the loop above.
        data=data[:len(data)-1]
        data+='];'
    except ObjectDoesNotExist:
        msg=_('There are not any sale today')
        html='show_msg_with_toastr("warning", "' + msg + '");'
        return html
    ds_lbl=_('Sold products')
    # Chart.js configuration: time-scale x-axis, single line dataset.
    html = data
    html += 'var dateFormat = "YYYY MMMM DD";'
    html += 'var ctx = document.getElementById("sales_of_the_day").getContext("2d");'
    html += 'var color = Chart.helpers.color;'
    html += 'var cfg = {'
    html += 'type: "bar",'
    html += 'data: {'
    html += 'datasets: [{'
    html += 'label: "' + ds_lbl + '",'
    html += 'backgroundColor: color(window.chartColors.blue).alpha(0.5).rgbString(),'
    html += 'borderColor: window.chartColors.blue,'
    html += 'data: data,'
    html += 'type: "line",'
    html += 'pointRadius: 3,'
    html += 'fill: false,'
    html += 'lineTension: 0,'
    html += 'borderWidth: 2'
    html += '}]'
    html += '},'
    html += 'options: {'
    html += 'scales: {'
    html += 'xAxes: [{'
    html += 'type: "time",'
    html += 'distribution: "series",'
    html += 'ticks: {'
    html += 'source: "data",'
    html += 'autoSkip: true'
    html += '},'
    html += 'time: {'
    html += 'unit: "minute"'
    html += '}'
    html += '}],'
    html += 'yAxes: [{'
    html += 'scaleLabel: {'
    html += 'display: true,'
    html += 'labelString: "' + _('Sold products amount') + '"'
    html += '}'
    html += '}]'
    html += '},'
    html += 'tooltips: {'
    html += 'intersect: false,'
    html += 'mode: "index",'
    html += 'callbacks: {'
    html += 'label: function(tooltipItem, myData) {'
    html += 'var label = myData.datasets[tooltipItem.datasetIndex].label || "";'
    html += 'if (label) {'
    html += 'label += ": ";'
    html += '}'
    html += 'label += parseInt(tooltipItem.value);'
    html += 'return label;'
    html += '}'
    html += '}'
    html += '}'
    html += '}'
    html += '};'
    html += 'var chart = new Chart(ctx, cfg);'
    html += '$("#secSalesOfTheDay #secDetails").html(\''
    html += details
    html += '\');'
    return html
def index(request):
    """Render the sales-analytics page on GET; otherwise fall back to the
    dashboard home."""
    if request.method == 'GET':
        itm_menu = request.GET.get('itm_menu', 'lnk1')
        context = {
            'form': {'title': _('Sales analytics')},
            'itm_menu': itm_menu,
            'sales_of_the_day_chart': __sales_of_the_day__(request, itm_menu),
        }
        return render(request, 'analytics/sales/index.html', context=context)
    fallback = {
        'app_version': _('Free version'),
        'itm_menu': 'lnkHome',
    }
    return render(request, 'dashboard/index.html', context=fallback)
def by_range(request):
    """POST endpoint: chart + details table for sales in a date/time range.

    Expects POSTed starting/ending date and time strings, plus JSON lists
    of shop/product/brand/customer/user primary keys used as filters.
    Responds with an HTML fragment containing the Chart.js script and the
    details table, or a JSON error for non-POST requests.
    """
    if request.method=='POST':
        # Re-order the date strings into ISO 'YYYY-MM-DD' by fixed slice
        # positions; assumes day-first 'DD/MM/YYYY'-style input — TODO confirm.
        starting_when=request.POST.get('starting_when', None)
        starting_when=starting_when[6:]+'-'+starting_when[3:5]+'-'+starting_when[0:2]
        starting_at=request.POST.get('starting_at', None)
        ending_when=request.POST.get('ending_when', None)
        ending_when=ending_when[6:]+'-'+ending_when[3:5]+'-'+ending_when[0:2]
        ending_at=request.POST.get('ending_at', None)
        itm_menu=request.POST.get('itm_menu', 'lnk1')
        # Each filter arrives as a JSON array of primary keys and is
        # resolved to a queryset of live (not dropped) rows.
        shops=request.POST.get('shops', None)
        shops=json.loads(shops)
        shops=Shops.objects.filter(dropped=False, pk__in=shops)
        products=request.POST.get('products', None)
        products=json.loads(products)
        products=Products.objects.filter(dropped=False, pk__in=products)
        brands=request.POST.get('brands', None)
        brands=json.loads(brands)
        brands=Brands.objects.filter(dropped=False, pk__in=brands)
        customers=request.POST.get('customers', None)
        customers=json.loads(customers)
        customers=Customers.objects.filter(dropped=False, pk__in=customers)
        users=request.POST.get('users', None)
        users=json.loads(users)
        users=Users.objects.filter(dropped=False, pk__in=users)
        try:
            # sales=Sales.objects.filter(sold_when__range=(starting_when, ending_when), sold_at__range=(starting_at, ending_at), dropped=False).order_by('sold_at')
            # Combine date + time strings into datetimes for the range filter.
            starts=dateutil.parser.parse(starting_when+' '+starting_at)
            ends=dateutil.parser.parse(ending_when+' '+ending_at)
            sales=Sales.objects.filter(sold_date__range=(starts, ends), dropped=False,created_by_user__in=users,customer__in=customers).order_by('sold_date')
            #sales=Sales.objects.filter(sold_when__range=(starting_when, ending_when), dropped=False)
            # sales=sales.filter(sold_at__range=(starting_at, ending_at)).order_by('sold_at')
            # print('*********sales******')
            # print(sales)
            if len(sales) < 1:
                msg=_('There are not any sale matching with your query options')
                html_='<p class="label label-default">'
                html_+=msg + '</p> '
                html='<script type="text/javascript">$("#secSalesByRange #msg").html(\''+html_+'\');'
                # html+='$("#menuSalesByRange").hide();'
                html+='$("#secSalesByRange #sales_by_range").hide();'
                html+='$("#secSalesByRange #secDetails").empty();'
                html+='</script>'
                return HttpResponse(html, 'text/html; charset=utf-8')
            # return html
            # Build the details table; a sale is listed once, and only if
            # its shop, product and brand pass the POSTed filters.
            sales_id_array=[]
            sales_array=[]
            details='<div class="table-responsive">'
            details+='<table class="table table-bordered table-striped table-hover dataTable js-exportable"><thead><tr><th>'
            details+=_('Sale ID')
            details+='</th><th>'
            details+=_('Saling when')
            details+='</th><th>'
            details+=_('Saling at')
            details+='</th><th>'
            details+=_('Options')
            details+='</th></tr></thead><tbody>'
            for sale in sales:
                if sale.id not in sales_id_array:
                    sds=SalesDetails.objects.filter(sale=sale)
                    for sd in sds:
                        ppd=sd.product
                        store=ppd.in_store
                        # pd has data about product and brand
                        pd=ppd.purchase_detail
                        shop=store.shop
                        if shop in shops and \
                            pd.product in products and \
                            pd.brand in brands and \
                            sale.id not in sales_id_array:
                            sales_array.append(sale)
                            sales_id_array.append(sale.id)
                            details+='<tr><td>'
                            details+=sale.identifier+'</td><td>'
                            details+=sale.sold_when_fmt_mx+'</td><td>'
                            details+=str(sale.sold_at)+'</td><td>'
                            details+='<a href="#" data-placement="bottom" '
                            details+='data-toggle="tooltip" '
                            details+='title="' + _('Details') + '" '
                            details+='data-original-title="' + _('Details') + '" '
                            details+='onclick="showDetails(' + str(sale.id)
                            details+=', module, false, false, false, false, itm_menu);'
                            details+=' return false;">'
                            # details+='onclick="alert(tst);">'
                            details+='<i class="material-icons">zoom_in</i>'
                            details+='</a></td></tr>'
            if len(sales_array) < 1:
                msg=_('There are not any sale matching with your query options')
                html_='<p class="label label-default">'
                html_+=msg + '</p> '
                html='<script type="text/javascript">$("#secSalesByRange #msg").html(\''+html_+'\');'
                # html+='$("#menuSalesByRange").hide();'
                html+='$("#secSalesByRange #sales_by_range").hide();'
                html+='$("#secSalesByRange #secDetails").empty();'
                html+='</script>'
                return HttpResponse(html, 'text/html; charset=utf-8')
            details += '</table></div>'
            # Emit one {t, y} point per filtered sale for the Chart.js data.
            data='<script type="text/javascript"> '
            data+='var itm_menu = "' + itm_menu + '"; '
            data+='var module = "sales"; '
            data+='var data = ['
            #for sale in sales:
            for sale in sales_array:
                sold_when=sale.sold_when
                sold_at=sale.sold_at
                data+='{t: new Date('
                data+=str(sold_when.year)+','+str(sold_when.month)+','+str(sold_when.day)
                data+=','+str(sold_at.hour)+','
                data+=str(sold_at.minute)+','
                data+=str(sold_at.second)
                data+='), y: ' + str(sale.number_of_sold_products) + '},'
            # Drop the trailing comma produced by the loop above.
            data=data[:len(data)-1]
            data+='];'
        except ObjectDoesNotExist:
            msg=_('There are not any sale matching with your query options')
            html='<script type="text/javascript"> '
            html+='show_msg_with_toastr("warning", "' + msg + '");'
            html+='</script>'
            return HttpResponse(html, 'text/html; charset=utf-8')
        # return html
        ds_lbl=_('Sold products')
        # Chart.js configuration: time-scale x-axis, single line dataset.
        html = data
        html += 'var dateFormat = "YYYY MMMM DD";'
        html += 'var ctx = document.getElementById("sales_by_range").getContext("2d");'
        html += 'var color = Chart.helpers.color;'
        html += 'var cfg = {'
        html += 'type: "bar",'
        html += 'data: {'
        html += 'datasets: [{'
        html += 'label: "' + ds_lbl + '",'
        html += 'backgroundColor: color(window.chartColors.blue).alpha(0.5).rgbString(),'
        html += 'borderColor: window.chartColors.blue,'
        html += 'data: data,'
        html += 'type: "line",'
        html += 'pointRadius: 3,'
        html += 'fill: false,'
        html += 'lineTension: 0,'
        html += 'borderWidth: 2'
        html += '}]'
        html += '},'
        html += 'options: {'
        html += 'scales: {'
        html += 'xAxes: [{'
        html += 'type: "time",'
        html += 'distribution: "series",'
        html += 'ticks: {'
        html += 'source: "data",'
        html += 'autoSkip: true'
        html += '},'
        html += 'time: {'
        html += 'unit: "minute"'
        html += '}'
        html += '}],'
        html += 'yAxes: [{'
        html += 'scaleLabel: {'
        html += 'display: true,'
        html += 'labelString: "' + _('Sold products amount') + '"'
        html += '}'
        html += '}]'
        html += '},'
        html += 'tooltips: {'
        html += 'intersect: false,'
        html += 'mode: "index",'
        html += 'callbacks: {'
        html += 'label: function(tooltipItem, myData) {'
        html += 'var label = myData.datasets[tooltipItem.datasetIndex].label || "";'
        html += 'if (label) {'
        html += 'label += ": ";'
        html += '}'
        html += 'label += parseInt(tooltipItem.value);'
        html += 'return label;'
        html += '}'
        html += '}'
        html += '}'
        html += '}'
        html += '};'
        html += 'var chart = new Chart(ctx, cfg);'
        html += '$("#secSalesByRange #sales_by_range").show();'
        html += '$("#secSalesByRange #msg").empty();'
        html += '$("#secSalesByRange #secDetails").html(\''
        html += details
        html += '\');'
        # html += '$("#menuSalesByRange").show();'
        html+='</script>'
        html+='<script src="/static/js/dashboard/search-results/after-load-html.js"></script>'
        # return html
        return HttpResponse(html, 'text/html; charset=utf-8')
    msg = _('You do not have permission to perform this request')
    return JsonResponse({'status': 'error', 'msg': msg})
def by_customer(request):
    """POST endpoint: chart + details table for one customer's sales.

    Expects a customer pk plus JSON lists of shop/product/brand/user
    primary keys as filters.  Responds with an HTML fragment containing
    the Chart.js script and the details table, or a JSON error for
    non-POST requests.

    BUG FIX: a stray second '</script>' was emitted after the trailing
    '<script src=...>' tag (the inline script is already closed before
    it), producing malformed markup; it has been removed to match the
    sibling view by_range.
    """
    if request.method=='POST':
        customer=request.POST.get('customer_obj', None)
        # Each filter arrives as a JSON array of primary keys and is
        # resolved to a queryset of live (not dropped) rows.
        shops=request.POST.get('shops', None)
        shops=json.loads(shops)
        shops=Shops.objects.filter(dropped=False, pk__in=shops)
        products=request.POST.get('products', None)
        products=json.loads(products)
        products=Products.objects.filter(dropped=False, pk__in=products)
        brands=request.POST.get('brands', None)
        brands=json.loads(brands)
        brands=Brands.objects.filter(dropped=False, pk__in=brands)
        users=request.POST.get('users', None)
        users=json.loads(users)
        users=Users.objects.filter(dropped=False, pk__in=users)
        itm_menu=request.POST.get('itm_menu', 'lnk1')
        try:
            customer=Customers.objects.get(pk=customer)
        except ObjectDoesNotExist:
            msg=_('The specified customer does not exists')
            html='<script type="text/javascript"> '
            html+='show_msg_with_toastr("error", "' + msg + '");'
            html+='</script>'
            return HttpResponse(html, 'text/html; charset=utf-8')
        try:
            sales=Sales.objects.filter(customer=customer, dropped=False, created_by_user__in=users).order_by('sold_date')
            if len(sales) < 1:
                msg=_('There are not any sale matching with your query options')
                html_='<p class="label label-default">'
                html_+=msg + '</p> '
                html='<script type="text/javascript">$("#secSalesByCustomers #msg").html(\''+html_+'\');'
                # html+='$("#menuSalesByCustomers").hide();'
                html+='$("#secSalesByCustomers #sales_by_customers").hide();'
                html+='$("#secSalesByCustomers #secDetails").empty();'
                html+='</script>'
                return HttpResponse(html, 'text/html; charset=utf-8')
            # return html
            # Build the details table; a sale is listed once, and only if
            # its shop, product and brand pass the POSTed filters.
            details='<div class="table-responsive">'
            details+='<table class="table table-bordered table-striped table-hover dataTable js-exportable"><thead><tr><th>'
            details+=_('Sale ID')
            details+='</th><th>'
            details+=_('Saling when')
            details+='</th><th>'
            details+=_('Saling at')
            details+='</th><th>'
            details+=_('Options')
            details+='</th></tr></thead><tbody>'
            sales_id_array=[]
            sales_array=[]
            for sale in sales:
                if sale.id not in sales_id_array:
                    sds=SalesDetails.objects.filter(sale=sale)
                    for sd in sds:
                        ppd=sd.product
                        store=ppd.in_store
                        # pd has data about product and brand
                        pd=ppd.purchase_detail
                        shop=store.shop
                        if shop in shops and \
                            pd.product in products and \
                            pd.brand in brands and \
                            sale.id not in sales_id_array:
                            sales_array.append(sale)
                            sales_id_array.append(sale.id)
                            details+='<tr><td>'
                            details+=sale.identifier+'</td><td>'
                            details+=sale.sold_when_fmt_mx+'</td><td>'
                            details+=str(sale.sold_at)+'</td><td>'
                            details+='<a href="#" data-placement="bottom" '
                            details+='data-toggle="tooltip" '
                            details+='title="' + _('Details') + '" '
                            details+='data-original-title="' + _('Details') + '" '
                            details+='onclick="showDetails(' + str(sale.id)
                            details+=', module, false, false, false, false, itm_menu);'
                            details+=' return false;">'
                            # details+='onclick="alert(tst);">'
                            details+='<i class="material-icons">zoom_in</i>'
                            details+='</a></td></tr>'
            if len(sales_array) < 1:
                msg=_('There are not any sale matching with your query options')
                html_='<p class="label label-default">'
                html_+=msg + '</p> '
                html='<script type="text/javascript">$("#secSalesByCustomers #msg").html(\''+html_+'\');'
                # html+='$("#menuSalesByCustomers").hide();'
                html+='$("#secSalesByCustomers #sales_by_customers").hide();'
                html+='$("#secSalesByCustomers #secDetails").empty();'
                html+='</script>'
                return HttpResponse(html, 'text/html; charset=utf-8')
            details += '</table></div>'
            # Emit one {t, y} point per filtered sale for the Chart.js data.
            data='<script type="text/javascript"> '
            data+='var itm_menu = "' + itm_menu + '"; '
            data+='var module = "sales"; '
            data+='var data = ['
            # for sale in sales:
            for sale in sales_array:
                sold_when=sale.sold_when
                sold_at=sale.sold_at
                data+='{t: new Date('
                data+=str(sold_when.year)+','+str(sold_when.month)+','+str(sold_when.day)
                data+=','+str(sold_at.hour)+','
                data+=str(sold_at.minute)+','
                data+=str(sold_at.second)
                data+='), y: ' + str(sale.number_of_sold_products) + '},'
            # Drop the trailing comma produced by the loop above.
            data=data[:len(data)-1]
            data+='];'
        except ObjectDoesNotExist:
            msg=_('There are not any sale matching with your query options')
            html='<script type="text/javascript"> '
            html+='show_msg_with_toastr("warning", "' + msg + '");'
            html+='</script>'
            return HttpResponse(html, 'text/html; charset=utf-8')
        # return html
        ds_lbl=_('Sold products')
        # Chart.js configuration: time-scale x-axis, single line dataset.
        html = data
        html += 'var dateFormat = "YYYY MMMM DD";'
        html += 'var ctx = document.getElementById("sales_by_customers").getContext("2d");'
        html += 'var color = Chart.helpers.color;'
        html += 'var cfg = {'
        html += 'type: "bar",'
        html += 'data: {'
        html += 'datasets: [{'
        html += 'label: "' + ds_lbl + '",'
        html += 'backgroundColor: color(window.chartColors.blue).alpha(0.5).rgbString(),'
        html += 'borderColor: window.chartColors.blue,'
        html += 'data: data,'
        html += 'type: "line",'
        html += 'pointRadius: 3,'
        html += 'fill: false,'
        html += 'lineTension: 0,'
        html += 'borderWidth: 2'
        html += '}]'
        html += '},'
        html += 'options: {'
        html += 'scales: {'
        html += 'xAxes: [{'
        html += 'type: "time",'
        html += 'distribution: "series",'
        html += 'ticks: {'
        html += 'source: "data",'
        html += 'autoSkip: true'
        html += '},'
        html += 'time: {'
        html += 'unit: "minute"'
        html += '}'
        html += '}],'
        html += 'yAxes: [{'
        html += 'scaleLabel: {'
        html += 'display: true,'
        html += 'labelString: "' + _('Sold products amount') + '"'
        html += '}'
        html += '}]'
        html += '},'
        html += 'tooltips: {'
        html += 'intersect: false,'
        html += 'mode: "index",'
        html += 'callbacks: {'
        html += 'label: function(tooltipItem, myData) {'
        html += 'var label = myData.datasets[tooltipItem.datasetIndex].label || "";'
        html += 'if (label) {'
        html += 'label += ": ";'
        html += '}'
        html += 'label += parseInt(tooltipItem.value);'
        html += 'return label;'
        html += '}'
        html += '}'
        html += '}'
        html += '}'
        html += '};'
        html += 'var chart = new Chart(ctx, cfg);'
        html += '$("#secSalesByCustomers #sales_by_customers").show();'
        html += '$("#secSalesByCustomers #msg").empty();'
        html += '$("#secSalesByCustomers #secDetails").html(\''
        html += details
        html += '\');'
        html+='</script>'
        html+='<script src="/static/js/dashboard/search-results/after-load-html.js"></script>'
        # return html
        return HttpResponse(html, 'text/html; charset=utf-8')
    msg = _('You do not have permission to perform this request')
    return JsonResponse({'status': 'error', 'msg': msg})
def all_customers(request):
if request.method=='GET':
shops=request.GET.get('shops', None)
shops=json.loads(shops)
shops=Shops.objects.filter(dropped=False, pk__in=shops)
products=request.GET.get('products', None)
products=json.loads(products)
products=Products.objects.filter(dropped=False, pk__in=products)
brands=request.GET.get('brands', None)
brands=json.loads(brands)
brands=Brands.objects.filter(dropped=False, pk__in=brands)
users=request.GET.get('users', None)
users=json.loads(users)
users=Users.objects.filter(dropped=False, pk__in=users)
itm_menu=request.GET.get('itm_menu', None)
try:
sales=Sales.objects.filter(dropped=False,created_by_user__in=users).order_by('sold_date')
if len(sales) < 1:
msg=_('There are not any sale matching with your query options')
html_='<p class="label label-default">'
html_+=msg + '</p> '
html='<script type="text/javascript">$("#secSalesByCustomers #msg").html(\''+html_+'\');'
# html+='$("#menuSalesByCustomers").hide();'
html+='$("#secSalesByCustomers #sales_by_customers").hide();'
html+='$("#secSalesByCustomers #secDetails").empty();'
html+='</script>'
return HttpResponse(html, 'text/html; charset=utf-8')
# return html
details='<div class="table-responsive">'
details+='<table class="table table-bordered table-striped table-hover dataTable js-exportable"><thead><tr><th>'
details+=_('Sale ID')
details+='</th><th>'
details+=_('Saling when')
details+='</th><th>'
details+=_('Saling at')
details+='</th><th>'
details+=_('Options')
details+='</th></tr></thead><tbody>'
sales_id_array=[]
sales_array=[]
for sale in sales:
if sale.id not in sales_id_array:
sds=SalesDetails.objects.filter(sale=sale)
for sd in sds:
ppd=sd.product
store=ppd.in_store
# pd has data about product and brand
pd=ppd.purchase_detail
shop=store.shop
if shop in shops and \
pd.product in products and \
pd.brand in brands and \
sale.id not in sales_id_array:
sales_array.append(sale)
sales_id_array.append(sale.id)
details+='<tr><td>'
details+=sale.identifier+'</td><td>'
details+=sale.sold_when_fmt_mx+'</td><td>'
details+=str(sale.sold_at)+'</td><td>'
details+='<a href="#" data-placement="bottom" '
details+='data-toggle="tooltip" '
details+='title="' + _('Details') + '" '
details+='data-original-title="' + _('Details') + '" '
details+='onclick="showDetails(' + str(sale.id)
details+=', module, false, false, false, false, itm_menu);'
details+=' return false;">'
# details+='onclick="alert(tst);">'
details+='<i class="material-icons">zoom_in</i>'
details+='</a></td></tr>'
if len(sales_array) < 1:
msg=_('There are not any sale matching with your query options')
html_='<p class="label label-default">'
html_+=msg + '</p> '
html='<script type="text/javascript">$("#secSalesByCustomers #msg").html(\''+html_+'\');'
# html+='$("#menuSalesByCustomers").hide();'
html+='$("#secSalesByCustomers #sales_by_customers").hide();'
html+='$("#secSalesByCustomers #secDetails").empty();'
html+='</script>'
return HttpResponse(html, 'text/html; charset=utf-8')
details += '</table></div>'
data='<script type="text/javascript"> '
data+='var itm_menu = "' | |
the mw_lutman for the AWG8
# only x2 and x3 downsample_swp_points available
angles = np.arange(0, 341, 20 * downsample_swp_points)
p = mqo.conditional_oscillation_seq_multi(
Q_idxs_target,
Q_idxs_control,
Q_idxs_parked,
platf_cfg=self.cfg_openql_platform_fn(),
disable_cz=disable_cz,
disabled_cz_duration=disabled_cz_duration_ns,
angles=angles,
wait_time_before_flux=wait_time_before_flux_ns,
wait_time_after_flux=wait_time_after_flux_ns,
flux_codeword=flux_codeword,
cz_repetitions=cz_repetitions,
parked_qubit_seq=parked_qubit_seq,
disable_parallel_single_q_gates=disable_parallel_single_q_gates
)
s = swf.OpenQL_Sweep(
openql_program=p,
CCL=self.instr_CC.get_instr(),
parameter_name="Phase",
unit="deg",
)
MC.set_sweep_function(s)
MC.set_sweep_points(p.sweep_points)
d = self.get_int_avg_det(qubits=list_qubits_used)
MC.set_detector_function(d)
MC.run(
"conditional_oscillation_{}_x{}_{}{}".format(
list_qubits_used, cz_repetitions,
self.msmt_suffix, label,
),
disable_snapshot_metadata=disable_metadata,
)
if len(pairs) > 1:
# qb_ro_order = np.sum([ list(self._acq_ch_map[key].keys()) for key in self._acq_ch_map.keys()])
# qubits_by_feedline = [['D1','X1'],
# ['D2','Z1','D3','D4','D5','D7','X2','X3','Z3'],
# ['D6','D8','D9','X4','Z2','Z4']]
# qb_ro_order = sorted(np.array(pairs).flatten().tolist(),
# key=lambda x: [i for i,qubits in enumerate(qubits_by_feedline) if x in qubits])
qb_ro_order = [qb for qb_dict in self._acq_ch_map.values() for qb in qb_dict.keys()]
else:
# qb_ro_order = [ list(self._acq_ch_map[key].keys()) for key in self._acq_ch_map.keys()][0]
qb_ro_order = [pairs[0][0], pairs[0][1]]
result_dict = {}
for i, pair in enumerate(pairs):
ch_osc = qb_ro_order.index(pair[0])
ch_spec = qb_ro_order.index(pair[1])
options_dict = {
'ch_idx_osc': ch_osc,
'ch_idx_spec': ch_spec
}
a = ma2.Conditional_Oscillation_Analysis(
options_dict=options_dict,
extract_only=extract_only)
result_dict['pair_{}_delta_phi_a'.format(i + 1)] = \
a.proc_data_dict['quantities_of_interest']['phi_cond'].n % 360
result_dict['pair_{}_missing_frac_a'.format(i + 1)] = \
a.proc_data_dict['quantities_of_interest']['missing_fraction'].n
result_dict['pair_{}_offset_difference_a'.format(i + 1)] = \
a.proc_data_dict['quantities_of_interest']['offs_diff'].n
result_dict['pair_{}_phi_0_a'.format(i + 1)] = \
(a.proc_data_dict['quantities_of_interest']['phi_0'].n + 180) % 360 - 180
result_dict['pair_{}_phi_1_a'.format(i + 1)] = \
(a.proc_data_dict['quantities_of_interest']['phi_1'].n + 180) % 360 - 180
return result_dict
def measure_parity_check_flux_dance(
self,
target_qubits: List[str],
control_qubits: List[str],
flux_dance_steps: List[int] = [1, 2, 3, 4],
flux_codeword: str = 'flux-dance',
refocusing: bool = False,
ramsey_qubits: Union[List[str], bool] = None,
parking_qubits: List[str] = None,
nr_flux_dance_before_cal_points: int = None,
phase_offsets: List[float] = None,
control_cases_to_measure: List[str] = None,
downsample_angle_points: int = 1,
prepare_for_timedomain=True,
initialization_msmt: bool = False,
wait_time_before_flux_ns: int = 0,
wait_time_after_flux_ns: int = 0,
label_suffix="",
MC=None,
disable_metadata=False,
plotting=True,
):
"""
Measures a parity check while playing codewords that are part
of a flux dance (originally used for surface code).
This experiment is similar to `measure_conditional_oscillation_multi()`,
but plays composite flux codewords instead of only individual ones
for the involved qubits.
Specifically, a conditional oscillation is performed between the
target qubit and each control qubit, where the target qubit is being ramsey'd
and the control qubits are being prepared in every possible combination
of 0 and 1 (for example, ['00','01','10','11']).
These combinations can also be given explicitly in `control_cases_to_measure`,
then only those control cases will be prepared. This option is still
experimental and may not work as expected!
Parkings have to be taken care of by the flux dance codewords,
and lutmans of parking qubit have to be prepared externally before this measurement.
The list of flux codewords to be played inbetween the two microwave
pulses of the conditional oscillation is assembled from the
`flux_codeword`, `flux_dance_steps` and `refocusing` arguments, and
will contain as many codewords as there are steps given.
By analyzing the phases of the oscillation for each prepared case,
the quality of the parity check can be assessed.
Args:
target_qubits (List[str]):
List of target qubit labels. These will be ramsey'd.
control_qubits (List[str]):
List of control qubit labels. These will be prepared in either 0 or 1.
Has to be given in readout (feedline) order!
Otherwise readout results will be scrambled.
flux_dance_steps (List[int]):
Numbers of flux dance codewords that should be played inbetween
the MW pulses in the conditional oscillation. Has to match
the definitons in the CC config file for the given `flux_codeword`.
flux_codeword (str):
The flux codeword to build flux dance list with. Will be combined
with `flux_dance_steps` and `refocusing`.
Codeword from this list will then be played inbetween the MW pulses
in the conditional oscillation.
Codewords have to be defined in CC config.
refocusing (bool):
If True, appends the 'refocus' flag to `flux_codeword`
when assembling the flux codeword list, thereby turning on
refocusing pulses on qubits that are not used during the flux dance steps.
Corresponding refocusing codewords have to be defined in CC config.
ramsey_qubits (Union[List[str], bool]):
Apart from the target qubit, also additional qubits can be ramsey'd.
This is done to mimic the real world scenario of the flux dance
being executed as part of a QEC code.
If given as list of labels, explicitly those qubits will be ramsey'd.
If given as boolean, will turn on or off the automatic selection of
all other ancillas of the same type as the target qubit.
This is only implemented for surface-17 and may not match the desired behaviour.
nr_flux_dance_before_cal_points (int):
For investigation of the effect of fluxing on readout and for debugging purposes,
The same flux dance as in the main experiment can be applied
`nr_flux_dance_before_cal_points` times before the calibration points.
phase_offsets: List[float] = None,
Phase offsets to apply to all phase-gates of the conditional oscillation,
given per target qubit.
control_cases_to_measure (List[str]):
Explicit list of control qubit preparation cases that should be measured.
Experimental! May produce unexpected results.
downsample_angle_points (int):
Factor by which to reduce the number of points
in the conditional oscillations.
Restricted to 2 and 3, due to limitation in MW codewords.
prepare_for_timedomain (bool):
Whether the instruments should be prepared for time domain measurement.
Includes preparation of readout, flux and MW pulses for the given qubits.
This takes a significant amount of time and can be disabled if
the instruments are already prepared, for example because the
same measurement was executed right before.
initialization_msmt (bool):
Whether to initialize all qubits via measurement
at the beginning of each experiment.
wait_time_before_flux_ns (int):
additional waiting time (in ns) before the flux dance.
wait_time_after_flux_ns (int):
additional waiting time (in ns) after the flux dance, before
the final mw pulse
label_suffix (str):
String to be appended at the end of the measurement label.
MC (`pycqed.measurement.MeasurementControl`):
MeasurementControl object. Will be taken from instance parameter if None.
disable_metadata (bool)
Whether experiment metadata like intrument snapshots etc should
be saved in the hdf5 file.
plotting (bool):
Whether the analysis should generate plots. Can save some time.
Returns:
Analysis result.
"""
if self.ro_acq_weight_type() != 'optimal':
# this occurs because the detector groups qubits per feedline.
# If you do not pay attention, this will mess up the analysis of
# this experiment.
raise ValueError('Current analysis is not working with {}'.format(self.ro_acq_weight_type()))
if MC is None:
MC = self.instr_MC.get_instr()
# if `ramsey_qubits` and/or `flux_dance_steps` are given, they will be used literally.
# otherwise, they will be set for the standard experiment for the target qubit type
if 'X' in target_qubits[0]:
if ramsey_qubits and type(ramsey_qubits) is bool:
ramsey_qubits = [qb for qb in ['X1', 'X2', 'X3', 'X4'] if qb not in target_qubits]
if not flux_dance_steps:
flux_dance_steps = [1, 2, 3, 4]
elif 'Z' in target_qubits[0]:
if ramsey_qubits and type(ramsey_qubits) is bool:
ramsey_qubits = [qb for qb in ['Z1', 'Z2', 'Z3', 'Z4'] if qb not in target_qubits]
if not flux_dance_steps:
flux_dance_steps = [5, 6, 7, 8]
else:
log.warning(f"Target qubit {target_qubits[0]} not X or Z!")
# if ramsey_qubits is given as list of qubit names,
# only those will be used and converted to qubit numbers.
# if ramsey_qubits is given as boolean,
# all ancillas that are not part of the parity check will be ramseyd
if ramsey_qubits:
Q_idxs_ramsey = []
for i, qb in enumerate(ramsey_qubits):
assert qb in self.qubits()
if qb in target_qubits:
log.warning(f"Ramsey qubit {qb} already given as ancilla qubit!")
Q_idxs_ramsey += [self.find_instrument(qb).cfg_qubit_nr()]
Q_idxs_target = []
for i, target_qubit in enumerate(target_qubits):
log.info(f"Parity {target_qubit} - {control_qubits}, flux dance steps {flux_dance_steps}")
assert target_qubit in self.qubits()
Q_idxs_target += [self.find_instrument(target_qubit).cfg_qubit_nr()]
# filter control qubits based on control_cases_to_measure,
# then the cases will be created based on the filtered control qubits
Q_idxs_control = []
assert all([qb in self.qubits() for qb in control_qubits])
if not control_cases_to_measure:
# if cases are not given, measure all cases for all control qubits
control_qubits_by_case = control_qubits
Q_idxs_control += [self.find_instrument(Q).cfg_qubit_nr() for Q in control_qubits_by_case]
cases = ['{:0{}b}'.format(i, len(Q_idxs_control)) for i in range(2 ** len(Q_idxs_control))]
else:
# if cases are given, prepare and measure only them
# select only the control qubits needed, avoid repetition
control_qubits_by_case = []
for case in control_cases_to_measure:
control_qubits_by_case += [control_qubits[i] for i, c | |
<reponame>AbhinavGopal/ts_tutorial
"""Agents for neural net bandit problems.
We implement three main types of agent:
- epsilon-greedy (fixed epsilon, annealing epsilon)
- dropout (arXiv:1506.02142)
- ensemble sampling
All code is specialized to the setting of 2-layer fully connected MLPs.
"""
import numpy as np
import numpy.random as rd
from base.agent import Agent
from ensemble_nn.env_nn import TwoLayerNNBandit
class TwoLayerNNEpsilonGreedy(Agent):
  """Epsilon-greedy bandit agent whose reward model is a 2-layer Leaky-ReLU MLP.

  The model maps an action vector to a scalar predicted reward:
    out = sum(LeakyReLU(W1 . action) * W2)
  and is trained by minibatch SGD on a squared-error data term (scaled by the
  noise variance) plus a Gaussian-prior penalty on the weights.
  """

  def __init__(self,
               input_dim,
               hidden_dim,
               actions,
               time_horizon,
               prior_var,
               noise_var,
               epsilon_param=0.0,
               learning_rate=1e-1,
               num_gradient_steps=1,
               batch_size=64,
               lr_decay=1,
               leaky_coeff=0.01):
    """Epsilon-greedy agent with two-layer neural network model.

    Args:
      input_dim: int dimension of input.
      hidden_dim: int size of hidden layer.
      actions: numpy array of valid actions (generated by environment).
      time_horizon: int size to pre-allocate data storage.
      prior_var: prior variance for random initialization.
      noise_var: noise variance for update.
      epsilon_param: fixed epsilon choice.
      learning_rate: sgd learning rate.
      num_gradient_steps: how many sgd to do.
      batch_size: size of batch.
      lr_decay: decay learning rate.
      leaky_coeff: slope of "negative" part of the Leaky ReLU.
    """
    # W1 is (hidden_dim, input_dim); W2 is (hidden_dim,).
    self.W1 = 1e-2 * rd.randn(hidden_dim, input_dim)  # initialize weights
    self.W2 = 1e-2 * rd.randn(hidden_dim)
    self.actions = actions
    self.num_actions = len(actions)
    self.T = time_horizon
    self.prior_var = prior_var
    self.noise_var = noise_var
    self.epsilon_param = epsilon_param
    self.lr = learning_rate
    self.num_gradient_steps = num_gradient_steps  # number of gradient steps we
    # take during each time period
    self.batch_size = batch_size
    self.lr_decay = lr_decay
    self.leaky_coeff = leaky_coeff

    # Pre-allocated (action, reward) history, filled in by update_observation.
    self.action_hist = np.zeros((self.T, input_dim))
    self.reward_hist = np.zeros(self.T)

  def _model_forward(self, input_actions):
    """Neural network forward pass.

    Args:
      input_actions: actions to evaluate (numpy array).

    Returns:
      out: network prediction.
      cache: tuple holding intermediate activations for backprop.
    """
    # (batch, 1, input_dim) * (hidden, input_dim), summed over input_dim,
    # yields the (batch, hidden) pre-activations.
    affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1, axis=2)
    # Leaky ReLU: max(a*x, x) is x for x >= 0 and a*x for x < 0 (0 < a < 1).
    relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
    out = np.sum(relu_out * self.W2, axis=1)
    cache = (input_actions, affine_out, relu_out)
    return out, cache

  def _model_backward(self, out, cache, y):
    """Neural network backward pass (for backpropagation).

    Args:
      out: output of batch of predictions.
      cache: intermediate activations from _model_forward.
      y: target labels.

    Returns:
      dW1: gradients for layer 1.
      dW2: gradients for layer 2.
    """
    input_actions, affine_out, relu_out = cache
    # Derivative of the (1 / noise_var) * (y - out)^2 data term w.r.t. out.
    dout = -(2 / self.noise_var) * (y - out)
    dW2 = np.sum(dout[:, np.newaxis] * relu_out, axis=0)
    drelu_out = dout[:, np.newaxis] * self.W2
    # Leaky-ReLU derivative: 1 where pre-activation >= 0, else leaky_coeff.
    mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
    daffine_out = mask * drelu_out
    dW1 = np.dot(daffine_out.T, input_actions)
    return dW1, dW2

  def _update_model(self, t):
    """Update the model by taking a few gradient steps."""
    for i in range(self.num_gradient_steps):
      # sample minibatch (with replacement) from the first t+1 observations
      batch_ind = rd.randint(t + 1, size=self.batch_size)
      action_batch = self.action_hist[batch_ind]
      reward_batch = self.reward_hist[batch_ind]
      out, cache = self._model_forward(action_batch)
      dW1, dW2 = self._model_backward(out, cache, reward_batch)
      # Average the data gradient over the batch, then add the gradient of
      # the Gaussian-prior penalty (its weight shrinks as data accumulates).
      dW1 /= self.batch_size
      dW2 /= self.batch_size
      dW1 += 2 / (self.prior_var * (t + 1)) * self.W1
      dW2 += 2 / (self.prior_var * (t + 1)) * self.W2
      self.W1 -= self.lr * dW1
      self.W2 -= self.lr * dW2

  def update_observation(self, observation, action, reward):
    """Learn from observations."""
    t = observation  # observation is the current time step index
    self.action_hist[t] = self.actions[action]
    self.reward_hist[t] = reward
    self._update_model(t)
    self.lr *= self.lr_decay

  def pick_action(self, observation):
    """Fixed epsilon-greedy action selection."""
    u = rd.rand()
    if u < self.epsilon_param:
      # Explore: uniformly random action.
      action = rd.randint(self.num_actions)
    else:
      # Exploit: action with the highest predicted reward.
      model_out, _ = self._model_forward(self.actions)
      action = np.argmax(model_out)
    return action
class TwoLayerNNEpsilonGreedyAnnealing(TwoLayerNNEpsilonGreedy):
  """Epsilon-greedy variant whose exploration rate decays over time.

  At step t the exploration probability is
    epsilon = self.epsilon_param / (self.epsilon_param + t),
  so the agent explores heavily early on and becomes greedy as t grows.
  """

  def pick_action(self, observation):
    """Recompute the annealed epsilon each step, then act epsilon-greedily."""
    step = observation
    anneal_eps = self.epsilon_param / (self.epsilon_param + step)
    if rd.rand() < anneal_eps:
      # Exploration branch: uniform draw over the action set.
      return rd.randint(self.num_actions)
    # Exploitation branch: greedy with respect to the current model.
    predictions, _ = self._model_forward(self.actions)
    return np.argmax(predictions)
class TwoLayerNNDropout(TwoLayerNNEpsilonGreedy):
  """Dropout is used to represent model uncertainty.

  ICML paper suggests this is Bayesian uncertainty: arXiv:1506.02142.
  Follow up work suggests that this is flawed: TODO(iosband) add link.

  Dropout stays "on" at action-selection time, so every forward pass is a
  stochastic sample from the (approximate) posterior over networks.
  """

  def __init__(self,
               input_dim,
               hidden_dim,
               actions,
               time_horizon,
               prior_var,
               noise_var,
               drop_prob=0.5,
               learning_rate=1e-1,
               num_gradient_steps=1,
               batch_size=64,
               lr_decay=1,
               leaky_coeff=0.01):
    """Dropout agent with two-layer neural network model.

    Args:
      input_dim: int dimension of input.
      hidden_dim: int size of hidden layer.
      actions: numpy array of valid actions (generated by environment).
      time_horizon: int size to pre-allocate data storage.
      prior_var: prior variance for random initialization.
      noise_var: noise variance for update.
      drop_prob: probability of randomly zero-ing out weight component.
      learning_rate: sgd learning rate.
      num_gradient_steps: how many sgd to do.
      batch_size: size of batch.
      lr_decay: decay learning rate.
      leaky_coeff: slope of "negative" part of the Leaky ReLU.
    """
    # W1 is (hidden_dim, input_dim); W2 is (hidden_dim,).
    self.W1 = 1e-2 * rd.randn(hidden_dim, input_dim)
    self.W2 = 1e-2 * rd.randn(hidden_dim)
    self.actions = actions
    self.num_actions = len(actions)
    self.T = time_horizon
    self.prior_var = prior_var
    self.noise_var = noise_var
    self.p = drop_prob
    self.lr = learning_rate
    self.num_gradient_steps = num_gradient_steps
    self.batch_size = batch_size
    self.lr_decay = lr_decay
    self.leaky_coeff = leaky_coeff

    # Pre-allocated (action, reward) history, filled in by update_observation.
    self.action_hist = np.zeros((self.T, input_dim))
    self.reward_hist = np.zeros(self.T)

  def _model_forward(self, input_actions):
    """Neural network forward pass.

    Note that dropout remains "on" so that forward pass is stochastic.

    Args:
      input_actions: actions to evaluate (numpy array).

    Returns:
      out: network prediction.
      cache: tuple holding intermediate activations for backprop.
    """
    affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1, axis=2)
    relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
    # Fresh Bernoulli(1 - p) mask on every call, for train and act alike.
    dropout_mask = rd.rand(*relu_out.shape) > self.p
    dropout_out = relu_out * dropout_mask
    out = np.sum(dropout_out * self.W2, axis=1)
    cache = (input_actions, affine_out, relu_out, dropout_mask, dropout_out)
    return out, cache

  def _model_backward(self, out, cache, y):
    """Neural network backward pass (for backpropagation).

    Args:
      out: output of batch of predictions.
      cache: intermediate activations from _model_forward.
      y: target labels.

    Returns:
      dW1: gradients for layer 1.
      dW2: gradients for layer 2.
    """
    input_actions, affine_out, relu_out, dropout_mask, dropout_out = cache
    dout = -(2 / self.noise_var) * (y - out)
    # BUG FIX: the forward pass computes out = sum(dropout_out * W2), so the
    # W2 gradient must use the post-dropout activations (dropout_out), not
    # relu_out.  Using relu_out let dropped units leak gradient into W2,
    # inconsistent with the masking applied to dW1 below.
    dW2 = np.sum(dout[:, np.newaxis] * dropout_out, axis=0)
    ddropout_out = dout[:, np.newaxis] * self.W2
    # Gradient flows only through units retained by the dropout mask.
    drelu_out = ddropout_out * dropout_mask
    relu_mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
    daffine_out = relu_mask * drelu_out
    dW1 = np.dot(daffine_out.T, input_actions)
    return dW1, dW2

  def pick_action(self, observation):
    """Select the greedy action according to the output of a stochastic
    forward pass."""
    model_out, _ = self._model_forward(self.actions)
    action = np.argmax(model_out)
    return action
class TwoLayerNNEnsembleSampling(Agent):
"""An ensemble sampling agent maintains an ensemble of neural nets, each
fitted to a perturbed prior and perturbed observations."""
  def __init__(self,
               input_dim,
               hidden_dim,
               actions,
               time_horizon,
               prior_var,
               noise_var,
               num_models=10,
               learning_rate=1e-1,
               num_gradient_steps=1,
               batch_size=64,
               lr_decay=1,
               leaky_coeff=0.01):
    """Ensemble sampling agent with two-layer neural network model.

    Args:
      input_dim: int dimension of input.
      hidden_dim: int size of hidden layer.
      actions: numpy array of valid actions (generated by environment).
      time_horizon: int size to pre-allocate data storage.
      prior_var: prior variance for random initialization.
      noise_var: noise variance for update.
      num_models: Number of ensemble models to train.
      learning_rate: sgd learning rate.
      num_gradient_steps: how many sgd to do.
      batch_size: size of batch.
      lr_decay: decay learning rate.
      leaky_coeff: slope of "negative" part of the Leaky ReLU.
    """
    self.M = num_models
    # initialize models by sampling perturbed prior means
    # NOTE: the order of these randn calls determines the sampled priors for
    # a fixed seed; do not reorder.
    self.W1_model_prior = np.sqrt(prior_var) * rd.randn(self.M, hidden_dim,
                                                        input_dim)
    self.W2_model_prior = np.sqrt(prior_var) * rd.randn(self.M, hidden_dim)
    # Each ensemble member starts at (and is regularized towards) its own
    # prior sample.
    self.W1 = np.copy(self.W1_model_prior)
    self.W2 = np.copy(self.W2_model_prior)
    self.actions = actions
    self.num_actions = len(actions)
    self.T = time_horizon
    self.prior_var = prior_var
    self.noise_var = noise_var
    self.lr = learning_rate
    self.num_gradient_steps = num_gradient_steps
    self.batch_size = batch_size
    self.lr_decay = lr_decay
    self.leaky_coeff = leaky_coeff

    self.action_hist = np.zeros((self.T, input_dim))
    # One (perturbed) reward history per ensemble member.
    self.model_reward_hist = np.zeros((self.M, self.T))
def _model_forward(self, m, input_actions):
"""Neural network forward pass for single model of ensemble.
Args:
m: index of which network to evaluate.
input_actions: actions to evaluate (numpy array).
Returns:
out: network prediction.
cache: tuple holding intermediate activations for backprop.
"""
affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1[m], axis=2)
relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
out = np.sum(relu_out * self.W2[m], axis=1)
cache = (input_actions, affine_out, relu_out)
return out, cache
def _model_backward(self, m, out, cache, y):
"""Neural network backward pass (for backpropagation) for single network.
Args:
m: index of which network to evaluate.
out: output of batch of predictions.
cache: intermediate activations from _model_forward.
y: target labels.
Returns:
dW1: gradients for layer 1.
dW2: gradients for layer 2.
"""
input_actions, affine_out, relu_out = cache
dout = -(2 / self.noise_var) * (y - out)
dW2 = np.sum(dout[:, np.newaxis] * relu_out, axis=0)
drelu_out = dout[:, np.newaxis] * self.W2[m]
mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
daffine_out = mask * drelu_out
dW1 = np.dot(daffine_out.T, input_actions)
return dW1, dW2
def _update_model(self, m, t):
"""Apply SGD to model m."""
for i in range(self.num_gradient_steps):
# sample minibatch
batch_ind = rd.randint(t + 1, size=self.batch_size)
action_batch = self.action_hist[batch_ind]
reward_batch = self.model_reward_hist[m][batch_ind]
out, cache | |
"""
Test of Summary tables. This has many test cases, so to keep files smaller, it's split into two
files: test_summary.py and test_summary2.py.
"""
import actions
import logger
import summary
import testutil
import test_engine
from useractions import allowed_summary_change
from test_engine import Table, Column, View, Section, Field
log = logger.Logger(__name__, logger.INFO)
class TestSummary(test_engine.EngineTestCase):
  """Tests of creation and behavior of summary tables (part 1 of 2; the rest
  are in test_summary2.py)."""

  # Shared input document: a single "Address" table with two Text columns and
  # one Numeric column; used as the starting point by most tests below.
  sample = testutil.parse_test_sample({
    "SCHEMA": [
      [1, "Address", [
        [11, "city", "Text", False, "", "City", ""],
        [12, "state", "Text", False, "", "State", "WidgetOptions1"],
        [13, "amount", "Numeric", False, "", "Amount", "WidgetOptions2"],
      ]]
    ],
    "DATA": {
      "Address": [
        ["id", "city", "state", "amount"],
        [21, "New York", "NY", 1.],
        [22, "Albany", "NY", 2.],
        [23, "Seattle", "WA", 3.],
        [24, "Chicago", "IL", 4.],
        [25, "Bedford", "MA", 5.],
        [26, "New York", "NY", 6.],
        [27, "Buffalo", "NY", 7.],
        [28, "Bedford", "NY", 8.],
        [29, "Boston", "MA", 9.],
        [30, "Yonkers", "NY", 10.],
        [31, "New York", "NY", 11.],
      ]
    }
  })

  # Expected metadata for the Address table as loaded, for assertTables().
  starting_table = Table(1, "Address", primaryViewId=0, summarySourceTable=0, columns=[
    Column(11, "city", "Text", isFormula=False, formula="", summarySourceCol=0),
    Column(12, "state", "Text", isFormula=False, formula="", summarySourceCol=0),
    Column(13, "amount", "Numeric", isFormula=False, formula="", summarySourceCol=0),
  ])

  # Expected row data for the Address table as loaded, for assertTableData().
  starting_table_data = [
    ["id", "city", "state", "amount"],
    [21, "New York", "NY", 1],
    [22, "Albany", "NY", 2],
    [23, "Seattle", "WA", 3],
    [24, "Chicago", "IL", 4],
    [25, "Bedford", "MA", 5],
    [26, "New York", "NY", 6],
    [27, "Buffalo", "NY", 7],
    [28, "Bedford", "NY", 8],
    [29, "Boston", "MA", 9],
    [30, "Yonkers", "NY", 10],
    [31, "New York", "NY", 11],
  ]
#----------------------------------------------------------------------
def test_encode_summary_table_name(self):
self.assertEqual(summary.encode_summary_table_name("Foo"), "GristSummary_3_Foo")
self.assertEqual(summary.encode_summary_table_name("Foo2"), "GristSummary_4_Foo2")
self.assertEqual(summary.decode_summary_table_name("GristSummary_3_Foo"), "Foo")
self.assertEqual(summary.decode_summary_table_name("GristSummary_4_Foo2"), "Foo2")
self.assertEqual(summary.decode_summary_table_name("GristSummary_3_Foo2"), "Foo")
self.assertEqual(summary.decode_summary_table_name("GristSummary_4_Foo2_2"), "Foo2")
# Test that underscore in the name is OK.
self.assertEqual(summary.decode_summary_table_name("GristSummary_5_Foo_234"), "Foo_2")
self.assertEqual(summary.decode_summary_table_name("GristSummary_4_Foo_234"), "Foo_")
self.assertEqual(summary.decode_summary_table_name("GristSummary_6__Foo_234"), "_Foo_2")
# Test that we return None for invalid values.
self.assertEqual(summary.decode_summary_table_name("Foo2"), None)
self.assertEqual(summary.decode_summary_table_name("GristSummary_3Foo"), None)
self.assertEqual(summary.decode_summary_table_name("GristSummary_4_Foo"), None)
self.assertEqual(summary.decode_summary_table_name("GristSummary_3X_Foo"), None)
self.assertEqual(summary.decode_summary_table_name("_5_Foo_234"), None)
self.assertEqual(summary.decode_summary_table_name("_GristSummary_3_Foo"), None)
self.assertEqual(summary.decode_summary_table_name("gristsummary_3_Foo"), None)
self.assertEqual(summary.decode_summary_table_name("GristSummary3_Foo"), None)
#----------------------------------------------------------------------
  def test_create_view_section(self):
    """Check CreateViewSection for a plain section and for summary sections
    with zero, one, and two group-by columns."""
    self.load_sample(self.sample)

    # Verify the starting table; there should be no views yet.
    self.assertTables([self.starting_table])
    self.assertViews([])

    # Create a view + section for the initial table.
    self.apply_user_action(["CreateViewSection", 1, 0, "record", None])

    # Verify that we got a new view, with one section, and three fields.
    self.assertTables([self.starting_table])
    basic_view = View(1, sections=[
      Section(1, parentKey="record", tableRef=1, fields=[
        Field(1, colRef=11),
        Field(2, colRef=12),
        Field(3, colRef=13),
      ])
    ])
    self.assertViews([basic_view])

    self.assertTableData("Address", self.starting_table_data)

    # Create a "Totals" section, i.e. a summary with no group-by columns.
    self.apply_user_action(["CreateViewSection", 1, 0, "record", []])

    # Verify that a new table gets created, and a new view, with a section for that table,
    # and some auto-generated summary fields.
    summary_table1 = Table(2, "GristSummary_7_Address", primaryViewId=0, summarySourceTable=1,
                           columns=[
      Column(14, "group", "RefList:Address", isFormula=True, summarySourceCol=0,
             formula="table.getSummarySourceGroup(rec)"),
      Column(15, "count", "Int", isFormula=True, summarySourceCol=0,
             formula="len($group)"),
      Column(16, "amount", "Numeric", isFormula=True, summarySourceCol=0,
             formula="SUM($group.amount)"),
    ])
    summary_view1 = View(2, sections=[
      Section(2, parentKey="record", tableRef=2, fields=[
        Field(4, colRef=15),
        Field(5, colRef=16),
      ])
    ])
    self.assertTables([self.starting_table, summary_table1])
    self.assertViews([basic_view, summary_view1])

    # Verify the summarized data.
    self.assertTableData('GristSummary_7_Address', cols="subset", data=[
      ["id", "count", "amount"],
      [1, 11, 66.0],
    ])

    # Create a summary section, grouped by the "State" column.
    self.apply_user_action(["CreateViewSection", 1, 0, "record", [12]])

    # Verify that a new table gets created again, a new view, and a section for that table.
    # Note that we also check that summarySourceTable and summarySourceCol fields are correct.
    summary_table2 = Table(3, "GristSummary_7_Address2", primaryViewId=0, summarySourceTable=1,
                           columns=[
      Column(17, "state", "Text", isFormula=False, formula="", summarySourceCol=12),
      Column(18, "group", "RefList:Address", isFormula=True, summarySourceCol=0,
             formula="table.getSummarySourceGroup(rec)"),
      Column(19, "count", "Int", isFormula=True, summarySourceCol=0,
             formula="len($group)"),
      Column(20, "amount", "Numeric", isFormula=True, summarySourceCol=0,
             formula="SUM($group.amount)"),
    ])
    summary_view2 = View(3, sections=[
      Section(3, parentKey="record", tableRef=3, fields=[
        Field(6, colRef=17),
        Field(7, colRef=19),
        Field(8, colRef=20),
      ])
    ])
    self.assertTables([self.starting_table, summary_table1, summary_table2])
    self.assertViews([basic_view, summary_view1, summary_view2])

    # Verify more fields of the new column objects.
    self.assertTableData('_grist_Tables_column', rows="subset", cols="subset", data=[
      ['id', 'colId', 'type', 'formula', 'widgetOptions', 'label'],
      [17, 'state', 'Text', '', 'WidgetOptions1', 'State'],
      [20, 'amount', 'Numeric', 'SUM($group.amount)', 'WidgetOptions2', 'Amount'],
    ])

    # Verify the summarized data.
    self.assertTableData('GristSummary_7_Address2', cols="subset", data=[
      ["id", "state", "count", "amount"],
      [1, "NY", 7, 1.+2+6+7+8+10+11],
      [2, "WA", 1, 3.],
      [3, "IL", 1, 4.],
      [4, "MA", 2, 5.+9],
    ])

    # Create a summary section grouped by two columns ("city" and "state").
    self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]])

    # Verify the new table and views.
    summary_table3 = Table(4, "GristSummary_7_Address3", primaryViewId=0, summarySourceTable=1,
                           columns=[
      Column(21, "city", "Text", isFormula=False, formula="", summarySourceCol=11),
      Column(22, "state", "Text", isFormula=False, formula="", summarySourceCol=12),
      Column(23, "group", "RefList:Address", isFormula=True, summarySourceCol=0,
             formula="table.getSummarySourceGroup(rec)"),
      Column(24, "count", "Int", isFormula=True, summarySourceCol=0,
             formula="len($group)"),
      Column(25, "amount", "Numeric", isFormula=True, summarySourceCol=0,
             formula="SUM($group.amount)"),
    ])
    summary_view3 = View(4, sections=[
      Section(4, parentKey="record", tableRef=4, fields=[
        Field(9, colRef=21),
        Field(10, colRef=22),
        Field(11, colRef=24),
        Field(12, colRef=25),
      ])
    ])
    self.assertTables([self.starting_table, summary_table1, summary_table2, summary_table3])
    self.assertViews([basic_view, summary_view1, summary_view2, summary_view3])

    # Verify the summarized data.
    self.assertTableData('GristSummary_7_Address3', cols="subset", data=[
      ["id", "city", "state", "count", "amount"],
      [1, "New York", "NY", 3, 1.+6+11],
      [2, "Albany", "NY", 1, 2.],
      [3, "Seattle", "WA", 1, 3.],
      [4, "Chicago", "IL", 1, 4.],
      [5, "Bedford", "MA", 1, 5.],
      [6, "Buffalo", "NY", 1, 7.],
      [7, "Bedford", "NY", 1, 8.],
      [8, "Boston", "MA", 1, 9.],
      [9, "Yonkers", "NY", 1, 10.],
    ])

    # The original table's data should not have changed.
    self.assertTableData("Address", self.starting_table_data)
#----------------------------------------------------------------------
def test_summary_gencode(self):
    """Check the Python schema code generated for a doc with summary tables.

    Creates a totals summary section (no group-by columns) and a
    (city, state) summary section, then compares the generated usercode
    module text against the expected literal.
    """
    self.maxDiff = 1000     # If there is a discrepancy, allow the bigger diff.
    self.load_sample(self.sample)
    # Totals summary (no group-by columns), then one grouped by
    # columns 11 (city) and 12 (state).
    self.apply_user_action(["CreateViewSection", 1, 0, "record", []])
    self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]])
    # NOTE(review): the expected text below appears to have lost its original
    # indentation/blank lines in this copy of the file -- verify against upstream.
    self.assertMultiLineEqual(self.engine.fetch_table_schema(),
"""import grist
from functions import * # global uppercase functions
import datetime, math, re # modules commonly needed in formulas
@grist.UserTable
class Address:
city = grist.Text()
state = grist.Text()
amount = grist.Numeric()
class _Summary:
@grist.formulaType(grist.ReferenceList('Address'))
def group(rec, table):
return table.getSummarySourceGroup(rec)
@grist.formulaType(grist.Int())
def count(rec, table):
return len(rec.group)
@grist.formulaType(grist.Numeric())
def amount(rec, table):
return SUM(rec.group.amount)
""")
#----------------------------------------------------------------------
def test_summary_table_reuse(self):
    """Test that we'll reuse a suitable summary table when already available."""
    self.load_sample(self.sample)
    # Create a summary section grouped by two columns ("city" and "state").
    self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]])
    # Verify the new table and views.
    summary_table = Table(2, "GristSummary_7_Address", primaryViewId=0, summarySourceTable=1,
        columns=[
            # Group-by columns mirror source columns 11 (city) and 12 (state).
            Column(14, "city", "Text", isFormula=False, formula="", summarySourceCol=11),
            Column(15, "state", "Text", isFormula=False, formula="", summarySourceCol=12),
            # Formula columns: the source-row group plus the two aggregates.
            Column(16, "group", "RefList:Address", isFormula=True, summarySourceCol=0,
                formula="table.getSummarySourceGroup(rec)"),
            Column(17, "count", "Int", isFormula=True, summarySourceCol=0,
                formula="len($group)"),
            Column(18, "amount", "Numeric", isFormula=True, summarySourceCol=0,
                formula="SUM($group.amount)"),
        ])
    summary_view = View(1, sections=[
        Section(1, parentKey="record", tableRef=2, fields=[
            Field(1, colRef=14),
            Field(2, colRef=15),
            Field(3, colRef=17),
            Field(4, colRef=18),
        ])
    ])
    self.assertTables([self.starting_table, summary_table])
    self.assertViews([summary_view])
    # Create two other views + view sections with the same breakdown (in different order
    # of group-by fields, which should still reuse the same table).
    self.apply_user_action(["CreateViewSection", 1, 0, "record", [12,11]])
    self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]])
    summary_view2 = View(2, sections=[
        # Field order follows the requested group-by order [12, 11].
        Section(2, parentKey="record", tableRef=2, fields=[
            Field(5, colRef=15),
            Field(6, colRef=14),
            Field(7, colRef=17),
            Field(8, colRef=18),
        ])
    ])
    summary_view3 = View(3, sections=[
        Section(3, parentKey="record", tableRef=2, fields=[
            Field(9, colRef=14),
            Field(10, colRef=15),
            Field(11, colRef=17),
            Field(12, colRef=18),
        ])
    ])
    # Verify that we have a new view, but are reusing the table.
    self.assertTables([self.starting_table, summary_table])
    self.assertViews([summary_view, summary_view2, summary_view3])
    # Verify the summarized data.
    self.assertTableData('GristSummary_7_Address', cols="subset", data=[
        [ "id", "city", "state", "count", "amount" ],
        [ 1, "New York", "NY" , 3, 1.+6+11 ],
        [ 2, "Albany", "NY" , 1, 2. ],
        [ 3, "Seattle", "WA" , 1, 3. ],
        [ 4, "Chicago", "IL" , 1, 4. ],
        [ 5, "Bedford", "MA" , 1, 5. ],
        [ 6, "Buffalo", "NY" , 1, 7. ],
        [ 7, "Bedford", "NY" , 1, 8. ],
        [ 8, "Boston", "MA" , 1, 9. ],
        [ 9, "Yonkers", "NY" , 1, 10. ],
    ])
#----------------------------------------------------------------------
def test_summary_no_invalid_reuse(self):
# Verify that if we have some summary tables for one table, they don't mistakenly get used
# when we need a summary for another table.
# Load table and create a couple summary sections, for totals, and grouped by "state".
self.load_sample(self.sample)
self.apply_user_action(["CreateViewSection", 1, 0, "record", []])
self.apply_user_action(["CreateViewSection", 1, 0, "record", [12]])
self.assertTables([
self.starting_table,
Table(2, "GristSummary_7_Address", 0, 1, columns=[
Column(14, "group", "RefList:Address", True, | |
# Source repo: learnforpractice/pyeos (dataset marker: <gh_stars>100-1000)
import eoslib
from eoslib import N,read_message,require_auth,now
try:
    import struct
except Exception as e:
    # micropython has no 'struct'; fall back to its 'ustruct' equivalent.
    import ustruct as struct

# 64-bit name constants used as database scope/code and table identifiers.
exchange = N(b'exchange')
currency = N(b'currency')
table_account = N(b'account')
table_asks = N(b'asks')
table_bids = N(b'bids')

# Convert a 64-bit name back to its readable string form (used in logging).
Name = eoslib.n2s
def min(a, b):
    """Return the smaller of *a* and *b*.

    Deliberately shadows the builtin ``min`` (existing callers rely on it).
    Bug fix: the result was previously passed through ``uint64()``, which
    calls ``int.from_bytes()`` and raises TypeError for the plain-int
    quantities that ``match()`` passes in; return the value unchanged.
    """
    if a > b:
        return b
    return a
def uint64(bs):
    """Decode *bs* as a little-endian unsigned 64-bit integer.

    Accepts ``bytes`` (decoded little-endian) or an ``int`` (returned
    unchanged).  The int case is needed because callers such as
    ``Bid.__init__`` pass literal 0, which ``int.from_bytes`` rejects.
    """
    if isinstance(bs, int):
        return bs
    return int.from_bytes(bs, 'little')
def uint128(bs):
    """Decode *bs* as a little-endian unsigned 128-bit integer.

    Accepts ``bytes`` (decoded little-endian) or an ``int`` (returned
    unchanged), since callers such as ``Bid.__init__`` pass literal 0.
    """
    if isinstance(bs, int):
        return bs
    return int.from_bytes(bs, 'little')
class Object(object):
    """Base class for simple record types: printing shows the attribute dict."""

    def __str__(self):
        return "{}".format(self.__dict__)

    def __repr__(self):
        return self.__str__()
class Transfer(Object):
    """A 'transfer' action parsed from the current message.

    Wire layout: from (u64) | to (u64) | amount (u64) | memo (utf-8 tail).
    """
    def __init__(self):
        # Raw message bytes for the action being processed.
        self.msg = read_message()
        # 'from' is a Python keyword, hence the trailing underscore.
        self.from_ = uint64(self.msg[:8])
        print(self.msg, self.msg[:8], self.msg[8:16])
        self.to_ = uint64(self.msg[8:16])
        self.amount = uint64(self.msg[16:24])
        # Everything after the three u64 fields is the memo text.
        self.memo = str(self.msg[24:], 'utf8')
'''
"name" : "Account",
"fields" : {
"owner" : "AccountName",
"eos_balance" : "UInt64",
"currency_balance" : "UInt64",
"open_orders" : "UInt32"
}
'''
class Account(Object):
    """Exchange account row keyed by owner name, persisted in table_account.

    Value layout is struct 'QQI' (20 bytes): eos_balance (u64),
    currency_balance (u64), open_orders (u32).
    """
    def __init__(self, owner):
        # owner: 64-bit account name (already an int).
        self.owner = owner
        self.load()

    def save(self):
        """Write this account's balances back to the exchange scope."""
        keys = struct.pack("Q", self.owner)
        print(self.eos_balance, self.currency_balance, self.open_orders)
        values = struct.pack('QQI', self.eos_balance, self.currency_balance, self.open_orders)
        eoslib.store(exchange, table_account, keys, 0, values)

    def load(self):
        """Load balances from the database; zero-initialize when absent."""
        keys = struct.pack("Q", self.owner)
        print(self.owner)
        # Buffer sized for 'QQI' (8+8+4 bytes).
        # NOTE(review): eoslib.load appears to fill `values` in place; CPython
        # bytes are immutable, so this presumably relies on the micropython
        # eoslib treating it as a writable buffer -- confirm.
        values = bytes(20)
        if eoslib.load(exchange, exchange, table_account, keys, 0, 0, values) > 0:
            values = struct.unpack('QQI', values)
            print(values)
            self.eos_balance = values[0]
            self.currency_balance = values[1]
            self.open_orders = values[2]
        else:
            # No stored record: fresh account with zero balances.
            self.eos_balance = 0
            self.currency_balance = 0
            self.open_orders = 0
'''
AccountName name = 0;
uint64_t id = 0;
'''
class OrderID(Object):
    """16-byte composite order key: account name (u64) + order id (u64).

    Calling the instance serializes it to (and caches) its packed 'QQ' form.
    """
    def __init__(self, bs=None):
        # Bug fix: raw_data was only initialized in the default branch, so
        # reading it on an instance built from bytes raised AttributeError
        # until __call__ was invoked.  Initialize it unconditionally.
        self.raw_data = None
        if bs:
            name, order_id = struct.unpack('QQ', bs)
            self.name = name
            self.id = order_id
        else:
            self.name = 0
            self.id = 0

    def __call__(self):
        """Pack and return the 16-byte key, caching it in raw_data."""
        self.raw_data = struct.pack('QQ', self.name, self.id)
        return self.raw_data
'''
"buyer" : "OrderID",
"price" : "UInt128",
"quantity" : "UInt64",
"expiration" : "Time"
'''
class Bid(Object):
    """A resting buy order in table_bids.

    DB row layout:
      key   (32 bytes): buyer OrderID ('QQ', 16 bytes) + price (16-byte LE)
      value (12 bytes): struct 'QI' -> quantity (u64), expiration (u32)
    Secondary index 0 orders rows by buyer OrderID, index 1 by price.
    """

    def __init__(self):
        self.buyer = OrderID()
        # Plain ints; the original passed 0 through uint128()/uint64(),
        # which call int.from_bytes and raise TypeError on an int.
        self.price = 0
        self.quantity = 0
        self.expiration = 0

    def _price_key(self):
        # 16-byte little-endian key segment for the price.  Bug fix: the
        # original called self.price(), but price is an int (not callable).
        return self.price.to_bytes(16, 'little')

    def _keys(self):
        # Full 32-byte primary key: buyer id followed by price.
        return struct.pack('16s16s', self.buyer(), self._price_key())

    @staticmethod
    def _from_row(keys, values):
        # Decode a raw (key, value) row pair back into a Bid instance.
        bid = Bid()
        bid.buyer = OrderID(keys[:16])
        bid.price = uint128(keys[16:])
        quantity, expiration = struct.unpack('QI', values[:12])
        bid.quantity = quantity
        bid.expiration = expiration
        return bid

    def store(self):
        """Insert or update this bid's row."""
        values = struct.pack('QI', self.quantity, self.expiration)
        return eoslib.store(exchange, table_bids, self._keys(), 1, values)

    def remove(self):
        """Delete this bid's row."""
        return eoslib.remove(exchange, table_bids, self._keys(), 1)

    def load_by_order_id(id):
        """Look up a bid by buyer OrderID (index 0); None when absent."""
        keys = struct.pack('16s16s', id(), bytes(16))
        values = bytes(12)
        if eoslib.load(exchange, exchange, table_bids, keys, 1, 0, values) > 0:
            return Bid._from_row(keys, values)
        return None

    def load_by_price(price):
        """Look up a bid by price (index 1); None when absent."""
        # Accept a plain int price or a callable producing the 16-byte key
        # (the original unconditionally called price()).
        price_key = price.to_bytes(16, 'little') if isinstance(price, int) else price()
        keys = struct.pack('16s16s', bytes(16), price_key)
        values = bytes(12)
        if eoslib.load(exchange, exchange, table_bids, keys, 1, 1, values) > 0:
            return Bid._from_row(keys, values)
        return None

    def front_by_order_id():
        """First bid in OrderID order, or None when the table is empty."""
        keys = bytes(32)
        values = bytes(8 + 4)
        if eoslib.front(exchange, exchange, table_bids, keys, 1, 0, values) > 0:
            return Bid._from_row(keys, values)
        return None

    def back_by_order_id():
        """Last bid in OrderID order, or None when the table is empty."""
        keys = bytes(32)
        values = bytes(8 + 4)
        if eoslib.back(exchange, exchange, table_bids, keys, 1, 0, values) > 0:
            return Bid._from_row(keys, values)
        return None

    def front_by_price():
        """First bid in price order, or None when the table is empty."""
        keys = bytes(32)
        values = bytes(8 + 4)
        if eoslib.front(exchange, exchange, table_bids, keys, 1, 1, values) > 0:
            return Bid._from_row(keys, values)
        return None

    def back_by_price():
        """Last bid in price order, or None when the table is empty."""
        keys = bytes(32)
        values = bytes(8 + 4)
        if eoslib.back(exchange, exchange, table_bids, keys, 1, 1, values) > 0:
            return Bid._from_row(keys, values)
        return None

    def next_by_order_id(self):
        """Bid following this one in OrderID order, or None at the end."""
        keys = struct.pack('16s16s', self.buyer(), bytes(16))
        values = struct.pack('QI', self.quantity, self.expiration)
        if eoslib.next(exchange, exchange, table_bids, keys, 1, 0, values) > 0:
            # Bug fix: this previously constructed an Ask and set .seller.
            return Bid._from_row(keys, values)
        return None

    def next_by_price(self):
        """Bid following this one in price order, or None at the end."""
        keys = struct.pack('16s16s', bytes(16), self._price_key())
        values = struct.pack('QI', self.quantity, self.expiration)
        # Bug fixes: use the price index (was 0, the OrderID index, unlike
        # every other *_by_price method) and build a Bid (was Ask/.seller).
        if eoslib.next(exchange, exchange, table_bids, keys, 1, 1, values) > 0:
            return Bid._from_row(keys, values)
        return None

    def __repr__(self):
        return str(self.__dict__)
'''
"seller" : "OrderID",
"price" : "UInt128",
"quantity" : "UInt64",
"expiration" : "Time"
'''
class Ask(Object):
    """A resting sell order in table_asks.

    DB row layout mirrors Bid with the seller's OrderID:
      key   (32 bytes): seller OrderID ('QQ', 16 bytes) + price (16-byte LE)
      value (12 bytes): struct 'QI' -> quantity (u64), expiration (u32)
    Secondary index 0 orders rows by seller OrderID, index 1 by price.
    """

    def __init__(self):
        # OrderID(0) hit the same default branch; be explicit instead.
        self.seller = OrderID()
        # Plain ints; the original passed 0 through uint128()/uint64(),
        # which call int.from_bytes and raise TypeError on an int.
        self.price = 0
        self.quantity = 0
        self.expiration = 0

    def _price_key(self):
        # 16-byte little-endian key segment for the price.  Bug fix: the
        # original called self.price(), but price is an int (not callable).
        return self.price.to_bytes(16, 'little')

    def _keys(self):
        # Full 32-byte primary key: seller id followed by price.
        return struct.pack('16s16s', self.seller(), self._price_key())

    @staticmethod
    def _from_row(keys, values):
        # Decode a raw (key, value) row pair back into an Ask instance.
        ask = Ask()
        ask.seller = OrderID(keys[:16])
        ask.price = uint128(keys[16:])
        quantity, expiration = struct.unpack('QI', values[:12])
        ask.quantity = quantity
        ask.expiration = expiration
        return ask

    def __call__(self):
        """Serialize the full row: seller key, price key, quantity, expiration."""
        return struct.pack('16s16sQI', self.seller(), self._price_key(),
                           self.quantity, self.expiration)

    def store(self):
        """Insert or update this ask's row."""
        values = struct.pack('QI', self.quantity, self.expiration)
        return eoslib.store(exchange, table_asks, self._keys(), 1, values)

    def remove(self):
        """Delete this ask's row."""
        return eoslib.remove(exchange, table_asks, self._keys(), 1)

    def load_by_order_id(id):
        """Look up an ask by seller OrderID (index 0); None when absent."""
        keys = struct.pack('16s16s', id(), bytes(16))
        values = bytes(12)
        if eoslib.load(exchange, exchange, table_asks, keys, 1, 0, values) > 0:
            return Ask._from_row(keys, values)
        return None

    def load_by_price(price):
        """Look up an ask by price (index 1); None when absent."""
        # Accept a plain int price or a callable producing the 16-byte key
        # (the original unconditionally called price()).
        price_key = price.to_bytes(16, 'little') if isinstance(price, int) else price()
        keys = struct.pack('16s16s', bytes(16), price_key)
        values = bytes(12)
        if eoslib.load(exchange, exchange, table_asks, keys, 1, 1, values) > 0:
            return Ask._from_row(keys, values)
        return None

    def front_by_order_id():
        """First ask in OrderID order, or None when the table is empty."""
        keys = bytes(32)
        values = bytes(8 + 4)
        if eoslib.front(exchange, exchange, table_asks, keys, 1, 0, values) > 0:
            return Ask._from_row(keys, values)
        return None

    def back_by_order_id():
        """Last ask in OrderID order, or None when the table is empty."""
        keys = bytes(32)
        values = bytes(8 + 4)
        if eoslib.back(exchange, exchange, table_asks, keys, 1, 0, values) > 0:
            return Ask._from_row(keys, values)
        return None

    def front_by_price():
        """Cheapest ask (first in price order), or None when table is empty."""
        keys = bytes(32)
        values = bytes(8 + 4)
        if eoslib.front(exchange, exchange, table_asks, keys, 1, 1, values) > 0:
            return Ask._from_row(keys, values)
        return None

    def back_by_price():
        """Last ask in price order, or None when the table is empty."""
        keys = bytes(32)
        values = bytes(8 + 4)
        if eoslib.back(exchange, exchange, table_asks, keys, 1, 1, values) > 0:
            return Ask._from_row(keys, values)
        return None

    def next_by_order_id(self):
        """Ask following this one in OrderID order, or None at the end."""
        keys = struct.pack('16s16s', self.seller(), bytes(16))
        values = struct.pack('QI', self.quantity, self.expiration)
        if eoslib.next(exchange, exchange, table_asks, keys, 1, 0, values) > 0:
            return Ask._from_row(keys, values)
        return None

    def next_by_price(self):
        """Ask following this one in price order, or None at the end."""
        keys = struct.pack('16s16s', bytes(16), self._price_key())
        values = struct.pack('QI', self.quantity, self.expiration)
        # Bug fix: use the price index (was 0, the OrderID index, unlike
        # every other *_by_price method in this class).
        if eoslib.next(exchange, exchange, table_asks, keys, 1, 1, values) > 0:
            return Ask._from_row(keys, values)
        return None

    def __repr__(self):
        return str(self.__dict__)
'''
"name" : "BuyOrder",
"base" : "Bid",
"fields" : {
"fill_or_kill" : "UInt8"
}
'''
class BuyOrder(Bid):
    """A Bid parsed from an incoming 'buy' message.

    Wire layout: buyer OrderID (16) | price u128 (16) | quantity u64 (8) |
    expiration u32 Time (4) | fill_or_kill u8 (1).
    """
    def __init__(self):
        msg = read_message()
        self.buyer = OrderID(msg[:16])
        self.price = uint128(msg[16:32])        # EOS tokens per currency
        self.quantity = uint64(msg[32:40])      # currency amount to buy
        # Bug fix: expiration is a 4-byte Time field; the original decoded
        # msg[40:], which also folded the trailing fill_or_kill byte into
        # the high bits of the expiration value.
        self.expiration = uint64(msg[40:44])
        self.fill_or_kill = msg[-1]
'''
"name" : "SellOrder",
"base" : "Ask",
"fields" : {
"fill_or_kill" : "UInt8"
}
'''
class SellOrder(Ask):
    """An Ask parsed from an incoming 'sell' message.

    Wire layout: seller OrderID (16) | price u128 (16) | quantity u64 (8) |
    expiration u32 Time (4) | fill_or_kill u8 (1).
    """
    def __init__(self):
        msg = read_message()
        self.seller = OrderID(msg[:16])
        self.price = uint128(msg[16:32])        # EOS tokens per currency
        self.quantity = uint64(msg[32:40])      # currency amount to sell
        # Bug fix: expiration is a 4-byte Time field; the original decoded
        # msg[40:], which also folded the trailing fill_or_kill byte into
        # the high bits of the expiration value.
        self.expiration = uint64(msg[40:44])
        self.fill_or_kill = msg[-1]
# Port of the C++ helper: void match( Bid& bid, Account& buyer, Ask& ask, Account& seller )
def match(bid, buyer, ask, seller):
    """Match a bid against an ask, moving balances between the two accounts.

    Mutates all four arguments in place: bid.quantity (EOS remaining),
    ask.quantity (currency remaining), seller.eos_balance and
    buyer.currency_balance.
    """
    print("match bid: ", bid, "\nmatch ask: ", ask, "\n")
    ask_eos = ask.quantity * ask.price
    fill_amount_eos = min(ask_eos, bid.quantity)
    if fill_amount_eos == ask_eos:
        # Complete fill of the ask.
        fill_amount_currency = ask.quantity
    else:
        # Complete fill of the buy.  Bug fix: use integer division -- '/'
        # is true division in Python 3 and produced a float quantity.
        # (Also dropped the dead 'uint64(0)' pre-initialization, which
        # itself raised TypeError on an int argument.)
        fill_amount_currency = fill_amount_eos // ask.price
    print("\n\nmatch bid: ", Name(bid.buyer.name), ":", bid.buyer.id,
          "match ask: ", Name(ask.seller.name), ":", ask.seller.id, "\n\n")
    bid.quantity -= fill_amount_eos
    seller.eos_balance += fill_amount_eos
    ask.quantity -= fill_amount_currency
    buyer.currency_balance += fill_amount_currency
def apply_exchange_buy():
    """Handle a 'buy' action: place a bid, matching resting asks first.

    Parses a BuyOrder from the current message, debits the buyer's EOS
    balance, then matches against the lowest-priced ask while it is at or
    below the bid's limit.  Any unfilled remainder is stored on the book
    unless the order is fill-or-kill, in which case the action asserts.
    """
    order = BuyOrder()
    bid = order     # alias: BuyOrder subclasses Bid
    print(eoslib.n2s(bid.buyer.name))
    # Only the buyer may place this bid.
    eoslib.require_auth( bid.buyer.name )
    assert bid.quantity > 0, "invalid quantity"
    assert bid.expiration > eoslib.now(), "order expired"
    print( eoslib.n2s(bid.buyer.name), " created bid for ", order.quantity, " currency at price: ", order.price, "\n" )
    buyer_account = Account( bid.buyer.name )
    # Debit the full bid amount up front; any unfilled remainder stays
    # escrowed with the stored order.
    buyer_account.eos_balance -= bid.quantity
    print('buyer_account:',buyer_account)
    lowest_ask = Ask.front_by_price()
    if not lowest_ask:
        # Empty ask book: nothing to match against.
        print( "\n No asks found, saving buyer account and storing bid\n" )
        assert not order.fill_or_kill, "order not completely filled"
        bid.store()
        buyer_account.open_orders+=1
        buyer_account.save()
        return
    print( "ask: ", lowest_ask, "\n" );
    print( "bid: ", bid, "\n" );
    seller_account = Account( lowest_ask.seller.name );
    while lowest_ask.price <= bid.price :
        print( "lowest ask <= bid.price\n",lowest_ask.price, bid.price);
        match( bid, buyer_account, lowest_ask, seller_account );
        if lowest_ask.quantity == 0:
            # Ask fully consumed: settle both sides, drop the ask, and
            # advance to the next cheapest one.
            seller_account.open_orders-=1;
            seller_account.save();
            buyer_account.save();
            lowest_ask.remove();
            lowest_ask = Ask.front_by_price()
            if not lowest_ask:
                break;
            seller_account = Account( lowest_ask.seller.name );
        else:
            break; # buyer's bid should be filled
    print( "lowest_ask >= bid.price or buyer's bid has been filled\n" );
    if bid.quantity and not order.fill_or_kill:
        buyer_account.open_orders+=1;
        buyer_account.save();
        print( "saving buyer's account\n" );
    if bid.quantity:
        # Remainder left on the book (fill-or-kill already rejected above).
        print( bid.quantity, " eos left over" );
        assert not order.fill_or_kill, "order not completely filled" ;
        bid.store();
        return;
    print( "bid filled\n" );
def apply_exchange_sell():
order = SellOrder()
ask = order;
require_auth( ask.seller.name ); | |
import numpy as np
from . import regimes as REGI
from . import user_output as USER
import multiprocessing as mp
import scipy.sparse as SP
# Bug fix: 'sphstack' was imported twice on this line.
from .utils import sphstack, set_warn, RegressionProps_basic, spdot
from .twosls import BaseTSLS
from .robust import hac_multi
from . import summary_output as SUMMARY
from platform import system

"""
Two-stage Least Squares estimation with regimes.
"""

__author__ = "<NAME> <EMAIL>, <NAME> <EMAIL>, <NAME> <EMAIL>"
class TSLS_Regimes(BaseTSLS, REGI.Regimes_Frame):
"""
Two stage least squares (2SLS) with regimes
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi: ['one', 'many']
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime (default)
cols2regi : list, 'all'
Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all' (default), all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
robust : string
If 'white', then a White consistent estimator of the
variance-covariance matrix is given.
If 'hac', then a HAC consistent estimator of the
variance-covariance matrix is given.
If 'ogmm', then Optimal GMM is used to estimate
betas and the variance-covariance matrix.
Default set to None.
gwk : pysal W object
Kernel spatial weights needed for HAC estimation. Note:
matrix must have ones along the main diagonal.
sig2n_k : boolean
If True, then use n-k to estimate sigma^2. If False, use n.
vm : boolean
If True, include variance-covariance matrix in summary
cores : boolean
Specifies if multiprocessing is to be used
Default: no multiprocessing, cores = False
Note: Multiprocessing may not work on all platforms.
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_regimes : string
Name of regimes variable for use in output
name_w : string
Name of weights matrix for use in output
name_gwk : string
Name of kernel weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
vm : array
Variance covariance matrix (kxk)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi: [False, 'one', 'many']
Ignored if regimes=False. Constant option for regimes.
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime
cols2regi : list, 'all'
Ignored if regimes=False. Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all', all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
kr : int
Number of variables/columns to be "regimized" or subject
to change by regime. These will result in one parameter
estimate by regime for each variable (i.e. nr parameters per
variable)
kf : int
Number of variables/columns to be considered fixed or
global across regimes and hence only obtain one parameter
estimate
nr : int
Number of different regimes in the 'regimes' list
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_regimes : string
Name of regimes variable for use in output
name_w : string
Name of weights matrix for use in output
name_gwk : string
Name of kernel weights matrix for use in output
name_ds : string
Name of dataset for use in output
multi : dictionary
Only available when multiple regressions are estimated,
i.e. when regime_err_sep=True and no variable is fixed
across regimes.
Contains all attributes of each individual regression
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal.lib
Open data on NCOVR US County Homicides (3085 areas) using pysal.lib.io.open().
This is the DBF associated with the NAT shapefile. Note that
pysal.lib.io.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.lib.io.open(pysal.lib.examples.get_path("NAT.dbf"),'r')
Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y_var = 'HR90'
>>> y = np.array([db.by_col(y_var)]).reshape(3085,1)
Extract UE90 (unemployment rate) and PS90 (population structure) vectors from
the DBF to be used as independent variables in the regression. Other variables
can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2','...]
Note that PySAL requires this to be an nxj numpy array, where j is the
number of independent variables (not including a constant). By default
this model adds a vector of ones to the independent variables passed in.
>>> x_var = ['PS90','UE90']
>>> x = np.array([db.by_col(name) for name in x_var]).T
In this case we consider RD90 (resource deprivation) as an endogenous regressor.
We tell the model that this is so by passing it in a different parameter
from the exogenous variables (x).
>>> yd_var = ['RD90']
>>> yd = np.array([db.by_col(name) for name in yd_var]).T
Because we have endogenous variables, to obtain a correct estimate of the
model, we need to instrument for RD90. We use FP89 (families below poverty)
for this and hence put it in the instruments parameter, 'q'.
>>> q_var = | |
des reinettes king of the pippins apples",
"3352": "reinettes and heritage varieties incl canada blanc reinette du mans armorique vigan calville apples",
"3353": "st edmunds pippin apples",
"3354": "ripe ready to eat avocados",
"3355": "strawberries nominal 500g 1 litre berries",
"3356": "strawberries nominal250g 1 2 litre berries",
"3357": "small regular red black cherries",
"3358": "large regular red black cherries",
"3359": "chasselas grapes",
"3360": "muscat de hambourg grapes",
"3361": "without p harvest treatment grapefruit",
"3362": "without postharvest treatment lemons",
"3363": "bowen kensington pride mango",
"3364": "r2e2 artwoeetwo mango",
"3365": "ripe ready to eat mango",
"3366": "madro\u00f1a",
"3367": "glasshouse netted varieties melon",
"3368": "ogen melon",
"3369": "nectavigne red flesh nectarine",
"3370": "maltaise oranges",
"3371": "salustiana oranges",
"3372": "navelate and other late navel varieties oranges",
"3373": "navelina incl newhall oranges",
"3374": "without postharvest treatment oranges",
"3375": "de vigne sanguine red flesh peaches",
"3376": "alexander lucas pears",
"3377": "louise bonne pears",
"3378": "santa maria pears",
"3379": "mini pineapple",
"3380": "perola pineapple",
"3381": "soursop",
"3382": "sugar apple",
"3383": "small clementine tangerines mandarins",
"3384": "medium clementine tangerines mandarins",
"3385": "large clementine tangerines mandarins",
"3386": "clementine with leaves attached tangerines mandarins",
"3387": "clementine without p harvest treatment tangerines mandarins",
"3388": "satsuma clauselina tangerines mandarins",
"3389": "satsuma tangerines mandarins",
"3390": "arracach",
"3391": "rouge salambo red artichokes",
"3392": "green bunch asparagus",
"3393": "white bunch asparagus",
"3394": "purple bunch asparagus",
"3395": "red belgian endive witloof chicory",
"3396": "savoy red cabbage",
"3397": "summer cabbage pointed type",
"3398": "chickpeas garbanzo",
"3399": "regular fresh semi dried with leaves attached garlic",
"3400": "regular smoked garlic",
"3401": "one clove types garlic",
"3402": "regular bunch leeks",
"3403": "baby bunch leeks",
"3404": "cep mushrooms",
"3405": "fairy ring champignon mushrooms",
"3406": "grey tricholoma mushrooms",
"3407": "grisette mushrooms",
"3408": "horn of plenty black trumpet mushrooms",
"3409": "pioppino mushrooms",
"3410": "saffron milk cap mushrooms",
"3411": "sheep polypore mushrooms",
"3412": "yellow brown fresh bunch onions",
"3413": "tabasco peppers capsicums",
"3414": "baking white potato",
"3415": "baking red and eye varieties potato",
"3416": "bunch rhubarb",
"3417": "new zealand spinach",
"3418": "zucchini courgette round squash",
"3419": "borage",
"3420": "belle du jumet honey pears",
"3421": "3 7 lbs watermelon mini seedless melon",
"3422": "interspecific apricots",
"3423": "heirloom varieties include but are not limited to amish salad anna russian aunt ruby's yellow cherry big italian plum black prince zebra brandywine dr caroline earl of edgecomb eva purple ball flamme green hawaiian pineapple tomatoes",
"3424": "purple red beta sweet carrots",
"3425": "small ellendale tangerines mandarins",
"3426": "medium ellendale tangerines mandarins",
"3427": "large ellendale tangerines mandarins",
"3428": "small honey murcott tangerines mandarins",
"3429": "medium honey murcott tangerines mandarins",
"3430": "large honey murcott tangerines mandarins",
"3431": "small imperial tangerines mandarins",
"3432": "medium imperial tangerines mandarins",
"3433": "large imperial tangerines mandarins",
"3434": "tosca pears",
"3435": "pinova apples",
"3436": "orange cauliflower",
"3437": "flat yellow nectarine",
"3438": "ambrosia apples",
"3439": "white flesh flat nectarine",
"3440": "large pomegranate",
"3441": "butterkin squash",
"3442": "new york 1 snapdragon apples",
"3443": "new york 2 rubyfrost apples",
"3444": "green dragon chin loung apples",
"3445": "ds 3 pazazz apples",
"3446": "kale sprouts",
"3447": "ds 22 riverbelle apples",
"3448": "tip top skylar rae cherries",
"3449": "sugrathirteen midnight beauty brand grapes",
"3450": "sugranineteen scarlotta seedless brand grapes",
"3451": "sugrathirtyfour adora seedless brand grapes",
"3452": "sugrathirtyfive autumncrisp brand grapes",
"3453": "galangal root",
"3454": "green jackfruit",
"3455": "yellow jackfruit",
"3456": "winter melon",
"3457": "president plums",
"3458": "cherry orange tomatoes",
"3459": "shiny red persimmon",
"3460": "red jonaprince prince apples",
"3461": "lady williams apples",
"3462": "garlic chinese chives",
"3463": "chinese spinach yin choy amaranth callaloo een",
"3464": "b 74 calypso mango",
"3465": "stripy bell enjoya peppers capsicums",
"3466": "cape rose cheeky pears",
"3467": "regal 13 82 juici apples",
"3468": "small honeycrisp apples",
"3469": "sugrasixteen sable seedless brand grapes",
"3470": "watermelon red small seeds melon",
"3471": "baby cactus leaves nopales pads",
"3472": "sacred pepper leaf",
"3473": "epazote",
"3474": "saffron sweet potato yam kumara",
"3475": "peppermint mint",
"3476": "orange tree leaf",
"3477": "summer cilantro bolivian coriander papalo",
"3478": "quelites",
"3479": "chepil chipilin leaf",
"3480": "pumpkin vine",
"3481": "xpelon bean",
"3482": "rabbit herb",
"3483": "purple grass herb",
"3484": "dalinette choupette apples",
"3485": "harovin sundown cold snap pears",
"3486": "cn121 sugarbee apples",
"3487": "mn 55 rave first kiss apples",
"3488": "extra large red mango",
"3489": "cepuna migo pears",
"3490": "maia 1 evercrisp apples",
"3491": "arra fifteen sweeties grapes",
"3492": "arra twentynine passion fire grapes",
"3493": "tearless sweet sunions onions",
"3494": "3 7 lbs watermelon yellow mini seedless sunny gold melon",
"3495": "celina qtee pears",
"3496": "ifg core red seedless 68 175 sweet celebration four romance none jack's salute grapes",
"3497": "ifg core black seedless one sweet surrender eight enchantment thirteen secrets fifteen surprise ifgsixteen seventeen joy twenty five magic six bond grapes",
"3498": "ifg core green seedless two sweet sunshine ten globe eleven sugar crisp grapes",
"3499": "ifg novelty red seedless fourteen sweet mayabelle eighteen nectar nineteen candy hearts twenty one snaps three drops grapes",
"3500": "ifg novelty black seedless six sweet sapphire twelve funny fingers twenty candy crunch two dream s grapes",
"3501": "ifg novelty green seedless seven cotton candy grapes",
"3502": "arra twentyseven 27 mystic star grapes",
"3503": "arra twentyeight passion punch grapes",
"3504": "arra thirty sugardrop grapes",
"3505": "arra thirtytwo mystic dream grapes",
"3506": "sweet scarlet muscato amore grapes",
"3507": "wa 38 cosmic crisp apples",
"3508": "thomcord grape jammers jelly drops california a29 67 grapes",
"3509": "gem avocados",
"3510": "small ambrosia apples",
"3511": "wa 2 crimson delight sunrise magic apples",
"3512": "round tasti lee tomatoes",
"3513": "shinano gold yello apples",
"3514": "fengapi tessa apples",
"3515": "small prema 153 lemonade apples",
"3516": "large prema 153 lemonade apples",
"3518": "oksana xenia pears",
"3519": "r10 45 wild twist apples",
"3520": "sk 20 smileball goldies onique sweetnothings loving onions",
"3521": "regal d5 100 karma apples",
"3522": "hw624 pears",
"3523": "sq 159 magic star natyra kentish kiss sprank apples",
"3600": "antares apples",
"3601": "huaguan autumn glory apples",
"3602": "belgica apples",
"3603": "minneiska apples",
"3604": "emmons apples",
"3605": "nicoter apples",
"3606": "sweet sensation pears",
"3607": "mariri red apples",
"3608": "large sciros pacific rose apples",
"3609": "red plumcot interspecific plum",
"3610": "green plumcot interspecific plum",
"3611": "black plumcot interspecific plum",
"3612": "nicogreen apples",
"3613": "fuji brak apples",
"3614": "red apricots",
"3615": "civni apples",
"3616": "large scilate envy apples",
"3617": "seedless lemons",
"3618": "opal apples",
"3619": "milwa apples",
"3620": "plumac apples",
"3621": "francis madame francine mango",
"3622": "honey green papaya melon",
"3623": "hami chinese or snow melon",
"3624": "korean melon",
"3625": "minnewashta zestar apples",
"3626": "meyer lemons",
"3627": "large prema17 apples",
"3628": "prema280 apples",
"3629": "civg198 apples",
"3630": "co op 43 apples",
"3631": "pink porcelain doll pumpkin",
"3632": "dekopon shiranui hallabong sumo citrus tangerines mandarins",
"4011": "yellow includes cavendish bananas",
"4012": "large navel oranges",
"4013": "small navel oranges",
"4014": "small valencia oranges",
"4015": "small red delicious apples",
"4016": "large red delicious apples",
"4017": "large granny smith apples",
"4018": "large granny smith apples",
"4019": "large mcintosh apples",
"4020": "large golden delicious apples",
"4021": "small golden delicious apples",
"4022": "white green seedless peerlette thompson grapes",
"4023": "red seedless flame ruby emperatriz grapes",
"4024": "small bartlett williams wbc pears",
"4025": "small anjou pears",
"4026": "small bosc beurre pears",
"4027": "small ruby red pink includes ray grapefruit",
"4028": "pint strawberries berries",
"4029": "small pineapple",
"4030": "regular kiwifruit",
"4031": "watermelon red melon",
"4032": "watermelon red seedless melon",
"4033": "small lemons",
"4034": "large honeydew white melon",
"4035": "small yellow flesh nectarine",
"4036": "large yellow flesh nectarine",
"4037": "small yellow flesh peaches",
"4038": "large yellow flesh peaches",
"4039": "small black includes ambra beaut prima blackamber torch catalina challenger diamond friar royal knight freedom flame howard sun angeleno plums",
"4040": "large black includes ambra beaut prima blackamber torch catalina challenger diamond friar royal knight freedom flame howard sun angeleno plums",
"4041": "small red includes santa rosa late beaut rich spring first royal jewel rose zee ace aleta burgandy july frontier fortune | |
# Dataset marker: <gh_stars>0
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 ADCIRC Development Group
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class NhcDownloader:
def __init__(self,
dblocation=".",
use_besttrack=True,
use_forecast=True,
pressure_method="knaffzehr",
use_aws=True):
from datetime import datetime
from .metdb import Metdb
import tempfile
self.__mettype = "nhc"
self.__metstring = "NHC"
self.__use_forecast = use_besttrack
self.__use_hindcast = use_forecast
self.__year = datetime.now().year
self.__pressure_method = pressure_method
self.__use_rss = True
self.__use_aws = use_aws
self.__database = Metdb()
self.__min_forecast_length = 2
if self.__use_aws:
from .s3file import S3file
import os
self.__dblocation = tempfile.gettempdir()
self.__downloadlocation = dblocation + "/nhc"
if "BUCKET_NAME" in os.environ:
self.__s3file = S3file(os.environ["BUCKET_NAME"])
else:
self.__s3file = S3file()
else:
self.__dblocation = dblocation
self.__downloadlocation = self.__dblocation + "/nhc"
self.__rss_feeds = [
"https://www.nhc.noaa.gov/index-at.xml",
"https://www.nhc.noaa.gov/index-ep.xml",
"https://www.nhc.noaa.gov/index-cp.xml"
]
def mettype(self):
return self.__mettype
def metstring(self):
return self.__metstring
def download(self):
n = 0
if self.__use_forecast:
n += self.download_forecast()
if self.__use_hindcast:
n += self.download_hindcast()
return n
def download_forecast(self):
if self.__use_rss:
return self.download_forecast_rss()
else:
return self.download_forecast_ftp()
def download_forecast_rss(self):
print("[INFO]: Retrieving NHC RSS feed...")
n = 0
for feed in self.__rss_feeds:
n += self.read_nhc_rss_feed(feed)
print("[INFO]: Finished reading RSS feed")
return n
@staticmethod
def generate_advisory_number(string):
"""
Takes input for an advisory and reformats it using 3 places so it is ordered in the table
:param string: advisory number, i.e. 2b or 2
:return: advisory number padded with zeros, i.e. 002b or 002
"""
import re
split = re.split("([0-9]{1,2})", string)
if len(split) == 2:
adv_number = "{:03}".format(int(split[1]))
else:
adv_number = "{:03}".format(int(split[1])) + split[2]
return adv_number
    def read_nhc_rss_feed(self, rss):
        """
        Parse one NHC RSS feed, extract every "Forecast Advisory" entry not
        already in the database, write each as an ATCF-format file, and
        record it in the metadata database.

        :param rss: URL of the RSS feed to parse
        :return: number of new advisories stored
        """
        import feedparser
        import os
        from datetime import datetime
        from .forecastdata import ForecastData
        try:
            n = 0
            feed = feedparser.parse(rss)
            # ... During month changes, we need to be able to roll
            # the date correctly
            rss_date_str = feed["date"][5:-4]
            rss_date = datetime.strptime(rss_date_str, "%d %b %Y %H:%M:%S")
            for e in feed.entries:
                if "Forecast Advisory" in e['title']:
                    adv_number_str = e['title'].split()[-1]
                    adv_number = NhcDownloader.generate_advisory_number(adv_number_str)
                    adv_lines = e["description"].split("\n")
                    # Storm id string like "AL052020": basin, storm no., year
                    id_str = (adv_lines[7].split()[-1]).lstrip()
                    basin_str = str(id_str[0:2]).lower()
                    storm_str = id_str[2:4]
                    year_str = id_str[-4:]
                    vmax = 0
                    storm_name = e['title'].split(
                        "Forecast Advisory")[0].split()[-1]
                    # storm_type = e['title'].split(storm_name)[0]
                    fn = "nhc_fcst_" + year_str + "_" + basin_str + "_" + storm_str + "_" + adv_number + ".fcst"
                    if self.__use_aws:
                        filepath = self.mettype() + "/forecast/" + fn
                    else:
                        filepath = self.__downloadlocation + "_fcst/" + fn
                    metadata = {
                        "year": year_str,
                        "basin": basin_str,
                        "storm": storm_str,
                        "advisory": adv_number
                    }
                    # Skip advisories we already have
                    entry_found = self.__database.has("nhc_fcst", metadata)
                    if not entry_found:
                        print("    Downloading NHC forecast for Basin: " +
                              basin2string(basin_str) + ", Year: " + year_str +
                              ", Storm: " + storm_name + "(" + storm_str +
                              "), Advisory: " + adv_number,
                              flush=True)
                        i = 0
                        # forecasts[0] is the current (analysis) snapshot;
                        # later entries are the forecast hours
                        forecasts = [ForecastData(self.__pressure_method)]
                        while i < len(adv_lines):
                            if "CENTER LOCATED NEAR" in adv_lines[i] and "REPEAT" not in adv_lines[i]:
                                data = adv_lines[i].split("...")[0].split()
                                x, y = self.get_storm_center(data[-3], data[-4])
                                time = self.get_rss_time(rss_date, data[-1])
                                forecasts[0].set_storm_center(x, y)
                                forecasts[0].set_time(time)
                            elif "ESTIMATED MINIMUM CENTRAL" in adv_lines[i]:
                                forecasts[0].set_pressure(
                                    float(adv_lines[i].split()[-2]))
                            elif "MAX SUSTAINED WINDS" in adv_lines[i]:
                                data = adv_lines[i].split()
                                # Gust value is only present on longer lines
                                if len(data) > 5:
                                    forecasts[0].set_max_gust(float(data[-2]))
                                forecasts[0].set_max_wind(float(data[3]))
                                vmax = forecasts[0].max_wind()
                                # Isotach lines ("... KT ...") follow the wind line
                                while "KT" in adv_lines[i + 1]:
                                    i += 1
                                    iso, d1, d2, d3, d4 = self.parse_isotachs(
                                        adv_lines[i])
                                    forecasts[0].set_isotach(iso, d1, d2, d3, d4)
                            elif "PRESENT MOVEMENT TOWARD" in adv_lines[i]:
                                heading = int(
                                    adv_lines[i].split("DEGREES")[0].split()[-1])
                                fwdspd = int(adv_lines[i].split()[-2])
                                forecasts[0].set_heading(heading)
                                forecasts[0].set_forward_speed(fwdspd)
                            elif "FORECAST VALID" in adv_lines[i] and "ABSORBED" not in adv_lines[i]:
                                data = adv_lines[i].split("...")[0].split()
                                # This should be specified in the rss, but if not, compute a value before going further
                                if forecasts[0].pressure() == -1:
                                    forecasts[0].compute_pressure()
                                if len(data) >= 4:
                                    forecasts.append(
                                        ForecastData(self.__pressure_method))
                                    data = adv_lines[i].split("...")[0].split()
                                    time = self.get_rss_time(rss_date, data[2])
                                    x, y = self.get_storm_center(data[4], data[3])
                                    i += 1
                                    data = adv_lines[i].replace(".", " ").split()
                                    forecasts[-1].set_max_wind(float(data[2]))
                                    vmax = max(vmax, forecasts[-1].max_wind())
                                    forecasts[-1].set_max_gust(float(data[5]))
                                    forecasts[-1].set_storm_center(x, y)
                                    forecasts[-1].set_time(time)
                                    forecasts[-1].set_forecast_hour(
                                        (time -
                                         forecasts[0].time()).total_seconds() /
                                        3600)
                                    # Pressure isn't given for forecast hours;
                                    # derive it from the previous snapshot
                                    forecasts[-1].compute_pressure(
                                        vmax, forecasts[-2].max_wind(),
                                        forecasts[-2].pressure())
                                    while "KT" in adv_lines[i + 1]:
                                        i += 1
                                        iso, d1, d2, d3, d4 = self.parse_isotachs(
                                            adv_lines[i])
                                        forecasts[-1].set_isotach(
                                            iso, d1, d2, d3, d4)
                                    # ... TODO: Do we want a record that has no isotachs?
                                    #     Maybe there are some assumptions to be made here
                                    # if forecasts[-1].nisotachs() == 0:
                                    #    del forecasts[-1]
                            i += 1
                        # ... TODO: What is the minimum number of forecast snaps that are acceptable?
                        if len(forecasts) > self.__min_forecast_length:
                            if self.__use_aws:
                                # Write to the temp dir, then push to S3
                                self.write_atcf(self.__dblocation + "/" + fn, basin_str, storm_name, storm_str,
                                                forecasts)
                                self.__s3file.upload_file(self.__dblocation + "/" + fn, filepath)
                                start_date, end_date, duration = self.get_nhc_start_end_date(
                                    self.__dblocation + "/" + fn, True)
                                md5 = self.compute_checksum(self.__dblocation + "/" + fn)
                            else:
                                self.write_atcf(filepath, basin_str, storm_name, storm_str,
                                                forecasts)
                                start_date, end_date, duration = self.get_nhc_start_end_date(
                                    filepath, True)
                                md5 = self.compute_checksum(filepath)
                            nhc_metadata = {
                                "year": year_str,
                                "basin": basin_str,
                                "storm": storm_str,
                                "advisory": adv_number,
                                "md5": md5,
                                'advisory_start': start_date,
                                'advisory_end': end_date,
                                'advisory_duration_hr': duration
                            }
                            self.__database.add(nhc_metadata, "nhc_fcst", filepath)
                            # Increment the counter
                            n += 1
                        else:
                            print("    [WARNING]: Dropping forecast for having <", self.__min_forecast_length,
                                  "forecast records")
            return n
        except KeyboardInterrupt:
            raise
        # except:
        #     print("[ERROR]: An error occured reading the NHC RSS feed")
        #     return n
@staticmethod
def print_forecast_data(year, basin, storm_name, storm_number,
advisory_number, forecast_data):
print("Basin: ", basin2string(basin), ", Year:", year, ", Storm: ",
storm_name, "(", storm_number, "), Advisory: " + advisory_number)
for f in forecast_data:
f.print()
print("")
    @staticmethod
    def write_atcf(filepath, basin, storm_name, storm_number, forecast_data):
        """
        Write forecast snapshots to an ATCF "OFCL" fixed-width file.

        One output line is written per isotach per snapshot (or one line
        with zeroed 34-kt radii when a snapshot has no isotachs).

        :param filepath: destination file path
        :param basin: two-letter basin code (e.g. "al")
        :param storm_name: storm name for the record tail
        :param storm_number: two-digit storm number string
        :param forecast_data: list of ForecastData snapshots; index 0 holds
            the advisory (analysis) time used on every line
        """
        import os
        with open(filepath, 'w') as f:
            for d in forecast_data:
                # Header columns: basin, storm number, advisory datetime —
                # note the datetime comes from forecast_data[0] on purpose
                # (ATCF lines all carry the advisory time, not valid time)
                line = "{:2s},{:3s},{:10s},".format(
                    basin,
                    storm_number.strip().rjust(3),
                    forecast_data[0].time().strftime(" %Y%m%d%H"))
                line = line + " 00, OFCL,{:4.0f},".format(d.forecast_hour())
                x, y = d.storm_center()
                # ATCF stores position in tenths of a degree with hemisphere letters
                x = int(round(x * 10))
                y = int(round(y * 10))
                if x < 0:
                    x = "{:5d}".format(abs(x)) + "W"
                else:
                    x = "{:5d}".format(x) + "E"
                if y < 0:
                    y = "{:4d}".format(abs(y)) + "S"
                else:
                    y = "{:4d}".format(y) + "N"
                # Intensity classification from max sustained wind (kt)
                if d.max_wind() < 34:
                    windcode = "TD"
                elif d.max_wind() < 63:
                    windcode = "TS"
                else:
                    windcode = "HU"
                line = line + "{:5s},{:6s},{:4.0f},{:5.0f},{:3s},".format(
                    y.strip().rjust(5),
                    x.strip().rjust(6), d.max_wind(), d.pressure(),
                    windcode.rjust(3))
                # Values <= -900 are sentinels for "missing"; write 0 instead
                if d.heading() > -900:
                    heading = d.heading()
                else:
                    heading = 0
                if d.forward_speed() > -900:
                    fspd = d.forward_speed()
                else:
                    fspd = 0
                if len(d.isotach_levels()) > 0:
                    # One line per isotach, radii in the four NE/SE/SW/NW quadrants
                    for it in sorted(d.isotach_levels()):
                        iso = d.isotach(it)
                        itline = line + "{:4d}, NEQ,{:5d},{:5d},{:5d},{:5d},".format(
                            it, iso.distance(0), iso.distance(1), iso.distance(2),
                            iso.distance(3))
                        itline = itline + " 1013,     0,   0,{:4.0f},   0,   0,    ,METG,{:4d},{:4d}," \
                                          "{:11s},   ,  0,    NEQ,    0,    0,    0,    0,       ,      ,".format(
                            d.max_gust(), heading, fspd, storm_name.upper().rjust(11))
                        f.write(itline)
                        f.write(os.linesep)
                else:
                    # No isotach data: emit a single 34-kt line with zero radii
                    itline = line + "{:4d}, NEQ,{:5d},{:5d},{:5d},{:5d},".format(
                        34, 0, 0, 0, 0)
                    itline = itline + " 1013,     0,   0,{:4.0f},   0,   0,    ,METG,{:4d},{:4d}," \
                                      "{:11s},   ,  0,    NEQ,    0,    0,    0,    0,       ,      ,".format(
                        d.max_gust(), heading, fspd, storm_name.upper().rjust(11))
                    f.write(itline)
                    f.write(os.linesep)
        return
@staticmethod
def get_storm_center(x, y):
if "W" in x:
x = -float(x[:-1])
else:
x = float(x[:-1])
if "S" in y:
y = -float(y[:-1])
else:
y = float(y[:-1])
return x, y
@staticmethod
def get_rss_time(start_time, time_str):
from datetime import datetime
day = int(time_str[0:2])
hr = int(time_str[3:5])
d = datetime(start_time.year, start_time.month, day, hr, 0, 0)
# ... Generate a query time since it we allege that only the hours, minutes, and seconds could put
# the forecasted time ahead of the time in the rss feed
query_time = datetime(start_time.year, start_time.month, start_time.day, 0, 0, 0)
if d < query_time:
return datetime(start_time.year, start_time.month + 1, day, hr, 0, 0)
else:
return d
@staticmethod
| |
<filename>expand.py
#!/usr/bin/env python2
from base import *
from atom import *
from quilt import *
import drum
import flatten
import globs
import vat
# Discriminated union describing the kind of function currently being
# expanded: a static top-level definition, or an inner (nested) function
# that tracks which outer variables it closes over.
ExFunc, ExStaticDefn, ExInnerFunc = ADT('ExFunc',
        'ExStaticDefn',
        'ExInnerFunc', ('closedVars', 'set([*Var])'))

# Dynamic environment holding the ExFunc for the function being mutated
EXFUNC = new_env('EXFUNC', ExFunc)

# Global expansion state: declarations/definitions generated by this pass,
# plus the set of modules owned by the current compilation
ExGlobal = DT('ExGlobal', ('newDecls', ModuleDecls),
                          ('newDefns', [TopFunc]),
                          ('ownModules', ['*Module']))

EXGLOBAL = new_env('EXGLOBAL', ExGlobal)

IMPORTBINDS = new_env('IMPORTBINDS', set(['a'])) # Bindable

# DEFNS

# Per-function record of the extracted Func and whether it is a true
# closure (captures outer variables)
ClosureInfo = DT('ClosureInfo', ('func', Func), ('isClosure', bool))
Closure = new_extrinsic('Closure', ClosureInfo)

# Maps a variable to the ExFunc in which it was defined
ClosedVarFunc = new_extrinsic('ClosedVar', '*ExFunc')
# Maps a local variable to the GlobalVar that replaced it after extraction
VarGlobalReplacement = new_extrinsic('VarGlobalReplacement', '*GlobalVar')
def iconvert(a):
    """Attach the LLVM-level type derived from a node's high-level type."""
    high_level = extrinsic(TypeOf, a)
    add_extrinsic(LLVMTypeOf, a, convert_type(high_level))
def iconvert_func_var(a):
    """Attach the LLVM-level *function* type derived from a var's type."""
    high_level = extrinsic(TypeOf, a)
    add_extrinsic(LLVMTypeOf, a, convert_func_type(high_level))
class ClosureExpander(vat.Mutator):
    """Hoists every FuncExpr to a top-level function, recording closures."""

    def TopFunc(self, top):
        # Top-level functions are mutated under a static-definition context
        top.func = in_env(EXFUNC, ExStaticDefn(), lambda: self.mutate('func'))
        return top

    def Defn(self, defn):
        wasFuncExpr = matches(defn.expr, 'FuncExpr(_)')
        defn = self.mutate()
        if wasFuncExpr:
            # Special case: extract `f := lambda [...]` form directly
            m = match(defn)
            if m("Defn(PatVar(var), Bind(globalVar))"):
                # The lambda became a global; remember the replacement and
                # give the global the local's name, then drop the Defn
                add_extrinsic(VarGlobalReplacement, m.var, m.globalVar)
                update_extrinsic(Name, m.globalVar, extrinsic(Name, m.var))
                return Nop()
        return defn

    def FuncExpr(self, fe):
        # Extract any other (inline) func expression
        info = ExInnerFunc(set())
        f = in_env(EXFUNC, info, lambda: self.mutate('func'))
        # It's a real closure only if it captured outer variables
        isClosure = len(info.closedVars) > 0
        var = GlobalVar()
        glob = env(EXGLOBAL)
        glob.newDecls.funcDecls.append(var)
        glob.newDefns.append(TopFunc(var, f))
        add_extrinsic(Closure, f, ClosureInfo(f, isClosure))
        # Replace the expression with a bind to the new global
        bind = L.Bind(var)
        t = extrinsic(TypeOf, fe)
        add_extrinsic(TypeOf, bind, t)
        add_extrinsic(TypeOf, var, t)
        add_extrinsic(Name, var, "lambda")
        set_orig(var, fe)
        return bind

    def PatCapture(self, pat):
        # Record the defining function of every bound variable
        add_extrinsic(ClosedVarFunc, pat.var, env(EXFUNC))
        pat.pattern = self.mutate('pattern')
        return pat

    def PatVar(self, pat):
        add_extrinsic(ClosedVarFunc, pat.var, env(EXFUNC))
        return pat

    def Bind(self, bind):
        mv = Bindable.asLocalVar(bind.target)
        if isJust(mv):
            v = fromJust(mv)
            wasClosed = False
            m = match(env(EXFUNC))
            if m('f==ExInnerFunc(closedVars)'):
                # Referencing a var defined in a *different* function means
                # this inner function closes over it
                if has_extrinsic(ClosedVarFunc, v):
                    if extrinsic(ClosedVarFunc, v) is not m.f:
                        m.closedVars.add(v)
                        wasClosed = True
            if has_extrinsic(VarGlobalReplacement, v):
                assert not wasClosed, "TODO closed-over lambda?"
                # Redirect binds of extracted lambdas to their new global
                bind.target = extrinsic(VarGlobalReplacement, v)
        return bind
class FuncValGenerator(vat.Mutator):
    """Rewrites calls/binds of indirect functions into func-value forms."""

    def Call(self, e):
        if is_indirect_func(e.func):
            e.func = self.mutate('func')
            e.args = self.mutate('args')
            ft = extrinsic(TypeOf, e.func)
            indcall = CallIndirect(e.func, e.args, ft.meta.envParam)
            add_extrinsic(TypeOf, indcall, extrinsic(TypeOf, e))
            return indcall
        else:
            # skip e.func since no func val needs to be generated
            e.args = self.mutate('args')
            return e

    def VoidCall(self, c):
        if is_indirect_func(c.func):
            # NOTE(review): the string below is dead code kept as a
            # placeholder for a VoidCallIndirect rewrite — it never runs.
            return self.mutate()
            """
            ft = extrinsic(TypeOf, c.func)
            #indcall = VoidCallIndirect(c.func, c.args, ft.meta.envParam)
            add_extrinsic(TypeOf, indcall, extrinsic(TypeOf, e))
            return indcall
            """
        else:
            # skip c.func since no func val needs to be generated
            c.args = self.mutate('args')
            return c

    def Bind(self, e):
        if not Bindable.isLocalVar(e.target):
            t = extrinsic(TypeOf, e)
            if matches(t, "TFunc(_, _, _)"):
                # A global function referenced as a value becomes a FuncVal
                assert isinstance(e.target, GlobalVar)
                val = FuncVal(e.target, Nothing())
                add_extrinsic(TypeOf, val, extrinsic(TypeOf, e))
                return val
        return self.mutate()
def expand_closures(unit):
    """Run the closure-extraction pass over a compilation unit."""
    def run_pass():
        return vat.mutate(ClosureExpander, unit, t_DT(ExpandedUnit))
    scope_extrinsic(ClosedVarFunc,
        lambda: scope_extrinsic(VarGlobalReplacement, run_pass))
def is_indirect_func(e):
    """A call target is indirect unless it is a direct Bind of a non-local."""
    if matches(e, "Bind(_)"):
        return Bindable.isLocalVar(e.target)
    return True
class LitExpander(vat.Mutator):
    """Hoists string literals into global literal declarations."""

    def Lit(self, lit):
        m = match(lit.literal)
        if m('StrLit(_)'):
            # Replace the inline string with a bind to a fresh global,
            # named after the literal's original source location
            v = GlobalVar()
            add_extrinsic(Name, v, '.LC%d' % (vat.orig_loc(lit).index,))
            vat.set_orig(v, lit)
            env(EXGLOBAL).newDecls.lits.append(LitDecl(v, lit.literal))
            expr = L.Bind(v)
            add_extrinsic(TypeOf, expr, TStr())
            add_extrinsic(TypeOf, v, TStr())
            return expr
        else:
            # Non-string literals stay inline
            return lit
def builtin_call(name, args):
    """Build a call expression targeting the named builtin function."""
    target = L.Bind(BUILTINS[name])
    return L.Call(target, args)
class AssertionExpander(vat.Mutator):
    """Lowers Assert statements into `if not test: fail(message)` conditionals."""

    def Assert(self, a):
        # Build `not <test>` as the condition guarding the failure branch
        check = builtin_call('not', [self.mutate('test')])
        add_extrinsic(TypeOf, check.func, extrinsic(TypeOf, check.func.target))
        add_extrinsic(TypeOf, check, TBool())
        message = self.mutate('message')
        # Failure branch calls the runtime's fail() with the message
        fail = flatten.runtime_void_call('fail', [message])
        case = CondCase(check, Body([fail]))
        set_orig(case, a)
        cond = S.Cond([case])
        set_orig(cond, a)
        return cond
def convert_decl_types(decls):
    """
    Convert all module declarations' high-level types into LLVM-level
    types (via the LLVMTypeOf extrinsic): C decls, datatype ctors and
    fields, envs, literals, and function decls.
    """
    map_(iconvert_func_var, decls.cdecls)
    for dt in decls.dts:
        if extrinsic(Name, dt) == 'Maybe':
            continue # XXX maybe codegen
        for ctor in dt.ctors:
            # A ctor's LLVM type is a function from its field types to a
            # pointer to the datatype
            fts = []
            for field in ctor.fields:
                ft = convert_type(field.type)
                fts.append(IParam(ft, is_strong_ptr(ft)))
                add_extrinsic(LLVMTypeOf, field, ft)
            ctort = IFunc(fts, IPtr(IData(dt)), IFuncMeta(False))
            add_extrinsic(LLVMTypeOf, ctor, ctort)
    # FIX: loop variable renamed from `env`, which shadowed the framework's
    # env() accessor used throughout this module
    for envDecl in decls.envs:
        add_extrinsic(LLVMTypeOf, envDecl, convert_type(envDecl.type))
    for lit in decls.lits:
        iconvert(lit.var)
    map_(iconvert_func_var, decls.funcDecls)
# Tracks the context variable threaded through env push/pop operations,
# and the current push depth (for balance checking)
ThreadedEnvVar = DT('ThreadedEnvVar', ('var', 'Maybe(Var)'), ('depth', int))
THREADENV = new_env('THREADENV', ThreadedEnvVar)
class TypeConverter(vat.Mutator):
    """Converts high-level types to LLVM types on exprs/pats, inserting casts."""

    def Call(self, e):
        # Direct calls need to convert to direct func types
        if matches(e.func, "Bind(_)"):
            iconvert_func_var(e.func)
            e.args = self.mutate('args')
        else:
            e = self.mutate()
        iconvert(e)
        return convert_expr_casts(e)

    def CallVoid(self, c):
        if matches(c.func, "Bind(_)"):
            iconvert_func_var(c.func)
            c.args = self.mutate('args')
        else:
            c = self.mutate()
        iconvert(c)
        return c

    def t_LExpr(self, e):
        # Generic expression: convert type, then materialize any cast
        e = self.mutate()
        iconvert(e)
        return convert_expr_casts(e)

    def t_Pat(self, p):
        p = self.mutate()
        iconvert(p)
        if not has_extrinsic(TypeCast, p):
            return p
        # Record pattern-level casts in their LLVM-typed form
        src, dest = extrinsic(TypeCast, p)
        add_extrinsic(LLVMPatCast, p, (convert_type(src), convert_type(dest)))
        return p

    def Var(self, v):
        iconvert(v)
        return v
def convert_expr_casts(e):
    """Materialize a recorded TypeCast on `e`, unless it is a no-op pun."""
    if not has_extrinsic(TypeCast, e):
        return e
    src, dest = extrinsic(TypeCast, e)
    low_src = convert_type(src)
    low_dest = convert_type(dest)
    if itypes_equal(low_src, low_dest):
        # Identical LLVM types are only legal for deliberate type puns
        assert types_punned(src, dest), \
                "Pointless non-pun cast %s -> %s" % (src, dest)
        return e
    return cast(low_src, low_dest, e)
class MaybeConverter(vat.Mutator):
    """Lowers Maybe constructor calls: Just(x) -> cast, Nothing() -> null."""

    def Call(self, call):
        if matches(call.func, 'Bind(_)'):
            if Nullable.isMaybe(call.func.target):
                args = call.args
                if len(args) == 1:
                    arg = self.mutate('args', 0)
                    # cast it to Maybe, as the Just() is now omitted
                    argT = extrinsic(LLVMTypeOf, arg)
                    arg = cast(argT, i_ADT(Maybe), arg)
                    return arg
                else:
                    # Nothing() lowers to a typed null pointer
                    assert len(args) == 0
                    null = NullPtr()
                    copy_type(null, call)
                    return null
        return self.mutate()
def add_call_ctx(func, args):
    """
    Append the threaded env-context argument to `args` when the callee's
    type says it takes an env parameter; passes a null pointer when no
    context variable is live yet.
    """
    if extrinsic(TypeOf, func).meta.envParam:
        m = match(env(THREADENV).var)
        if m('Just(ctx)'):
            # Pass the live context variable
            bind = L.Bind(m.ctx)
            copy_type(bind, m.ctx)
            m.ret(bind)
        else:
            # No context yet: pass null
            null = NullPtr()
            add_extrinsic(LLVMTypeOf, null, IVoidPtr())
            m.ret(null)
        args.append(m.result())
class EnvExtrConverter(vat.Mutator):
    """
    Lowers env and extrinsic operations into runtime calls, threading an
    explicit context pointer through functions that require one.
    """

    def BlockFunc(self, func):
        threadedVar = Nothing()
        origDepth = 0
        ft = extrinsic(TypeOf, func.var)
        if ft.meta.envParam:
            # Add context parameter
            var = new_ctx_var()
            threadedVar = Just(var)
            origDepth = 1
            func.params.append(LVar(var))
        info = ThreadedEnvVar(threadedVar, origDepth)
        _ = in_env(THREADENV, info, lambda: self.mutate('blocks'))
        # Every env push must have a matching pop within the function
        assert info.depth == origDepth, "Unbalanced env push/pops"
        return func

    def FuncExpr(self, fe):
        # All FuncExprs should have been extracted by ClosureExpander
        assert False

    def PushEnv(self, stmt):
        bindEnv = L.Bind(stmt.env)
        add_extrinsic(LLVMTypeOf, bindEnv, IVoidPtr())
        init = self.mutate('init')
        init = cast_to_voidptr(init, extrinsic(LLVMTypeOf, init))
        threaded = env(THREADENV)
        threaded.depth += 1
        m = match(threaded.var)
        if m('Just(ctxVar)'):
            # Update the old ctx value with the pushed ctx
            bindCtx = L.Bind(m.ctxVar)
            add_extrinsic(LLVMTypeOf, bindCtx, IVoidPtr())
            call = runtime_call('_pushenv', [bindEnv, bindCtx, init])
            lhs = LhsVar(m.ctxVar)
            add_extrinsic(LLVMTypeOf, lhs, IVoidPtr())
            return S.Assign(lhs, call)
        else:
            # Don't have a ctx var yet, need to introduce one
            null = NullPtr()
            add_extrinsic(LLVMTypeOf, null, IVoidPtr())
            call = runtime_call('_pushenv', [bindEnv, null, init])
            ctx = new_ctx_var()
            threaded.var = Just(ctx)
            pat = PatVar(ctx)
            add_extrinsic(LLVMTypeOf, pat, IVoidPtr())
            return S.Defn(pat, call)

    def PopEnv(self, stmt):
        bindEnv = L.Bind(stmt.env)
        add_extrinsic(LLVMTypeOf, bindEnv, IVoidPtr())
        threaded = env(THREADENV)
        assert threaded.depth > 0, "Env underflow"
        threaded.depth -= 1
        ctxVar = fromJust(threaded.var)
        bindCtx = L.Bind(ctxVar)
        add_extrinsic(LLVMTypeOf, bindCtx, IVoidPtr())
        call = runtime_call('_popenv', [bindEnv, bindCtx])
        if threaded.depth > 0:
            # Still inside nested pushes: keep the ctx var updated
            lhs = LhsVar(ctxVar)
            add_extrinsic(LLVMTypeOf, lhs, IVoidPtr())
            return S.Assign(lhs, call)
        else:
            # clean up this context
            threaded.var = Nothing()
            # TODO: check the return value here against null
            # (ugh, need to insert block... couldn't this be done earlier?)
            # just discard for now
            discard = PatWild()
            add_extrinsic(LLVMTypeOf, discard, IVoidPtr())
            return S.Defn(discard, call)

    def CreateCtx(self, e):
        environ = bind_env(e.env)
        null = NullPtr()
        add_extrinsic(LLVMTypeOf, null, IVoidPtr())
        init = self.mutate('init')
        init = cast_to_voidptr(init, extrinsic(LLVMTypeOf, init))
        call = runtime_call('_pushenv', [environ, null, init])
        return call

    def DestroyCtx(self, e):
        environ = bind_env(e.env)
        ctx = self.mutate('ctx')
        ctx = cast_to_voidptr(ctx, extrinsic(LLVMTypeOf, ctx))
        call = runtime_call('_popenv', [environ, ctx])
        return call

    def Call(self, e):
        e.func = self.mutate('func')
        e.args = self.mutate('args')
        # Callees with an env param receive the threaded ctx as a last arg
        add_call_ctx(e.func, e.args)
        return e

    def IndirectCall(self, e):
        e.func = self.mutate('func')
        e.args = self.mutate('args')
        add_call_ctx(e.func, e.args)
        return e

    def VoidCall(self, c):
        c.func = self.mutate('func')
        c.args = self.mutate('args')
        add_call_ctx(c.func, c.args)
        return c

    def FuncVal(self, e):
        # Capture the current ctx in the function value
        assert isNothing(e.ctx)
        e.ctx = env(THREADENV).var
        return e

    def GetEnv(self, e):
        call = runtime_call('_getenv', [bind_env(e.env), bind_env_ctx()])
        return cast_from_voidptr(call, extrinsic(LLVMTypeOf, e))

    def HaveEnv(self, e):
        return runtime_call('_haveenv', [bind_env(e.env), bind_env_ctx()])

    def InEnv(self, e):
        assert False, "Ought to be flattened"

    def VoidInEnv(self, e):
        assert False, "Ought to be flattened"

    def GetExtrinsic(self, e):
        extr = bind_extrinsic(e.extrinsic)
        node = self.mutate('node')
        node = cast_to_voidptr(node, extrinsic(LLVMTypeOf, node))
        call = runtime_call('_getextrinsic', [extr, node])
        return cast_from_voidptr(call, extrinsic(LLVMTypeOf, e))

    def HasExtrinsic(self, e):
        extr = bind_extrinsic(e.extrinsic)
        node = self.mutate('node')
        node = cast_to_voidptr(node, extrinsic(LLVMTypeOf, node))
        return runtime_call('_hasextrinsic', [extr, node])

    def ScopeExtrinsic(self, e):
        # XXX actually respect extrinsic scoping
        return self.mutate('expr')

    def WriteExtrinsic(self, s):
        # isNew picks add vs. update semantics at runtime
        f = '_addextrinsic' if s.isNew else '_updateextrinsic'
        extr = bind_extrinsic(s.extrinsic)
        node = self.mutate('node')
        val = self.mutate('val')
        node = cast_to_voidptr(node, extrinsic(LLVMTypeOf, node))
        val = cast_to_voidptr(val, extrinsic(LLVMTypeOf, val))
        return runtime_void_call(f, [extr, node, val])
def new_ctx_var():
    """Create a fresh void* variable named 'ctx' for env threading."""
    ctx = Var()
    add_extrinsic(Name, ctx, 'ctx')
    add_extrinsic(LLVMTypeOf, ctx, IVoidPtr())
    return ctx
def bind_env(e):
    """Bind an env object as a void* expression."""
    env_bind = L.Bind(e)
    add_extrinsic(LLVMTypeOf, env_bind, IVoidPtr())
    return env_bind
def bind_env_ctx():
    """Bind the currently-threaded context variable (must exist) as void*."""
    ctx_bind = L.Bind(fromJust(env(THREADENV).var))
    add_extrinsic(LLVMTypeOf, ctx_bind, IVoidPtr())
    return ctx_bind
def bind_extrinsic(extr):
    """Bind an extrinsic object as a void* expression."""
    extr_bind = L.Bind(extr)
    add_extrinsic(LLVMTypeOf, extr_bind, IVoidPtr())
    return extr_bind
# Maps a datatype ctor to the generated global function that replaces it
CtorReplacement = new_extrinsic('CtorReplacement', '*GlobalVar')
def generate_ctor(ctor, dt):
ctort = IPtr(IDataCtor(ctor))
inst = Var()
add_extrinsic(Name, inst, 'inst')
add_extrinsic(LLVMTypeOf, inst, ctort)
sizeof = SizeOf(IPtr(IDataCtor(ctor)))
add_extrinsic(LLVMTypeOf, sizeof, IInt())
instPtr = runtime_call('gc_alloc', [sizeof])
instPtr = cast(IVoidPtr(), IPtr(IDataCtor(ctor)), instPtr)
pat = PatVar(inst)
add_extrinsic(LLVMTypeOf, pat, ctort)
instDefn = S.Defn(pat, instPtr)
ps = []
stmts = [instDefn]
def assign_slot(slot, ft, val):
instBind = L.Bind(inst)
add_extrinsic(LLVMTypeOf, | |
<gh_stars>1-10
#coding: utf-8
#created by @hiromin0627
#<NAME> v5
# Bot version string shown in help output and startup logs
mlgbotver = '5.1.2'
import glob
import gettext
import os
import discord
import asyncio
import re,random
import datetime
from threading import (Event, Thread)
from urllib import request
import configparser
import json
# --- Configuration and module-level state -------------------------------
# All settings come from ./config.ini; gacha data is held in parallel
# 3-element lists indexed by language (0=ja, 1=kr, 2=cn).
ini = configparser.ConfigParser()
ini.read('./config.ini', 'UTF-8')
lang = ini['Language']['lang']
path_to_locale_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'./locale'))
# Map the config language code to a gettext locale
if lang == 'cn': translang = 'zh_TW'
elif lang == 'kr': translang = 'ko_KR'
else: translang = 'ja_JP'
# Installs _() globally for message translation
translater = gettext.translation('messages',localedir=path_to_locale_dir,languages=[translang],fallback=True,codeset="utf8")
translater.install()
token = ini['tokens']['token']
bgm_id = int(ini['ids']['bgm-room'])     # channel id for BGM bot messages (0 = disabled)
log_id = int(ini['ids']['log-room'])
version = ini['Data']['Version']         # 'Latest', 'Retention', or explicit version
prefix = ini['Prefix']['commandprefix']
timeout = float(ini['Reaction']['timeout'])
aftermsgdel = ini['Reaction']['aftermsgdel']  # 'false' disables command-message deletion
client = discord.Client()
# Per-language gacha state (index: 0=ja, 1=kr, 2=cn)
mlg_all = [[],[],[]]
mlg_data = [[],[],[]]
pickup_id = [[],[],[]]
gacha_mode = ['','','']
current_ver = ['','','']
pickup_name = ['','','']
pickup_img = ['','','']
rarity_str = ['R','SR','SSR','FES']
langnamelist = ['ja','kr','cn']
timer = 0
@client.event
async def on_ready():
    """Log startup information and load the gacha database once connected."""
    banner = [
        '---Millishita Gacha ' + mlgbotver + '---',
        'discord.py ver:' + discord.__version__,
        'Logged in as ' + client.user.name + '(ID:' + str(client.user.id) + ')',
        'Bot created by @hiromin0627',
    ]
    for entry in banner:
        print(strtimestamp() + entry)
    await gacha_reload(0, None, version)
@client.event
async def on_message(message):
    """
    Main command dispatcher. Handles the MLhelp command plus all
    prefix-based commands (change/uselatest/retention/update/cards/
    reset/pickup/call/gacha).
    """
    # Ignore other bots to avoid loops
    if message.author.bot:
        return
    if message.content.startswith("MLhelp"):
        # Optionally delete the user's command message after handling
        if not aftermsgdel == 'false': await message.delete()
        print(strtimestamp() + 'Start MLhelp')
        await message.channel.send('ミリシタガシャシミュレーターDiscordボット ' + mlgbotver + '\n' +
            'MLhelp:ヘルプコマンドです。ミリシタガシャの説明を見ることができます。\n' +
            prefix + 'update:ミリシタガシャデータベースをダウンロードして更新します。\n' +
            prefix + 'reset:全ユーザーのMLガシャを引いた回数をリセットします。\n' +
            prefix + 'cards:MLガシャで引いたカード名を確認することができます。\n' +
            prefix + 'pickup:現在のガシャ名とピックアップカードを確認できます。\n' +
            prefix + 'call:MLガシャで引いたカード画像を検索できます。スペースを挟んでカード名を入力してください。(制服シリーズはアイドル名も記入)\n' +
            prefix + 'ガシャ or ' + prefix + '轉蛋 or ' + prefix + '촬영 or ' + prefix + 'gacha:ミリシタガシャシミュレーターができます。' +
            '10を後ろに付け加えると、10連ガシャになります。jp(日本語版)、cn(中国語繁体字版)、kr(韓国語版)を後ろに付け加えると、その言語のガシャが引くことができます。')
    if message.content.startswith(prefix):
        global version
        if not aftermsgdel == 'false':
            # Only delete messages that look like known commands
            if "change" in message.content or "update" in message.content or "uselatest" in message.content or "retention" in message.content or "cards" in message.content or "reset" in message.content or "pickup" in message.content or "call" in message.content or "ガシャ" in message.content or "gacha" in message.content or "轉蛋" in message.content or "촬영" in message.content:
                await message.delete()
        # Determine the gacha language index from the command suffix
        langint = 0
        if not message.content == '':
            langint = langstrtoint(message.content[6:])
        else:
            langint = langtoint()
        if message.content.startswith(prefix + "change"):
            # Switch to an explicitly named database version
            try:
                mlgver = message.content.split()[1]
            except IndexError:
                await message.channel.send('コマンドが間違っています。バージョン名はスペースを空けて入力してください。(例:MLchange 20200101)')
                return
            if await gacha_check_available(mlgver):
                version = mlgver
            else:
                await message.channel.send('該当バージョンが見つかりませんでした。バージョン名を確認してください。(検索バージョン名:' + mlgver + ')')
                return
            current = await current_version_loader()
            # Confirmation prompt differs if the bot currently tracks "Latest"
            if ini['Data']['Version'] == 'Latest':
                msgupdate = await message.channel.send('**現在のガシャデータベース**\n日本語版:' + current["version"][0] + ' アジア版:' + current["version"][1] + '\n**見つかったガシャデータベース**\n' + mlgver + '\n**ローカルバージョンを維持する設定に変更**し、バージョンを入れ替えますか?')
                await msgupdate.add_reaction('⭕')
                await msgupdate.add_reaction('❌')
            else:
                msgupdate = await message.channel.send('**現在のガシャデータベース**\n日本語版:' + current["version"][0] + ' アジア版:' + current["version"][1] + '\n**見つかったガシャデータベース**\n' + mlgver + '\nバージョンを入れ替えますか?')
                await msgupdate.add_reaction('⭕')
                await msgupdate.add_reaction('❌')
            # Wait for a confirming/cancelling reaction from someone else
            while True:
                try:
                    target_reaction, user = await client.wait_for('reaction_add', timeout=timeout)
                    if target_reaction.emoji == '⭕' and user != msgupdate.author:
                        await msgupdate.edit(content='入れ替えを開始します。')
                        await msgupdate.clear_reactions()
                        ini.set("Data","Version","Retention")
                        ini.write(open('./config.ini', 'w'), 'UTF-8')
                        await gacha_reload(1, message, mlgver)
                        await msgupdate.edit(content='入れ替えが完了しました。')
                        return
                    if target_reaction.emoji == '❌' and user != msgupdate.author:
                        await msgupdate.edit(content='入れ替えを中止します。')
                        return
                except:
                    await msgupdate.edit(content='コマンドに失敗しました。もう一度やり直してください。')
                    return
        elif message.content.startswith(prefix + "uselatest"):
            # Persist setting: load the latest database on startup
            ini.set("Data","Version","Latest")
            ini.write(open('./config.ini', 'w'), 'UTF-8')
            await message.channel.send('起動時に最新版をロードするように設定されました。')
        elif message.content.startswith(prefix + "retention"):
            # Persist setting: keep the locally stored version on startup
            ini.set("Data","Version","Retention")
            ini.write(open('./config.ini', 'w'), 'UTF-8')
            await message.channel.send('起動時に保存されているバージョンでロードするように設定されました。')
        elif message.content.startswith(prefix + "update"):
            # Check for and apply a newer gacha database
            latest = gacha_check_update()
            current = await current_version_loader()
            if latest["version"] == current["version"]:
                msgl = await message.channel.send('現在のガシャデータベースは最新のものが使われています。')
                return
            else:
                msgl = await message.channel.send('**最新のガシャデータベース**\n日本語版:' + latest["version"][0] + ' アジア版:' + latest["version"][1] + '\n**現在のガシャデータベース**\n日本語版:' + current["version"][0] + ' アジア版:' + current["version"][1] + '\nアップデートしますか?')
                await msgl.add_reaction('⭕')
                await msgl.add_reaction('❌')
            while True:
                try:
                    target_reaction, user = await client.wait_for('reaction_add', timeout=timeout)
                    if target_reaction.emoji == '⭕' and user != msgl.author:
                        await msgl.edit(content='アップデートを開始します。')
                        await msgl.clear_reactions()
                        version = latest["version"]
                        await gacha_reload(1, message)
                        await msgl.edit(content='アップデートが完了しました。')
                        return
                    if target_reaction.emoji == '❌' and user != msgl.author:
                        await msgl.edit(content='アップデートを中止します。')
                        return
                except:
                    await msgl.edit(content='コマンドに失敗しました。もう一度やり直してください。')
                    return
        elif message.content.startswith(prefix + 'cards'):
            print(strtimestamp() + 'Start MLGacha[cards].')
            await gacha_note(message,langint)
        elif message.content.startswith(prefix + 'reset'):
            # Delete every user's pull-count file
            print(strtimestamp() + 'Start MLGacha[reset].')
            file_list = glob.glob("./gacha_count/*.txt")
            for file in file_list:
                os.remove(file)
            await message.channel.send(_('すべてのユーザーのガチャカウントをリセットしました。'))
        elif message.content.startswith(prefix + 'pickup'):
            # Show the current banner and pickup cards
            print(strtimestamp() + 'Start MLGacha[pickup].')
            name = pickupcheck(langint)
            emb = discord.Embed(title=_('ピックアップカード一覧'), description=name)
            emb.set_image(url=pickup_img[langint])
            emb.set_author(name=pickup_name[langint])
            await message.channel.send('', embed=emb)
        elif message.content.startswith(prefix + 'call'):
            print(strtimestamp() + 'Start MLGacha[call].')
            await gacha_call(message,langint)
        elif message.content.startswith(prefix + "ガシャ") or message.content.startswith(prefix + "gacha") or message.content.startswith(prefix + "轉蛋") or message.content.startswith(prefix + "촬영"):
            # Only one gacha session may use voice at a time
            if voicecheck():
                await message.channel.send(_('他のユーザーがプレイ中です。終了までお待ちください。'))
                return
            elif gacha_mode[langint] == "skip":
                await message.channel.send('ガシャデータがありません。現在使われているバージョンにてこの言語のガシャ情報がありません。')
                return
            # Load (or initialize) this user's pull counter for the banner
            gacha_count = int()
            try:
                with open('./gacha_count/' + current_ver[langint] + '_' + str(message.author.id) + '.txt', 'r') as f:
                    gacha_count = int(f.read())
            except:
                with open('./gacha_count/' + current_ver[langint] + '_' + str(message.author.id) + '.txt', 'w') as f:
                    f.write('0')
            # At 300 pulls the user must exchange a pickup card instead
            if gacha_count >= 300 and (gacha_mode[langint] == "normal" or gacha_mode[langint] == "fes"):
                await gacha_prepare_select(message,langint)
            else:
                await gacha_prepare(message,langint,gacha_count)
async def gacha_prepare_select(message,langint):
    """
    Dream-star exchange flow: when a user reaches the pull cap, let them
    pick one pickup card via numbered reactions, record it in their card
    list, reset their pull counter, and hand off to mlg_touch().
    """
    # If the author is in a voice channel, join it to play effects later
    try:
        vc_id = message.author.voice.channel.id
        channel = client.get_channel(vc_id)
    except:
        vc_id = None
    kind = ''
    result = []
    count_emoji = ['1⃣','2⃣','3⃣','4⃣','5⃣','6⃣','7⃣','8⃣','9⃣','🔟']
    pickup_counter = 0
    pickup_alllist = list()
    name = pickupcheck(langint)
    # Collect the full card records for the current pickup ids
    for row in mlg_data[langint]:
        if row["id"] in pickup_id[langint]:
            pickup_alllist.append(row)
            pickup_counter += 1
    mlgpickupemb = discord.Embed(title=_('交換カード一覧'), description=name)
    mlgpickupemb.set_author(name=message.author.name, icon_url=message.author.avatar_url)
    mlgpickupemb.set_footer(text=pickup_name[langint])
    msgs = await message.channel.send(_('ドリームスターがカード交換数に達しているため、ガシャをご利用いただけません。カードを交換してください。\n該当番号のリアクションを返すと交換できます。'), embed=mlgpickupemb)
    # One numbered reaction per selectable pickup card
    for r in range(pickup_counter):
        await msgs.add_reaction(count_emoji[r])
    kind = _('ドリームスター交換') + '「' + pickup_name[langint] + '」'
    pickup_num = int()
    numemoji_to_int = {'1⃣':0, '2⃣':1, '3⃣':2, '4⃣':3, '5⃣':4, '6⃣':5, '7⃣':6, '8⃣':7, '9⃣':8, '🔟':9}
    # Block until a non-bot user reacts with one of the number emojis
    while True:
        target_reaction, user = await client.wait_for('reaction_add')
        if not user == msgs.author:
            if target_reaction.emoji in numemoji_to_int:
                pickup_num = numemoji_to_int[target_reaction.emoji]
                break
    result = [pickup_alllist[pickup_num]]
    await msgs.delete()
    print(strtimestamp() + 'Start MLChange[' + kind + '] by ' + str(message.author.id) + '.')
    # Reset this user's pull counter for the current banner
    try:
        with open('./gacha_count/' + current_ver[langint] + '_' + str(message.author.id) + '.txt', 'w') as f:
            f.write(str(0))
    except:
        print(strtimestamp() + '[ERROR]Gacha count FAILED.')
    # Load the user's owned-card bitmap ('0'/'1' per card id)
    char_list = list()
    try:
        with open('./gacha/' + langnamelist[langint] + str(message.author.id) + '.txt', 'r') as f:
            listline = f.read()
            char_list = list(listline)
    except:
        pass
    with open('./gacha/' + langnamelist[langint] + str(message.author.id) + '.txt', 'w+') as f:
        try:
            char_list[result[0]["id"]] = '1'
        except:
            # First time: initialize a 500-slot bitmap, then mark the card
            for n in range(500):
                char_list.append('0')
            char_list[result[0]["id"]] = '1'
        newlistline = ''.join(char_list)
        f.write(newlistline)
    if vc_id == None:
        vc = None
        botmsg = None
    else:
        # NOTE(review): if the user is in voice but bgm_id == 0, botmsg is
        # never assigned here and the mlg_touch() call below would raise
        # NameError — confirm whether bgm_id == 0 with voice is possible.
        if not bgm_id == 0:
            toBot = client.get_channel(bgm_id)
            botmsg = await toBot.send('ML' + str(vc_id))
        vc = await channel.connect()
    await mlg_touch(message,result,kind,vc,botmsg,langint)
    return
async def gacha_prepare(message, langint, gacha_count):
    """Run one gacha draw for the invoking user and hand the results to mlg_touch.

    :param message: the command message; ``message.author`` is the player.
    :param langint: index selecting the per-language configuration tables
        (``gacha_mode``, ``pickup_name``, ``current_ver``, ``langnamelist``, ...).
    :param gacha_count: the user's draw count so far for the current version.
    """
    # Remember the author's voice channel so we can join it later.
    try:
        vc_id = message.author.voice.channel.id
        channel = client.get_channel(vc_id)
    except AttributeError:
        # Author is not connected to a voice channel.
        vc_id = None

    # Decide how many cards this draw produces.
    mode = gacha_mode[langint]
    if mode in ("normal", "fes", "type"):
        # NOTE(review): the original tested "'10' in message.content" twice
        # with identical operands, so the second test was dead code.
        # Presumably a full-width "10" was intended — confirm against the
        # command spec before adding it.
        role = 10 if '10' in message.content else 1
    elif mode == "party":
        role = 10
    else:
        role = 1

    # Persist the per-user draw counter (normal/fes modes only).
    if mode in ("normal", "fes"):
        try:
            gacha_count += role
            with open('./gacha_count/' + current_ver[langint] + '_' + str(message.author.id) + '.txt', 'w') as f:
                f.write(str(gacha_count))
        except Exception:
            print(strtimestamp() + '[ERROR]Failed to count.')

    result = await gacha_emission(langint, role)
    print(strtimestamp() + 'Start MLGacha[' + pickup_name[langint] + '] by ' + message.author.name + '.')

    # Load the user's card-ownership bitmap (one '0'/'1' character per card id).
    char_list = list()
    try:
        with open('./gacha/' + langnamelist[langint] + str(message.author.id) + '.txt', 'r') as f:
            char_list = list(f.read())
    except OSError:
        pass  # first draw: no bitmap file exists yet

    # Mark every drawn card as owned, then write the bitmap once.  (The
    # original re-opened and re-truncated the file for every card; the final
    # file contents are identical.)
    for box in result:
        try:
            char_list[box["id"]] = '1'
        except IndexError:
            # Bitmap too short for this card id: extend it by 500 slots,
            # exactly as the original did, then retry.
            for _ in range(500):
                char_list.append('0')
            char_list[box["id"]] = '1'
    if result:
        with open('./gacha/' + langnamelist[langint] + str(message.author.id) + '.txt', 'w+') as f:
            f.write(''.join(char_list))

    # Roll for an optional "cameraman" flavour message based on rarity.
    mess = random.randint(1, 10)
    fes_flag = 0
    ssr_flag = 0
    sr_flag = 0
    for val in result:
        if val["rarity"] == 3:
            fes_flag = 1
        elif val["rarity"] == 2:
            ssr_flag = 1
        elif val["rarity"] == 1:
            sr_flag = 1
    phrase = [_('最高の一枚ができましたのでぜひご確認ください!'),_('みんなのいい表情が撮れました!'),_('楽しそうなところが撮れましたよ')]
    cameratxt = ''
    if mess >= 2:
        if ssr_flag == 1 or fes_flag == 1:
            cameratxt = phrase[0]
        elif sr_flag == 1:
            # Equivalent to the original's "sr or ssr or fes" test: ssr/fes
            # were already consumed by the branch above.  The original's
            # third branch (phrase[2]) was unreachable and is dropped.
            cameratxt = phrase[1]

    # Deliver the flavour message (with voice playback when in a VC), then
    # hand off to mlg_touch.  botmsg is pre-initialised to avoid the original
    # NameError when the author is in a VC but bgm_id == 0.
    vc = None
    botmsg = None
    if vc_id is None:
        if not cameratxt == '':
            camera = await message.channel.send(cameratxt)
            await asyncio.sleep(3)
            await camera.delete()
    else:
        vc = await channel.connect()
        if not cameratxt == '':
            vc.play(discord.FFmpegPCMAudio('./resources/message.mp3'))
            camera = await message.channel.send(cameratxt)
            while vc.is_playing():
                await asyncio.sleep(1)
            await camera.delete()
        if not bgm_id == 0:
            toBot = client.get_channel(bgm_id)
            botmsg = await toBot.send('ML' + str(vc_id))
    await mlg_touch(message, result, pickup_name[langint], vc, botmsg, langint)
async def gacha_emission(langint,role):
#慣れてないのでメモ
#gachaMode = [normal,fes,party,final,special,type,skip]
result = []
ssr_rate = 9700
pick_rate = 99
if gacha_mode[langint] == "fes":
ssr_rate = 9400
pick_rate = 198
if gacha_mode[langint] == "normal" or gacha_mode[langint] == "fes" or gacha_mode[langint] == "final":
if gacha_mode[langint] == "final":
role = 10
rpick = list()
rcard = list()
srpick = list()
srcard = list()
ssrpick = list()
ssrcard = list()
for row in mlg_data[langint]:
if row["rarity"] == 0 and row["id"] in pickup_id[langint]:
rpick.append(row)
elif row["rarity"] == 0 and not row["id"] in pickup_id[langint]:
rcard.append(row)
elif row["rarity"] == 1 and row["id"] in pickup_id[langint]:
srpick.append(row)
elif row["rarity"] == 1 and not row["id"] | |
<gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. <NAME> (<EMAIL>) and the
# RMG Team (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module provides the :class:`PressureDependenceJob` class, which represents
a job for computing the pressure-dependent rate coefficients of a unimolecular
reaction network.
"""
import os.path
import math
import numpy
import logging
import rmgpy.constants as constants
import rmgpy.quantity as quantity
from rmgpy.kinetics import Chebyshev, PDepArrhenius, getRateCoefficientUnitsFromReactionOrder
from rmgpy.reaction import Reaction
from rmgpy.kinetics.tunneling import Wigner, Eckart
from rmgpy.cantherm.output import prettify
################################################################################
class PressureDependenceJob(object):
"""
A representation of a pressure dependence job. The attributes are:
======================= ====================================================
Attribute Description
======================= ====================================================
`Tmin` The minimum temperature at which to compute :math:`k(T,P)` values
`Tmax` The maximum temperature at which to compute :math:`k(T,P)` values
`Tcount` The number of temperatures at which to compute :math:`k(T,P)` values
`Pmin` The minimum pressure at which to compute :math:`k(T,P)` values
`Pmax` The maximum pressure at which to compute :math:`k(T,P)` values
`Pcount` The number of pressures at which to compute :math:`k(T,P)` values
`Emin` The minimum energy to use to compute :math:`k(T,P)` values
`Emax` The maximum energy to use to compute :math:`k(T,P)` values
`maximumGrainSize` The maximum energy grain size to use to compute :math:`k(T,P)` values
`minimumGrainCount` The minimum number of energy grains to use to compute :math:`k(T,P)` values
`method` The method to use to reduce the master equation to :math:`k(T,P)` values
`interpolationModel` The interpolation model to fit to the computed :math:`k(T,P)` values
`maximumAtoms` The maximum number of atoms to apply pressure dependence to (in RMG jobs)
`activeKRotor` A flag indicating whether to treat the K-rotor as active or adiabatic
`activeJRotor` A flag indicating whether to treat the J-rotor as active or adiabatic
`rmgmode` A flag that toggles "RMG mode", described below
----------------------- ----------------------------------------------------
`network` The unimolecular reaction network
`Tlist` An array of temperatures at which to compute :math:`k(T,P)` values
`Plist` An array of pressures at which to compute :math:`k(T,P)` values
`Elist` An array of energies to use to compute :math:`k(T,P)` values
======================= ====================================================
In RMG mode, several alterations to the k(T,P) algorithm are made both for
speed and due to the nature of the approximations used:
* Densities of states are not computed for product channels
* Arbitrary rigid rotor moments of inertia are included in the active modes;
these cancel in the ILT and equilibrium expressions
* k(E) for each path reaction is computed in the direction A -> products,
where A is always an explored isomer; the high-P kinetics are reversed
if necessary for this purpose
* Thermodynamic parameters are always used to compute the reverse k(E)
from the forward k(E) for each path reaction
RMG mode should be turned off by default except in RMG jobs.
"""
def __init__(self, network,
Tmin=None, Tmax=None, Tcount=0, Tlist=None,
Pmin=None, Pmax=None, Pcount=0, Plist=None,
maximumGrainSize=None, minimumGrainCount=0,
method=None, interpolationModel=None, maximumAtoms=None,
activeKRotor=True, activeJRotor=True, rmgmode=False):
self.network = network
self.Tmin = Tmin
self.Tmax = Tmax
self.Tcount = Tcount
if Tlist is not None:
self.Tlist = Tlist
self.Tmin = (numpy.min(self.Tlist.value_si),"K")
self.Tmax = (numpy.max(self.Tlist.value_si),"K")
self.Tcount = len(self.Tlist.value_si)
else:
self.Tlist = None
self.Pmin = Pmin
self.Pmax = Pmax
self.Pcount = Pcount
if Plist is not None:
self.Plist = Plist
self.Pmin = (numpy.min(self.Plist.value_si)*1e-5,"bar")
self.Pmax = (numpy.max(self.Plist.value_si)*1e-5,"bar")
self.Pcount = len(self.Plist.value_si)
else:
self.Plist = None
self.maximumGrainSize = maximumGrainSize
self.minimumGrainCount = minimumGrainCount
self.Emin = None
self.Emax = None
self.Elist = None
self.method = method
self.interpolationModel = interpolationModel
self.maximumAtoms = maximumAtoms
self.activeKRotor = activeKRotor
self.activeJRotor = activeJRotor
self.rmgmode = rmgmode
    @property
    def Tmin(self):
        """The minimum temperature at which the computed k(T,P) values are valid, or ``None`` if not defined."""
        return self._Tmin
    @Tmin.setter
    def Tmin(self, value):
        # Normalized through quantity.Temperature so plain numbers,
        # (value, "units") tuples and Quantity objects are all accepted.
        self._Tmin = quantity.Temperature(value)
    @property
    def Tmax(self):
        """The maximum temperature at which the computed k(T,P) values are valid, or ``None`` if not defined."""
        return self._Tmax
    @Tmax.setter
    def Tmax(self, value):
        self._Tmax = quantity.Temperature(value)
    @property
    def Tlist(self):
        """The temperatures at which the k(T,P) values are computed."""
        return self._Tlist
    @Tlist.setter
    def Tlist(self, value):
        self._Tlist = quantity.Temperature(value)
    @property
    def Pmin(self):
        """The minimum pressure at which the computed k(T,P) values are valid, or ``None`` if not defined."""
        return self._Pmin
    @Pmin.setter
    def Pmin(self, value):
        # Normalized through quantity.Pressure (same convention as Tmin).
        self._Pmin = quantity.Pressure(value)
    @property
    def Pmax(self):
        """The maximum pressure at which the computed k(T,P) values are valid, or ``None`` if not defined."""
        return self._Pmax
    @Pmax.setter
    def Pmax(self, value):
        self._Pmax = quantity.Pressure(value)
    @property
    def Plist(self):
        """The pressures at which the k(T,P) values are computed."""
        return self._Plist
    @Plist.setter
    def Plist(self, value):
        self._Plist = quantity.Pressure(value)
    @property
    def maximumGrainSize(self):
        """The maximum allowed energy grain size, or ``None`` if not defined."""
        return self._maximumGrainSize
    @maximumGrainSize.setter
    def maximumGrainSize(self, value):
        # Normalized through quantity.Energy.
        self._maximumGrainSize = quantity.Energy(value)
def copy(self):
"""
Return a copy of the pressure dependence job.
"""
return PressureDependenceJob(
network = self.network,
Tmin = self.Tmax,
Tmax = self.Tmax,
Tcount = self.Tcount,
Tlist = self.Tlist,
Pmin = self.Pmin,
Pmax = self.Pmax,
Pcount = self.Pcount,
Plist = self.Plist,
maximumGrainSize = self.maximumGrainSize,
minimumGrainCount = self.minimumGrainCount,
method = self.method,
interpolationModel = self.interpolationModel,
activeKRotor = self.activeKRotor,
activeJRotor = self.activeJRotor,
rmgmode = self.rmgmode,
)
def execute(self, outputFile, plot):
self.network.printSummary()
if outputFile is not None:
self.draw(os.path.dirname(outputFile))
self.initialize()
self.K = self.network.calculateRateCoefficients(self.Tlist.value_si, self.Plist.value_si, self.method)
self.fitInterpolationModels()
if outputFile is not None:
self.save(outputFile)
if plot:
self.plot(os.path.dirname(outputFile))
def generateTemperatureList(self):
"""
Returns an array of temperatures based on the interpolation `model`,
minimum and maximum temperatures `Tmin` and `Tmax` in K, and the number of
temperatures `Tcount`. For Chebyshev polynomials a Gauss-Chebyshev
distribution is used; for all others a linear distribution on an inverse
temperature domain is used. Note that the Gauss-Chebyshev grid does *not*
place `Tmin` and `Tmax` at the endpoints, yet the interpolation is still
valid up to these values.
"""
Tmin = self.Tmin.value_si
Tmax = self.Tmax.value_si
Tcount = self.Tcount
if self.Tlist is not None:
pass
elif self.interpolationModel[0].lower() == 'chebyshev':
# Distribute temperatures on a Gauss-Chebyshev grid
Tlist = numpy.zeros(Tcount, numpy.float64)
for i in range(Tcount):
T = -math.cos((2*i+1) * math.pi / (2*self.Tcount))
T = 2.0 / ((1.0/Tmax - 1.0/Tmin) * T + 1.0/Tmax + 1.0/Tmin)
Tlist[i] = T
self.Tlist = (Tlist,"K")
else:
# Distribute temperatures evenly on a T^-1 domain
Tlist = 1.0/numpy.linspace(1.0/Tmax, 1.0/Tmin, Tcount)
self.Tlist = (Tlist,"K")
return self.Tlist.value_si
    def initialize(self):
        """
        Prepare the job for execution: fill in default tunneling parameters
        for every path reaction, initialize the network's energy grains and
        rotor treatment, and generate the temperature and pressure lists.
        """
        for reaction in self.network.pathReactions:
            tunneling = reaction.transitionState.tunneling
            # Both tunneling models default their frequency to the transition
            # state's frequency (in cm^-1) when not given explicitly.
            if isinstance(tunneling, Wigner) and tunneling.frequency is None:
                tunneling.frequency = (reaction.transitionState.frequency.value_si,"cm^-1")
            elif isinstance(tunneling, Eckart) and tunneling.frequency is None:
                tunneling.frequency = (reaction.transitionState.frequency.value_si,"cm^-1")
                # Eckart additionally needs reactant, TS and product energies;
                # value_si is J/mol, hence the 0.001 factor to express kJ/mol.
                tunneling.E0_reac = (sum([reactant.conformer.E0.value_si for reactant in reaction.reactants])*0.001,"kJ/mol")
                tunneling.E0_TS = (reaction.transitionState.conformer.E0.value_si*0.001,"kJ/mol")
                tunneling.E0_prod = (sum([product.conformer.E0.value_si for product in reaction.products])*0.001,"kJ/mol")
            elif tunneling is not None:
                raise ValueError('Unknown tunneling model {0!r} for path reaction {1}.'.format(tunneling, reaction))
        # A grain size of 0.0 makes the network fall back on minimumGrainCount.
        maximumGrainSize = self.maximumGrainSize.value_si if self.maximumGrainSize is not None else 0.0
        self.network.initialize(
            Tmin = self.Tmin.value_si,
            Tmax = self.Tmax.value_si,
            Pmin = self.Pmin.value_si,
            Pmax = self.Pmax.value_si,
            maximumGrainSize = maximumGrainSize,
            minimumGrainCount = self.minimumGrainCount,
            activeJRotor = self.activeJRotor,
            activeKRotor = self.activeKRotor,
            rmgmode = self.rmgmode,
        )
        self.generateTemperatureList()
        self.generatePressureList()
def generatePressureList(self):
"""
Returns an array of pressures based on the interpolation `model`,
minimum and maximum pressures `Pmin` | |
<reponame>sboosali/commands-frontends-dragon13
#
# Python Macro Language for Dragon NaturallySpeaking
# (c) Copyright 1999 by <NAME>
# Portions (c) Copyright 1999 by Dragon Systems, Inc.
#
# _mouse.py
# Sample macro file which implements mouse and keyboard movement modes
# similar to DragonDictate for Windows
#
# April 1, 2000
# Updates from <NAME>
# - cancel arrow movement when the active window changes
# - add support for tray icon during arrow movement
#
# In the grammar we map some keywords into pixel counts according to the
# following dictionary. These numbers can be safely changed within reason.
amountDict = {
    'little':3,       # as in 'move a little left' (pixels)
    'lot':10 }        # as in 'move left a lot' (pixels)

# For caret movement, this represents the default speed in milliseconds
# between arrow keys
defaultMoveSpeed = 250

# For caret movement, this is the rate change applied when you make it
# faster. For example, 1.5 is a 50% speed increase.
moveRateChange = 2.0

# For mouse movement, this represents the default speed in milliseconds
# between pixel movements and the default number of pixels per move. We
# do not want the update rate to be less than 50 milliseconds so if it
# gets faster than that, we adjust the mouse pixels instead.
defaultMouseSpeed = 100
defaultMousePixels = 1

# For mouse movement, this is the rate change applied when you make it
# faster. For example, 1.5 is a 50% speed increase.
mouseRateChange = 3.0
############################################################################
#
# Here are some of our instance variables
#
# self.haveCallback set when the timer callback in installed
# self.curMode 1 for caret movement, 2 for mouse movement, or None
# self.curSpeed current movement speed (milliseconds for timer)
# self.curPixels for mouse movement, pixels per move
# self.lastClock time of last timer callback or 0
# self.curDirection direction of movement as string
#
import string # for atoi
import time # for clock
import natlink
from natlinkutils import *
class ThisGrammar(GrammarBase):
# when we unload the grammar, we must make sure we clear the timer
# callback so we keep a variable which is set when we currently own
# the timer callback
def __init__(self):
self.haveCallback = 0
self.curMode = None
self.iconState = 0
GrammarBase.__init__(self)
def unload(self):
if self.haveCallback:
natlink.setTimerCallback(None,0)
self.haveCallback = 0
GrammarBase.unload(self)
# This is our grammar. The rule 'start' is what is normally active. The
# rules 'nowMoving' and 'nowMousing' are used when we are in caret or
# mouse movement mode.
gramDefn = """
# this is the rule which is normally active
<start> exported = <startMoving> | <startMousing> |
<nudgeMouse> | <mouseButton>;
# this rule is active when we are moving the caret
<nowMoving> exported =
[ move ] ( {direction} | [much] faster | [much] slower ) |
stop [ moving ];
# this rule is active when we are moving the mouse
<nowMousing> exported =
[ move ] ( {direction} | faster | slower ) |
stop [ moving ] | <mouseButton> | <mouseButton>;
# here are the subrules which deal with caret movement
<startMoving> = move {direction} | start moving {direction};
# here are the subrules which deal with mouse movement
<startMousing> = [ start moving ] mouse {direction};
<nudgeMouse> =
nudge mouse {direction} |
[ move ] mouse {direction} ( a little | a lot | {count} pixels ) |
[ move ] mouse ( a little | a lot | {count} pixels ) {direction};
<mouseButton> =
[ mouse ] [ left | middle | right ] [ single | double ] click;
"""
# These are the lists which we use in our grammar. The directions and
# counts are implemented as lists to make parsing easier (words from
# lists are referenced as part of the rule which includes the list).
listDefn = {
'direction' : ['up','down','left','right'],
'count' : ['1','2','3','4','5','6','7','8','9','10','11','12','13',
'14','15','16','17','18','19','20','25','30','35','40','45','50'] }
# Load the grammar, build the direction and count lists and activate the
# main rule ('start')
def initialize(self):
self.load(self.gramDefn)
for listName in self.listDefn.keys():
self.setList(listName,self.listDefn[listName])
self.activateSet(['start'],exclusive=0)
# This subroutine moves the mouse cursor in an indicated direction
# by an indicated number of pixels
def moveMouse(self,direction,count):
xPos,yPos = natlink.getCursorPos()
if direction == 'up': yPos = yPos - count
elif direction == 'down': yPos = yPos + count
elif direction == 'left': xPos = xPos - count
elif direction == 'right': xPos = xPos + count
xSize,ySize = natlink.getScreenSize()
if xPos < 0: xPos = 0
if xPos >= xSize: xPos = xSize - 1
if yPos < 0: yPos = 0
if yPos >= ySize: yPos = ySize - 1
natlink.playEvents([(wm_mousemove,xPos,yPos)])
# This subroutine cancels any active movement mode
def cancelMode(self):
self.curMode = None
if self.haveCallback:
natlink.setTimerCallback(None,0)
self.haveCallback = 0
self.activateSet(['start'],exclusive=0)
natlink.setTrayIcon()
# This function is called on a timer event. If we are in a movement
# mode then we move the mouse or caret by the indicated amount.
#
# The apparent speed for mouse movement is the speed divided by the
# number of pixels per move. We calculate the number of pixels per
# move to ensure that the speed is never faster than 50 milliseconds.
    def onTimer(self):
        # Timer callback: advance the caret or the mouse one step, or drop
        # out of movement mode when the conditions no longer hold.
        if self.lastClock:
            # NOTE(review): diff is computed but never used afterwards --
            # presumably left over from measuring the real callback interval.
            diff = int( (time.clock() - self.lastClock) * 1000 )
            self.lastClock = time.clock()
        if self.curMode == 1:
            moduleInfo = natlink.getCurrentModule()
            # Keep moving only while the mic is on and the foreground window
            # is still the one that was active when movement mode started.
            if natlink.getMicState() == 'on' and moduleInfo == self.moduleInfo:
                self.setTrayIcon(1)
                # Note: it is often during a playString operation that the
                # "stop moving" command occurs
                natlink.playString('{'+self.curDirection+'}')
            else:
                self.cancelMode()
        elif self.curMode == 2:
            self.moveMouse(self.curDirection,self.curPixels)
# This handles the nudgeMouse rule. We want to extract the direction
# and the count or amount.
def gotResults_nudgeMouse(self,words,fullResults):
self.cancelMode()
direction = findKeyWord(words,self.listDefn['direction'])
count = findKeyWord(words,self.listDefn['count'])
amount = findKeyWord(words,amountDict.keys())
if count:
count = string.atoi(count)
elif amount:
count = amountDict[amount]
self.moveMouse(direction,count)
# This handles the mouseButton rule. We want to extract the button
# name (if specified) and whether this is a single or double click.
def gotResults_mouseButton(self,words,fullResults):
self.cancelMode()
which = findKeyWord(words,['left','right','middle'])
if not which: which = 'left'
if 'double' in words: count = 2
else: count = 1
buttonClick(which,count)
# This handles the startMoving rule. We only need to extract the
# direction. To turn on cursor movement mode we need to install a
# timer callback (warning: this is global) and set the recognition
# state to be exclusively from the rule <nowMoving>. The cursor only
# moves in the timer callback itself.
def gotResults_startMoving(self,words,fullResults):
self.cancelMode()
direction = findKeyWord(words,self.listDefn['direction'])
self.curMode = 1
self.curDirection = direction
self.setTrayIcon(0)
self.moduleInfo = natlink.getCurrentModule()
self.curSpeed = defaultMoveSpeed
self.lastClock = time.clock()
natlink.setTimerCallback(self.onTimer,defaultMoveSpeed)
self.haveCallback = 1
self.activateSet(['nowMoving'],exclusive=1)
# This handles the nowMoving rule. We want to extract the keyword which
# tells us what to do.
def gotResults_nowMoving(self,words,fullResults):
direction = findKeyWord(words,self.listDefn['direction'])
if direction:
self.curDirection = direction
self.setTrayIcon(0)
elif 'stop' in words:
self.cancelMode()
elif 'faster' in words:
speed = int(self.curSpeed / moveRateChange)
if 'much' in words:
speed = int(speed / (moveRateChange*moveRateChange))
if speed < 50: speed = 50
self.curSpeed = speed
natlink.setTimerCallback(self.onTimer,speed)
elif 'slower' in words:
speed = int(self.curSpeed * moveRateChange)
if 'much' in words:
speed = int(speed * (moveRateChange*moveRateChange))
if speed > 4000: speed = 4000
self.curSpeed = speed
natlink.setTimerCallback(self.onTimer,speed)
# This handles the startMousing rule. We only need to extract the
# direction. To turn on cursor movement mode we need to install a
# timer callback (warning: this is global) and set the recognition
# state to be exclusively from the rule <nowMoving>. The cursor only
# moves in the timer callback itself.
def gotResults_startMousing(self,words,fullResults):
self.cancelMode()
direction = findKeyWord(words,self.listDefn['direction'])
self.curMode = 2
self.curDirection = direction
self.curSpeed = defaultMouseSpeed
self.curPixels = defaultMousePixels
self.lastClock = time.clock()
natlink.setTimerCallback(self.onTimer,defaultMouseSpeed)
self.haveCallback = 1
self.activateSet(['nowMousing'],exclusive=1)
# This handles the nowMousing rule. We want to extract the keyword which
# tells us what to do.
def gotResults_nowMousing(self,words,fullResults):
direction = findKeyWord(words,self.listDefn['direction'])
if direction:
self.curDirection = direction
elif 'stop' in words:
self.cancelMode()
elif 'faster' in words:
speed = int(self.curSpeed / moveRateChange)
pixels = self.curPixels
while speed < 50:
speed = speed * 2
pixels = pixels | |
#
# Author: <NAME> 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, logsumexp, betaln, gammaln as gamln
from scipy._lib._numpy_compat import broadcast_to
from scipy._lib._util import _lazywhere
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
    r"""A binomial discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `binom` is:
    .. math::
       f(k) = \binom{n}{k} p^k (1-p)^{n-k}
    for ``k`` in ``{0, 1,..., n}``.
    `binom` takes ``n`` and ``p`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, n, p):
        # Delegate sampling to numpy's binomial generator.
        return self._random_state.binomial(n, p, self._size)

    def _argcheck(self, n, p):
        # n is a non-negative count; p is a probability.
        return (n >= 0) & (p >= 0) & (p <= 1)

    def _get_support(self, n, p):
        # Support is {0, 1, ..., n}.
        return self.a, n

    def _logpmf(self, x, n, p):
        k = floor(x)
        # log C(n, k) via log-gamma; xlogy/xlog1py handle p in {0, 1}.
        log_choose = gamln(n + 1) - (gamln(k + 1) + gamln(n - k + 1))
        return log_choose + special.xlogy(k, p) + special.xlog1py(n - k, -p)

    def _pmf(self, x, n, p):
        # pmf(k) = C(n, k) * p**k * (1-p)**(n-k), evaluated in log space.
        return exp(self._logpmf(x, n, p))

    def _cdf(self, x, n, p):
        return special.bdtr(floor(x), n, p)

    def _sf(self, x, n, p):
        return special.bdtrc(floor(x), n, p)

    def _ppf(self, q, n, p):
        upper = ceil(special.bdtrik(q, n, p))
        lower = np.maximum(upper - 1, 0)
        # Prefer the smaller integer whenever its CDF already reaches q.
        return np.where(special.bdtr(lower, n, p) >= q, lower, upper)

    def _stats(self, n, p, moments='mv'):
        q = 1.0 - p
        mean = n * p
        variance = n * p * q
        skew = kurt = None
        if 's' in moments:
            skew = (q - p) / sqrt(variance)
        if 'k' in moments:
            kurt = (1.0 - 6 * p * q) / variance
        return mean, variance, skew, kurt

    def _entropy(self, n, p):
        support = np.r_[0:n + 1]
        return np.sum(entr(self._pmf(support, n, p)), axis=0)
# Public module-level instance of the binomial distribution.
binom = binom_gen(name='binom')
class bernoulli_gen(binom_gen):
    r"""A Bernoulli discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `bernoulli` is:
    .. math::
       f(k) = \begin{cases}1-p &\text{if } k = 0\\
                           p &\text{if } k = 1\end{cases}
    for :math:`k` in :math:`\{0, 1\}`.
    `bernoulli` takes :math:`p` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    # Every method forwards to the binomial distribution with n fixed at 1.
    def _rvs(self, p):
        return binom_gen._rvs(self, 1, p)

    def _argcheck(self, p):
        return (p >= 0) & (p <= 1)

    def _get_support(self, p):
        # Intentionally overrides binom_gen._get_support: the support here is
        # the fixed interval [self.a, self.b] = [0, 1], independent of shapes.
        return self.a, self.b

    def _logpmf(self, x, p):
        return binom._logpmf(x, 1, p)

    def _pmf(self, x, p):
        # pmf(0) = 1 - p, pmf(1) = p.
        return binom._pmf(x, 1, p)

    def _cdf(self, x, p):
        return binom._cdf(x, 1, p)

    def _sf(self, x, p):
        return binom._sf(x, 1, p)

    def _ppf(self, q, p):
        return binom._ppf(q, 1, p)

    def _stats(self, p):
        return binom._stats(1, p)

    def _entropy(self, p):
        # Binary entropy H(p).
        return entr(p) + entr(1 - p)
# Public module-level instance; b=1 fixes the upper support bound.
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
    r"""A negative binomial discrete random variable.
    %(before_notes)s
    Notes
    -----
    Negative binomial distribution describes a sequence of i.i.d. Bernoulli
    trials, repeated until a predefined, non-random number of successes occurs.
    The probability mass function of the number of failures for `nbinom` is:
    .. math::
       f(k) = \binom{k+n-1}{n-1} p^n (1-p)^k
    for :math:`k \ge 0`.
    `nbinom` takes :math:`n` and :math:`p` as shape parameters where n is the
    number of successes, whereas p is the probability of a single success.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, n, p):
        return self._random_state.negative_binomial(n, p, self._size)

    def _argcheck(self, n, p):
        return (n > 0) & (p >= 0) & (p <= 1)

    def _pmf(self, x, n, p):
        # pmf(k) = C(k+n-1, n-1) * p**n * (1-p)**k, evaluated in log space.
        return exp(self._logpmf(x, n, p))

    def _logpmf(self, x, n, p):
        log_coeff = gamln(n + x) - gamln(x + 1) - gamln(n)
        return log_coeff + n * log(p) + special.xlog1py(x, -p)

    def _cdf(self, x, n, p):
        # CDF expressed through the regularized incomplete beta function.
        return special.betainc(n, floor(x) + 1, p)

    def _sf_skip(self, x, n, p):
        # skip because special.nbdtrc doesn't work for 0<n<1
        return special.nbdtrc(floor(x), n, p)

    def _ppf(self, q, n, p):
        upper = ceil(special.nbdtrik(q, n, p))
        lower = (upper - 1).clip(0.0, np.inf)
        # Prefer the smaller integer whenever its CDF already reaches q.
        return np.where(self._cdf(lower, n, p) >= q, lower, upper)

    def _stats(self, n, p):
        Q = 1.0 / p
        P = Q - 1.0
        mean = n * P
        variance = n * P * Q
        skew = (Q + P) / sqrt(n * P * Q)
        kurt = (1.0 + 6 * P * Q) / (n * P * Q)
        return mean, variance, skew, kurt
# Public module-level instance of the negative binomial distribution.
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
    r"""A geometric discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `geom` is:
    .. math::
        f(k) = (1-p)^{k-1} p
    for :math:`k \ge 1`.
    `geom` takes :math:`p` as shape parameter.
    %(after_notes)s
    See Also
    --------
    planck
    %(example)s
    """
    def _rvs(self, p):
        return self._random_state.geometric(p, size=self._size)

    def _argcheck(self, p):
        return (p <= 1) & (p >= 0)

    def _pmf(self, k, p):
        # pmf(k) = (1-p)**(k-1) * p for k >= 1.
        return np.power(1 - p, k - 1) * p

    def _logpmf(self, k, p):
        return special.xlog1py(k - 1, -p) + log(p)

    def _cdf(self, x, p):
        # 1 - (1-p)**floor(x), computed stably with log1p/expm1.
        return -expm1(log1p(-p) * floor(x))

    def _sf(self, x, p):
        return np.exp(self._logsf(x, p))

    def _logsf(self, x, p):
        return floor(x) * log1p(-p)

    def _ppf(self, q, p):
        candidate = ceil(log1p(-q) / log1p(-p))
        # Step down one when the next-smaller positive integer already
        # satisfies the CDF condition.
        below = self._cdf(candidate - 1, p)
        return np.where((below >= q) & (candidate > 0), candidate - 1, candidate)

    def _stats(self, p):
        mean = 1.0 / p
        qr = 1.0 - p
        variance = qr / p / p
        skew = (2.0 - p) / sqrt(qr)
        kurt = np.polyval([1, -6, 6], p) / (1.0 - p)
        return mean, variance, skew, kurt
# Public module-level instance; a=1 fixes the lower support bound at k=1.
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
r"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
`M` is the total number of objects, `n` is total number of Type I objects.
The random variate represents the number of Type I objects in `N` drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not
universally accepted. See the Examples for a clarification of the
definitions used here.
The probability mass function is defined as,
.. math:: p(k, M, n, N) = \frac{\binom{n}{k} \binom{M - n}{N - k}}
{\binom{M}{N}}
for :math:`k \in [\max(0, N - M + n), \min(n, N)]`, where the binomial
coefficients are defined as,
.. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
    def _rvs(self, M, n, N):
        # numpy's parameterization: ngood=n, nbad=M-n, nsample=N.
        return self._random_state.hypergeometric(n, M-n, N, size=self._size)
    def _get_support(self, M, n, N):
        # k ranges over [max(0, N-(M-n)), min(n, N)].
        return np.maximum(N-(M-n), 0), np.minimum(n, N)
    def _argcheck(self, M, n, N):
        # Positive population; type-I count and sample size within it.
        cond = (M > 0) & (n >= 0) & (N >= 0)
        cond &= (n <= M) & (N <= M)
        return cond
    def _logpmf(self, k, M, n, N):
        # Numerically stable form of log[C(good,k)*C(bad,N-k)/C(tot,N)]
        # written entirely in terms of betaln.
        tot, good = M, n
        bad = tot - good
        result = (betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1) -
                  betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1) -
                  betaln(tot+1, 1))
        return result
    def _pmf(self, k, M, n, N):
        # same as the following but numerically more precise
        # return comb(good, k) * comb(bad, N-k) / comb(tot, N)
        return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) | |
<reponame>DaniDuran/Selenium_Inmofianza
import openpyxl
import pyodbc as pyodbc
from functions.Inicializar import Inicializar
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException, NoSuchWindowException, TimeoutException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.ie.options import DesiredCapabilities
from selenium.webdriver.chrome.options import Options as OpcionesChrome
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from imap_tools import MailBox, AND
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
import pytest
import json
import time
import datetime
import re
import os
import psycopg2
# Shared mutable state for the running scenario/steps.
Scenario = {}
diaGlobal = time.strftime(Inicializar.DateFormat)  # current date (yyyy/mm/dd format)
horaGlobal = time.strftime(Inicializar.HourFormat)  # current time (24-hour format)
basedir = os.path.abspath(os.path.join(__file__, "../.."))
import configparser
config = configparser.ConfigParser()
# Project configuration file (Windows-style path relative to project root).
config.read(basedir + u'\\data\\config.ini')
class Functions(Inicializar):
##########################################################################
############## -=_INICIALIZAR DRIVERS_=- #############################
##########################################################################
def abrir_navegador(self, URL=Inicializar.URL, navegador=Inicializar.NAVEGADOR):
    """Start a WebDriver for the requested browser, open *URL* and return the driver.

    :param URL: page to open after startup.
    :param navegador: one of "IExplorer", "CHROME", "FIREFOX", "EDGE".

    Also initializes ``self.ventanas`` mapping 'Principal' to the main
    window handle, used later by the window-switching helpers.
    """
    print("Directorio Base: " + Inicializar.basedir)
    self.ventanas = {}
    print("----------------")
    print(navegador)
    print("---------------")
    if navegador == ("IExplorer"):
        # IE needs explicit capabilities to behave predictably with zoom
        # and window focus.
        caps = DesiredCapabilities.INTERNETEXPLORER.copy()
        caps["platform"] = "WINDOWS"
        caps["browserName"] = "internet explorer"
        caps["ignoreZoomSetting"] = True
        caps["requireWindowFocus"] = True
        caps["nativeEvents"] = True
        self.driver = webdriver.Ie(Inicializar.basedir + "\\drivers\\IEDriverServer.exe", caps)
        self.driver.implicitly_wait(10)
        self.driver.maximize_window()
        self.driver.get(URL)
        self.ventanas = {'Principal': self.driver.window_handles[0]}
        print(self.ventanas)
        return self.driver
    if navegador == ("CHROME"):
        options = OpcionesChrome()
        # options.binary_location = "C:\\Program Files \\Google\\Chrome\\Application\\chrome.exe"
        options.binary_location = Inicializar.RUTA_CHROME
        options.add_argument('start-maximized')
        self.driver = webdriver.Chrome(chrome_options=options, executable_path=Inicializar.basedir + "\\drivers\\chromedriver.exe")
        self.driver.implicitly_wait(10)
        self.driver.get(URL)
        self.ventanas = {'Principal': self.driver.window_handles[0]}
        return self.driver
    if navegador == ("FIREFOX"):
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(10)
        self.driver.maximize_window()
        self.driver.get(URL)
        self.ventanas = {'Principal': self.driver.window_handles[0]}
        return self.driver
    if navegador == ("EDGE"):
        self.driver = webdriver.Edge(executable_path=Inicializar.basedir + "\\drivers\\msedgedriver.exe")
        self.driver.implicitly_wait(10)
        self.driver.maximize_window()
        self.driver.get(URL)
        self.ventanas = {'Principal': self.driver.window_handles[0]}
        return self.driver
def tearDown(self):
    """Close every browser window and end the WebDriver session."""
    print("Se cerrará el DRIVER")
    driver = self.driver
    driver.quit()
##########################################################################
############## -=_lOCATORS HANDLE _=- ###############
##########################################################################
def xpath_element(self, XPATH):
    """Locate and return a single element by its XPath expression."""
    found = self.driver.find_element_by_xpath(XPATH)
    print("Xpath_Elements: Se interactuo con el elemento " + XPATH)
    return found
def _xpath_element(self, XPATH):
    """Wait up to 20s for the element at *XPATH* to become visible.

    Returns True when found; closes the driver on timeout or absence.

    NOTE(review): unlike _id_element (which returns the element), this
    returns True and discards the located element — confirm whether
    returning ``elements`` was intended.
    """
    try:
        wait = WebDriverWait(self.driver, 20)
        wait.until(EC.visibility_of_element_located((By.XPATH, XPATH)))
        elements = self.driver.find_element_by_xpath(XPATH)
        print(u"Esperar_Elemento: Se visualizo el elemento " + XPATH)
        return True
    except TimeoutException:
        print(u"Esperar_Elemento: No presente " + XPATH)
        Functions.tearDown(self)
    except NoSuchElementException:
        print(u"Esperar_Elemento: No presente " + XPATH)
        Functions.tearDown(self)
def id_element(self, ID):
    """Locate and return a single element by its ``id`` attribute.

    :param ID: value of the element's ``id`` attribute.
    """
    elements = self.driver.find_element_by_id(ID)
    # Fixed log label: this helper locates by ID, not by XPath
    # (the original message was copy-pasted from xpath_element).
    print("Id_Elements: Se interactuo con el elemento " + ID)
    return elements
def _id_element(self, ID):
    """Wait up to 20s for the element with id *ID* to be visible and return it.

    Closes the driver (and returns None) on timeout or absence.
    """
    try:
        wait = WebDriverWait(self.driver, 20)
        wait.until(EC.visibility_of_element_located((By.ID, ID)))
        elements = self.driver.find_element_by_id(ID)
        print(u"Esperar_Elemento: Se visualizo el elemento " + ID)
        return elements
    except TimeoutException:
        print(u"Esperar_Elemento: No presente " + ID)
        Functions.tearDown(self)
    except NoSuchElementException:
        print(u"Esperar_Elemento: No presente " + ID)
        Functions.tearDown(self)
def name_element(self, name):
    """Locate and return a single element by its ``name`` attribute.

    :param name: value of the element's ``name`` attribute.
    """
    elements = self.driver.find_element_by_name(name)
    # Fixed log label: this helper locates by NAME, not by XPath
    # (the original message was copy-pasted from xpath_element).
    print("Name_Elements: Se interactuo con el elemento " + name)
    return elements
def _name_element(self, name):
    """Wait up to 20s for the element with name *name* to be visible and return it.

    Closes the driver (and returns None) on timeout or absence.

    Bug fix: the original waited with ``By.ID`` and located via
    ``find_element_by_id`` even though *name* is a NAME locator, so it
    only ever worked for elements whose id happened to equal their name.
    """
    try:
        wait = WebDriverWait(self.driver, 20)
        wait.until(EC.visibility_of_element_located((By.NAME, name)))
        elements = self.driver.find_element_by_name(name)
        print(u"Esperar_Elemento: Se visualizo el elemento " + name)
        return elements
    except TimeoutException:
        print(u"Esperar_Elemento: No presente " + name)
        Functions.tearDown(self)
    except NoSuchElementException:
        print(u"Esperar_Elemento: No presente " + name)
        Functions.tearDown(self)
##########################################################################
############## -=_JSON HANDLE _=- #################
##########################################################################
def get_json_file(self, file):
    """Load ``<file>.json`` from the project's Json directory into ``self.json_strings``.

    On a missing file, ``self.json_strings`` is set to False and the
    test is skipped.
    """
    json_path = Inicializar.Json + "/" + file + '.json'
    try:
        with open(json_path, "r") as read_file:
            self.json_strings = json.loads(read_file.read())
        print("get_json_file: " + json_path)
    except FileNotFoundError:
        self.json_strings = False
        pytest.skip(u"get_json_file: No se encontro el Archivo " + file)
        # NOTE(review): pytest.skip raises, so the tearDown below is
        # unreachable — confirm whether the driver should be closed
        # *before* skipping.
        Functions.tearDown(self)
def get_entity(self, entity):
    """Load the locator definition for *entity* from the parsed JSON DOM.

    On success sets ``self.json_ValueToFind`` / ``self.json_GetFieldBy``
    and returns True; on a missing key the test is skipped. Returns
    None when no JSON DOM is loaded.
    """
    if self.json_strings is False:
        print("Define el DOM para esta prueba")
    else:
        try:
            self.json_ValueToFind = self.json_strings[entity]["ValueToFind"]
            self.json_GetFieldBy = self.json_strings[entity]["GetFieldBy"]
            return True
        except KeyError:
            pytest.skip(u"get_entity: No se encontro la key a la cual se hace referencia: " + entity)
            # self.driver.close()
            # NOTE(review): unreachable — pytest.skip raises above.
            Functions.tearDown(self)
    return None
def get_value_json(self, entity):
    """Return ``[ValueToFind, GetFieldBy]`` for *entity* from the JSON DOM.

    Returns None when no JSON DOM is loaded; skips the test on a
    missing key.
    """
    jasonValue = []
    if self.json_strings is False:
        print("Define el DOM para esta prueba")
    else:
        try:
            jasonValue.append(self.json_strings[entity]["ValueToFind"])
            jasonValue.append(self.json_strings[entity]["GetFieldBy"])
            return jasonValue
        except KeyError:
            pytest.skip(u"get_entity: No se encontro la key a la cual se hace referencia: " + entity)
            # self.driver.close()
            # NOTE(review): unreachable — pytest.skip raises above.
            Functions.tearDown(self)
    return None
def get_elements(self, entity, MyTextElement=None):
    """Resolve *entity* via the JSON DOM and return the matching WebElement.

    :param entity: key in the loaded JSON DOM.
    :param MyTextElement: optional value interpolated into an XPath
        template with ``str.format``.
    """
    Get_Entity = Functions.get_entity(self, entity)
    if Get_Entity is None:
        print("No se encontro el valor en el Json definido")
    else:
        try:
            # NOTE(review): if GetFieldBy matches none of the cases
            # below, `elements` is unbound and the return raises
            # UnboundLocalError.
            if self.json_GetFieldBy.lower() == "id":
                elements = self.driver.find_element_by_id(self.json_ValueToFind)
            if self.json_GetFieldBy.lower() == "name":
                elements = self.driver.find_element_by_name(self.json_ValueToFind)
            if self.json_GetFieldBy.lower() == "xpath":
                if MyTextElement is not None:
                    self.json_ValueToFind = self.json_ValueToFind.format(MyTextElement)
                    print(self.json_ValueToFind)
                elements = self.driver.find_element_by_xpath(self.json_ValueToFind)
            if self.json_GetFieldBy.lower() == "link":
                elements = self.driver.find_element_by_partial_link_text(self.json_ValueToFind)
            if self.json_GetFieldBy.lower() == "css":
                elements = self.driver.find_element_by_css_selector(self.json_ValueToFind)
            if self.json_GetFieldBy.lower() == "class":
                elements = self.driver.find_element_by_class_name(self.json_ValueToFind)
            print("get_elements: " + self.json_ValueToFind)
            return elements
        except NoSuchElementException:
            print("get_text: No se encontró el elemento: " + self.json_ValueToFind)
            Functions.tearDown(self)
        except TimeoutException:
            print("get_text: No se encontró el elemento: " + self.json_ValueToFind)
            Functions.tearDown(self)
def get_text(self, entity, MyTextElement=None):
    """Resolve *entity* via the JSON DOM and return the element's visible text.

    :param MyTextElement: optional value interpolated into an XPath
        template with ``str.format``.
    """
    Get_Entity = Functions.get_entity(self, entity)
    if Get_Entity is None:
        print("No se encontro el valor en el Json definido")
    else:
        try:
            # NOTE(review): unlike get_elements, "class" locators are not
            # handled here — an unknown GetFieldBy leaves `elements`
            # unbound and raises UnboundLocalError.
            if self.json_GetFieldBy.lower() == "id":
                elements = self.driver.find_element_by_id(self.json_ValueToFind)
            if self.json_GetFieldBy.lower() == "name":
                elements = self.driver.find_element_by_name(self.json_ValueToFind)
            if self.json_GetFieldBy.lower() == "xpath":
                if MyTextElement is not None:
                    self.json_ValueToFind = self.json_ValueToFind.format(MyTextElement)
                    print(self.json_ValueToFind)
                elements = self.driver.find_element_by_xpath(self.json_ValueToFind)
            if self.json_GetFieldBy.lower() == "link":
                elements = self.driver.find_element_by_partial_link_text(self.json_ValueToFind)
            if self.json_GetFieldBy.lower() == "css":
                elements = self.driver.find_element_by_css_selector(self.json_ValueToFind)
            print("get_text: " + self.json_ValueToFind)
            print("Text Value : " + elements.text)
            return elements.text
        except NoSuchElementException:
            print("get_text: No se encontró el elemento: " + self.json_ValueToFind)
            Functions.tearDown(self)
        except TimeoutException:
            print("get_text: No se encontró el elemento: " + self.json_ValueToFind)
            Functions.tearDown(self)
def get_select_elements(self, entity):
    """Resolve *entity* via the JSON DOM and return it wrapped in a Select.

    Used by select_by_text to drive <select> dropdowns.
    """
    Get_Entity = Functions.get_entity(self, entity)
    if Get_Entity is None:
        print("No se encontro el valor en el Json definido")
    else:
        try:
            # NOTE(review): an unknown GetFieldBy leaves `select` unbound
            # and the return raises UnboundLocalError.
            if self.json_GetFieldBy.lower() == "id":
                select = Select(self.driver.find_element_by_id(self.json_ValueToFind))
            if self.json_GetFieldBy.lower() == "name":
                select = Select(self.driver.find_element_by_name(self.json_ValueToFind))
            if self.json_GetFieldBy.lower() == "xpath":
                select = Select(self.driver.find_element_by_xpath(self.json_ValueToFind))
            if self.json_GetFieldBy.lower() == "link":
                select = Select(self.driver.find_element_by_partial_link_text(self.json_ValueToFind))
            if self.json_GetFieldBy.lower() == "class":
                select = Select(self.driver.find_element_by_class_name(self.json_ValueToFind))
            print("get_select_elements: " + self.json_ValueToFind)
            return select
        except NoSuchElementException:
            print("No se encontró el elemento: " + self.json_ValueToFind)
            Functions.tearDown(self)
        except TimeoutException:
            print("No se encontró el elemento: " + self.json_ValueToFind)
            Functions.tearDown(self)
##########################################################################
############## -=_TEXTBOX & COMBO HANDLE _=- #################
##########################################################################
def select_by_text(self, entity, text):
    """Select the option with visible text *text* in the dropdown for *entity*."""
    dropdown = Functions.get_select_elements(self, entity)
    dropdown.select_by_visible_text(text)
def send_key_text(self, entity, text):
    """Clear the field identified by *entity* and type *text* into it.

    Resolves the element once (the original looked it up twice, which
    is slower and could act on two different elements if the DOM
    changed between the lookups).
    """
    element = Functions.get_elements(self, entity)
    element.clear()
    element.send_keys(text)
def send_especific_keys(self, element, key):
    """Send a special key ('Enter', 'Tab' or 'Space') to *element*, then pause 3s.

    Unknown key names are silently ignored (only the pause happens).
    """
    special_keys = {'Enter': Keys.ENTER, 'Tab': Keys.TAB, 'Space': Keys.SPACE}
    if key in special_keys:
        Functions.get_elements(self, element).send_keys(special_keys[key])
    time.sleep(3)
def switch_to_iframe(self, Locator):
    """Switch the driver context into the iframe identified by *Locator*."""
    frame_element = Functions.get_elements(self, Locator)
    self.driver.switch_to.frame(frame_element)
    print(f"Se realizó el switch a {Locator}")
def switch_to_parentFrame(self):
    """Return the driver context to the parent frame."""
    driver = self.driver
    driver.switch_to.parent_frame()
def switch_to_windows_name(self, ventana):
    """Switch focus to the window registered under the name *ventana*.

    Unknown names are first bound to the most recently opened window
    handle, then focused and maximized.
    """
    if ventana in self.ventanas:
        self.driver.switch_to.window(self.ventanas[ventana])
        Functions.page_has_loaded(self)
        print("volviendo a " + ventana + " : " + self.ventanas[ventana])
    else:
        # Register the newest handle (last in window_handles) under this name.
        self.nWindows = len(self.driver.window_handles) - 1
        self.ventanas[ventana] = self.driver.window_handles[int(self.nWindows)]
        self.driver.switch_to.window(self.ventanas[ventana])
        self.driver.maximize_window()
        print(self.ventanas)
        print("Estas en " + ventana + " : " + self.ventanas[ventana])
        Functions.page_has_loaded(self)
def close_windows_name(self, ventana):
    """Close the window registered as *ventana* (no-op for unknown names)."""
    if ventana in self.ventanas:
        self.driver.switch_to.window(self.ventanas[ventana])
        # NOTE(review): driver.get() expects a URL but receives a window
        # handle here — confirm whether this navigation is intended.
        self.driver.get(self.ventanas[ventana])
        self.driver.close()
        print("Cerrando ventana: " + self.ventanas[ventana])
###################### -=_JAVASCRIPT_=- #############################
##########################################################################
def new_window(self, URL):
    """Open *URL* in a new browser tab via JavaScript, then wait for load."""
    open_script = f'''window.open("{URL}","_blank");'''
    self.driver.execute_script(open_script)
    Functions.page_has_loaded(self)
def page_has_loaded(self):
    """Wait (up to 30s) until ``document.readyState`` is 'complete'.

    Bug fix: the original body contained a stray ``yield`` which turned
    the method into a generator — calling it returned an unstarted
    generator object and *none* of the checks ever executed. It also
    captured ``readyState`` once before waiting, so the wait condition
    could never change. The script is now re-evaluated on every poll;
    a TimeoutException is raised if the page never finishes loading.
    """
    driver = self.driver
    print("Checking if {} page is loaded.".format(driver.current_url))
    WebDriverWait(driver, 30).until(
        lambda d: d.execute_script('return document.readyState;') == 'complete',
        "No se completo la carga")
def scroll_to(self, locator):
    """Scroll the element identified by *locator* (JSON DOM key) into view.

    Returns True on success; only "id", "xpath" and "link" locator
    kinds are supported here.
    """
    Get_Entity = Functions.get_entity(self, locator)
    if Get_Entity is None:
        return print("No se encontro el valor en el Json definido")
    else:
        try:
            if self.json_GetFieldBy.lower() == "id":
                localizador = self.driver.find_element(By.ID, self.json_ValueToFind)
                self.driver.execute_script("arguments[0].scrollIntoView();", localizador)
                print(u"scroll_to: " + locator)
                return True
            if self.json_GetFieldBy.lower() == "xpath":
                localizador = self.driver.find_element(By.XPATH, self.json_ValueToFind)
                self.driver.execute_script("arguments[0].scrollIntoView();", localizador)
                print(u"scroll_to: " + locator)
                return True
            if self.json_GetFieldBy.lower() == "link":
                localizador = self.driver.find_element(By.PARTIAL_LINK_TEXT, self.json_ValueToFind)
                self.driver.execute_script("arguments[0].scrollIntoView();", localizador)
                print(u"scroll_to: " + locator)
                return True
            # NOTE(review): other locator kinds ("name", "css", "class")
            # fall through and return None silently.
        except TimeoutException:
            print(u"scroll_to: No presente " + locator)
            Functions.tearDown(self)
def js_clic(self, locator, MyTextElement=None):
    """Click the element identified by *locator* via JavaScript.

    Useful when a native .click() is intercepted or the element is not
    directly clickable. Returns True on success.

    :param MyTextElement: optional value interpolated into an XPath
        template with ``str.format``.
    """
    Get_Entity = Functions.get_entity(self, locator)
    # esperar_elemento is defined elsewhere in this class (not visible
    # here); it waits for the element before the click is attempted.
    Functions.esperar_elemento(self, locator, MyTextElement)
    if Get_Entity is None:
        return print("No se encontro el valor en el Json definido")
    else:
        try:
            if self.json_GetFieldBy.lower() == "id":
                localizador = self.driver.find_element(By.ID, self.json_ValueToFind)
                self.driver.execute_script("arguments[0].click();", localizador)
                print(u"Se hizo click en: " + locator)
                return True
            if self.json_GetFieldBy.lower() == "xpath":
                if MyTextElement is not None:
                    self.json_ValueToFind = self.json_ValueToFind.format(MyTextElement)
                    print(self.json_ValueToFind)
                localizador = self.driver.find_element(By.XPATH, self.json_ValueToFind)
                self.driver.execute_script("arguments[0].click();", localizador)
                print(u"Se hizo click en: " + locator)
                return True
            if self.json_GetFieldBy.lower() == "link":
                localizador = self.driver.find_element(By.PARTIAL_LINK_TEXT, self.json_ValueToFind)
                self.driver.execute_script("arguments[0].click();", localizador)
                print(u"Se hizo click en: " + locator)
                return True
            if self.json_GetFieldBy.lower() == "name":
                localizador = self.driver.find_element(By.NAME, self.json_ValueToFind)
                self.driver.execute_script("arguments[0].click();", localizador)
                print(u"Se hizo click en: " + locator)
                return True
            if self.json_GetFieldBy.lower() == "css":
                localizador = self.driver.find_element(By.CSS_SELECTOR, self.json_ValueToFind)
                self.driver.execute_script("arguments[0].click();", localizador)
                print(u"Se hizo click en: " + locator)
                return True
        except TimeoutException:
            print(u"js_clic: No presente " + locator)
            Functions.tearDown(self)
############## -=_Wait Elements_=- #############################
##########################################################################
def esperar(self, timeLoad=8):
    """Block for *timeLoad* seconds, sleeping one second per iteration."""
    print("Esperar: Inicia (" + str(timeLoad) + ")")
    try:
        elapsed = 0
        while elapsed < timeLoad:
            time.sleep(1)
            elapsed = elapsed + 1
    finally:
        print("Esperar: Carga Finalizada ... ")
def alert_windows(self, accept ="accept"):
try:
wait = WebDriverWait(self.driver, 30)
wait.until(EC.alert_is_present(), print("Esperando alerta..."))
alert = self.driver.switch_to.alert
print (alert.text)
if accept.lower()== "accept":
alert.accept()
print ("Click in Accept")
| |
# Repository: thaibault/boostNode
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# region header
'''
This module provides an easy way to compile, run and clean up a various \
number of scripts.
'''
# # python3.5
# # pass
from __future__ import absolute_import, division, print_function, \
unicode_literals
# #
'''
For conventions see "boostnode/__init__.py" on \
https://github.com/thaibault/boostnode
'''
__author__ = '<NAME>'
__copyright__ = 'see boostnode/__init__.py'
__credits__ = '<NAME>',
__license__ = 'see boostnode/__init__.py'
__maintainer__ = '<NAME>'
__maintainer_email__ = 'info["~at~"]torben.website'
__status__ = 'stable'
__version__ = '1.0'
# # python3.5
# # import builtins
# # import collections
import __builtin__ as builtins
# #
from copy import copy
import inspect
import logging
import os
import sys
'''Make boostnode packages and modules importable via relative paths.'''
sys.path.append(os.path.abspath(sys.path[0] + 2 * (os.sep + '..')))
from boostnode.extension.file import Handler as FileHandler
from boostnode.extension.native import Module, InstancePropertyInitializer
from boostnode.extension.output import Logger, Print
from boostnode.extension.system import CommandLine, Platform, Runnable
# # python3.5 from boostnode.extension.type import Self
pass
from boostnode.paradigm.aspectOrientation import JointPoint
from boostnode.paradigm.objectOrientation import Class
from boostnode.runnable.template import Parser as TemplateParser
# endregion
# region classes
class Run(Class, Runnable):
'''
This class provides a large number of supported programming languages \
support for compiling, running and cleaning after running.
Determines a code file to run and runs them in its own thread by \
piping all outputs through the command line interface.
**code_file_path** - A given code file handler or path \
which should be run.
**default_command_sequence** - A default command sequence which \
should be executed in given order.
'''
# region properties
# Command line interface specification consumed by boostnode's
# CommandLine.argument_parser(); "execute" values are evaluated lazily
# in the parser's scope.
COMMAND_LINE_ARGUMENTS = (
    {'arguments': ('-f', '--code-file'),
     'specification': {
         'action': 'store',
         'default': {'execute': '__initializer_default_value__'},
         'type': {'execute': 'type(__initializer_default_value__)'},
         'required': {'execute': '__initializer_default_value__ is None'},
         'help': 'Select a file for running.',
         'dest': 'code_file_path',
         'metavar': 'FILE_PATH'}},
    {'arguments': ('-d', '--default-command-sequence'),
     'specification': {
         'action': 'store',
         'nargs': '*',
         'default': {'execute': '__initializer_default_value__'},
         'type': builtins.str,
         'required': {'execute': '__initializer_default_value__ is None'},
         'help': {
             'execute': "'Select a default sequence of things to do with "
                        '''code files (default: "%s").' % '''
                        '\'", "\'.join(__initializer_default_value__)'},
         'dest': 'default_command_sequence',
         'metavar': 'COMMAND'}},
    {'arguments': ('-n', '--runner-meta-help'),
     'specification': {
         'action': 'store_true',
         'default': False,
         'help': 'Shows this help message.',
         'dest': 'meta_help'}})
'''Holds all command line interface argument informations.'''
# Per-language configuration: shell command templates (rendered by the
# boostnode template parser via "<% ... %>" placeholders), recognised
# file extensions, optional code-manager (e.g. Makefile) hooks, and
# cleanup regular expressions.
# Improvement: the regex-valued "delete_patterns" entries are now raw
# strings — the previous plain strings relied on invalid escape
# sequences like "\.", which emit DeprecationWarnings on modern Python
# (the resulting values are byte-identical).
SUPPORTED_CODES = {
    'template': {
        'commands': {
            'compile': "bash --login -c '"
                       'template "<% code_file.path %>" 1>'
                       '"<% code_file.directory.path %>'
                       '<% code_file.basename %>.html"\'',
            'run': 'bash --login -c \'webbrowser '
                   '"<% code_file.directory.path %>'
                   '<% code_file.basename %>.html"\''
        },
        'extensions': ('tpl',)
    },
    'c': {
        'commands': {
            'compile': 'g++ "<% code_file.path %>" -o '
                       '"<% code_file.directory.path %>'
                       '<% code_file.basename %>"',
            'run': '"<% code_file.directory.path %>'
                   '<% code_file.basename %>" <%arguments%>',
        },
        'code_manager': {
            'file_path': 'Makefile',
            'commands': {
                'compile': 'make compile',
                'test': 'make test',
                'clean': 'make clean',
                'all': 'make all'
            }
        },
        'extensions': ('cpp', 'c', 'cc'),
        'delete_patterns': (r'.*\.o', r'.*Main', r'.*Test')
    },
    'bash': {
        'commands': {'run': '"<% code_file.path %>" <% arguments %>'},
        'extensions': ('bash',)
    },
    'shell': {
        'commands': {'run': '"<% code_file.path %>" <% arguments %>'},
        'extensions': ('sh', 'shell')
    },
    'python': {
        'commands': {'run': '"<% code_file.path %>" <% arguments %>'},
        'code_manager': {
            'file_path': '__init__.<% code_file.extension %>',
            'commands': {
                'clean': '__init__.<% code_file.extension %> clear',
                'test': '__init__.<% code_file.extension %> test',
                'all': '__init__.<% code_file.extension %> all'
            }
        },
        'extensions': ('py', 'pyc', 'pyw', 'pyo', 'pyd'),
        'delete_patterns': (r'.*\.py[cod]', r'__pycache__', r'temp_.*')
    },
    'laTeX': {
        'commands': {
            'compile': 'pdflatex "<% code_file.path %>" && '
                       'cd "<% code_file.directory.path %>" && bibtex '
                       '"<% code_file.basename %>.aux"; '
                       'pdflatex "<% code_file.path %>" && '
                       'pdflatex "<% code_file.path %>"',
            'run': ' || '.join(builtins.map(
                lambda name: name + ' "<% code_file.basename %>.pdf"',
                Platform.UNIX_OPEN_APPLICATIONS)
            )
        },
        'code_manager': {
            'file_path': 'Makefile',
            'commands': {
                'compile': 'make compile',
                'run': 'make preview',
                'clean': 'make clean',
                'all': 'make all'
            }
        },
        'extensions': ('tex',),
        'delete_patterns': (
            r'.+\.aux', r'.+\.log', r'.+\.toc', r'.+\.out', r'.+\.blg',
            r'.+\.bbl', r'.+\.lol')
    },
    'java': {
        'commands': {
            'compile': 'javac "<% code_file.path %>"',
            'run': 'java "<% code_file.basename.capitalize() %>" '
                   '<% arguments %>'
        },
        'extensions': ('java',),
        'delete_patterns': (r'.*\.class',)
    }
}
'''Holds all supported code types and there methods to do common stuff.'''
# endregion
# region dynamic methods
# # region public
# # # region special
@JointPoint
# # python3.5 def __repr__(self: Self) -> builtins.str:
def __repr__(self):
    '''
    Invokes if this object should describe itself by a string.

    Examples:

    >>> file = FileHandler(__test_folder__.path + '__repr__.py')
    >>> file.content = '#!/usr/bin/env python'

    >>> repr(Run(code_file_path=file)) # doctest: +ELLIPSIS
    'Object of "Run" with detected path "...__repr__.py".'
    '''
    # NOTE: the "# # python3.5" comment lines above are consumed by
    # boostnode's macro system to generate the python3 variant — do
    # not remove or reformat them.
    return (
        'Object of "{class_name}" with detected path "{path}".'.format(
            class_name=self.__class__.__name__,
            path=self._code_file.path))
# # # endregion
# # endregion
# # region protected
# # # region runnable implementation
@JointPoint
# # python3.5 def _run(self: Self) -> Self:
def _run(self):
    '''
    Entry point for command line call of this program. Determines a \
    meaningful file for running. Set the right code dependent \
    commands and finally executes them.

    Examples:

    >>> sys_argv_backup = sys.argv
    >>> sys.argv[1:] = ['--runner-meta-help', '--log-level', 'info']

    >>> run = Run.run() # doctest: +ELLIPSIS
    usage:...

    >>> empty_folder = FileHandler(
    ...     __test_folder__.path + '_run', make_directory=True)
    >>> sys.argv[1:] = ['-f', empty_folder.path, '--log-level', 'info']
    >>> run = Run.run() # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    CodeRunnerError: No supported file path found for running.

    >>> sys.argv = sys_argv_backup
    '''
    command_line_arguments = CommandLine.argument_parser(
        meta=True, arguments=self.COMMAND_LINE_ARGUMENTS,
        module_name=__name__, scope={'self': self})
    # "--runner-meta-help" only prints this runner's own argument help
    # instead of running anything.
    if command_line_arguments.meta_help:
        CommandLine.current_argument_parser.print_help()
        return self
    return self._initialize(**self._command_line_arguments_to_dictionary(
        namespace=command_line_arguments))
@JointPoint(InstancePropertyInitializer)
# # python3.5
# # def _initialize(
# #     self: Self, code_file_path=None,
# #     default_command_sequence=('compile', 'run', 'clean'),
# #     **keywords: builtins.object
# # ) -> Self:
def _initialize(
    self, code_file_path=None,
    default_command_sequence=('compile', 'run', 'clean'), **keywords
):
    # #
    '''Sets some instance properties.'''
    # # # region properties
    '''
    Holds the current code file and there potentially presented code \
    manager as file handler.
    '''
    self.code_manager_file = None
    '''
    Saves every properties for current code taken from \
    "SUPPORTED_CODES".
    '''
    self._current_code = {}
    '''Saves currently needed commands taken from "_current_code".'''
    self._current_commands = ()
    '''
    Saves given arguments which should be piped through the run \
    command to determined code file.
    '''
    self._command_line_arguments = ()
    '''Saves currently determined runnable code file object.'''
    self._code_file = self._determine_code_file(self.code_file_path)
    # # # endregion
    # A falsy code file means no supported candidate was found.
    if not self._code_file:
        raise __exception__(
            'No supported file found for running with given hint "%s".',
            code_file_path)
    return self._run_code_file()
# # # endregion
@JointPoint
# # python3.5 def _tidy_up(self: Self) -> Self:
def _tidy_up(self):
    '''
    Tidies up the current working directory after running the given \
    file.

    Examples:

    >>> garbage = FileHandler(
    ...     __test_folder__.path + 'temp_tidy_up', make_directory=True)
    >>> file = FileHandler(
    ...     __test_folder__.path + '_tidy_up_runnable.py')
    >>> file.content = '#!/usr/bin/env python'

    >>> run = Run(file)
    >>> run # doctest: +ELLIPSIS
    Object of "Run" with detected path "..._tidy_up_runnable.py".
    >>> run._tidy_up() # doctest: +ELLIPSIS
    Object of "Run" with detected path "..._tidy_up_runnable.py".
    >>> garbage.is_element()
    False

    >>> del run._current_code['properties']['delete_patterns']
    >>> run._tidy_up() # doctest: +ELLIPSIS
    Object of "Run" with detected path "..._tidy_up_runnable.py".
    '''
    # Delete every file in the code file's directory matching one of
    # the language's configured cleanup regex patterns.
    if 'delete_patterns' in self._current_code['properties']:
        __logger__.info(
            'Delete files which matches one of "%s" pattern.',
            '", "'.join(
                self._current_code['properties']['delete_patterns']))
        FileHandler(
            location=self._code_file.directory.path
        ).delete_file_patterns(
            *self._current_code['properties']['delete_patterns'])
    return self
@JointPoint
# # python3.5 def _run_commands(self: Self) -> Self:
def _run_commands(self):
    '''
    Run currently needed commands.

    Examples:

    >>> __test_buffer__.clear() # doctest: +ELLIPSIS
    '...'
    >>> __test_folder__.clear_directory()
    True

    >>> file = FileHandler(__test_folder__.path + '_run_commands.py')
    >>> file.content = '#!/usr/bin/env python'
    >>> file.change_right(700) # doctest: +ELLIPSIS
    Object of "Handler" with path "..._run_commands.py" and initiall...

    >>> Run(
    ...     code_file_path=file
    ... )._run_commands() # doctest: +ELLIPSIS
    Object of "Run" with detected path "..._run_commands.py...".
    >>> __test_buffer__.clear() # doctest: +ELLIPSIS
    '...Detected "python"...No "compile" necessary...'
    '''
    # Execute the configured commands in the caller-defined order,
    # skipping (and logging) stages the language does not define.
    for command_name in self.default_command_sequence:
        if command_name in self._current_commands:
            self._run_command(
                command_name, command=self._current_commands[command_name])
        else:
            __logger__.info('No "%s" necessary.', command_name)
    return self
@JointPoint
# # python3.5 def _check_code_manager(self: Self) -> Self:
def _check_code_manager(self):
'''
Checks if a code manager file exists for the current detected \
code file. For example it can find a makefile for a detected c++ \
source code.
Examples:
>>> file = FileHandler(
... __test_folder__.path + '_check_code_manager.py')
>>> file.content = '#!/usr/bin/env python'
>>> FileHandler(
... __test_folder__.path + '__init__.py'
... ).content = '#!/usr/bin/env python'
>>> run = Run(code_file_path=file)
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...'
>>> run._check_code_manager() # doctest: +ELLIPSIS
Object of "Run" with detected path "..._check_code_manager.py".
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...Detected code manager "...__init__.py".\\n'
>>> del run._current_code['properties']['code_manager']
>>> run._check_code_manager() # doctest: +ELLIPSIS
Object of "Run" ...
'''
if 'code_manager' in self._current_code['properties']:
file_path = self\
._current_code['properties']['code_manager']['file_path']
self.code_manager_file = FileHandler(
location=self._code_file.directory.path + file_path)
if self.code_manager_file:
self._current_commands.update(
self._current_code['properties']['code_manager'][
'commands'])
__logger__.info(
'Detected code manager "%s".', self.code_manager_file.path)
return | |
import datetime
import json
from aiohttp import request
import random
import inspect
import os
import dbl
import aiohttp
import io
import asyncpraw
import discord
import DiscordUtils
import httpx
from discord.ext import commands
from dotenv import load_dotenv
from prsaw import RandomStuff
from dotenv import load_dotenv
import os
from io import BytesIO
load_dotenv('.env')
# Reddit API client (asyncpraw) used by the `reddit` command; all
# secrets come from the environment.
reddit = asyncpraw.Reddit(client_id=os.getenv("redditid"),
                          client_secret=os.getenv("redditsecret"),
                          username="UnsoughtConch",
                          password=os.getenv('redditpassword'),
                          user_agent="ConchBotPraw")
# Chatbot API client (prsaw) backing the "conchchat" channel feature.
rs = RandomStuff(async_mode=True, api_key=os.getenv("aiapikey"))
# Top.gg (Discord Bot List) token used for upvote lookups.
dbltoken = os.getenv('DBLTOKEN')
class Fun(commands.Cog):
def __init__(self, client):
    # Keep a reference to the bot and a Top.gg client for vote checks.
    self.client = client
    self.dbl = dbl.DBLClient(self.client, dbltoken)
@commands.command()
async def test(self, ctx):
    """Simple liveness check: reply with 'Test'."""
    await ctx.send("Test")
@commands.Cog.listener()
async def on_message(self, message):
    """Message listener: AI chat in 'conchchat' channels plus misc reactions.

    Top.gg voters get plain AI replies; non-voters get the reply with a
    vote reminder appended.

    Bug fix: DM channels have no ``name`` attribute, so the original
    ``message.channel.name`` raised AttributeError for direct messages;
    getattr with a None default keeps DMs out of the AI branch safely.
    """
    if message.author.bot:
        return
    if getattr(message.channel, "name", None) == "conchchat":
        try:
            flag = False
            votes = await self.dbl.get_bot_upvotes()
            for item in votes:
                if int(item['id']) == int(message.author.id):
                    flag = True
                    break
            if flag is True:
                await message.channel.trigger_typing()
                aimsg = await rs.get_ai_response(message.content)
                await message.reply(aimsg)
            else:
                await message.channel.trigger_typing()
                aimsg = await rs.get_ai_response(message.content)
                await message.reply(f"{aimsg}\n\n*Consider voting for me on Top.gg! (<https://bit.ly/2PiLbwh>) "
                                    "It only takes a second of your time and you won't see this message anymore!*")
        except AttributeError:
            # The AI client occasionally returns malformed payloads;
            # fall back to a plain reply.
            await message.channel.trigger_typing()
            aimsg = await rs.get_ai_response(message.content)
            await message.reply(aimsg)
        except httpx.ReadTimeout:
            await message.channel.send("It seems my API has timed out. Please give me a few minutes, and if the problem "
                                       "continues, please contact UnsoughtConch via my `cb support` command.")
    try:
        # Guild-specific easter egg; best-effort (missing guild in DMs
        # or missing reaction permission is deliberately ignored).
        if message.guild.id == 724050498847506433:
            if "retard" in message.content.lower():
                await message.add_reaction("☹")
    except:
        pass
    if message.content == "<@!786620946412863538>":
        await message.channel.send("My prefix is `cb `")
@commands.command(aliases=["chatbot"])
@commands.has_permissions(manage_guild=True)
async def ai(self, ctx, channel: discord.TextChannel):
    """Guide the user through creating a 'conchchat' AI channel.

    NOTE(review): the *channel* parameter is required by the command
    signature but never read (it is reassigned on creation) — confirm
    whether it can be dropped without breaking invocations.
    """
    await ctx.send("You can set up a chatbot channel by naming any channel 'conchchat,' or I can do it for you! "
                   "would you like me to do it for you? `Yes` or `no`.")
    msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author, timeout=30)
    if "yes" in msg.content.lower():
        await ctx.send("What category would you like this channel in? Channel categories ***must be the exact "
                       "name, capitalization and all.***")
        msg0 = await self.client.wait_for('message', check=lambda message: message.author == ctx.author, timeout=30)
        category = discord.utils.get(ctx.guild.categories, name=msg0.content)
        try:
            channel = await ctx.guild.create_text_channel('conchchat', category=category)
        except:
            await ctx.send("I'm sorry, but I do not have the `manage guild` requirement needed to create channels.")
            return
        await ctx.send(f"Done! The channel `conchchat` was created in the category `{msg0.content}`")
    elif "no" in msg.content.lower():
        await ctx.send("Okay. Cancelling...")
    else:
        await ctx.send("That's not a valid option.")
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def reddit(self, ctx, subreddit):
    """Post a random submission from the top 50 of *subreddit*.

    NSFW submissions are only shown in NSFW-marked channels. Text
    (self) posts are embedded with their body; link posts show the
    linked image/URL.

    Improvements: ``is_nsfw() == True`` replaced with plain truthiness
    and the nested NSFW check flattened; the shadowed *subreddit*
    parameter is no longer reused for the resolved object; the bare
    ``except`` is narrowed to ``Exception``.
    """
    message = await ctx.send("This may take a hot minute... Sit tight!")
    try:
        sub = await reddit.subreddit(subreddit)
        all_subs = []
        async for submission in sub.top(limit=50):
            all_subs.append(submission)
        ransub = random.choice(all_subs)
        if ransub.over_18 and not ctx.channel.is_nsfw():
            await ctx.send("Looks like that post is marked over 18, meaning you need to be in an NSFW marked"
                           " channel to look at that post.")
            return
        if ransub.is_self:
            # Text post: embed the body directly.
            embed = discord.Embed(title=f"{ransub.author}'s Post", colour=ctx.author.colour)
            embed.add_field(name=ransub.title, value=ransub.selftext)
            embed.set_footer(text=f"❤ {ransub.ups} | 💬 {ransub.num_comments}")
        else:
            embed = discord.Embed(title=ransub.title, colour=ctx.author.colour, url=ransub.url)
            embed.set_footer(text=f"Posted by {ransub.author} on Reddit. | ❤ {ransub.ups} | 💬 {ransub.num_comments}")
            embed.set_image(url=ransub.url)
        await message.delete()
        await ctx.send(embed=embed)
    except Exception:
        # Broad on purpose: missing, private or locked subreddits raise
        # several different asyncpraw exception types.
        await ctx.send("Something went wrong. This may be the fact that the subreddit does not exist or is locked.")
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def itft(self, ctx):
    """Send a random "this for that" startup idea from itsthisforthat.com."""
    async with aiohttp.ClientSession() as session:
        async with session.get('http://itsthisforthat.com/api.php?json') as thing:
            try:
                load = await thing.read()
                jdata = json.loads(load)
                embed = discord.Embed(title="Wait, what does your startup do?", colour=ctx.author.colour)
                embed.add_field(name="So, basically, it's like a", value=f"**{jdata['this']}**", inline=False)
                embed.add_field(name="For", value=f"**{jdata['that']}**", inline=False)
                embed.set_footer(text="ItsThisForThat API | itsthisforthat.com")
                await ctx.send(embed=embed)
            except:
                # Best-effort: any network/JSON failure gets one message.
                await ctx.send("Woops! Something went wrong.")
@commands.group(invoke_without_command=True)
async def fbi(self, ctx):
    """Browse one page of the FBI 'Wanted' list as paginated embeds."""
    await ctx.send("What page?")
    msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author, timeout=10)
    try:
        page = int(msg.content)
    except ValueError:
        # Robustness fix: a non-numeric reply previously raised an unhandled ValueError.
        await ctx.send("That's not a valid page number.")
        return
    async with aiohttp.ClientSession() as session:
        async with session.get("https://api.fbi.gov/wanted/v1/list", params={'page': page}) as response:
            data = json.loads(await response.read())
    embeds = []
    for item in data["items"]:
        embed = discord.Embed(title=f"FBI Wanted | {item['title']}")
        embed.add_field(name="Details:", value=item['details'])
        embed.add_field(name="Warning Message:", value=item['warning_message'])
        embed.add_field(name="Reward:", value=item['reward_text'])
        embed.add_field(name="UID:", value=item['uid'])
        embed.set_footer(text="Data from FBI API | For more info on an entry, use 'cb fbi details {UID}'")
        embeds.append(embed)
    paginator = DiscordUtils.Pagination.CustomEmbedPaginator(ctx, remove_reactions=True)
    paginator.add_reaction('⏪', "back")
    paginator.add_reaction('⏩', "next")
    await paginator.run(embeds)
@fbi.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def details(self, ctx, uid, value=None):
    """Show details for one FBI Wanted entry identified by *uid*.

    When *value* is given, only that single field of the API record is
    shown; otherwise an embed with every non-null standard field is sent.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(f"https://api.fbi.gov/@wanted-person/{uid}") as response:
            data = json.loads(await response.read())
            details = data["details"]
            title = data["title"]
            desc = data["description"]
            reward = data["reward_text"]
            warnmsg = data["warning_message"]
            sex = data["sex"]
            if value is None:
                pass
            else:
                # Single-field mode: look the requested key up directly.
                embed = discord.Embed(title=f"FBI Wanted | {title}", colour=discord.Colour.red(),
                                      description=f"Published on {data['publication']}", url=data['url'])
                try:
                    embed.add_field(name=value, value=data[value])
                    embed.set_footer(text="Data from FBI API | https://api.fbi.gov.docs")
                    await ctx.send(embed=embed)
                    return
                except:
                    # NOTE(review): bare except — presumably meant to catch KeyError
                    # for an unknown field name; confirm before narrowing.
                    await ctx.send("That's an invalid value. Use 'cb help fbi' for more details.")
                    return
                return  # NOTE(review): unreachable — both branches above return.
            # Full-record mode: add each field only when the API returned it.
            embed = discord.Embed(title=f"FBI Wanted | {title}", colour=discord.Colour.red(),
                                  description=f"Published on {data['publication']}", url=data["url"])
            if details is not None:
                embed.add_field(name="Details:",value=details, inline=False)
            else:
                pass
            if desc is not None:
                embed.add_field(name="Description", value=desc)
            else:
                pass
            if warnmsg is not None:
                embed.add_field(name="Warning Message:", value=warnmsg, inline=False)
            else:
                pass
            if reward is not None:
                embed.add_field(name="Reward:", value=reward)
            else:
                pass
            if sex is not None:
                embed.add_field(name="Sex:", value=sex, inline=False)
            else:
                pass
            embed.set_thumbnail(url="https://upload.wikimedia.org/wikipedia/commons/thumb/d/da/Seal_of_the_Federal_Bureau_of_Investigation.svg/300px-Seal_of_the_Federal_Bureau_of_Investigation.svg.png")
            try:
                embed.set_image(url = data["images"][0]["large"])
            except:
                # Best-effort: not every record has an image attached.
                pass
            await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def covid(self, ctx, country):
    """Show COVID-19 case statistics for *country* (name is case-sensitive)."""
    async with aiohttp.ClientSession() as session:
        async with session.get("https://covid-api.mmediagroup.fr/v1/cases") as response:
            data = json.loads(await response.read())
    try:
        stats = data[country]['All']  # hoisted: the original repeated this lookup five times
        embed = discord.Embed(title=f"COVID-19 in {country}", colour=discord.Colour.gold())
        embed.add_field(name="Cases:", value=stats['confirmed'])
        embed.add_field(name="Recovered Cases:", value=stats['recovered'])
        embed.add_field(name="Deaths:", value=stats['deaths'])
        embed.add_field(name="Country Population:", value=stats['population'])
        embed.add_field(name="Life Expectancy:", value=stats['life_expectancy'])
        embed.set_footer(text="Stats brought to you by M-Media-Group's COVID-19 API")
        await ctx.send(embed=embed)
    except KeyError:
        # Narrowed from a bare except: an unknown country manifests as a KeyError.
        await ctx.send("Country not found. Country names ***are case-sensitive***.")
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def joke(self, ctx):
    """Post a random top submission from r/jokes."""
    msg = await ctx.send("Grabbing your joke...")
    subreddit = await reddit.subreddit("jokes")
    all_subs = [submission async for submission in subreddit.top(limit=50)]
    ransub = random.choice(all_subs)
    # Fix: discord.Embed has no 'name' keyword, so the original header was
    # silently dropped; 'title' is the correct parameter.
    embed = discord.Embed(title=f"{ransub.author}'s Joke", colour=ctx.author.colour)
    embed.add_field(name=ransub.title, value=ransub.selftext)
    embed.set_footer(text=f"❤ {ransub.ups} | 💬 {ransub.num_comments}")
    await msg.delete()
    await ctx.send(embed=embed)
@commands.command(aliases=['repeat'])
@commands.cooldown(1, 3, commands.BucketType.user)
async def echo(self, ctx, channel: discord.TextChannel = None, *, msg):
    """Repeat *msg*, either in the current channel or in *channel*."""
    destination = ctx if channel is None else channel
    await destination.send(msg)
@commands.command(name='8ball')
@commands.cooldown(1, 5, commands.BucketType.user)
async def _8ball(self, ctx, *, msg):
    """Answer the question *msg* with a classic Magic 8-Ball response."""
    responses = (
        'As I see it, yes.',
        'Ask again later.',
        'Better not tell you now.',
        'Cannot predict now.',
        'Concentrate and ask again.',
        'Don’t count on it.',
        'It is certain.',
        'It is decidedly so.',
        'Most likely.',
        'My reply is no.',
        'My sources say no.',
        'Outlook not so good.',
        'Outlook good.',
        'Reply hazy, try again.',
        'Signs point to yes.',
        'Very doubtful.',
        'Without a doubt.',
        'Yes.',
        'Yes – definitely.',
        'You may rely on it.',
    )
    answer = random.choice(responses)
    embed = discord.Embed(title="Magic 8 Ball", colour=discord.Colour.blurple())
    embed.add_field(name="Question:", value=msg)
    embed.add_field(name="Answer:", value=answer)
    await ctx.send(embed=embed)
@commands.command(aliases=["LMGTFY"])
@commands.cooldown(1, 3, commands.BucketType.user)
async def google(self, ctx, *, query):
    """Send a Google search link for *query*."""
    # Fix: the original only replaced spaces with '+', so characters such as
    # '&', '#' or '?' corrupted the URL; quote_plus escapes them all.
    from urllib.parse import quote_plus
    await ctx.send(f"https://www.google.com/search?q={quote_plus(query.lower())}")
@commands.command(aliases=['chances', 'odds', 'odd'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def chance(self, ctx, *, msg):
    """Rate the chances of *msg* happening on a 0-10 scale."""
    embed = discord.Embed(title="What are the Chances?", colour=ctx.author.colour)
    embed.add_field(name="Question:", value=msg)
    embed.add_field(name="The chances are...", value=random.randint(0, 10))
    await ctx.send(embed=embed)
@commands.command(aliases=['avatar'])
@commands.cooldown(1, 3, commands.BucketType.user)
async def pfp(self, ctx, member: discord.Member = None):
    """Show the avatar of *member*, or of the caller when no member is given."""
    target = ctx.author if member is None else member
    embed = discord.Embed(title=f"{target}'s Avatar", colour=target.colour)
    embed.set_image(url=target.avatar_url)
    await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
async def animal(self, ctx, animal=None):
    """Post a random fact plus a picture for one of the supported animals."""
    animal_options = ["dog", "cat", "panda", "fox", "bird", "koala", "red_panda"]
    if animal is None:
        animal = random.choice(animal_options)
    animal = animal.lower()
    if animal not in animal_options:
        await ctx.send(f"Sorry but {animal} isn't in my api")
        return
    fact_url = f"https://some-random-api.ml/facts/{animal}"
    image_url = f"https://some-random-api.ml/img/{animal}"
    async with ctx.typing():
        # Fetch the picture first; a failure here only drops the image.
        async with request("GET", image_url, headers={}) as response:
            image_link = (await response.json())["link"] if response.status == 200 else None
        async with request("GET", fact_url, headers={}) as response:
            if response.status != 200:
                await ctx.send(f"API returned a {response.status} status.")
                return
            fact_payload = await response.json()
        embed = discord.Embed(title=f"{animal.title()} fact")
        embed.add_field(name="Fact", value=fact_payload["fact"])
        if image_link is not None:
            embed.set_image(url=image_link)
        await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
async def token(self, ctx):
    """Send a fake bot token fetched from some-random-api.ml."""
    token_web = "https://some-random-api.ml/bottoken"
    async with ctx.typing():
        async with request("GET", token_web, headers={}) as response:
            if response.status != 200:
                # Fix: the original fell through to ctx.send(bottoken) with
                # 'bottoken' unbound, raising NameError after the error message.
                await ctx.send(f"API returned a {response.status} status.")
                return
            api = await response.json()
            bottoken = api["token"]
    await ctx.send(bottoken)
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
async def meme(self, ctx):
meme_web = "https://some-random-api.ml/meme"
async with ctx.typing():
async with request("GET", meme_web, headers={}) as response:
if response.status == 200:
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 24 13:43:28 2017
@author: nightowl
"""
from __future__ import print_function
import os
# from fuzzywuzzy import fuzz
from shutil import copyfile
from ..io.database.sql_to_python import QuerySQL
from ..io.database.sql_connector import DVH_SQL
from ...paths import PREF_DIR, SCRIPT_DIR
from ...tools.utilities import flatten_list_of_lists
class Physician:
    """Maps one physician's ROI names to institutional ROIs and name variations."""

    def __init__(self, initials):
        self.initials = initials
        # physician_roi -> {'institutional_roi': str, 'variations': [str, ...]}
        self.physician_rois = {}

    def add_physician_roi(self, institutional_roi, physician_roi):
        """Register *physician_roi* linked to *institutional_roi*; the ROI name
        itself is recorded as its first variation."""
        roi_key = clean_name(physician_roi)
        self.physician_rois[roi_key] = {'institutional_roi': clean_name(institutional_roi),
                                        'variations': [roi_key]}

    def add_physician_roi_variation(self, physician_roi, variation):
        """Add *variation* to *physician_roi*'s list, kept sorted and duplicate-free."""
        roi_key = clean_name(physician_roi)
        new_variation = clean_name(variation)
        if roi_key in self.physician_rois:
            variations = self.physician_rois[roi_key]['variations']
            if new_variation not in variations:
                variations.append(new_variation)
                variations.sort()
class DatabaseROIs:
def __init__(self):
    """Load institutional ROI names and every physician ROI map from the
    .roi preference files found in PREF_DIR."""
    self.physicians = {}
    self.institutional_rois = []
    # Copy default ROI files to user folder if they do not exist
    if not os.path.isfile(os.path.join(PREF_DIR, 'institutional.roi')):
        initialize_roi_preference_file('institutional.roi')
        initialize_roi_preference_file('physician_BBM.roi')
    # Import institutional roi names: one cleaned name per line
    abs_file_path = os.path.join(PREF_DIR, 'institutional.roi')
    if os.path.isfile(abs_file_path):
        with open(abs_file_path, 'r') as document:
            for line in document:
                if not line:
                    continue
                line = clean_name(str(line))
                self.institutional_rois.append(line)
    # Only the DEFAULT physician is pre-seeded with the institutional ROI list
    physicians = get_physicians_from_roi_files()
    for physician in physicians:
        self.add_physician(physician, add_institutional_rois=(physician == 'DEFAULT'))
    self.import_physician_roi_maps()
    # 'uncategorized' is the catch-all bucket and must always be present
    if 'uncategorized' not in self.institutional_rois:
        self.institutional_rois.append('uncategorized')
    self.branched_institutional_rois = {}
##############################################
# Import from file functions
##############################################
def import_physician_roi_maps(self):
    """Load each known physician's map file (physician_<initials>.roi) if present."""
    for initials in list(self.physicians):
        file_path = os.path.join(PREF_DIR, 'physician_%s.roi' % initials)
        if os.path.isfile(file_path):
            self.import_physician_roi_map(file_path, initials)
def import_physician_roi_map(self, abs_file_path, physician):
    """Parse one physician .roi map file and merge it into this map.

    Each line reads 'institutional: physician, variation, variation, ...'
    (':' and ',' are interchangeable separators).
    """
    with open(abs_file_path, 'r') as document:
        for line in document:
            # Fix: lines read from a file keep their newline, so the original
            # `if not line` never skipped blanks and a blank line raised
            # IndexError on the second field; strip before testing.
            if not line.strip():
                continue
            fields = str(line).lower().strip().replace(':', ',').split(',')
            institutional_roi = fields[0].strip()
            physician_roi = fields[1].strip()
            self.add_institutional_roi(institutional_roi)
            self.add_physician_roi(physician, institutional_roi, physician_roi)
            for variation in fields[2:]:
                self.add_variation(physician, physician_roi, clean_name(variation))
###################################
# Physician functions
###################################
def add_physician(self, physician, add_institutional_rois=True):
    """Create an empty Physician entry; optionally seed it with every
    institutional ROI mapped to itself."""
    physician_key = clean_name(physician).upper()
    if physician_key in self.get_physicians():
        return
    self.physicians[physician_key] = Physician(physician_key)
    if add_institutional_rois:
        for institutional_roi in self.institutional_rois:
            self.add_physician_roi(physician_key, institutional_roi, institutional_roi)
def delete_physician(self, physician):
    """Remove *physician* from the map; silently ignore unknown names."""
    self.physicians.pop(clean_name(physician).upper(), None)
def get_physicians(self):
return list(self.physicians)
def get_physician(self, physician):
return self.physicians[physician]
def is_physician(self, physician):
    """True if *physician* (cleaned and upper-cased) is a known physician."""
    return clean_name(physician).upper() in self.get_physicians()
def set_physician(self, new_physician, physician):
    """Rename *physician* to *new_physician* (both names cleaned/upper-cased)."""
    old_key = clean_name(physician).upper()
    new_key = clean_name(new_physician).upper()
    self.physicians[new_key] = self.physicians.pop(old_key)
#################################
# Institutional ROI functions
#################################
def get_institutional_rois(self):
return self.institutional_rois
def get_institutional_roi(self, physician, physician_roi):
    """Map *physician_roi* to its institutional ROI; under 'DEFAULT' every
    ROI maps to itself."""
    physician_key = clean_name(physician).upper()
    roi_key = clean_name(physician_roi)
    if physician_key == 'DEFAULT':
        return roi_key
    return self.physicians[physician_key].physician_rois[roi_key]['institutional_roi']
def add_institutional_roi(self, roi):
    """Add *roi* (cleaned) to the institutional list, keeping it sorted."""
    cleaned = clean_name(roi)
    if cleaned not in self.institutional_rois:
        self.institutional_rois.append(cleaned)
        self.institutional_rois.sort()
def set_institutional_roi(self, new_institutional_roi, institutional_roi):
    """Rename an institutional ROI and re-point every physician ROI that
    referenced the old name ('DEFAULT' is skipped)."""
    new_institutional_roi = clean_name(new_institutional_roi)
    institutional_roi = clean_name(institutional_roi)
    # Swap the name in the master list (raises ValueError if absent).
    index = self.institutional_rois.index(institutional_roi)
    self.institutional_rois.pop(index)
    self.add_institutional_roi(new_institutional_roi)
    # Relink physician ROIs that pointed at the old name.
    for physician in self.get_physicians():
        if physician != 'DEFAULT':
            for physician_roi in self.get_physician_rois(physician):
                physician_roi_obj = self.physicians[physician].physician_rois[physician_roi]
                if physician_roi_obj['institutional_roi'] == institutional_roi:
                    physician_roi_obj['institutional_roi'] = new_institutional_roi
def set_linked_institutional_roi(self, new_institutional_roi, physician, physician_roi):
self.physicians[physician].physician_rois[physician_roi]['institutional_roi'] = new_institutional_roi
def delete_institutional_roi(self, roi):
    """'Deleting' an institutional ROI recategorizes it as 'uncategorized'."""
    replacement = 'uncategorized'
    self.set_institutional_roi(replacement, roi)
def is_institutional_roi(self, roi):
    """True if *roi* (cleaned) is one of the institutional ROI names."""
    return clean_name(roi) in self.institutional_rois
def get_unused_institutional_rois(self, physician):
    """Institutional ROIs not linked to any of *physician*'s ROIs; the
    result always contains 'uncategorized'."""
    physician_key = clean_name(physician).upper()
    used_rois = []
    physician_rois = self.get_physician_rois(physician_key)
    # [''] is the sentinel get_physician_rois returns when there are none.
    if physician_rois[0] != '':
        used_rois = [self.get_institutional_roi(physician_key, roi) for roi in physician_rois]
    unused_rois = [roi for roi in self.institutional_rois if roi not in used_rois]
    if 'uncategorized' not in unused_rois:
        unused_rois.append('uncategorized')
    return unused_rois
########################################
# Physician ROI functions
########################################
def get_physician_rois(self, physician):
    """Sorted physician ROI names, or [''] for an unknown physician or an
    empty map."""
    physician_key = clean_name(physician).upper()
    if self.is_physician(physician_key):
        rois = sorted(self.physicians[physician_key].physician_rois)
        if rois:
            return rois
    return ['']
def get_physician_roi(self, physician, roi):
    """Find the physician ROI that has *roi* among its variations, else
    'uncategorized'."""
    physician_key = clean_name(physician).upper()
    target = clean_name(roi)
    for physician_roi in self.get_physician_rois(physician_key):
        if target in self.get_variations(physician_key, physician_roi):
            return physician_roi
    return 'uncategorized'
def get_physician_roi_from_institutional_roi(self, physician, institutional_roi):
    """Return the first physician ROI linked to *institutional_roi*, or
    ['uncategorized'] when there is no link (or the input is itself
    'uncategorized')."""
    # NOTE(review): return type is inconsistent — a bare string on a match,
    # but the LIST ['uncategorized'] otherwise. Callers may rely on this,
    # so it is documented rather than changed.
    physician = clean_name(physician).upper()
    institutional_roi = clean_name(institutional_roi)
    if institutional_roi == 'uncategorized':
        return ['uncategorized']
    for physician_roi in self.get_physician_rois(physician):
        if institutional_roi == self.get_institutional_roi(physician, physician_roi):
            return physician_roi
    else:
        return ['uncategorized']
def add_physician_roi(self, physician, institutional_roi, physician_roi):
    """Link a new physician ROI to an existing institutional ROI; no-op if
    the physician ROI already exists or the institutional ROI is unknown."""
    physician_key = clean_name(physician).upper()
    inst_key = clean_name(institutional_roi)
    roi_key = clean_name(physician_roi)
    if roi_key not in self.get_physician_rois(physician_key) and inst_key in self.institutional_rois:
        self.physicians[physician_key].add_physician_roi(inst_key, roi_key)
def set_physician_roi(self, new_physician_roi, physician, physician_roi):
    """Rename *physician_roi* to *new_physician_roi* for *physician*."""
    new_physician_roi = clean_name(new_physician_roi)
    physician = clean_name(physician).upper()
    physician_roi = clean_name(physician_roi)
    if new_physician_roi != physician_roi:
        # NOTE(review): pop(..., None) stores None under the new key when the
        # old key is missing — confirm callers always pass an existing ROI.
        self.physicians[physician].physician_rois[new_physician_roi] = \
            self.physicians[physician].physician_rois.pop(physician_roi, None)
        # The renamed ROI becomes one of its own variations.
        self.add_variation(physician, new_physician_roi, new_physician_roi)
        # self.delete_variation(physician, new_physician_roi, physician_roi)
def delete_physician_roi(self, physician, physician_roi):
    """Remove *physician_roi* from *physician*'s map if it exists."""
    physician_key = clean_name(physician).upper()
    roi_key = clean_name(physician_roi)
    if roi_key in self.get_physician_rois(physician_key):
        self.physicians[physician_key].physician_rois.pop(roi_key, None)
def is_physician_roi(self, roi, physician):
    """True if *roi* (cleaned) names one of *physician*'s ROIs."""
    return clean_name(roi) in self.get_physician_rois(physician)
def get_unused_physician_rois(self, physician):
    """Physician ROIs still mapped to 'uncategorized'; [''] when none."""
    physician_key = clean_name(physician).upper()
    unused_rois = [roi for roi in self.get_physician_rois(physician_key)
                   if self.get_institutional_roi(physician_key, roi) == 'uncategorized']
    return unused_rois if unused_rois else ['']
def merge_physician_rois(self, physician, physician_rois, final_physician_roi):
    """Fold every ROI in *physician_rois* into *final_physician_roi*,
    preserving all variations, then delete the merged-away ROIs."""
    variation_lists = [self.get_variations(physician, roi) for roi in physician_rois]
    merged_variations = flatten_list_of_lists(variation_lists, remove_duplicates=True)
    for variation in merged_variations:
        self.add_variation(physician, final_physician_roi, variation)
    for roi in physician_rois:
        if roi != final_physician_roi:
            self.delete_physician_roi(physician, roi)
###################################################
# Variation-of-Physician-ROI functions
###################################################
def get_variations(self, physician, physician_roi):
    """Variation names for *physician_roi*; ['uncategorized'] for that
    bucket, [''] when unknown or empty."""
    physician_key = clean_name(physician).upper()
    roi_key = clean_name(physician_roi)
    if roi_key == 'uncategorized':
        return ['uncategorized']
    if self.is_physician_roi(roi_key, physician_key):
        variations = self.physicians[physician_key].physician_rois[roi_key]['variations']
        if variations:
            return variations
    return ['']
def get_all_variations_of_physician(self, physician):
    """All variation names across every ROI of *physician*, sorted; ['']
    when there are none."""
    physician_key = clean_name(physician).upper()
    variations = [variation
                  for physician_roi in self.get_physician_rois(physician_key)
                  for variation in self.get_variations(physician_key, physician_roi)]
    if not variations:
        return ['']
    variations.sort()
    return variations
def add_variation(self, physician, physician_roi, variation):
    """Record *variation* (cleaned, non-empty) for *physician_roi* unless
    it is already present."""
    physician_key = clean_name(physician).upper()
    roi_key = clean_name(physician_roi)
    cleaned = clean_name(variation)
    if cleaned and cleaned not in self.get_variations(physician_key, roi_key):
        self.physicians[physician_key].add_physician_roi_variation(roi_key, cleaned)
def delete_variation(self, physician, physician_roi, variation):
    """Remove *variation* from *physician_roi*'s list if present, keeping
    the list sorted."""
    physician_key = clean_name(physician).upper()
    roi_key = clean_name(physician_roi)
    target = clean_name(variation)
    if target in self.get_variations(physician_key, roi_key):
        variations = self.physicians[physician_key].physician_rois[roi_key]['variations']
        variations.remove(target)
        variations.sort()
def set_variation(self, new_variation, physician, physician_roi, variation):
    """Rename a variation by adding the new name, then deleting the old."""
    new_clean = clean_name(new_variation)
    physician_key = clean_name(physician).upper()
    roi_key = clean_name(physician_roi)
    old_clean = clean_name(variation)
    if new_clean != old_clean:
        self.add_variation(physician_key, roi_key, new_clean)
        self.delete_variation(physician_key, roi_key, old_clean)
def is_roi(self, roi):
    """True if *roi* (cleaned) appears as a variation of any physician ROI."""
    target = clean_name(roi)
    return any(target in self.get_variations(physician, physician_roi)
               for physician in self.get_physicians()
               for physician_roi in self.get_physician_rois(physician))
# def get_best_roi_match(self, roi, length=None):
# roi = clean_name(roi)
#
# scores = []
# rois = []
# physicians = []
#
# for physician in self.get_physicians():
# for physician_roi in self.get_physician_rois(physician):
# scores.append(get_combined_fuzz_score(physician_roi, roi))
# rois.append(physician_roi)
# physicians.append(physician)
# for variation in self.get_variations(physician, physician_roi):
# scores.append(get_combined_fuzz_score(variation, roi))
# rois.append(variation)
# physicians.append(physician)
#
# for institutional_roi in self.institutional_rois:
# scores.append(get_combined_fuzz_score(institutional_roi, roi))
# rois.append(institutional_roi)
# physicians.append('DEFAULT')
#
# best = []
#
# if length:
# if length > len(scores):
# length = len(scores)
# else:
# length = 1
#
# for i in range(length):
# max_score = max(scores)
# index = scores.index(max_score)
# scores.pop(index)
# best_match = rois.pop(index)
# best_physician = physicians.pop(index)
# if self.is_institutional_roi(best_match):
# best_institutional_roi = best_match
# else:
# best_institutional_roi = 'uncategorized'
#
# best_physician_roi = self.get_physician_roi(best_physician, best_match)
#
# best.append({'variation': best_match,
# 'physician_roi': best_physician_roi,
# 'physician': best_physician,
# 'institutional_roi': best_institutional_roi,
# 'score': max_score})
#
# return best
########################
# Export to file
########################
def write_to_file(self):
    """Persist the institutional ROI list and every physician map to .roi
    files in PREF_DIR, then delete files of physicians no longer mapped."""
    # Institutional list: one name per line, sorted in place.
    inst_path = os.path.join(PREF_DIR, 'institutional.roi')
    self.institutional_rois.sort()
    with open(inst_path, 'w') as document:
        # Fix: the original joined the lines and then wrote the joined string
        # one CHARACTER at a time; write it in a single call instead. The
        # file handle is now also closed via a context manager.
        document.write('\n'.join(self.institutional_rois))

    physicians = self.get_physicians()
    physicians.remove('DEFAULT')  # 'DEFAULT' has no file of its own
    for physician in physicians:
        abs_file_path = os.path.join(PREF_DIR, 'physician_' + physician + '.roi')
        lines = []
        for physician_roi in self.get_physician_rois(physician):
            institutional_roi = self.get_institutional_roi(physician, physician_roi)
            variations = ', '.join(self.get_variations(physician, physician_roi))
            lines.append(': '.join([institutional_roi, physician_roi, variations]) + '\n')
        lines.sort()
        if lines:
            with open(abs_file_path, 'w') as document:
                document.writelines(lines)

    # Remove map files for physicians that were deleted from this object.
    for physician in get_physicians_from_roi_files():
        if physician not in physicians and physician != 'DEFAULT':
            os.remove(os.path.join(PREF_DIR, 'physician_' + physician + '.roi'))
################
# Plotting tools
################
def get_physician_roi_visual_coordinates(self, physician, physician_roi):
# All 0.5 subtractions due to a workaround of a Bokeh 0.12.9 bug
institutional_roi = self.get_institutional_roi(physician, physician_roi)
# x and y are coordinates for the circles
# x0, y0 is beggining of line segment, x1, y1 is end of line-segment
if institutional_roi == 'uncategorized':
table = {'name': [physician_roi],
'x': [2 - 0.5],
'y': [0],
'x0': [2 - 0.5],
'y0': [0],
'x1': [2 - 0.5],
'y1': [0]}
else:
table = {'name': [institutional_roi, physician_roi],
'x': [1 - 0.5, 2 - 0.5],
'y': [0, 0],
'x0': [1 - 0.5, 2 - 0.5],
'y0': [0, 0],
'x1': [2 - 0.5, 1 - 0.5],
'y1': [0, 0]}
variations = self.get_variations(physician, physician_roi)
for i, variation in enumerate(variations):
y = -i
table['name'].append(variation)
table['x'].append(3 - 0.5)
table['y'].append(y)
table['x0'].append(2 - 0.5)
table['y0'].append(0)
table['x1'].append(3 - 0.5)
table['y1'].append(y)
table_length = len(table['name'])
table['color'] = ['#1F77B4'] * table_length
table['institutional_roi'] = [institutional_roi] * table_length
table['physician_roi'] | |
pulumi.get(self, "mirror_overwrites_diverged_branches")
@property
@pulumi.getter(name="mirrorTriggerBuilds")
def mirror_trigger_builds(self) -> bool:
    """The `mirror_trigger_builds` value of this project result (wire name `mirrorTriggerBuilds`)."""
    return pulumi.get(self, "mirror_trigger_builds")
@property
@pulumi.getter(name="mirrorUserId")
def mirror_user_id(self) -> int:
    """The `mirror_user_id` value of this project result (wire name `mirrorUserId`)."""
    return pulumi.get(self, "mirror_user_id")
@property
@pulumi.getter
def name(self) -> str:
    """
    The name of the project.
    """
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="nameWithNamespace")
def name_with_namespace(self) -> str:
    """
    The project name including its namespace, in `group / subgroup / project` or `user / project` format.
    """
    return pulumi.get(self, "name_with_namespace")
@property
@pulumi.getter
def namespace(self) -> 'outputs.GetProjectsProjectNamespaceResult':
    """The namespace record this project belongs to."""
    return pulumi.get(self, "namespace")
@property
@pulumi.getter(name="onlyAllowMergeIfAllDiscussionsAreResolved")
def only_allow_merge_if_all_discussions_are_resolved(self) -> bool:
    """The `only_allow_merge_if_all_discussions_are_resolved` value of this project result."""
    return pulumi.get(self, "only_allow_merge_if_all_discussions_are_resolved")
@property
@pulumi.getter(name="onlyAllowMergeIfPipelineSucceeds")
def only_allow_merge_if_pipeline_succeeds(self) -> bool:
    """The `only_allow_merge_if_pipeline_succeeds` value of this project result."""
    return pulumi.get(self, "only_allow_merge_if_pipeline_succeeds")
@property
@pulumi.getter(name="onlyMirrorProtectedBranches")
def only_mirror_protected_branches(self) -> bool:
    """The `only_mirror_protected_branches` value of this project result (wire name `onlyMirrorProtectedBranches`)."""
    return pulumi.get(self, "only_mirror_protected_branches")
@property
@pulumi.getter(name="openIssuesCount")
def open_issues_count(self) -> int:
    """The `open_issues_count` value of this project result (wire name `openIssuesCount`)."""
    return pulumi.get(self, "open_issues_count")
@property
@pulumi.getter
def owner(self) -> 'outputs.GetProjectsProjectOwnerResult':
    """The owner record of this project."""
    return pulumi.get(self, "owner")
@property
@pulumi.getter(name="packagesEnabled")
def packages_enabled(self) -> bool:
    """The `packages_enabled` value of this project result (wire name `packagesEnabled`)."""
    return pulumi.get(self, "packages_enabled")
@property
@pulumi.getter
def path(self) -> str:
    """The path of the project."""
    return pulumi.get(self, "path")
@property
@pulumi.getter(name="pathWithNamespace")
def path_with_namespace(self) -> str:
    """
    The project path including its namespace, in `group/subgroup/project` or `user/project` format.
    """
    return pulumi.get(self, "path_with_namespace")
@property
@pulumi.getter
def permissions(self) -> 'outputs.GetProjectsProjectPermissionsResult':
    """The permissions record (group and project access maps) for this project."""
    return pulumi.get(self, "permissions")
@property
@pulumi.getter
def public(self) -> bool:
    """
    Whether the project is public.
    """
    return pulumi.get(self, "public")
@property
@pulumi.getter(name="publicBuilds")
def public_builds(self) -> bool:
    """The `public_builds` value of this project result (wire name `publicBuilds`)."""
    return pulumi.get(self, "public_builds")
@property
@pulumi.getter(name="readmeUrl")
def readme_url(self) -> str:
    """The `readme_url` value of this project result (wire name `readmeUrl`)."""
    return pulumi.get(self, "readme_url")
@property
@pulumi.getter(name="requestAccessEnabled")
def request_access_enabled(self) -> bool:
    """The `request_access_enabled` value of this project result (wire name `requestAccessEnabled`)."""
    return pulumi.get(self, "request_access_enabled")
@property
@pulumi.getter(name="resolveOutdatedDiffDiscussions")
def resolve_outdated_diff_discussions(self) -> bool:
    """The `resolve_outdated_diff_discussions` value of this project result."""
    return pulumi.get(self, "resolve_outdated_diff_discussions")
@property
@pulumi.getter(name="runnersToken")
def runners_token(self) -> str:
    """The `runners_token` value of this project result (wire name `runnersToken`)."""
    return pulumi.get(self, "runners_token")
@property
@pulumi.getter(name="sharedRunnersEnabled")
def shared_runners_enabled(self) -> bool:
    """The `shared_runners_enabled` value of this project result (wire name `sharedRunnersEnabled`)."""
    return pulumi.get(self, "shared_runners_enabled")
@property
@pulumi.getter(name="sharedWithGroups")
def shared_with_groups(self) -> Sequence['outputs.GetProjectsProjectSharedWithGroupResult']:
    """The groups this project is shared with (wire name `sharedWithGroups`)."""
    return pulumi.get(self, "shared_with_groups")
@property
@pulumi.getter(name="snippetsEnabled")
def snippets_enabled(self) -> bool:
    """The `snippets_enabled` value of this project result (wire name `snippetsEnabled`)."""
    return pulumi.get(self, "snippets_enabled")
@property
@pulumi.getter(name="sshUrlToRepo")
def ssh_url_to_repo(self) -> str:
    """
    The SSH clone URL of the project.
    """
    return pulumi.get(self, "ssh_url_to_repo")
@property
@pulumi.getter(name="starCount")
def star_count(self) -> int:
    """The `star_count` value of this project result (wire name `starCount`)."""
    return pulumi.get(self, "star_count")
@property
@pulumi.getter
def statistics(self) -> Mapping[str, int]:
    """
    Project statistics, keyed by statistic name. Cannot be used with `group_id`.
    """
    return pulumi.get(self, "statistics")
@property
@pulumi.getter(name="tagLists")
def tag_lists(self) -> Sequence[str]:
    """
    A set of the project topics (formerly called "project tags").
    """
    return pulumi.get(self, "tag_lists")
@property
@pulumi.getter
def visibility(self) -> str:
    """
    The project visibility: `public`, `internal`, or `private`.
    """
    return pulumi.get(self, "visibility")
@property
@pulumi.getter(name="webUrl")
def web_url(self) -> str:
    """The web URL of the project (wire name `webUrl`)."""
    return pulumi.get(self, "web_url")
@property
@pulumi.getter(name="wikiEnabled")
def wiki_enabled(self) -> bool:
    """The `wiki_enabled` value of this project result (wire name `wikiEnabled`)."""
    return pulumi.get(self, "wiki_enabled")
@pulumi.output_type
class GetProjectsProjectForkedFromProjectResult(dict):
    """Summary of the upstream project this project was forked from."""
    def __init__(__self__, *,
                 http_url_to_repo: str,
                 id: int,
                 name: str,
                 name_with_namespace: str,
                 path: str,
                 path_with_namespace: str,
                 web_url: str):
        """
        :param str http_url_to_repo: The HTTP clone URL of the project.
        :param int id: The ID of the project.
        :param str name: The name of the project.
        :param str name_with_namespace: In `group / subgroup / project` or `user / project` format.
        :param str path: The path of the project.
        :param str path_with_namespace: In `group/subgroup/project` or `user/project` format.
        :param str web_url: The web URL of the project.
        """
        pulumi.set(__self__, "http_url_to_repo", http_url_to_repo)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "name_with_namespace", name_with_namespace)
        pulumi.set(__self__, "path", path)
        pulumi.set(__self__, "path_with_namespace", path_with_namespace)
        pulumi.set(__self__, "web_url", web_url)
    @property
    @pulumi.getter(name="httpUrlToRepo")
    def http_url_to_repo(self) -> str:
        """
        The HTTP clone URL of the project.
        """
        return pulumi.get(self, "http_url_to_repo")
    @property
    @pulumi.getter
    def id(self) -> int:
        """
        The ID of the project.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the project.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="nameWithNamespace")
    def name_with_namespace(self) -> str:
        """
        In `group / subgroup / project` or `user / project` format.
        """
        return pulumi.get(self, "name_with_namespace")
    @property
    @pulumi.getter
    def path(self) -> str:
        """The path of the project."""
        return pulumi.get(self, "path")
    @property
    @pulumi.getter(name="pathWithNamespace")
    def path_with_namespace(self) -> str:
        """
        In `group/subgroup/project` or `user/project` format.
        """
        return pulumi.get(self, "path_with_namespace")
    @property
    @pulumi.getter(name="webUrl")
    def web_url(self) -> str:
        """The web URL of the project (wire name `webUrl`)."""
        return pulumi.get(self, "web_url")
@pulumi.output_type
class GetProjectsProjectNamespaceResult(dict):
    """The namespace (group or user) a project belongs to."""
    def __init__(__self__, *,
                 full_path: str,
                 id: int,
                 kind: str,
                 name: str,
                 path: str):
        """
        :param str full_path: The full path of the namespace.
        :param int id: The ID of the namespace.
        :param str kind: The kind of the namespace.
        :param str name: The name of the namespace.
        :param str path: The path of the namespace.
        """
        pulumi.set(__self__, "full_path", full_path)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "kind", kind)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "path", path)
    @property
    @pulumi.getter(name="fullPath")
    def full_path(self) -> str:
        """The full path of the namespace (wire name `fullPath`)."""
        return pulumi.get(self, "full_path")
    @property
    @pulumi.getter
    def id(self) -> int:
        """
        The ID of the namespace.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def kind(self) -> str:
        """The kind of the namespace."""
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the namespace.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def path(self) -> str:
        """The path of the namespace."""
        return pulumi.get(self, "path")
@pulumi.output_type
class GetProjectsProjectOwnerResult(dict):
    """The user that owns a project."""
    def __init__(__self__, *,
                 avatar_url: str,
                 id: int,
                 name: str,
                 state: str,
                 username: str,
                 website_url: str):
        """
        :param str avatar_url: The avatar URL of the owner.
        :param int id: The ID of the owner.
        :param str name: The name of the owner.
        :param str state: The state of the owner account.
        :param str username: The username of the owner.
        :param str website_url: The website URL of the owner.
        """
        pulumi.set(__self__, "avatar_url", avatar_url)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "state", state)
        pulumi.set(__self__, "username", username)
        pulumi.set(__self__, "website_url", website_url)
    @property
    @pulumi.getter(name="avatarUrl")
    def avatar_url(self) -> str:
        """The avatar URL of the owner (wire name `avatarUrl`)."""
        return pulumi.get(self, "avatar_url")
    @property
    @pulumi.getter
    def id(self) -> int:
        """
        The ID of the owner.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the owner.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def state(self) -> str:
        """The state of the owner account."""
        return pulumi.get(self, "state")
    @property
    @pulumi.getter
    def username(self) -> str:
        """The username of the owner."""
        return pulumi.get(self, "username")
    @property
    @pulumi.getter(name="websiteUrl")
    def website_url(self) -> str:
        """The website URL of the owner (wire name `websiteUrl`)."""
        return pulumi.get(self, "website_url")
@pulumi.output_type
class GetProjectsProjectPermissionsResult(dict):
    # Output type describing the permissions on a project. Keys/values of the
    # mappings follow the GitLab API permissions object — TODO confirm schema.
    def __init__(__self__, *,
                 group_access: Mapping[str, int],
                 project_access: Mapping[str, int]):
        """
        :param Mapping[str, int] group_access: Group access permissions mapping.
        :param Mapping[str, int] project_access: Project access permissions mapping.
        """
        pulumi.set(__self__, "group_access", group_access)
        pulumi.set(__self__, "project_access", project_access)

    @property
    @pulumi.getter(name="groupAccess")
    def group_access(self) -> Mapping[str, int]:
        """
        Group access permissions mapping.
        """
        return pulumi.get(self, "group_access")

    @property
    @pulumi.getter(name="projectAccess")
    def project_access(self) -> Mapping[str, int]:
        """
        Project access permissions mapping.
        """
        return pulumi.get(self, "project_access")
@pulumi.output_type
class GetProjectsProjectSharedWithGroupResult(dict):
    # Output type describing a group a project is shared with.
    def __init__(__self__, *,
                 group_access_level: str,
                 group_id: int,
                 group_name: str):
        """
        :param str group_access_level: The access level the group has on the project.
        :param int group_id: The ID of the group the project is shared with.
        :param str group_name: The name of the group the project is shared with.
        """
        pulumi.set(__self__, "group_access_level", group_access_level)
        pulumi.set(__self__, "group_id", group_id)
        pulumi.set(__self__, "group_name", group_name)

    @property
    @pulumi.getter(name="groupAccessLevel")
    def group_access_level(self) -> str:
        """
        The access level the group has on the project.
        """
        return pulumi.get(self, "group_access_level")

    @property
    @pulumi.getter(name="groupId")
    def group_id(self) -> int:
        """
        The ID of the group the project is shared with.
        """
        return pulumi.get(self, "group_id")

    @property
    @pulumi.getter(name="groupName")
    def group_name(self) -> str:
        """
        The name of the group the project is shared with.
        """
        return pulumi.get(self, "group_name")
@pulumi.output_type
class GetUsersUserResult(dict):
def __init__(__self__, *,
avatar_url: str,
bio: str,
can_create_group: bool,
can_create_project: bool,
color_scheme_id: int,
created_at: str,
current_sign_in_at: str,
email: str,
extern_uid: str,
external: bool,
id: int,
is_admin: bool,
last_sign_in_at: str,
linkedin: str,
location: str,
name: str,
organization: str,
projects_limit: int,
provider: str,
skype: str,
state: str,
theme_id: int,
twitter: str,
two_factor_enabled: bool,
username: str,
website_url: str):
"""
:param str avatar_url: The avatar URL of the user.
:param str bio: The bio of the user.
:param bool can_create_group: Whether the user can create groups.
:param bool can_create_project: Whether the user can create projects.
:param int color_scheme_id: User's color scheme ID.
:param str created_at: Date the user was created at.
:param str current_sign_in_at: Current user's sign-in date.
:param str email: The e-mail address of the user.
:param str extern_uid: Lookup users by external UID. (Requires administrator privileges)
:param bool external: Whether the user is external.
:param int id: The unique id assigned to the user by the gitlab server.
:param bool is_admin: Whether the user is an admin.
:param str last_sign_in_at: Last user's sign-in date.
:param str linkedin: LinkedIn profile of the user.
:param str location: The location of the user.
:param str name: The name of the user.
:param str organization: The organization of the user.
:param int projects_limit: Number of projects the user can create.
:param str provider: The UID provider of the user.
:param str skype: Skype username of the user.
:param str state: Whether the user is active or blocked.
:param int theme_id: User's theme ID.
:param str twitter: Twitter username of the user.
:param bool two_factor_enabled: Whether user's two-factor auth is enabled.
:param str username: The username of the user.
:param str website_url: User's website URL.
"""
pulumi.set(__self__, "avatar_url", avatar_url)
pulumi.set(__self__, "bio", bio)
pulumi.set(__self__, "can_create_group", can_create_group)
pulumi.set(__self__, "can_create_project", can_create_project)
pulumi.set(__self__, "color_scheme_id", color_scheme_id)
pulumi.set(__self__, "created_at", created_at)
pulumi.set(__self__, "current_sign_in_at", current_sign_in_at)
pulumi.set(__self__, "email", email)
pulumi.set(__self__, "extern_uid", extern_uid)
pulumi.set(__self__, "external", external)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "is_admin", is_admin)
pulumi.set(__self__, "last_sign_in_at", last_sign_in_at)
pulumi.set(__self__, "linkedin", linkedin)
pulumi.set(__self__, "location", location)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "organization", organization)
pulumi.set(__self__, "projects_limit", projects_limit)
pulumi.set(__self__, "provider", provider)
pulumi.set(__self__, "skype", skype)
pulumi.set(__self__, "state", state)
pulumi.set(__self__, "theme_id", theme_id)
pulumi.set(__self__, "twitter", twitter)
pulumi.set(__self__, "two_factor_enabled", two_factor_enabled)
pulumi.set(__self__, "username", username)
pulumi.set(__self__, "website_url", website_url)
@property
@pulumi.getter(name="avatarUrl")
def avatar_url(self) -> str:
"""
The avatar URL of the user.
"""
return pulumi.get(self, "avatar_url")
@property
@pulumi.getter
# (truncated by extraction)
# <gh_stars>10-100
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Flex Messaging implementation.
This module contains the message classes used with Flex Data Services.
@see: U{RemoteObject on OSFlash (external)
<http://osflash.org/documentation/amf3#remoteobject>}
@since: 0.1
"""
import uuid
import pyamf.util
from pyamf import amf3
__all__ = [
'RemotingMessage',
'CommandMessage',
'AcknowledgeMessage',
'ErrorMessage',
'AbstractMessage',
'AsyncMessage'
]
NAMESPACE = 'flex.messaging.messages'
SMALL_FLAG_MORE = 0x80
class AbstractMessage(object):
    """
    Abstract base class for all Flex messages.

    Messages have two customizable sections; headers and data. The headers
    property provides access to specialized meta information for a specific
    message instance. The data property contains the instance specific data
    that needs to be delivered and processed by the decoder.

    @see: U{AbstractMessage on Livedocs<http://
        help.adobe.com/en_US/FlashPlatform/reference/actionscript/3/mx/
        messaging/messages/AbstractMessage.html>}

    @ivar body: Specific data that needs to be delivered to the remote
        destination.
    @type body: C{mixed}
    @ivar clientId: Indicates which client sent the message.
    @type clientId: C{str}
    @ivar destination: Message destination.
    @type destination: C{str}
    @ivar headers: Message headers. Core header names start with C{DS}.
    @type headers: C{dict}
    @ivar messageId: Unique Message ID.
    @type messageId: C{str}
    @ivar timeToLive: How long the message should be considered valid and
        deliverable.
    @type timeToLive: C{int}
    @ivar timestamp: Timestamp when the message was generated.
    @type timestamp: C{int}
    """

    class __amf__:
        # pyamf class-alias configuration: always use AMF3 and encode the
        # static attributes in exactly this order.
        amf3 = True
        static = (
            'body',
            'clientId',
            'destination',
            'headers',
            'messageId',
            'timestamp',
            'timeToLive'
        )

    #: Each message pushed from the server will contain this header identifying
    #: the client that will receive the message.
    DESTINATION_CLIENT_ID_HEADER = "DSDstClientId"
    #: Messages are tagged with the endpoint id for the channel they are sent
    #: over.
    ENDPOINT_HEADER = "DSEndpoint"
    #: Messages that need to set remote credentials for a destination carry the
    #: C{Base64} encoded credentials in this header.
    REMOTE_CREDENTIALS_HEADER = "DSRemoteCredentials"
    #: The request timeout value is set on outbound messages by services or
    #: channels and the value controls how long the responder will wait for an
    #: acknowledgement, result or fault response for the message before timing
    #: out the request.
    REQUEST_TIMEOUT_HEADER = "DSRequestTimeout"

    #: Flag bits for the first flag byte of the "small message" encoding;
    #: each bit marks one attribute of C{__amf__.static} as present.
    SMALL_ATTRIBUTE_FLAGS = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40]
    SMALL_ATTRIBUTES = dict(zip(
        SMALL_ATTRIBUTE_FLAGS,
        __amf__.static
    ))

    #: Flag bits for the second flag byte: clientId/messageId transmitted as
    #: raw 16-byte UUIDs instead of plain objects.
    SMALL_UUID_FLAGS = [0x01, 0x02]
    SMALL_UUIDS = dict(zip(
        SMALL_UUID_FLAGS,
        ['clientId', 'messageId']
    ))

    def __new__(cls, *args, **kwargs):
        # Run __init__ explicitly so every attribute gets its default even
        # when an instance is produced through cls.__new__ alone (as a
        # decoder may do). Note that on normal construction type.__call__
        # will invoke __init__ a second time; this is harmless here because
        # __init__ only assigns keyword-derived defaults.
        obj = object.__new__(cls)
        obj.__init__(*args, **kwargs)
        return obj

    def __init__(self, *args, **kwargs):
        # All attributes default to None (headers to an empty dict) so a
        # partially decoded message is still fully populated.
        self.body = kwargs.get('body', None)
        self.clientId = kwargs.get('clientId', None)
        self.destination = kwargs.get('destination', None)
        self.headers = kwargs.get('headers', {})
        self.messageId = kwargs.get('messageId', None)
        self.timestamp = kwargs.get('timestamp', None)
        self.timeToLive = kwargs.get('timeToLive', None)

    def __repr__(self):
        # Debug representation listing every instance attribute.
        m = '<%s ' % self.__class__.__name__

        for k in self.__dict__:
            m += ' %s=%r' % (k, getattr(self, k))

        return m + " />"

    def decodeSmallAttribute(self, attr, input):
        """
        Decode one small-message attribute from C{input}.

        C{timestamp}/C{timeToLive} travel over the wire as milliseconds and
        are converted to datetime objects here.

        @since: 0.5
        """
        obj = input.readObject()

        if attr in ['timestamp', 'timeToLive']:
            # wire value is in milliseconds; get_datetime expects seconds
            return pyamf.util.get_datetime(obj / 1000.0)

        return obj

    def encodeSmallAttribute(self, attr):
        """
        Encode one attribute for the small-message representation.

        A falsy return value means the attribute's flag bit is not set:
        unset attributes pass through unchanged, and C{uuid.UUID} valued
        C{clientId}/C{messageId} return C{None} because those are written
        separately via the UUID flag byte (see L{__writeamf__}).

        @since: 0.5
        """
        obj = getattr(self, attr)

        if not obj:
            return obj

        if attr in ['timestamp', 'timeToLive']:
            # datetimes go over the wire as milliseconds
            return pyamf.util.get_timestamp(obj) * 1000.0
        elif attr in ['clientId', 'messageId']:
            if isinstance(obj, uuid.UUID):
                return None

        return obj

    def __readamf__(self, input):
        # Read the flag bytes for this portion of the small message; the
        # first byte flags plain attributes, the optional second byte flags
        # UUID-encoded clientId/messageId values.
        flags = read_flags(input)

        if len(flags) > 2:
            raise pyamf.DecodeError(
                'Expected <=2 (got %d) flags for the AbstractMessage portion '
                'of the small message for %r' % (
                    len(flags), self.__class__
                )
            )

        for index, byte in enumerate(flags):
            if index == 0:
                # first flag byte: plain attribute values, in flag order
                for flag in self.SMALL_ATTRIBUTE_FLAGS:
                    if flag & byte:
                        attr = self.SMALL_ATTRIBUTES[flag]
                        setattr(
                            self,
                            attr,
                            self.decodeSmallAttribute(attr, input)
                        )
            elif index == 1:
                # second flag byte: clientId/messageId as raw UUID bytes
                for flag in self.SMALL_UUID_FLAGS:
                    if flag & byte:
                        attr = self.SMALL_UUIDS[flag]

                        setattr(self, attr, decode_uuid(input.readObject()))

    def __writeamf__(self, output):
        flag_attrs = []
        uuid_attrs = []
        byte = 0

        # Build the first flag byte from the attributes that encode to a
        # truthy value; collect the values themselves in flag order.
        for flag in self.SMALL_ATTRIBUTE_FLAGS:
            value = self.encodeSmallAttribute(self.SMALL_ATTRIBUTES[flag])

            if value:
                byte |= flag
                flag_attrs.append(value)

        flags = byte
        byte = 0

        # Build the second flag byte for UUID-typed clientId/messageId.
        for flag in self.SMALL_UUID_FLAGS:
            attr = self.SMALL_UUIDS[flag]
            value = getattr(self, attr)

            if not value:
                continue

            byte |= flag
            uuid_attrs.append(amf3.ByteArray(value.bytes))

        del attr

        # SMALL_FLAG_MORE marks that another flag byte follows.
        if not byte:
            output.writeUnsignedByte(flags)
        else:
            output.writeUnsignedByte(flags | SMALL_FLAG_MORE)
            output.writeUnsignedByte(byte)

        [output.writeObject(attr) for attr in flag_attrs]
        [output.writeObject(attr) for attr in uuid_attrs]

    def getSmallMessage(self):
        """
        Return a C{ISmallMessage} representation of this object. If one is not
        available, C{NotImplementedError} will be raised.

        @since: 0.5

        @see: U{ISmallMessage on Adobe Help (external)<http://
            help.adobe.com/en_US/FlashPlatform/reference/actionscript/3/mx/
            messaging/messages/ISmallMessage.html>}
        """
        raise NotImplementedError
class AsyncMessage(AbstractMessage):
    """
    I am the base class for all asynchronous Flex messages.

    @see: U{AsyncMessage on Adobe Help<http://
        help.adobe.com/en_US/FlashPlatform/reference/actionscript/3/mx/
        messaging/messages/AsyncMessage.html>}

    @ivar correlationId: Correlation id of the message.
    @type correlationId: C{str}
    """

    #: Messages that were sent with a defined subtopic property indicate their
    #: target subtopic in this header.
    SUBTOPIC_HEADER = "DSSubtopic"

    class __amf__:
        static = ('correlationId',)

    def __init__(self, *args, **kwargs):
        AbstractMessage.__init__(self, *args, **kwargs)

        self.correlationId = kwargs.get('correlationId', None)

    def __readamf__(self, input):
        AbstractMessage.__readamf__(self, input)

        flags = read_flags(input)

        if len(flags) > 1:
            raise pyamf.DecodeError(
                'Expected <=1 (got %d) flags for the AsyncMessage portion of '
                'the small message for %r' % (
                    len(flags), self.__class__
                )
            )

        byte = flags[0]

        # 0x01: correlationId sent as a plain object.
        # 0x02: correlationId sent as raw UUID bytes; read after (and
        # overwriting) any 0x01 value.
        if byte & 0x01:
            self.correlationId = input.readObject()

        if byte & 0x02:
            self.correlationId = decode_uuid(input.readObject())

    def __writeamf__(self, output):
        AbstractMessage.__writeamf__(self, output)

        if not isinstance(self.correlationId, uuid.UUID):
            # plain correlation id value
            output.writeUnsignedByte(0x01)
            output.writeObject(self.correlationId)
        else:
            # UUID correlation id, written as its raw 16 bytes
            output.writeUnsignedByte(0x02)
            output.writeObject(pyamf.amf3.ByteArray(self.correlationId.bytes))

    def getSmallMessage(self):
        """
        Return a C{ISmallMessage} representation of this async message.

        @since: 0.5
        """
        return AsyncMessageExt(**self.__dict__)
class AcknowledgeMessage(AsyncMessage):
    """
    Acknowledges the receipt of a message that was sent previously.

    Every message sent within the messaging system must receive an
    acknowledgement.

    @see: U{AcknowledgeMessage on Adobe Help (external)<http://
        help.adobe.com/en_US/FlashPlatform/reference/actionscript/3/mx/
        messaging/messages/AcknowledgeMessage.html>}
    """

    #: Used to indicate that the acknowledgement is for a message that
    #: generated an error.
    ERROR_HINT_HEADER = "DSErrorHint"

    def __readamf__(self, input):
        AsyncMessage.__readamf__(self, input)

        flag_bytes = read_flags(input)
        flag_count = len(flag_bytes)

        # This class contributes no flagged attributes of its own, so at
        # most one (empty) flag byte may appear in this portion.
        if flag_count > 1:
            raise pyamf.DecodeError(
                'Expected <=1 (got %d) flags for the AcknowledgeMessage '
                'portion of the small message for %r' % (
                    flag_count, self.__class__
                )
            )

    def __writeamf__(self, output):
        AsyncMessage.__writeamf__(self, output)

        # Nothing to flag: emit a single empty flag byte.
        output.writeUnsignedByte(0)

    def getSmallMessage(self):
        """
        Return a C{ISmallMessage} representation of this acknowledge message.

        @since: 0.5
        """
        return AcknowledgeMessageExt(**self.__dict__)
class CommandMessage(AsyncMessage):
"""
Provides a mechanism for sending commands related to publish/subscribe
messaging, ping, and cluster operations.
@see: U{CommandMessage on Adobe Help (external)<http://
help.adobe.com/en_US/FlashPlatform/reference/actionscript/3/mx/
messaging/messages/CommandMessage.html>}
@ivar operation: The command
@type operation: C{int}
@ivar messageRefType: hmm, not sure about this one.
@type messageRefType: C{str}
"""
#: The server message type for authentication commands.
AUTHENTICATION_MESSAGE_REF_TYPE = (
"flex.messaging.messages.AuthenticationMessage"
)
#: This is used to test connectivity over the current channel to the remote
#: endpoint.
PING_OPERATION = 5
#: This is used by a remote destination to sync missed or cached messages
#: back to a client as a result of a client issued poll command.
SYNC_OPERATION = 4
#: This is used to request a list of failover endpoint URIs for the remote
#: destination based on cluster membership.
CLUSTER_REQUEST_OPERATION = 7
#: This is used to send credentials to the endpoint so that the user can be
#: logged in over the current channel. The credentials need to be C{Base64}
#: encoded and stored in the body of the message.
LOGIN_OPERATION = 8
#: This is used to log the user out of the current channel, and will
#: invalidate the server session if the channel is HTTP based.
LOGOUT_OPERATION = 9
#: This is used to poll a remote destination for pending, undelivered
#: messages.
POLL_OPERATION = 2
#: Subscribe commands issued by a consumer pass the consumer's C{selector}
#: expression in this header.
SELECTOR_HEADER = "DSSelector"
#: This is used to indicate that the client's session with a remote
#: destination has timed out.
SESSION_INVALIDATE_OPERATION = 10
#: This is used to subscribe to a remote destination.
SUBSCRIBE_OPERATION = 0
#: This is the default operation for new L{CommandMessage} instances.
UNKNOWN_OPERATION = 1000
#: This is used to unsubscribe from a remote destination.
UNSUBSCRIBE_OPERATION = 1
#: This operation is used to indicate that a channel has disconnected.
DISCONNECT_OPERATION = 12
class __amf__:
static = ('operation',)
def __init__(self, *args, **kwargs):
AsyncMessage.__init__(self, *args, **kwargs)
self.operation = kwargs.get('operation', None)
def __readamf__(self, input):
AsyncMessage.__readamf__(self, input)
flags = read_flags(input)
if not flags:
return
if len(flags) > 1:
raise pyamf.DecodeError(
'Expected <=1 (got %d) flags for the CommandMessage portion '
'of the small message for %r' % (
len(flags), self.__class__
)
)
byte = flags[0]
if byte & 0x01:
self.operation = input.readObject()
def | |
# platypush/backend/button/flic/fliclib/aioflic.py
"""Flic client library for python
Requires python 3.3 or higher.
For detailed documentation, see the protocol documentation.
Notes on the data type used in this python implementation compared to the protocol documentation:
All kind of integers are represented as python integers.
Booleans use the Boolean type.
Enums use the defined python enums below.
Bd addr are represented as standard python strings, e.g. "aa:bb:cc:dd:ee:ff".
"""
import asyncio
from enum import Enum
from collections import namedtuple
import struct
import itertools
import threading
class CreateConnectionChannelError(Enum):
    """Error codes for a create-connection-channel request."""
    NoError = 0
    MaxPendingConnectionsReached = 1
class ConnectionStatus(Enum):
    """Connection status of a button connection channel."""
    Disconnected = 0
    Connected = 1
    Ready = 2
class DisconnectReason(Enum):
    """Reason a button connection was disconnected."""
    Unspecified = 0
    ConnectionEstablishmentFailed = 1
    TimedOut = 2
    BondingKeysMismatch = 3
class RemovedReason(Enum):
    """Reason a connection channel was removed."""
    RemovedByThisClient = 0
    ForceDisconnectedByThisClient = 1
    ForceDisconnectedByOtherClient = 2
    ButtonIsPrivate = 3
    VerifyTimeout = 4
    InternetBackendError = 5
    InvalidData = 6
    CouldntLoadDevice = 7
class ClickType(Enum):
    """Kind of button event delivered to a connection channel callback."""
    ButtonDown = 0
    ButtonUp = 1
    ButtonClick = 2
    ButtonSingleClick = 3
    ButtonDoubleClick = 4
    ButtonHold = 5
class BdAddrType(Enum):
    """Bluetooth device address type."""
    PublicBdAddrType = 0
    RandomBdAddrType = 1
class LatencyMode(Enum):
    """Latency mode for a button connection."""
    NormalLatency = 0
    LowLatency = 1
    HighLatency = 2
class BluetoothControllerState(Enum):
    """State of the Bluetooth controller."""
    Detached = 0
    Resetting = 1
    Attached = 2
class ScanWizardResult(Enum):
    """Result code delivered when a scan wizard completes."""
    WizardSuccess = 0
    WizardCancelledByUser = 1
    WizardFailedTimeout = 2
    WizardButtonIsPrivate = 3
    WizardBluetoothUnavailable = 4
    WizardInternetBackendError = 5
    WizardInvalidData = 6
class ButtonScanner:
    """Scanner for Flic button advertisement packets.

    Assign a handler and register the scanner with a client::

        scanner = ButtonScanner()
        scanner.on_advertisement_packet = lambda scanner, bd_addr, name, rssi, is_private, already_verified: ...
        client.add_scanner(scanner)
    """

    # Process-wide counter handing out unique scan ids.
    _cnt = itertools.count()

    def __init__(self):
        self._scan_id = next(ButtonScanner._cnt)
        # Default handler is a no-op; replace it with your own callback.
        self.on_advertisement_packet = (
            lambda scanner, bd_addr, name, rssi, is_private, already_verified: None
        )
class ScanWizard:
    """Interactive wizard that discovers and verifies a Flic button.

    Assign the callbacks you need, then register the wizard::

        wizard = ScanWizard()
        wizard.on_found_private_button = lambda scan_wizard: ...
        wizard.on_found_public_button = lambda scan_wizard, bd_addr, name: ...
        wizard.on_button_connected = lambda scan_wizard, bd_addr, name: ...
        wizard.on_completed = lambda scan_wizard, result, bd_addr, name: ...
        client.add_scan_wizard(wizard)
    """

    # Process-wide counter handing out unique wizard ids.
    _cnt = itertools.count()

    def __init__(self):
        self._scan_wizard_id = next(ScanWizard._cnt)
        # Populated while the wizard progresses.
        self._bd_addr = None
        self._name = None
        # All handlers default to no-ops; override as needed.
        self.on_found_private_button = lambda scan_wizard: None
        self.on_found_public_button = lambda scan_wizard, bd_addr, name: None
        self.on_button_connected = lambda scan_wizard, bd_addr, name: None
        self.on_completed = lambda scan_wizard, result, bd_addr, name: None
class ButtonConnectionChannel:
    """Connection channel to a single Flic button.

    Register the channel with a FlicClient via
    ``client.add_connection_channel(connection_channel)``; a channel may only
    be added to one client at a time. Set up your callbacks before adding the
    channel — every callback receives this channel as its first parameter:

    - on_create_connection_channel_response: channel, error, connection_status
    - on_removed: channel, removed_reason
    - on_connection_status_changed: channel, connection_status, disconnect_reason
    - on_button_up_or_down / on_button_click_or_hold /
      on_button_single_or_double_click /
      on_button_single_or_double_click_or_hold:
      channel, click_type, was_queued, time_diff
    """

    # Process-wide counter handing out unique connection ids.
    _cnt = itertools.count()

    def __init__(self, bd_addr, latency_mode=LatencyMode.NormalLatency, auto_disconnect_time=511):
        self._conn_id = next(ButtonConnectionChannel._cnt)
        self._bd_addr = bd_addr
        self._latency_mode = latency_mode
        self._auto_disconnect_time = auto_disconnect_time
        # Set by the client when the channel is added.
        self._client = None
        # All handlers default to no-ops; override as needed.
        self.on_create_connection_channel_response = lambda channel, error, connection_status: None
        self.on_removed = lambda channel, removed_reason: None
        self.on_connection_status_changed = lambda channel, connection_status, disconnect_reason: None
        self.on_button_up_or_down = lambda channel, click_type, was_queued, time_diff: None
        self.on_button_click_or_hold = lambda channel, click_type, was_queued, time_diff: None
        self.on_button_single_or_double_click = lambda channel, click_type, was_queued, time_diff: None
        self.on_button_single_or_double_click_or_hold = lambda channel, click_type, was_queued, time_diff: None

    def _push_mode_parameters(self):
        # Forward the current mode parameters to the server, but only when
        # attached to a client whose connection is still open.
        if self._client is None or self._client._closed:
            return
        self._client._send_command("CmdChangeModeParameters",
                                   {"conn_id": self._conn_id, "latency_mode": self._latency_mode,
                                    "auto_disconnect_time": self._auto_disconnect_time})

    @property
    def bd_addr(self):
        return self._bd_addr

    @property
    def latency_mode(self):
        return self._latency_mode

    @latency_mode.setter
    def latency_mode(self, latency_mode):
        self._latency_mode = latency_mode
        self._push_mode_parameters()

    @property
    def auto_disconnect_time(self):
        return self._auto_disconnect_time

    @auto_disconnect_time.setter
    def auto_disconnect_time(self, auto_disconnect_time):
        self._auto_disconnect_time = auto_disconnect_time
        self._push_mode_parameters()
class FlicClient(asyncio.Protocol):
"""FlicClient class.
When this class is constructed, a socket connection is established.
You may then send commands to the server and set timers.
Once you are ready with the initialization you must call the handle_events() method which is a main loop that never exits, unless the socket is closed.
For a more detailed description of all commands, events and enums, check the protocol specification.
All commands are wrapped in more high level functions and events are reported using callback functions.
All methods called on this class will take effect only if you eventually call the handle_events() method.
The ButtonScanner is used to set up a handler for advertisement packets.
The ButtonConnectionChannel is used to interact with connections to flic buttons and receive their events.
Other events are handled by the following callback functions that can be assigned to this object (and a list of the callback function parameters):
on_new_verified_button: bd_addr
on_no_space_for_new_connection: max_concurrently_connected_buttons
on_got_space_for_new_connection: max_concurrently_connected_buttons
on_bluetooth_controller_state_change: state
"""
_EVENTS = [
("EvtAdvertisementPacket", "<I6s17pb??", "scan_id bd_addr name rssi is_private already_verified"),
("EvtCreateConnectionChannelResponse", "<IBB", "conn_id error connection_status"),
("EvtConnectionStatusChanged", "<IBB", "conn_id connection_status disconnect_reason"),
("EvtConnectionChannelRemoved", "<IB", "conn_id removed_reason"),
("EvtButtonUpOrDown", "<IBBI", "conn_id click_type was_queued time_diff"),
("EvtButtonClickOrHold", "<IBBI", "conn_id click_type was_queued time_diff"),
("EvtButtonSingleOrDoubleClick", "<IBBI", "conn_id click_type was_queued time_diff"),
("EvtButtonSingleOrDoubleClickOrHold", "<IBBI", "conn_id click_type was_queued time_diff"),
("EvtNewVerifiedButton", "<6s", "bd_addr"),
("EvtGetInfoResponse", "<B6sBBhBBH",
"bluetooth_controller_state my_bd_addr my_bd_addr_type max_pending_connections max_concurrently_connected_buttons current_pending_connections currently_no_space_for_new_connection nb_verified_buttons"),
("EvtNoSpaceForNewConnection", "<B", "max_concurrently_connected_buttons"),
("EvtGotSpaceForNewConnection", "<B", "max_concurrently_connected_buttons"),
("EvtBluetoothControllerStateChange", "<B", "state"),
("EvtPingResponse", "<I", "ping_id"),
("EvtGetButtonUUIDResponse", "<6s16s", "bd_addr uuid"),
("EvtScanWizardFoundPrivateButton", "<I", "scan_wizard_id"),
("EvtScanWizardFoundPublicButton", "<I6s17p", "scan_wizard_id bd_addr name"),
("EvtScanWizardButtonConnected", "<I", "scan_wizard_id"),
("EvtScanWizardCompleted", "<IB", "scan_wizard_id result")
]
_EVENT_STRUCTS = list(map(lambda x: None if x is None else struct.Struct(x[1]), _EVENTS))
_EVENT_NAMED_TUPLES = list(map(lambda x: None if x is None else namedtuple(x[0], x[2]), _EVENTS))
_COMMANDS = [
("CmdGetInfo", "", ""),
("CmdCreateScanner", "<I", "scan_id"),
("CmdRemoveScanner", "<I", "scan_id"),
("CmdCreateConnectionChannel", "<I6sBh", "conn_id bd_addr latency_mode auto_disconnect_time"),
("CmdRemoveConnectionChannel", "<I", "conn_id"),
("CmdForceDisconnect", "<6s", "bd_addr"),
("CmdChangeModeParameters", "<IBh", "conn_id latency_mode auto_disconnect_time"),
("CmdPing", "<I", "ping_id"),
("CmdGetButtonUUID", "<6s", "bd_addr"),
("CmdCreateScanWizard", "<I", "scan_wizard_id"),
("CmdCancelScanWizard", "<I", "scan_wizard_id")
]
_COMMAND_STRUCTS = list(map(lambda x: struct.Struct(x[1]), _COMMANDS))
_COMMAND_NAMED_TUPLES = list(map(lambda x: namedtuple(x[0], x[2]), _COMMANDS))
_COMMAND_NAME_TO_OPCODE = dict((x[0], i) for i, x in enumerate(_COMMANDS))
@staticmethod
def _bdaddr_bytes_to_string(bdaddr_bytes):
return ":".join(map(lambda x: "%02x" % x, reversed(bdaddr_bytes)))
@staticmethod
def _bdaddr_string_to_bytes(bdaddr_string):
return bytearray.fromhex("".join(reversed(bdaddr_string.split(":"))))
def __init__(self, loop, parent=None):
self.loop = loop
self.buffer = b""
self.transport = None
self.parent = parent
self._scanners = {}
self._scan_wizards = {}
self._connection_channels = {}
self._closed = False
self.on_new_verified_button = lambda bd_addr: None
self.on_no_space_for_new_connection = lambda max_concurrently_connected_buttons: None
self.on_got_space_for_new_connection = lambda max_concurrently_connected_buttons: None
self.on_bluetooth_controller_state_change = lambda state: None
self.on_get_info = lambda items: None
self.on_get_button_uuid = lambda addr, uuid: None
def connection_made(self, transport):
self.transport = transport
if self.parent:
self.parent.register_protocol(self)
def close(self):
"""Closes the client. The handle_events() method will return."""
if self._closed:
return
self._closed = True
def add_scanner(self, scanner):
"""Add a ButtonScanner object.
The scan will start directly once the scanner is added.
"""
if scanner._scan_id in self._scanners:
return
self._scanners[scanner._scan_id] = scanner
self._send_command("CmdCreateScanner", {"scan_id": scanner._scan_id})
def remove_scanner(self, scanner):
"""Remove a ButtonScanner object.
You will no longer receive advertisement packets.
"""
if scanner._scan_id not in self._scanners:
return
del self._scanners[scanner._scan_id]
self._send_command("CmdRemoveScanner", {"scan_id": scanner._scan_id})
def add_scan_wizard(self, scan_wizard):
"""Add a ScanWizard object.
The scan wizard will start directly once the scan wizard is added.
"""
if scan_wizard._scan_wizard_id in self._scan_wizards:
return
self._scan_wizards[scan_wizard._scan_wizard_id] = scan_wizard
self._send_command("CmdCreateScanWizard", {"scan_wizard_id": scan_wizard._scan_wizard_id})
def cancel_scan_wizard(self, scan_wizard):
"""Cancel a ScanWizard.
Note: The effect of this command will take place at the time the on_completed event arrives on the scan wizard object.
If cancelled due to this command, "result" in the on_completed event will be "WizardCancelledByUser".
"""
if scan_wizard._scan_wizard_id not in self._scan_wizards:
return
self._send_command("CmdCancelScanWizard", {"scan_wizard_id": scan_wizard._scan_wizard_id})
def add_connection_channel(self, channel):
"""Adds a connection channel to a specific Flic button.
This will start listening for a specific Flic button's connection and button events.
Make sure the Flic is either in public mode (by holding it down for 7 seconds) or already verified before calling this method.
The on_create_connection_channel_response callback property will be called on the
connection channel after this command has been received by the server.
You may have as many connection channels as you wish for a specific Flic Button.
"""
if channel._conn_id in self._connection_channels:
return
channel._client = self
self._connection_channels[channel._conn_id] = channel
self._send_command("CmdCreateConnectionChannel", {"conn_id": channel._conn_id, "bd_addr": channel.bd_addr,
"latency_mode": channel._latency_mode,
"auto_disconnect_time": channel._auto_disconnect_time})
def remove_connection_channel(self, channel):
"""Remove a connection channel.
This will stop listening for new events for a specific connection channel that has previously been added.
Note: The effect of this command will take place at the time the on_removed event arrives on the connection channel object.
"""
if | |
# orix/tests/quaternion/test_symmetry.py
# -*- coding: utf-8 -*-
# Copyright 2018-2022 the orix developers
#
# This file is part of orix.
#
# orix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# orix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with orix. If not, see <http://www.gnu.org/licenses/>.
from copy import deepcopy
from diffpy.structure.spacegroups import GetSpaceGroup
from matplotlib import pyplot as plt
import numpy as np
import pytest
from orix.quaternion import Rotation, Symmetry, get_point_group
# fmt: off
# isort: off
from orix.quaternion.symmetry import (
C1, Ci, # triclinic
C2x, C2y, C2z, Csx, Csy, Csz, Cs, C2, C2h, # monoclinic
D2, C2v, D2h, # orthorhombic
C4, S4, C4h, D4, C4v, D2d, D4h, # tetragonal
C3, S6, D3x, D3y, D3, C3v, D3d, # trigonal
C6, C3h, C6h, D6, C6v, D3h, D6h, # hexagonal
T, Th, O, Td, Oh, # cubic
spacegroup2pointgroup_dict, _groups, _get_unique_symmetry_elements
)
# isort: on
# fmt: on
from orix.vector import Vector3d
@pytest.fixture(params=[(1, 2, 3)])
def vector(request):
    """Return a ``Vector3d`` built from the fixture parameter; tests can
    override the tuple via indirect parametrization.
    """
    return Vector3d(request.param)
@pytest.fixture(params=_groups)
def all_symmetries(request):
    """Parametrized fixture yielding every point group listed in ``_groups``."""
    return request.param
@pytest.mark.parametrize(
    "symmetry, vector, expected",
    [
        (Ci, (1, 2, 3), [(1, 2, 3), (-1, -2, -3)]),
        (Csx, (1, 2, 3), [(1, 2, 3), (-1, 2, 3)]),
        (Csy, (1, 2, 3), [(1, 2, 3), (1, -2, 3)]),
        (Csz, (1, 2, 3), [(1, 2, 3), (1, 2, -3)]),
        (C2, (1, 2, 3), [(1, 2, 3), (-1, -2, 3)]),
        (
            C2v,
            (1, 2, 3),
            [
                (1, 2, 3),
                (1, -2, 3),
                (1, -2, -3),
                (1, 2, -3),
            ],
        ),
        (
            C4v,
            (1, 2, 3),
            [
                (1, 2, 3),
                (-2, 1, 3),
                (-1, -2, 3),
                (2, -1, 3),
                (-1, 2, 3),
                (2, 1, 3),
                (-2, -1, 3),
                (1, -2, 3),
            ],
        ),
        (
            D4,
            (1, 2, 3),
            [
                (1, 2, 3),
                (-2, 1, 3),
                (-1, -2, 3),
                (2, -1, 3),
                (-1, 2, -3),
                (2, 1, -3),
                (-2, -1, -3),
                (1, -2, -3),
            ],
        ),
        (
            C6,
            (1, 2, 3),
            [
                (1, 2, 3),
                (-1.232, 1.866, 3),
                (-2.232, -0.134, 3),
                (-1, -2, 3),
                (1.232, -1.866, 3),
                (2.232, 0.134, 3),
            ],
        ),
        (
            Td,
            (1, 2, 3),
            [
                (1, 2, 3),
                (3, 1, 2),
                (2, 3, 1),
                (-2, -1, 3),
                (3, -2, -1),
                (-1, 3, -2),
                (2, -1, -3),
                (-3, 2, -1),
                (-1, -3, 2),
                (1, -2, -3),
                (-3, 1, -2),
                (-2, -3, 1),
                (-1, -2, 3),
                (3, -1, -2),
                (-2, 3, -1),
                (2, 1, 3),
                (3, 2, 1),
                (1, 3, 2),
                (-2, 1, -3),
                (-3, -2, 1),
                (1, -3, -2),
                (-1, 2, -3),
                (-3, -1, 2),
                (2, -3, -1),
            ],
        ),
        (
            Oh,
            (1, 2, 3),
            [
                (1, 2, 3),
                (3, 1, 2),
                (2, 3, 1),
                (2, 1, -3),
                (-3, 2, 1),
                (1, -3, 2),
                (-2, 1, 3),
                (3, -2, 1),
                (1, 3, -2),
                (1, -2, -3),
                (-3, 1, -2),
                (-2, -3, 1),
                (-1, -2, 3),
                (3, -1, -2),
                (-2, 3, -1),
                (-2, -1, -3),
                (-3, -2, -1),
                (-1, -3, -2),
                (2, -1, 3),
                (3, 2, -1),
                (-1, 3, 2),
                (-1, 2, -3),
                (-3, -1, 2),
                (2, -3, -1),
                (-1, -2, -3),
                (-3, -1, -2),
                (-2, -3, -1),
                (-2, -1, 3),
                (3, -2, -1),
                (-1, 3, -2),
                (2, -1, -3),
                (-3, 2, -1),
                (-1, -3, 2),
                (-1, 2, 3),
                (3, -1, 2),
                (2, 3, -1),
                (1, 2, -3),
                (-3, 1, 2),
                (2, -3, 1),
                (2, 1, 3),
                (3, 2, 1),
                (1, 3, 2),
                (-2, 1, -3),
                (-3, -2, 1),
                (1, -3, -2),
                (1, -2, 3),
                (3, 1, -2),
                (-2, 3, 1),
            ],
        ),
    ],
    indirect=["vector"],
)
def test_symmetry(symmetry, vector, expected):
    """Applying all operations of a point group to a vector yields exactly
    the expected set of symmetrically equivalent vectors (compared as sets
    of 3-decimal-rounded tuples, so ordering is irrelevant).
    """
    vector_calculated = [
        tuple(v.round(3)) for v in symmetry.outer(vector).unique().data
    ]
    assert set(vector_calculated) == set(expected)
def test_same_symmetry_unique(all_symmetries):
    """The unique elements of a symmetry's outer product with itself are the
    symmetry's own elements."""
    sym = all_symmetries
    unique_elements = sym.outer(sym).unique()
    assert unique_elements.size == sym.size
    # No angular separation between the unique elements and the originals
    assert np.allclose((sym * ~unique_elements).angle, 0)
    assert np.allclose(unique_elements.data, sym.data)
def test_get_unique_symmetry_elements_symmetry_first_arg(all_symmetries):
    """With the full symmetry as first argument, the unique elements against any
    of its subgroups are the symmetry itself, whether the subgroup shortcut is
    taken or the outer product is computed explicitly."""
    sym = all_symmetries
    assert sym in sym.subgroups
    # Shortcut path: function returns sym directly when 2nd arg is a subgroup
    via_shortcut = [
        _get_unique_symmetry_elements(sym, sg, check_subgroups=True)
        for sg in sym.subgroups
    ]
    # Explicit path: sym1.outer(sym2).unique() is computed
    via_outer = [
        _get_unique_symmetry_elements(sym, sg, check_subgroups=False)
        for sg in sym.subgroups
    ]
    assert all(u == sym for u in via_shortcut)
    assert all(u == sym for u in via_outer)
@pytest.mark.parametrize("symmetry", [C4, C4h, S4, D6, Th, O, Oh])
def test_get_unique_symmetry_elements_subgroup_first_arg(symmetry):
    """With a subgroup as the first argument the unique elements always have the
    full symmetry's size, but are not always equal to it: order matters."""
    uniques = [
        _get_unique_symmetry_elements(sg, symmetry, check_subgroups=False)
        for sg in symmetry.subgroups
    ]
    # Sizes always agree with the full symmetry
    assert all(u.size == symmetry.size for u in uniques)
    # The data is not the same as the symmetry for all subgroups
    assert not all(u == symmetry for u in uniques)
@pytest.mark.parametrize(
    "symmetry, expected",
    [(C2h, 4), (C6, 6), (D6h, 24), (T, 12), (Td, 24), (Oh, 48), (O, 24)],
)
def test_order(symmetry, expected):
    """The group order (number of symmetry operations) matches the known value."""
    assert symmetry.order == expected
@pytest.mark.parametrize(
    "symmetry, expected",
    [
        (D2d, False),
        (C4, True),
        (C6v, False),
        (O, True),
    ],
)
def test_is_proper(symmetry, expected):
    """``is_proper`` is True only for groups containing rotations alone."""
    assert symmetry.is_proper == expected
@pytest.mark.parametrize(
    "symmetry, expected",
    [
        (C1, [C1]),
        (D2, [C1, C2x, C2y, C2z, D2]),
        (C6v, [C1, Csx, Csy, C2z, C3, C3v, C6, C6v]),
    ],
)
def test_subgroups(symmetry, expected):
    """The set of subgroups of each point group matches the expected list."""
    # Removed a leftover debug print of len(symmetry.subgroups); pytest already
    # shows the set difference on assertion failure.
    assert set(symmetry.subgroups) == set(expected)
@pytest.mark.parametrize(
    "symmetry, expected",
    [
        (C1, [C1]),
        (D2, [C1, C2x, C2y, C2z, D2]),
        (C6v, [C1, C2z, C3, C6]),
    ],
)
def test_proper_subgroups(symmetry, expected):
    """Only rotation-only (proper) subgroups are returned."""
    assert set(symmetry.proper_subgroups) == set(expected)
@pytest.mark.parametrize(
    "symmetry, expected",
    [
        (C1, C1),
        (Ci, C1),
        (C2, C2),
        (Cs, C1),
        (C2h, C2),
        (D2, D2),
        (C2v, C2x),
        (C4, C4),
        (C4h, C4),
        (C3h, C3),
        (C6v, C6),
        (D3h, D3y),
        (T, T),
        (Td, T),
        (Oh, O),
    ],
)
def test_proper_subgroup(symmetry, expected):
    """The largest proper subgroup is the expected group (compared by the
    quaternion tuples, not object identity)."""
    assert symmetry.proper_subgroup._tuples == expected._tuples
@pytest.mark.parametrize(
    "symmetry, expected",
    [
        (C1, Ci),
        (Ci, Ci),
        (C2, C2h),
        (C2h, C2h),
        (C4, C4h),
        (C4h, C4h),
        (D4, D4h),
        (D4h, D4h),
        (C6v, D6h),
        (D6h, D6h),
        (T, Th),
        (Td, Oh),
    ],
)
def test_laue(symmetry, expected):
    """Each point group maps to its centrosymmetric Laue class."""
    assert symmetry.laue._tuples == expected._tuples
def test_is_laue():
    """All eleven Laue groups contain the inversion operation."""
    laue_groups = (Ci, C2h, D2h, C4h, D4h, S6, D3d, C6h, D6h, Th, Oh)
    for group in laue_groups:
        assert group.contains_inversion
@pytest.mark.parametrize(
    "symmetry, expected",
    [
        (Cs, C2),
        (C4v, D4),
        (Th, T),
        (Td, O),
        (O, O),
        (Oh, O),
    ],
)
def test_proper_inversion_subgroup(symmetry, expected):
    """The proper subgroup of each group's Laue class matches the expected group."""
    assert symmetry.laue_proper_subgroup._tuples == expected._tuples
@pytest.mark.parametrize(
    "symmetry, expected",
    [
        (C1, False),
        (Ci, True),
        (Cs, False),
        (C2, False),
        (C2h, True),
        (D4, False),
        (D2d, False),
        (D3d, True),
        (C6, False),
        (C3h, False),
        (Td, False),
        (Oh, True),
    ],
)
def test_contains_inversion(symmetry, expected):
    """``contains_inversion`` is True exactly for the centrosymmetric groups."""
    assert symmetry.contains_inversion == expected
@pytest.mark.parametrize(
    "symmetry, other, expected",
    [
        (D2, C1, [C1]),
        (C1, C1, [C1]),
        (D2, C2, [C1, C2z]),
        (C4, S4, [C1, C2z]),
    ],
)
def test_and(symmetry, other, expected):
    """The ``&`` operator returns the intersection group of two point groups."""
    overlap = symmetry & other
    # Build the expected intersection from its generators for comparison
    expected = Symmetry.from_generators(*expected)
    assert overlap._tuples == expected._tuples
@pytest.mark.parametrize(
    "symmetry, other, expected",
    [
        (C1, C1, True),
        (C1, C2, False),
    ],
)
def test_eq(symmetry, other, expected):
    """Point-group equality holds only between identical groups."""
    assert (symmetry == other) == expected
@pytest.mark.parametrize(
    "symmetry, expected",
    [
        (C1, np.zeros((0, 3))),
        (C2, [0, 1, 0]),
        (D2, [[0, 1, 0], [0, 0, 1]]),
        (C4, [[0, 1, 0], [1, 0, 0]]),
        (
            T,
            [
                [0.5**0.5, -(0.5**0.5), 0],
                [0, -(0.5**0.5), 0.5**0.5],
                [0, 0.5**0.5, 0.5**0.5],
                [0.5**0.5, 0.5**0.5, 0],
            ],
        ),
    ],
)
def test_fundamental_zone(symmetry, expected):
    """The fundamental zone normals of each group match the expected vectors.
    C1 has no restricting planes, hence an empty (0, 3) array."""
    fz = symmetry.fundamental_zone()
    assert np.allclose(fz.data, expected)
def test_no_symm_fundamental_zone():
    """A symmetry built only from the identity rotation has an empty
    fundamental zone."""
    identity_only = Symmetry.from_generators(Rotation([1, 0, 0, 0]))
    fz = identity_only.fundamental_zone()
    assert fz.size == 0
def test_get_point_group():
    """Makes sure all the ints from 1 to 230 give answers."""
    # 1..230 covers every crystallographic space group number
    for sg_number in np.arange(1, 231):
        proper_pg = get_point_group(sg_number, proper=True)
        # The proper point group must be one of the 11 rotation-only groups
        assert proper_pg in [C1, C2, C3, C4, C6, D2, D3, D4, D6, O, T]
        sg = GetSpaceGroup(sg_number)
        pg = get_point_group(sg_number, proper=False)
        # Cross-check both variants against the lookup keyed by point group name
        assert proper_pg == spacegroup2pointgroup_dict[sg.point_group_name]["proper"]
        assert pg == spacegroup2pointgroup_dict[sg.point_group_name]["improper"]
def test_unique_symmetry_elements_subgroups(all_symmetries):
    """The unique symmetry elements between a symmetry and each of its
    subgroups are the original symmetry."""
    sym = all_symmetries
    for sg in sym.subgroups:
        # outer of symmetry with its subgroups
        u = sym.outer(sg).unique()
        # assert that unique is same size as main symmetry
        assert u.size == sym.size
        # check that there is no difference between unique
        # and main symmetry
        assert np.allclose((sym * ~u).angle, 0)
def test_two_symmetries_are_not_in_each_others_subgroup(all_symmetries):
# if given | |
#!/usr/bin/env python
from __future__ import print_function
import re
import shutil
import unittest
from ruffus.combinatorics import *
from ruffus.ruffus_utility import RUFFUS_HISTORY_FILE, CHECKSUM_FILE_TIMESTAMPS
from ruffus.ruffus_exceptions import RethrownJobError
from ruffus import pipeline_run, pipeline_printout, Pipeline, formatter, originate, follows, merge
import ruffus
import sys
"""
test_combinatorics.py
test product, combine, permute, combine_with_replacement
"""
import os

# Working directory named after this script (without extension)
tempdir = os.path.relpath(os.path.abspath(os.path.splitext(__file__)[0]))

# add grandparent to search path for testing
grandparent_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)

# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]

# funky code to import by file name
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

# Py2/Py3 compatibility shim: StringIO moved into the io module in Python 3.
# Catch only ImportError rather than a bare `except:` so genuine errors
# (e.g. KeyboardInterrupt) are not silently swallowed.
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO

# sub-1s resolution in system?
one_second_per_job = None
def touch(filename):
    """Create *filename* as an empty file, truncating any existing content."""
    open(filename, "w").close()
# ___________________________________________________________________________
#
#   generate_initial_files1
# ___________________________________________________________________________
@originate(output=[tempdir + "/" + prefix + "_name.tmp1" for prefix in "abcd"])
def generate_initial_files1(out_name):
    """Create four empty starting files: a/b/c/d_name.tmp1."""
    with open(out_name, 'w') as outfile:
        pass
# ___________________________________________________________________________
#
#   generate_initial_files2
# ___________________________________________________________________________
@originate([tempdir + "/e_name.tmp1", tempdir + "/f_name.tmp1"])
def generate_initial_files2(out_name):
    """Create two empty starting files: e/f_name.tmp1."""
    with open(out_name, 'w') as outfile:
        pass
# ___________________________________________________________________________
#
#   generate_initial_files3
# ___________________________________________________________________________
@originate([tempdir + "/g_name.tmp1", tempdir + "/h_name.tmp1"])
def generate_initial_files3(out_name):
    """Create two empty starting files: g/h_name.tmp1."""
    with open(out_name, 'w') as outfile:
        pass
# ___________________________________________________________________________
#
#   check_product_task
# ___________________________________________________________________________
@follows(generate_initial_files1)
@product(
    [tempdir + "/" + prefix + "_name.tmp1" for prefix in "abcd"],
    formatter(".*/(?P<FILE_PART>.+).tmp1$"),
    generate_initial_files2,
    formatter(),
    "{path[0][0]}/{FILE_PART[0][0]}.{basename[1][0]}.{basename[2][0]}.tmp2",
    input3=generate_initial_files3,
    filter3=formatter(r"tmp1$"),
    extras=["{basename[0][0][0]}{basename[1][0][0]}{basename[2][0][0]}",  # extra: prefices only (abcd etc)
            # extra: path for 2nd input, 1st file
            "{subpath[0][0][0]}",
            "{subdir[0][0][0]}"])
def check_product_task(infiles, outfile,
                       prefices,
                       subpath,
                       subdir):
    """Write the concatenated one-letter prefices of each input triple, followed
    by a comma, into the output file."""
    with open(outfile, "w") as p:
        p.write(prefices + ",")
# ___________________________________________________________________________
#
#   check_product_merged_task
# ___________________________________________________________________________
@merge(check_product_task, tempdir + "/merged.results")
def check_product_merged_task(infiles, outfile):
    """Concatenate all per-product outputs (in sorted filename order) into one
    results file."""
    with open(outfile, "w") as p:
        for infile in sorted(infiles):
            with open(infile) as ii:
                p.write(ii.read())
# ___________________________________________________________________________
#
#   check_product_misspelt_capture_error_task
# ___________________________________________________________________________
@product(
    generate_initial_files1,
    formatter(".*/(?P<FILE_PART>.+).tmp1$"),
    "{path[0][0]}/{FILEPART[0][0]}.tmp2")
def check_product_misspelt_capture_error_task(infiles, outfile):
    """
    Deliberate error case: the named capture group FILE_PART is misspelt
    as FILEPART in the output template, so substitution must fail.
    """
    with open(outfile, "w") as p:
        pass
# ___________________________________________________________________________
#
#   check_product_out_of_range_formatter_ref_error_task
# ___________________________________________________________________________
@product(
    generate_initial_files1,
    formatter(".*/(?P<FILE_PART>.+).tmp1$"),
    "{path[2][0]}/{basename[0][0]}.tmp2",
    "{FILE_PART[0][0]}")
def check_product_out_of_range_formatter_ref_error_task(infiles, outfile, ignored_filter):
    """
    Deliberate error case: references {path[2][0]} when len(path) == 1,
    so substitution must fail.
    """
    with open(outfile, "w") as p:
        pass
# ___________________________________________________________________________
#
#   check_product_formatter_ref_index_error_task
# ___________________________________________________________________________
@product(
    generate_initial_files1,
    formatter(".*/(?P<FILE_PART>.+).tmp1$"),
    "{path[0][0][1000]}/{basename[0][0]}.tmp2",
    "{FILE_PART[0][0]}")
def check_product_formatter_ref_index_error_task(infiles, outfile, ignored_filter):
    """
    Deliberate error case: {path[0][0][1000]} indexes far past the end of the
    path string (len(path[0][0]) < 1000), so substitution must fail.
    """
    with open(outfile, "w") as p:
        pass
# ___________________________________________________________________________
#
#   check_combinations2_task
# ___________________________________________________________________________
@combinations(
    generate_initial_files1,
    formatter(".*/(?P<FILE_PART>.+).tmp1$"),
    2,
    "{path[0][0]}/{FILE_PART[0][0]}.{basename[1][0]}.tmp2",
    "{basename[0][0][0]}{basename[1][0][0]}",  # extra: prefices
    "{subpath[0][0][0]}",  # extra: path for 2nd input, 1st file
    "{subdir[0][0][0]}")
def check_combinations2_task(infiles, outfile,
                             prefices,
                             subpath,
                             subdir):
    """
    Test combinations with k-tuple = 2
    """
    with open(outfile, "w") as outf:
        outf.write(prefices + ",")
@merge(check_combinations2_task, tempdir + "/merged.results")
def check_combinations2_merged_task(infiles, outfile):
    """Concatenate all 2-combination outputs (sorted) into one results file."""
    with open(outfile, "w") as p:
        for infile in sorted(infiles):
            with open(infile) as ii:
                p.write(ii.read())
# ___________________________________________________________________________
#
#   check_combinations3_task
# ___________________________________________________________________________
@combinations(
    generate_initial_files1,
    formatter(".*/(?P<FILE_PART>.+).tmp1$"),
    3,
    "{path[0][0]}/{FILE_PART[0][0]}.{basename[1][0]}.{basename[2][0]}.tmp2",
    # extra: prefices
    "{basename[0][0][0]}{basename[1][0][0]}{basename[2][0][0]}",
    "{subpath[0][0][0]}",  # extra: path for 2nd input, 1st file
    "{subdir[0][0][0]}")
def check_combinations3_task(infiles, outfile,
                             prefices,
                             subpath,
                             subdir):
    """
    Test combinations with k-tuple = 3
    """
    with open(outfile, "w") as outf:
        outf.write(prefices + ",")
@merge(check_combinations3_task, tempdir + "/merged.results")
def check_combinations3_merged_task(infiles, outfile):
    """Concatenate all 3-combination outputs (sorted) into one results file."""
    with open(outfile, "w") as p:
        for infile in sorted(infiles):
            with open(infile) as ii:
                p.write(ii.read())
# ___________________________________________________________________________
#
#   check_permutations2_task
# ___________________________________________________________________________
@permutations(
    generate_initial_files1,
    formatter(".*/(?P<FILE_PART>.+).tmp1$"),
    2,
    "{path[0][0]}/{FILE_PART[0][0]}.{basename[1][0]}.tmp2",
    "{basename[0][0][0]}{basename[1][0][0]}",  # extra: prefices
    "{subpath[0][0][0]}",  # extra: path for 2nd input, 1st file
    "{subdir[0][0][0]}")
def check_permutations2_task(infiles, outfile,
                             prefices,
                             subpath,
                             subdir):
    """
    Test permutations with k-tuple = 2
    """
    with open(outfile, "w") as outf:
        outf.write(prefices + ",")
@merge(check_permutations2_task, tempdir + "/merged.results")
def check_permutations2_merged_task(infiles, outfile):
    """Concatenate all 2-permutation outputs (sorted) into one results file."""
    with open(outfile, "w") as p:
        for infile in sorted(infiles):
            with open(infile) as ii:
                p.write(ii.read())
# ___________________________________________________________________________
#
#   check_permutations3_task
# ___________________________________________________________________________
@permutations(
    generate_initial_files1,
    formatter(".*/(?P<FILE_PART>.+).tmp1$"),
    3,
    "{path[0][0]}/{FILE_PART[0][0]}.{basename[1][0]}.{basename[2][0]}.tmp2",
    # extra: prefices
    "{basename[0][0][0]}{basename[1][0][0]}{basename[2][0][0]}",
    "{subpath[0][0][0]}",  # extra: path for 2nd input, 1st file
    "{subdir[0][0][0]}")
def check_permutations3_task(infiles, outfile,
                             prefices,
                             subpath,
                             subdir):
    """
    Test permutations with k-tuple = 3
    """
    with open(outfile, "w") as outf:
        outf.write(prefices + ",")
@merge(check_permutations3_task, tempdir + "/merged.results")
def check_permutations3_merged_task(infiles, outfile):
    """Concatenate all 3-permutation outputs (sorted) into one results file."""
    with open(outfile, "w") as p:
        for infile in sorted(infiles):
            with open(infile) as ii:
                p.write(ii.read())
# ___________________________________________________________________________
#
#   check_combinations_with_replacement2_task
# ___________________________________________________________________________
@combinations_with_replacement(
    input=generate_initial_files1,
    filter=formatter(".*/(?P<FILE_PART>.+).tmp1$"),
    tuple_size=2,
    output="{path[0][0]}/{FILE_PART[0][0]}.{basename[1][0]}.tmp2",
    extras=["{basename[0][0][0]}{basename[1][0][0]}",  # extra: prefices
            "{subpath[0][0][0]}",  # extra: path for 2nd input, 1st file
            "{subdir[0][0][0]}"])
def check_combinations_with_replacement2_task(infiles, outfile,
                                              prefices,
                                              subpath,
                                              subdir):
    """
    Test combinations_with_replacement with k-tuple = 2
    """
    with open(outfile, "w") as outf:
        outf.write(prefices + ",")
@merge(check_combinations_with_replacement2_task, tempdir + "/merged.results")
def check_combinations_with_replacement2_merged_task(infiles, outfile):
    """Concatenate all 2-multicombination outputs (sorted) into one file."""
    with open(outfile, "w") as p:
        for infile in sorted(infiles):
            with open(infile) as ii:
                p.write(ii.read())
# ___________________________________________________________________________
#
#   check_combinations_with_replacement3_task
# ___________________________________________________________________________
@combinations_with_replacement(
    generate_initial_files1,
    formatter(".*/(?P<FILE_PART>.+).tmp1$"),
    3,
    "{path[0][0]}/{FILE_PART[0][0]}.{basename[1][0]}.{basename[2][0]}.tmp2",
    # extra: prefices
    "{basename[0][0][0]}{basename[1][0][0]}{basename[2][0][0]}",
    "{subpath[0][0][0]}",  # extra: path for 2nd input, 1st file
    "{subdir[0][0][0]}")
def check_combinations_with_replacement3_task(infiles, outfile,
                                              prefices,
                                              subpath,
                                              subdir):
    """
    Test combinations_with_replacement with k-tuple = 3
    """
    with open(outfile, "w") as outf:
        outf.write(prefices + ",")
@merge(check_combinations_with_replacement3_task, tempdir + "/merged.results")
def check_combinations_with_replacement3_merged_task(infiles, outfile):
    """Concatenate all 3-multicombination outputs (sorted) into one file."""
    with open(outfile, "w") as p:
        for infile in sorted(infiles):
            with open(infile) as ii:
                p.write(ii.read())
def cleanup_tmpdir():
    """Delete every file in the working directory plus the ruffus job-history
    (checksum) file.

    Replaces the previous `os.system('rm -f ...')` call: pure-Python removal is
    portable (no POSIX shell required) and avoids spawning a subprocess.
    Missing files/directories are ignored, mirroring `rm -f`.
    """
    try:
        names = os.listdir(tempdir)
    except OSError:
        names = []
    targets = [os.path.join(tempdir, name) for name in names]
    targets.append(RUFFUS_HISTORY_FILE)
    for path in targets:
        try:
            os.remove(path)
        except OSError:
            # mirror `rm -f`: missing or just-deleted files are fine
            pass
class TestCombinatorics(unittest.TestCase):
    """End-to-end checks for the ruffus combinatoric decorators
    (product / combinations / permutations / combinations_with_replacement)."""

    def setUp(self):
        # (Re)create the working directory; "already exists" is fine
        try:
            os.mkdir(tempdir)
        except OSError:
            pass
    # ___________________________________________________________________________
    #
    #   test product() pipeline_printout and pipeline_run
    # ___________________________________________________________________________
    def test_product_printout(self):
        """Input file exists, output doesn't exist"""
        cleanup_tmpdir()
        s = StringIO()
        pipeline_printout(s, [check_product_merged_task],
                          verbose=5, wrap_width=10000, pipeline="main")
        # The printout must flag the missing product output files
        self.assertTrue(re.search('Job needs update:.*Missing files.*'
                                  '\[.*{tempdir}/a_name.tmp1, '
                                  '.*{tempdir}/e_name.tmp1, '
                                  '.*{tempdir}/h_name.tmp1, '
                                  '.*{tempdir}/a_name.e_name.h_name.tmp2\]'.format(tempdir=tempdir), s.getvalue(), re.DOTALL))
    def test_product_run(self):
        """Run product"""
        # output is up to date, but function body changed (e.g., source different)
        cleanup_tmpdir()
        pipeline_run([check_product_merged_task], verbose=0, multiprocess=100,
                     one_second_per_job=one_second_per_job, pipeline="main")
        # 4 x 2 x 2 = 16 input triples, merged in sorted order
        with open(tempdir + "/merged.results") as oo:
            self.assertEqual(oo.read(),
                             "aeg,aeh,afg,afh,beg,beh,bfg,bfh,ceg,ceh,cfg,cfh,deg,deh,dfg,dfh,")
    # ___________________________________________________________________________
    #
    #   test product() pipeline_printout diagnostic error messsages
    #
    #       require verbose >= 3 or an empty jobs list
    # ___________________________________________________________________________
    def test_product_misspelt_capture_error(self):
        """Misspelt named capture group
        Requires verbose >= 3 or an empty jobs list
        """
        cleanup_tmpdir()
        s = StringIO()
        pipeline_printout(s, [check_product_misspelt_capture_error_task],
                          verbose=3, wrap_width=10000, pipeline="main")
        self.assertIn("Warning: Input substitution failed:", s.getvalue())
        self.assertIn("Unmatched field {FILEPART}", s.getvalue())
    def test_product_out_of_range_formatter_ref_error(self):
        """
        {path[2][0]} when len(path) == 1
        Requires verbose >= 3 or an empty jobs list
        """
        cleanup_tmpdir()
        s = StringIO()
        pipeline_printout(s, [check_product_out_of_range_formatter_ref_error_task],
                          verbose=3, wrap_width=10000, pipeline="main")
        self.assertIn("Warning: Input substitution failed:", s.getvalue())
        self.assertIn("Unmatched field {2}", s.getvalue())
    def test_product_formatter_ref_index_error(self):
        """
        {path[0][0][1000]} when len of the path string len(path[0][0]) < 1000
        Requires verbose >= 3 or an empty jobs list
        """
        cleanup_tmpdir()
        s = StringIO()
        pipeline_printout(s, [check_product_formatter_ref_index_error_task],
                          verbose=3, wrap_width=10000, pipeline="main")
        self.assertIn("Warning: Input substitution failed:", s.getvalue())
        self.assertIn(
            "Unmatched field {string index out of range}", s.getvalue())
    # ___________________________________________________________________________
    #
    #   test combinations() pipeline_printout and pipeline_run
    # ___________________________________________________________________________
    def test_combinations2_printout(self):
        """Input file exists, output doesn't exist"""
        cleanup_tmpdir()
        s = StringIO()
        pipeline_printout(s, [check_combinations2_merged_task],
                          verbose=5, wrap_width=10000, pipeline="main")
        self.assertTrue(re.search('Job needs update:.*Missing files.*'
                                  '\[.*{tempdir}/a_name.tmp1, '
                                  '.*{tempdir}/b_name.tmp1, '
                                  '.*{tempdir}/a_name.b_name.tmp2\]'.format(tempdir=tempdir), s.getvalue(), re.DOTALL))
    def test_combinations2_run(self):
        """Run 2-way combinations"""
        # output is up to date, but function body changed (e.g., source different)
        cleanup_tmpdir()
        pipeline_run([check_combinations2_merged_task], verbose=0, multiprocess=100,
                     one_second_per_job=one_second_per_job, pipeline="main")
        # C(4, 2) = 6 unordered pairs of {a, b, c, d}
        with open(tempdir + "/merged.results") as oo:
            self.assertEqual(oo.read(),
                             'ab,ac,ad,bc,bd,cd,')
    # ___________________________________________________________________________
    #
    #   test combinations() pipeline_printout and pipeline_run
    # ___________________________________________________________________________
    def test_combinations3_printout(self):
        """Input file exists, output doesn't exist"""
        cleanup_tmpdir()
        s = StringIO()
        pipeline_printout(s, [check_combinations3_merged_task],
                          verbose=5, wrap_width=10000, pipeline="main")
        self.assertTrue(re.search(
            '\[.*{tempdir}/a_name.tmp1, '
            '.*{tempdir}/b_name.tmp1, '
            '.*{tempdir}/c_name.tmp1, '
            '.*{tempdir}/a_name.b_name.c_name.tmp2\]'.format(tempdir=tempdir), s.getvalue()))
    def test_combinations3_run(self):
        """Run 3-way combinations"""
        # output is up to date, but function body changed (e.g., source different)
        cleanup_tmpdir()
        pipeline_run([check_combinations3_merged_task], verbose=0, multiprocess=100,
                     one_second_per_job=one_second_per_job, pipeline="main")
        # C(4, 3) = 4 unordered triples of {a, b, c, d}
        with open(tempdir + "/merged.results") as oo:
            self.assertEqual(oo.read(),
                             "abc,abd,acd,bcd,")
    # ___________________________________________________________________________
    #
    #   test permutations() pipeline_printout and pipeline_run
    # ___________________________________________________________________________
    def test_permutations2_printout(self):
        """Input file exists, output doesn't exist"""
        cleanup_tmpdir()
        s = StringIO()
        pipeline_printout(s, [check_permutations2_merged_task],
                          verbose=5, wrap_width=10000, pipeline="main")
        self.assertTrue(re.search('\[.*{tempdir}/a_name.tmp1, '
                                  '.*{tempdir}/b_name.tmp1, '
                                  '.*{tempdir}/a_name.b_name.tmp2\]'.format(tempdir=tempdir), s.getvalue()))
    def test_permutations2_run(self):
        """Run 2-way permutations"""
        # output is up to date, but function body changed (e.g., source different)
        cleanup_tmpdir()
        pipeline_run([check_permutations2_merged_task], verbose=0, multiprocess=100,
                     one_second_per_job=one_second_per_job, pipeline="main")
        # P(4, 2) = 12 ordered pairs of {a, b, c, d}
        with open(tempdir + "/merged.results") as oo:
            self.assertEqual(oo.read(),
                             "ab,ac,ad,ba,bc,bd,ca,cb,cd,da,db,dc,")
    # ___________________________________________________________________________
    #
    #   test permutations() pipeline_printout and pipeline_run
    # ___________________________________________________________________________
    def test_permutations3_printout(self):
        """Input file exists, output doesn't exist"""
        cleanup_tmpdir()
        s = StringIO()
        pipeline_printout(s, [check_permutations3_merged_task],
                          verbose=5, wrap_width=10000, pipeline="main")
        self.assertTrue(re.search('\[.*{tempdir}/a_name.tmp1, '
                                  '.*{tempdir}/b_name.tmp1, '
                                  '.*{tempdir}/c_name.tmp1, '
                                  '.*{tempdir}/a_name.b_name.c_name.tmp2\]'.format(tempdir=tempdir), s.getvalue()))
    def test_permutations3_run(self):
        """Run 3-way permutations"""
        # output is up to date, but function body changed (e.g., source different)
        cleanup_tmpdir()
        pipeline_run([check_permutations3_merged_task], verbose=0, multiprocess=100,
                     one_second_per_job=one_second_per_job, pipeline="main")
        # P(4, 3) = 24 ordered triples of {a, b, c, d}
        with open(tempdir + "/merged.results") as oo:
            self.assertEqual(oo.read(),
                             'abc,abd,acb,acd,adb,adc,bac,bad,bca,bcd,bda,bdc,cab,cad,cba,cbd,cda,cdb,dab,dac,dba,dbc,dca,dcb,')
    # ___________________________________________________________________________
    #
    #   test combinations_with_replacement() pipeline_printout and pipeline_run
    # ___________________________________________________________________________
    def test_combinations_with_replacement2_printout(self):
        """Input file exists, output doesn't exist"""
        cleanup_tmpdir()
        s = StringIO()
        pipeline_printout(s, [check_combinations_with_replacement2_merged_task],
                          verbose=5, wrap_width=10000, pipeline="main")
        self.assertTrue(re.search('\[.*{tempdir}/a_name.tmp1, '
                                  '.*{tempdir}/b_name.tmp1, '
                                  '.*{tempdir}/a_name.b_name.tmp2\]'.format(tempdir=tempdir), s.getvalue()))
    def test_combinations_with_replacement2_run(self):
        """Run 2-way combinations with replacement"""
        # output is up to date, but function body changed (e.g., source different)
        cleanup_tmpdir()
        pipeline_run([check_combinations_with_replacement2_merged_task], verbose=0,
                     multiprocess=100, one_second_per_job=one_second_per_job, pipeline="main")
        # 10 multisets of size 2 from {a, b, c, d} (includes aa, bb, ...)
        with open(tempdir + "/merged.results") as oo:
            self.assertEqual(oo.read(),
                             "aa,ab,ac,ad,bb,bc,bd,cc,cd,dd,")
    # ___________________________________________________________________________
    #
    #   test combinations_with_replacement() pipeline_printout and pipeline_run
    # ___________________________________________________________________________
    def test_combinations_with_replacement3_printout(self):
        """Input file exists, output doesn't exist"""
        cleanup_tmpdir()
        s = StringIO()
        pipeline_printout(s, [check_combinations_with_replacement3_merged_task],
                          verbose=5, wrap_width=10000, pipeline="main")
        self.assertTrue(re.search('\[.*{tempdir}/a_name.tmp1, '
                                  '.*{tempdir}/b_name.tmp1, '
                                  '.*{tempdir}/c_name.tmp1, '
                                  '.*{tempdir}/a_name.b_name.c_name.tmp2\]'.format(tempdir=tempdir), s.getvalue()))
def test_combinations_with_replacement3_run(self):
"""Run product"""
# output is up to date, but function body changed (e.g., source different)
cleanup_tmpdir()
pipeline_run([check_combinations_with_replacement3_merged_task], verbose=0,
multiprocess=100, one_second_per_job=one_second_per_job, pipeline="main")
with open(tempdir + "/merged.results") as oo:
self.assertEqual(oo.read(),
| |
from typing import List
from summer.compute import ComputedValueProcessor
from autumn.models.covid_19.constants import (
INFECT_DEATH, INFECTION, Compartment, NOTIFICATIONS, HISTORY_STRATA, INFECTION_DEATHS,
COMPARTMENTS, Vaccination, PROGRESS, Clinical, History, Tracing, NOTIFICATION_CLINICAL_STRATA,
HOSTPIALISED_CLINICAL_STRATA,
)
from autumn.models.covid_19.parameters import Sojourn, VaccinationRisk
from autumn.settings import COVID_BASE_AGEGROUPS
from autumn.models.covid_19.stratifications.clinical import CLINICAL_STRATA
from autumn.models.covid_19.constants import INCIDENCE
from autumn.models.covid_19.stratifications.strains import Strain
from autumn.core.utils.utils import get_complement_prop
from autumn.model_features.outputs import OutputsBuilder
class TimeProcess(ComputedValueProcessor):
    """
    This is just sitting here ready to go, in case anyone wants to produce any outputs that are dependent on modelled
    time.
    """

    def process(self, compartment_values, computed_values, time):
        # Simply expose the current model time as the computed value
        return time
class CovidOutputsBuilder(OutputsBuilder):
    """
    The object responsible for collating and requesting all the derived outputs for the model.
    Attributes:
        is_contact_tracing: Whether contact tracing is active in this model
    """

    def __init__(self, model, compartments, is_contact_tracing):
        OutputsBuilder.__init__(self, model, compartments)
        self.is_contact_tracing = is_contact_tracing
        # Extra stratum filter used to pick out untraced flows when tracing is
        # active; empty dict means "no tracing restriction" otherwise
        self.untraced_stratum = {"tracing": Tracing.UNTRACED} if is_contact_tracing else {}
    def request_incidence(self):
        """
        Incidence is the transition from late exposed to early active - i.e. the rate of onset of an "episode".
        Generates the following derived outputs both overall and by age group:
            incidence: Rate of onset of new episodes
        """
        # Unstratified
        self.model.request_output_for_flow(name=INCIDENCE, flow_name=INCIDENCE)

        # Stratified by age group
        self.request_stratified_output_for_flow(INCIDENCE, COVID_BASE_AGEGROUPS, "agegroup")

        # Stratified by age group and by clinical stratum
        self.request_double_stratified_output_for_flow(
            INCIDENCE, COVID_BASE_AGEGROUPS, "agegroup", CLINICAL_STRATA, "clinical"
        )
    def request_infection(self):
        """
        Track the rate at which people are newly infected.
        Infection is the transition from susceptible to early exposed, of course.
        Generates the following derived outputs both overall and by age group:
            infection: Rate of new infections by time
        """
        self.model.request_output_for_flow("infection", INFECTION)
def request_notifications(self, cumul_inc_start_time: float, hospital_reporting: float):
"""
Calculate the rate of notifications over time.
Args:
cumul_inc_start_time: The starting time to start the cumulative calculations
hospital_reporting: The proportion of hospitalisations notified (defaults to one)
Generates the following derived outputs:
notifications overall: Progressions to active for those in the last three strata or any strata if traced
notifications by age group: As previous, split by age
"""
# Age-specific notifications
for agegroup in COVID_BASE_AGEGROUPS:
notification_pathways = []
# First track all traced cases (regardless of clinical stratum, on the assumption that all traced patients
# are identified as they are in quarantine)
if self.is_contact_tracing:
traced_dest_strata = {"agegroup": agegroup, "tracing": Tracing.TRACED}
name = f"progress_{Tracing.TRACED}Xagegroup_{agegroup}"
notification_pathways.append(name)
self.model.request_output_for_flow(
name=name, flow_name=PROGRESS, dest_strata=traced_dest_strata, save_results=False,
)
# Then track untraced cases that are passively detected - either regardless of tracing status,
# (empty dictionary if tracing not active) or just in the untraced group otherwise
for clinical in NOTIFICATION_CLINICAL_STRATA:
# Include reporting adjustment if requested
reporting = hospital_reporting if clinical in HOSTPIALISED_CLINICAL_STRATA else 1.
untraced_dest_strata = {"clinical": clinical, "agegroup": agegroup}
untraced_dest_strata.update(self.untraced_stratum)
notified_hospitalisations = f"_progress_{Tracing.UNTRACED}Xagegroup_{agegroup}Xclinical_{clinical}"
self.model.request_output_for_flow(
name=notified_hospitalisations, flow_name=PROGRESS, dest_strata=untraced_dest_strata,
save_results=False,
)
# Has to have a different name to the flow output to avoid summer error
name = notified_hospitalisations[1:]
notification_pathways.append(name)
self.model.request_function_output(
name=name, func=lambda rate: rate * reporting, sources=(notified_hospitalisations,),
save_results=False,
)
final_name = f"{NOTIFICATIONS}Xagegroup_{agegroup}"
self.model.request_aggregate_output(name=final_name, sources=notification_pathways)
notifications_by_agegroup = [f"{NOTIFICATIONS}Xagegroup_{i_age}" for i_age in COVID_BASE_AGEGROUPS]
self.model.request_aggregate_output(name=NOTIFICATIONS, sources=notifications_by_agegroup)
# Cumulative unstratified notifications
if cumul_inc_start_time:
self.model.request_cumulative_output(
name=f"accum_{NOTIFICATIONS}", source=NOTIFICATIONS, start_time=cumul_inc_start_time,
)
def request_non_hosp_notifications(self):
"""
Calculate the rate of non-hospitalised notifications over time and
the prevalence of the non-hospitalised notifications to reflect the number of quarantined persons.
Generates the following derived outputs:
non hospitalised notifications: Age specific notifications in Symptomatic ambulatory ever detected
prevalence non hospitalised notifications: Age specific outputs. For the traced everyone in notification
clinical strata and infectious compartments. For the untraced symptomatic detected in active compartments.
"""
# Age-specific non-hospitalised notifications
for agegroup in COVID_BASE_AGEGROUPS:
age_notification_pathways = []
# First track all traced cases (in Symptomatic ambulatory ever detected)
if self.is_contact_tracing:
name = f"progress_traced_non_hospitalisedXagegroup_{agegroup}"
age_notification_pathways.append(name)
self.model.request_output_for_flow(
name=name,
flow_name=PROGRESS,
dest_strata={"clinical": Clinical.SYMPT_ISOLATE, "tracing": Tracing.TRACED, "agegroup": agegroup},
save_results=False,
)
# Then track untraced cases in Symptomatic ambulatory ever detected
name = f"progress_non_hosp_Xclinical_{Clinical.SYMPT_ISOLATE}Xagegroup_{agegroup}Xtraced_{Tracing.UNTRACED}"
dest_strata = {"clinical": Clinical.SYMPT_ISOLATE, "agegroup": agegroup}.update(self.untraced_stratum)
age_notification_pathways.append(name)
self.model.request_output_for_flow(
name=name,
flow_name=PROGRESS,
dest_strata=dest_strata,
save_results=False,
)
agg_name = f"non_hospitalised_notificationsXagegroup_{agegroup}"
self.model.request_aggregate_output(name=agg_name, sources=age_notification_pathways)
# calculating the prevalence of the non hospitalised notifications by age group
for agegroup in COVID_BASE_AGEGROUPS:
age_notification_pathways = []
# First track traced cases in all clinical strata except hospitalisations
if self.is_contact_tracing:
for clinical in NOTIFICATION_CLINICAL_STRATA:
name = f"progress_prevalence_traced_X{agegroup}X{clinical}"
age_notification_pathways.append(name)
self.model.request_output_for_compartments(
name=name,
compartments=[Compartment.LATE_ACTIVE],
strata={"clinical": clinical, "agegroup": agegroup, "tracing": Tracing.TRACED},
)
# Then track untraced cases (everyone in notified clinical stratum)
dest_strata = {"clinical": Clinical.SYMPT_ISOLATE, "agegroup": agegroup}.update(self.untraced_stratum)
name = f"progress_prevalence_{Tracing.UNTRACED}Xagegroup_{agegroup}Xclinical_{Clinical.SYMPT_ISOLATE}"
age_notification_pathways.append(name)
self.model.request_output_for_compartments(
name=name,
compartments=[Compartment.LATE_ACTIVE],
strata=dest_strata,
)
self.model.request_aggregate_output(
name=f"prevalence_non_hospitalised_notificationsXagegroup_{agegroup}", sources=age_notification_pathways
)
def request_adult_paeds_notifications(self):
"""
Split the age-specific notifications previously generated into paediatric and adult.
Generates the following derived outputs:
notificationsXpaediatric: Notifications for those aged under 15
notificationsXadult: Notifications for those aged 15 and above
"""
# Split by child and adult
paed_notifications = [f"notificationsXagegroup_{agegroup}" for agegroup in COVID_BASE_AGEGROUPS[:3]]
adult_notifications = [f"notificationsXagegroup_{agegroup}" for agegroup in COVID_BASE_AGEGROUPS[3:]]
self.model.request_aggregate_output(name="notificationsXpaediatric", sources=paed_notifications)
self.model.request_aggregate_output(name="notificationsXadult", sources=adult_notifications)
    def request_cdr(self):
        """
        Just make the computed value CDR (case detection rate) available as a derived output.
        """
        # "cdr" is a computed value registered elsewhere on the model; this call only
        # surfaces it as a derived output under the same name.
        self.model.request_computed_value_output("cdr")
def request_deaths(self):
"""
Track COVID-19-related deaths.
Generates the following derived outputs both overall and by age group:
infection_deaths: Rate of deaths over time
accum_deaths: Cumulative deaths that have accrued by that point in time
"""
# Unstratified
self.model.request_output_for_flow(name=INFECTION_DEATHS, flow_name=INFECT_DEATH)
self.model.request_cumulative_output(name="accum_deaths", source="infection_deaths")
# Stratified by age
self.request_stratified_output_for_flow(
INFECT_DEATH, COVID_BASE_AGEGROUPS, "agegroup", name_stem=INFECTION_DEATHS, filter_on="source"
)
for agegroup in COVID_BASE_AGEGROUPS:
self.model.request_cumulative_output(
name=f"accum_deathsXagegroup_{agegroup}",
source=f"infection_deathsXagegroup_{agegroup}",
)
# Stratified by age and clinical stratum
self.request_double_stratified_output_for_flow(
INFECT_DEATH, COVID_BASE_AGEGROUPS, "agegroup", CLINICAL_STRATA, "clinical", name_stem=INFECTION_DEATHS,
filter_on="source"
)
def request_admissions(self):
"""
Track COVID-19-attributable admissions to hospital and to ICU.
Generates the following derived outputs both overall and by age group:
icu_admissions: Only those being admitted to ICU
hospital_admissions: All those being admitted to hospital
"""
# Track non-ICU hospital admissions (transition from early to late active in hospital, non-ICU stratum)
self.model.request_output_for_flow(
name="non_icu_admissions",
flow_name=PROGRESS,
source_strata={"clinical": Clinical.HOSPITAL_NON_ICU},
dest_strata={"clinical": Clinical.HOSPITAL_NON_ICU},
save_results=False,
)
# Track ICU admissions (transition from early to late active in ICU stratum)
self.model.request_output_for_flow(
name="icu_admissions",
flow_name=PROGRESS,
source_strata={"clinical": Clinical.ICU},
dest_strata={"clinical": Clinical.ICU},
)
# Track all hospitalisations as the sum of hospital non-ICU and ICU
self.model.request_aggregate_output("hospital_admissions", sources=["icu_admissions", "non_icu_admissions"])
for agegroup in COVID_BASE_AGEGROUPS:
self.model.request_output_for_flow(
name=f"non_icu_admissionsXagegroup_{agegroup}",
flow_name=PROGRESS,
source_strata={"clinical": Clinical.HOSPITAL_NON_ICU, "agegroup": agegroup},
dest_strata={"clinical": Clinical.HOSPITAL_NON_ICU, "agegroup": agegroup},
save_results=False,
)
self.model.request_output_for_flow(
name=f"icu_admissionsXagegroup_{agegroup}",
flow_name=PROGRESS,
source_strata={"clinical": Clinical.ICU, "agegroup": agegroup},
dest_strata={"clinical": Clinical.ICU, "agegroup": agegroup},
save_results=False,
)
self.model.request_aggregate_output(
f"hospital_admissionsXagegroup_{agegroup}",
sources=[f"icu_admissionsXagegroup_{agegroup}", f"non_icu_admissionsXagegroup_{agegroup}"]
)
def request_occupancy(self, sojourn_periods: Sojourn):
"""
Track the number of people in hospital or in ICU over time.
Args:
sojourn_periods: The sojourn periods for the hospitalised compartments
Generates the following derived outputs both overall and by age group:
icu_occupancy: Only those currently in ICU
hospital_occupancy: All those currently in hospital
"""
# Hospital occupancy is represented as all ICU, all hospital late active, and some early active ICU cases
compartment_periods = sojourn_periods
icu_early_period = compartment_periods["icu_early"]
hospital_early_period = compartment_periods["hospital_early"]
period_icu_patients_in_hospital = max(icu_early_period - hospital_early_period, 0.)
proportion_icu_patients_in_hospital = period_icu_patients_in_hospital / icu_early_period
# Unstratified calculations
self.model.request_output_for_compartments(
"icu_occupancy",
compartments=[Compartment.LATE_ACTIVE],
strata={"clinical": Clinical.ICU},
)
self.model.request_output_for_compartments(
"_late_active_hospital",
compartments=[Compartment.LATE_ACTIVE],
strata={"clinical": Clinical.HOSPITAL_NON_ICU},
save_results=False,
)
self.model.request_output_for_compartments(
"_early_active_icu",
compartments=[Compartment.EARLY_ACTIVE],
strata={"clinical": Clinical.ICU},
save_results=False,
)
self.model.request_function_output(
name="_early_active_icu_proportion",
func=lambda patients: patients * proportion_icu_patients_in_hospital,
sources=["_early_active_icu"],
save_results=False,
)
self.model.request_aggregate_output(
name="hospital_occupancy",
sources=[
"_late_active_hospital",
"icu_occupancy",
"_early_active_icu_proportion",
],
)
# Stratified by age group
for agegroup in COVID_BASE_AGEGROUPS:
age_icu_name = f"icu_occupancyXagegroup_{agegroup}"
self.model.request_output_for_compartments(
name=age_icu_name,
compartments=[Compartment.LATE_ACTIVE],
strata={"clinical": Clinical.ICU, "agegroup": agegroup},
save_results=True,
)
age_late_hospital_name = f"late_hospitalXagegroup_{agegroup}"
self.model.request_output_for_compartments(
name=age_late_hospital_name,
compartments=[Compartment.LATE_ACTIVE],
strata={"clinical": Clinical.HOSPITAL_NON_ICU, "agegroup": agegroup},
save_results=False,
)
age_icu_ealy_name = f"early_active_icuXagegroup_{agegroup}"
self.model.request_output_for_compartments(
name=age_icu_ealy_name,
compartments=[Compartment.EARLY_ACTIVE],
strata={"clinical": Clinical.ICU, "agegroup": agegroup},
save_results=False,
)
age_icu_early_in_hospital_name = f"early_active_icu_in_hospitalXagegroup_{agegroup}"
self.model.request_function_output(
name=age_icu_early_in_hospital_name,
func=lambda patients: patients * proportion_icu_patients_in_hospital,
sources=[age_icu_ealy_name],
save_results=False,
)
self.model.request_aggregate_output(
name=f"hospital_occupancyXagegroup_{agegroup}",
sources=[
age_icu_name,
age_late_hospital_name,
age_icu_early_in_hospital_name,
],
)
def request_tracing(self):
"""
Collate up all the computed values used during the process of working out the effect of contact tracing.
Additionally generates the following derived outputs:
prop_contacts_quarantined: The proportion of all contacts (including those of undetected cases) identified
"""
# Standard calculations always computed when contact tracing requested
self.model.request_computed_value_output("prevalence")
self.model.request_computed_value_output("prop_detected_traced")
self.model.request_computed_value_output("prop_contacts_with_detected_index")
self.model.request_computed_value_output("traced_flow_rate")
# Proportion of quarantined contacts among all contacts
self.model.request_function_output(
name="prop_contacts_quarantined",
func=lambda prop_detected_traced, prop_detected_index: prop_detected_traced * prop_detected_index,
sources=["prop_detected_traced", "prop_contacts_with_detected_index"],
)
def request_strains(self, voc_names: List[str]):
"""
VoC-related outputs.
Args:
voc_names: The names of all the VoCs being implemented in the model
Generates the following derived outputs:
incidence (by strain): See above for definition
prop_incidence (by strain): Proportion of incidence | |
<reponame>vermaport/crossenv
import venv
import os
import sysconfig
import glob
import sys
import shutil
from textwrap import dedent
import subprocess
import logging
import importlib
import types
from configparser import ConfigParser
import random
import shlex
import platform
import pprint
import re
from .utils import F
from . import utils
# Package version string.
__version__ = '0.6'
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class CrossEnvBuilder(venv.EnvBuilder):
"""
A class to build a cross-compiling virtual environment useful for
cross compiling wheels or developing firmware images.
Here the `host` is the device on which the final code will run, such
as an embedded system of some sort. `build` is the machine doing the
compiling, usually a desktop or server. Usually the `host` Python
executables won't run on the `build` machine.
When we refer to `build-python`, we mean the current interpreter. (It is
    *always* the current interpreter.) When we refer to `host-python`, we mean
the interpreter that will run on the host. When we refer to `cross-python`,
we mean an interpreter that runs on `build` but reports system information
as if it were running on `host`. In other words, `cross-python` does the
cross compiling, and is what this class will create for us.
You must have the toolchain used to compile the host Python binary
available when using this virtual environment. The virtual environment
will pick the correct compiler based on info recorded when the host
Python binary was compiled.
:param host_python: The path to the host Python binary. This may be in
a build directory (i.e., after `make`), or in an
install directory (after `make install`). It
*must* be the exact same version as build-python.
:param extra_env_vars: When cross-python starts, this is an iterable of
(name, op, value) tuples. op may be one of '=' to
indicate that the variable will be set
unconditionally, or '?=' to indicate that the
variable will be set only if not already set by the
environment.
:param build_system_site_packages:
Whether or not build-python's virtual environment
will have access to the system site packages.
cross-python never has access, for obvious reasons.
:param clear: Whether to delete the contents of the environment
directories if they already exist, before
environment creation. May be a false value, or one
of 'default', 'cross', 'build', or 'both'.
'default' means to clear cross only when
cross_prefix is None.
:param cross_prefix: Explicitly set the location of the cross-python
virtual environment.
:param with_cross_pip: If True, ensure pip is installed in the
cross-python virtual environment.
:param with_build_pip: If True, ensure pip is installed in the
build-python virtual environment.
:param host_sysroot: If given, the cross-compiler toolchain's sysroot.
If not given, an attempt will be made to guess.
These will be added (redundantly) to the default
search paths to help trick some packages.
:param host_cc: If given, override CC and related variables with
this value.
:param host_cxx: If given, override CXX and related variables with
this value.
:param host_ar: If given, override AR and related variables with
this value.
:param host_relativize: If True, convert absolute paths in CC, CXX, and
related variables to use the base name. Tools must
be in $PATH for this to work.
:param host_config_vars: Extra config_vars (build_time_vars) to override,
such as CC, CCSHARED, etc.
"""
    def __init__(self, *,
            host_python,
            extra_env_vars=(),
            build_system_site_packages=False,
            clear=False,
            cross_prefix=None,
            with_cross_pip=False,
            with_build_pip=False,
            host_sysroot=None,
            host_cc=None,
            host_cxx=None,
            host_ar=None,
            host_relativize=False,
            host_config_vars=()):
        """Record the configuration and interrogate the host Python/toolchain.

        See the class docstring for the meaning of each keyword argument.
        Raises ValueError if with_cross_pip is requested without
        with_build_pip, and propagates errors from probing the host Python
        installation or its compiler.
        """
        # Set the sysroot first: find_compiler_info() may fill it in by
        # querying the compiler when it was not given explicitly.
        self.host_sysroot = host_sysroot
        # Side-effecting probes: find_host_python() populates host_cc and
        # friends, which find_compiler_info() then uses - order matters.
        self.find_host_python(host_python)
        self.find_compiler_info()
        self.build_system_site_packages = build_system_site_packages
        self.extra_env_vars = extra_env_vars
        self.clear_build = clear in ('default', 'build', 'both')
        # cross-pip is bootstrapped via build-pip, so it cannot exist alone.
        if with_cross_pip and not with_build_pip:
            raise ValueError("Cannot have cross-pip without build-pip")
        self.with_cross_pip = with_cross_pip
        self.with_build_pip = with_build_pip
        if cross_prefix:
            self.cross_prefix = os.path.abspath(cross_prefix)
            # 'default' deliberately does NOT clear an explicitly-located
            # cross environment (see the class docstring).
            self.clear_cross = clear in ('cross', 'both')
        else:
            self.cross_prefix = None
            self.clear_cross = clear in ('default', 'cross', 'both')
        self.repl_host_cc = host_cc
        self.repl_host_cxx = host_cxx
        self.repl_host_ar = host_ar
        self.host_relativize = host_relativize
        self.host_config_vars = host_config_vars
        # The base venv.EnvBuilder handles the build-side environment; the
        # cross-specific clearing/site-packages options are handled above.
        super().__init__(
            system_site_packages=False,
            clear=False,
            symlinks=True,
            upgrade=False,
            with_pip=False)
def find_installed_host_home(self):
# Assume host_project_base == {prefix}/bin and that this Python
# mirrors the host Python's install paths.
# On caveat: on native host Python (for testing) this might be a
# virtualenv.
home = os.path.dirname(self.host_project_base)
pyvenv = os.path.join(home, 'pyvenv.cfg')
if os.path.exists(pyvenv):
with open(pyvenv) as fp:
for line in fp:
key, _, val = line.partition('=')
key = key.strip()
val = val.strip()
if key == 'home':
return os.path.dirname(val)
return home
def find_sysconfig_data(self, paths):
maybe = []
for path in paths:
pattern = os.path.join(path, '_sysconfigdata*.py*')
maybe.extend(glob.glob(pattern))
sysconfig_paths = set()
for filename in maybe:
if (os.path.isfile(filename) and
os.path.splitext(filename)[1] in ('.py', '.pyc')):
sysconfig_paths.add(filename)
# Multiples can happen, but so long as they all have the same
# info we should be okay. Seen in buildroot
# When choosing the correct one, prefer, in order:
# 1) The .py file
# 2) The .pyc file
# 3) Any .opt-*.pyc files
# so sort by the length of the longest extension
sysconfig_paths = sorted(sysconfig_paths,
key=lambda x: len(x.split('.',1)[1]))
self.host_sysconfigdata = None
for path in sysconfig_paths:
basename = os.path.basename(path)
name, _ = os.path.splitext(basename)
spec = importlib.util.spec_from_file_location(name, path)
syscfg = importlib.util.module_from_spec(spec)
spec.loader.exec_module(syscfg)
if self.host_sysconfigdata is None:
self.host_sysconfigdata = syscfg
self.host_sysconfigdata_file = path
self.host_sysconfigdata_name = name
elif (self.host_sysconfigdata.build_time_vars !=
syscfg.build_time_vars):
logger.error("Conflicting build info in %s and %s",
self.host_sysconfigdata_file, path)
raise ValueError("Malformed Python installation!")
if not self.host_sysconfigdata:
logger.error("Cannot find _sysconfigdata*.py. Looked in %s",
', '.join(paths))
raise FileNotFoundError("No _sysconfigdata*.py found in host lib")
def find_host_python(self, host):
"""
Find Python paths and other info based on a path.
:param host: Path to the host Python executable.
"""
build_version = sysconfig.get_config_var('VERSION')
host = os.path.abspath(host)
if not os.path.exists(host):
raise FileNotFoundError("%s does not exist" % host)
elif not os.path.isfile(host):
raise ValueError("Expected a path to a Python executable. "
"Got %s" % host)
else:
self.host_project_base = os.path.dirname(host)
if sysconfig._is_python_source_dir(self.host_project_base):
self.host_makefile = os.path.join(self.host_project_base, 'Makefile')
pybuilddir = os.path.join(self.host_project_base, 'pybuilddir.txt')
try:
with open(pybuilddir, 'r') as fp:
build_dir = fp.read().strip()
except IOError:
raise IOError(
"Cannot read %s: Build the host Python first " % s) from None
self.host_home = self.host_project_base
sysconfig_paths = [os.path.join(self.host_project_base, build_dir)]
else:
self.host_home = self.find_installed_host_home()
python_ver = 'python' + sysconfig.get_config_var('py_version_short')
libdir = os.path.join(self.host_home, 'lib', python_ver)
sysconfig_paths = [
libdir,
# Ubuntu puts it in libdir/plat-<arch>
os.path.join(libdir, '*'),
# Below might be a version mismatch, but try to use it
#os.path.join(self.host_home, 'lib', 'python*'),
#os.path.join(self.host_home, 'lib', 'python*', '*'),
]
makefile = glob.glob(os.path.join(libdir, '*', 'Makefile'))
if not makefile:
self.host_makefile = '' # fail later
else:
self.host_makefile = makefile[0]
# We need paths to sysconfig data, and we need to import it to ask
# a few questions.
self.find_sysconfig_data(sysconfig_paths)
# CC could be compound command, like 'gcc --sysroot=...' (Issue #5)
# but that can cause issues (#7) so let the user know.
host_cc = self.host_sysconfigdata.build_time_vars['CC']
self.host_cc = shlex.split(host_cc)
if len(self.host_cc) > 1:
logger.warning("CC is a compound command (%s)", host_cc)
logger.warning("This can cause issues for modules that don't "
"expect it.")
logger.warning("Consider setting CC='%s' and CFLAGS='%s'",
self.host_cc[0], ' '.join(self.host_cc[1:]))
host_cxx = self.host_sysconfigdata.build_time_vars['CXX']
self.host_cxx = shlex.split(host_cxx)
if len(self.host_cxx) > 1:
logger.warning("CXX is a compound command (%s)", host_cxx)
logger.warning("This can cause issues for modules that don't "
"expect it.")
logger.warning("Consider setting CXX='%s' and CXXFLAGS='%s'",
self.host_cxx[0], ' '.join(self.host_cxx[1:]))
host_ar = self.host_sysconfigdata.build_time_vars['AR']
self.host_ar = shlex.split(host_ar)
self.host_version = self.host_sysconfigdata.build_time_vars['VERSION']
# Ask the makefile a few questions too
if not os.path.exists(self.host_makefile):
raise FileNotFoundError("Cannot find Makefile")
self.host_platform = sys.platform # Default: not actually cross compiling
with open(self.host_makefile, 'r') as fp:
for line in fp:
line = line.strip()
if line.startswith('_PYTHON_HOST_PLATFORM='):
host_platform = line.split('=',1)[-1]
if host_platform:
self.host_platform = line.split('=',1)[-1]
break
# Sanity checks
if self.host_version != build_version:
raise ValueError("Version mismatch: host=%s, build=%s" % (
self.host_version, build_version))
def find_compiler_info(self):
"""
Query the compiler for extra info useful for cross-compiling,
and also check that it exists.
"""
def run_compiler(arg):
cmdline = self.host_cc + [arg]
try:
return subprocess.check_output(cmdline, universal_newlines=True)
except subprocess.CalledProcessError:
return None
if run_compiler('--version') is None:
# I guess we could continue...but why?
raise RuntimeError(
"Cannot run cross-compiler! Extension modules won't build!")
return
# TODO: Clang doesn't have this option
if self.host_sysroot is None:
self.host_sysroot = run_compiler('-print-sysroot')
if self.host_sysroot:
self.host_sysroot = self.host_sysroot.strip()
def create(self, env_dir):
"""
Create a cross virtual environment in a directory
:param env_dir: The target directory to create an environment in.
"""
env_dir = os.path.abspath(env_dir)
context = self.ensure_directories(env_dir)
self.create_configuration(context)
| |
related to Diagnosis.
**Inclusion Criteria:** Includes only relevant concepts associated with a diagnosis of optic chiasm disorders or injuries or optic glioma.
**Exclusion Criteria:** Excludes concepts that pertain to 'unspecified eye.'
"""
OID = '2.16.840.1.113883.3.526.3.1457'
VALUE_SET_NAME = 'Disorders of Optic Chiasm'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
ICD10CM = {
'H4741',
'H4742',
'H4743',
'H4749'
}
SNOMEDCT = {
'194057002',
'194058007',
'194059004',
'194060009',
'230517004',
'302902003',
'404653000',
'404664002',
'431769004',
'64246009',
'69820004',
'703429003',
'70476006',
'89654006'
}
class DisordersOfTheImmuneSystem(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent diagnoses affecting the working of the immune system, including disorders of the immunoglobulins and the complement system.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM)category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with diagnoses that indicate disorders of the immune system that can interfere with the bodies immune function.
    **Exclusion Criteria:** No exclusions.
    """

    # VSAC value-set identity and expansion metadata
    OID = '2.16.840.1.113883.3.464.1003.120.12.1001'
    VALUE_SET_NAME = 'Disorders of the Immune System'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    # ICD-10-CM diagnosis codes (immunodeficiency and immune-mechanism disorders)
    ICD10CM = {
        'D800', 'D801', 'D802', 'D803', 'D804', 'D805',
        'D806', 'D807', 'D808', 'D809', 'D810', 'D811',
        'D812', 'D814', 'D816', 'D817', 'D8189', 'D819',
        'D820', 'D821', 'D822', 'D823', 'D824', 'D828',
        'D829', 'D830', 'D831', 'D832', 'D838', 'D839',
        'D840', 'D841', 'D848', 'D849', 'D893', 'D89810',
        'D89811', 'D89812', 'D89813', 'D8982', 'D8989', 'D899',
    }

    # SNOMED CT concept codes
    SNOMEDCT = {
        '121121000119106', '121131000119109', '128631000119109', '18827005', '190979003',
        '190980000', '190981001', '190996002', '190997006', '190998001',
        '191001007', '191002000', '191011000', '191012007', '191013002',
        '191018006', '23238000', '234416002', '234532001', '234562007',
        '234593008', '234594002', '234595001', '234596000', '234597009',
        '234598004', '234599007', '234600005', '234601009', '234602002',
        '234603007', '234604001', '234605000', '234607008', '234608003',
        '234609006', '234611002', '234612009', '234613004', '234614005',
        '234615006', '234616007', '234617003', '234618008', '234619000',
        '234620006', '234621005', '234622003', '234623008', '234624002',
        '234625001', '234626000', '234627009', '234628004', '234629007',
        '234630002', '234631003', '234646005', '24419001', '24743004',
        '25109007', '263661007', '36070007', '39674000', '402355000',
        '402356004', '402357008', '402358003', '402360001', '403835002',
        '403836001', '403837005', '414029004', '426202004', '427167008',
        '442459007', '444547006', '449187006', '55602000', '702444009',
        '719156006', '720853005', '722067005', '81166004', '82286005',
        '82966003', '88714009',
    }
class DisordersOfVisualCortex(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent a diagnosis of disorders of visual cortex.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with a diagnosis of a disorder of visual cortex, including inflammatory disorders, neoplasm, and cortical blindness.
    **Exclusion Criteria:** Excludes concepts that pertain to 'unspecified eye.'
    """

    # VSAC value-set identity and expansion metadata
    OID = '2.16.840.1.113883.3.526.3.1458'
    VALUE_SET_NAME = 'Disorders of Visual Cortex'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    # ICD-10-CM diagnosis codes
    ICD10CM = {
        'H47611', 'H47612',
    }

    # SNOMED CT concept codes
    SNOMEDCT = {
        '128329001', '194068002', '194069005', '194070006', '230526001',
        '302904002', '342741000119103', '342751000119101', '68574006', '732251003',
    }
class DisseminatedChorioretinitisAndDisseminatedRetinochoroiditis(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent a diagnosis of disseminated chorioretinitis and/or retinochoroiditis.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with a diagnosis of disseminated chorioretinal inflammation, disseminated retinochoroiditis, and placoid pigment epitheliopathy.
    **Exclusion Criteria:** Excludes concepts that pertain to 'unspecified eye.'
    """

    # VSAC value-set identity and expansion metadata
    OID = '2.16.840.1.113883.3.526.3.1459'
    VALUE_SET_NAME = 'Disseminated Chorioretinitis and Disseminated Retinochoroiditis'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    # ICD-10-CM diagnosis codes
    ICD10CM = {
        'A1853', 'H30101', 'H30102', 'H30103', 'H30111', 'H30112',
        'H30113', 'H30121', 'H30122', 'H30123', 'H30131', 'H30132',
        'H30133', 'H30141', 'H30142', 'H30143',
    }

    # SNOMED CT concept codes
    SNOMEDCT = {
        '14894002', '193442009', '193443004', '193444005', '371099005', '48225000',
        '51579006', '61517002', '69811008', '77939001', '78769001',
    }
class DxaDualEnergyXrayAbsorptiometryScan(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent a dual-energy x-ray absorptiometry (DXA) scan.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnostic Study.
    **Inclusion Criteria:** Includes only relevant concepts associated with DXA scans of the femur, radius, ulna, lumbar spine, hip, calcaneus and skeletal system. This is a grouping of LOINC codes.
    **Exclusion Criteria:** No exclusions.
    """

    # VSAC value-set identity and expansion metadata
    OID = '2.16.840.1.113883.3.464.1003.113.12.1051'
    VALUE_SET_NAME = 'DXA (Dual energy Xray Absorptiometry) Scan'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    # LOINC observation codes
    LOINC = {
        '24701-5', '24890-6', '24966-4', '38261-4', '38262-2', '38263-0',
        '38264-8', '38265-5', '38266-3', '38267-1', '38268-9', '38269-7',
        '46278-8', '46279-6', '46383-6', '62910-5',
    }
class Dysthymia(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent diagnoses related to dysthymia.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with diagnoses for dysthymia, including early and late onset, and primary and secondary dysthymia. This groups ICD-10-CM and SNOMED CT codes.
    **Exclusion Criteria:** No exclusions.
    """

    # VSAC value-set identity and expansion metadata
    OID = '2.16.840.1.113883.3.67.1.101.1.254'
    VALUE_SET_NAME = 'Dysthymia'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    # ICD-10-CM diagnosis codes
    ICD10CM = {
        'F341',
    }

    # SNOMED CT concept codes
    SNOMEDCT = {
        '19694002', '2506003', '3109008', '36170009', '38451003',
        '67711008', '78667006', '83176005', '85080004',
    }
class EhlersDanlosSyndrome(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent a diagnosis of Ehlers-Danlos syndrome.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with diagnosis codes for Ehlers-Danlos syndrome. This is a grouping of ICD-9-CM, ICD-10-CM and SNOMED CT codes.
    **Exclusion Criteria:** No exclusions.
    """

    # VSAC value-set identity and expansion metadata
    OID = '2.16.840.1.113883.3.464.1003.113.12.1047'
    VALUE_SET_NAME = 'Ehlers Danlos Syndrome'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    # ICD-10-CM diagnosis codes
    ICD10CM = {
        'Q796', 'Q7960', 'Q7961', 'Q7962', 'Q7963', 'Q7969',
    }

    # Legacy ICD-9-CM diagnosis codes
    ICD9CM = {
        '75683',
    }

    # SNOMED CT concept codes
    SNOMEDCT = {
        '10033001', '17025000', '20766005', '25606004', '30652003', '31798004',
        '398114001', '4170004', '50869007', '55711009', '59399004', '67202007',
        '70610001', '71322004', '83470009', '83586000', '86667008',
    }
class EjectionFraction(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent ejection fraction.
    **Data Element Scope:** This value set may use Quality Data Model (QDM) category related to Diagnostic Study.
    **Inclusion Criteria:** Includes only relevant concepts associated with studies specific to obtaining left ventricular ejection fraction.
    **Exclusion Criteria:** No exclusions.
    """

    # VSAC value-set identity and expansion metadata
    OID = '2.16.840.1.113883.3.526.3.1134'
    VALUE_SET_NAME = 'Ejection Fraction'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    # LOINC observation codes
    LOINC = {
        '10230-1', '18043-0', '18044-8', '18045-5', '18046-3', '18047-1',
        '18048-9', '18049-7', '77889-4', '77890-2', '77891-0', '77892-8',
        '79990-8', '79991-6', '79992-4', '79993-2', '8806-2', '8807-0',
        '8808-8', '8809-6', '8810-4', '8811-2', '8812-0',
    }
class EncephalopathyDueToChildhoodVaccination(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent a diagnosis of encephalopathy due to childhood vaccination.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with encephalopathy (a brain injury) due to childhood vaccination.
    **Exclusion Criteria:** No exclusions.
    """

    # VSAC value-set identity and expansion metadata
    OID = '2.16.840.1.113883.3.464.1003.114.12.1007'
    VALUE_SET_NAME = 'Encephalopathy due to Childhood Vaccination'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    # ICD-10-CM diagnosis codes
    ICD10CM = {
        'G0432',
    }

    # SNOMED CT concept codes
    SNOMEDCT = {
        '192704009', '192705005', '192706006', '192707002', '192708007', '192709004',
        '192710009', '192711008', '192712001', '192713006', '192714000', '192715004',
        '192716003', '192717007', '192718002', '192719005', '192720004', '192721000',
        '192722007', '192723002', '192724008',
    }
class EndStageRenalDisease(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent a diagnosis of end stage renal disease.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with a diagnosis of end stage renal disease.
    **Exclusion Criteria:** No exclusions.
    """

    # VSAC value-set identity and expansion metadata
    OID = '2.16.840.1.113883.3.526.3.353'
    VALUE_SET_NAME = 'End Stage Renal Disease'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    # ICD-10-CM diagnosis codes
    ICD10CM = {
        'N186',
    }

    # SNOMED CT concept codes
    SNOMEDCT = {
        '236434000', '236435004', '236436003', '46177005',
    }
class EssentialHypertension(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent essential hypertension.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with identifying patients who are diagnosed with essential hypertension.
    **Exclusion Criteria:** No exclusions.
    """

    # VSAC value-set identity and expansion metadata
    OID = '2.16.840.1.113883.3.464.1003.104.12.1011'
    VALUE_SET_NAME = 'Essential Hypertension'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'

    # ICD-10-CM diagnosis codes
    ICD10CM = {
        'I10',
    }

    # Legacy ICD-9-CM diagnosis codes
    ICD9CM = {
        '4010', '4011', '4019',
    }

    # SNOMED CT concept codes
    SNOMEDCT = {
        '10725009', '1201005', '276789009', '371125006', '429457004', '46481004',
        '48146000', '56218007', '59621000', '59720008', '65518004', '762463000',
        '78975002',
    }
class FindingOfElevatedBloodPressureOrHypertension(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent elevated blood pressure or hypertension.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) categories or attributes related to a Diagnosis or Finding.
**Inclusion Criteria:** Includes only relevant concepts associated with elevated blood pressure or hypertension.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113762.1.4.1047.514'
VALUE_SET_NAME = 'Finding of Elevated Blood Pressure or Hypertension'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
ICD10CM = {
'R030'
}
SNOMEDCT = {
'198941007',
'24184005',
'31992008',
'371622005',
'38341003',
'443482000',
'48146000',
'48194001',
'56218007'
| |
<reponame>holly-evans/airflow
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Databricks hook.
This hook enables the submitting and running of jobs to the Databricks platform. Internally the
operators talk to the
``api/2.1/jobs/run-now``
`endpoint <https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunNow>`_
or the ``api/2.1/jobs/runs/submit``
`endpoint <https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunsSubmit>`_.
"""
import json
from typing import Any, Dict, List, Optional
from requests import exceptions as requests_exceptions
from airflow.exceptions import AirflowException
from airflow.providers.databricks.hooks.databricks_base import BaseDatabricksHook
# Each endpoint is an (HTTP method, relative path) pair passed to _do_api_call.
# Cluster lifecycle endpoints (Clusters API 2.0)
RESTART_CLUSTER_ENDPOINT = ("POST", "api/2.0/clusters/restart")
START_CLUSTER_ENDPOINT = ("POST", "api/2.0/clusters/start")
TERMINATE_CLUSTER_ENDPOINT = ("POST", "api/2.0/clusters/delete")
# Job-run endpoints (Jobs API 2.1)
RUN_NOW_ENDPOINT = ('POST', 'api/2.1/jobs/run-now')
SUBMIT_RUN_ENDPOINT = ('POST', 'api/2.1/jobs/runs/submit')
GET_RUN_ENDPOINT = ('GET', 'api/2.1/jobs/runs/get')
CANCEL_RUN_ENDPOINT = ('POST', 'api/2.1/jobs/runs/cancel')
OUTPUT_RUNS_JOB_ENDPOINT = ('GET', 'api/2.1/jobs/runs/get-output')
# Library management endpoints (Libraries API 2.0)
INSTALL_LIBS_ENDPOINT = ('POST', 'api/2.0/libraries/install')
UNINSTALL_LIBS_ENDPOINT = ('POST', 'api/2.0/libraries/uninstall')
# Job listing and workspace-object status endpoints
LIST_JOBS_ENDPOINT = ('GET', 'api/2.1/jobs/list')
WORKSPACE_GET_STATUS_ENDPOINT = ('GET', 'api/2.0/workspace/get-status')
# Life-cycle states a Databricks run can report (used by RunState.is_terminal).
RUN_LIFE_CYCLE_STATES = ['PENDING', 'RUNNING', 'TERMINATING', 'TERMINATED', 'SKIPPED', 'INTERNAL_ERROR']
class RunState:
    """Utility class for the run state concept of Databricks runs."""

    def __init__(
        self, life_cycle_state: str, result_state: str = '', state_message: str = '', *args, **kwargs
    ) -> None:
        # Extra *args/**kwargs are accepted (and ignored) so that from_json
        # tolerates additional fields returned by the Databricks API.
        self.life_cycle_state = life_cycle_state
        self.result_state = result_state
        self.state_message = state_message

    @property
    def is_terminal(self) -> bool:
        """True if the current state is a terminal state."""
        if self.life_cycle_state not in RUN_LIFE_CYCLE_STATES:
            raise AirflowException(
                (
                    'Unexpected life cycle state: {}: If the state has '
                    'been introduced recently, please check the Databricks user '
                    'guide for troubleshooting information'
                ).format(self.life_cycle_state)
            )
        return self.life_cycle_state in ('TERMINATED', 'SKIPPED', 'INTERNAL_ERROR')

    @property
    def is_successful(self) -> bool:
        """True if the result state is SUCCESS"""
        return self.result_state == 'SUCCESS'

    def __eq__(self, other: object) -> bool:
        # States compare equal only when all three fields match.
        if not isinstance(other, RunState):
            return NotImplemented
        return (
            self.life_cycle_state == other.life_cycle_state
            and self.result_state == other.result_state
            and self.state_message == other.state_message
        )

    def __repr__(self) -> str:
        return str(self.__dict__)

    def to_json(self) -> str:
        """Serialize this state to a JSON string (inverse of :meth:`from_json`)."""
        return json.dumps(self.__dict__)

    @classmethod
    def from_json(cls, data: str) -> 'RunState':
        """Rebuild a state from a :meth:`to_json` string.

        BUG FIX: previously instantiated ``RunState`` directly, so subclasses
        deserialized to the wrong type; ``cls`` keeps round-trips type-correct.
        """
        return cls(**json.loads(data))
class DatabricksHook(BaseDatabricksHook):
"""
Interact with Databricks.
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
:param timeout_seconds: The amount of time in seconds the requests library
will wait before timing-out.
:param retry_limit: The number of times to retry the connection in case of
service outages.
:param retry_delay: The number of seconds to wait between retries (it
might be a floating point number).
:param retry_args: An optional dictionary with arguments passed to ``tenacity.Retrying`` class.
"""
hook_name = 'Databricks'
    def __init__(
        self,
        databricks_conn_id: str = BaseDatabricksHook.default_conn_name,
        timeout_seconds: int = 180,
        retry_limit: int = 3,
        retry_delay: float = 1.0,
        retry_args: Optional[Dict[Any, Any]] = None,
    ) -> None:
        # All arguments are documented on the class docstring and forwarded
        # unchanged to BaseDatabricksHook.
        super().__init__(databricks_conn_id, timeout_seconds, retry_limit, retry_delay, retry_args)
    def run_now(self, json: dict) -> int:
        """
        Utility function to call the ``api/2.1/jobs/run-now`` endpoint.

        :param json: The data used in the body of the request to the ``run-now`` endpoint.
        :return: the run_id as an int
        :rtype: int
        """
        response = self._do_api_call(RUN_NOW_ENDPOINT, json)
        return response['run_id']
    def submit_run(self, json: dict) -> int:
        """
        Utility function to call the ``api/2.1/jobs/runs/submit`` endpoint.

        :param json: The data used in the body of the request to the ``submit`` endpoint.
        :return: the run_id as an int
        :rtype: int
        """
        response = self._do_api_call(SUBMIT_RUN_ENDPOINT, json)
        return response['run_id']
def list_jobs(self, limit: int = 25, offset: int = 0, expand_tasks: bool = False) -> List[Dict[str, Any]]:
"""
Lists the jobs in the Databricks Job Service.
:param limit: The limit/batch size used to retrieve jobs.
:param offset: The offset of the first job to return, relative to the most recently created job.
:param expand_tasks: Whether to include task and cluster details in the response.
:return: A list of jobs.
"""
has_more = True
jobs = []
while has_more:
json = {
'limit': limit,
'offset': offset,
'expand_tasks': expand_tasks,
}
response = self._do_api_call(LIST_JOBS_ENDPOINT, json)
jobs += response['jobs'] if 'jobs' in response else []
has_more = response.get('has_more', False)
if has_more:
offset += len(response['jobs'])
return jobs
def find_job_id_by_name(self, job_name: str) -> Optional[int]:
"""
Finds job id by its name. If there are multiple jobs with the same name, raises AirflowException.
:param job_name: The name of the job to look up.
:return: The job_id as an int or None if no job was found.
"""
all_jobs = self.list_jobs()
matching_jobs = [j for j in all_jobs if j['settings']['name'] == job_name]
if len(matching_jobs) > 1:
raise AirflowException(
f"There are more than one job with name {job_name}. Please delete duplicated jobs first"
)
if not matching_jobs:
return None
else:
return matching_jobs[0]['job_id']
    def get_run_page_url(self, run_id: int) -> str:
        """
        Retrieves run_page_url via ``api/2.1/jobs/runs/get``.

        :param run_id: id of the run
        :return: URL of the run page
        """
        json = {'run_id': run_id}
        response = self._do_api_call(GET_RUN_ENDPOINT, json)
        return response['run_page_url']
    async def a_get_run_page_url(self, run_id: int) -> str:
        """
        Async version of `get_run_page_url()`.

        :param run_id: id of the run
        :return: URL of the run page
        """
        json = {'run_id': run_id}
        response = await self._a_do_api_call(GET_RUN_ENDPOINT, json)
        return response['run_page_url']
    def get_job_id(self, run_id: int) -> int:
        """
        Retrieves job_id from run_id via ``api/2.1/jobs/runs/get``.

        :param run_id: id of the run
        :return: Job id for given Databricks run
        """
        json = {'run_id': run_id}
        response = self._do_api_call(GET_RUN_ENDPOINT, json)
        return response['job_id']
    def get_run_state(self, run_id: int) -> RunState:
        """
        Retrieves run state of the run.

        Please note that any Airflow tasks that call the ``get_run_state`` method will result in
        failure unless you have enabled xcom pickling. This can be done using the following
        environment variable: ``AIRFLOW__CORE__ENABLE_XCOM_PICKLING``

        If you do not want to enable xcom pickling, use the ``get_run_state_str`` method to get
        a string describing state, or ``get_run_state_lifecycle``, ``get_run_state_result``, or
        ``get_run_state_message`` to get individual components of the run state.

        :param run_id: id of the run
        :return: state of the run
        :rtype: RunState
        """
        json = {'run_id': run_id}
        response = self._do_api_call(GET_RUN_ENDPOINT, json)
        state = response['state']
        return RunState(**state)
    async def a_get_run_state(self, run_id: int) -> RunState:
        """
        Async version of `get_run_state()`.

        :param run_id: id of the run
        :return: state of the run
        :rtype: RunState
        """
        json = {'run_id': run_id}
        response = await self._a_do_api_call(GET_RUN_ENDPOINT, json)
        state = response['state']
        return RunState(**state)
def get_run_state_str(self, run_id: int) -> str:
"""
Return the string representation of RunState.
:param run_id: id of the run
:return: string describing run state
"""
state = self.get_run_state(run_id)
run_state_str = (
f"State: {state.life_cycle_state}. Result: {state.result_state}. {state.state_message}"
)
return run_state_str
    def get_run_state_lifecycle(self, run_id: int) -> str:
        """
        Returns the lifecycle state of the run (xcom-friendly alternative to ``get_run_state``).

        :param run_id: id of the run
        :return: string with lifecycle state
        """
        return self.get_run_state(run_id).life_cycle_state
    def get_run_state_result(self, run_id: int) -> str:
        """
        Returns the resulting state of the run (xcom-friendly alternative to ``get_run_state``).

        :param run_id: id of the run
        :return: string with resulting state
        """
        return self.get_run_state(run_id).result_state
    def get_run_state_message(self, run_id: int) -> str:
        """
        Returns the state message for the run (xcom-friendly alternative to ``get_run_state``).

        :param run_id: id of the run
        :return: string with state message
        """
        return self.get_run_state(run_id).state_message
    def get_run_output(self, run_id: int) -> dict:
        """
        Retrieves run output of the run via ``api/2.1/jobs/runs/get-output``.

        :param run_id: id of the run
        :return: output of the run
        """
        json = {'run_id': run_id}
        run_output = self._do_api_call(OUTPUT_RUNS_JOB_ENDPOINT, json)
        return run_output
    def cancel_run(self, run_id: int) -> None:
        """
        Cancels the run via ``api/2.1/jobs/runs/cancel``.

        :param run_id: id of the run
        """
        json = {'run_id': run_id}
        self._do_api_call(CANCEL_RUN_ENDPOINT, json)
    def restart_cluster(self, json: dict) -> None:
        """
        Restarts the cluster via ``api/2.0/clusters/restart``.

        :param json: json dictionary containing cluster specification.
        """
        self._do_api_call(RESTART_CLUSTER_ENDPOINT, json)
    def start_cluster(self, json: dict) -> None:
        """
        Starts the cluster via ``api/2.0/clusters/start``.

        :param json: json dictionary containing cluster specification.
        """
        self._do_api_call(START_CLUSTER_ENDPOINT, json)
    def terminate_cluster(self, json: dict) -> None:
        """
        Terminates the cluster via ``api/2.0/clusters/delete``.

        :param json: json dictionary containing cluster specification.
        """
        self._do_api_call(TERMINATE_CLUSTER_ENDPOINT, json)
    def install(self, json: dict) -> None:
        """
        Install libraries on the cluster.

        Utility function to call the ``2.0/libraries/install`` endpoint.

        :param json: json dictionary containing cluster_id and an array of library
        """
        self._do_api_call(INSTALL_LIBS_ENDPOINT, json)
def uninstall(self, json: dict) -> None:
"""
Uninstall libraries on the cluster.
Utility function to call | |
'''
Created on 5 Jan 2022
@author: laurentmichel
'''
from lxml import etree
from copy import deepcopy
from . import logger
from .exceptions import *
from .annotation_seeker import AnnotationSeeker
from .resource_seeker import ResourceSeeker
from .table_iterator import TableIterator
from .static_reference_resolver import StaticReferenceResolver
from .dynamic_reference import DynamicReference
from .to_json_converter import ToJsonConverter
from .json_block_extractor import JsonBlockExtractor
from .join_operator import JoinOperator
from pyvo.vomas.stc_classes.measure import Measure
from pyvo.vomas.stc_classes.measure import Position, Time, GenericMeasure
from pyvo.vomas.astropy_wrapper.sky_coord import SkyCoord
from pyvo.vomas.utils.dict_utils import DictUtils
from pyvo.vomas.utils.xml_utils import XmlUtils
class ModelViewer(object):
'''
ModelViewer is a PyVO table wrapper aiming at providing a model view on VOTable data read with usual tools
Standard usage applied to data rows
.. code-block:: python
votable = parse(votable_path)
for resource in votable.resources:
model_viewer = ModelViewer(resource)
model_viewer.connect_table("results")
while True:
data_row = model_viewer.get_next_row()
if data_row is None:
break
model_view = model_viewer.get_model_view()
json_model_view = model_viewer.get_json_model_view()
break
Standard usage applied to global instances
.. code-block:: python
votable = parse(votable_path)
for resource in votable.resources:
model_viewer = ModelViewer(resource)
time_series = model_viewer.get_globals_instance("cube:TimeSeries")
'''
    def __init__(self, resource):
        '''
        Constructor

        :param resource: VOTable resource; must have type="results" (asserted below)
        :type resource: astropy.Resource
        '''
        self._resource = resource
        self._assert_resource_is_result()
        # Set by _extract_mapping_block(); stays None when the resource has no annotation.
        self._annotation_seeker = None
        self._resource_seeker = ResourceSeeker(self._resource)
        # Row-browsing state, populated by connect_table() / get_next_row().
        self._connected_table = None
        self._connected_tableref = None
        self._current_data_row = None
        # when the search object is in GLOBALS
        self._globals_instance = None
        self._last_row=None
        self._templates = None
        # JOIN/REFERENCE elements squashed out of the templates, resolved lazily
        # (see _squash_join_and_references()).
        self._joins = {}
        self._dyn_references = {}
        self._extract_mapping_block()
"""
Properties
"""
    @property
    def annotation_seeker(self):
        """
        Return an API to search various components in the XML mapping block.

        None when the resource carries no model annotation.
        """
        return self._annotation_seeker
    @property
    def resource_seeker(self):
        """
        Return an API to search various components in the VOTable resource.
        """
        return self._resource_seeker
    @property
    def connected_table(self):
        """Return the table selected by connect_table(), or None if none is connected."""
        return self._connected_table
@property
def connected_table_ref(self):
return self._connected_table_ref
    @property
    def current_data_row(self):
        """Return the last row read by get_next_row(); requires a connected table."""
        self._assert_table_is_connected()
        return self._current_data_row
"""
Global accessors
"""
def get_table_ids(self):
"""
return a list of the table located just below self.resource
"""
return self.resource_seeker.get_table_ids()
def get_globals_models(self):
"""
Collection types are GLOBALS/COLLECTION/INSTANCE@dmtype: used for collections of static objects
:return : The dmtypes of all the top level INSTANCE/COLLECTION of GLOBALS
:rtype: {'COLLECTION': [dmtpyes], 'INSTANCE': [dmtypes]}
"""
retour = {}
retour["COLLECTION"] = self._annotation_seeker.get_globals_collection_dmtypes()
retour["INSTANCE"] = self._annotation_seeker.get_globals_instance_dmtypes()
return retour
def get_templates_models(self):
"""
COLLECTION not implemented yet
:return : The dmtypes (except ivoa:*) of all INSTANCE/COLLECTION of all TEMPLATES
:rtype: {'tableref: {'COLLECTIONS': [dmtpyes], 'INSTANCE': [dmtypes]}, ...}
"""
retour = {}
gni = self._annotation_seeker.get_instance_dmtypes()['TEMPLATES']
for tid, tmplids in gni.items():
retour[tid] = {'COLLECTIONS':[], 'INSTANCE':tmplids}
return retour
"""
Data browsing
"""
    def get_globals_instance(self, dmtype, resolve_ref=True):
        """
        Return a model view on the GLOBALS objects (INSTANCE or COLLECTION) with @dmtype=dmtype.

        :param dmtype: searched data model type
        :param resolve_ref: when True, static references are resolved in the returned copies
        :return: list of deep-copied XML instances (one per matching COLLECTION)
        :raises NotImplementedException: when the match is a GLOBALS/INSTANCE (not supported yet)
            or when no matching dmtype exists in GLOBALS at all
        """
        globals_models = self.get_globals_models()
        found = False
        retour = []
        for globals_type in globals_models["COLLECTION"]:
            if globals_type == dmtype:
                found = True
                # We process only one instance for now
                self._globals_instance = self.annotation_seeker.get_instance_by_dmtype(globals_type)['GLOBALS'][0]
                self._squash_globals_join_and_references()
                # Work on a copy so the stored annotation block stays untouched.
                globals_instance_copy = deepcopy(self._globals_instance)
                if resolve_ref is True:
                    StaticReferenceResolver.resolve(self._annotation_seeker, None, globals_instance_copy)
                for join_tag, join in self._joins.items():
                    logger.info("resolve join %s", join_tag)
                    # NOTE(review): the join is built with self._connected_tableref even
                    # though we are browsing GLOBALS -- confirm this is intended when no
                    # table has been connected (it is None then).
                    join_operator = JoinOperator(self, self._connected_tableref, join)
                    join_operator._set_filter()
                    join_operator._set_foreign_instance()
                    join_operator.get_matching_data(None)
                    # Replace the JOIN placeholder with the matched foreign instances.
                    ref_element = globals_instance_copy.xpath("//" + join_tag)[0]
                    ref_host = ref_element.getparent()
                    for cpart in join_operator.get_matching_model_view(resolve_ref=resolve_ref):
                        ref_host.append(deepcopy(cpart))
                    # Drop the reference
                    ref_host.remove(ref_element)
                retour.append(globals_instance_copy);
        if found is True:
            return retour
        else:
            # No COLLECTION match: a plain INSTANCE match is recognized but unsupported.
            for globals_type in globals_models["INSTANCE"]:
                if globals_type == dmtype:
                    raise NotImplementedException("GLOBALS/INSTANCE access not implemented yet")
            raise NotImplementedException(f"no {dmtype} type found in GLOBALS")
def get_globals_instance_json_model_view(self, dmtype, resolve_ref=True):
"""
return a JSON model view of the last read row
"""
retour = []
xml_instances = self.get_globals_instance(dmtype, resolve_ref=resolve_ref)
for xml_instance in xml_instances:
logger.debug("build json view")
tjc = ToJsonConverter(xml_instance)
retour.append(tjc.get_json_instance())
return retour
    def connect_table(self, tableref):
        """
        Iterate over the table identified by tableref.

        Required to browse table data.
        Connect to the first table if tableref is None.

        :param tableref: ID of the table to connect (None for the first table)
        :raises MappingException: if the table or its TEMPLATES block cannot be found
        """
        self._connected_tableref = tableref
        self._connected_table = self._resource_seeker.get_table(tableref)
        if self.connected_table is None:
            raise MappingException("Cannot find table {} in VOTable".format(tableref))
        logger.debug("table %s found in VOTable", tableref)
        # Work on a copy of the TEMPLATES block: it is mutated by the squash step below.
        self._templates = deepcopy(self.annotation_seeker.get_templates_block(tableref))
        if self._templates is None:
            raise MappingException("Cannot find TEMPLATES {} ".format(tableref))
        logger.debug("TEMPLATES %s found ", tableref)
        self.table_iterator = TableIterator(tableref, self.connected_table.to_table())
        # Prepare row-level resolution: strip JOIN/REFERENCE, map columns to indices/units.
        self._squash_join_and_references()
        self._set_column_indices()
        self._set_column_units()
def get_next_row(self):
"""
Return the next data row of the connected table
"""
self._assert_table_is_connected()
self._current_data_row = self.table_iterator._get_next_row()
return self._current_data_row
def rewind(self):
"""
Rewind the table iterator of the connected table
"""
self._assert_table_is_connected()
self.table_iterator._rewind()
    def get_model_view(self, resolve_ref=True):
        """
        Return a XML model view of the last read row.

        Processing steps:
        1. deep-copy the TEMPLATES block so repeated calls start from a clean tree
        2. optionally resolve static REFERENCEs
        3. fill every ATTRIBUTE@value from the current data row
        4. resolve dynamic (row-dependent) references
        5. resolve JOINs by appending the matching foreign instances

        :param resolve_ref: when True, static references and joined views are resolved
        :return: annotated deep copy of the TEMPLATES element
        """
        self._assert_table_is_connected()
        templates_copy = deepcopy(self._templates)
        if resolve_ref is True:
            StaticReferenceResolver.resolve(self._annotation_seeker, self._connected_tableref, templates_copy)
        for ele in templates_copy.xpath("//ATTRIBUTE"):
            ref = ele.get("ref")
            if ref is not None:
                # @index is expected to have been set at connect time
                # (see connect_table -> _set_column_indices) -- TODO confirm
                index = ele.attrib["index"]
                ele.attrib["value"] = str(self._current_data_row[int(index)])
        for dref_tag, dref in self._dyn_references.items():
            logger.info("resolve reference %s", dref_tag)
            dyn_resolver = DynamicReference(self, dref_tag, self._connected_tableref, dref)
            dyn_resolver._set_mode()
            ref_target = dyn_resolver.get_target_instance(self._current_data_row)
            ref_element = templates_copy.xpath("//" + dref_tag)[0]
            ref_host = ref_element.getparent()
            ref_target_copy = deepcopy(ref_target)
            # Set the reference role to the copied instance
            ref_target_copy.attrib["dmrole"] = ref_element.get('dmrole')
            # Insert the referenced object
            ref_host.append(ref_target_copy)
            # Drop the reference
            ref_host.remove(ref_element)
        for join_tag, join in self._joins.items():
            logger.info("resolve join %s", join_tag)
            join_operator = JoinOperator(self, self._connected_tableref, join)
            join_operator._set_filter()
            join_operator._set_foreign_instance()
            join_operator.get_matching_data(self._current_data_row)
            ref_element = templates_copy.xpath("//" + join_tag)[0]
            ref_host = ref_element.getparent()
            for cpart in join_operator.get_matching_model_view(resolve_ref=resolve_ref):
                ref_host.append(deepcopy(cpart))
            # Drop the reference
            ref_host.remove(ref_element)
        return templates_copy
    def get_json_model_view(self, resolve_ref=True):
        """
        Return a JSON model view of the last read row.

        :param resolve_ref: forwarded to get_model_view()
        :return: JSON view as produced by ToJsonConverter.get_json_instance()
        """
        self._assert_table_is_connected()
        logger.debug("build json view")
        tjc = ToJsonConverter(self.get_model_view(resolve_ref=resolve_ref))
        return tjc.get_json_instance()
    def get_json_model_component_by_type(self, searched_dmtype):
        """
        Return the first instance with @dmtype=searched_dmtype from the json view of the current data row.

        Return a {} if no matching dmtype was found.
        """
        self._assert_table_is_connected()
        json_view = self.get_json_model_view()
        return JsonBlockExtractor.search_subelement_by_type(json_view, searched_dmtype)
def get_model_component_by_type(self, searched_dmtype):
"""
return the list of the xml instances with @dmtype=searched_ type from the model view of the current data row
Return a {} if no matching dmtype was found
"""
self._assert_table_is_connected()
retour = []
model_view = self.get_model_view(resolve_ref=True)
for ele in model_view.xpath(f'.//INSTANCE[@dmtype="{searched_dmtype}"]'):
retour.append(deepcopy(ele))
return retour
    def get_json_model_component_by_role(self, searched_dmrole):
        """
        Return the first instance with dmrole=searched_dmrole from the json view of the current data row.

        Return a {} if no matching dmrole is found.
        """
        self._assert_table_is_connected()
        json_view = self.get_json_model_view()
        return JsonBlockExtractor.search_subelement_by_role(json_view, searched_dmrole)
def get_stc_positions(self):
"""
returns the all positions found as a list of STC Positions instances
"""
retour = []
for position in self.get_model_component_by_type("meas:Position"):
retour.append(Position(position))
return retour
def get_stc_times(self):
"""
returns the all time measure found as a list of STC Time instances
"""
retour = []
for time in self.get_model_component_by_type("meas:Time"):
retour.append(Time(time))
return retour
def get_stc_generic_measures(self):
"""
returns the all generic measures found as a list of STC Time instances
"""
retour = []
for measure in self.get_model_component_by_type("meas:GenericMeasure"):
retour.append(GenericMeasure(measure))
return retour
def get_stc_measures(self):
"""
returns the all measures found as a list of STC Positions instances
"""
retour = []
model_view = self.get_model_view(resolve_ref=True)
for ele in model_view.xpath(f'.//INSTANCE[ATTRIBUTE[@dmrole="meas:Measure.ucd"]]'):
retour.append(Measure.get_measure(ele))
return retour
def get_stc_measures_by_ucd(self, ucd):
"""
returns the all measures found as a list of STC Positions instances
"""
retour = []
model_view = self.get_model_view(resolve_ref=True)
for ele in model_view.xpath(f'.//INSTANCE[ATTRIBUTE[@dmrole="meas:Measure.ucd" and @value="{ucd}"]]'):
retour.append(Measure.get_measure(ele))
return retour
    def get_astropy_sky_coord(self):
        """
        Returns an instance of Astropy.SkyCoord if the set of mapped STC Measures allows it.
        None otherwise.

        Built from all measures of the current data row (see get_stc_measures).
        """
        return SkyCoord(self.get_stc_measures()).get_sky_coord()
"""
Private methods
"""
    def _assert_table_is_connected(self):
        # Guard for the row-level API: connect_table() must have been called first.
        assert self._connected_table is not None, "Operation failed: no connected data table"
    def _assert_resource_is_result(self):
        # The mapping block is only supported on resources with type="results".
        assert self._resource.type == "results", "ModelViewer must be set on a Resource with type=results"
    def _extract_mapping_block(self):
        """
        Extract the annotation block from the current resource and
        initialize the annotation seeker.

        The annotation seeker is left to None when the resource has no annotation.
        """
        model_mapping = self._resource.model_mapping
        if model_mapping is None:
            logger.warning("No model annotation found in the resource")
            return
        self._annotation_seeker = AnnotationSeeker(
            etree.fromstring(model_mapping.mapping_block)
        )
def _squash_join_and_references(self):
"""
Remove both JOINs and REFERENCEs from the templates and store them in to be resolved later on
This avoid to have the model view polluted with elements that are not in the model
"""
for ele in self._templates.xpath("//*[starts-with(name(), 'REFERENCE_')]"):
if ele.get("sourceref") is not None:
self._dyn_references = {ele.tag: deepcopy(ele)}
for child in list(ele):
ele.remove(child)
for ele in self._templates.xpath("//*[starts-with(name(), 'JOIN')]"):
self._joins = {ele.tag: deepcopy(ele)}
for child in list(ele):
ele.remove(child)
| |
Se ha producido un error comprobando apuestas, cancelamos")
print(e)
flag_click = 0
contador = 0
while(flag_click == 0):
try:
cancelar = driver.find_element_by_xpath("(//button[@class='deleteAll button button-md button-clear button-clear-md button-full button-full-md'])[2]/span")
cancelar.click()
flag_click = 1
except:
contador += 1
if (contador == 50):
flag_click = 1
pass
self.cola2.put(False)
descartar = self.cola1.get()
# Apuesta de tipo ganador de test
elif (texto_apuesta[2] == "Set" and texto_apuesta[1] != "Final"):
xpath_aux = "//ion-row[@class = 'rowTitTable row']/ion-col[1]"
numero_set = driver.find_element_by_xpath(xpath_aux)
x = numero_set.text[0]
# Si es el set actual
if(texto_apuesta[3] == x):
self.cola2.put(["Set", texto_apuesta[3], True])
# Si no estamos en el set actual
else:
self.cola2.put(["Set", texto_apuesta[3], False])
# Proceso de apertura de apuesta
flag_click = 0
link_apuesta = driver.find_elements_by_xpath("//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")
if(link_apuesta):
flag_click = 0
while(flag_click == 0):
try:
copyright = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")))
driver.execute_script("return arguments[0].scrollIntoView(true);", copyright)
link_cerrado = driver.find_element_by_xpath("//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")
link_cerrado.click()
flag_click = 1
except:
pass
# Guardamos cotizaciones
cotizaciones_link = driver.find_elements_by_xpath("(//ion-list[@class='events-list list list-md']/div/market-card)[" + str(k + 1) + "]/ion-card/div/ion-grid/ion-row/ion-col/button/span/span")
try:
cotizaciones = [cotizaciones_link[0].text, cotizaciones_link[1].text]
except StaleElementReferenceException:
cotizaciones = [":(", ":("]
check = self.cola1.get()
if (check == True):
self.cola5.put(cotizaciones)
# Proceso de comprobacion de sure bet
datos = self.cola6.get()
# Estamos ante una apuesta segura
# Aqui hay que hacer apuesta
if datos[0] == True:
apuesta1 = datos[1]
apuesta2 = datos[2]
if apuesta1 == 0:
try:
flag_click = 0
while(flag_click == 0):
try:
link_apuesta = driver.find_elements_by_xpath("//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")
if(link_apuesta):
flag_click = 0
while(flag_click == 0):
try:
copyright = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")))
driver.execute_script("return arguments[0].scrollIntoView(true);", copyright)
link_cerrado = driver.find_element_by_xpath("//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")
link_cerrado.click()
flag_click = 1
except:
pass
#copyright = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "(//ion-list[@class='events-list list list-md']/div/market-card)[" + str(k + 1) + "]/ion-card/div/ion-grid/ion-row/ion-col/button/span/span")))
#driver.execute_script("return arguments[0].scrollIntoView(true);", copyright)
apostar = driver.find_elements_by_xpath("(//ion-list[@class='events-list list list-md']/div/market-card)[" + str(k + 1) + "]/ion-card/div/ion-grid/ion-row/ion-col/button/span/span")
apostar[1].click()
print("Se ha hecho click correctamente")
flag_click = 1
except:
pass
print("Codere-> Estamos en el caso de que hay que apostar por el segundo jugador con tipo de apuesta ganador de set")
print("Codere-> Pasamos ahora a comprobar si los datos son correctos en codere")
flag_click = 0
contador = 0
while(flag_click == 0):
try:
jugador = driver.find_element_by_xpath("//span[@class='nameAp-title is-bold']")
flag_click = 1
except:
contador += 1
if contador == 20:
flag_click = 1
pass
print("Codere-> Comparando " + jugador.text.lower() + " con " + participantes[0].lower())
if(similar(jugador.text.lower(), participantes[0].lower()) < 0.7):
print("Codere-> Jugador comprobado correctamente")
todo_correcto = True
else:
todo_correcto = False
apostar = driver.find_element_by_xpath("//p[@class='typeAp']")
comparar = "Ganador del Set " + texto_apuesta[3]
print("Codere-> Comparando " + str(apostar.text) + " con " + comparar)
if (str(apostar.text) == comparar):
print("Codere-> Tipo de apuesta correcto")
else:
todo_correcto = False
print("Codere-> Tipo de apuesta incorrecta")
cotizacion = driver.find_element_by_xpath("//span[@class='nameAp']/b")
print("Codere-> Comparando " + str(cotizacion.text) + " con " + cotizaciones[1])
if (float(cotizacion.text.replace(",", ".")) >= float(cotizaciones[1].replace(",","."))):
print("Codere-> Cotizacion correcta")
else:
todo_correcto = False
print("Codere-> Cotizacion incorrecta")
# Aqui confirmamos a bet
if todo_correcto == True:
print("Codere-> Todo correcto, procedemos a apostar")
dinero = driver.find_element_by_xpath("(//div[@class='ticket-input-wrapper']/ion-input)[3]/input")
dinero.send_keys(Keys.BACK_SPACE)
dinero.send_keys(str(apuesta2))
#ActionChains(driver).move_to_element(dinero).click().key_down(Keys.CONTROL).send_keys('a').key_up(Keys.CONTROL).send_keys(str(apuesta2)).perform()
print("Soy codere y voy a apostar " + str(apuesta2))
self.cola2.put(True)
comprobacion = self.cola1.get()
# Aqui realizariamos apuesta
if (comprobacion == True):
print("Codere realizaria apuesta porque bet confirma")
s.call(['notify-send','Codere','Sure Bet encontrada'])
flag_click = 0
while(flag_click == 0):
try:
aceptar = driver.find_element_by_xpath("(//button[@class='is-ticket-button endAp'])[2]/p")
aceptar.click()
flag_click = 1
print("Codere ha realizado apuesta")
except Exception as e:
print("Error al apostar codere-> " + str(e))
print("Codere no ha realizado apuesta")
pass
else:
print("Codere no realiza apuesta porque bet no confirma")
# En vez de realizar quitamos la apuesta
flag_click = 0
while(flag_click == 0):
try:
cancelar = driver.find_element_by_xpath("(//button[@class='deleteAll button button-md button-clear button-clear-md button-full button-full-md'])[2]/span")
cancelar.click()
flag_click = 1
except:
pass
else:
print("Codere-> Error en la comprobacion, no se va a realizar ninguna apuesta")
self.cola2.put(False)
descartar = self.cola1.get()
flag_click = 0
while(flag_click == 0):
try:
cancelar = driver.find_element_by_xpath("(//button[@class='deleteAll button button-md button-clear button-clear-md button-full button-full-md'])[2]/span")
cancelar.click()
flag_click = 1
except:
pass
except Exception as e:
print("Codere-> Se ha producido un error comprobando apuestas, cancelamos")
print(e)
flag_click = 0
contador = 0
while(flag_click == 0):
try:
cancelar = driver.find_element_by_xpath("(//button[@class='deleteAll button button-md button-clear button-clear-md button-full button-full-md'])[2]/span")
cancelar.click()
flag_click = 1
except:
contador += 1
if (contador == 50):
flag_click = 1
pass
self.cola2.put(False)
descartar = self.cola1.get()
# Hay que apostar por el primer jugador
else:
try:
flag_click = 0
while(flag_click == 0):
try:
link_apuesta = driver.find_elements_by_xpath("//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")
if(link_apuesta):
flag_click = 0
while(flag_click == 0):
try:
copyright = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")))
driver.execute_script("return arguments[0].scrollIntoView(true);", copyright)
link_cerrado = driver.find_element_by_xpath("//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")
link_cerrado.click()
flag_click = 1
except:
pass
#copyright = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "(//ion-list[@class='events-list list list-md']/div/market-card)[" + str(k + 1) + "]/ion-card/div/ion-grid/ion-row/ion-col/button/span/span")))
#driver.execute_script("return arguments[0].scrollIntoView(true);", copyright)
apostar = driver.find_elements_by_xpath("(//ion-list[@class='events-list list list-md']/div/market-card)[" + str(k + 1) + "]/ion-card/div/ion-grid/ion-row/ion-col/button/span/span")
apostar[0].click()
print("Se ha hecho click correctamente")
flag_click = 1
except:
pass
print("Codere-> Estamos en el caso de que hay que apostar por el primer jugador con tipo de apuesta ganador de set")
print("Codere-> Pasamos ahora a comprobar si los datos son correctos en codere")
flag_click = 0
contador = 0
while(flag_click == 0):
try:
jugador = driver.find_element_by_xpath("//span[@class='nameAp-title is-bold']")
flag_click = 1
except:
contador += 1
if contador == 20:
flag_click = 1
pass
print("Codere-> Comparando " + jugador.text.lower() + " con " + participantes[0].lower())
if(similar(jugador.text.lower(), participantes[0].lower()) > 0.7):
print("Codere-> Jugador comprobado correctamente")
todo_correcto = True
else:
todo_correcto = False
apostar = driver.find_element_by_xpath("//p[@class='typeAp']")
comparar = "Ganador del Set " + texto_apuesta[3]
print("Codere-> Comparando " + str(apostar.text) + " con " + comparar)
if (str(apostar.text) == comparar):
print("Codere-> Tipo de apuesta correcto")
else:
todo_correcto = False
print("Codere-> Tipo de apuesta incorrecta")
cotizacion = driver.find_element_by_xpath("//span[@class='nameAp']/b")
print("Codere-> Comparando " + str(cotizacion.text) + " con " + cotizaciones[0])
if (float(cotizacion.text.replace(",", ".")) >= float(cotizaciones[0].replace(",","."))):
print("Codere-> Cotizacion correcta")
else:
todo_correcto = False
print("Codere-> Cotizacion incorrecta")
# Aqui confirmamos a bet
if todo_correcto == True:
print("Codere-> Todo correcto, procedemos a apostar")
dinero = driver.find_element_by_xpath("(//div[@class='ticket-input-wrapper']/ion-input)[3]/input")
dinero.send_keys(Keys.BACK_SPACE)
dinero.send_keys(str(apuesta1))
#ActionChains(driver).move_to_element(dinero).click().key_down(Keys.CONTROL).send_keys('a').key_up(Keys.CONTROL).send_keys(str(apuesta1)).perform()
print("Soy codere y voy a apostar " + str(apuesta1))
self.cola2.put(True)
comprobacion = self.cola1.get()
# Aqui realizariamos apuesta
if (comprobacion == True):
print("Codere realizaria apuesta, porque bet confirma")
s.call(['notify-send','Codere','Sure Bet encontrada'])
flag_click = 0
while(flag_click == 0):
try:
aceptar = driver.find_element_by_xpath("(//button[@class='is-ticket-button endAp'])[2]/p")
aceptar.click()
flag_click = 1
print("Codere ha realizado apuesta")
except Exception as e:
print("Error al apostar codere-> " + str(e))
print("Codere no ha realizado apuesta")
pass
else:
print("Codere no realiza apuesta porque bet no confirma")
flag_click = 0
while(flag_click == 0):
try:
cancelar = driver.find_element_by_xpath("(//button[@class='deleteAll button button-md button-clear button-clear-md button-full button-full-md'])[2]/span")
cancelar.click()
flag_click = 1
except:
pass
else:
print("Codere-> Error en la comprobacion, no se va a realizar ninguna apuesta")
self.cola2.put(False)
descartar = self.cola1.get()
flag_click = 0
while(flag_click == 0):
try:
cancelar = driver.find_element_by_xpath("(//button[@class='deleteAll button button-md button-clear button-clear-md button-full button-full-md'])[2]/span")
cancelar.click()
flag_click = 1
except:
pass
except Exception as e:
print("Codere-> Se ha producido un error comprobando apuestas, cancelamos")
print(e)
flag_click = 0
contador = 0
while(flag_click == 0):
try:
cancelar = driver.find_element_by_xpath("(//button[@class='deleteAll button button-md button-clear button-clear-md button-full button-full-md'])[2]/span")
cancelar.click()
flag_click = 1
except:
contador += 1
if (contador == 50):
flag_click = 1
pass
self.cola2.put(False)
descartar = self.cola1.get()
else:
texto_juego = driver.find_elements_by_xpath("//ion-row[@class='rowContTable row']/ion-col[not(contains(@class,'liveResultSet col serve') or contains(@class,'liveResultSet col'))]")
juego = 0
cuenta = 0
for juego_numero in texto_juego:
cuenta += 1
if(cuenta != len(texto_juego)/2 and cuenta != len(texto_juego)):
try:
a = float(juego_numero.text)
juego = juego + a
except ValueError:
pass
# Apuesta de tipo ganador de punto
# Lo dejo marcado abajo, vamos a hacer pruebas sin ganador de punto
# Apuesta de tipo ganador de juego
if (texto_apuesta[1] == "Juego"):
juego_apuesta = juego + float(texto_apuesta[2])
self.cola2.put(["Juego", str(juego_apuesta)])
# Proceso de apertura de apuesta
flag_click = 0
link_apuesta = driver.find_elements_by_xpath("//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")
if(link_apuesta):
flag_click = 0
while(flag_click == 0):
try:
copyright = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")))
driver.execute_script("return arguments[0].scrollIntoView(true);", copyright)
link_cerrado = driver.find_element_by_xpath("//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")
link_cerrado.click()
flag_click = 1
except:
pass
# Guardamos cotizaciones
cotizaciones_link = driver.find_elements_by_xpath("(//ion-list[@class='events-list list list-md']/div/market-card)[" + str(k + 1) + "]/ion-card/div/ion-grid/ion-row/ion-col/button/span/span")
try:
cotizaciones = [cotizaciones_link[0].text, cotizaciones_link[1].text]
except StaleElementReferenceException:
cotizaciones = [":(", ":("]
check = self.cola1.get()
if (check == True):
# Proceso de obtencion de cotizaciones
self.cola5.put(cotizaciones)
# Proceso de comprobacion de sure bet
datos = self.cola6.get()
# Estamos ante una apuesta segura
# Aqui hay que hacer apuesta
if datos[0] == True:
apuesta1 = datos[1]
apuesta2 = datos[2]
if apuesta1 == 0:
try:
flag_click = 0
while(flag_click == 0):
try:
link_apuesta = driver.find_elements_by_xpath("//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")
if(link_apuesta):
flag_click = 0
while(flag_click == 0):
try:
copyright = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")))
driver.execute_script("return arguments[0].scrollIntoView(true);", copyright)
link_cerrado = driver.find_element_by_xpath("//ion-list[@class='events-list list list-md']/div/market-card[" + str(k + 1) + "]/ion-card[@class='market-card animated card card-md collapsed listadoMercado']")
link_cerrado.click()
flag_click = 1
except:
pass
#copyright = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "(//ion-list[@class='events-list list list-md']/div/market-card)[" + str(k + 1) + "]/ion-card/div/ion-grid/ion-row/ion-col/button/span/span")))
#driver.execute_script("return arguments[0].scrollIntoView(true);", copyright)
apostar = driver.find_elements_by_xpath("(//ion-list[@class='events-list list list-md']/div/market-card)[" + str(k + 1) + "]/ion-card/div/ion-grid/ion-row/ion-col/button/span/span")
apostar[1].click()
print("Se ha hecho click correctamente")
flag_click = 1
except:
pass
print("Codere-> Estamos en el caso de que hay que apostar por el segundo jugador con tipo de apuesta ganador de juego")
print("Codere-> Pasamos ahora a comprobar si los datos son correctos en codere")
flag_click = 0
contador = 0
while(flag_click == 0):
try:
jugador = driver.find_element_by_xpath("//span[@class='nameAp-title is-bold']")
flag_click = 1
except:
contador += 1
if contador == 20:
flag_click = 1
pass
print("Codere-> Comparando " + jugador.text.lower() + " con " + participantes[0].lower())
if(similar(jugador.text.lower(), participantes[0].lower()) < 0.7):
print("Codere-> Jugador comprobado correctamente")
todo_correcto = True
else:
todo_correcto = False
apostar = driver.find_element_by_xpath("//p[@class='typeAp']")
comparar = "Ganador Juego " + texto_apuesta[2] + " del Set " + texto_apuesta[5]
print("Codere-> Comparando " + str(apostar.text) + " con " | |
import argparse
import json
import logging
import operator
import os
import re
import sys
import traceback
import yaml
from collections import defaultdict
from greent.servicecontext import ServiceContext
from greent.graph import Frame
from greent.graph import TypeGraph
from greent.graph_components import KNode, KEdge
from greent.identifiers import Identifiers
from greent.program import Program
from greent.program import QueryDefinition
from greent.synonymization import Synonymizer
#from greent.transreg import TranslatorRegistry
from greent.util import LoggingUtil
from greent.util import Text
from greent.annotators.annotator_factory import annotate_shortcut
# Module-level logger; LoggingUtil wires up the project's standard handler/format.
logger = LoggingUtil.init_logging(__name__, level=logging.INFO)
class Rosetta:
    """ Rosetta translates between semantic domains generically and automatically.

    Based on a configuration file, it builds a directed graph where types are nodes.
    Types are concepts from a model. Edges are annotated with the names of operators used
    to transition between the connected types. The engine can then accept requests to
    translate a term from one domain to another. It does this by collecting transitions
    from the graph and executing the list of transitions. """

    def __init__(self, greentConf=None,
                 config_file=os.path.join(os.path.dirname(__file__), "rosetta.yml"),
                 delete_type_graph=False,
                 init_db=False,
                 build_indexes=False,
                 debug=False):
        """ Load the config file and prepare the type graph.

        :param greentConf: optional path to a greent configuration file.
        :param config_file: path to the rosetta YAML config (operators + type checks).
        :param delete_type_graph: if True, the type graph is deleted entirely.
        :param init_db: if True, the type graph is (re)loaded from the config file.
        :param build_indexes: if True, create neo4j constraints/indexes for identifiers.
        :param debug: forwarded to TypeGraph.
        """
        # NOTE(review): the debug parameter is not stored on self (always False) even
        # though it is forwarded to TypeGraph below — confirm that is intentional.
        self.debug = False
        logger.debug("-- rosetta init.")
        self.service_context = ServiceContext(self, greentConf)
        self.core = self.service_context.core
        # Load configuration.
        with open(config_file, 'r') as stream:
            self.config = yaml.load(stream, Loader=yaml.FullLoader)
            self.operators = self.config["@operators"]
            self.type_checks = self.config["@type_checks"]
        # Abbreviation
        self.cache = self.service_context.cache  # core.service_context.cache
        # Initialize type graph.
        self.type_graph = TypeGraph(self.service_context, debug=debug)
        self.synonymizer = Synonymizer(self.type_graph.concept_model, self)
        # Merge identifiers.org vocabulary into Rosetta vocab.
        self.identifiers = Identifiers()
        if delete_type_graph:
            logger.debug("--Deleting type graph")
            self.type_graph.delete_all()
        if init_db:
            # Initialize type graph metadata.
            self.type_graph.find_or_create_list(self.identifiers.vocab.items())
            # self.configure_local_operators ()
            self.type_graph.configure_operators(self.operators.items())
            # self.configure_translator_registry ()
            self.type_graph.cast_edges(self.type_checks)
        if build_indexes:
            # Create neo4j indices for identifier on different labels.
            self.type_graph.create_constraints()
            self.type_graph.create_indexes()

    # deprecated
    def configure_local_operators(self):
        """ Register the operator transitions declared directly in the Rosetta config. """
        logger.debug("Configure operators in the Rosetta config.")
        for a_concept, transition_list in self.operators.items():
            for b_concept, transitions in transition_list.items():
                for transition in transitions:
                    self.create_concept_transition(
                        a_concept, b_concept, transition['link'], transition['op'])

    def configure_translator_registry(self):
        """ Register operators derived from the Translator Registry.

        NOTE(review): the TranslatorRegistry import is commented out at the top of
        this module, so calling this raises NameError until it is restored.
        """
        logger.debug("Configure operators derived from the Translator Registry.")
        self.core.translator_registry = TranslatorRegistry(self.core.service_context)
        subscriptions = self.core.translator_registry.get_subscriptions()
        registrations = defaultdict(list)
        skip_patterns = [re.compile(v)
                         for v in self.config.get('@translator-registry', {}).get('skip_list', [])]
        for sub in subscriptions:
            in_concept = sub.in_concept
            out_concept = sub.out_concept
            op = f"translator_registry.{sub.op}"
            key = f"{in_concept}-{out_concept}-{op}"
            link = (sub.predicate if sub.predicate else "unknown").upper()
            if any(p.match(sub.op) for p in skip_patterns):
                logger.debug(f"==> Skipping registration of translator API {sub.op} based on configuration setting.")
                continue
            # Register each (in, out, op) triple only once.
            if key in registrations:
                continue
            registrations[key] = sub
            if not in_concept:
                logger.debug(f"Unable to find in concept for {sub}")
            elif not out_concept:
                logger.debug(f"Unable to find out concept for {sub}")
            elif link and op:
                self.create_concept_transition(in_concept, out_concept, link, op)

    def create_concept_transition(self, a_concept, b_concept, link, op):
        """ Create a link between two concepts in the type graph. """
        logger.debug("  -+ {} {} link: {} op: {}".format(a_concept, b_concept, link, op))
        try:
            self.type_graph.add_concepts_edge(a_concept, b_concept, predicate=link, op=op)
        except Exception as e:
            logger.error(f"Failed to create edge from {a_concept} to {b_concept} with link {link} and op {op}")
            logger.error(e)

    def terminate(self, d):
        """ Ensure every string value in dict d ends with a trailing slash (mutates d). """
        for k, v in d.items():
            if isinstance(v, str) and not v.endswith("/"):
                d[k] = "{0}/".format(v)

    def unterminate(self, text):
        """ Return text with a single trailing slash removed, if present. """
        return text[:-1] if text.endswith('/') else text

    def get_ops(self, names):
        """ Dynamically locate python methods on self.core corresponding to names
        configured for semantic links. Accepts one dotted name or a list of them. """
        return operator.attrgetter(names)(self.core) if isinstance(names, str) else [
            operator.attrgetter(n)(self.core) for n in names]

    def log_debug(self, text, cycle=0, if_empty=False):
        """ Debug-log text during the first few cycles only; skip empty text unless if_empty. """
        if cycle < 3:
            if (text and len(text) > 0) or if_empty:
                logger.debug("{}".format(text))

    def construct_knowledge_graph(self, inputs, query):
        """ Run every program derived from the query's knowledge map and concatenate results. """
        programs = self.type_graph.get_knowledge_map_programs(query)
        results = []
        for program in programs:
            print(f"  program --**-->> {program}")
            results += self.execute_knowledge_graph_program(inputs, program)
        return results

    def execute_knowledge_graph_program(self, inputs, program):
        """ Construct a knowledge graph given a set of input nodes and a program - a list
        of frames, each of which contains the name of a concept, a collector containing a list of edges and
        nodes where all target nodes are instances of the frame's concept, and a list of operations for
        transitioning from one frame's concept space to the next frames.
        This method assumes a linear path.
        """
        # Convert inputs to be structured like edges-and-nodes returned by previous services.
        next_nodes = {key: [(None, KNode(val, type=key)) for val in val_list] for key, val_list in inputs.items()}
        logger.debug(f"inputs: {next_nodes}")
        # Validate the input program.
        if len(program) == 0:
            # BUG FIX: this message previously referenced an undefined name `query`
            # (NameError whenever the program was empty).
            logger.info(f"No program found for inputs {inputs}")
            return []
        logger.info(f"program> {program}")
        result = []
        # Each frame's name is a concept. We use the top frame's as a key to index the arguments.
        top_frame = program[0]
        inputs = next_nodes[top_frame.name]
        for i in inputs:
            self.synonymizer.synonymize(i[1])
            annotate_shortcut(i[1], self)
        # Stack is the overall executable. We prepend a base frame with a collector primed with input arguments.
        stack = [Frame(collector=inputs)] + program
        # Execute the program frame by frame.
        for index, frame in enumerate(program):
            for k, o in frame.ops.items():
                logger.debug(f"-- frame-index--> {frame} {index} {k}=>{o.op}")
            # Process each node in the previous frame's collector.
            # BUG FIX: `index` was reset to 0 here, which made every frame re-consume
            # stack[0] (the original inputs) instead of the previous frame's output,
            # contradicting the pipeline described in the docstring.
            for edge, source_node in stack[index].collector:
                # Process each operator in the frame. Loop variable renamed so it no
                # longer shadows the imported `operator` module.
                for op_name, frame_op in frame.ops.items():
                    # Generate a cache key.
                    key = f"{frame_op.op}({source_node.id})"
                    try:
                        logger.debug(f"  --op: {key}")
                        # Load the object from cache.
                        response = self.cache.get(key)
                        if not response:
                            # Invoke the knowledge source with the given input.
                            op = self.get_ops(frame_op.op)
                            if not op:
                                raise Exception(f"Unable to find op: {frame_op.op}")
                            response = op(source_node)
                            for edge, node in response:
                                # Process the edge adding metadata.
                                if isinstance(edge, KEdge):
                                    edge.predicate = frame_op.predicate
                                    edge.source_node = source_node
                                    self.synonymizer.synonymize(node)
                                    annotate_shortcut(node, self)
                                    edge.target_node = node
                                    # Validate the id space of the returned data maps to the target concept.
                                    if index < len(program) - 1:
                                        target_concept_name = program[index + 1].name
                                        prefixes = self.type_graph.concept_model.get(target_concept_name).id_prefixes
                                        valid = any(node.id.upper().startswith(p.upper()) for p in prefixes)
                                        if not valid:
                                            # BUG FIX: message referenced undefined `concept_name`.
                                            logger.debug(
                                                f"Operator {frame_op} wired to type: {target_concept_name} returned node with id: {node.id}")
                            # Cache the annotated and validated response.
                            self.cache.set(key, response)
                        # Add processed edges to the overall result.
                        result += [edge for edge, node in response]
                        logger.debug(f"{key} => {Text.short(response)}")
                        # Response edges go in the collector to become input for the next operation.
                        frame.collector += response
                    except Exception as e:
                        traceback.print_exc()
                        # BUG FIX: was a plain string missing its f-prefix, so the
                        # literal "{key}" was logged instead of the failing op.
                        logger.warning(f"Error invoking> {key}")
        logger.debug(f"returning {len(result)} values.")
        return result

    def get_knowledge_graph(self, inputs, query, ends=None):
        """ Handles two sided queries and direction changes.

        :param inputs: dict with "type" and "values" describing the start nodes.
        :param query: transition query executed against the type graph.
        :param ends: optional terminal values for two-sided queries.
        :return: list of graph elements accumulated from every generated program.
        """
        print(f"query: {query}")
        print(f"inputs: {inputs}")
        print(f"ends: {ends}")
        graph = []
        query_definition = QueryDefinition()
        query_definition.start_type = inputs["type"]
        query_definition.start_values = inputs["values"]
        query_definition.end_values = ends
        plans = self.type_graph.get_transitions(query)
        programs = [Program(plan, query_definition=query_definition, rosetta=self, program_number=i)
                    for i, plan in enumerate(plans)]
        for program in programs:
            graph += program.run_program()
        return graph

    def n2chem(self, name):
        """ Resolve a drug name to chemical identifiers via CTD, Pharos and chembio. """
        return self.core.ctd.drugname_string_to_drug_identifier(name) + \
            [x[0] for x in self.core.pharos.drugname_string_to_pharos_info(name)] + \
            ['PUBCHEM:{}'.format(r['drugID'].split('/')[-1]) for r in self.core.chembio.drugname_to_pubchem(name)]

    def n2disease(self, name):
        """ Resolve a disease name to identifiers; performs a case-insensitive exact
        match and also inverts comma-ed names. """
        return self.core.mondo.search(name)
def execute_query(args, outputs, rosetta):
    """Run a knowledge-graph query through rosetta and assert every expected node appears.

    :param args: kwargs forwarded to rosetta.construct_knowledge_graph.
    :param outputs: dict with a "nodes" list of expected target-node ids.
    :param rosetta: the Rosetta engine to query.
    :return: tuple of (blackboard edges, lower-cased result ids).
    """
    blackboard = rosetta.construct_knowledge_graph(**args)
    # Comparisons are case-insensitive: lower-case both sides up front.
    expect = [node_id.lower() for node_id in outputs['nodes']]
    ids = [edge.target_node.id.lower() for edge in blackboard]
    logger.debug(f"Received {len(ids)} nodes.")
    logger.debug(f"Expected {len(expect)} nodes.")
    logger.debug(f"  ==> ids: {ids}")
    matched = 0
    for o in expect:
        if o in ids:
            matched += 1
        else:
            logger.error(f"  {o} not in ids")
    assert matched == len(expect)
    return blackboard, ids
def test_disease_gene(rosetta):
execute_query(**{
"args": {
"inputs": {
"disease": [
"DOID:2841"
]
},
"query":
"""MATCH (a:disease),(b:gene), p = allShortestPaths((a)-[*]->(b))
WHERE NONE (r IN relationships(p) WHERE type(r) = 'UNKNOWN' OR r.op is null)
RETURN p"""
},
"outputs": {
"nodes": ['ncbigene:191585', 'ncbigene:2289', 'ncbigene:4057', 'ncbigene:1442', 'ncbigene:4843',
'ncbigene:165829', 'ncbigene:5739', 'ncbigene:79034', 'ncbigene:7031', 'ncbigene:1048',
'ncbigene:80206', 'ncbigene:6541', 'ncbigene:340547', 'ncbigene:55600', 'ncbigene:55076',
'ncbigene:9173', 'ncbigene:115362', 'ncbigene:85413', 'ncbigene:948', 'ncbigene:56521',
'ncbigene:2043', 'ncbigene:133308', 'ncbigene:1359', 'ncbigene:1475', 'ncbigene:1469',
'ncbigene:1803', 'ncbigene:6402', 'ncbigene:11254', 'ncbigene:5625', 'ncbigene:29126',
'ncbigene:137835', 'ncbigene:5744', 'ncbigene:10964', 'ncbigene:10085', 'ncbigene:6783',
'ncbigene:6318', 'ncbigene:7903', 'ncbigene:55107', 'ncbigene:3081', 'ncbigene:60437',
'ncbigene:1178', 'ncbigene:59340', 'ncbigene:7033', 'ncbigene:760', 'ncbigene:1470',
'ncbigene:3371', 'ncbigene:10631', 'ncbigene:6528', 'ncbigene:400823', 'ncbigene:117157',
'ncbigene:405753', 'ncbigene:154064', 'ncbigene:202333', 'ncbigene:150', 'ncbigene:1179',
'ncbigene:84830', 'ncbigene:2015', 'ncbigene:25803', 'ncbigene:5055', | |
# <gh_stars>10-100  (scraper artifact; commented out so the module parses)
"""
qtl.py contains wrappers around C++ Limix objects to streamline common tasks in GWAS.
"""
import scipy as SP
import scipy.stats as ST
import limix
import limix.utils.preprocess as preprocess
import limix.deprecated.modules.varianceDecomposition as VAR
import limix.utils.fdr as FDR
import time
#TODO: externally visible function?
#I propose to make this internal using _
def estimateKronCovariances(phenos,K1r=None,K1c=None,K2r=None,K2c=None,covs=None,Acovs=None,covar_type='lowrank_diag',rank=1):
    """
    Estimate the background covariance model before testing.

    Args:
        phenos:     [N x P] SP.array of P phenotypes for N individuals
        K1r:        [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
                    If not provided, then linear regression analysis is performed
        K1c:        [P x P] SP.array of LMM-covariance/kinship coefficients (optional)
                    If not provided, then linear regression analysis is performed
        K2r:        [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
                    If not provided, then linear regression analysis is performed
        K2c:        [P x P] SP.array of LMM-covariance/kinship coefficients (optional)
                    If not provided, then linear regression analysis is performed
        covs:       list of SP.arrays holding covariates. Each covs[i] has one corresponding Acovs[i]
        Acovs:      list of SP.arrays holding the phenotype design matrices for covariates.
                    Each covs[i] has one corresponding Acovs[i].
        covar_type: type of covariance to use. Default 'freeform'. Possible values are
                    'freeform':     free form optimization,
                    'fixed':        use a fixed matrix specified in covar_K0,
                    'diag':         optimize a diagonal matrix,
                    'lowrank':      optimize a low rank matrix. The rank of the lowrank part is specified in the variable rank,
                    'lowrank_id':   optimize a low rank matrix plus the weight of a constant diagonal matrix. The rank of the lowrank part is specified in the variable rank,
                    'lowrank_diag': optimize a low rank matrix plus a free diagonal matrix. The rank of the lowrank part is specified in the variable rank,
                    'block':        optimize the weight of a constant P x P block matrix of ones,
                    'block_id':     optimize the weight of a constant P x P block matrix of ones plus the weight of a constant diagonal matrix,
                    'block_diag':   optimize the weight of a constant P x P block matrix of ones plus a free diagonal matrix,
        rank:       rank of a possible lowrank component (default 1)

    Returns:
        CVarianceDecomposition object holding the trained background model.

    Raises:
        RuntimeError: if the variance decomposition fails to converge.
    """
    print(".. Training the background covariance with a GP model")
    vc = VAR.CVarianceDecomposition(phenos)
    if K1r is not None:
        vc.addRandomEffect(K1r,covar_type=covar_type,rank=rank)
    if K2r is not None:
        #TODO: fix this; forces second term to be the noise covariance
        vc.addRandomEffect(is_noise=True,K=K2r,covar_type=covar_type,rank=rank)
    # BUG FIX: the original unconditionally iterated range(len(Acovs)), which raised
    # TypeError when Acovs was None; also pair covs/Acovs with zip instead of indexing.
    if Acovs:
        for cov, Acov in zip(covs, Acovs):
            vc.addFixedEffect(cov, Acov)
    start = time.time()
    conv = vc.findLocalOptimum(fast=True)
    # Raise instead of assert so the convergence check survives `python -O`.
    if not conv:
        raise RuntimeError("CVariance Decomposition has not converged")
    time_el = time.time()-start
    print("Background model trained in %.2f s" % time_el)
    return vc
#TODO: externally visible function?
#what does this do?
def updateKronCovs(covs, Acovs, N, P):
    """
    Normalize covariates for Kronecker models: make sure covs and Acovs are
    matching lists, supplying an intercept design when neither is given.

    Args:
        covs:  covariate array or list of arrays (or None)
        Acovs: phenotype design array or list of arrays (or None); paired 1:1 with covs
        N:     number of individuals (rows of the default intercept covariate)
        P:     number of phenotypes (size of the default identity design)

    Returns:
        (covs, Acovs) as equal-length lists.

    Raises:
        Exception: if exactly one of covs/Acovs is None, or if they end up as
                   non-lists or lists of differing length.
    """
    # Neither given: default to an intercept column with an identity trait design.
    if (covs is None) and (Acovs is None):
        covs = [SP.ones([N, 1])]
        Acovs = [SP.eye(P)]
    if Acovs is None or covs is None:
        raise Exception("Either Acovs or covs is None, while the other isn't")
    # Single arrays are wrapped into one-element lists.
    if (not isinstance(Acovs, list)) and (not isinstance(covs, list)):
        Acovs = [Acovs]
        covs = [covs]
    if (not isinstance(covs, list)) or (not isinstance(Acovs, list)) or (len(covs) != len(Acovs)):
        # BUG FIX: error message typo ("missmatch") corrected.
        raise Exception("Either Acovs or covs is not a list or they mismatch in length")
    return covs, Acovs
def simple_interaction_kronecker_deprecated(snps,phenos,covs=None,Acovs=None,Asnps1=None,Asnps0=None,K1r=None,K1c=None,K2r=None,K2c=None,covar_type='lowrank_diag',rank=1,searchDelta=False):
    """
    I-variate fixed effects interaction test for phenotype specific SNP effects.
    (Runs multiple likelihood ratio tests and computes the P-values in python from the likelihood ratios)

    Args:
        snps:   [N x S] SP.array of S SNPs for N individuals (test SNPs)
        phenos: [N x P] SP.array of P phenotypes for N individuals
        covs:   list of SP.arrays holding covariates. Each covs[i] has one corresponding Acovs[i]
        Acovs:  list of SP.arrays holding the phenotype design matrices for covariates.
                Each covs[i] has one corresponding Acovs[i].
        Asnps1: list of SP.arrays of I interaction variables to be tested for N
                individuals. Note that it is assumed that Asnps0 is already included.
                If not provided, the alternative model will be the independent model
        Asnps0: single SP.array of I0 interaction variables to be included in the
                background model when testing for interaction with Inters
        K1r:    [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
                If not provided, then linear regression analysis is performed
        K1c:    [P x P] SP.array of LMM-covariance/kinship coefficients (optional)
                If not provided, then linear regression analysis is performed
        K2r:    [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
                If not provided, then linear regression analysis is performed
        K2c:    [P x P] SP.array of LMM-covariance/kinship coefficients (optional)
                If not provided, then linear regression analysis is performed
        covar_type: type of covariance to use. Default 'freeform'. Possible values are
                    'freeform', 'fixed', 'diag', 'lowrank', 'lowrank_id',
                    'lowrank_diag', 'block', 'block_id', 'block_diag'
                    (see estimateKronCovariances for details).
        rank:        rank of a possible lowrank component (default 1)
        searchDelta: Boolean indicator if delta is optimized during SNP testing (default False)

    Returns:
        pv:     P-values of the interaction test
        lrt0:   log likelihood ratio statistics of the null model
        pv0:    P-values of the null model
        lrt:    log likelihood ratio statistics of the interaction test
        lrtAlt: log likelihood ratio statistics of the alternative model
        pvAlt:  P-values of the alternative model
    """
    #0. checks
    N = phenos.shape[0]
    P = phenos.shape[1]
    # BUG FIX: `K1r == None` (and the other kinship checks below) compares a numpy
    # array elementwise, which is ambiguous in an `if`; use `is None` instead.
    if K1r is None:
        K1r = SP.dot(snps, snps.T)
    else:
        assert K1r.shape[0] == N, 'K1r: dimensions mismatch'
        assert K1r.shape[1] == N, 'K1r: dimensions mismatch'
    if K2r is None:
        K2r = SP.eye(N)
    else:
        assert K2r.shape[0] == N, 'K2r: dimensions mismatch'
        assert K2r.shape[1] == N, 'K2r: dimensions mismatch'
    covs, Acovs = updateKronCovs(covs, Acovs, N, P)
    #Asnps can be several designs
    if Asnps0 is None:
        Asnps0 = [SP.ones([1, P])]
    if Asnps1 is None:
        # BUG FIX: was SP.eye([P]), which raises TypeError (eye takes an int).
        Asnps1 = [SP.eye(P)]
    if not isinstance(Asnps0, list):
        Asnps0 = [Asnps0]
    if not isinstance(Asnps1, list):
        Asnps1 = [Asnps1]
    assert (len(Asnps0) == 1) and (len(Asnps1) > 0), "need at least one Snp design matrix for null and alt model"
    #one row per column design matrix
    pv = SP.zeros((len(Asnps1), snps.shape[1]))
    lrt = SP.zeros((len(Asnps1), snps.shape[1]))
    pvAlt = SP.zeros((len(Asnps1), snps.shape[1]))
    lrtAlt = SP.zeros((len(Asnps1), snps.shape[1]))
    #1. run GP model to infer suitable covariance structure
    if K1c is None or K2c is None:
        vc = estimateKronCovariances(phenos=phenos, K1r=K1r, K2r=K2r, K1c=K1c, K2c=K2c, covs=covs, Acovs=Acovs, covar_type=covar_type, rank=rank)
        K1c = vc.getEstTraitCovar(0)
        K2c = vc.getEstTraitCovar(1)
    else:
        assert K1c.shape[0] == P, 'K1c: dimensions mismatch'
        assert K1c.shape[1] == P, 'K1c: dimensions mismatch'
        assert K2c.shape[0] == P, 'K2c: dimensions mismatch'
        assert K2c.shape[1] == P, 'K2c: dimensions mismatch'
    #2. run kroneckerLMM for null model
    lmm = limix.CKroneckerLMM()
    lmm.setK1r(K1r)
    lmm.setK1c(K1c)
    lmm.setK2r(K2r)
    lmm.setK2c(K2c)
    lmm.setSNPs(snps)
    #add covariates
    for ic in range(len(Acovs)):
        lmm.addCovariates(covs[ic], Acovs[ic])
    lmm.setPheno(phenos)
    # Search delta on the alternative model only when requested; always on the null.
    if searchDelta:
        lmm.setNumIntervalsAlt(100)
    else:
        lmm.setNumIntervalsAlt(0)
    lmm.setNumIntervals0(100)
    #add SNP design
    lmm.setSNPcoldesign(Asnps0[0])
    lmm.process()
    dof0 = Asnps0[0].shape[0]
    pv0 = lmm.getPv()
    lrt0 = ST.chi2.isf(pv0, dof0)
    for iA in range(len(Asnps1)):
        dof1 = Asnps1[iA].shape[0]
        dof = dof1 - dof0
        lmm.setSNPcoldesign(Asnps1[iA])
        lmm.process()
        pvAlt[iA, :] = lmm.getPv()[0]
        lrtAlt[iA, :] = ST.chi2.isf(pvAlt[iA, :], dof1)
        # Null model is shared between the two tests, so the interaction LRT is
        # simply the difference of the LRT statistics.
        lrt[iA, :] = lrtAlt[iA, :] - lrt0[0]
        pv[iA, :] = ST.chi2.sf(lrt[iA, :], dof)
    return pv, lrt0, pv0, lrt, lrtAlt, pvAlt
#TODO: (O.S), I have changed the parametrization of delta optimization steps. Happy with that?
#TODO: Do we really want to keep these "simple_XXX" names? Which functions are simple, which ones are not? I don't like it.
def simple_interaction_kronecker(snps,phenos,covs=None,Acovs=None,Asnps1=None,Asnps0=None,K1r=None,K1c=None,K2r=None,K2c=None,covar_type='lowrank_diag',rank=1,NumIntervalsDelta0=100,NumIntervalsDeltaAlt=0,searchDelta=False):
"""
I-variate fixed effects interaction test for phenotype specific SNP effects
Args:
snps: [N x S] SP.array of S SNPs for N individuals (test SNPs)
phenos: [N x P] SP.array of P phenotypes for N individuals
covs: list of SP.arrays holding covariates. Each covs[i] has one corresponding Acovs[i]
Acovs: list of SP.arrays holding the phenotype design matrices for covariates.
Each covs[i] has one corresponding Acovs[i].
Asnps1: list of SP.arrays of I interaction variables to be tested for N
individuals. Note that it is assumed that Asnps0 is already included.
If not provided, the alternative model will be the independent model
Asnps0: single | |
from __future__ import (division)
from pomegranate import *
from pomegranate.io import DataGenerator
from pomegranate.io import DataFrameGenerator
from nose.tools import with_setup
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_less_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from numpy.testing import assert_array_almost_equal
import pandas
import random
import pickle
import numpy as np
# Use the module's `np` alias: the file imports `numpy as np`, so the bare `numpy`
# name only resolved if `from pomegranate import *` happened to re-export it.
nan = np.nan
def setup_multivariate_gaussian():
    """Fixture: Bayes classifier over two 3-D unit-covariance Gaussians (means at the
    origin and at (2, 2, 2)), plus a dense dataset, labels, and a NaN-holed copy."""
    # Consistently use the `np` alias (the file imports numpy as np; the bare `numpy`
    # name relied on pomegranate's wildcard import).
    mu, cov = [0, 0, 0], np.eye(3)
    d1 = MultivariateGaussianDistribution(mu, cov)

    mu, cov = [2, 2, 2], np.eye(3)
    d2 = MultivariateGaussianDistribution(mu, cov)

    global model
    model = BayesClassifier([d1, d2])

    global X
    X = np.array([[ 0.3,  0.5,  0.1],
                  [ 0.8,  1.4,  0.5],
                  [ 1.4,  2.6,  1.8],
                  [ 4.2,  3.3,  3.7],
                  [ 2.6,  3.6,  3.3],
                  [ 3.1,  2.2,  1.7],
                  [ 1.8,  2.2,  1.8],
                  [-1.2, -1.8, -1.5],
                  [-1.8,  0.3,  0.5],
                  [ 0.7, -1.3, -0.1]])

    global y
    y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]

    global X_nan
    X_nan = np.array([[ 0.3,  nan,  0.1],
                      [ nan,  1.4,  nan],
                      [ 1.4,  2.6,  nan],
                      [ nan,  nan,  nan],
                      [ nan,  3.6,  3.3],
                      [ 3.1,  nan,  1.7],
                      [ nan,  nan,  1.8],
                      [-1.2, -1.8, -1.5],
                      [ nan,  0.3,  0.5],
                      [ nan, -1.3,  nan]])
def setup_multivariate_mixed():
    """Fixture: Bayes classifier mixing a 3-D Gaussian with an independent-components
    distribution (exponential, log-normal, Poisson), plus data, labels and NaN copy."""
    # Consistently use the `np` alias (the file imports numpy as np; the bare `numpy`
    # name relied on pomegranate's wildcard import).
    mu, cov = [0, 0, 0], np.eye(3)
    d1 = MultivariateGaussianDistribution(mu, cov)

    d21 = ExponentialDistribution(5)
    d22 = LogNormalDistribution(0.2, 0.8)
    d23 = PoissonDistribution(3)
    d2 = IndependentComponentsDistribution([d21, d22, d23])

    global model
    model = BayesClassifier([d1, d2])

    global X
    X = np.array([[ 0.3,  0.5,  0.1],
                  [ 0.8,  1.4,  0.5],
                  [ 1.4,  2.6,  1.8],
                  [ 4.2,  3.3,  3.7],
                  [ 2.6,  3.6,  3.3],
                  [ 3.1,  2.2,  1.7],
                  [ 1.8,  2.2,  1.8],
                  [ 1.2,  1.8,  1.5],
                  [ 1.8,  0.3,  0.5],
                  [ 0.7,  1.3,  0.1]])

    global y
    y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]

    global X_nan
    X_nan = np.array([[ 0.3,  nan,  0.1],
                      [ nan,  1.4,  nan],
                      [ 1.4,  2.6,  nan],
                      [ nan,  nan,  nan],
                      [ nan,  3.6,  3.3],
                      [ 3.1,  nan,  1.7],
                      [ nan,  nan,  1.8],
                      [ 1.2,  1.8,  1.5],
                      [ nan,  0.3,  0.5],
                      [ nan,  1.3,  nan]])
def setup_hmm():
    """Fixture: three coin-flip HMMs (always-biased, always-fair, and a uniform
    switcher over both coins) wrapped in a Bayes classifier, bound to module globals."""
    global model
    global hmm1
    global hmm2
    global hmm3

    # The same two State objects are shared by all three models.
    biased = State(DiscreteDistribution({'H': 0.8, 'T': 0.2}))
    fair = State(DiscreteDistribution({'H': 0.5, 'T': 0.5}))

    # hmm1: stays on the biased coin forever.
    hmm1 = HiddenMarkovModel()
    hmm1.start = biased
    hmm1.add_transition(biased, biased, 1)
    hmm1.bake()

    # hmm2: stays on the fair coin forever.
    hmm2 = HiddenMarkovModel()
    hmm2.start = fair
    hmm2.add_transition(fair, fair, 1)
    hmm2.bake()

    # hmm3: picks either coin uniformly at random at every step.
    hmm3 = HiddenMarkovModel()
    hmm3.add_transition(hmm3.start, fair, 0.5)
    hmm3.add_transition(hmm3.start, biased, 0.5)
    hmm3.add_transition(biased, biased, 0.5)
    hmm3.add_transition(biased, fair, 0.5)
    hmm3.add_transition(fair, biased, 0.5)
    hmm3.add_transition(fair, fair, 0.5)
    hmm3.bake()

    model = BayesClassifier([hmm1, hmm2, hmm3])
def setup_multivariate():
    """No-op setup hook kept for symmetry with the other fixtures."""
    pass
def teardown():
    """No-op teardown hook; fixtures are rebuilt from scratch by each setup."""
    pass
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_initialization():
    """The two-Gaussian classifier reports 3 dimensions, 2 components, and
    fixed-length (non-variable-length) inputs."""
    for attribute, expected in (("d", 3), ("n", 2), ("is_vl_", False)):
        assert_equal(getattr(model, attribute), expected)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_initialization():
    """The mixed Gaussian/ICD classifier reports 3 dimensions, 2 components, and
    fixed-length (non-variable-length) inputs."""
    for attribute, expected in (("d", 3), ("n", 2), ("is_vl_", False)):
        assert_equal(getattr(model, attribute), expected)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba():
	# Posterior log-probabilities, one [class0, class1] row per sample; the fixture
	# values are the expected posteriors for the two-Gaussian classifier from setup.
	y_hat = model.predict_log_proba(X)
	y = [[ -1.48842547e-02, -4.21488425e+00],
	     [ -4.37487950e-01, -1.03748795e+00],
	     [ -5.60369104e+00, -3.69104343e-03],
	     [ -1.64000001e+01, -7.54345812e-08],
	     [ -1.30000023e+01, -2.26032685e-06],
	     [ -8.00033541e+00, -3.35406373e-04],
	     [ -5.60369104e+00, -3.69104343e-03],
	     [ -3.05902274e-07, -1.50000003e+01],
	     [ -3.35406373e-04, -8.00033541e+00],
	     [ -6.11066022e-04, -7.40061107e+00]]

	assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.99533332e-02, -3.23995333e+00],
[ -1.17110067e+00, -3.71100666e-01],
[ -4.01814993e+00, -1.81499279e-02],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.80005545e+00, -5.54500620e-05],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.78390074e+00, -1.83900741e-01],
[ -3.05902274e-07, -1.50000003e+01],
[ -8.68361522e-02, -2.48683615e+00],
[ -1.00016521e-02, -4.61000165e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.57980882e-01, -1.20093223e+00],
[ -1.20735130e+00, -3.55230506e-01],
[ -2.43174286e-01, -1.53310132e+00],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.31781101e+00, -8.98143220e-05],
[ -6.29755079e-04, -7.37049444e+00],
[ -1.31307006e+00, -3.13332194e-01],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.29725479e-01, -1.58353505e+00],
[ -1.17299253e+00, -3.70251760e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba_parallel():
    # Parallel execution (n_jobs=2) must match the serial golden values.
    y_hat = model.predict_log_proba(X, n_jobs=2)
    y = [[ -1.48842547e-02, -4.21488425e+00],
         [ -4.37487950e-01, -1.03748795e+00],
         [ -5.60369104e+00, -3.69104343e-03],
         [ -1.64000001e+01, -7.54345812e-08],
         [ -1.30000023e+01, -2.26032685e-06],
         [ -8.00033541e+00, -3.35406373e-04],
         [ -5.60369104e+00, -3.69104343e-03],
         [ -3.05902274e-07, -1.50000003e+01],
         [ -3.35406373e-04, -8.00033541e+00],
         [ -6.11066022e-04, -7.40061107e+00]]
    assert_array_almost_equal(y, y_hat)

@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba_parallel():
    # Parallel log-posterior for the mixed classifier.
    y_hat = model.predict_log_proba(X, n_jobs=2)
    y = [[ -5.03107596e-01, -9.27980626e-01],
         [ -1.86355320e-01, -1.77183117e+00],
         [ -5.58542088e-01, -8.48731256e-01],
         [ -7.67315597e-01, -6.24101927e-01],
         [ -2.32860808e+00, -1.02510436e-01],
         [ -3.06641866e-03, -5.78877778e+00],
         [ -9.85292840e-02, -2.36626165e+00],
         [ -2.61764180e-01, -1.46833995e+00],
         [ -2.01640009e-03, -6.20744952e+00],
         [ -1.47371167e-01, -1.98758175e+00]]
    assert_array_almost_equal(y, y_hat)

@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba():
    # Posterior probabilities (exp of the log-posteriors above).
    y_hat = model.predict_proba(X)
    y = [[ 9.85225968e-01, 1.47740317e-02],
         [ 6.45656306e-01, 3.54343694e-01],
         [ 3.68423990e-03, 9.96315760e-01],
         [ 7.54345778e-08, 9.99999925e-01],
         [ 2.26032430e-06, 9.99997740e-01],
         [ 3.35350130e-04, 9.99664650e-01],
         [ 3.68423990e-03, 9.96315760e-01],
         [ 9.99999694e-01, 3.05902227e-07],
         [ 9.99664650e-01, 3.35350130e-04],
         [ 9.99389121e-01, 6.10879359e-04]]
    assert_array_almost_equal(y, y_hat)

@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba():
    # Posterior probabilities for the mixed classifier.
    y_hat = model.predict_proba(X)
    y = [[ 0.60464873, 0.39535127],
         [ 0.82997863, 0.17002137],
         [ 0.57204244, 0.42795756],
         [ 0.46425765, 0.53574235],
         [ 0.09743127, 0.90256873],
         [ 0.99693828, 0.00306172],
         [ 0.90616916, 0.09383084],
         [ 0.76969251, 0.23030749],
         [ 0.99798563, 0.00201437],
         [ 0.86297361, 0.13702639]]
    assert_array_almost_equal(y, y_hat)

@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_proba():
    # nan entries are marginalized; all-nan rows yield the 50/50 prior.
    y_hat = model.predict_proba(X_nan)
    y = [[ 9.60834277e-01, 3.91657228e-02],
         [ 3.10025519e-01, 6.89974481e-01],
         [ 1.79862100e-02, 9.82013790e-01],
         [ 5.00000000e-01, 5.00000000e-01],
         [ 5.54485247e-05, 9.99944551e-01],
         [ 3.68423990e-03, 9.96315760e-01],
         [ 1.67981615e-01, 8.32018385e-01],
         [ 9.99999694e-01, 3.05902227e-07],
         [ 9.16827304e-01, 8.31726965e-02],
         [ 9.90048198e-01, 9.95180187e-03]]
    assert_array_almost_equal(y, y_hat)

@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_proba():
    # Same nan-handling check for the mixed classifier.
    y_hat = model.predict_proba(X_nan)
    y = [[ 6.99086440e-01, 3.00913560e-01],
         [ 2.98988163e-01, 7.01011837e-01],
         [ 7.84134838e-01, 2.15865162e-01],
         [ 5.00000000e-01, 5.00000000e-01],
         [ 8.98102888e-05, 9.99910190e-01],
         [ 9.99370443e-01, 6.29556825e-04],
         [ 2.68992964e-01, 7.31007036e-01],
         [ 7.69692511e-01, 2.30307489e-01],
         [ 7.94751748e-01, 2.05248252e-01],
         [ 3.09439547e-01, 6.90560453e-01]]
    assert_array_almost_equal(y, y_hat)

@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba_parallel():
    # Parallel probabilities must equal the serial results.
    y_hat = model.predict_proba(X, n_jobs=2)
    y = [[ 9.85225968e-01, 1.47740317e-02],
         [ 6.45656306e-01, 3.54343694e-01],
         [ 3.68423990e-03, 9.96315760e-01],
         [ 7.54345778e-08, 9.99999925e-01],
         [ 2.26032430e-06, 9.99997740e-01],
         [ 3.35350130e-04, 9.99664650e-01],
         [ 3.68423990e-03, 9.96315760e-01],
         [ 9.99999694e-01, 3.05902227e-07],
         [ 9.99664650e-01, 3.35350130e-04],
         [ 9.99389121e-01, 6.10879359e-04]]
    assert_array_almost_equal(y, y_hat)

@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba_parallel():
    # Parallel probabilities for the mixed classifier.
    y_hat = model.predict_proba(X, n_jobs=2)
    y = [[ 0.60464873, 0.39535127],
         [ 0.82997863, 0.17002137],
         [ 0.57204244, 0.42795756],
         [ 0.46425765, 0.53574235],
         [ 0.09743127, 0.90256873],
         [ 0.99693828, 0.00306172],
         [ 0.90616916, 0.09383084],
         [ 0.76969251, 0.23030749],
         [ 0.99798563, 0.00201437],
         [ 0.86297361, 0.13702639]]
    assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict():
    # Hard class assignments (argmax of the posteriors).
    y_hat = model.predict(X)
    y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
    assert_array_almost_equal(y, y_hat)

@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict():
    # Hard assignments for the mixed classifier.
    y_hat = model.predict(X)
    y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
    assert_array_almost_equal(y, y_hat)

@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict():
    # Hard assignments with missing data.
    y_hat = model.predict(X_nan)
    y = [0, 1, 1, 0, 1, 1, 1, 0, 0, 0]
    assert_array_almost_equal(y, y_hat)

@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict():
    # Hard assignments with missing data, mixed classifier.
    y_hat = model.predict(X_nan)
    y = [0, 1, 0, 0, 1, 0, 1, 0, 0, 1]
    assert_array_almost_equal(y, y_hat)

@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_parallel():
    # Parallel prediction must match the serial assignments.
    y_hat = model.predict(X, n_jobs=2)
    y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
    assert_array_almost_equal(y, y_hat)

@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_parallel():
    # Parallel prediction for the mixed classifier.
    y_hat = model.predict(X, n_jobs=2)
    y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
    assert_array_almost_equal(y, y_hat)

@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_fit_parallel():
    # Supervised fit with two jobs: check per-class means and covariances.
    model.fit(X, y, n_jobs=2)
    mu1 = model.distributions[0].parameters[0]
    cov1 = model.distributions[0].parameters[1]
    mu1_t = [0.03333333, 0.28333333, 0.21666666]
    cov1_t = [[1.3088888, 0.9272222, 0.6227777],
              [0.9272222, 2.2513888, 1.3402777],
              [0.6227777, 1.3402777, 0.9547222]]
    mu2 = model.distributions[1].parameters[0]
    cov2 = model.distributions[1].parameters[1]
    mu2_t = [2.925, 2.825, 2.625]
    cov2_t = [[0.75687499, 0.23687499, 0.4793750],
              [0.23687499, 0.40187499, 0.5318749],
              [0.47937500, 0.53187499, 0.7868750]]
    assert_array_almost_equal(mu1, mu1_t)
    assert_array_almost_equal(cov1, cov1_t)
    assert_array_almost_equal(mu2, mu2_t)
    assert_array_almost_equal(cov2, cov2_t)

@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_fit_parallel():
    # Parallel fit of the mixed classifier: Gaussian component parameters
    # plus the three independent components of the second distribution.
    model.fit(X, y, n_jobs=2)
    mu1 = model.distributions[0].parameters[0]
    cov1 = model.distributions[0].parameters[1]
    mu1_t = [1.033333, 1.3166667, 0.75]
    cov1_t = [[0.242222, 0.0594444, 0.178333],
              [0.059444, 0.5980555, 0.414166],
              [0.178333, 0.4141666, 0.439166]]
    d21 = model.distributions[1].distributions[0]
    d22 = model.distributions[1].distributions[1]
    d23 = model.distributions[1].distributions[2]
    assert_array_almost_equal(mu1, mu1_t)
    assert_array_almost_equal(cov1, cov1_t)
    assert_array_almost_equal(d21.parameters, [0.34188034])
    assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346])
    assert_array_almost_equal(d23.parameters, [2.625])
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_from_samples():
    # from_samples must learn the same parameters as fitting in place.
    model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y)
    mu1 = model.distributions[0].parameters[0]
    cov1 = model.distributions[0].parameters[1]
    mu1_t = [0.03333333, 0.2833333, 0.21666666]
    cov1_t = [[1.308888888, 0.9272222222, 0.6227777777],
              [0.927222222, 2.251388888, 1.340277777],
              [0.622777777, 1.340277777, 0.9547222222]]
    mu2 = model.distributions[1].parameters[0]
    cov2 = model.distributions[1].parameters[1]
    mu2_t = [2.925, 2.825, 2.625]
    cov2_t = [[0.75687500, 0.23687499, 0.47937500],
              [0.23687499, 0.40187499, 0.53187499],
              [0.47937500, 0.53187499, 0.78687500]]
    assert_array_almost_equal(mu1, mu1_t)
    assert_array_almost_equal(cov1, cov1_t)
    assert_array_almost_equal(mu2, mu2_t)
    assert_array_almost_equal(cov2, cov2_t)

@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_pickle():
    # Pickle round-trip preserves component types and weights.
    model2 = pickle.loads(pickle.dumps(model))
    assert_true(isinstance(model2, BayesClassifier))
    assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
    assert_array_almost_equal(model.weights, model2.weights)

@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_pickle():
    # Pickle round-trip for the mixed classifier.
    model2 = pickle.loads(pickle.dumps(model))
    assert_true(isinstance(model2, BayesClassifier))
    assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
    assert_array_almost_equal(model.weights, model2.weights)

@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_to_json():
    # JSON round-trip via the class's own from_json.
    model2 = BayesClassifier.from_json(model.to_json())
    assert_true(isinstance(model2, BayesClassifier))
    assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
    assert_array_almost_equal(model.weights, model2.weights)

@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_to_json():
    # JSON round-trip for the mixed classifier.
    model2 = BayesClassifier.from_json(model.to_json())
    assert_true(isinstance(model2, BayesClassifier))
    assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
    assert_array_almost_equal(model.weights, model2.weights)

@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_robust_from_json():
    # The module-level from_json must also reconstruct the classifier.
    model2 = from_json(model.to_json())
    assert_true(isinstance(model2, BayesClassifier))
    assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
    assert_array_almost_equal(model.weights, model2.weights)

@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_robust_from_json():
    # Module-level from_json for the mixed classifier.
    model2 = from_json(model.to_json())
    assert_true(isinstance(model2, BayesClassifier))
    assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
    assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_hmm, teardown)
def test_model():
    # Sanity-check the three component HMMs' sequence log-likelihoods.
    assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097 )
    assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 )
    assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 )
    assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 )
    assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401 )
    assert_almost_equal(hmm2.log_probability(list('H')), -0.6931471805599453 )
    assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453 )
    assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781 )
    assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 )
    assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781 )
    assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417)
    assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776)
    assert_almost_equal(hmm3.log_probability(list('HHHH')), -1.7231316643698167)
    assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397)
    assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105)
    assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')), -8.883630243546788)
    assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343)
    # HMM inputs are symbol sequences, so the classifier is 1-dimensional.
    assert_equal(model.d, 1)

@with_setup(setup_hmm, teardown)
def test_hmm_log_proba():
    # Posterior log-probabilities over the three HMMs for five sequences.
    logs = model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
    assert_almost_equal(logs[0][0], -0.89097292388986515)
    assert_almost_equal(logs[0][1], -1.3609765531356006)
    assert_almost_equal(logs[0][2], -1.0986122886681096)
    assert_almost_equal(logs[1][0], -0.93570553121744293)
    assert_almost_equal(logs[1][1], -1.429425687080494)
    assert_almost_equal(logs[1][2], -0.9990078376167526)
    assert_almost_equal(logs[2][0], -3.9007882563128864)
    assert_almost_equal(logs[2][1], -0.23562532881626597)
    assert_almost_equal(logs[2][2], -1.6623251045711958)
    assert_almost_equal(logs[3][0], -3.1703366478831185)
    assert_almost_equal(logs[3][1], -0.49261403211260379)
    assert_almost_equal(logs[3][2], -1.058478108940049)
    assert_almost_equal(logs[4][0], -1.3058441172130273)
    assert_almost_equal(logs[4][1], -1.4007102236822906)
    assert_almost_equal(logs[4][2], -0.7284958836972919)

@with_setup(setup_hmm, teardown)
def test_hmm_proba():
    # Posterior probabilities; each row should sum to one.
    probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
    assert_almost_equal(probs[0][0], 0.41025641025641024)
    assert_almost_equal(probs[0][1], 0.25641025641025639)
    assert_almost_equal(probs[0][2], 0.33333333333333331)
    assert_almost_equal(probs[1][0], 0.39230898163446098)
    assert_almost_equal(probs[1][1], 0.23944639992337707)
    assert_almost_equal(probs[1][2], 0.36824461844216183)
    assert_almost_equal(probs[2][0], 0.020225961918306088)
    assert_almost_equal(probs[2][1], 0.79007663743383105)
    assert_almost_equal(probs[2][2], 0.18969740064786292)
    assert_almost_equal(probs[3][0], 0.041989459861032523)
    assert_almost_equal(probs[3][1], 0.61102706038265642)
    assert_almost_equal(probs[3][2], 0.346983479756311)
    assert_almost_equal(probs[4][0], 0.27094373022369794)
    assert_almost_equal(probs[4][1], 0.24642188711704707)
    assert_almost_equal(probs[4][2], 0.48263438265925512)

@with_setup(setup_hmm, teardown)
def test_hmm_prediction():
    # Hard classification of the five sequences.
    predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
    assert_equal(predicts[0], 0)
    assert_equal(predicts[1], 0)
    assert_equal(predicts[2], 1)
    assert_equal(predicts[3], 1)
    assert_equal(predicts[4], 2)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_log_probability():
    # Raw arrays, DataGenerator, and DataFrameGenerator inputs must all
    # produce identical log-probabilities.
    X2 = DataGenerator(X)
    X3 = DataFrameGenerator(pandas.DataFrame(X))
    logp1 = model.log_probability(X)
    logp2 = model.log_probability(X2)
    logp3 = model.log_probability(X3)
    assert_array_almost_equal(logp1, logp2)
    assert_array_almost_equal(logp1, logp3)

@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict():
    # Same input-wrapper equivalence for predict.
    X2 = DataGenerator(X)
    X3 = DataFrameGenerator(pandas.DataFrame(X))
    y_hat1 = model.predict(X)
    y_hat2 = model.predict(X2)
    y_hat3 = model.predict(X3)
    assert_array_almost_equal(y_hat1, y_hat2)
    assert_array_almost_equal(y_hat1, y_hat3)

@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_proba():
    # Same input-wrapper equivalence for predict_proba.
    X2 = DataGenerator(X)
    X3 = DataFrameGenerator(pandas.DataFrame(X))
    y_hat1 = model.predict_proba(X)
    y_hat2 = model.predict_proba(X2)
    y_hat3 = model.predict_proba(X3)
    assert_array_almost_equal(y_hat1, y_hat2)
    assert_array_almost_equal(y_hat1, y_hat3)

@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_log_proba():
    # Same input-wrapper equivalence for predict_log_proba.
    X2 = DataGenerator(X)
    X3 = DataFrameGenerator(pandas.DataFrame(X))
    y_hat1 = model.predict_log_proba(X)
    y_hat2 = model.predict_log_proba(X2)
    y_hat3 = model.predict_log_proba(X3)
    assert_array_almost_equal(y_hat1, y_hat2)
    assert_array_almost_equal(y_hat1, y_hat3)
def test_io_fit():
    """Fitting from raw arrays and from a DataGenerator must agree."""
    # Random dataset; the RNG calls happen in the same order as always so
    # the global numpy random stream is consumed identically.
    samples = numpy.random.randn(100, 5) + 0.5
    sample_weights = numpy.abs(numpy.random.randn(100))
    labels = numpy.random.randint(2, size=100)
    generator = DataGenerator(samples, sample_weights, labels)

    mean_a = numpy.array([0, 0, 0, 0, 0])
    mean_b = numpy.array([1, 1, 1, 1, 1])
    identity = numpy.eye(5)

    def fresh_classifier():
        # Identical untrained starting point for both fit paths.
        return BayesClassifier([
            MultivariateGaussianDistribution(mean_a, identity),
            MultivariateGaussianDistribution(mean_b, identity),
        ])

    direct = fresh_classifier()
    direct.fit(samples, labels, sample_weights)

    streamed = fresh_classifier()
    streamed.fit(generator)

    assert_array_almost_equal(direct.log_probability(samples),
                              streamed.log_probability(samples))
def test_io_from_samples():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
d = MultivariateGaussianDistribution
bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights)
bc2 = BayesClassifier.from_samples(d, | |
# -*- coding: utf-8 -*-
"""API for JupyterHub's proxy.
Custom proxy implementations can subclass :class:`Proxy`
and register in JupyterHub config:
.. sourcecode:: python
from mymodule import MyProxy
c.JupyterHub.proxy_class = MyProxy
Route Specification:
- A routespec is a URL prefix ([host]/path/), e.g.
'host.tld/path/' for host-based routing or '/path/' for default routing.
- Route paths should be normalized to always start and end with '/'
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import asyncio
import json
import os
import signal
import time
from functools import wraps
from subprocess import Popen
from urllib.parse import quote
from weakref import WeakKeyDictionary
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPError
from tornado.httpclient import HTTPRequest
from tornado.ioloop import PeriodicCallback
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import Instance
from traitlets import Integer
from traitlets import observe
from traitlets import Unicode
from traitlets.config import LoggingConfigurable
from . import utils
from .metrics import CHECK_ROUTES_DURATION_SECONDS
from .metrics import PROXY_POLL_DURATION_SECONDS
from .objects import Server
from .utils import exponential_backoff
from .utils import url_path_join
from jupyterhub.traitlets import Command
def _one_at_a_time(method):
"""decorator to limit an async method to be called only once
If multiple concurrent calls to this method are made,
queue them instead of allowing them to be concurrently outstanding.
"""
# use weak dict for locks
# so that the lock is always acquired within the current asyncio loop
# should only be relevant in testing, where eventloops are created and destroyed often
method._locks = WeakKeyDictionary()
@wraps(method)
async def locked_method(*args, **kwargs):
loop = asyncio.get_event_loop()
lock = method._locks.get(loop, None)
if lock is None:
lock = method._locks[loop] = asyncio.Lock()
async with lock:
return await method(*args, **kwargs)
return locked_method
class Proxy(LoggingConfigurable):
"""Base class for configurable proxies that JupyterHub can use.
A proxy implementation should subclass this and must define the following methods:
- :meth:`.get_all_routes` return a dictionary of all JupyterHub-related routes
- :meth:`.add_route` adds a route
- :meth:`.delete_route` deletes a route
In addition to these, the following method(s) may need to be implemented:
- :meth:`.start` start the proxy, if it should be launched by the Hub
instead of externally managed.
If the proxy is externally managed, it should set :attr:`should_start` to False.
- :meth:`.stop` stop the proxy. Only used if :meth:`.start` is also used.
And the following method(s) are optional, but can be provided:
- :meth:`.get_route` gets a single route.
There is a default implementation that extracts data from :meth:`.get_all_routes`,
but implementations may choose to provide a more efficient implementation
of fetching a single route.
"""
    # Callable returning the Hub's database session (set by the application);
    # accessed lazily through the ``db`` property below.
    db_factory = Any()

    @property
    def db(self):
        # Resolve a fresh db session from the factory on every access.
        return self.db_factory()

    # Handles on the running JupyterHub application and hub objects.
    app = Any()
    hub = Any()

    # Public-facing URL of the proxy, plus optional TLS key/cert paths.
    public_url = Unicode()
    ssl_key = Unicode()
    ssl_cert = Unicode()

    # True when routes are matched on [host]/path/ rather than /path/ alone.
    host_routing = Bool()

    should_start = Bool(
        True,
        config=True,
        help="""Should the Hub start the proxy
        If True, the Hub will start the proxy and stop it.
        Set to False if the proxy is managed externally,
        such as by systemd, docker, or another service manager.
        """,
    )

    extra_routes = Dict(
        {},
        config=True,
        help="""
        Additional routes to be maintained in the proxy.
        A dictionary with a route specification as key, and
        a URL as target. The hub will ensure this route is present
        in the proxy.
        If the hub is running in host based mode (with
        JupyterHub.subdomain_host set), the routespec *must*
        have a domain component (example.com/my-url/). If the
        hub is not running in host based mode, the routespec
        *must not* have a domain component (/my-url/).
        Helpful when the hub is running in API-only mode.
        """,
    )
    def start(self):
        """Start the proxy.
        Will be called during startup if should_start is True.
        **Subclasses must define this method**
        if the proxy is to be started by the Hub
        """
        # Default is a no-op: externally managed proxies are never started here.

    def stop(self):
        """Stop the proxy.
        Will be called during teardown if should_start is True.
        **Subclasses must define this method**
        if the proxy is to be started by the Hub
        """
        # Default is a no-op, mirroring start().
def validate_routespec(self, routespec):
"""Validate a routespec
- Checks host value vs host-based routing.
- Ensures trailing slash on path.
"""
if routespec == '/':
# / is the default route.
# don't check host-based routing
return routespec
# check host routing
host_route = not routespec.startswith('/')
if host_route and not self.host_routing:
raise ValueError(
"Cannot add host-based route %r, not using host-routing" % routespec
)
if self.host_routing and not host_route:
raise ValueError(
"Cannot add route without host %r, using host-routing" % routespec
)
# add trailing slash
if not routespec.endswith('/'):
return routespec + '/'
else:
return routespec
    async def add_route(self, routespec, target, data):
        """Add a route to the proxy.
        **Subclasses must define this method**
        Args:
            routespec (str): A URL prefix ([host]/path/) for which this route will be matched,
                e.g. host.name/path/
            target (str): A full URL that will be the target of this route.
            data (dict): A JSONable dict that will be associated with this route, and will
                be returned when retrieving information about this route.
        Will raise an appropriate Exception (FIXME: find what?) if the route could
        not be added.
        The proxy implementation should also have a way to associate the fact that a
        route came from JupyterHub.
        """
        # Abstract: concrete proxies (e.g. ConfigurableHTTPProxy) implement this.
        pass

    async def delete_route(self, routespec):
        """Delete a route with a given routespec if it exists.
        **Subclasses must define this method**
        """
        # Abstract: deleting a non-existent route should not be an error.
        pass

    async def get_all_routes(self):
        """Fetch and return all the routes associated by JupyterHub from the
        proxy.
        **Subclasses must define this method**
        Should return a dictionary of routes, where the keys are
        routespecs and each value is a dict of the form::
          {
            'routespec': the route specification ([host]/path/)
            'target': the target host URL (proto://host) for this route
            'data': the attached data dict for this route (as specified in add_route)
          }
        """
        # Abstract: the single source of truth for get_route's default below.
        pass

    async def get_route(self, routespec):
        """Return the route info for a given routespec.
        Args:
            routespec (str):
                A URI that was used to add this route,
                e.g. `host.tld/path/`
        Returns:
            result (dict):
                dict with the following keys::
                'routespec': The normalized route specification passed in to add_route
                    ([host]/path/)
                'target': The target host for this route (proto://host)
                'data': The arbitrary data dict that was passed in by JupyterHub when adding this
                    route.
            None: if there are no routes matching the given routespec
        """
        # default implementation relies on get_all_routes
        routespec = self.validate_routespec(routespec)
        routes = await self.get_all_routes()
        return routes.get(routespec)
# Most basic implementers must only implement above methods
async def add_service(self, service, client=None):
"""Add a service's server to the proxy table."""
if not service.server:
raise RuntimeError(
"Service %s does not have an http endpoint to add to the proxy.",
service.name,
)
self.log.info(
"Adding service %s to proxy %s => %s",
service.name,
service.proxy_spec,
service.server.host,
)
await self.add_route(
service.proxy_spec, service.server.host, {'service': service.name}
)
    async def delete_service(self, service, client=None):
        """Remove a service's server from the proxy table."""
        self.log.info("Removing service %s from proxy", service.name)
        await self.delete_route(service.proxy_spec)

    async def add_user(self, user, server_name='', client=None):
        """Add a user's server to the proxy table."""
        spawner = user.spawners[server_name]
        self.log.info(
            "Adding user %s to proxy %s => %s",
            user.name,
            spawner.proxy_spec,
            spawner.server.host,
        )
        # Guard against routing to a server that is still stopping/starting:
        # only a 'spawn'-pending (or not pending) spawner may be proxied.
        if spawner.pending and spawner.pending != 'spawn':
            raise RuntimeError(
                "%s is pending %s, shouldn't be added to the proxy yet!"
                % (spawner._log_name, spawner.pending)
            )
        await self.add_route(
            spawner.proxy_spec,
            spawner.server.host,
            {'user': user.name, 'server_name': server_name},
        )

    async def delete_user(self, user, server_name=''):
        """Remove a user's server from the proxy table."""
        routespec = user.proxy_spec
        # Named servers live under the user's prefix: /user/<name>/<server>/
        if server_name:
            routespec = url_path_join(user.proxy_spec, server_name, '/')
        self.log.info("Removing user %s from proxy (%s)", user.name, routespec)
        await self.delete_route(routespec)
async def add_all_services(self, service_dict):
"""Update the proxy table from the database.
Used when loading up a new proxy.
"""
futures = []
for service in service_dict.values():
if service.server:
futures.append(self.add_service(service))
# wait after submitting them all
await asyncio.gather(*futures)
async def add_all_users(self, user_dict):
"""Update the proxy table from the database.
Used when loading up a new proxy.
"""
futures = []
for user in user_dict.values():
for name, spawner in user.spawners.items():
if spawner.ready:
futures.append(self.add_user(user, name))
# wait after submitting them all
await asyncio.gather(*futures)
@_one_at_a_time
async def check_routes(self, user_dict, service_dict, routes=None):
"""Check that all users are properly routed on the proxy."""
start = time.perf_counter() # timer starts here when user is created
if not routes:
self.log.debug("Fetching routes to check")
routes = await self.get_all_routes()
self.log.debug("Checking routes")
user_routes = {path for path, r | |
# resrace_ppo.py
import numpy as np
import tensorflow.compat.v1 as tf
import gym
import time
import math
import pybullet
import matplotlib.pyplot as plt
import spinup.algos.tf1.ppo.core as core
from spinup.utils.logx import EpochLogger
from spinup.utils.mpi_tf import MpiAdamOptimizer, sync_all_params
from spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs
from utils.functions import modify_obs
from racecar_gym import SingleAgentScenario
from racecar_gym.envs import SingleAgentRaceEnv
from utils.config import Config, APF_Config
from mapf import Nonlinear_Controller
import os
# Select GPU 0.  CUDA_VISIBLE_DEVICES takes bare device indices such as "0"
# or "0,1"; a TensorFlow device string like '/gpu:0' matches no CUDA device
# and silently hides every GPU, forcing CPU execution.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
num_workers = 1

# Observation = lidar scan + IMU readings + previous action.
manual_action_dim = Config['action_dim']
imu_dim = Config['imu_dim']
manual_obs_dim = Config['lidar_dim'] + imu_dim + Config['action_dim']

# Run-time switches: live plotting, fresh training vs. resume, checkpointing.
show_flag, new_train_flag, save_flag = True, True, True
display_frequency = 1.1   # ratio of epochs between plot refreshes
render_pause = 0
track = 'plechaty'
train_mode = 'resrace_ppo'
wait_frame = 180          # frames to wait before judging the car "stuck"
exp_prove = 0.1
still_threshold = 1/45
save_path = './logs/' + train_mode + '/' + track
EPS = 1e-8                # numerical-stability epsilon
lap_reward = 0
class PPOBuffer:
    """
    A buffer for storing trajectories experienced by a PPO agent interacting
    with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
    for calculating the advantages of state-action pairs.
    """

    def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95):
        # Flat, pre-allocated arrays holding exactly one epoch of experience.
        self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
        self.adv_buf = np.zeros(size, dtype=np.float32)   # GAE advantages
        self.rew_buf = np.zeros(size, dtype=np.float32)   # per-step rewards
        self.ret_buf = np.zeros(size, dtype=np.float32)   # rewards-to-go targets
        self.val_buf = np.zeros(size, dtype=np.float32)   # value estimates V(s)
        self.logp_buf = np.zeros(size, dtype=np.float32)  # log pi(a|s) at collection
        self.gamma, self.lam = gamma, lam
        # ptr: next write index; path_start_idx: start of the current trajectory.
        self.ptr, self.path_start_idx, self.max_size = 0, 0, size

    def store(self, obs, act, rew, val, logp):
        """
        Append one timestep of agent-environment interaction to the buffer.
        """
        assert self.ptr < self.max_size     # buffer has to have room so you can store
        self.obs_buf[self.ptr] = obs
        self.act_buf[self.ptr] = act
        self.rew_buf[self.ptr] = rew
        self.val_buf[self.ptr] = val
        self.logp_buf[self.ptr] = logp
        self.ptr += 1

    def finish_path(self, last_val=0):
        """
        Call this at the end of a trajectory, or when one gets cut off
        by an epoch ending. This looks back in the buffer to where the
        trajectory started, and uses rewards and value estimates from
        the whole trajectory to compute advantage estimates with GAE-Lambda,
        as well as compute the rewards-to-go for each state, to use as
        the targets for the value function.

        The "last_val" argument should be 0 if the trajectory ended
        because the agent reached a terminal state (died), and otherwise
        should be V(s_T), the value function estimated for the last state.
        This allows us to bootstrap the reward-to-go calculation to account
        for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
        """
        path_slice = slice(self.path_start_idx, self.ptr)
        rews = np.append(self.rew_buf[path_slice], last_val)
        vals = np.append(self.val_buf[path_slice], last_val)

        # the next two lines implement GAE-Lambda advantage calculation
        deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
        self.adv_buf[path_slice] = core.discount_cumsum(deltas, self.gamma * self.lam)

        # the next line computes rewards-to-go, to be targets for the value function
        self.ret_buf[path_slice] = core.discount_cumsum(rews, self.gamma)[:-1]

        self.path_start_idx = self.ptr

    def get(self):
        """
        Call this at the end of an epoch to get all of the data from
        the buffer, with advantages appropriately normalized (shifted to have
        mean zero and std one). Also, resets some pointers in the buffer.
        """
        assert self.ptr == self.max_size    # buffer has to be full before you can get
        self.ptr, self.path_start_idx = 0, 0
        # the next two lines implement the advantage normalization trick;
        # statistics are aggregated across MPI workers.
        adv_mean, adv_std = mpi_statistics_scalar(self.adv_buf)
        self.adv_buf = (self.adv_buf - adv_mean) / adv_std
        return [self.obs_buf, self.act_buf, self.adv_buf,
                self.ret_buf, self.logp_buf]
def learning_curve_display(epoch, last_show_num, logger, train_pro_list):
    """Record this epoch's mean progress and periodically redraw the curve.

    Appends the mean of ``logger.epoch_dict['EpPro']`` to ``train_pro_list``
    and refreshes the matplotlib plot once ``epoch / last_show_num`` exceeds
    the module-level ``display_frequency`` ratio.

    Returns:
        (train_pro_list, last_show_num): the (mutated in place) history list
        and the epoch at which the plot was last redrawn.
    """
    # NOTE(review): assumes logger.epoch_dict['EpPro'] is non-empty for this
    # epoch -- np.mean of an empty list yields nan with a warning.  Confirm.
    train_pro_list.append(np.mean(logger.epoch_dict['EpPro']))
    # NOTE(review): ratio test assumes last_show_num > 0; a caller passing 0
    # would raise ZeroDivisionError.  Verify the initial value at call sites.
    if epoch / last_show_num > display_frequency:
        plt.cla()
        plt.title(track + train_mode, loc='center')
        plt.plot(train_pro_list, label="train_progress")
        plt.legend()
        plt.pause(0.1)
        last_show_num = epoch
    return train_pro_list, last_show_num
def ppo(env_fn, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=0,
steps_per_epoch=10000, epochs=50, gamma=1, clip_ratio=0.2, pi_lr=3e-4,
vf_lr=1e-3, train_pi_iters=80, train_v_iters=80, lam=0.97, max_ep_len=5000,
target_kl=0.01, logger_kwargs=dict(), save_freq=1):
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
seed += 10000 * proc_id()
tf.set_random_seed(seed)
np.random.seed(seed)
env = env_fn()
obs_dim = manual_obs_dim
act_dim = manual_action_dim
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space
# Inputs to computation graph
x_ph, a_ph = core.placeholder(manual_obs_dim), core.placeholder(manual_action_dim)
adv_ph, ret_ph, logp_old_ph = core.placeholders(None, None, None)
# Main outputs from computation graph
pi, logp, logp_pi, v = actor_critic(x_ph, a_ph, **ac_kwargs)
# Need all placeholders in *this* order later (to zip with data from buffer)
all_phs = [x_ph, a_ph, adv_ph, ret_ph, logp_old_ph]
# Every step, get: action, value, and logprob
get_action_ops = [pi, v, logp_pi]
# Experience buffer
local_steps_per_epoch = int(steps_per_epoch / num_procs())
buf = PPOBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam)
# Count variables
var_counts = tuple(core.count_vars(scope) for scope in ['pi', 'v'])
logger.log('\nNumber of parameters: \t pi: %d, \t v: %d\n'%var_counts)
# PPO objectives
ratio = tf.exp(logp - logp_old_ph) # pi(a|s) / pi_old(a|s)
min_adv = tf.where(adv_ph>0, (1+clip_ratio)*adv_ph, (1-clip_ratio)*adv_ph)
pi_loss = -tf.reduce_mean(tf.minimum(ratio * adv_ph, min_adv))
v_loss = tf.reduce_mean((ret_ph - v)**2)
# Info (useful to watch during learning)
approx_kl = tf.reduce_mean(logp_old_ph - logp) # a sample estimate for KL-divergence, easy to compute
approx_ent = tf.reduce_mean(-logp) # a sample estimate for entropy, also easy to compute
clipped = tf.logical_or(ratio > (1+clip_ratio), ratio < (1-clip_ratio))
clipfrac = tf.reduce_mean(tf.cast(clipped, tf.float32))
# Optimizers
train_pi = MpiAdamOptimizer(learning_rate=pi_lr).minimize(pi_loss)
train_v = MpiAdamOptimizer(learning_rate=vf_lr).minimize(v_loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Sync params across processes
sess.run(sync_all_params())
# Setup model saving
logger.setup_tf_saver(sess, inputs={'x': x_ph}, outputs={'pi': pi, 'v': v})
def update():
inputs = {k:v for k,v in zip(all_phs, buf.get())}
pi_l_old, v_l_old, ent = sess.run([pi_loss, v_loss, approx_ent], feed_dict=inputs)
# Training
for i in range(train_pi_iters):
_, kl = sess.run([train_pi, approx_kl], feed_dict=inputs)
kl = mpi_avg(kl)
if kl > 1.5 * target_kl:
logger.log('Early stopping at step %d due to reaching max kl.'%i)
break
logger.store(StopIter=i)
for _ in range(train_v_iters):
sess.run(train_v, feed_dict=inputs)
# Log changes from update
pi_l_new, v_l_new, kl, cf = sess.run([pi_loss, v_loss, approx_kl, clipfrac], feed_dict=inputs)
logger.store(LossPi=pi_l_old, LossV=v_l_old,
KL=kl, Entropy=ent, ClipFrac=cf,
DeltaLossPi=(pi_l_new - pi_l_old),
DeltaLossV=(v_l_new - v_l_old))
start_time = time.time()
#-----------------------------------------------------------------#
saver = tf.train.Saver()
network_file_name = save_path + ".ckpt"
last_show_num = 1
if new_train_flag:
print("!!!!!!-------- Attention: Begin a NEW TRAINING --------!!!!!!")
train_pro_list, test_pro_list = [], []
else:
print("!!!!!!------ Attention: Inherit PERVIOUS TRAINING ------!!!!!!")
saver.restore(sess, network_file_name)
train_pro_list = np.load(save_path+'train.npy').tolist()
start_steps = 0
network = tf.trainable_variables()
variable_name = [v.name for v in tf.trainable_variables()]
o_raw, ep_ret, ep_len = env.reset(), 0, 0
last_loc, last_steering, last_motor = o_raw['pose'][:3], 0, 0
lidar_obs = o_raw['lidar']
o = modify_obs(o_raw)
progress_history, velo_history = [0]*wait_frame, [0]*wait_frame
total_steps = steps_per_epoch * epochs
last_total_time = 0
epoch, agent_lap, last_progress, lap_counter = 0, 0, 0, 0
eps_pro_list, epo_pro_list = [], []
# Main loop: collect experience in env and update/log each epoch
for epoch in range(epochs):
for t in range(local_steps_per_epoch):
a, v_t, logp_t = sess.run(get_action_ops, feed_dict={x_ph: o.reshape(1,-1)})
a = a[0]
#----------------------------------------------------------------------------------------------------------#
a_residual = a
a_mapf = Nonlinear_Controller(lidar_obs, rep_range=APF_Config[track])
a = np.clip((a_mapf+a_residual), -1, +1)
o2_raw, r, d, _ = env.step(a)
d = False
current_loc = o2_raw['pose'][:3]
delta_loc, delta_steering, delta_motor = math.sqrt(np.sum(np.square(current_loc-last_loc))), abs(a[0]-last_steering), abs(a[1]-last_motor)
lidar_obs = o2_raw['lidar']
velocity_bonus = 0.5 * math.sqrt(np.sum(np.square(o2_raw['velocity'][:2])))
steering_penalty = -0.2 * (delta_steering+abs(a[0]))
motor_penalty = -0.5 * (delta_motor+1-a[1])
o2 = modify_obs(o2_raw, a)
# r += (steering_penalty+motor_penalty)
# r += velocity_bonus
last_loc, last_steering, last_motor = current_loc, a[0], a[1]
agent_state = env.scenario.world.state()
agent_progress = agent_state['A']['progress']
delta_progress = agent_progress - last_progress
velo_history = velo_history[1:] + [delta_loc]
if (ep_len > wait_frame and sum(velo_history)/wait_frame < still_threshold):
d = True
print('Agent Died: For Collision or Long-time Brake.')
if agent_progress <=0.05 and last_progress>=0.95 and lap_counter >= 450:
agent_lap += 1
r += lap_reward
lap_counter = 0
print('Training Agent: Finish one LAP.')
elif track != 'montreal':
progress_history = progress_history[1:] + [delta_progress]
if (ep_len > wait_frame and sum(progress_history) < -0.1):
d = True
print('Agent Died: For Wrong Way.')
last_progress = agent_progress
ep_ret += r
ep_len += 1
lap_counter += 1
#----------------------------------------------------------------------------------------------------------#
# if not d and ep_len == max_ep_len:
# r += agent_progress*lap_reward
# save and log
buf.store(o, a_residual, r, v_t, logp_t)
logger.store(VVals=v_t)
# Update obs (critical!)
o = o2
terminal = d or (ep_len == max_ep_len)
if terminal or (t==local_steps_per_epoch-1):
logger.store(EpPro=(agent_progress+agent_lap))
if not(terminal):
print('Warning: trajectory cut off by epoch at %d steps.'%ep_len)
# if trajectory didn't reach terminal state, bootstrap value target
last_val = 0 if d else sess.run(v, feed_dict={x_ph: o.reshape(1,-1)})
buf.finish_path(last_val)
if terminal:
# only save EpRet / EpLen if trajectory finished
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, ep_ret, ep_len = env.reset(), 0, 0
o = modify_obs(o_raw)
agent_progress, agent_lap, last_progress, lap_counter = 0, 0, 0, 0
# Save model
if ((epoch % save_freq == 0) or (epoch == epochs-1)) and not save_flag:
log_path = saver.save(sess, network_file_name)
np.save(save_path+'train.npy', train_pro_list)
print("Save to path: ", log_path)
# Perform | |
"GYRAL",
"GYRED",
"GYRES",
"GYRON",
"GYROS",
"GYRUS",
"GYTES",
"GYVED",
"GYVES",
"HAAFS",
"HAARS",
"HABLE",
"HABUS",
"HACEK",
"HACKS",
"HADAL",
"HADED",
"HADES",
"HADJI",
"HADST",
"HAEMS",
"HAETS",
"HAFFS",
"HAFIZ",
"HAFTS",
"HAGGS",
"HAHAS",
"HAICK",
"HAIKA",
"HAIKS",
"HAIKU",
"HAILS",
"HAILY",
"HAINS",
"HAINT",
"HAIRS",
"HAITH",
"HAJES",
"HAJIS",
"HAJJI",
"HAKAM",
"HAKAS",
"HAKEA",
"HAKES",
"HAKIM",
"HAKUS",
"HALAL",
"HALED",
"HALER",
"HALES",
"HALFA",
"HALFS",
"HALID",
"HALLO",
"HALLS",
"HALMA",
"HALMS",
"HALON",
"HALOS",
"HALSE",
"HALTS",
"HALVA",
"HALWA",
"HAMAL",
"HAMBA",
"HAMED",
"HAMES",
"HAMMY",
"HAMZA",
"HANAP",
"HANCE",
"HANCH",
"HANDS",
"HANGI",
"HANGS",
"HANKS",
"HANKY",
"HANSA",
"HANSE",
"HANTS",
"HAOLE",
"HAOMA",
"HAPAX",
"HAPLY",
"HAPPI",
"HAPUS",
"HARAM",
"HARDS",
"HARED",
"HARES",
"HARIM",
"HARKS",
"HARLS",
"HARMS",
"HARNS",
"HAROS",
"HARPS",
"HARTS",
"HASHY",
"HASKS",
"HASPS",
"HASTA",
"HATED",
"HATES",
"HATHA",
"HAUDS",
"HAUFS",
"HAUGH",
"HAULD",
"HAULM",
"HAULS",
"HAULT",
"HAUNS",
"HAUSE",
"HAVER",
"HAVES",
"HAWED",
"HAWKS",
"HAWMS",
"HAWSE",
"HAYED",
"HAYER",
"HAYEY",
"HAYLE",
"HAZAN",
"HAZED",
"HAZER",
"HAZES",
"HEADS",
"HEALD",
"HEALS",
"HEAME",
"HEAPS",
"HEAPY",
"HEARE",
"HEARS",
"HEAST",
"HEATS",
"HEBEN",
"HEBES",
"HECHT",
"HECKS",
"HEDER",
"HEDGY",
"HEEDS",
"HEEDY",
"HEELS",
"HEEZE",
"HEFTE",
"HEFTS",
"HEIDS",
"HEIGH",
"HEILS",
"HEIRS",
"HEJAB",
"HEJRA",
"HELED",
"HELES",
"HELIO",
"HELLS",
"HELMS",
"HELOS",
"HELOT",
"HELPS",
"HELVE",
"HEMAL",
"HEMES",
"HEMIC",
"HEMIN",
"HEMPS",
"HEMPY",
"HENCH",
"HENDS",
"HENGE",
"HENNA",
"HENNY",
"HENRY",
"HENTS",
"HEPAR",
"HERBS",
"HERBY",
"HERDS",
"HERES",
"HERLS",
"HERMA",
"HERMS",
"HERNS",
"HEROS",
"HERRY",
"HERSE",
"HERTZ",
"HERYE",
"HESPS",
"HESTS",
"HETES",
"HETHS",
"HEUCH",
"HEUGH",
"HEVEA",
"HEWED",
"HEWER",
"HEWGH",
"HEXAD",
"HEXED",
"HEXER",
"HEXES",
"HEXYL",
"HEYED",
"HIANT",
"HICKS",
"HIDED",
"HIDER",
"HIDES",
"HIEMS",
"HIGHS",
"HIGHT",
"HIJAB",
"HIJRA",
"HIKED",
"HIKER",
"HIKES",
"HIKOI",
"HILAR",
"HILCH",
"HILLO",
"HILLS",
"HILTS",
"HILUM",
"HILUS",
"HIMBO",
"HINAU",
"HINDS",
"HINGS",
"HINKY",
"HINNY",
"HINTS",
"HIOIS",
"HIPLY",
"HIRED",
"HIREE",
"HIRER",
"HIRES",
"HISSY",
"HISTS",
"HITHE",
"HIVED",
"HIVER",
"HIVES",
"HIZEN",
"HOAED",
"HOAGY",
"HOARS",
"HOARY",
"HOAST",
"HOBOS",
"HOCKS",
"HOCUS",
"HODAD",
"HODJA",
"HOERS",
"HOGAN",
"HOGEN",
"HOGGS",
"HOGHS",
"HOHED",
"HOICK",
"HOIED",
"HOIKS",
"HOING",
"HOISE",
"HOKAS",
"HOKED",
"HOKES",
"HOKEY",
"HOKIS",
"HOKKU",
"HOKUM",
"HOLDS",
"HOLED",
"HOLES",
"HOLEY",
"HOLKS",
"HOLLA",
"HOLLO",
"HOLME",
"HOLMS",
"HOLON",
"HOLOS",
"HOLTS",
"HOMAS",
"HOMED",
"HOMES",
"HOMEY",
"HOMIE",
"HOMME",
"HOMOS",
"HONAN",
"HONDA",
"HONDS",
"HONED",
"HONER",
"HONES",
"HONGI",
"HONGS",
"HONKS",
"HONKY",
"HOOCH",
"HOODS",
"HOODY",
"HOOEY",
"HOOFS",
"HOOKA",
"HOOKS",
"HOOKY",
"HOOLY",
"HOONS",
"HOOPS",
"HOORD",
"HOORS",
"HOOSH",
"HOOTS",
"HOOTY",
"HOOVE",
"HOPAK",
"HOPED",
"HOPER",
"HOPES",
"HOPPY",
"HORAH",
"HORAL",
"HORAS",
"HORIS",
"HORKS",
"HORME",
"HORNS",
"HORST",
"HORSY",
"HOSED",
"HOSEL",
"HOSEN",
"HOSER",
"HOSES",
"HOSEY",
"HOSTA",
"HOSTS",
"HOTCH",
"HOTEN",
"HOTTY",
"HOUFF",
"HOUFS",
"HOUGH",
"HOURI",
"HOURS",
"HOUTS",
"HOVEA",
"HOVED",
"HOVEN",
"HOVES",
"HOWBE",
"HOWES",
"HOWFF",
"HOWFS",
"HOWKS",
"HOWLS",
"HOWRE",
"HOWSO",
"HOXED",
"HOXES",
"HOYAS",
"HOYED",
"HOYLE",
"HUBBY",
"HUCKS",
"HUDNA",
"HUDUD",
"HUERS",
"HUFFS",
"HUFFY",
"HUGER",
"HUGGY",
"HUHUS",
"HUIAS",
"HULAS",
"HULES",
"HULKS",
"HULKY",
"HULLO",
"HULLS",
"HULLY",
"HUMAS",
"HUMFS",
"HUMIC",
"HUMPS",
"HUMPY",
"HUNKS",
"HUNTS",
"HURDS",
"HURLS",
"HURLY",
"HURRA",
"HURST",
"HURTS",
"HUSHY",
"HUSKS",
"HUSOS",
"HUTIA",
"HUZZA",
"HUZZY",
"HWYLS",
"HYDRA",
"HYENS",
"HYGGE",
"HYING",
"HYKES",
"HYLAS",
"HYLEG",
"HYLES",
"HYLIC",
"HYMNS",
"HYNDE",
"HYOID",
"HYPED",
"HYPES",
"HYPHA",
"HYPHY",
"HYPOS",
"HYRAX",
"HYSON",
"HYTHE",
"IAMBI",
"IAMBS",
"IBRIK",
"ICERS",
"ICHED",
"ICHES",
"ICHOR",
"ICIER",
"ICKER",
"ICKLE",
"ICONS",
"ICTAL",
"ICTIC",
"ICTUS",
"IDANT",
"IDEAS",
"IDEES",
"IDENT",
"IDLED",
"IDLES",
"IDOLA",
"IDOLS",
"IDYLS",
"IFTAR",
"IGAPO",
"IGGED",
"IGLUS",
"IHRAM",
"IKANS",
"IKATS",
"IKONS",
"ILEAC",
"ILEAL",
"ILEUM",
"ILEUS",
"ILIAD",
"ILIAL",
"ILIUM",
"ILLER",
"ILLTH",
"IMAGO",
"IMAMS",
"IMARI",
"IMAUM",
"IMBAR",
"IMBED",
"IMIDE",
"IMIDO",
"IMIDS",
"IMINE",
"IMINO",
"IMMEW",
"IMMIT",
"IMMIX",
"IMPED",
"IMPIS",
"IMPOT",
"IMPRO",
"IMSHI",
"IMSHY",
"INAPT",
"INARM",
"INBYE",
"INCEL",
"INCLE",
"INCOG",
"INCUS",
"INCUT",
"INDEW",
"INDIA",
"INDIE",
"INDOL",
"INDOW",
"INDRI",
"INDUE",
"INERM",
"INFIX",
"INFOS",
"INFRA",
"INGAN",
"INGLE",
"INION",
"INKED",
"INKER",
"INKLE",
"INNED",
"INNIT",
"INORB",
"INRUN",
"INSET",
"INSPO",
"INTEL",
"INTIL",
"INTIS",
"INTRA",
"INULA",
"INURE",
"INURN",
"INUST",
"INVAR",
"INWIT",
"IODIC",
"IODID",
"IODIN",
"IOTAS",
"IPPON",
"IRADE",
"IRIDS",
"IRING",
"IRKED",
"IROKO",
"IRONE",
"IRONS",
"ISBAS",
"ISHES",
"ISLED",
"ISLES",
"ISNAE",
"ISSEI",
"ISTLE",
"ITEMS",
"ITHER",
"IVIED",
"IVIES",
"IXIAS",
"IXNAY",
"IXORA",
"IXTLE",
"IZARD",
"IZARS",
"IZZAT",
"JAAPS",
"JABOT",
"JACAL",
"JACKS",
"JACKY",
"JADED",
"JADES",
"JAFAS",
"JAFFA",
"JAGAS",
"JAGER",
"JAGGS",
"JAGGY",
"JAGIR",
"JAGRA",
"JAILS",
"JAKER",
"JAKES",
"JAKEY",
"JALAP",
"JALOP",
"JAMBE",
"JAMBO",
"JAMBS",
"JAMBU",
"JAMES",
"JAMMY",
"JAMON",
"JANES",
"JANNS",
"JANNY",
"JANTY",
"JAPAN",
"JAPED",
"JAPER",
"JAPES",
"JARKS",
"JARLS",
"JARPS",
"JARTA",
"JARUL",
"JASEY",
"JASPE",
"JASPS",
"JATOS",
"JAUKS",
"JAUPS",
"JAVAS",
"JAVEL",
"JAWAN",
"JAWED",
"JAXIE",
"JEANS",
"JEATS",
"JEBEL",
"JEDIS",
"JEELS",
"JEELY",
"JEEPS",
"JEERS",
"JEEZE",
"JEFES",
"JEFFS",
"JEHAD",
"JEHUS",
"JELAB",
"JELLO",
"JELLS",
"JEMBE",
"JEMMY",
"JENNY",
"JEONS",
"JERID",
"JERKS",
"JERRY",
"JESSE",
"JESTS",
"JESUS",
"JETES",
"JETON",
"JEUNE",
"JEWED",
"JEWIE",
"JHALA",
"JIAOS",
"JIBBA",
"JIBBS",
"JIBED",
"JIBER",
"JIBES",
"JIFFS",
"JIGGY",
"JIGOT",
"JIHAD",
"JILLS",
"JILTS",
"JIMMY",
"JIMPY",
"JINGO",
"JINKS",
"JINNE",
"JINNI",
"JINNS",
"JIRDS",
"JIRGA",
"JIRRE",
"JISMS",
"JIVED",
"JIVER",
"JIVES",
"JIVEY",
"JNANA",
"JOBED",
"JOBES",
"JOCKO",
"JOCKS",
"JOCKY",
"JOCOS",
"JODEL",
"JOEYS",
"JOHNS",
"JOINS",
"JOKED",
"JOKES",
"JOKEY",
"JOKOL",
"JOLED",
"JOLES",
"JOLLS",
"JOLTS",
"JOLTY",
"JOMON",
"JOMOS",
"JONES",
"JONGS",
"JONTY",
"JOOKS",
"JORAM",
"JORUM",
"JOTAS",
"JOTTY",
"JOTUN",
"JOUAL",
"JOUGS",
"JOUKS",
"JOULE",
"JOURS",
"JOWAR",
"JOWED",
"JOWLS",
"JOWLY",
"JOYED",
"JUBAS",
"JUBES",
"JUCOS",
"JUDAS",
"JUDGY",
"JUDOS",
"JUGAL",
"JUGUM",
"JUJUS",
"JUKED",
"JUKES",
"JUKUS",
"JULEP",
"JUMAR",
"JUMBY",
"JUMPS",
"JUNCO",
"JUNKS",
"JUNKY",
"JUPES",
"JUPON",
"JURAL",
"JURAT",
"JUREL",
"JURES",
"JUSTS",
"JUTES",
"JUTTY",
"JUVES",
"JUVIE",
"KAAMA",
"KABAB",
"KABAR",
"KABOB",
"KACHA",
"KACKS",
"KADAI",
"KADES",
"KADIS",
"KAFIR",
"KAGOS",
"KAGUS",
"KAHAL",
"KAIAK",
"KAIDS",
"KAIES",
"KAIFS",
"KAIKA",
"KAIKS",
"KAILS",
"KAIMS",
"KAING",
"KAINS",
"KAKAS",
"KAKIS",
"KALAM",
"KALES",
"KALIF",
"KALIS",
"KALPA",
"KAMAS",
"KAMES",
"KAMIK",
"KAMIS",
"KAMME",
"KANAE",
"KANAS",
"KANDY",
"KANEH",
"KANES",
"KANGA",
"KANGS",
"KANJI",
"KANTS",
"KANZU",
"KAONS",
"KAPAS",
"KAPHS",
"KAPOK",
"KAPOW",
"KAPUS",
"KAPUT",
"KARAS",
"KARAT",
"KARKS",
"KARNS",
"KAROO",
"KAROS",
"KARRI",
"KARST",
"KARSY",
"KARTS",
"KARZY",
"KASHA",
"KASME",
"KATAL",
"KATAS",
"KATIS",
"KATTI",
"KAUGH",
"KAURI",
"KAURU",
"KAURY",
"KAVAL",
"KAVAS",
"KAWAS",
"KAWAU",
"KAWED",
"KAYLE",
"KAYOS",
"KAZIS",
"KAZOO",
"KBARS",
"KEBAR",
"KEBOB",
"KECKS",
"KEDGE",
"KEDGY",
"KEECH",
"KEEFS",
"KEEKS",
"KEELS",
"KEEMA",
"KEENO",
"KEENS",
"KEEPS",
"KEETS",
"KEEVE",
"KEFIR",
"KEHUA",
"KEIRS",
"KELEP",
"KELIM",
"KELLS",
"KELLY",
"KELPS",
"KELPY",
"KELTS",
"KELTY",
"KEMBO",
"KEMBS",
"KEMPS",
"KEMPT",
"KEMPY",
"KENAF",
"KENCH",
"KENDO",
"KENOS",
"KENTE",
"KENTS",
"KEPIS",
"KERBS",
"KEREL",
"KERFS",
"KERKY",
"KERMA",
"KERNE",
"KERNS",
"KEROS",
"KERRY",
"KERVE",
"KESAR",
"KESTS",
"KETAS",
"KETCH",
"KETES",
"KETOL",
"KEVEL",
"KEVIL",
"KEXES",
"KEYED",
"KEYER",
"KHADI",
"KHAFS",
"KHANS",
"KHAPH",
"KHATS",
"KHAYA",
"KHAZI",
"KHEDA",
"KHETH",
"KHETS",
"KHOJA",
"KHORS",
"KHOUM",
"KHUDS",
"KIAAT",
"KIACK",
"KIANG",
"KIBBE",
"KIBBI",
"KIBEI",
"KIBES",
"KIBLA",
"KICKS",
"KICKY",
"KIDDO",
"KIDDY",
"KIDEL",
"KIDGE",
"KIEFS",
"KIERS",
"KIEVE",
"KIEVS",
"KIGHT",
"KIKES",
"KIKOI",
"KILEY",
"KILIM",
"KILLS",
"KILNS",
"KILOS",
"KILPS",
"KILTS",
"KILTY",
"KIMBO",
"KINAS",
"KINDA",
"KINDS",
"KINDY",
"KINES",
"KINGS",
"KININ",
"KINKS",
"KINOS",
"KIORE",
"KIPES",
"KIPPA",
"KIPPS",
"KIRBY",
"KIRKS",
"KIRNS",
"KIRRI",
"KISAN",
"KISSY",
"KISTS",
"KITED",
"KITER",
"KITES",
"KITHE",
"KITHS",
"KITUL",
"KIVAS",
"KIWIS",
"KLANG",
"KLAPS",
"KLETT",
"KLICK",
"KLIEG",
"KLIKS",
"KLONG",
"KLOOF",
"KLUGE",
"KLUTZ",
"KNAGS",
"KNAPS",
"KNARL",
"KNARS",
"KNAUR",
"KNAWE",
"KNEES",
"KNELL",
"KNISH",
"KNITS",
"KNIVE",
"KNOBS",
"KNOPS",
"KNOSP",
"KNOTS",
"KNOUT",
"KNOWE",
"KNOWS",
"KNUBS",
"KNURL",
"KNURR",
"KNURS",
"KNUTS",
"KOANS",
"KOAPS",
"KOBAN",
"KOBOS",
"KOELS",
"KOFFS",
"KOFTA",
"KOGAL",
"KOHAS",
"KOHEN",
"KOHLS",
"KOINE",
"KOJIS",
"KOKAM",
"KOKAS",
"KOKER",
"KOKRA",
"KOKUM",
"KOLAS",
"KOLOS",
"KOMBU",
"KONBU",
"KONDO",
"KONKS",
"KOOKS",
"KOOKY",
"KOORI",
"KOPEK",
"KOPHS",
"KOPJE",
"KOPPA",
"KORAI",
"KORAS",
"KORAT",
"KORES",
"KORMA",
"KOROS",
"KORUN",
"KORUS",
"KOSES",
"KOTCH",
"KOTOS",
"KOTOW",
"KOURA",
"KRAAL",
"KRABS",
"KRAFT",
"KRAIS",
"KRAIT",
"KRANG",
"KRANS",
"KRANZ",
"KRAUT",
"KRAYS",
"KREEP",
"KRENG",
"KREWE",
"KRONA",
"KRONE",
"KROON",
"KRUBI",
"KRUNK",
"KSARS",
"KUBIE",
"KUDOS",
"KUDUS",
"KUDZU",
"KUFIS",
"KUGEL",
"KUIAS",
"KUKRI",
"KUKUS",
"KULAK",
"KULAN",
"KULAS",
"KULFI",
"KUMIS",
"KUMYS",
"KURIS",
"KURRE",
"KURTA",
"KURUS",
"KUSSO",
"KUTAS",
"KUTCH",
"KUTIS",
"KUTUS",
"KUZUS",
"KVASS",
"KVELL",
"KWELA",
"KYACK",
"KYAKS",
"KYANG",
"KYARS",
"KYATS",
"KYBOS",
"KYDST",
"KYLES",
"KYLIE",
"KYLIN",
"KYLIX",
"KYLOE",
"KYNDE",
"KYNDS",
"KYPES",
"KYRIE",
"KYTES",
"KYTHE",
"LAARI",
"LABDA",
"LABIA",
"LABIS",
"LABRA",
"LACED",
"LACER",
"LACES",
"LACET",
"LACEY",
"LACKS",
"LADDY",
"LADED",
"LADER",
"LADES",
"LAERS",
"LAEVO",
"LAGAN",
"LAHAL",
"LAHAR",
"LAICH",
"LAICS",
"LAIDS",
"LAIGH",
"LAIKA",
"LAIKS",
"LAIRD",
"LAIRS",
"LAIRY",
"LAITH",
"LAITY",
"LAKED",
"LAKER",
"LAKES",
"LAKHS",
"LAKIN",
"LAKSA",
"LALDY",
"LALLS",
"LAMAS",
"LAMBS",
"LAMBY",
"LAMED",
"LAMER",
"LAMES",
"LAMIA",
"LAMMY",
"LAMPS",
"LANAI",
"LANAS",
"LANCH",
"LANDE",
"LANDS",
"LANES",
"LANKS",
"LANTS",
"LAPIN",
"LAPIS",
"LAPJE",
"LARCH",
"LARDS",
"LARDY",
"LAREE",
"LARES",
"LARGO",
"LARIS",
"LARKS",
"LARKY",
"LARNS",
"LARNT",
"LARUM",
"LASED",
"LASER",
"LASES",
"LASSI",
"LASSU",
"LASSY",
"LASTS",
"LATAH",
"LATED",
"LATEN",
"LATEX",
"LATHI",
"LATHS",
"LATHY",
"LATKE",
"LATUS",
"LAUAN",
"LAUCH",
"LAUDS",
"LAUFS",
"LAUND",
"LAURA",
"LAVAL",
"LAVAS",
"LAVED",
"LAVER",
"LAVES",
"LAVRA",
"LAVVY",
"LAWED",
"LAWER",
"LAWIN",
"LAWKS",
"LAWNS",
"LAWNY",
"LAXED",
"LAXER",
"LAXES",
"LAXLY",
"LAYED",
"LAYIN",
"LAYUP",
"LAZAR",
"LAZED",
"LAZES",
"LAZOS",
"LAZZI",
"LAZZO",
"LEADS",
"LEADY",
"LEAFS",
"LEAKS",
"LEAMS",
"LEANS",
"LEANY",
"LEAPS",
"LEARE",
"LEARS",
"LEARY",
"LEATS",
"LEAVY",
"LEAZE",
"LEBEN",
"LECCY",
"LEDES",
"LEDGY",
"LEDUM",
"LEEAR",
"LEEKS",
"LEEPS",
"LEERS",
"LEESE",
"LEETS",
"LEEZE",
"LEFTE",
"LEFTS",
"LEGER",
"LEGES",
"LEGGE",
"LEGGO",
"LEGIT",
"LEHRS",
"LEHUA",
"LEIRS",
"LEISH",
"LEMAN",
"LEMED",
"LEMEL",
"LEMES",
"LEMMA",
"LEMME",
"LENDS",
"LENES",
"LENGS",
"LENIS",
"LENOS",
"LENSE",
"LENTI",
"LENTO",
"LEONE",
"LEPID",
"LEPRA",
"LEPTA",
"LERED",
"LERES",
"LERPS",
"LESBO",
"LESES",
"LESTS",
"LETCH",
"LETHE",
| |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 16 15:24:36 2017
@author: Zoë
"""
import numpy as np
#%%
# Implement an 8x3x8 autoencoder. This neural network should take a matrix input and return the same matrix as an output.
# First, represent the neural network as a list of layers, where each layer in the network is represented as a class with a weight matrix, bias vector, activation function, function's derivative.
class Layer:
    """
    One layer of nodes in a fully connected feed-forward neural network.

    Each layer owns the weight matrix and bias vector connecting it to the
    *next* layer, so the output layer (no ``nextLayerNodes``) has both set
    to ``None``. Weights and biases are initialized with small random
    values (normal, mean 0, sd 0.01**2) to break symmetry.
    """

    def __init__(self, nodes, inputLayer, nextLayerNodes=None):
        """
        Parameters
        ----------
        nodes : int
            Number of nodes in this layer.
        inputLayer : Layer or None
            The previous layer of the network (kept for reference only).
        nextLayerNodes : int or None
            Number of nodes in the next layer; ``None`` for the output layer.
        """
        self.nodes = nodes
        self.inputLayer = inputLayer
        self.nextLayerNodes = nextLayerNodes
        self.weights = self.computeWeights()  # (nodes, nextLayerNodes) or None
        self.bias = self.computeBias()        # (nextLayerNodes, 1) or None

    def computeWeights(self):
        """Return the weight matrix to the next layer, or None for the
        output layer. Entries are drawn from N(0, (0.01**2)**2)."""
        # Fix: compare against None with ``is`` (not ``==``), and draw the
        # whole matrix in one vectorized call instead of a double loop.
        if self.nextLayerNodes is None:
            return None  # output layer has no outgoing weights
        return np.random.normal(0.0, 0.01 ** 2,
                                size=(self.nodes, self.nextLayerNodes))

    def computeBias(self):
        """Return the bias column vector for the next layer, or None for
        the output layer."""
        if self.nextLayerNodes is None:
            return None  # output layer has no outgoing bias
        return np.random.normal(0.0, 0.01 ** 2,
                                size=(self.nextLayerNodes, 1))

    def activation(self, inputLayer, x):
        """
        Evaluate the sigmoid activation of the layer fed by ``inputLayer``.

        ``x`` is the input column vector (either the network inputs or the
        activation of the previous layer); ``inputLayer`` supplies the
        weights/bias used for the weighted sum.

        Computes z = W^T x + b, then f(z) = 1 / (1 + e^-z).
        (The derivative, used in backprop, is f'(z) = f(z) * (1 - f(z)).)
        """
        z = np.dot(np.transpose(inputLayer.weights), x) + inputLayer.bias
        return 1.0 / (1.0 + np.exp(-z))

    def __repr__(self):
        # Fix: the original formatted the bound ``activation`` method,
        # which prints a useless "<bound method ...>" string.
        return "Layer(nodes={0})".format(self.nodes)
#%%
# Backpropagation algorithm to calculate delta values.
def backpropagation(network, x, y):
    """
    Run one forward and one backward pass for a single training example.

    Parameters
    ----------
    network : list of Layer
        The layers of the network, input layer first.
    x, y : column vectors
        Input and target output for one example.

    Returns
    -------
    (gradw, gradb, endact) where ``gradw``/``gradb`` are per-layer lists
    of gradients of the cost w.r.t. the outgoing weights/bias of every
    layer except the last, and ``endact`` is the output-layer activation.
    """
    n_layers = len(network)

    # ---- forward pass: cache each layer's activation column vector.
    acts = [x]
    for idx in range(1, n_layers):
        acts.append(network[idx].activation(network[idx - 1], acts[idx - 1]))
    output = acts[-1]

    # ---- backward pass.
    # Output-layer delta: d = -(y - a) * f'(z), with f'(z) = a * (1 - a).
    delta = -(y - output) * (output * (1 - output))

    gradw = [None] * (n_layers - 1)
    gradb = [None] * (n_layers - 1)
    # Walk from the second-to-last layer down to the first; the gradient
    # of a layer can be written as soon as the delta of the next layer is
    # known: gradW = a * d^T, gradB = d.
    for idx in range(n_layers - 2, -1, -1):
        layer_act = acts[idx]
        gradw[idx] = np.dot(layer_act, np.transpose(delta))
        gradb[idx] = delta
        if idx == 0:
            break  # no delta needed for the input layer
        # Propagate the delta one layer back: d = W d(l+1) * f'(z).
        delta = np.dot(network[idx].weights, delta) * (layer_act * (1 - layer_act))

    return gradw, gradb, output
def gradientdescent(network, xmatrix, ymatrix, alpha = 0.5, weightdecay = 0.9):
""" Use a linear regression cost function. J(W,b,x,y) = 0.5*(h(x) - y(x))**2 where x,y are true input and answer from training set and h is the predicted answer."""
# set parameters
# alpha = 0.5 # learning rate (start with 0.05, could try 0.5 or 0.1)
# weightdecay = .9 # lambda (weight decay parameter, start with 0.9)
# Initialize delW and delB with zeros of same dimensions as weight/bias at each layer.
delW = [None]*(len(network)-1) # list to store weight matrix for each layer
delB = [None]*(len(network)-1) # list to store bias vector for each layer
for layer in range(len(network)-1): # for each layer except the last
currentnodes = network[layer].nodes
nextnodes = network[layer].nextLayerNodes
delW[layer] = np.zeros([currentnodes, nextnodes])
delB[layer] = np.zeros([nextnodes, 1])
# Calculate delW and delB by adding the grad values for each input
nodesperinput = np.shape(xmatrix)[0] # number of rows in input matrix
nodesperoutput = np.shape(ymatrix)[0] # number of rows in output matrix
assert nodesperinput == network[0].nodes # make sure input is correct size
assert nodesperoutput == network[-1].nodes # make sure output is correct size
m = np.shape(xmatrix)[1] # number of columns in the input matrix.
assert m == np.shape(ymatrix)[1] # make sure same number of inputs and outputs
# Store output values to compare to correct y values. Create a matrix of zeros with same shape as ymatrix.
finalactivation = np.zeros_like(ymatrix)
# Each column is one set of inputs.
for i in range(m): # loop through each input
x = np.zeros([nodesperinput, 1])
y = np.zeros([nodesperoutput, 1])
x[:,0] = xmatrix[:,i]
y[:,0] = ymatrix[:,i]
# calculate the deltas using backpropagation
# actxy is the evaluated output of a given x,y input
gradW, gradB, actxy = backpropagation(network, x, y)
| |
# Source: tecnickcom/binsearch — python/test/test_binsearch_col.py
"""Tests for binsearch module - column mode."""
import binsearch as bs
import os
from unittest import TestCase
# Number of data rows in the binary fixture file the tests search over.
nrows = 251

# Binary-search fixtures for 8-bit columns.
# Each tuple appears to hold (first, last, search_value) followed by six
# expected results from the find-first/find-last column-search variants
# (found position plus first/last bounds for each variant).
# NOTE(review): field meaning inferred from the values only — confirm
# against the TestCase methods that consume these tables (not visible here).
testDataCol8 = [
    (0, 251, 0x00, 0, 0, 0, 1, 2, 2),
    (1, 251, 0x00, 1, 1, 1, 1, 2, 2),
    (0, 251, 0x01, 2, 2, 2, 2, 3, 3),
    (0, 251, 0x0F, 16, 16, 16, 16, 17, 17),
    (0, 251, 0x10, 17, 17, 17, 17, 18, 18),
    (0, 251, 0x1F, 32, 32, 32, 32, 33, 33),
    (0, 251, 0x20, 33, 33, 33, 33, 34, 34),
    (0, 251, 0x2F, 48, 48, 48, 48, 49, 49),
    (0, 251, 0x30, 49, 49, 49, 49, 50, 50),
    (0, 251, 0x3F, 64, 64, 64, 64, 65, 65),
    (0, 251, 0x40, 65, 65, 65, 65, 66, 66),
    (0, 251, 0x4F, 80, 80, 80, 80, 81, 81),
    (0, 251, 0x50, 81, 81, 81, 81, 82, 82),
    (0, 251, 0x5F, 96, 96, 96, 96, 97, 97),
    (0, 251, 0x60, 97, 97, 97, 97, 98, 98),
    (0, 251, 0x6F, 112, 112, 112, 112, 113, 113),
    (0, 251, 0x70, 113, 113, 113, 113, 114, 114),
    (0, 251, 0x7F, 128, 128, 128, 128, 129, 129),
    (0, 251, 0x80, 129, 129, 129, 129, 130, 130),
    (0, 251, 0x8F, 144, 144, 144, 144, 145, 145),
    (0, 251, 0x90, 145, 145, 145, 145, 146, 146),
    (0, 251, 0x9F, 160, 160, 160, 160, 161, 161),
    (0, 251, 0xA0, 161, 161, 161, 161, 162, 162),
    (0, 251, 0xAF, 176, 176, 176, 176, 177, 177),
    (0, 251, 0xB0, 177, 177, 177, 177, 178, 178),
    (0, 251, 0xBF, 192, 192, 192, 192, 193, 193),
    (0, 251, 0xC0, 193, 193, 193, 193, 194, 194),
    (0, 251, 0xCF, 208, 208, 208, 208, 209, 209),
    (0, 251, 0xD0, 209, 209, 209, 209, 210, 210),
    (0, 251, 0xDF, 224, 224, 224, 224, 225, 225),
    (0, 251, 0xE0, 225, 225, 225, 225, 226, 226),
    (0, 251, 0xEF, 240, 240, 240, 240, 241, 241),
    (0, 251, 0xF0, 241, 241, 241, 241, 242, 242),
    (0, 251, 0xF8, 249, 249, 249, 249, 250, 250),
    (0, 251, 0xFF, 250, 250, 250, 250, 251, 251),
    (0, 251, 0xF9, 251, 249, 250, 251, 249, 250),
    (0, 51, 0x70, 51, 50, 51, 51, 50, 51),
    (150, 251, 0x70, 251, 149, 150, 251, 149, 150),
]

# Same fixtures for the 8-bit *sub*-column (bit-range) search variants;
# currently identical to testDataCol8.
testDataColSub8 = [
    (0, 251, 0x00, 0, 0, 0, 1, 2, 2),
    (1, 251, 0x00, 1, 1, 1, 1, 2, 2),
    (0, 251, 0x01, 2, 2, 2, 2, 3, 3),
    (0, 251, 0x0F, 16, 16, 16, 16, 17, 17),
    (0, 251, 0x10, 17, 17, 17, 17, 18, 18),
    (0, 251, 0x1F, 32, 32, 32, 32, 33, 33),
    (0, 251, 0x20, 33, 33, 33, 33, 34, 34),
    (0, 251, 0x2F, 48, 48, 48, 48, 49, 49),
    (0, 251, 0x30, 49, 49, 49, 49, 50, 50),
    (0, 251, 0x3F, 64, 64, 64, 64, 65, 65),
    (0, 251, 0x40, 65, 65, 65, 65, 66, 66),
    (0, 251, 0x4F, 80, 80, 80, 80, 81, 81),
    (0, 251, 0x50, 81, 81, 81, 81, 82, 82),
    (0, 251, 0x5F, 96, 96, 96, 96, 97, 97),
    (0, 251, 0x60, 97, 97, 97, 97, 98, 98),
    (0, 251, 0x6F, 112, 112, 112, 112, 113, 113),
    (0, 251, 0x70, 113, 113, 113, 113, 114, 114),
    (0, 251, 0x7F, 128, 128, 128, 128, 129, 129),
    (0, 251, 0x80, 129, 129, 129, 129, 130, 130),
    (0, 251, 0x8F, 144, 144, 144, 144, 145, 145),
    (0, 251, 0x90, 145, 145, 145, 145, 146, 146),
    (0, 251, 0x9F, 160, 160, 160, 160, 161, 161),
    (0, 251, 0xA0, 161, 161, 161, 161, 162, 162),
    (0, 251, 0xAF, 176, 176, 176, 176, 177, 177),
    (0, 251, 0xB0, 177, 177, 177, 177, 178, 178),
    (0, 251, 0xBF, 192, 192, 192, 192, 193, 193),
    (0, 251, 0xC0, 193, 193, 193, 193, 194, 194),
    (0, 251, 0xCF, 208, 208, 208, 208, 209, 209),
    (0, 251, 0xD0, 209, 209, 209, 209, 210, 210),
    (0, 251, 0xDF, 224, 224, 224, 224, 225, 225),
    (0, 251, 0xE0, 225, 225, 225, 225, 226, 226),
    (0, 251, 0xEF, 240, 240, 240, 240, 241, 241),
    (0, 251, 0xF0, 241, 241, 241, 241, 242, 242),
    (0, 251, 0xF8, 249, 249, 249, 249, 250, 250),
    (0, 251, 0xFF, 250, 250, 250, 250, 251, 251),
    (0, 251, 0xF9, 251, 249, 250, 251, 249, 250),
    (0, 51, 0x70, 51, 50, 51, 51, 50, 51),
    (150, 251, 0x70, 251, 149, 150, 251, 149, 150),
]

# Binary-search fixtures for 16-bit columns; same field layout as above
# but with 16-bit search values.
testDataCol16 = [
    (0, 251, 0x0000, 0, 0, 0, 0, 1, 1),
    (1, 251, 0x0001, 1, 1, 1, 1, 2, 2),
    (0, 251, 0x0102, 2, 2, 2, 2, 3, 3),
    (0, 251, 0x0F10, 16, 16, 16, 16, 17, 17),
    (0, 251, 0x1011, 17, 17, 17, 17, 18, 18),
    (0, 251, 0x1F20, 32, 32, 32, 32, 33, 33),
    (0, 251, 0x2021, 33, 33, 33, 33, 34, 34),
    (0, 251, 0x2F30, 48, 48, 48, 48, 49, 49),
    (0, 251, 0x3031, 49, 49, 49, 49, 50, 50),
    (0, 251, 0x3F40, 64, 64, 64, 64, 65, 65),
    (0, 251, 0x4041, 65, 65, 65, 65, 66, 66),
    (0, 251, 0x4F50, 80, 80, 80, 80, 81, 81),
    (0, 251, 0x5051, 81, 81, 81, 81, 82, 82),
    (0, 251, 0x5F60, 96, 96, 96, 96, 97, 97),
    (0, 251, 0x6061, 97, 97, 97, 97, 98, 98),
    (0, 251, 0x6F70, 112, 112, 112, 112, 113, 113),
    (0, 251, 0x7071, 113, 113, 113, 113, 114, 114),
    (0, 251, 0x7F80, 128, 128, 128, 128, 129, 129),
    (0, 251, 0x8081, 129, 129, 129, 129, 130, 130),
    (0, 251, 0x8F90, 144, 144, 144, 144, 145, 145),
    (0, 251, 0x9091, 145, 145, 145, 145, 146, 146),
    (0, 251, 0x9FA0, 160, 160, 160, 160, 161, 161),
    (0, 251, 0xA0A1, 161, 161, 161, 161, 162, 162),
    (0, 251, 0xAFB0, 176, 176, 176, 176, 177, 177),
    (0, 251, 0xB0B1, 177, 177, 177, 177, 178, 178),
    (0, 251, 0xBFC0, 192, 192, 192, 192, 193, 193),
    (0, 251, 0xC0C1, 193, 193, 193, 193, 194, 194),
    (0, 251, 0xCFD0, 208, 208, 208, 208, 209, 209),
    (0, 251, 0xD0D1, 209, 209, 209, 209, 210, 210),
    (0, 251, 0xDFE0, 224, 224, 224, 224, 225, 225),
    (0, 251, 0xE0E1, 225, 225, 225, 225, 226, 226),
    (0, 251, 0xEFF0, 240, 240, 240, 240, 241, 241),
    (0, 251, 0xF0F1, 241, 241, 241, 241, 242, 242),
    (0, 251, 0xF8F9, 249, 249, 249, 249, 250, 250),
    (0, 251, 0xFFFF, 250, 250, 250, 250, 251, 251),
    (0, 251, 0xF9F9, 251, 249, 250, 251, 249, 250),
    (0, 51, 0x7071, 51, 50, 51, 51, 50, 51),
    (150, 251, 0x7071, 251, 149, 150, 251, 149, 150),
]
testDataColSub16 = [
(0, 251, 0x0000, 0, 0, 0, 0, 1, 1),
(1, 251, 0x0001, 1, 1, 1, 1, 2, 2),
(0, 251, 0x0102, 2, 2, 2, 2, 3, 3),
(0, 251, 0x0F10, 16, 16, 16, 16, 17, 17),
(0, 251, 0x1011, 17, 17, 17, 17, 18, 18),
(0, 251, 0x1F20, 32, 32, 32, 32, 33, 33),
(0, 251, 0x2021, 33, 33, 33, 33, 34, 34),
(0, 251, 0x2F30, 48, 48, 48, 48, 49, 49),
(0, 251, 0x3031, 49, 49, 49, 49, 50, 50),
(0, 251, 0x3F40, 64, 64, 64, 64, 65, 65),
(0, 251, 0x4041, 65, 65, 65, 65, 66, 66),
(0, 251, 0x4F50, 80, 80, 80, 80, 81, 81),
(0, 251, 0x5051, 81, 81, 81, 81, 82, 82),
(0, 251, 0x5F60, 96, 96, 96, 96, 97, 97),
(0, 251, 0x6061, 97, 97, 97, 97, 98, 98),
(0, 251, 0x6F70, 112, 112, 112, 112, 113, 113),
(0, 251, 0x7071, 113, 113, 113, 113, 114, 114),
(0, 251, 0x7F80, 128, 128, 128, 128, 129, 129),
(0, 251, 0x8081, 129, 129, 129, 129, 130, 130),
(0, 251, 0x8F90, 144, 144, 144, 144, 145, 145),
(0, 251, 0x9091, 145, 145, 145, 145, 146, 146),
(0, 251, 0x9FA0, 160, 160, 160, 160, 161, 161),
(0, 251, 0xA0A1, 161, 161, 161, 161, 162, 162),
(0, 251, 0xAFB0, 176, 176, 176, 176, 177, 177),
(0, 251, 0xB0B1, 177, 177, 177, 177, 178, 178),
(0, 251, 0xBFC0, 192, 192, | |
os.mkdir mode parameter must have length of 4")
pythonPg = """\"\"\"import os
if os.path.exists('%s') == False:
os.mkdir('%s', %s)\"\"\"
""" % (directory, directory, str(mode))
cmdStr = """python -c %s""" % pythonPg
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def remote(name,remote_host,directory,mode=None):
cmd=CreateDirIfNecessaryWithMode(name,directory,ctxt=REMOTE,remoteHost=remote_host,mode=mode)
cmd.run(validateAfter=False)
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
class CopyToLocal(Command):
    """Copy a file or directory tree from a remote host to this host via 'scp -r'.

    Args:
        name: descriptive command name (used for logging).
        srcHost: host to copy from.
        srcDirectory: path on srcHost (directories recurse because of -r).
        dstDirectory: local destination path.
        ctxt: execution context; defaults to LOCAL since scp itself runs locally.
    """
    def __init__(self, name, srcHost, srcDirectory, dstDirectory, ctxt=LOCAL):
        self.srcDirectory = srcDirectory
        self.srcHost = srcHost
        self.dstDirectory = dstDirectory
        cmdStr = "%s -r %s:%s %s" % (findCmdInPath('scp'), srcHost, srcDirectory, dstDirectory)
        # Bug fix: honor the caller-supplied ctxt instead of hard-coding LOCAL,
        # which silently ignored the constructor argument.
        Command.__init__(self, name, cmdStr, ctxt=ctxt)
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
class RemoteCopyPreserve(Command):
    """Copy srcDirectory to dstHost:dstDirectory with 'scp -rp' (recursive, preserving attributes)."""
    def __init__(self, name, srcDirectory, dstHost, dstDirectory, ctxt=LOCAL, remoteHost=None):
        self.srcDirectory = srcDirectory
        self.dstHost = dstHost
        self.dstDirectory = dstDirectory
        scp = findCmdInPath('scp')
        copyCmd = "%s -rp %s %s:%s" % (scp, srcDirectory, dstHost, dstDirectory)
        Command.__init__(self, name, copyCmd, ctxt, remoteHost)
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
class GPumDiskFree(Command):
    """Check available disk space in a directory.

    The same class, without the GPum prefix, exists in gppylib, but it does
    not work correctly in 3.3.x (see MPP-6647), hence this local copy.
    """
    def __init__(self, name, directory, ctxt=LOCAL, remoteHost=None):
        self.directory = directory
        dfCommand = SYSTEM.getDiskFreeCmd()
        # Work around a 3.3.x bug (MPP-6647) by forcing 1K-block output.
        if options.phase2 == False and isinstance(SYSTEM, SolarisPlatform):
            dfCommand = dfCommand + 'k'
        if options.phase2 == False and isinstance(SYSTEM, LinuxPlatform):
            dfCommand = dfCommand + 'Pk'
        Command.__init__(self, name, "%s %s" % (dfCommand, directory), ctxt, remoteHost)

    @staticmethod
    def get_size(name, remote_host, directory):
        """Run df for *directory* on *remote_host* and return the free bytes."""
        dfCmd = GPumDiskFree(name=name, directory=directory, ctxt=REMOTE, remoteHost=remote_host)
        dfCmd.run(validateAfter=True)
        return dfCmd.get_bytes_free()

    def get_bytes_free(self):
        """Parse df output of the form:
        Filesystem   512-blocks      Used Available Capacity  Mounted on
        /dev/disk0s2  194699744 158681544  35506200    82%    /
        and return the 'Available' column converted from 1K blocks to bytes.
        """
        dataLine = self.results.stdout.split('\n')[1]
        return int(dataLine.split()[3]) * 1024
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
class CreateHardLink(Command):
    """Create a hard link, or many links when argsFile lists src/dst pairs."""
    def __init__(self, name, srcFile, hardLink, ctxt=LOCAL, remoteHost=None, argsFile=None):
        if argsFile == None:
            self.srcFile = srcFile
            self.hardLink = hardLink
            cmdStr = "%s %s %s" % (findCmdInPath('ln'), srcFile, hardLink)
        else:
            # Feed "src dst" pairs from argsFile to ln, two arguments at a time.
            cmdStr = "%s %s | %s -n 2 %s" % (findCmdInPath("cat"), argsFile, findCmdInPath("xargs"), findCmdInPath('ln'))
        Command.__init__(self, name, cmdStr, ctxt, remoteHost)
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
class GPumMoveDirectory(Command):
    """Move (rename) a local directory using 'mv -f'."""
    def __init__(self, name, srcDirectory, dstDirectory, ctxt=LOCAL, remoteHost=None):
        self.srcDirectory = srcDirectory
        self.dstDirectory = dstDirectory
        moveCmd = "%s -f %s %s" % (findCmdInPath('mv'), srcDirectory, dstDirectory)
        Command.__init__(self, name, moveCmd, ctxt, remoteHost)
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
class GPumMoveDirectoryContents(Command):
    """Move the contents of a local directory into another directory.

    Equivalent shell pipeline (srcDirectory=temp1, dstDirectory=temp2):
        ls -1 temp1 > temp1.dirfilelist
        cat temp1.dirfilelist | xargs -I xxx mv temp1/xxx temp2
        rm temp1.dirfilelist
    """
    def __init__(self, name, srcDirectory, dstDirectory, ctxt=LOCAL, remoteHost=None):
        self.srcDirectory = srcDirectory
        self.srcDirectoryFiles = self.srcDirectory + ".dirfilelist"
        self.dstDirectory = dstDirectory
        cmdStr = "%s -1 %s > %s;%s %s | %s -I xxx %s %s/xxx %s; rm %s" % (
            findCmdInPath("ls"),
            self.srcDirectory,
            self.srcDirectoryFiles,
            findCmdInPath("cat"),
            self.srcDirectoryFiles,
            findCmdInPath("xargs"),
            findCmdInPath("mv"),
            self.srcDirectory,
            self.dstDirectory,
            self.srcDirectoryFiles,
        )
        Command.__init__(self, name, cmdStr, ctxt, remoteHost)
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
class RemoveFileDirectoryFromList(Command):
    """
    Remove a list of files/directories: 'cat <listfile> | xargs rm -rf'.

    The list file (one path per line) lives at fileListLocation on the host
    where the command runs; setupListFile() can stage it there.
    """
    def __init__(self, name, fileListLocation, ctxt=LOCAL, remoteHost=None):
        self.fileListLocation = fileListLocation
        self.ctxt = ctxt
        self.remoteHost = remoteHost
        cmdStr = "%s %s | %s %s -rf" % \
            ( findCmdInPath("cat")
            , fileListLocation
            , findCmdInPath("xargs")
            , findCmdInPath('rm')
            )
        Command.__init__(self, name, cmdStr, ctxt, remoteHost)

    def setupListFile(self, tempDir, theList):
        """Write theList (one path per line) to a local temp file, scp it to
        remoteHost:fileListLocation, then delete the local temp file."""
        fd, fdName = tempfile.mkstemp(prefix='tmpRFDFL', dir=tempDir)
        # Bug fix: wrap the OS-level descriptor returned by mkstemp instead of
        # opening the path a second time, which leaked the original descriptor;
        # the with-block also guarantees the file is closed on error.
        with os.fdopen(fd, "w") as listFile:
            for row in theList:
                listFile.write(row + "\n")
        rmCmd = RemoteCopy( name = "gpupgrademirror copy RFDFL: %s to %s:%s" % (fdName, self.remoteHost, self.fileListLocation)
                          , srcDirectory = fdName
                          , dstHost = self.remoteHost
                          , dstDirectory = self.fileListLocation
                          , ctxt = LOCAL
                          , remoteHost = None
                          )
        rmCmd.run(validateAfter=True)
        os.unlink(fdName)
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
class CreateTarFromList(Command):
    """
    Create a tar file from a list of files: 'cat <list> | xargs tar rvPpf <tar> -C <dir>'.
    WARNING. This class is not used and is untested.
    """
    def __init__(self, name, fileListLocation, dstTarFile, srcDirectory, ctxt=LOCAL, remoteHost=None):
        self.fileListLocation = fileListLocation
        self.dstTarFile = dstTarFile
        self.srcDirectory = srcDirectory
        self.ctxt = ctxt
        self.remoteHost = remoteHost
        tarCmd = SYSTEM.getTarCmd()
        cmdStr = "%s %s | %s %s rvPpf %s -C %s " % \
            ( findCmdInPath("cat")
            , fileListLocation
            , findCmdInPath('xargs')
            , tarCmd
            , self.dstTarFile
            , srcDirectory
            )
        Command.__init__(self, name, cmdStr, ctxt, remoteHost)

    def setupListFile(self, tempDir, theList):
        """Write theList (one path per line) to a local temp file, scp it to
        remoteHost:fileListLocation, then delete the local temp file."""
        fd, fdName = tempfile.mkstemp(prefix='tmpCTFL', dir=tempDir)
        # Bug fix: wrap the OS-level descriptor returned by mkstemp instead of
        # opening the path a second time, which leaked the original descriptor.
        with os.fdopen(fd, "w") as listFile:
            for row in theList:
                listFile.write(row + "\n")
        rmCmd = RemoteCopy( name = "gpupgrademirror copy CTFL: %s to %s:%s" % (fdName, self.remoteHost, self.fileListLocation)
                          , srcDirectory = fdName
                          , dstHost = self.remoteHost
                          , dstDirectory = self.fileListLocation
                          , ctxt = LOCAL
                          , remoteHost = None
                          )
        rmCmd.run(validateAfter=True)
        os.unlink(fdName)
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
class FileDirectoryList(Command):
    """List names matching an ls pattern, one per line ('ls -1 <pattern>')."""
    def __init__(self, name, filePattern, ctxt=LOCAL, remoteHost=None):
        self.filePattern = filePattern
        Command.__init__(self, name, "%s -1 %s" % (findCmdInPath('ls'), filePattern), ctxt, remoteHost)

    @staticmethod
    def get_list(name, remote_host, file_pattern):
        """Run the listing on remote_host (unvalidated) and return the names found."""
        lsCmd = FileDirectoryList(name, filePattern=file_pattern, ctxt=REMOTE, remoteHost=remote_host)
        lsCmd.run(validateAfter=False)
        return lsCmd.get_result_list()

    def get_result_list(self):
        """Return stdout split into lines, dropping the trailing empty entry."""
        names = self.results.stdout.split('\n')
        if names and names[-1] == '':
            names.pop()
        return names
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
class FilesInDir(Command):
    """Find regular files beneath a path with 'find <path> -type f'.

    When remoteTempFile is given, the listing is redirected into that file on
    the remote host instead of being captured on stdout (useful for very large
    result sets).
    """
    def __init__(self, name, filePattern, ctxt=LOCAL, remoteHost=None, remoteTempFile=None):
        self.filePattern = filePattern
        self.remoteHost = remoteHost
        self.remoteTempFile = remoteTempFile
        if remoteTempFile != None:
            remoteTempFilePart = " > %s" % remoteTempFile
        else:
            remoteTempFilePart = ""
        find = findCmdInPath('find')
        cmdStr = "%s %s -type f %s" % (find, filePattern, remoteTempFilePart)
        self.cmdStr = cmdStr
        Command.__init__(self, name, cmdStr, ctxt, remoteHost)

    def get_result_list(self, localTempFile=None):
        """Return the list of file paths found.

        If localTempFile is given, the remote temp file is copied locally,
        removed remotely, and read from disk; otherwise captured stdout is
        parsed.
        """
        if localTempFile != None:
            rcCmd = CopyToLocal( name = "gpupgrademirror get result list copy: %s:%s to %s" % (self.remoteHost, self.remoteTempFile, localTempFile)
                               , srcHost = self.remoteHost
                               , srcDirectory = self.remoteTempFile
                               , dstDirectory = localTempFile
                               )
            rcCmd.run(validateAfter = True)
            rmCmd = RemoveFiles( name = 'gpupgrademirror remove remote temp file: %s:%s' % (self.remoteHost, self.remoteTempFile)
                               , directory = self.remoteTempFile
                               , ctxt = REMOTE
                               , remoteHost = self.remoteHost
                               )
            rmCmd.run(validateAfter = True)
            # Bug fix: a context manager guarantees the file is closed (the old
            # code never closed it); the explicit seek(0) was redundant.
            with open(localTempFile, "r") as fd:
                fileList = [line.rstrip('\n') for line in fd]
        else:
            fileList = self.results.stdout.split('\n')
            if fileList and fileList[-1] == '':
                # Remove any trailing empty string produced by the final newline
                fileList.pop()
        return fileList
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
class FileAndSize(Command):
    """
    List files in a directory together with their sizes in 1K blocks, sorted
    by size ('find | xargs du -k | sort -n'). If zeroSize is True, only files
    of 0 bytes are returned.

    When remoteTempFile is given, the listing is redirected to that file on
    the remote host instead of being captured on stdout.
    """
    def __init__(self, name, filePattern, ctxt=LOCAL, remoteHost=None, zeroSize=False, remoteTempFile=None):
        self.filePattern = filePattern
        self.remoteHost = remoteHost
        self.remoteTempFile = remoteTempFile
        if remoteTempFile != None:
            remoteTempFilePart = " > %s" % remoteTempFile
        else:
            remoteTempFilePart = ""
        if zeroSize == True:
            sizeArg = "-size 0c"
        else:
            sizeArg = ''
        find = findCmdInPath('find')
        xargs = findCmdInPath('xargs')
        du = findCmdInPath('du')
        sort = findCmdInPath('sort')
        cmdStr = "%s %s -type f %s | %s %s -k | %s -n %s" % (find, filePattern, sizeArg, xargs, du, sort, remoteTempFilePart)
        self.cmdStr = cmdStr
        Command.__init__(self, name, cmdStr, ctxt, remoteHost)

    def get_result_list(self, localTempFile=None):
        """Return a list of [size_in_kb, path] pairs, smallest first.

        If localTempFile is given, the remote temp file is copied locally,
        removed remotely, and read from disk; otherwise captured stdout is
        parsed.
        """
        retList = []
        if localTempFile != None:
            rcCmd = CopyToLocal( name = "gpupgrademirror get result list copy: %s:%s to %s" % (self.remoteHost, self.remoteTempFile, localTempFile)
                               , srcHost = self.remoteHost
                               , srcDirectory = self.remoteTempFile
                               , dstDirectory = localTempFile
                               )
            rcCmd.run(validateAfter = True)
            rmCmd = RemoveFiles( name = 'gpupgrademirror remove remote temp file: %s:%s' % (self.remoteHost, self.remoteTempFile)
                               , directory = self.remoteTempFile
                               , ctxt = REMOTE
                               , remoteHost = self.remoteHost
                               )
            rmCmd.run(validateAfter = True)
            # Bug fix: a context manager guarantees the file is closed (the old
            # code never closed it); the explicit seek(0) was redundant.
            with open(localTempFile, "r") as fd:
                fileList = [line for line in fd]
        else:
            fileList = self.results.stdout.split('\n')
            if fileList and fileList[-1] == '':
                # Remove any trailing empty string produced by the final newline
                fileList.pop()
        for entry in fileList:
            # Bug fix: split only on the first whitespace run so file names
            # containing spaces do not raise ValueError (du emits "<size>\t<path>").
            sizef, filef = entry.split(None, 1)
            retList.append([sizef.strip(), filef.strip()])
        return retList
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
class DirectoryList(Command):
""" This method gets a list of all directories and any sub-directories in a directory """
def __init__(self, name, dirLocation, ctxt=LOCAL, remoteHost=None):
self.dirLocation = dirLocation
find = findCmdInPath('find')
sort = findCmdInPath('sort')
cmdStr= "%s %s -type d | %s " % (find, dirLocation, sort)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def get_list(name, remote_host, dir_location):
theCmd = DirectoryList(name, dirLocation = dir_location, ctxt = REMOTE, remoteHost = remote_host)
theCmd.run(validateAfter=False)
return theCmd.get_result_list()
def get_result_list(self):
dirList = self.results.stdout.split('\n')
if dirList | |
# Source: repository timcu/irc_builder, file python/ircbuilder/__init__.py
import base64
import json
import logging
import math
import pprint
import queue
import random
import socket
import ssl
import string
import sys
import threading
import time
import zlib
from contextlib import contextmanager
from ircbuilder import nodebuilder
from ircbuilder.version import VERSION
# Maximum length of nickname used in IRC
NICK_MAX_LEN = 9
# Character set used to encode/decode all IRC socket traffic (see encode()).
CHAR_SET = "UTF-8"
# Uncomment for verbose protocol tracing:
# logging.basicConfig(level=logging.DEBUG)
# Module-level logger; callers configure handlers/levels via the logging module.
logger = logging.getLogger(__name__)
def str_xyz(x, y, z):
    """Return the coordinates formatted as "(x,y,z) " — note the trailing space."""
    return "({},{},{}) ".format(x, y, z)
def str_xyz_int(x, y, z):
    """Format coordinates after rounding each to the nearest integer (halves round up)."""
    rounded = (math.floor(v + 0.5) for v in (x, y, z))
    return str_xyz(*rounded)
def encode(s):
    """Encode *s* to bytes using the module-wide IRC character set."""
    return s.encode(CHAR_SET)
def escape(s):
    """Escape characters which would break the api, using HTML-entity style
    escaping, and flatten newlines to single spaces.

    NOTE(review): the replacement targets had been mangled into no-op identity
    replacements (e.g. replacing "&" with "&"), so nothing was actually being
    escaped; restored the HTML entities implied by the docstring and by '&'
    being escaped first.
    """
    s = s.replace("&", "&amp;")  # must be first so later entities are not double-escaped
    s = s.replace("\r\n", " ")
    s = s.replace("\n", " ")
    s = s.replace("\r", " ")
    s = s.replace("(", "&#40;")
    s = s.replace(")", "&#41;")
    s = s.replace(",", "&#44;")
    s = s.replace("§", "&sect;")
    return s
class MinetestConnection:
"""Connection to IRC Server sending commands to Minetest"""
def __init__(self, ircserver, mtbotnick, pybotnick, port=6697):
context = ssl.create_default_context()
self.ircsock = context.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM), server_hostname=ircserver)
try:
self.ircsock.connect((ircserver, port))
cert = self.ircsock.getpeercert()
logger.debug(f"cert={pprint.pformat(cert)}")
except ssl.SSLCertVerificationError as scve:
# Probably hostname mismatch or certificate expiry
logger.warning(f"Certificate verification failed so retrying without verification. This will be disallowed in future. {scve}")
# Retry with out requiring certificate verification. In future we can disallow this. 20201230
context = ssl.SSLContext()
self.ircsock = context.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM), server_hostname=ircserver)
self.ircsock.connect((ircserver, port))
cert = self.ircsock.getpeercert()
logger.debug(f"cert={pprint.pformat(cert)}")
except ssl.SSLError as se:
logger.warning(f"You have initiated a connection without using SSL. Data packets not encrypted, Recommend using port 6697 instead of {port}. {se}")
# retry without TLS. In future we can disallow this 20201230
self.ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.ircsock.connect((ircserver, port))
self.mtbotnick = mtbotnick
self.pybotnick = pybotnick
self.channel = "##" + "".join(random.choice(string.ascii_letters) for _ in range(6))
logger.debug(f"__init__: Random channel {self.channel}")
self.pycharm_edu_check_task = len(sys.argv) > 1 and "_window" in sys.argv[1]
# self.pycharm_edu_check_task = True # For testing only
self.irc_disabled_message = "IRC disabled because sys.argv[1] contains '_window' meaning PyCharm Edu is checking task"
self.irc_disabled_message_printed = False
self.ircserver = ircserver
self.ircserver_name = None
# building is a node dict which stores results of build commands before sending to minetest in a batch
self.building = {}
self.q_msg = queue.Queue()
self.q_num = queue.Queue()
self.receive_thread = threading.Thread(target=self.receive_irc)
# Set daemon so thread will stop when main program stops
self.receive_thread.setDaemon(True)
self.receive_thread.start()
def join_channel(self, channel=None):
if channel:
# if channel not set, use randomly generated channel
self.channel = channel
else:
logger.debug("join_channel: Joining IRC channel " + self.channel)
self.send_string("JOIN " + self.channel)
    def part_channel(self):
        """Leave the current channel and shut down the IRC socket; closing the
        socket makes the receiver thread's recv() raise OSError and exit."""
        self.send_string("PART " + self.channel)
        # NOTE(review): 0 is socket.SHUT_RD (stop receives only); the previous
        # comment said "stop sending and receiving" — confirm whether
        # SHUT_RDWR (2) was intended.
        self.ircsock.shutdown(0)
        self.ircsock.close()
    def close(self):
        """Close the connection (alias for part_channel())."""
        self.part_channel()
    def send_string(self, s):
        """Send one line to the IRC server, normalising the trailing newline.

        Does nothing (after printing a notice once) while PyCharm Edu is
        checking the task. Login passwords are masked before the line is
        logged.
        """
        if self.pycharm_edu_check_task:
            if not self.irc_disabled_message_printed:
                print(self.irc_disabled_message)
                self.irc_disabled_message_printed = True
            return
        # Adding a short delay here can stop occasional SSLError SSLV3_ALERT_BAD_RECORD_MAC
        time.sleep(0.1)
        self.ircsock.send(encode(s.strip("\r\n") + "\n"))
        if s.startswith('PRIVMSG') and ': login' in s:
            # Mask the trailing password argument of a login command before logging.
            idx_pass = s.rfind(' ')
            s = s[:idx_pass] + ' <PASSWORD REMOVED FROM LOG>'
        logger.info("SEND: " + s)
def pong(self, *items):
items = ['PONG'] + [x for x in items if x is not None]
self.send_string(' '.join(items))
    def receive_irc(self):
        """Receiver loop run on the daemon thread: read the IRC socket, answer
        PING/VERSION, and route incoming traffic onto the queues.

        Messages from the minetest bot go to self.q_msg; numeric reply codes
        from the IRC server go to self.q_num. The loop ends when the socket is
        closed (OSError), typically by part_channel().
        """
        if self.pycharm_edu_check_task:
            # IRC is disabled while PyCharm Edu checks the task; park one
            # explanatory message on the queue so waiting callers get an answer.
            logger.warning(self.irc_disabled_message)
            if self.q_msg.empty():
                self.q_msg.put(self.irc_disabled_message)
            return
        buffer = ''
        while True:
            try:
                buffer += self.ircsock.recv(2048).decode(CHAR_SET)
            except socket.timeout:
                logger.warning("socket.recv timed out!!")
            except ssl.SSLError as se:
                logger.debug(f"SSLError but not stopping receive thread {se=}")
            except OSError as ose:
                # socket has been closed so stop receiving thread probably in part_channel
                logger.debug(f"Socket closed so stopping receive thread {ose=}")
                break
            # A recv may end mid-line; keep any incomplete tail in the buffer
            # and only process complete '\r\n'-terminated lines this pass.
            last_line_complete = len(buffer) > 0 and buffer[-1:] in '\r\n'
            lines = buffer.split('\r\n')
            if not last_line_complete:
                buffer = lines[-1]
                del lines[-1]
            else:
                buffer = ''
            for line in lines:
                if len(line) == 0:
                    continue
                if logger.level > logging.DEBUG:
                    logger.info(f"RECV: {line}")
                else:
                    logger.debug(f"RECV {len(line)} {len(buffer)}: {line}")
                if line.find("PING :") == 0:
                    # Keep-alive: echo the token after "PING :" back as PONG.
                    self.pong(line[6:])
                elif line.find("VERSION") == 0:
                    self.send_string("VERSION python ircbuilder " + VERSION)
                else:
                    if line.startswith(':'):
                        parts = line[1:].split(' ', maxsplit=3)
                        # 0: sender
                        # 1: PRIVMSG
                        # 2: recipient
                        # 3: message (starting with a :)
                        sender = parts[0]
                        sender_name = sender.split('!', 1)[0]
                        if not self.ircserver_name:
                            # First prefixed line is assumed to come from the server itself.
                            self.ircserver_name = sender_name
                        recipient_name = parts[2]
                        logger.debug(f"receive_irc: SENDER: {sender_name} RECIPIENT: {recipient_name}")
                        if recipient_name == self.pybotnick:
                            # CTCP VERSION request (\x01-delimited) addressed to us.
                            if parts[1] == "PRIVMSG" and parts[3] == ":\x01VERSION\x01":
                                self.send_string("VERSION python ircbuilder " + VERSION)
                        if sender_name == self.ircserver_name:
                            # Server lines carry a numeric reply code in parts[1].
                            try:
                                message_num = int(parts[1])
                            except ValueError:
                                message_num = None
                            if message_num:
                                self.q_num.put(message_num)
                                logger.debug(f"receive_irc: Queued msg_num: {parts[1]}")
                        elif sender_name == self.mtbotnick:
                            if parts[1] == "PRIVMSG":
                                # get the message to look for commands
                                message = parts[3].split(':', 1)[1]
                                self.q_msg.put(message)
                                logger.debug(f"receive_irc: Queued message: {message}")
def send_msg(self, msg): # send private message to mtbotnick
self.send_privmsg(self.channel + " :" + msg)
def send_privmsg(self, msg): # send private message to mtbotnick
self.send_string("PRIVMSG " + msg)
def wait_for_privmsg(self, timeout=5.0):
start = time.time()
while time.time() - start < timeout:
if not self.q_msg.empty():
return self.q_msg.get()
else:
time.sleep(0.1)
logger.info("Timeout waiting for privmsg " + str(time.time() - start))
return
    def wait_for_message_num(self, message_num, timeout=15.0):
        """Poll q_num until *message_num* (or any error code >= 400) arrives.

        Non-matching numerics below 400 are consumed and silently discarded.
        Returns the numeric found, or None on timeout.
        """
        start = time.time()
        while time.time() - start < timeout:
            if not self.q_num.empty():
                num = self.q_num.get()
                if message_num == num or num >= 400:
                    # logger.debug(f"wait_for_message_num: Seconds {(time.time()-start)} waiting for {message_num} and found {num}")
                    return num
            else:
                time.sleep(0.1)
        logger.warning(f"Timeout waiting for {message_num}. Time taken {time.time() - start} ")
        return None
def send_irccmd(self, msg): # send private message to mtbotnick
# self.send_msg(self.mtbotnick + ': ' + msg) # displays in chat room
self.send_privmsg(self.mtbotnick + ' : ' + msg) # doesn't display in chat room
return self.wait_for_privmsg()
def send_cmd(self, msg): # send private message to mtbotnick
return self.send_irccmd("cmd " + msg)
def get_node(self, x, y, z):
"""Get block (x,y,z) => item:string"""
return self.send_cmd("get_node " + str_xyz_int(x, y, z))
def compare_nodes(self, x1, y1, z1, x2, y2, z2, item):
"""Compare a cuboid of blocks (x1, y1, z1, x2, y2, z2) with an item => count of differences"""
return self.send_cmd("compare_nodes " + str_xyz_int(x1, y1, z1) + str_xyz_int(x2, y2, z2) + " " + item)
def set_node(self, x, y, z, item):
"""Set block (x, y, z, item)"""
return self.send_cmd("set_node " + str_xyz_int(x, y, z) + item)
def set_nodes(self, x1, y1, z1, x2, y2, z2, item):
"""Set a cuboid of blocks (x1, y1, z1, x2, y2, z2, item)"""
return self.send_cmd("set_nodes " + str_xyz_int(x1, y1, z1) + str_xyz_int(x2, y2, z2) + item)
    def set_node_list(self, list_pos, item):
        """Set all blocks at a list of position tuples to the same item
        ([(x1, y1, z1), (x2, y2, z2), ...], item).

        Positions (or ((x1,y1,z1),(x2,y2,z2)) cuboid pairs) are serialised as
        a '|'-separated string, zlib-compressed, base64-encoded and sent in
        batches small enough for IRC (payload <= 400 chars per batch).
        Returns a summary string: item name(s), total count, and any errors.
        """
        batches = 0
        max_per_batch = 0
        b64 = None
        while batches == 0 or max_per_batch > 400:
            # keep increasing the number of batches until max_per_batch <= 400
            max_per_batch = 0
            batches += 1
            batch_size = len(list_pos) // batches + 1
            b64 = []
            for batch in range(batches):
                beg = batch * batch_size
                end = beg + batch_size
                s = '|'
                for pos in list_pos[beg:end]:
                    if len(pos) == 2 and len(pos[0]) == 3 and len(pos[1]) == 3:
                        # A ((x1,y1,z1),(x2,y2,z2)) pair encodes a cuboid.
                        s = s + str_xyz_int(pos[0][0], pos[0][1], pos[0][2]).strip("() ") + " "
                        s = s + str_xyz_int(pos[1][0], pos[1][1], pos[1][2]).strip("() ") + "|"
                    else:
                        s = s + str_xyz_int(pos[0], pos[1], pos[2]).strip("() ") + "|"
                bytes_unzipped = s.encode('utf-8')
                bytes_zipped = zlib.compress(bytes_unzipped)
                b64.append(base64.standard_b64encode(bytes_zipped).decode('utf-8') + " ")
                len_transmit = len("set_node_list " + b64[batch] + item)
                max_per_batch = max(len_transmit, max_per_batch)
                logger.debug("set_node_list: Batch {} of {} from {} to {} len {}".format(batch, batches, beg, end, len_transmit))
        str_error = ''
        str_item = ''
        count = 0
        for batch in range(batches):
            ret = self.send_cmd("set_node_list " + b64[batch] + item)
            try:
                # Reply format: "<item> <count>"; accumulate counts per batch.
                list_ret = ret.split(' ', maxsplit=1)
                count += int(list_ret[1])
                if len(list_ret) > 1 and list_ret[0] not in str_item:
                    str_item += list_ret[0] + " "
            except AttributeError:
                # ret is None (reply timed out) so it cannot be split
                pass
            except ValueError:
                # Non-numeric count: record the raw reply as an error.
                str_error += " [" + ret + "]"
        return str_item + str(count) + str_error
def set_sign(self, x, y, z, direction, text, sign_node="default:sign_wall_wood", **kwargs):
"""Set a sign at a location with text and facing direction
sign_node: "default:sign_wall_wood"
text:"#0Formatted body #3of\n#1text"
direction: "+x" or "-x" or "+y" or "-y" or "+z" or "-z" : Which way user is facing to read sign
Example sign_node names:
default:sign_wall_wood
default:sign_wall_steel
signs:sign_hanging
signs:sign_yard
signs:sign_wall_green
signs:sign_wall_yellow
signs:sign_wall_red
signs:sign_wall_white_red
signs:sign_wall_white_black
signs:sign_wall_orange
signs:sign_wall_blue
signs:sign_wall_brown
locked_sign:sign_wall_locked
Can also mount signs on fences"""
# dirx, diry, | |
from time import sleep
import uuid
import requests
from requests import RequestException
import logging
from typing import Union
from prefect import Task
from prefect.engine.signals import FAIL
from prefect.utilities.tasks import defaults_from_attrs
class ConnectionNotFoundException(Exception):
    """Raised when the given Airbyte connection id is unknown to the server (HTTP 404)."""
    pass
class AirbyteServerNotHealthyException(Exception):
    """Raised when the health endpoint reports unavailable or a request to the server fails."""
    pass
class JobNotFoundException(Exception):
    """Raised when a sync job id cannot be found (HTTP 404)."""
    pass
class AirbyteSyncJobFailed(Exception):
    """Raised when a triggered sync job finishes with a failed status."""
    pass
class AirbyteExportConfigurationFailed(Exception):
    """Raised when exporting the Airbyte deployment configuration fails."""
    pass
class AirbyteClient:
    """
    Establishes a session with an Airbyte instance and evaluates its current
    health status.

    This client assumes that you're using Airbyte Open-Source, since "For
    Airbyte Open-Source you don't need the API Token for Authentication! All
    endpoints are possible to access using the API without it."
    For more information refer to the [Airbyte docs](https://docs.airbyte.io/api-documentation).

    Args:
        - logger: logger used for debug output.
        - airbyte_base_url (str, mandatory): base api endpoint url for airbyte.
          ex. http://localhost:8000/api/v1
    """
    def __init__(self, logger, airbyte_base_url: str):
        self.airbyte_base_url = airbyte_base_url
        self.logger = logger

    def _establish_session(self):
        """Create a requests session, returning it once the server reports healthy
        (otherwise _check_health_status raises)."""
        session = requests.Session()
        if self._check_health_status(session):
            return session

    def _check_health_status(self, session):
        """Return True if the server's health endpoint reports available.

        Raises:
            AirbyteServerNotHealthyException: when the server reports
                unavailable or the request fails.
        """
        get_connection_url = self.airbyte_base_url + "/health/"
        try:
            response = session.get(get_connection_url)
            # Parse the body once instead of calling response.json() repeatedly.
            payload = response.json()
            self.logger.debug("Health check response: %s", payload)
            # Newer Airbyte servers report "available"; older ones report "db".
            key = "available" if "available" in payload else "db"
            health_status = payload[key]
            if not health_status:
                raise AirbyteServerNotHealthyException(
                    f"Airbyte Server health status: {health_status}"
                )
            return True
        except RequestException as e:
            raise AirbyteServerNotHealthyException(e)

    def _export_configuration(self, session) -> bytearray:
        """
        Trigger an export of Airbyte configuration, see:
        https://airbyte-public-api-docs.s3.us-east-2.amazonaws.com/rapidoc-api-docs.html#post-/v1/deployment/export

        Args:
            - session: requests session with which to make call to the Airbyte server.

        Returns:
            - byte array of Airbyte configuration data

        Raises:
            - AirbyteExportConfigurationFailed: on request failure or a
              non-200 response (bug fix: the non-200 path used to silently
              return None).
        """
        get_connection_url = self.airbyte_base_url + "/deployment/export/"
        try:
            response = session.post(get_connection_url)
            if response.status_code == 200:
                self.logger.debug("Export configuration response: %s", response)
                return response.content
            raise AirbyteExportConfigurationFailed(
                f"Export configuration failed with status code {response.status_code}"
            )
        except RequestException as e:
            raise AirbyteExportConfigurationFailed(e)
class AirbyteConnectionTask(Task):
"""
Task for triggering Airbyte Connections, where "A connection is
a configuration for syncing data between a source and a destination."
For more information refer to the
[Airbyte docs](https://docs.airbyte.io/understanding-airbyte/connections)
This task assumes that the Airbyte Open-Source, since "For
Airbyte Open-Source you don't need the API Token for
Authentication! All endpoints are possible to access using the
API without it."
For more information refer to the [Airbyte docs](https://docs.airbyte.io/api-documentation)
Args:
- airbyte_server_host (str, optional): Hostname of Airbyte server where connection is configured.
Defaults to localhost.
- airbyte_server_port (str, optional): Port that the Airbyte server is listening on.
Defaults to 8000.
- airbyte_api_version (str, optional): Version of Airbyte API to use to trigger connection sync.
Defaults to v1.
- connection_id (str, optional): Default connection id to
use for sync jobs, if none is specified to `run`.
- stream_output (Union[bool, int, str], optional): specifies whether this task should log
the output as it occurs, and at what logging level. If `True` is passed,
the logging level defaults to `INFO`; otherwise, any integer or string
value that's passed will be treated as the log level, provided
the `logging` library can successfully interpret it.
- **kwargs (Any, optional): additional kwargs to pass to the
base Task constructor
"""
# Connection statuses
CONNECTION_STATUS_ACTIVE = "active"
CONNECTION_STATUS_INACTIVE = "inactive"
CONNECTION_STATUS_DEPRECATED = "deprecated"
# Job statuses
JOB_STATUS_SUCCEEDED = "succeeded"
JOB_STATUS_FAILED = "failed"
JOB_STATUS_PENDING = "pending"
def __init__(
self,
airbyte_server_host: str = "localhost",
airbyte_server_port: int = 8000,
airbyte_api_version: str = "v1",
connection_id: str = None,
stream_output: Union[bool, int, str] = False,
**kwargs,
):
self.airbyte_server_host = airbyte_server_host
self.airbyte_server_port = airbyte_server_port
self.airbyte_api_version = airbyte_api_version
self.connection_id = connection_id
if isinstance(stream_output, str):
stream_output = logging.getLevelName(stream_output)
if not isinstance(stream_output, int):
raise TypeError(
f"'stream_output': {stream_output} is not a valid log level"
)
self.stream_output = logging.INFO if stream_output is True else stream_output
super().__init__(**kwargs)
    def _get_connection_status(self, session, airbyte_base_url, connection_id):
        """Fetch an Airbyte connection's status; as a side effect, remove any
        schedule attached to the connection so syncs are manual-only.

        Args:
            session: requests session for the Airbyte server.
            airbyte_base_url: base URL, e.g. http://host:port/api/v1.
            connection_id: UUID of the connection.

        Returns:
            The connection status string ("active", "inactive" or "deprecated").

        Raises:
            AirbyteServerNotHealthyException: on any requests-level error.
        """
        get_connection_url = airbyte_base_url + "/connections/get/"
        # TODO - Missing authentication because Airbyte servers currently do not support authentication
        try:
            response = session.post(
                get_connection_url, json={"connectionId": connection_id}
            )
            self.logger.log(level=self.stream_output, msg=response.json())
            response.raise_for_status()
            # check whether a schedule exists ...
            schedule = response.json()["schedule"]
            if schedule:
                self.logger.warning("Found existing Connection schedule, removing ...")
                # mandatory fields for Connection update ...
                sync_catalog = response.json()["syncCatalog"]
                connection_status = response.json()["status"]
                # Adjacent string literals concatenate to "/connections/update/".
                update_connection_url = airbyte_base_url + "/connections" "/update/"
                response2 = session.post(
                    update_connection_url,
                    json={
                        "connectionId": connection_id,
                        "syncCatalog": sync_catalog,
                        # Null schedule disables automatic syncs for this connection.
                        "schedule": None,
                        "status": connection_status,
                    },
                )
                self.logger.log(level=self.stream_output, msg=response2.json())
                if response2.status_code == 200:
                    self.logger.info("Schedule removed ok.")
                else:
                    self.logger.warning("Schedule not removed.")
            connection_status = response.json()["status"]
            return connection_status
        except RequestException as e:
            raise AirbyteServerNotHealthyException(e)
    def _trigger_manual_sync_connection(self, session, airbyte_base_url, connection_id):
        """
        Trigger a manual sync of the Connection, see:
        https://airbyte-public-api-docs.s3.us-east-2.amazonaws.com/rapidoc
        -api-docs.html#post-/v1/connections/sync

        Args:
            session: requests session with which to make call to Airbyte server
            airbyte_base_url: URL of Airbyte server
            connection_id: ID of connection to sync

        Returns:
            (job_id, job_created_at) of the created sync job.
            NOTE(review): any status code other than 200/404 falls through and
            implicitly returns None, which callers will fail to unpack —
            confirm whether raising was intended for that case.

        Raises:
            ConnectionNotFoundException: when the server returns 404.
            AirbyteServerNotHealthyException: on requests-level errors.
        """
        get_connection_url = airbyte_base_url + "/connections/sync/"
        # TODO - missing authentication ...
        try:
            response = session.post(
                get_connection_url, json={"connectionId": connection_id}
            )
            if response.status_code == 200:
                self.logger.log(level=self.stream_output, msg=response.json())
                job_id = response.json()["job"]["id"]
                job_created_at = response.json()["job"]["createdAt"]
                return job_id, job_created_at
            elif response.status_code == 404:
                # connection_id not found
                self.logger.warning(
                    f"Connection {connection_id} not found, please double "
                    f"check the connection_id ..."
                )
                raise ConnectionNotFoundException(
                    f"Connection {connection_id} not found, please double "
                    f"check the connection_id ..."
                )
        except RequestException as e:
            raise AirbyteServerNotHealthyException(e)
    def _get_job_status(self, session, airbyte_base_url, job_id):
        """Fetch a sync job's status from /jobs/get.

        Returns:
            (job_status, job_created_at, job_updated_at) on HTTP 200.
            NOTE(review): any status code other than 200/404 falls through and
            implicitly returns None — confirm whether that is intended.

        Raises:
            JobNotFoundException: when the server returns 404.
            AirbyteServerNotHealthyException: on requests-level errors.
        """
        get_connection_url = airbyte_base_url + "/jobs/get/"
        try:
            response = session.post(get_connection_url, json={"id": job_id})
            if response.status_code == 200:
                self.logger.log(level=self.stream_output, msg=response.json())
                job_status = response.json()["job"]["status"]
                job_created_at = response.json()["job"]["createdAt"]
                job_updated_at = response.json()["job"]["updatedAt"]
                return job_status, job_created_at, job_updated_at
            elif response.status_code == 404:
                self.logger.error(f"Job {job_id} not found...")
                raise JobNotFoundException(f"Job {job_id} not found...")
        except RequestException as e:
            raise AirbyteServerNotHealthyException(e)
    @defaults_from_attrs(
        "airbyte_server_host",
        "airbyte_server_port",
        "airbyte_api_version",
        "connection_id",
    )
    def run(
        self,
        airbyte_server_host: str = None,
        airbyte_server_port: int = None,
        airbyte_api_version: str = None,
        connection_id: str = None,
        poll_interval_s: int = 15,
    ) -> dict:
        """
        Task run method for triggering an Airbyte Connection.

        *It is assumed that the user will have previously configured
        a Source & Destination into a Connection.*
        e.g. MySql -> CSV

        An invocation of `run` will attempt to start a sync job for
        the specified `connection_id` representing the Connection in
        Airbyte.

        `run` will poll Airbyte Server for the Connection status and
        will only complete when the sync has completed or
        when it receives an error status code from an API call.

        Args:
            - airbyte_server_host (str, optional): Hostname of Airbyte server where connection is
                configured. Will overwrite the value provided at init if provided.
            - airbyte_server_port (str, optional): Port that the Airbyte server is listening on.
                Will overwrite the value provided at init if provided.
            - airbyte_api_version (str, optional): Version of Airbyte API to use to trigger connection
                sync. Will overwrite the value provided at init if provided.
            - connection_id (str, optional): if provided,
                will overwrite the value provided at init.
            - poll_interval_s (int, optional): this task polls the
                Airbyte API for status, if provided this value will
                override the default polling time of 15 seconds.

        Returns:
            - dict: connection_id (str) and succeeded_at (timestamp str)
        """
        if not connection_id:
            raise ValueError(
                "Value for parameter `connection_id` *must* \
                be provided."
            )
        # Fail fast on malformed ids before any network traffic.
        try:
            uuid.UUID(connection_id)
        except (TypeError, ValueError):
            raise ValueError(
                "Parameter `connection_id` *must* be a valid UUID \
                i.e. 32 hex characters, including hyphens."
            )
        # see https://airbyte-public-api-docs.s3.us-east-2.amazonaws.com
        # /rapidoc-api-docs.html#overview
        airbyte_base_url = (
            f"http://{airbyte_server_host}:"
            f"{airbyte_server_port}/api/{airbyte_api_version}"
        )
        airbyte = AirbyteClient(self.logger, airbyte_base_url)
        session = airbyte._establish_session()
        self.logger.info(
            f"Getting Airbyte Connection {connection_id}, poll interval "
            f"{poll_interval_s} seconds, airbyte_base_url {airbyte_base_url}"
        )
        connection_status = self._get_connection_status(
            session, airbyte_base_url, connection_id
        )
        if connection_status == self.CONNECTION_STATUS_ACTIVE:
            # Trigger manual sync on the Connection ...
            job_id, job_created_at = self._trigger_manual_sync_connection(
                session, airbyte_base_url, connection_id
            )
            job_status = self.JOB_STATUS_PENDING
            # Poll until the job reaches a terminal state (failed/succeeded).
            while job_status not in [self.JOB_STATUS_FAILED, self.JOB_STATUS_SUCCEEDED]:
                job_status, job_created_at, job_updated_at = self._get_job_status(
                    session, airbyte_base_url, job_id
                )
                # pending┃running┃incomplete┃failed┃succeeded┃cancelled
                if job_status == self.JOB_STATUS_SUCCEEDED:
                    self.logger.info(f"Job {job_id} succeeded.")
                elif job_status == self.JOB_STATUS_FAILED:
                    self.logger.error(f"Job {job_id} failed.")
                    raise AirbyteSyncJobFailed(f"Job {job_id} failed.")
                else:
                    # wait for next poll interval
                    sleep(poll_interval_s)
            # job_updated_at is always bound here: the loop above runs at
            # least once because job_status starts as JOB_STATUS_PENDING.
            return {
                "connection_id": connection_id,
                "status": connection_status,
                "job_status": job_status,
                "job_created_at": job_created_at,
                "job_updated_at": job_updated_at,
            }
        elif connection_status == self.CONNECTION_STATUS_INACTIVE:
            self.logger.error(
                f"Please enable the Connection {connection_id} in Airbyte Server."
            )
            raise FAIL(
                f"Please enable the Connection {connection_id} in Airbyte Server."
            )
        elif connection_status == self.CONNECTION_STATUS_DEPRECATED:
            self.logger.error(f"Connection {connection_id} is deprecated.")
            raise FAIL(f"Connection {connection_id} is deprecated.")
        # NOTE(review): any other connection status falls through and the
        # method returns None -- confirm this is the intended behaviour.
class AirbyteConfigurationExport(Task):
"""
Task for triggering an export of the Airbyte configuration.
This task assumes that you are using Airbyte Open-Source, since "For
Airbyte Open-Source you don't need the API Token for
Authentication! All | |
# test/mitmproxy/test_flow.py
import mock
import io
import netlib.utils
from netlib.http import Headers
from mitmproxy import filt, controller, flow, options
from mitmproxy.contrib import tnetstring
from mitmproxy.exceptions import FlowReadException
from mitmproxy.models import Error
from mitmproxy.models import Flow
from mitmproxy.models import HTTPFlow
from mitmproxy.models import HTTPRequest
from mitmproxy.models import HTTPResponse
from mitmproxy.proxy import ProxyConfig
from mitmproxy.proxy.server import DummyServer
from mitmproxy.models.connections import ClientConnection
from . import tutils
def test_app_registry():
    """AppRegistry matches apps by (host, port) pair or by the Host header."""
    registry = flow.AppRegistry()
    registry.add("foo", "domain", 80)

    request = HTTPRequest.wrap(netlib.tutils.treq())
    request.host = "domain"
    request.port = 80
    assert registry.get(request)

    # same host, wrong port -> no match
    request.port = 81
    assert not registry.get(request)

    request = HTTPRequest.wrap(netlib.tutils.treq())
    request.host = "domain2"
    request.port = 80
    assert not registry.get(request)
    # ...but a matching Host header is honoured
    request.headers["host"] = "domain"
    assert registry.get(request)
class TestClientPlaybackState:
    """Exercises FlowMaster client-playback bookkeeping via tick()."""
    def test_tick(self):
        # Two recorded flows are queued; count() reports flows remaining.
        first = tutils.tflow()
        s = flow.State()
        fm = flow.FlowMaster(None, None, s)
        fm.start_client_playback([first, tutils.tflow()], True)
        c = fm.client_playback
        c.testing = True
        assert not c.done()
        assert not s.flow_count()
        assert c.count() == 2
        c.tick(fm)
        assert s.flow_count()
        assert c.count() == 1
        c.tick(fm)
        # count stays at 1 until the in-flight flow is cleared
        assert c.count() == 1
        c.clear(c.current)
        c.tick(fm)
        assert c.count() == 0
        c.clear(c.current)
        assert c.done()
        fm.state.clear()
        fm.tick(timeout=0)
        fm.stop_client_playback()
        assert not fm.client_playback
class TestServerPlaybackState:
    """
    Exercises flow.ServerPlaybackState request hashing and flow matching.

    NOTE(review): the positional arguments used throughout appear to be
    (headers, flows, <3rd flag - always False here>, nopop, ignore_params,
    ignore_content, ignore_payload_params, ignore_host), inferred from the
    call sites below -- confirm against the ServerPlaybackState signature.

    Bug fix: several `bytes` literals had been corrupted by HTML-entity
    mangling (`&param1` rendered as `¶m1`), which is a SyntaxError in a
    bytes literal; the `&param1` separators are restored below.
    """
    def test_hash(self):
        # Default settings: the hash depends on path/query, not on headers.
        s = flow.ServerPlaybackState(
            None,
            [],
            False,
            False,
            None,
            False,
            None,
            False)
        r = tutils.tflow()
        r2 = tutils.tflow()
        assert s._hash(r)
        assert s._hash(r) == s._hash(r2)
        r.request.headers["foo"] = "bar"
        assert s._hash(r) == s._hash(r2)
        r.request.path = "voing"
        assert s._hash(r) != s._hash(r2)
        # a blank query value is distinct from no value at all
        r.request.path = "path?blank_value"
        r2.request.path = "path?"
        assert s._hash(r) != s._hash(r2)
    def test_headers(self):
        # With headers=["foo"], only the "foo" header participates in the hash.
        s = flow.ServerPlaybackState(
            ["foo"],
            [],
            False,
            False,
            None,
            False,
            None,
            False)
        r = tutils.tflow(resp=True)
        r.request.headers["foo"] = "bar"
        r2 = tutils.tflow(resp=True)
        assert not s._hash(r) == s._hash(r2)
        r2.request.headers["foo"] = "bar"
        assert s._hash(r) == s._hash(r2)
        r2.request.headers["oink"] = "bar"
        assert s._hash(r) == s._hash(r2)
        r = tutils.tflow(resp=True)
        r2 = tutils.tflow(resp=True)
        assert s._hash(r) == s._hash(r2)
    def test_load(self):
        # Two flows with the same hash are returned in order and consumed.
        r = tutils.tflow(resp=True)
        r.request.headers["key"] = "one"
        r2 = tutils.tflow(resp=True)
        r2.request.headers["key"] = "two"
        s = flow.ServerPlaybackState(
            None, [
                r, r2], False, False, None, False, None, False)
        assert s.count() == 2
        assert len(s.fmap.keys()) == 1
        n = s.next_flow(r)
        assert n.request.headers["key"] == "one"
        assert s.count() == 1
        n = s.next_flow(r)
        assert n.request.headers["key"] == "two"
        assert s.count() == 0
        assert not s.next_flow(r)
    def test_load_with_nopop(self):
        # With nopop=True, served flows are not removed from the state.
        r = tutils.tflow(resp=True)
        r.request.headers["key"] = "one"
        r2 = tutils.tflow(resp=True)
        r2.request.headers["key"] = "two"
        s = flow.ServerPlaybackState(
            None, [
                r, r2], False, True, None, False, None, False)
        assert s.count() == 2
        s.next_flow(r)
        assert s.count() == 2
    def test_ignore_params(self):
        # Listed query parameters are excluded from the hash.
        s = flow.ServerPlaybackState(
            None, [], False, False, [
                "param1", "param2"], False, None, False)
        r = tutils.tflow(resp=True)
        r.request.path = "/test?param1=1"
        r2 = tutils.tflow(resp=True)
        r2.request.path = "/test"
        assert s._hash(r) == s._hash(r2)
        r2.request.path = "/test?param1=2"
        assert s._hash(r) == s._hash(r2)
        r2.request.path = "/test?param2=1"
        assert s._hash(r) == s._hash(r2)
        r2.request.path = "/test?param3=2"
        assert not s._hash(r) == s._hash(r2)
    def test_ignore_payload_params(self):
        # Listed form-body parameters are excluded from the hash.
        s = flow.ServerPlaybackState(
            None, [], False, False, None, False, [
                "param1", "param2"], False)
        r = tutils.tflow(resp=True)
        r.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
        r.request.content = b"paramx=x&param1=1"
        r2 = tutils.tflow(resp=True)
        r2.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
        r2.request.content = b"paramx=x&param1=1"
        # same parameters
        assert s._hash(r) == s._hash(r2)
        # ignored parameters !=
        r2.request.content = b"paramx=x&param1=2"
        assert s._hash(r) == s._hash(r2)
        # missing parameter
        r2.request.content = b"paramx=x"
        assert s._hash(r) == s._hash(r2)
        # ignorable parameter added
        r2.request.content = b"paramx=x&param1=2"
        assert s._hash(r) == s._hash(r2)
        # not ignorable parameter changed
        r2.request.content = b"paramx=y&param1=1"
        assert not s._hash(r) == s._hash(r2)
        # not ignorable parameter missing
        r2.request.content = b"param1=1"
        assert not s._hash(r) == s._hash(r2)
    def test_ignore_payload_params_other_content_type(self):
        # ignore_payload_params only applies to x-www-form-urlencoded bodies.
        s = flow.ServerPlaybackState(
            None, [], False, False, None, False, [
                "param1", "param2"], False)
        r = tutils.tflow(resp=True)
        r.request.headers["Content-Type"] = "application/json"
        r.request.content = b'{"param1":"1"}'
        r2 = tutils.tflow(resp=True)
        r2.request.headers["Content-Type"] = "application/json"
        r2.request.content = b'{"param1":"1"}'
        # same content
        assert s._hash(r) == s._hash(r2)
        # distinct content (note only x-www-form-urlencoded payload is analysed)
        r2.request.content = b'{"param1":"2"}'
        assert not s._hash(r) == s._hash(r2)
    def test_ignore_payload_wins_over_params(self):
        # NOTE: parameters are mutually exclusive in options
        s = flow.ServerPlaybackState(
            None, [], False, False, None, True, [
                "param1", "param2"], False)
        r = tutils.tflow(resp=True)
        r.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
        r.request.content = b"paramx=y"
        r2 = tutils.tflow(resp=True)
        r2.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
        r2.request.content = b"paramx=x"
        # same parameters
        assert s._hash(r) == s._hash(r2)
    def test_ignore_content(self):
        # Without ignore_content, differing bodies produce different hashes.
        s = flow.ServerPlaybackState(
            None,
            [],
            False,
            False,
            None,
            False,
            None,
            False)
        r = tutils.tflow(resp=True)
        r2 = tutils.tflow(resp=True)
        r.request.content = b"foo"
        r2.request.content = b"foo"
        assert s._hash(r) == s._hash(r2)
        r2.request.content = b"bar"
        assert not s._hash(r) == s._hash(r2)
        # now ignoring content
        s = flow.ServerPlaybackState(
            None,
            [],
            False,
            False,
            None,
            True,
            None,
            False)
        r = tutils.tflow(resp=True)
        r2 = tutils.tflow(resp=True)
        r.request.content = b"foo"
        r2.request.content = b"foo"
        assert s._hash(r) == s._hash(r2)
        r2.request.content = b"bar"
        assert s._hash(r) == s._hash(r2)
        r2.request.content = b""
        assert s._hash(r) == s._hash(r2)
        r2.request.content = None
        assert s._hash(r) == s._hash(r2)
    def test_ignore_host(self):
        # With ignore_host=True, the target host is excluded from the hash.
        s = flow.ServerPlaybackState(
            None,
            [],
            False,
            False,
            None,
            False,
            None,
            True)
        r = tutils.tflow(resp=True)
        r2 = tutils.tflow(resp=True)
        r.request.host = "address"
        r2.request.host = "address"
        assert s._hash(r) == s._hash(r2)
        r2.request.host = "wrong_address"
        assert s._hash(r) == s._hash(r2)
class TestHTTPFlow(object):
    """Behavioural tests for HTTPFlow: copy, match, backup, state, replace."""
    def test_copy(self):
        # Copies share state content (minus ids) but share no object identity.
        f = tutils.tflow(resp=True)
        f.get_state()
        f2 = f.copy()
        a = f.get_state()
        b = f2.get_state()
        del a["id"]
        del b["id"]
        assert a == b
        assert not f == f2
        assert f is not f2
        assert f.request.get_state() == f2.request.get_state()
        assert f.request is not f2.request
        assert f.request.headers == f2.request.headers
        assert f.request.headers is not f2.request.headers
        assert f.response.get_state() == f2.response.get_state()
        assert f.response is not f2.response
        # errors are deep-copied as well
        f = tutils.tflow(err=True)
        f2 = f.copy()
        assert f is not f2
        assert f.request is not f2.request
        assert f.request.headers == f2.request.headers
        assert f.request.headers is not f2.request.headers
        assert f.error.get_state() == f2.error.get_state()
        assert f.error is not f2.error
    def test_match(self):
        f = tutils.tflow(resp=True)
        assert not f.match("~b test")
        assert f.match(None)
        assert not f.match("~b test")
        f = tutils.tflow(err=True)
        assert f.match("~e")
        # a bare "~" is not a valid filter expression
        tutils.raises(ValueError, f.match, "~")
    def test_backup(self):
        # backup()/revert() snapshot and restore the request content.
        f = tutils.tflow()
        f.response = HTTPResponse.wrap(netlib.tutils.tresp())
        f.request.content = b"foo"
        assert not f.modified()
        f.backup()
        f.request.content = b"bar"
        assert f.modified()
        f.revert()
        assert f.request.content == b"foo"
    def test_backup_idempotence(self):
        # repeated backup/revert cycles must not raise or corrupt state
        f = tutils.tflow(resp=True)
        f.backup()
        f.revert()
        f.backup()
        f.revert()
    def test_getset_state(self):
        # get_state/from_state/set_state round-trip losslessly.
        f = tutils.tflow(resp=True)
        state = f.get_state()
        assert f.get_state() == HTTPFlow.from_state(
            state).get_state()
        f.response = None
        f.error = Error("error")
        state = f.get_state()
        assert f.get_state() == HTTPFlow.from_state(
            state).get_state()
        f2 = f.copy()
        f2.id = f.id  # copy creates a different uuid
        assert f.get_state() == f2.get_state()
        assert not f == f2
        f2.error = Error("e2")
        assert not f == f2
        f.set_state(f2.get_state())
        assert f.get_state() == f2.get_state()
    def test_kill(self):
        # Killing an intercepted flow marks it with a "killed" error.
        s = flow.State()
        fm = flow.FlowMaster(None, None, s)
        f = tutils.tflow()
        f.intercept(mock.Mock())
        f.kill(fm)
        for i in s.view:
            assert "killed" in str(i.error)
    def test_killall(self):
        s = flow.State()
        fm = flow.FlowMaster(None, None, s)
        f = tutils.tflow()
        f.intercept(fm)
        s.killall(fm)
        for i in s.view:
            assert "killed" in str(i.error)
    def test_accept_intercept(self):
        # accept_intercept acks the pending reply of an intercepted flow
        f = tutils.tflow()
        f.intercept(mock.Mock())
        assert not f.reply.acked
        f.accept_intercept(mock.Mock())
        assert f.reply.acked
    def test_replace_unicode(self):
        # must not raise on non-UTF-8 body bytes
        f = tutils.tflow(resp=True)
        f.response.content = b"\xc2foo"
        f.replace(b"foo", u"bar")
    def test_replace_no_content(self):
        f = tutils.tflow()
        f.request.content = None
        assert f.replace("foo", "bar") == 0
    def test_replace(self):
        # replace() touches headers and bodies of both request and response
        # and returns the number of replacements made.
        f = tutils.tflow(resp=True)
        f.request.headers["foo"] = "foo"
        f.request.content = b"afoob"
        f.response.headers["foo"] = "foo"
        f.response.content = b"afoob"
        assert f.replace("foo", "bar") == 6
        assert f.request.headers["bar"] == "bar"
        assert f.request.content == b"abarb"
        assert f.response.headers["bar"] == "bar"
        assert f.response.content == b"abarb"
    def test_replace_encoded(self):
        # replace() operates on the decoded body; the raw (gzipped) content
        # only shows the change after decode().
        f = tutils.tflow(resp=True)
        f.request.content = b"afoob"
        f.request.encode("gzip")
        f.response.content = b"afoob"
        f.response.encode("gzip")
        f.replace("foo", "bar")
        assert f.request.raw_content != b"abarb"
        f.request.decode()
        assert f.request.raw_content == b"abarb"
        assert f.response.raw_content != b"abarb"
        f.response.decode()
        assert f.response.raw_content == b"abarb"
class TestTCPFlow:
    """Filter-expression matching on TCP flows."""
    def test_match(self):
        tcp_flow = tutils.ttcpflow()
        assert tcp_flow.match(None)
        assert not tcp_flow.match("~b nonexistent")
        assert not tcp_flow.match("~b nonexistent")

        # error flows satisfy "~e"; a bare "~" is rejected
        tcp_flow = tutils.ttcpflow(err=True)
        assert tcp_flow.match("~e")
        tutils.raises(ValueError, tcp_flow.match, "~")
class TestState:
    def test_backup(self):
        # revert() restores a flow that was backed up after being added
        c = flow.State()
        f = tutils.tflow()
        c.add_flow(f)
        f.backup()
        c.revert(f)
    def test_flow(self):
        """
        normal flow:
            connect -> request -> response
        """
        c = flow.State()
        f = tutils.tflow()
        c.add_flow(f)
        assert f
        assert c.flow_count() == 1
        # a flow without a response is still "active"
        assert c.active_flow_count() == 1
        newf = tutils.tflow()
        assert c.add_flow(newf)
        assert c.active_flow_count() == 2
        # attaching a response deactivates the flow
        f.response = HTTPResponse.wrap(netlib.tutils.tresp())
        assert c.update_flow(f)
        assert c.flow_count() == 2
        assert c.active_flow_count() == 1
        # updating an unknown flow is a no-op
        assert not c.update_flow(None)
        assert c.active_flow_count() == 1
        newf.response = HTTPResponse.wrap(netlib.tutils.tresp())
        assert c.update_flow(newf)
        assert c.active_flow_count() == 0
    def test_err(self):
        # setting an error counts as a flow update...
        c = flow.State()
        f = tutils.tflow()
        c.add_flow(f)
        f.error = Error("message")
        assert c.update_flow(f)
        # ...and an errored flow satisfies an "~e" view filter
        c = flow.State()
        f = tutils.tflow()
        c.add_flow(f)
        c.set_view_filter("~e")
        assert not c.view
        f.error = tutils.terr()
        assert c.update_flow(f)
        assert c.view
def test_set_view_filter(self):
c = flow.State()
f = tutils.tflow()
assert len(c.view) == 0
c.add_flow(f)
assert len(c.view) == 1
c.set_view_filter("~s")
assert c.filter_txt == "~s"
assert len(c.view) == 0
f.response = HTTPResponse.wrap(netlib.tutils.tresp())
c.update_flow(f)
assert len(c.view) == 1
c.set_view_filter(None)
assert len(c.view) == 1
f = tutils.tflow()
c.add_flow(f)
assert len(c.view) == 2
c.set_view_filter("~q")
assert len(c.view) == 1
c.set_view_filter("~s")
assert len(c.view) == | |
1. Does not require partial derivatives, thus can be used with
complicated, 3D velocity structures
2. Accurate recovery of moderately irregular (non-ellipsoidal)
PDF's with a single minimum
        3. Only moderately slower (about 10 times slower) than linearised,
iterative location techniques, and is much faster
(about 100 times faster) than the grid-search
4. Results can be used to obtain confidence contours
Drawbacks:
1. Stochastic coverage of search region - may miss important features
2. Inconsistent recovery of very irregular (non-ellipsoidal)
PDF's with multiple minima
3. Requires careful selection of sampling parameters
4. Attempts to read full 3D travel-time grid files into memory,
thus may run very slowly with large number of observations and large
3D travel-time grids
:param num_samples: total number of accepted samples to obtain (min:0)
:type num_samples: int
:param num_learn: number of accepted samples for learning stage of
search (min:0)
:type num_learn: int
:param num_equil: number of accepted samples for equilibration stage
of search (min:0)
:type num_equil: int
:param num_begin_save: number of accepted samples after which to begin
saving stage of search, denotes end of equilibration stage (min:0)
:type num_begin_save: int
:param num_skip: number of accepted samples to skip between saves
(numSkip = 1 saves every accepted sample, min:1)
:type num_skip: int
:param step_init: initial step size in km for the learning stage
(stepInit < 0.0 gives automatic step size selection. If the search
takes too long, the initial step size may be too large;
this may be the case if the search region is very large relative
to the volume of the high confidence region for the locations.)
:type step_init: float
:param step_min: minimum step size allowed during any search stage
(This parameter should not be critical, set it to a low value. min:0)
:type step_min: float
:param prob_min: minimum value of the maximum probability (likelihood)
that must be found by the end of learning stage, if this value is not
reached the search is aborted (This parameters allows the filtering of
locations outside of the search grid and locations with large
residuals.)
:type prob_min: float
:param step_fact: step factor for scaling step size during
equilibration stage (Try a value of 8.0 to start.) Default=8.
:type step_fact: float
"""
self.num_samples = num_samples
self.num_learn = num_learn
self.num_equil = num_equil
self.num_begin_save = num_begin_save
self.num_skip = num_skip
self.step_init = step_init
self.step_min = step_min
self.prob_min = prob_min
self.step_fact = step_fact
def __repr__(self):
line = f'LOCSEARCH MET {self.num_samples} {self.num_learn} ' \
f'{self.num_equil} {self.num_begin_save} {self.num_skip} ' \
f'{self.step_min} {self.step_min} {self.step_fact} ' \
f'{self.prob_min}\n'
return line
    @classmethod
    def init_with_default(cls):
        # NOTE(review): unimplemented stub -- returns None instead of a
        # default-initialised instance; sibling classes provide
        # init_default() for this purpose. Confirm intended behaviour.
        pass
    @property
    def type(self):
        """Return the NLLoc control-file statement keyword for this block."""
        return 'LOCSEARCH'
class LocSearchOctTree:
    """
    Parameters for NLLoc's oct-tree importance-sampling location search.

    Documentation: http://alomax.free.fr/nlloc/octtree/OctTree.html

    The oct-tree sampler maps the earthquake-location PDF in 3D space by
    recursively subdividing the most probable cells. It is much faster
    than a grid search and more complete than Metropolis sampling, but
    results depend weakly on the initial grid size and the full 3D
    travel-time grids are read into memory, which can be slow for many
    observations or large grids.
    """

    def __init__(self, init_num_cell_x, init_num_cell_y, init_num_cell_z,
                 min_node_size, max_num_nodes, num_scatter,
                 use_station_density=False, stop_on_min_node_size=True):
        """
        :param init_num_cell_x: initial number of oct-tree cells along x
        :type init_num_cell_x: int
        :param init_num_cell_y: initial number of oct-tree cells along y
        :type init_num_cell_y: int
        :param init_num_cell_z: initial number of oct-tree cells along z
        :type init_num_cell_z: int
        :param min_node_size: smallest node side length to process; the
            search terminates after a node smaller than this is generated
        :type min_node_size: float
        :param max_num_nodes: total number of nodes to process
        :type max_num_nodes: int
        :param num_scatter: number of scatter samples to draw from the
            oct-tree results
        :type num_scatter: int
        :param use_station_density: if True, weight cell probabilities by
            the number of stations in the cell when choosing which cell
            to subdivide (default: False)
        :type use_station_density: bool
        :param stop_on_min_node_size: if True, stop the whole search when
            min_node_size is first reached; if False, only stop
            subdividing that particular cell (default: True)
        :type stop_on_min_node_size: bool
        """
        self.init_num_cell_x = init_num_cell_x
        self.init_num_cell_y = init_num_cell_y
        self.init_num_cell_z = init_num_cell_z
        self.min_node_size = min_node_size
        self.max_num_nodes = max_num_nodes
        self.num_scatter = num_scatter
        self.use_station_density = use_station_density
        self.stop_on_min_node_size = stop_on_min_node_size

    @classmethod
    def init_default(cls):
        """Return an instance with the stock 5x5x5 grid / 5000-node setup."""
        return cls(5, 5, 5, 1E-6, 5000, 500, False, True)

    def __repr__(self):
        """Serialize as a NLLoc LOCSEARCH OCT control-file statement."""
        fields = [
            'LOCSEARCH OCT',
            f'{self.init_num_cell_x}',
            f'{self.init_num_cell_y}',
            f'{self.init_num_cell_z}',
            f'{self.min_node_size}',
            f'{self.max_num_nodes}',
            f'{self.num_scatter}',
            f'{self.use_station_density:d}',
            f'{self.stop_on_min_node_size:d}',
        ]
        return ' '.join(fields) + '\n'

    @property
    def type(self):
        """Control-file statement keyword for this parameter block."""
        return 'LOCSEARCH'
class GaussianModelErrors:
    """Gaussian travel-time error model (NLLoc LOCGAU statement)."""

    def __init__(self, sigma_time, correlation_length):
        """
        :param sigma_time: typical travel-time error in seconds to one
            station caused by velocity-model errors
        :type sigma_time: float
        :param correlation_length: length scale controlling the
            travel-time covariance between stations
        :type correlation_length: float
        """
        self.sigma_time = sigma_time
        self.correlation_length = correlation_length

    @classmethod
    def init_default(cls):
        """Return an instance with 1e-3 defaults for both parameters."""
        return cls(1E-3, 1E-3)

    def __repr__(self):
        """Serialize as a NLLoc LOCGAU control-file statement."""
        return f'LOCGAU {self.sigma_time} {self.correlation_length}\n'
# Likelihood functions accepted by LocationMethod (NLLoc LOCMETH keyword).
__valid_location_methods__ = ['GAU_ANALYTIC', 'EDT', 'EDT_OT_WT',
                              'EDT_OT_WT_ML']
class LocationMethod:
    def __init__(self, method, max_dist_sta_grid, min_number_phases,
                 max_number_phases, min_number_s_phases, vp_vs_ratio,
                 max_number_3d_grid_memory, min_dist_sta_grid):
        """
        container for location method

        :param method: location method/algorithm ( GAU_ANALYTIC = the inversion
        approach of Tarantola and Valette (1982) with L2-RMS likelihood
        function. EDT = Equal Differential Time likelihood function cast into
        the inversion approach of Tarantola and Valette (1982) EDT_OT_WT =
        Weights EDT-sum probabilities by the variance of origin-time estimates
        over all pairs of readings. This reduces the probability (PDF values)
        at points with inconsistent OT estimates, and leads to more compact
        location PDF's. EDT_OT_WT_ML = version of EDT_OT_WT with EDT
        origin-time weighting applied using a grid-search, maximum-likelihood
        estimate of the origin time. Less efficient than EDT_OT_WT which
        uses simple statistical estimate of the origin time.)
        :param max_dist_sta_grid: maximum distance in km between a station and the
        center of the initial search grid; phases from stations beyond this
        distance will not be used for event location
        :param min_number_phases: minimum number of phases that must be
        accepted before event will be located
        :param max_number_phases: maximum number of accepted phases that will
        be used for event location; only the first maxNumberPhases read from
        the phase/observations file are used for location
        :param min_number_s_phases: minimum number of S phases that must be
        accepted before event will be located
        :param vp_vs_ratio: P velocity to S velocity ratio. If VpVsRatio > 0.0
        then only P phase travel-times grids are read and VpVsRatio is used to
        calculate S phase travel-times. If VpVsRatio < 0.0 then S phase
        travel-times grids are used.
        :param max_number_3d_grid_memory: maximum number of 3D travel-time
        grids to attempt to read into memory for Metropolis-Gibbs search. This
        helps to avoid time-consuming memory swapping that occurs if the total
        size of grids read exceeds the real memory of the computer. 3D grids
        not in memory are read directly from disk. If maxNum3DGridMemory < 0
        then NLLoc attempts to read all grids into memory.
        :param min_dist_sta_grid: minimum distance in km between a station and
        the center of the initial search grid; phases from stations closer than
        this distance will not be used for event location
        """
        # reject unknown likelihood-function names early
        validate(method, __valid_location_methods__)
        self.method = method
        self.max_dist_sta_grid = max_dist_sta_grid
        self.min_number_phases = min_number_phases
        self.max_number_phases = max_number_phases
        self.min_number_s_phases = min_number_s_phases
        self.vp_vs_ratio = vp_vs_ratio
        self.max_number_3d_grid_memory = max_number_3d_grid_memory
        self.min_dist_sta_grid = min_dist_sta_grid
@classmethod
def init_default(cls):
| |
from collections import defaultdict
import numpy as np
import pandas as pd
from scipy.stats import chi2_contingency, fisher_exact, f_oneway
from .simulations import classifier_posterior_probabilities
from .utils.crosstabs import (crosstab_bayes_factor,
crosstab_ztest,
top_bottom_crosstab)
from .utils.validate import boolean_array, check_consistent_length
def anova(labels, results, subset_labels=None):
    """
    One-way ANOVA over numeric results grouped by categorical labels.

    Parameters
    ------------
    labels : array_like
        categorical group values, e.g. ['M', 'F']
    results : array_like
        real-valued scores, one per label
    subset_labels : list of strings, optional
        restrict the test to these labels only

    Returns
    ----------
    F_onewayResult : scipy.stats object (essentially a 2-tuple)
        one-way f-statistic and p-value for the null hypothesis that
        all groups share the same sample mean
    """
    check_consistent_length(labels, results)
    frame = pd.DataFrame(list(zip(labels, results)), columns=['label', 'result'])
    if subset_labels is not None:
        frame = frame.loc[frame['label'].isin(subset_labels)]
    # groupby drops NaN labels, matching the original dropna() behaviour;
    # f_oneway is symmetric in its arguments, so group order is irrelevant
    grouped = frame.groupby('label')['result']
    return f_oneway(*(scores for _, scores in grouped))
def bias_test_check(labels, results, category=None, test_thresh=0.5, **kwargs):
    """
    Utility function for checking if statistical tests are passed
    at a reference threshold

    Parameters
    --------
    labels : array_like
        containing categorical values like ['M', 'F']
    results : array_like
        containing real numbers
    category : string, optional
        the name of the category labels are in, e.g. 'Gender'
    test_thresh : numeric
        threshold value to test
    **kwargs : optional additional arguments for compare_groups

    Returns
    --------
    print statement indicating whether specific statistical tests pass or fail
    """
    # Bug fix: a comma was missing between `test_thresh=0.5` and `**kwargs`
    # in the signature, which made this function a syntax error.
    min_props, z_ps, fisher_ps, chi_ps, bfs = compare_groups(
        labels, results, low=test_thresh, num=1, **kwargs)
    # if no category is specified, concatenate strings
    if category is None:
        # NOTE(review): set iteration order is arbitrary, so the derived
        # name can vary between runs -- confirm this is acceptable.
        category = '_vs_'.join(set(labels))[:20]
    # test if passes at test_thresh
    passes_all = True
    if min_props[test_thresh] < .8:
        passes_all = False
        print("*%s fails 4/5 test at %.2f*" % (category, test_thresh))
        print(" - %s minimum proportion at %.2f: %.3f" %
              (category, test_thresh, min_props[test_thresh]))
    if fisher_ps[test_thresh] < .05:
        passes_all = False
        print("*%s fails Fisher exact test at %.2f*" % (category, test_thresh))
        print(" - %s p-value at %.2f: %.3f" %
              (category, test_thresh, fisher_ps[test_thresh]))
    if chi_ps[test_thresh] < .05:
        passes_all = False
        print("*%s fails Chi squared test at %.2f*" % (category, test_thresh))
        print(" - %s p-value at %.2f: %.3f" %
              (category, test_thresh, chi_ps[test_thresh]))
    if z_ps[test_thresh] < .05:
        passes_all = False
        print("*%s fails z test at %.2f*" % (category, test_thresh))
        print(" - %s Z-test p-value at %.2f: %.3f" %
              (category, test_thresh, z_ps[test_thresh]))
    if bfs[test_thresh] > 3.:
        passes_all = False
        print("*%s Bayes Factor test at %.2f*" % (category, test_thresh))
        print(" - %s Bayes Factor at %.2f: %.3f" %
              (category, test_thresh, bfs[test_thresh]))
    if passes_all:
        print("*%s passes 4/5 test, Fisher p-value, Chi-Squared p-value, "
              "z-test p-value and Bayes Factor at %.2f*\n"
              % (category, test_thresh))
def make_bias_report(clf, df, feature_names, categories,
                     low=None, high=None, num=100, ref_threshold=None):
    """
    Build the report dictionary consumed by `bias_bar_plot`, based on
    `classifier_posterior_probabilities`.

    Parameters
    -----------
    clf : sklearn clf
        fitted clf with predict object
    df : pandas DataFrame
        reference dataframe containing labeled features to test for bias
    feature_names : list of strings
        names of features used in fitting clf
    categories : list of strings
        names of categories to test for bias, e.g. ['gender']
    low, high, num : float, float, int
        range of values for thresholds
    ref_threshold : float
        cutoff value at which to generate metrics

    Returns
    --------
    out_dict : dictionary
        category names mapped to group names, average probabilities and
        errors, e.g. {'gender': {'categories': ['F', 'M'],
                                 'averages': [.5, .5],
                                 'errors': [.1, .1]}}
    """
    threshes, probs = classifier_posterior_probabilities(
        df, clf, feature_names, categories, low, high, num)
    # default reference cutoff: the threshold at the 80th percentile
    if ref_threshold is None:
        ref_threshold = sorted(threshes)[int(len(threshes) * .8)]
    ref_idx = list(threshes).index(ref_threshold)
    report = {}
    for category in categories:
        # probability keys look like "<category>__<group>"
        group_names = [key.split('__')[1] for key in probs.keys()
                       if key.split('__')[0] == category]
        report[category] = {
            'categories': group_names,
            'averages': [probs[name][ref_idx][0] for name in group_names],
            'errors': [probs[name][ref_idx][1:] for name in group_names],
        }
    return report
def get_group_proportions(labels, results, low=None, high=None, num=100):
    """
    Returns pass proportions for each group present in labels, according to
    their results. A result "passes" a threshold when it is <= the threshold.

    Parameters
    ------------
    labels : array_like
        contains categorical labels
    results : array_like
        contains numeric or boolean values
    low : float
        if None, will default to min(results)
    high : float
        if None, will default to max(results)
    num : int, default 100
        number of thresholds to check

    Returns
    --------
    prop_dict: dictionary
        contains {group_name : [[thresholds, pass_proportions]]}
    """
    # Bug fix: `if not low:` treated an explicit low/high of 0 as missing;
    # test against None instead (consistent with compare_groups).
    if low is None:
        low = min(results)
    if high is None:
        high = max(results)
    thresholds = np.linspace(low, high, num).tolist()
    # hoist the label Series out of the loop; it never changes
    label_series = pd.Series(labels)
    prop_dict = defaultdict(list)
    for group in set(labels):
        pass_props = []
        for thresh in thresholds:
            decs = [score <= thresh for score in results]
            crosstab = pd.crosstab(label_series, pd.Series(decs))
            row = crosstab.loc[group]
            # fraction of this group at or below the threshold
            pass_props.append(row[True] / float(row.sum()))
        prop_dict[group].append(thresholds)
        prop_dict[group].append(pass_props)
    return prop_dict
def compare_groups(labels, results,
                   low=None, high=None, num=100,
                   comp_groups=None, print_skips=False):
    """
    Function to plot proportion of largest and smallest bias groups and
    get relative z scores
    Parameters
    --------
    labels : array_like
        contains categorical values like ['M', 'F']
    results : array_like
        contains real numbers, e.g. threshold scores or floats in (0,1)
    low : float
        lower threshold value
    high : float
        upper threshold value
    num : int
        number of thresholds to check
    comp_groups : list of strings, optional
        subset of labels to compare, e.g. ['white', 'black']
    print_skips : bool
        whether to display thresholds skipped
    Returns
    ---------
    min_props : dict
        contains (key, value) of (threshold : max group/min group proportions)
    z_ps : dict
        contains (key, value) of (threshold : p-value of two tailed z test)
    fisher_ps : dict
        contains (key, value) of (threshold : p-value of fisher exact test)
    chi_ps : dict
        contains (key, value) of (threshold : p-value of chi squared test)
    bayes_facts : dict
        contains (key, value) of (threshold : bayes factor)
    """
    # cast labels and scores to pandas Series
    df = pd.DataFrame(list(zip(labels, results)), columns=['label', 'result'])
    # One entry per non-skipped threshold in each of these result dicts.
    min_props = {}
    fisher_ps = {}
    chi_ps = {}
    z_ps = {}
    bayes_facts = {}
    if comp_groups is not None:
        # NOTE(review): after this filter df has fewer rows than `results`,
        # but the loop below assigns a full-length decision list to df['dec'];
        # that looks like it would raise for a proper subset — confirm with a
        # comp_groups caller.
        df = df[df['label'].isin(comp_groups)]
    # define range of values to test over if not inputted
    if low is None:
        low = min(results)
    if high is None:
        high = max(results)
    thresholds = np.linspace(low, high, num)
    skip_thresholds = []
    for thresh in thresholds:
        # binary pass/fail decision at this threshold
        df['dec'] = [i >= thresh for i in results]
        # compare rates of passing across groups
        ctabs = pd.crosstab(df['label'], df['dec'])
        # skip any thresholds for which the crosstabs are one-dimensional
        # (everyone passed, or everyone failed)
        if 1 in ctabs.shape:
            skip_thresholds.append(thresh)
            continue
        # row-normalize to per-group pass rates
        normed_ctabs = ctabs.div(ctabs.sum(axis=1), axis=0)
        true_val = max(set(df['dec']))
        max_group = normed_ctabs[true_val].max()
        # ratio of each group's pass rate to the best group's pass rate;
        # the minimum is the "80% rule"-style disparity measure
        normed_proportions = normed_ctabs[true_val] / max_group
        min_proportion = normed_proportions.min()
        # run statistical tests
        if ctabs.shape == (2, 2):
            # exactly two groups: test_multiple runs all four tests at once
            test_results = test_multiple(df['label'].values, df['dec'].values)
            z_pval = test_results.get('z_score')[1]
            fisher_pval = test_results.get('fisher_p')[1]
            chi2_pval = test_results.get('chi2_p')[1]
            bayes_fact = test_results.get('BF')
        else:
            # >2 groups: z / fisher compare only the top and bottom groups,
            # chi2 and the Bayes factor use the full table
            top_bottom_ctabs = top_bottom_crosstab(df['label'], df['dec'])
            z_pval = crosstab_ztest(top_bottom_ctabs)[1]
            fisher_pval = fisher_exact(top_bottom_ctabs)[1]
            chi2_pval = chi2_contingency(ctabs)[1]
            bayes_fact = crosstab_bayes_factor(ctabs)
        min_props[thresh] = min_proportion
        z_ps[thresh] = z_pval
        fisher_ps[thresh] = fisher_pval
        chi_ps[thresh] = chi2_pval
        bayes_facts[thresh] = bayes_fact
    if len(skip_thresholds) > 0 and print_skips:
        print('One-dimensional thresholds were skipped: %s' % skip_thresholds)
    return min_props, z_ps, fisher_ps, chi_ps, bayes_facts
def test_multiple(labels, decisions,
tests=('ztest', 'fisher', 'chi2', 'BF'), display=False):
"""
Function that returns p_values for z-score, fisher exact, and chi2 test
of 2x2 crosstab of passing rate by labels and decisions
See docs for z_test_ctabs, fisher_exact, chi2_contingency and
bf_ctabs for details of specific tests
Parameters
----------
labels : array_like
categorical labels for each corresponding value of `decision` ie. M/F
decisions : array_like
binary decision values, ie. True/False or 0/1
tests : list
a list of strings specifying the tests to run, valid options
are 'ztest', 'fisher', 'chi2' and 'bayes'. Defaults to all four.
-ztest: p-value for two-sided z-score for proportions
-fisher: p-value for Fisher's exact test for proportions
-chi2: p-value for chi-squared test of independence for proportions
-bayes: bayes factor for independence assuming uniform prior
display : bool
print the results of each test in addition to returning them
Returns
-------
results : dict
dictionary of values, one for each test.
Valid keys are: 'z_score', 'fisher_p', 'chi2_p', and 'BF'
Examples
--------
>>> # no real difference between groups
>>> labels = ['group1']*100 + ['group2']*100 + ['group3']*100
>>> decisions = [1,0,0]*100
>>> all_test_ctabs(dependent_ctabs)
(0.0, 1.0, | |
an instance attribute using the general syntax `object.attribute`:
# Check gary's attributes
print(gary.sound) # This is an class attribute
print(gary.name) # This is a instance attribute
For completeness' sake, note that we are still able to carry out the associated `Dog` methods on `gary`:
# Check gary's methods
gary.speak()
**A word of caution about `self`**
The use of `self` as the first parameter in instance attribute definition is a frequent criticism of the Python programming language. This is partially because `self` is what Pythonistas have agreed to use as the first parameter passed during instance attribute definition - it is *not* a keyword. In theory, you could use any word to refer to the current instance of a `class` object; however, you (and others reading your code) will have to remember later on what you used to refer to the current instance. As such, we tacitly agree that we'll use `self` as the first parameter in instance attribute definition to refer to the current instance of a `class` object.
## Class Inheritance
With an understanding of `class` definition and instance attributes, we can now introduce the concept of `class` inheritance. The concept here is that objects can be built from other objects, inheriting the other objects' properties and building off of them. This allows for `class` objects to be related in a hierarchical order.
For example, with our `Dog()` example above, we could also create a `Cat` object. If we then wanted to create a general `Animal` class, we could then have `Dog` and `Cat` inherit any general animal properties from `Animal()`.
To look at a specific example, let's consider the general class `Tool` and what the syntax would look like if we wanted to inherit `Tool`'s properties in the more specific class `Hammer`:
class Tool():
# define instance attributes
def __init__(self):
self.is_tool = True
self.tool_type = None
# define class method
def use_tool(self):
print('Using tool.')
Here, we see that there are two *instance* attributes: `is_tool` and `tool_type`. The class `Tool` specifies that any `Tool` will have the value `True` for `is_tool` and `None` for `tool_type`, as this is the more general class.
There's also the `Tool` method `use_tool`, which, for a given instance, when executed will print `'Using tool.'`
As previously, we can initialize a `Tool` type object, storing it in `my_tool`. We can then access the attributes of `Tool` and carry out any `Tool` methods:
# access attribute
my_tool = Tool()
my_tool.is_tool
# use method
my_tool.use_tool()
However, what we're most interested in here is the concept of **inheritance**. In the code below, we see how you can inherit `Tool` properties within a new `class` object `Hammer`.
By passing `Tool` in when creating the `class` `Hammer`, `Hammer`-type objects will have the attributes and methods specified within `Tool`.
We also see with the code here that we're specifying that `tool_type` for a `Hammer` will be `'Hammer'` and provide an additional instance attribute `why`, giving the `Hammer` a purpose - `'To hammer things.'`
class Hammer(Tool): #inherit Tool class
def __init__(self):
self.tool_type = 'Hammer'
self.why = 'To hammer things.'
We can now create a `Hammer`-type object and use the methods from `Tool` - such as `use_tool`- since that is inherited.
# Hammer has Tool attributes and methods
my_hammer = Hammer()
my_hammer.use_tool()
And, we can also access the `Hammer`-specific attributes:
# Hammer has Hammer attributes
my_hammer.why
However, you'll note that `Hammer` does *not* have access to the *attributes* from `Tool`:
# this code will error
my_hammer.is_tool
This is because our `Hammer` `class` defines its own *instance attributes*, overriding the instance attributes of the inherited class. We'll see in just a second how to avoid this using `super()`.
Further, note that inheritance is a one-way street. If you created a `Tool` class object (which is the parent class here) and does *not* inherit from `Hammer`, it only has access to `Tool` attributes and methods, so the following would execute without issue:
# Tool objects only have Tool attributes & methods
my_tool = Tool()
my_tool.is_tool
However, if you tried to access the `why` attribute on a `Tool` type object, you would encounter an `AttributeError`, as there is no attribute `why` in the `Tool` class object:
# Tool does NOT inherit from Hammer
# this code will produce an error
my_tool.why
Note in the example above that `tool_type` of `Hammer` class overrides the `tool_type` specified in the `Tool` class that it inherits. The `class` definition can and will override attributes and methods defined in the Parent class.
### `super()`
`super()` allows you to refer to the inherited, parent class, without naming it specifically.
This is particularly helpful when you want to extend the functionality of a class from which you are inheriting.
In the example above, we inherited from `Tool` in our `Hammer` definition and saw that while the `Tool` methods were available, the `Tool` instance attributes were *not*, due to the fact that our `Hammer` definition had overridden the `Tool` instance attribute definition.
So...how do we avoid that? This is where `super()` comes in. In the example below, we've added a single line of code (`super().__init__()`) to our `Hammer` definition from above. This specifies that we want to inherit the instance attributes from our parent class `Tool`.
class Hammer(Tool): #inherit Tool class
def __init__(self):
super().__init__()
self.tool_type = 'Hammer'
self.why = 'To hammer things.'
With this revised `Hammer` definition, we now are able to access the instance attributes from the parent class
new_tool = Hammer()
new_tool.is_tool
Note, however, that for attributes defined in both `Hammer` and `Tool`, even when we inherit instance attributes from the parent `Tool` class, the child class `Hammer` takes precedence, such that `tool_type` still stores 'Hammer' (from the `Hammer` definition), rather than `None` (from the parent `Tool` class definition)
new_tool.tool_type
## `class`: Summary
- `class` creates a new class type
- names tend to use CapWords case
- can have attributes (including instance attributes) and methods
- `obj.attribute` accesses data stored in attribute
- `obj.method()` carries out code defined within method
- instance attributes defined with `__init__`
- `__init__` is a reserved method in Python
- This "binds the attributes with the given arguments"
- `self` refers to current instance
- `super()` refers to the inherited `class`
- to create an object (instance) of a specified class type (`ClassType`):
- `object_name = ClassType(input1, input2)`
- `self` is not given an input when creating an object of a specified class
## Everything in Python is an Object!
We mentioned the fact that Python is an object-oriented programming (OOP) language before without providing a formal definition or discussing it in much detail. OOP is a programming paradigm in which code is organized around objects. Now that we have a better understanding of what `class` objects are, we can better understand why this is a helpful paradigm.
<div class="alert alert-success">
<b>Object-oriented programming (OOP)</b> is a programming paradigm in which code is organized around objects. Python is an OOP programming language.
</div>
To drive this point home, below we use `isinstance()` which checks whether the first parameter is of the second parameter's specified type. For example, we see below that the following returns `True`, as 6 is an `int` type object:
isinstance(6, int)
Alternatively, the following returns `False` as 'hi' is a `str` type object and _not_ an `int`:
isinstance('hi', int)
With this logic we can see that everything we've discussed thus far in this textbook is an `object`, as every `isinstance()` statement below demonstrates that everything is in fact an object in Python:
### Data variables are objects
print(isinstance(True, object))
print(isinstance(1, object))
print(isinstance('word', object))
print(isinstance(None, object))
a = 3
print(isinstance(a, object))
### Functions are objects
print(isinstance(sum, object))
print(isinstance(max, object))
# Custom function are also objects
def my_function():
print('yay Python!')
isinstance(my_function, object)
### Class definitions & instances are objects
class MyClass():
def __init__(self):
self.data = 13
my_instance = MyClass()
print(isinstance(MyClass, object))
print(isinstance(my_instance, object))
## `class`: `ProfCourses`
With the basic explanations of concepts and provided examples thus far, you should have a sense that new objects can be created to store information (in the form of attributes) that are *attached* or *belong* to the object type *and* that methods are functions that are *attached* or *operate on* the object type directly. However, to this | |
<reponame>zc-BEAR/Course_Repo<filename>CS303_Pro/AI_Project1/test1.py
import numpy as np
COLOR_BLACK = 1
COLOR_WHITE = -1
COLOR_NONE = 0
class AI(object):
    """Reversi/Othello player.

    Collects every legal move into candidate_list (best-weighted first) and
    appends the move chosen by the bounded alpha-beta search (findMove) last;
    the judge reads the final element as the chosen move.
    """

    def __init__(self, chessboard_size, color, time_out):
        self.chessboard_size = chessboard_size
        # Side this AI plays: COLOR_BLACK (1) or COLOR_WHITE (-1).
        self.color = color
        self.time_out = time_out
        # Refreshed on every call to go().
        self.candidate_list = []

    def go(self, chessboard):
        """Compute the candidate moves for the current position.

        Side effects: clears and refills self.candidate_list; updates the
        module globals Limit (search depth) and resultX/resultY (search pick).
        """
        chessboard = np.array(chessboard)
        self.candidate_list.clear()
        # Adapt the global search depth to the stage of the game.
        changeLimit(countRound(chessboard))
        # Fix: the original bound these to `len` and `list`, shadowing the
        # builtins; it also appended a duplicate of the first move to the
        # (discarded) local move list — dead code, removed.
        num_moves, moves = getMove(self.color, chessboard)
        moves = BubbleSort(moves, num_moves)
        for i in range(num_moves):
            self.candidate_list.append((moves[i][0], moves[i][1]))
        # findMove publishes its choice through the globals resultX/resultY.
        findMove(chessboard, self.color, 1, Limit, -INF, INF)
        self.candidate_list.append((resultX, resultY))
# The eight board directions (row delta, column delta).
DIR = ((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1))
# Positional weights for the 8x8 board: corners are strongly positive,
# squares adjacent to corners strongly negative.
# NOTE(review): the table is not perfectly symmetric (e.g. row 0 ends in 10
# while row 7 has 11; rows 1 and 6 differ in one cell) — possibly typos,
# left as-is.
Varr = [
    [300, -20, 11, 5, 5, 10, -20, 300],
    [-20, -80, 1, 1, 1, 2, -80, -20],
    [11, 2, 4, 2, 2, 4, 2, 11],
    [5, 1, 2, 1, 1, 2, 1, 5],
    [5, 1, 2, 1, 1, 2, 1, 5],
    [11, 2, 4, 2, 2, 4, 2, 11],
    [-20, -80, 2, 1, 1, 2, -80, -20],
    [300, -20, 11, 5, 5, 11, -20, 300]
]
# Disc colors as stored on the board; 0 is an empty square.
BLACK = 1
WHITE = -1
# Sentinel for "winning/losing" evaluation scores.
INF = 1e8
# Alpha-beta search depth; rewritten by changeLimit() each turn.
Limit = 7
# Number of stones currently on the board (updated elsewhere).
curr: int = 0
def countRound(board):
    """Return the total number of stones (black plus white) on the board."""
    occupied = 0
    for row in range(8):
        for col in range(8):
            cell = board[row][col]
            if cell == BLACK or cell == WHITE:
                occupied += 1
    return occupied
def changeLimit(curr: int):
    """Set the module-wide search depth `Limit` from the stone count `curr`.

    Shallower in the volatile midgame, deeper toward the endgame; from 54
    stones on, depth 20 effectively searches to the end of the game.
    """
    global Limit
    # (upper bound on stone count, depth) pairs, checked in order.
    schedule = (
        (7, 6),
        (15, 6),
        (33, 4),
        (44, 5),
        (48, 6),
        (50, 6),
        (51, 6),
        (53, 7),
    )
    for bound, depth in schedule:
        if curr <= bound:
            Limit = depth
            return
    Limit = 20
def getV(color, board):
    """Positional evaluation of `board` from `color`'s point of view.

    Sums the Varr weights of own stones minus opponent stones, with
    +/-INF-based overrides for finished boards and wipe-outs.
    """
    global Varr
    global INF
    opponent = -color
    mine = 0
    theirs = 0
    score = 0
    for row in range(8):
        for col in range(8):
            cell = board[row][col]
            if cell == color:
                score += Varr[row][col]
                mine += 1
            elif cell == opponent:
                score -= Varr[row][col]
                theirs += 1
    total = mine + theirs
    if total == 64:
        # Board full: the stone difference decides the game.
        if mine > theirs:
            return INF + mine - theirs
        if mine < theirs:
            return -INF + mine - theirs
        return 0
    # A wiped-out side has lost outright.
    if mine == 0:
        return -INF - theirs
    if theirs == 0:
        return INF + mine
    return score
def getMove(color, board):
    """Return (count, moves) of all legal moves for `color` on `board`.

    A square is a legal move when, in some direction, it is followed by a
    run of opponent stones capped by one of `color`'s own stones.
    """
    opponent = -color
    moves = []
    for x in range(8):
        for y in range(8):
            if board[x][y] != 0:
                continue
            legal = False
            for dx, dy in DIR:
                cx = x + dx
                cy = y + dy
                # The immediate neighbor must be an opponent stone.
                if cx < 0 or cx >= 8 or cy < 0 or cy >= 8 or board[cx][cy] != opponent:
                    continue
                # Walk past the opponent run looking for our own cap.
                while True:
                    cx += dx
                    cy += dy
                    if cx < 0 or cx >= 8 or cy < 0 or cy >= 8 or board[cx][cy] == 0:
                        break
                    if board[cx][cy] == color:
                        moves.append([x, y])
                        legal = True
                        break
                if legal:
                    break
    return len(moves), moves
def Moves(board, color, x, y):
    """Play `color` at (x, y): place the stone and flip every captured run.

    Mutates `board` in place and returns it.
    """
    board[x][y] = color
    opponent = -color
    for dx, dy in DIR:
        cx = x + dx
        cy = y + dy
        # Captures only start with an adjacent opponent stone.
        if cx < 0 or cx >= 8 or cy < 0 or cy >= 8 or board[cx][cy] != opponent:
            continue
        while True:
            cx += dx
            cy += dy
            if cx < 0 or cx >= 8 or cy < 0 or cy >= 8 or board[cx][cy] == 0:
                # Ran off the board or hit an empty square: nothing to flip.
                break
            if board[cx][cy] == color:
                # Found our cap: walk back toward (x, y) flipping the run.
                while cx != x or cy != y:
                    cx -= dx
                    cy -= dy
                    board[cx][cy] = color
                break
    return board
def getstable(color, board):
    """
    Estimate the signed count of "stable" discs (discs that can no longer
    be flipped) from `color`'s point of view: own stable discs add +1 each,
    opponent stable discs add -1 (via color * board value).

    NOTE(review): parts of this routine look broken (see inline notes); it
    is left byte-identical because the search has been tuned against it.
    """
    # h[i]/s[j]: 1 while row i / column j is completely full of stones;
    # k1/k0: the 15 anti-diagonal / diagonal fullness flags.
    h, s, k1, k0 = [], [], [], []
    for i in range(0, 8):
        h.append(1)
        s.append(1)
    for i in range(0, 15):
        k1.append(1)
        k0.append(1)
    for i in range(0, 8):
        for j in range(0, 8):
            if (board[i][j] == 0):
                h[i] = 0
                s[j] = 0
                # NOTE(review): an empty square should presumably clear the
                # diagonal flags, but these assign 1 (their initial value),
                # so the diagonals always count as full — confirm intent.
                k1[i - j + 7] = 1
                k0[i + j] = 1
    # stab: signed stable-disc total; stb: per-square stability flags.
    stab, stb = 0, []
    for i in range(0, 8):
        L = []
        for j in range(0, 8):
            L.append(0)
            if (i == 0 or i == 7 or j == 0 or j == 7):
                # NOTE(review): the condition below is always true (i cannot
                # equal both 0 and 7), so `continue` always fires and the two
                # statements after it are unreachable — which incidentally
                # avoids an IndexError on stb, whose row is appended later.
                if (i != 0 or i != 7 or j != 0 or j != 7):
                    continue
                stab = stab + color * board[i][j]
                stb[i][j] = abs(board[i][j])
            elif (h[i] and s[j] and k1[i - j + 7] and k0[i + j]):
                # Interior disc with its row/column/diagonals full: stable.
                stab = stab + color * board[i][j]
        stb.append(L)
    # Scan the top (y=0) and bottom (y=7) edge rows.
    for y in range(0, 8, 7):
        if (h[y] == 1):
            # Full edge row: every non-corner disc on it is stable.
            for x in range(1, 7):
                stab = stab + color * board[y][x]
            continue
        # Otherwise walk inward from each corner (d = -1: from the right
        # corner, d = +1: from the left corner).
        for d in range(-1, 2, 2):
            for X in range(1, 7):
                x = X
                if (d == 1):
                    x = 7 - X
                if (board[y][x] == 0):
                    break
                elif (board[y][x] == board[y][x + d]):
                    # Same color as the neighbor nearer the corner:
                    # inherit its stability flag.
                    stb[y][x] = stb[y][x + d]
                    stab = stab + color * board[y][x]
                elif (board[y][x] * board[y][x + d] == -1 and stb[y][x + d] == 1):
                    # Opposite color next to a stable disc: mark provisional.
                    stb[y][x] = -1
                elif (board[y][x] * board[y][x + d] == -1 and stb[y][x + d] == -1):
                    # Second color change: the run between becomes stable.
                    tem = x + d
                    while (stb[y][tem] != -1):
                        stb[y][tem] = 1
                        stab = stab + color * board[y][tem]
                        tem = tem + d
    # Same scan for the left (y=0) and right (y=7) edge columns.
    for y in range(0, 8, 7):
        if (s[y] == 1):
            for x in range(1, 7):
                stab = stab + color * board[x][y]
            continue
        for d in range(-1, 2, 2):
            for X in range(1, 7):
                x = X
                if (d == 1):
                    x = 7 - X
                if (board[x][y] == 0):
                    break
                if (board[x][y] == board[x + d][y]):
                    stb[x][y] = stb[x + d][y]
                    stab = stab + color * board[x][y]
                elif (board[x][y] * board[x + d][y] == -1 and stb[x + d][y] == 1):
                    stb[x][y] = -1
                elif (board[x][y] * board[x + d][y] == -1 and stb[x + d][y] == -1):
                    tem = x + d
                    while (stb[tem][y] != -1):
                        stb[tem][y] = 1
                        stab = stab + color * board[tem][y]
                        tem = tem + d
    return stab
# Move counter (not updated in the visible code).
CurrentRound = 0
# Number of evaluated positions; incremented by Function() for profiling.
NumOfFind = 0
def Function(color, isMyTurn, board, movePower):
    """Heuristic evaluation of `board`.

    Combines positional value (getV), mobility (movePower), stable-disc
    count (getstable) and edge runs (stableLine). When called on the
    opponent's turn (isMyTurn == 0) the perspective is flipped first.
    Side effect: increments the global NumOfFind profiling counter.
    """
    global NumOfFind
    NumOfFind += 1
    if isMyTurn == 0:
        color = -color
        movePower = -movePower
    positional = getV(color, board)
    stability = getstable(color, board)
    edge_runs = stableLine(color, board)
    return positional + 6 * movePower + 20 * stability + 6 * edge_runs
def stableLine(color, board):
ct = [0, 1, 2, 3, 4, 5, 6, 7, 8]
nowcolor = 0
nowcnt = 0
start = 1
res = 0
for i in range(0, 8):
if (start == 0 and board[i][0] == 0):
start = 1
elif (start == 1 and nowcnt != 0 and board[i][0] == 0):
res = res + ct[nowcnt] * color * nowcolor
start = 1
nowcnt = 0
nowcolor = 0
elif (nowcolor == 0 and start == 1 and board[i][0] != 0):
nowcolor = board[i][0]
nowcnt = nowcnt + 1
elif (start == 1 and board[i][0] != 0 and nowcolor != board[i][0]):
start = 0
elif (start == 1 and board[i][0] == nowcolor):
nowcnt = nowcnt + 1
if (start == 1):
res = res + ct[nowcnt] * color * nowcolor
return res
def Finished(board):
    """Return 1 when the game is over (board full or one side wiped out), else 0."""
    black = 0
    white = 0
    for row in range(8):
        for col in range(8):
            cell = board[row][col]
            if cell == BLACK:
                black += 1
            elif cell == WHITE:
                white += 1
    if black == 0 or white == 0 or black + white == 64:
        return 1
    return 0
def BubbleSort(moves, numOfMoves):
    """Sort the first `numOfMoves` entries of `moves` by positional weight.

    Orders moves in descending Varr[r][c] so the most promising squares come
    first. Mutates `moves` in place and returns it.

    Replaces the original hand-rolled O(n^2) swap sort with the built-in
    Timsort; relative order among equal-weight moves was unspecified before
    and remains so (sorted() is stable on the original order).
    """
    global Varr
    moves[:numOfMoves] = sorted(
        moves[:numOfMoves],
        key=lambda m: Varr[m[0]][m[1]],
        reverse=True,
    )
    return moves
# Cap on evaluated positions per search (compared against NumOfFind).
MAX_FIND = 15000
# Best move found by findMove(); (-1, -1) until a search has run.
resultX, resultY = -1, -1
def findMove(board, color, isMyTurn, round, alpha, beta):
global NumOfFind
global resultX
global resultY
if (round == 0 or Finished(board) == | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import urllib
from urllib2 import HTTPError
from urlparse import urlparse, urljoin, parse_qs
from collections import defaultdict
import logging
import os
import requests
from BeautifulSoup import BeautifulSoup
from formencode import validators as fev
from allura.lib import helpers as h
from allura import model as M
from forgeimporters.base import ProjectExtractor
from forgeimporters.base import File
log = logging.getLogger(__name__)
def _as_text(node, chunks=None):
    """
    Flatten a BeautifulSoup node to plain text. Unlike node.text this keeps
    the whitespace around tags and renders <br/> tags as \n.
    """
    if chunks is None:
        chunks = []
    for child in node:
        if isinstance(child, basestring):
            chunks.append(child)
            continue
        if child.name == 'br':
            chunks.append('\n')
        else:
            # Recurse into nested tags, sharing the accumulator.
            _as_text(child, chunks)
    return ''.join(chunks)
def _as_markdown(tag, project_name):
    """
    Convert a BeautifulSoup tag's children to Markdown text, rewriting
    Google Code links:
      - project-internal issue links  -> [text](#<issue id>)
      - project-internal revision links -> [r<rev>]
      - other code.google.com links   -> absolute GC issue-tracker links
      - everything else               -> plain Markdown links/text
    Returns the joined, stripped Markdown string.
    """
    fragments = []
    for fragment in tag:
        if getattr(fragment, 'name', None) == 'a':
            href = urlparse(fragment['href'])
            qs = parse_qs(href.query)
            # Relative links and code.google.com links count as GC-internal.
            gc_link = not href.netloc or href.netloc == 'code.google.com'
            path_parts = href.path.split('/')
            target_project = None
            if gc_link:
                if len(path_parts) >= 5 and path_parts[1] == 'a':
                    # hosted-domain form: /a/<domain>/p/<name>/...
                    target_project = '/'.join(path_parts[1:5])
                elif len(path_parts) >= 3:
                    # plain form: /p/<name>/...
                    target_project = path_parts[2]
            internal_link = target_project == project_name
            if gc_link and internal_link and 'id' in qs:
                # rewrite issue 123 project-internal issue links
                fragment = '[%s](#%s)' % (fragment.text, qs['id'][0])
            elif gc_link and internal_link and 'r' in qs:
                # rewrite r123 project-internal revision links
                fragment = '[r%s]' % qs['r'][0]
            elif gc_link:
                # preserve GC-internal links (probably issue PROJECT:123
                # inter-project issue links)
                fragment = '[%s](%s)' % (
                    h.plain2markdown(
                        fragment.text, preserve_multiple_spaces=True, has_html_entities=True),
                    # possibly need to adjust this URL for /a/ hosted domain URLs,
                    # but it seems fragment['href'] always starts with / so it replaces the given path
                    urljoin('https://code.google.com/p/%s/issues/' %
                            project_name, fragment['href']),
                )
            else:
                # convert all other links to Markdown syntax
                fragment = '[%s](%s)' % (fragment.text, fragment['href'])
        elif getattr(fragment, 'name', None) == 'i':
            # preserve styling of "(No comment was entered for this change.)"
            # messages
            fragment = '*%s*' % h.plain2markdown(fragment.text,
                                                 preserve_multiple_spaces=True, has_html_entities=True)
        elif getattr(fragment, 'name', None) == 'b':
            # preserve styling of issue template
            fragment = '**%s**' % h.plain2markdown(fragment.text,
                                                   preserve_multiple_spaces=True, has_html_entities=True)
        elif getattr(fragment, 'name', None) == 'br':
            # preserve forced line-breaks
            fragment = '\n'
        else:
            # convert all others to plain MD (Python 2: `unicode` builtin)
            fragment = h.plain2markdown(
                unicode(fragment), preserve_multiple_spaces=True, has_html_entities=True)
        fragments.append(fragment)
    return ''.join(fragments).strip()
def csv_parser(page):
    """Parse one page of Google Code's issue-list CSV export.

    Drops the header row and the trailing "next page" footer (a last line
    that does not start with a quote), then strips the CSV quoting, commas
    and newlines from each remaining line.

    :param page: file-like object positioned at the start of the CSV page
    :return: list of bare issue-id strings (possibly empty)
    """
    lines = page.readlines()
    if not lines:
        return []
    # skip CSV header
    lines = lines[1:]
    # skip "next page here" info footer
    # Fix: guard the footer check — a page containing only the header row
    # used to raise IndexError here.
    if lines and not lines[-1].startswith('"'):
        lines.pop()
    # remove CSV wrapping (quotes, commas, newlines)
    return [line.strip('",\n') for line in lines]
class GoogleCodeProjectNameValidator(fev.FancyValidator):
    """
    Accept a Google Code project given either as a bare name or as a URL
    (<name>.googlecode.com or code.google.com/p/<name>/, including hosted
    /a/<domain>/p/<name> forms) and normalize it to the project-name string
    used by the importer. Raises fev.Invalid for malformed or unreadable
    projects.
    """
    not_empty = True
    messages = {
        'invalid': 'Please enter a project URL, or a project name containing '
        'only letters, numbers, and dashes.',
        'unavailable': 'This project is unavailable for import',
    }

    def _to_python(self, value, state=None):
        # GC project names: lowercase alphanumeric + dashes, max 62 chars,
        # must not start with a dash.
        project_name_re = re.compile(r'^[a-z0-9][a-z0-9-]{,61}$')
        if project_name_re.match(value):
            # just a name
            project_name = value
        else:
            # try as a URL
            project_name = None
            # project_name_simple is the bare name even for hosted projects,
            # used only for the regex validity check below.
            project_name_simple = None
            url = urlparse(value.strip())
            if url.netloc.endswith('.googlecode.com'):
                project_name = url.netloc.split('.')[0]
            elif url.netloc == 'code.google.com':
                path_parts = url.path.lstrip('/').split('/')
                if len(path_parts) >= 2 and path_parts[0] == 'p':
                    project_name = path_parts[1]
                elif len(path_parts) >= 4 and path_parts[0] == 'a' and path_parts[2] == 'p':
                    # hosted-domain project: keep "a/<domain>/p/<name>"
                    project_name_simple = path_parts[3]
                    project_name = '/'.join(path_parts[0:4])
            if not project_name_simple:
                project_name_simple = project_name
            if not project_name or not project_name_re.match(project_name_simple):
                raise fev.Invalid(self.message('invalid', state), value, state)
        # HEAD request against the project page; network access required.
        if not GoogleCodeProjectExtractor(project_name).check_readable():
            raise fev.Invalid(self.message('unavailable', state), value, state)
        return project_name
def split_project_name(project_name):
    '''
    For hosted projects, the project_name includes the hosted domain. Split, like:
    :param str project_name: "a/eclipselabs.org/p/restclient-tool"
    :return: ``("/a/eclipselabs.org", "restclient-tool")``

    Plain project names come back unchanged with an empty prefix.
    '''
    if not project_name.startswith('a/'):
        return '', project_name
    parts = project_name.split('/')
    return '/a/' + parts[1], parts[3]
class GoogleCodeProjectExtractor(ProjectExtractor):
    """
    Screen-scraping extractor for Google Code project metadata and issues.

    URLs are built from PAGE_MAP; pages are fetched and parsed through the
    inherited ProjectExtractor machinery (self.get_page / self.page /
    self.url, BeautifulSoup results).
    """
    BASE_URL = 'http://code.google.com'
    # Known repository types, matched against the #crumb_root breadcrumb text.
    RE_REPO_TYPE = re.compile(r'(svn|hg|git)')
    # Page-name -> URL template; hosted_domain_prefix is '' or '/a/<domain>'.
    PAGE_MAP = {
        'project_info': BASE_URL + '{hosted_domain_prefix}/p/{project_name}/',
        'source_browse': BASE_URL + '{hosted_domain_prefix}/p/{project_name}/source/browse/',
        'issues_csv': BASE_URL + '{hosted_domain_prefix}/p/{project_name}/issues/csv?can=1&colspec=ID&sort=ID&start={start}',
        'issue': BASE_URL + '{hosted_domain_prefix}/p/{project_name}/issues/detail?id={issue_id}',
    }
    # Google Code license label -> Allura trove category full name; unknown
    # labels fall back to 'Other/Proprietary License'.
    LICENSE_MAP = defaultdict(lambda: 'Other/Proprietary License', {
        'Apache License 2.0': 'Apache License V2.0',
        'Artistic License/GPL': 'Artistic License',
        'Eclipse Public License 1.0': 'Eclipse Public License',
        'GNU GPL v2': 'GNU General Public License version 2.0 (GPLv2)',
        'GNU GPL v3': 'GNU General Public License version 3.0 (GPLv3)',
        'GNU Lesser GPL': 'GNU Library or Lesser General Public License version 2.0 (LGPLv2)',
        'MIT License': 'MIT License',
        'Mozilla Public License 1.1': 'Mozilla Public License 1.1 (MPL 1.1)',
        'New BSD License': 'BSD License',
        'Other Open Source': 'Open Software License',
    })
    # Projects showing this placeholder logo get no icon imported.
    DEFAULT_ICON = 'http://www.gstatic.com/codesite/ph/images/defaultlogo.png'

    def get_page_url(self, page_name, **kw):
        """Format PAGE_MAP[page_name] for this project."""
        # override, to handle hosted domains
        hosted_domain_prefix, project_name = split_project_name(self.project_name)
        return self.PAGE_MAP[page_name].format(
            project_name=urllib.quote(project_name),
            hosted_domain_prefix=hosted_domain_prefix,
            **kw)

    def check_readable(self):
        """HEAD the project page; True when it answers 200."""
        resp = requests.head(self.get_page_url('project_info'))
        return resp.status_code == 200

    def get_short_description(self, project):
        """Copy the scraped description onto the Allura project (mutates it)."""
        page = self.get_page('project_info')
        project.short_description = page.find(
            itemprop='description').text.strip()

    def get_icon(self, project):
        """Import the project logo as the Allura project icon (48x48 thumbnail).

        No-op when the project still uses the Google Code default logo.
        """
        page = self.get_page('project_info')
        icon_url = urljoin(self.url, page.find(itemprop='image').get('src'))
        if icon_url == self.DEFAULT_ICON:
            return
        icon_name = urllib.unquote(urlparse(icon_url).path).split('/')[-1]
        icon = File(icon_url, icon_name)
        filetype = icon.type
        # work around Google Code giving us bogus file type
        if filetype.startswith('text/html'):
            filetype = 'image/png'
        M.ProjectFile.save_image(
            icon_name, icon.file, filetype,
            square=True, thumbnail_size=(48, 48),
            thumbnail_meta={'project_id': project._id, 'category': 'icon'})

    def get_license(self, project):
        """Append the trove category matching the scraped code license."""
        page = self.get_page('project_info')
        license = page.find(text='Code license').findNext().find(
            'a').text.strip()
        trove = M.TroveCategory.query.get(fullname=self.LICENSE_MAP[license])
        project.trove_license.append(trove._id)

    def get_repo_type(self):
        """Return 'svn', 'hg' or 'git' from the source-browse breadcrumb.

        Raises Exception when the breadcrumb is missing or unrecognized.
        """
        page = self.get_page('source_browse')
        repo_type = page.find(id="crumb_root")
        if not repo_type:
            raise Exception("Couldn't detect repo type: no #crumb_root in "
                            "{0}".format(self.url))
        re_match = self.RE_REPO_TYPE.match(repo_type.text.lower())
        if re_match:
            return re_match.group(0)
        else:
            raise Exception("Unknown repo type: {0}".format(repo_type.text))

    @classmethod
    def iter_issues(cls, project_name):
        """
        Iterate over all issues for a project,
        using paging to keep the responses reasonable.

        Yields (issue_id, extractor) pairs; 404s on individual issues are
        logged and skipped. After each batch it re-polls for issues created
        during the import.
        """
        extractor = cls(project_name)
        issue_ids = extractor.get_issue_ids(start=0)
        while issue_ids:
            for issue_id in sorted(issue_ids):
                try:
                    yield (int(issue_id), cls(project_name, 'issue', issue_id=issue_id))
                except HTTPError as e:
                    if e.code == 404:
                        log.warn('Unable to load GC issue: %s #%s: %s: %s',
                                 project_name, issue_id, e, e.url)
                        continue
                    else:
                        raise
            # get any new issues that were created while importing
            # (jumping back a few in case some were deleted and new ones added)
            new_ids = extractor.get_issue_ids(start=len(issue_ids) - 10)
            issue_ids = new_ids - issue_ids

    def get_issue_ids(self, start=0):
        """Collect the full set of issue ids via the paged CSV export."""
        limit = 100
        issue_ids = set()
        page = self.get_page('issues_csv', parser=csv_parser, start=start)
        while page:
            # NOTE(review): a truthy list always has len > 0, so this early
            # return (which would yield None) looks unreachable — confirm.
            if len(page) <= 0:
                return
            issue_ids.update(page)
            start += limit
            page = self.get_page('issues_csv', parser=csv_parser, start=start)
        return issue_ids

    def get_issue_summary(self):
        """Return the issue title with HTML entities decoded."""
        text = self.page.find(id='issueheader').findAll(
            'td', limit=2)[1].span.text.strip()
        bs = BeautifulSoup(text, convertEntities=BeautifulSoup.HTML_ENTITIES)
        return bs.text

    def get_issue_description(self):
        """Return the first comment block (#hc0) converted to Markdown."""
        return _as_markdown(self.page.find(id='hc0').pre, self.project_name)

    def get_issue_created_date(self):
        """Return the creation timestamp string from the first comment."""
        return self.page.find(id='hc0').find('span', 'date').get('title')

    def get_issue_mod_date(self):
        """Return the last comment's date, or the creation date if no comments."""
        comments = list(self.iter_comments())
        if comments:
            last_update = comments[-1]
            return last_update.created_date
        else:
            return self.get_issue_created_date()

    def get_issue_creator(self):
        """Return the reporter as a UserLink."""
        a = self.page.find(id='hc0').find(True, 'userlink')
        return UserLink(a)

    def get_issue_status(self):
        """Return the status text, or '' when the issue has none."""
        tag = self.page.find(id='issuemeta').find(
            'th', text=re.compile('Status:')).findNext().span
        if tag:
            return tag.text.strip()
        else:
            return ''

    def get_issue_owner(self):
        """Return the owner as a UserLink, or None when unassigned."""
        tag = self.page.find(id='issuemeta').find(
            'th', text=re.compile('Owner:')).findNext().find(True, 'userlink')
        if tag:
            return UserLink(tag)
        else:
            return None

    def get_issue_labels(self):
        """Return the issue's labels as plain-text strings."""
        label_nodes = self.page.find(id='issuemeta').findAll('a', 'label')
        return [_as_text(l) for l in label_nodes]

    def get_issue_attachments(self):
        """Return the attachments of the original issue description."""
        return _get_attachments(self.page.find(id='hc0'))

    def get_issue_stars(self):
        """Return the star count, or 0 when nobody starred the issue."""
        stars_re = re.compile(r'(\d+) (person|people) starred this issue')
        stars = self.page.find(id='issueheader').find(text=stars_re)
        if stars:
            return int(stars_re.search(stars).group(1))
        return 0

    def iter_comments(self):
        """Yield Comment objects for every comment, oldest page first."""
        # first, get all pages if there are multiple pages of comments
        looking_for_comment_pages = True
        comment_page_urls = [self.url]
        while looking_for_comment_pages:
            first_comment = self.page.find('div', 'vt issuecomment')
            looking_for_comment_pages = False
            if first_comment and 'cursor_off' not in first_comment['class']:
                # this is not a real comment, just forward/back links
                for link in first_comment.findAll('a'):
                    if link.text.startswith('Older'):
                        prev_comments_page = urljoin(self.url, link['href'])
                        comment_page_urls.insert(0, prev_comments_page)
                        looking_for_comment_pages = True
                        self.get_page(prev_comments_page)  # prep for next iteration of loop
        # then go through those to get the actual comments
        for comment_page_url in comment_page_urls:
            self.get_page(comment_page_url)
            # regular comments have cursor_off class
            for comment in self.page.findAll('div', 'cursor_off vt issuecomment'):
                yield Comment(comment, self.project_name)
class UserLink(object):
    """A Google Code user <a> tag, rendered as a Markdown link or bare name."""

    def __init__(self, tag):
        self.name = tag.text.strip()
        href = tag.get('href')
        if href:
            # Profile links on GC pages are relative; anchor them to the site.
            self.url = urljoin(
                GoogleCodeProjectExtractor.BASE_URL, href)
        else:
            self.url = None

    def __str__(self):
        if not self.url:
            return self.name
        return '[{name}]({url})'.format(name=self.name, url=self.url)
def _get_attachments(tag):
attachment_links = tag.find('div', 'attachments')
if attachment_links:
attachments = []
for | |
#!/usr/bin/env python3
# Copyright (C) 2020 <NAME> and mtrycz
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Tests basic functionality of the getblocktemplatelight and submitblocklight RPC methods.
"""
import os
import platform
import random
import shutil
import stat
import tempfile
import time
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, assert_blocktemplate_equal
from test_framework import messages, script, util, blocktools
def to_bytes(x, encoding='utf8'):
    """Coerce *x* to an immutable ``bytes`` value.

    str is encoded with *encoding*, bytes is returned as-is, bytearray is
    copied into bytes.  Anything else raises TypeError.
    """
    if isinstance(x, str):
        return x.encode(encoding)
    if isinstance(x, bytes):
        return x
    if isinstance(x, bytearray):
        return bytes(x)
    raise TypeError("Not a string or bytes-like object")
def hash160(b):
    """Return the HASH160 of *b* via the test framework's script helper,
    accepting str, bytes, or bytearray input."""
    return script.hash160(to_bytes(b))
def hash256(x):
    """Return the double-SHA256 of *x* via the test framework's messages helper,
    accepting str, bytes, or bytearray input."""
    return messages.hash256(to_bytes(x))
def hex2bytes(x, rev=True):
    """ Decodes x and reverses its bytes if rev=True. If rev=False, no reversal is done.
    x can be a list in which case this operation is performed for each element recursively.

    Accepts str, bytes, or bytearray hex input (or a list/tuple of those);
    raises TypeError for anything else.
    """
    if isinstance(x, (str, bytes, bytearray)):
        if isinstance(x, (bytes, bytearray)):
            # Fix: bytes.fromhex() only accepts str, so the original raised a
            # TypeError for the bytes/bytearray inputs its own isinstance
            # check admits.  Decode ASCII-hex bytes to str first.
            x = x.decode('ascii')
        decoded = bytes.fromhex(x)
        return decoded[::-1] if rev else decoded
    elif isinstance(x, (list, tuple)):
        return [hex2bytes(i, rev=rev) for i in x]
    raise TypeError('Unsupported type')
def bytes2hex(x, rev=True):
    """ Reverses x and returns its hex representation as a string. If rev=False, no reversal is done.
    x can be a list in which case this operation is performed for each element recursively. """
    if isinstance(x, (list, tuple)):
        return [bytes2hex(i, rev=rev) for i in x]
    data = to_bytes(x)
    if rev:
        data = data[::-1]
    return data.hex()
def get_merkle_branch(hashes):
    """ See src/mining.cpp MakeMerkleBranch() for an explanation of this algorithm.

    Takes a list of hashes (bytes/bytearray) and returns the merkle branch as
    a list of bytes.  The input list is left untouched.
    """
    res = []
    if not hashes:
        return res
    assert isinstance(hashes[0], (bytes, bytearray))
    # Fix: work on a copy — the original appended a duplicated last element
    # directly to the caller's list, mutating the argument as a side effect.
    hashes = list(hashes)
    while len(hashes) > 1:
        res.append(hashes[0])  # take the first one
        if len(hashes) % 2 == 0:
            # enforce odd number of hashes
            hashes.append(hashes[-1])
        new_size = (len(hashes) - 1) // 2
        # pair up the remaining hashes (skipping the one taken above)
        hashes = [hash256(hashes[i * 2 + 1] + hashes[i * 2 + 2]) for i in range(new_size)]
    assert len(hashes) == 1
    res.append(hashes[0])  # put the last one left
    return res
def merkle_root_from_cb_and_branch(cbhash, branch):
    """ Given a coinbase tx hash (bytes) and a merkle branch (list of bytes), calculate the merkle root.
    The merkle root is returned as a uint256 suitable for setting a CBlock.hashMerkleRoot """
    # Fold the branch into the coinbase hash, one level at a time.
    acc = cbhash
    for sibling in branch:
        acc = hash256(acc + sibling)
    return messages.uint256_from_str(acc)  # this is now the root
class GBTLightTest(BitcoinTestFramework):
""" Functional tests for the getblocktemplatelight and submitblocklight RPC methods. """
def set_test_params(self):
    """Configure the node count, per-node args, and node 1's custom -gbtstoredir."""
    self.setup_clean_chain = True
    self.num_nodes = 2  # We need two nodes for getblocktemplatelight RPC to function (bitcoind node policy)
    self._cache_size = 10
    my_args = [
        # We specify a cache size to a known value for this test.
        '-gbtcachesize={}'.format(self._cache_size),
    ]
    # Fix: give each node an *independent* list. The original used
    # `[my_args] * self.num_nodes`, which aliases the SAME list object for
    # every node — the append of -gbtstoredir below would then leak into
    # node 0's arguments as well.
    self.extra_args = [list(my_args) for _ in range(self.num_nodes)]
    # lastly, make node[1] have a custom -gbtstoredir to test that this arg takes effect
    self._custom_gbt_dir = tempfile.gettempdir()
    assert self._custom_gbt_dir
    uniq_suf = hash256(str(time.time()).encode('utf8')).hex()[:10]  # make a unique suffix based on the current time
    self._custom_gbt_dir = os.path.join(self._custom_gbt_dir, "gbt_{}".format(uniq_suf))
    self.extra_args[1].append(
        '-gbtstoredir={}'.format(self._custom_gbt_dir)
    )
def check_job_id(self, gbtl):
    """Verify the template's job_id equals hash160(prevhash + concatenated merkle branch)."""
    prev_hash = hex2bytes(gbtl['previousblockhash'])
    branch_concat = b''.join(hex2bytes(gbtl['merkle']))
    expected = bytes2hex(hash160(prev_hash + branch_concat))
    assert_equal(gbtl['job_id'], expected)
    self.log.info("job_id ok!")
def check_merkle(self, gbtl, txids):
    """Verify the template's merkle branch matches one recomputed from the sorted txids."""
    expected = bytes2hex(get_merkle_branch(hex2bytes(sorted(txids))))
    assert_equal(gbtl['merkle'], expected)
    self.log.info("merkle ok!")
# Class-level default for the minimum relay fee; overwritten at the start of
# each __run_test() with the node's actual "relayfee" from getnetworkinfo.
# NOTE(review): oddly placed between methods — consider moving it to the top
# of the class body.
min_relay_fee = 0
def gen_valid_tx(self, node_num=0):
    """ Generate a single, valid, signed transaction using the wallet from node_num, and return its hex.
    This transaction is not submitted to mempool; it is simply generated, signed, and returned. """
    node = self.nodes[node_num]
    fee = self.min_relay_fee
    # random (possibly zero) amount; change is computed by the util helper
    amount = Decimal("0.00002") * random.randrange(10)
    total_in, tx_inputs = util.gather_inputs(node, amount + fee)
    tx_outputs = util.make_change(node, total_in, amount, fee)
    tx_outputs[node.getnewaddress()] = float(amount)
    unsigned_hex = node.createrawtransaction(tx_inputs, tx_outputs)
    return node.signrawtransactionwithwallet(unsigned_hex)["hex"]
def wait_for_txs(self, txids, nodenum=0, timeout=15):
    """ Wait up to timeout seconds for getblocktemplate's tx set to contain `txids`. """
    node = self.nodes[nodenum]
    wanted = set(txids)
    deadline = time.time() + timeout
    while time.time() < deadline:
        seen = {tx["txid"] for tx in node.getblocktemplate()["transactions"]}
        if wanted <= seen:  # all wanted txids present
            return
        time.sleep(0.250)
    raise RuntimeError("Timed out waiting for required txids from getblocktemplate")
def set_mock_time(self, timeval):
    """ Enables mock time for all nodes. Currently unused but here in case we need it. """
    mock_val = int(timeval)
    for node in self.nodes:
        node.setmocktime(mock_val)
def clear_mock_time(self):
    """ Disables mock time for all nodes (reverting back to wall clock time).
    Currently unused but here in case we need it. """
    self.set_mock_time(0)  # a value of 0 turns mock time off
def run_test(self):
    """Entry point: run the GBT-light scenario twice, then clean up node 1's store dir."""
    try:
        self.log.info("Node 1 is using custom -gbtstoredir: {}".format(self._custom_gbt_dir))
        self.__run_test()
        # Second pass: after the first pass mined a block, every C++-side
        # data structure (pblocktemplate, etc.) must have been reset and
        # left in a sane state.
        self.__run_test(nblocks_to_gen=0, ntx_to_gen=12, test_additional_txs=False)
    finally:
        # The test framework doesn't know about the custom gbtstoredir, so
        # remove it ourselves no matter how the test ended.
        shutil.rmtree(self._custom_gbt_dir)
        self.log.info("Cleaned-up custom -gbtstoredir: {}".format(self._custom_gbt_dir))
def __run_test(self, *, nblocks_to_gen=150, ntx_to_gen=19, test_additional_txs=True):
assert ntx_to_gen > 0
# we will need this value for random_transaction below, and for self.gen_valid_tx
self.min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
if nblocks_to_gen > 0:
if self.is_wallet_compiled():
# generate some blocks to wallet to have spendable coins
self.nodes[0].generate(nblocks_to_gen)
else:
# generate just 1 block to leave IBD state (no wallet so no spending in this mode)
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
self.sync_all()
gbtl0 = self.nodes[0].getblocktemplatelight()
gbtl1 = self.nodes[1].getblocktemplatelight()
assert_blocktemplate_equal(gbtl0, gbtl1)
def check_gbt_store_dir(gbtdir, job_id):
expected_data_file = os.path.join(gbtdir, job_id)
assert os.path.exists(gbtdir), "The -gbtstoredir must exist"
assert os.path.exists(expected_data_file), "The -gbtstoredir must contain the expected job_id file"
# check that node[1] is using the custom -gbtstoredir argument we gave it.
check_gbt_store_dir(self._custom_gbt_dir, gbtl1['job_id'])
self.check_job_id(gbtl0)
self.check_merkle(gbtl0, []) # empty merkle should be ok
# generate a bunch of transactions
txids = []
ntx = ntx_to_gen if self.is_wallet_compiled() else 0
for i in range(ntx):
txid, txhex, fee = util.random_transaction((self.nodes[0],), Decimal("0.000123"),
self.min_relay_fee, Decimal("0.000001"), 0)
txids.append(txid)
# Since we have two nodes, sync the mempools
self.sync_all()
# Wait for getblocktemplate to see the txids (it uses a 5s caching strategy before it calculates a new template)
# 'setmocktime' here worked too but we prefer to let the clock advance normally, rather than use a pegged
# mocktime for this test case. (Real execution time for this whole test case is about the same whether using
# mocktime or this polling strategy, so better to keep time advancing normally).
self.wait_for_txs(txids, 0)
self.wait_for_txs(txids, 1)
# Check that, once the nodes are synced, they give the same template
gbtl0 = self.nodes[0].getblocktemplatelight()
gbtl1 = self.nodes[1].getblocktemplatelight()
assert_blocktemplate_equal(gbtl0, gbtl1)
# check job_id is ok
self.check_job_id(gbtl0)
# check merkle is ok
self.check_merkle(gbtl0, txids)
if self.is_wallet_compiled() and test_additional_txs:
# add the signed tx to a job.. we wil submit this later (only iff wallet enabled)
signedtx = self.gen_valid_tx()
signedtxid = bytes2hex(hash256(bytes.fromhex(signedtx)))
self.log.info("Signed txid: {} hex: {}".format(signedtxid, signedtx))
gbtl0 = self.nodes[0].getblocktemplatelight({}, [signedtx])
submit_job_id = gbtl0['job_id']
submit_tmpl = gbtl0
self.check_job_id(gbtl0)
self.check_merkle(gbtl0, txids + [signedtxid])
else:
# No wallet (or caller wants to not test additional_tx).
# Just use the last job with no additional_txs as the submit job
submit_job_id, submit_tmpl = gbtl0['job_id'], gbtl0
# These tx's are invalid on this chain, but they do at least deserialize correctly, so we can use them
# to make a bunch of jobs
extratxs = [
"0100000002ae54229545be8d2738e245e7ea41d089fa3def0a48e9410b49f39ec43826971d010000006a4730440220204169229eb1"
"7dc49ad83675d693e4012453db9a8d1af6f118278152c709f6be022077081ab76df0356e53c1ba26145a3fb98ca58553a98b1c130a"
"2f6cff4d39767f412103cfbc58232f0761a828ced4ee93e87ce27f26d005dd9c87150aad5e5f07073dcaffffffff4eca0e441d0a27"
"f874f41739382cb80fdf3aac0f7b8316e197dd42e7155590c1010000006a47304402203832a75ccfc2f12474c1d3d2fc22cd72cc92"
"4c1b73995a27a0d07b9c5a745f3a022035d98e1017a4cb02ff1509d17c752047dca2b270b927793f2eb9e30af1ac02d6412103cfbc"
"58232f0761a828ced4ee93e87ce27f26d005dd9c87150aad5e5f07073dcaffffffff0260ea00000000000017a9149eefc3ae114359"
"8a830d66cbc32aa583fa3d987687fb030100000000001976a914bddb57be877bd32264fc40670b87b6fb271813f688ac00000000",
"0100000001993b9740d3e289876cbe6920008a35c4a08b7dc4bd48ff61b198f163af3f354900000000644102a8588b2e1a808ade29"
"4aa76a1e63137099fa087841603a361171f0c1473396f482d8d1a61e2d3ff94280b1125114868647bff822d2a74461c6bbe6ffc06f"
"9d412102abaad90841057ddb1ed929608b536535b0cd8a18ba0a90dba66ba7b1c1f7b4eafeffffff0176942200000000001976a91"
"40a373caf0ab3c2b46cd05625b8d545c295b93d7a88acf3fa1400",
]
extratxids = bytes2hex([hash256(x) for x in hex2bytes(extratxs, rev=False)])
# test "additional_txs"
gbtl0 = self.nodes[0].getblocktemplatelight({}, extratxs)
self.check_job_id(gbtl0)
self.check_merkle(gbtl0, txids + extratxids)
# test that the "additional_txs" didn't stick around in the cached pblocktemplate in getblocktemplatecommon
gbtl0 = self.nodes[0].getblocktemplatelight({}, extratxs)
self.check_merkle(gbtl0, txids + extratxids)
gbt0 = self.nodes[0].getblocktemplate()
assert_equal(sorted(txids), [x['txid'] for x in gbt0['transactions']])
# try extratxs twice; they should both be present (known behavior)
gbtl0 = self.nodes[0].getblocktemplatelight({}, extratxs + extratxs)
self.check_merkle(gbtl0, txids + extratxids + extratxids)
# try regular getblocktemplatelight again, without extratxs, test that extratxs didn't stick around
gbtl0 = self.nodes[0].getblocktemplatelight()
gbtl1 = self.nodes[1].getblocktemplatelight()
assert_blocktemplate_equal(gbtl0, gbtl1)
self.check_merkle(gbtl0, txids)
# Test RPC errors
# bad txn hex (decode failure) at index 1
assert_raises_rpc_error(-22,
"additional_txs transaction 1 decode failure",
self.nodes[0].getblocktemplatelight,
{}, [extratxs[1], extratxs[0][:-15]])
tmpl = submit_tmpl
job_id = submit_job_id
coinbase_tx = blocktools.create_coinbase(height=int(tmpl["height"]) + 1)
coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
coinbase_tx.rehash()
block = messages.CBlock()
block.nVersion = tmpl["version"]
block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
block.nTime = tmpl["curtime"]
block.nBits = int(tmpl["bits"], 16)
block.nNonce = 0
block.vtx = [coinbase_tx]
block.hashMerkleRoot = merkle_root_from_cb_and_branch(hash256(coinbase_tx.serialize()), hex2bytes(tmpl['merkle']))
block.solve()
# Be | |
___class_destructor__ = _gui.GuiWindowArray____class_destructor__
# SWIG-generated module plumbing for GuiWindowArray: register the Python proxy
# class with the C extension, then expose the static helpers at module level.
GuiWindowArray_swigregister = _gui.GuiWindowArray_swigregister
GuiWindowArray_swigregister(GuiWindowArray)

def GuiWindowArray_class_info():
    """Forward to the C extension's GuiWindowArray_class_info."""
    return _gui.GuiWindowArray_class_info()

# Each wrapper def is immediately shadowed by a rebinding to the raw C
# function (standard SWIG output pattern).
GuiWindowArray_class_info = _gui.GuiWindowArray_class_info

def GuiWindowArray____class_destructor__(instance, is_array):
    """Forward to the C extension's GuiWindowArray____class_destructor__."""
    return _gui.GuiWindowArray____class_destructor__(instance, is_array)

GuiWindowArray____class_destructor__ = _gui.GuiWindowArray____class_destructor__
class GuiWindowVector(GuiWindowBasicArray):
    """SWIG-generated proxy for the C++ GuiWindowVector container.

    Every method below is a thin forwarder into the compiled ``_gui``
    extension; the real logic lives in C++.  Hand edits will be lost if the
    bindings are regenerated.
    """
    # SWIG attribute dispatch tables, seeded with the base class's entries.
    __swig_setmethods__ = {}
    for _s in [GuiWindowBasicArray]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, GuiWindowVector, name, value)
    __swig_getmethods__ = {}
    for _s in [GuiWindowBasicArray]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, GuiWindowVector, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        # Create the underlying C++ object; ``self.this`` tracks ownership
        # (appended when it already exists, assigned otherwise).
        this = _gui.new_GuiWindowVector(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _gui.delete_GuiWindowVector
    __del__ = lambda self: None

    # --- plain forwarders into the C extension ---
    def append(self, *args):
        return _gui.GuiWindowVector_append(self, *args)

    def add(self, element):
        return _gui.GuiWindowVector_add(self, element)

    def insert(self, element, index):
        return _gui.GuiWindowVector_insert(self, element, index)

    def remove_last(self):
        return _gui.GuiWindowVector_remove_last(self)

    def empty(self):
        return _gui.GuiWindowVector_empty(self)

    def remove_all(self):
        return _gui.GuiWindowVector_remove_all(self)

    def clear(self, *args):
        return _gui.GuiWindowVector_clear(self, *args)

    def remove(self, *args):
        return _gui.GuiWindowVector_remove(self, *args)

    def is_empty(self):
        return _gui.GuiWindowVector_is_empty(self)

    def remove_item(self, item, preserve_order):
        return _gui.GuiWindowVector_remove_item(self, item, preserve_order)

    def remove_items(self, item):
        return _gui.GuiWindowVector_remove_items(self, item)

    def get_count(self):
        return _gui.GuiWindowVector_get_count(self)

    def get_capacity(self):
        return _gui.GuiWindowVector_get_capacity(self)

    def set_count(self, *args):
        return _gui.GuiWindowVector_set_count(self, *args)

    def set_capacity(self, *args):
        return _gui.GuiWindowVector_set_capacity(self, *args)

    def refit(self):
        return _gui.GuiWindowVector_refit(self)

    def swap(self, swap_v1, swap_v2):
        return _gui.GuiWindowVector_swap(self, swap_v1, swap_v2)

    def resize(self, *args):
        return _gui.GuiWindowVector_resize(self, *args)

    def reserve(self, *args):
        return _gui.GuiWindowVector_reserve(self, *args)

    def copy_from(self, *args):
        return _gui.GuiWindowVector_copy_from(self, *args)

    def copy_to(self, dest):
        return _gui.GuiWindowVector_copy_to(self, dest)

    def get_list(self, list):
        return _gui.GuiWindowVector_get_list(self, list)

    def set_list(self, list):
        return _gui.GuiWindowVector_set_list(self, list)

    def get_array(self, array):
        return _gui.GuiWindowVector_get_array(self, array)

    def set_array(self, array):
        return _gui.GuiWindowVector_set_array(self, array)

    def move(self, arg2, to):
        return _gui.GuiWindowVector_move(self, arg2, to)

    def item(self, index):
        return _gui.GuiWindowVector_item(self, index)

    def get_memory_size(self):
        return _gui.GuiWindowVector_get_memory_size(self)

    def get_class_info(self):
        return _gui.GuiWindowVector_get_class_info(self)

    # Static wrappers: real staticmethods when _newclass is true, plain
    # attributes otherwise (_newclass comes from the SWIG preamble, which is
    # outside this chunk).
    if _newclass:
        class_info = staticmethod(_gui.GuiWindowVector_class_info)
    else:
        class_info = _gui.GuiWindowVector_class_info
    if _newclass:
        ___class_destructor__ = staticmethod(_gui.GuiWindowVector____class_destructor__)
    else:
        ___class_destructor__ = _gui.GuiWindowVector____class_destructor__
# SWIG-generated module plumbing for GuiWindowVector: register the proxy class
# with the C extension and expose the static helpers at module level.  Each
# wrapper def is immediately shadowed by a rebinding to the raw C function.
GuiWindowVector_swigregister = _gui.GuiWindowVector_swigregister
GuiWindowVector_swigregister(GuiWindowVector)

def GuiWindowVector_class_info():
    """Forward to the C extension's GuiWindowVector_class_info."""
    return _gui.GuiWindowVector_class_info()

GuiWindowVector_class_info = _gui.GuiWindowVector_class_info

def GuiWindowVector____class_destructor__(instance, is_array):
    """Forward to the C extension's GuiWindowVector____class_destructor__."""
    return _gui.GuiWindowVector____class_destructor__(instance, is_array)

GuiWindowVector____class_destructor__ = _gui.GuiWindowVector____class_destructor__
class GuiWindowSet(base.CoreBaseObject):
    """SWIG-generated proxy for the C++ GuiWindowSet container.

    Thin forwarders into the compiled ``_gui`` extension, plus the Python
    sequence protocol (__len__/__getitem__/__setitem__).
    """
    # SWIG attribute dispatch tables, seeded with the base class's entries.
    __swig_setmethods__ = {}
    for _s in [base.CoreBaseObject]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, GuiWindowSet, name, value)
    __swig_getmethods__ = {}
    for _s in [base.CoreBaseObject]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, GuiWindowSet, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        # Create the underlying C++ object; ``self.this`` tracks ownership.
        this = _gui.new_GuiWindowSet(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this

    def get_count(self):
        return _gui.GuiWindowSet_get_count(self)

    def exists(self, *args):
        return _gui.GuiWindowSet_exists(self, *args)

    def is_empty(self):
        return _gui.GuiWindowSet_is_empty(self)

    def is_included(self, set):
        return _gui.GuiWindowSet_is_included(self, set)

    def get_items(self):
        return _gui.GuiWindowSet_get_items(self)

    def get_item(self, index):
        return _gui.GuiWindowSet_get_item(self, index)

    def back(self, *args):
        return _gui.GuiWindowSet_back(self, *args)

    def get_array(self, array):
        return _gui.GuiWindowSet_get_array(self, array)

    def get_list(self, list):
        return _gui.GuiWindowSet_get_list(self, list)

    def get_vector(self, vector):
        return _gui.GuiWindowSet_get_vector(self, vector)

    def to_array(self):
        return _gui.GuiWindowSet_to_array(self)

    def add(self, *args):
        return _gui.GuiWindowSet_add(self, *args)

    def remove(self, index):
        return _gui.GuiWindowSet_remove(self, index)

    def remove_item(self, item):
        return _gui.GuiWindowSet_remove_item(self, item)

    def remove_set(self, set):
        return _gui.GuiWindowSet_remove_set(self, set)

    def remove_all(self):
        return _gui.GuiWindowSet_remove_all(self)

    def toggle(self, item):
        return _gui.GuiWindowSet_toggle(self, item)

    def unite(self, set):
        return _gui.GuiWindowSet_unite(self, set)

    def intersect(self, set):
        return _gui.GuiWindowSet_intersect(self, set)

    def __eq__(self, set):
        # BUG FIX: the original tested `isinstance(obj, type(self))` with `obj`
        # undefined, so every equality comparison raised NameError.  Guard on
        # the actual argument instead.
        if not isinstance(set, type(self)):
            return False
        return _gui.GuiWindowSet___eq__(self, set)

    # NOTE(review): defining __eq__ makes this class unhashable under
    # Python 3 (implicit __hash__ = None); also __ne__ lacks the isinstance
    # guard __eq__ has — both are preserved as-is to avoid behavior changes.
    def __ne__(self, set):
        return _gui.GuiWindowSet___ne__(self, set)

    def begin(self, *args):
        return _gui.GuiWindowSet_begin(self, *args)

    def end(self, *args):
        return _gui.GuiWindowSet_end(self, *args)

    if _newclass:
        get_linear_search_threshold = staticmethod(_gui.GuiWindowSet_get_linear_search_threshold)
    else:
        get_linear_search_threshold = _gui.GuiWindowSet_get_linear_search_threshold

    def get_memory_size(self):
        return _gui.GuiWindowSet_get_memory_size(self)

    def __setitem__(self, index, value):
        return _gui.GuiWindowSet___setitem__(self, index, value)

    def __len__(self):
        return _gui.GuiWindowSet___len__(self)

    def __getitem__(self, index):
        if (index < self.get_count()):
            return self.get_item(index)
        else:
            raise IndexError("The index (" + str(index) + ") is out of range")

    def __nonzero__(self): return True
    __swig_destroy__ = _gui.delete_GuiWindowSet
    __del__ = lambda self: None
# SWIG-generated module plumbing for GuiWindowSet: register the proxy class
# and expose the static helper at module level (the def is immediately
# shadowed by a rebinding to the raw C function).
GuiWindowSet_swigregister = _gui.GuiWindowSet_swigregister
GuiWindowSet_swigregister(GuiWindowSet)

def GuiWindowSet_get_linear_search_threshold():
    """Forward to the C extension's GuiWindowSet_get_linear_search_threshold."""
    return _gui.GuiWindowSet_get_linear_search_threshold()

GuiWindowSet_get_linear_search_threshold = _gui.GuiWindowSet_get_linear_search_threshold
class GuiFloatingWindow(GuiWindow):
    """SWIG-generated director proxy for the C++ GuiFloatingWindow.

    Supports Python subclassing (director pattern) and carries explicit
    Python-side lifecycle management: instances constructed from Python are
    flagged with ``__pycreated__`` and torn down via destroy()/__del__.
    """
    # SWIG attribute dispatch tables, seeded with the base class's entries.
    __swig_setmethods__ = {}
    for _s in [GuiWindow]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, GuiFloatingWindow, name, value)
    __swig_getmethods__ = {}
    for _s in [GuiWindow]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, GuiFloatingWindow, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        # Director support: pass None when constructing this exact class, or
        # self when constructing a Python subclass so C++ can call back into
        # Python overrides.
        if self.__class__ == GuiFloatingWindow:
            _self = None
        else:
            _self = self
        this = _gui.new_GuiFloatingWindow(_self, *args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
        self.__pycreated__ = True  # mark as Python-created; see destroy()/__del__
        self.__collect__()

    def get_viewport_widget(self, index):
        return _gui.GuiFloatingWindow_get_viewport_widget(self, index)

    def get_viewport(self):
        return _gui.GuiFloatingWindow_get_viewport(self)

    def get_content_widget(self, index):
        return _gui.GuiFloatingWindow_get_content_widget(self, index)

    def get_module_widget(self, index):
        return _gui.GuiFloatingWindow_get_module_widget(self, index)

    def get_widget_count(self):
        return _gui.GuiFloatingWindow_get_widget_count(self)

    def process_event(self, evt):
        return _gui.GuiFloatingWindow_process_event(self, evt)

    def get_layout(self, group, get_settings):
        return _gui.GuiFloatingWindow_get_layout(self, group, get_settings)

    def set_layout(self, entry):
        return _gui.GuiFloatingWindow_set_layout(self, entry)

    # Static data members exposed through the SWIG get/set tables; with
    # new-style classes they are additionally exposed as properties.
    __swig_setmethods__["s_do_not_save_in_layout"] = _gui.GuiFloatingWindow_s_do_not_save_in_layout_set
    __swig_getmethods__["s_do_not_save_in_layout"] = _gui.GuiFloatingWindow_s_do_not_save_in_layout_get
    if _newclass:
        s_do_not_save_in_layout = _swig_property(_gui.GuiFloatingWindow_s_do_not_save_in_layout_get, _gui.GuiFloatingWindow_s_do_not_save_in_layout_set)
    __swig_setmethods__["s_do_not_allow_disable"] = _gui.GuiFloatingWindow_s_do_not_allow_disable_set
    __swig_getmethods__["s_do_not_allow_disable"] = _gui.GuiFloatingWindow_s_do_not_allow_disable_get
    if _newclass:
        s_do_not_allow_disable = _swig_property(_gui.GuiFloatingWindow_s_do_not_allow_disable_get, _gui.GuiFloatingWindow_s_do_not_allow_disable_set)
    __swig_setmethods__["s_has_custom_data"] = _gui.GuiFloatingWindow_s_has_custom_data_set
    __swig_getmethods__["s_has_custom_data"] = _gui.GuiFloatingWindow_s_has_custom_data_get
    if _newclass:
        s_has_custom_data = _swig_property(_gui.GuiFloatingWindow_s_has_custom_data_get, _gui.GuiFloatingWindow_s_has_custom_data_set)

    def restore_view(self):
        return _gui.GuiFloatingWindow_restore_view(self)

    def maximize_view(self, view):
        return _gui.GuiFloatingWindow_maximize_view(self, view)

    def is_enabled(self):
        return _gui.GuiFloatingWindow_is_enabled(self)

    if _newclass:
        class_info = staticmethod(_gui.GuiFloatingWindow_class_info)
    else:
        class_info = _gui.GuiFloatingWindow_class_info
    if _newclass:
        ___class_destructor__ = staticmethod(_gui.GuiFloatingWindow____class_destructor__)
    else:
        ___class_destructor__ = _gui.GuiFloatingWindow____class_destructor__

    def get_class_info(self):
        return _gui.GuiFloatingWindow_get_class_info(self)

    def __gui_destroy__(self):
        return _gui.GuiFloatingWindow___gui_destroy__(self)

    def __collect__(self):
        return _gui.GuiFloatingWindow___collect__(self)

    def __uncollect__(self):
        return _gui.GuiFloatingWindow___uncollect__(self)

    def is_created_by_python(self):
        # Only instances built via __init__ above carry __pycreated__.
        if hasattr(self, '__pycreated__'):
            return self.__pycreated__
        else: return False

    def destroy(self):
        # Explicit teardown: hand ownership back to C++, destroy the native
        # window, then drop it from the collection registry.
        if self.is_created_by_python():
            self.__disown__()
            self.__gui_destroy__()
            self.__uncollect__()

    def __del__(self):
        # NOTE(review): is_shown()/hide()/is_destroyed()/thisown come from the
        # GuiWindow base / SWIG machinery outside this chunk — confirm there.
        if not self.is_created_by_python(): return
        if self.is_shown():
            self.hide()
        if self.is_destroyed():
            if self.thisown: self.__disown__()
        else: self.destroy()

    def __disown__(self):
        # Transfer object ownership from Python to C++.
        self.this.disown()
        _gui.disown_GuiFloatingWindow(self)
        return weakref_proxy(self)
# SWIG-generated module plumbing for GuiFloatingWindow: register the proxy
# class and expose the static helpers at module level.  Each wrapper def is
# immediately shadowed by a rebinding to the raw C function.
GuiFloatingWindow_swigregister = _gui.GuiFloatingWindow_swigregister
GuiFloatingWindow_swigregister(GuiFloatingWindow)

def GuiFloatingWindow_class_info():
    """Forward to the C extension's GuiFloatingWindow_class_info."""
    return _gui.GuiFloatingWindow_class_info()

GuiFloatingWindow_class_info = _gui.GuiFloatingWindow_class_info

def GuiFloatingWindow____class_destructor__(instance, is_array):
    """Forward to the C extension's GuiFloatingWindow____class_destructor__."""
    return _gui.GuiFloatingWindow____class_destructor__(instance, is_array)

GuiFloatingWindow____class_destructor__ = _gui.GuiFloatingWindow____class_destructor__
class GuiFloatingWindowBasicArray(base.CoreBaseType):
    """SWIG-generated proxy for the C++ GuiFloatingWindowBasicArray.

    Read/write fixed-capacity array of GuiFloatingWindow items; all methods
    forward into the compiled ``_gui`` extension.
    """
    # SWIG attribute dispatch tables, seeded with the base class's entries.
    __swig_setmethods__ = {}
    for _s in [base.CoreBaseType]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, GuiFloatingWindowBasicArray, name, value)
    __swig_getmethods__ = {}
    for _s in [base.CoreBaseType]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, GuiFloatingWindowBasicArray, name)
    __repr__ = _swig_repr
    # Sentinel returned by get_index() when an item is not found.
    INVALID_INDEX = _gui.GuiFloatingWindowBasicArray_INVALID_INDEX

    def __init__(self, *args):
        # Create the underlying C++ object; ``self.this`` tracks ownership.
        this = _gui.new_GuiFloatingWindowBasicArray(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _gui.delete_GuiFloatingWindowBasicArray
    __del__ = lambda self: None

    def get_count(self):
        return _gui.GuiFloatingWindowBasicArray_get_count(self)

    def get_item(self, index):
        return _gui.GuiFloatingWindowBasicArray_get_item(self, index)

    def set_item(self, index, item):
        return _gui.GuiFloatingWindowBasicArray_set_item(self, index, item)

    def front(self):
        return _gui.GuiFloatingWindowBasicArray_front(self)

    def back(self):
        return _gui.GuiFloatingWindowBasicArray_back(self)

    def exists(self, item):
        return _gui.GuiFloatingWindowBasicArray_exists(self, item)

    def get_index(self, item):
        return _gui.GuiFloatingWindowBasicArray_get_index(self, item)

    def sub(self, index, count):
        return _gui.GuiFloatingWindowBasicArray_sub(self, index, count)

    def get_memory_size(self):
        return _gui.GuiFloatingWindowBasicArray_get_memory_size(self)

    def begin(self, *args):
        return _gui.GuiFloatingWindowBasicArray_begin(self, *args)

    def end(self, *args):
        return _gui.GuiFloatingWindowBasicArray_end(self, *args)

    def get_class_info(self):
        return _gui.GuiFloatingWindowBasicArray_get_class_info(self)

    if _newclass:
        class_info = staticmethod(_gui.GuiFloatingWindowBasicArray_class_info)
    else:
        class_info = _gui.GuiFloatingWindowBasicArray_class_info
    if _newclass:
        ___class_destructor__ = staticmethod(_gui.GuiFloatingWindowBasicArray____class_destructor__)
    else:
        ___class_destructor__ = _gui.GuiFloatingWindowBasicArray____class_destructor__

    # Python sequence protocol on top of the C container.
    def __setitem__(self, index, value):
        return _gui.GuiFloatingWindowBasicArray___setitem__(self, index, value)

    def __len__(self):
        return _gui.GuiFloatingWindowBasicArray___len__(self)

    def __getitem__(self, index):
        if (index < self.get_count()):
            return self.get_item(index)
        else:
            raise IndexError("The index (" + str(index) + ") is out of range")

    def __nonzero__(self): return True
# SWIG-generated module plumbing for GuiFloatingWindowBasicArray: register the
# proxy class and expose the static helpers at module level.  Each wrapper def
# is immediately shadowed by a rebinding to the raw C function.
GuiFloatingWindowBasicArray_swigregister = _gui.GuiFloatingWindowBasicArray_swigregister
GuiFloatingWindowBasicArray_swigregister(GuiFloatingWindowBasicArray)

def GuiFloatingWindowBasicArray_class_info():
    """Forward to the C extension's GuiFloatingWindowBasicArray_class_info."""
    return _gui.GuiFloatingWindowBasicArray_class_info()

GuiFloatingWindowBasicArray_class_info = _gui.GuiFloatingWindowBasicArray_class_info

def GuiFloatingWindowBasicArray____class_destructor__(instance, is_array):
    """Forward to the C extension's GuiFloatingWindowBasicArray____class_destructor__."""
    return _gui.GuiFloatingWindowBasicArray____class_destructor__(instance, is_array)

GuiFloatingWindowBasicArray____class_destructor__ = _gui.GuiFloatingWindowBasicArray____class_destructor__
class GuiFloatingWindowArray(GuiFloatingWindowBasicArray):
    """SWIG-generated proxy for the C++ GuiFloatingWindowArray (a growable
    array built on GuiFloatingWindowBasicArray); all methods forward into the
    compiled ``_gui`` extension."""
    # SWIG attribute dispatch tables, seeded with the base class's entries.
    __swig_setmethods__ = {}
    for _s in [GuiFloatingWindowBasicArray]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, GuiFloatingWindowArray, name, value)
    __swig_getmethods__ = {}
    for _s in [GuiFloatingWindowBasicArray]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, GuiFloatingWindowArray, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        # Create the underlying C++ object; ``self.this`` tracks ownership.
        this = _gui.new_GuiFloatingWindowArray(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _gui.delete_GuiFloatingWindowArray
    __del__ = lambda self: None

    def append(self, *args):
        return _gui.GuiFloatingWindowArray_append(self, *args)

    def get_count(self):
        return _gui.GuiFloatingWindowArray_get_count(self)

    def remove_all(self):
        return _gui.GuiFloatingWindowArray_remove_all(self)

    def resize(self, *args):
        return _gui.GuiFloatingWindowArray_resize(self, *args)

    def copy_from(self, *args):
        return _gui.GuiFloatingWindowArray_copy_from(self, *args)

    def copy_to(self, dest):
        return _gui.GuiFloatingWindowArray_copy_to(self, dest)

    def get_list(self, list):
        return _gui.GuiFloatingWindowArray_get_list(self, list)

    def set_list(self, list):
        return _gui.GuiFloatingWindowArray_set_list(self, list)

    def get_memory_size(self):
        return _gui.GuiFloatingWindowArray_get_memory_size(self)

    def get_class_info(self):
        return _gui.GuiFloatingWindowArray_get_class_info(self)

    if _newclass:
        class_info = staticmethod(_gui.GuiFloatingWindowArray_class_info)
    else:
        class_info = _gui.GuiFloatingWindowArray_class_info
    if _newclass:
        ___class_destructor__ = staticmethod(_gui.GuiFloatingWindowArray____class_destructor__)
    else:
        ___class_destructor__ = _gui.GuiFloatingWindowArray____class_destructor__
# SWIG-generated module plumbing for GuiFloatingWindowArray: register the
# proxy class and expose the static helpers at module level.  Each wrapper def
# is immediately shadowed by a rebinding to the raw C function.
GuiFloatingWindowArray_swigregister = _gui.GuiFloatingWindowArray_swigregister
GuiFloatingWindowArray_swigregister(GuiFloatingWindowArray)

def GuiFloatingWindowArray_class_info():
    """Forward to the C extension's GuiFloatingWindowArray_class_info."""
    return _gui.GuiFloatingWindowArray_class_info()

GuiFloatingWindowArray_class_info = _gui.GuiFloatingWindowArray_class_info

def GuiFloatingWindowArray____class_destructor__(instance, is_array):
    """Forward to the C extension's GuiFloatingWindowArray____class_destructor__."""
    return _gui.GuiFloatingWindowArray____class_destructor__(instance, is_array)

GuiFloatingWindowArray____class_destructor__ = _gui.GuiFloatingWindowArray____class_destructor__
class GuiFloatingWindowVector(GuiFloatingWindowBasicArray):
__swig_setmethods__ = {}
for _s in [GuiFloatingWindowBasicArray]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, GuiFloatingWindowVector, name, value)
__swig_getmethods__ = {}
for _s in [GuiFloatingWindowBasicArray]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, GuiFloatingWindowVector, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _gui.new_GuiFloatingWindowVector(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _gui.delete_GuiFloatingWindowVector
__del__ = lambda self: None
def append(self, *args):
return _gui.GuiFloatingWindowVector_append(self, *args)
def add(self, element):
return _gui.GuiFloatingWindowVector_add(self, element)
def insert(self, element, index):
return _gui.GuiFloatingWindowVector_insert(self, element, index)
def remove_last(self):
return _gui.GuiFloatingWindowVector_remove_last(self)
def empty(self):
return _gui.GuiFloatingWindowVector_empty(self)
def remove_all(self):
return _gui.GuiFloatingWindowVector_remove_all(self)
def clear(self, *args):
return _gui.GuiFloatingWindowVector_clear(self, *args)
def remove(self, *args):
return _gui.GuiFloatingWindowVector_remove(self, *args)
def is_empty(self):
return _gui.GuiFloatingWindowVector_is_empty(self)
def remove_item(self, item, preserve_order):
return _gui.GuiFloatingWindowVector_remove_item(self, item, preserve_order)
def remove_items(self, item):
return _gui.GuiFloatingWindowVector_remove_items(self, item)
def get_count(self):
return _gui.GuiFloatingWindowVector_get_count(self)
def get_capacity(self):
return _gui.GuiFloatingWindowVector_get_capacity(self)
def set_count(self, *args):
return _gui.GuiFloatingWindowVector_set_count(self, *args)
def set_capacity(self, *args):
return _gui.GuiFloatingWindowVector_set_capacity(self, *args)
def | |
# fairseq/models/dlcl_transformer.py (from the adrienxu/SATE repository)
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Tuple
import logging
import torch
from fairseq import checkpoint_utils, utils
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
TransformerModel,
TransformerEncoder,
TransformerDecoder
)
from fairseq.modules.layer_history import CreateLayerHistory
from torch import Tensor
# Default sequence-position limits for source/target sides.
# NOTE(review): their use is outside this chunk — presumably fallbacks when
# the task doesn't supply max positions; confirm against the rest of the file.
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024

# Module-level logger named after this module.
logger = logging.getLogger(__name__)
@register_model("dlcl_transformer")
class DLCLTransformerModel(TransformerModel):
    """
    Transformer model (Vaswani et al., 2017,
    <https://arxiv.org/abs/1706.03762>) extended with dense layer
    cross-linking (DLCL) encoder/decoder variants.

    Args:
        encoder (TransformerEncoder): the encoder
        decoder (TransformerDecoder): the decoder

    The Transformer model provides the following named architectures and
    command-line arguments:

    .. argparse::
        :ref: fairseq.models.dlcl_transformer_parser
        :prog:
    """

    def __init__(self, args, encoder, decoder):
        super().__init__(args, encoder, decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        TransformerModel.add_args(parser)
        # dense layer (DLCL) parameters, mirrored for both sides
        for side in ("encoder", "decoder"):
            parser.add_argument(
                "--{}-history-type".format(side),
                default="learnable_dense",
                help="{} layer history type".format(side),
            )
        for side in ("encoder", "decoder"):
            parser.add_argument(
                "--{}-integration-type".format(side),
                choices=["avg", "sum"],
                help="{} layer integration type".format(side),
            )

    @staticmethod
    def _maybe_load_pretrained(component, checkpoint, kind):
        # Shared helper: optionally overwrite `component` weights from a
        # pretrained checkpoint path (no-op when no path is configured).
        if not checkpoint:
            return component
        logger.info("loaded pretrained %s from: %s" % (kind, checkpoint))
        return checkpoint_utils.load_pretrained_component_from_model(
            component=component, checkpoint=checkpoint, strict=False
        )

    @classmethod
    def build_encoder(cls, args, src_dict, embed_tokens):
        enc = DLCLTransformerEncoder(args, src_dict, embed_tokens)
        ckpt = getattr(args, "load_pretrained_encoder_from", None)
        return cls._maybe_load_pretrained(enc, ckpt, "encoder")

    @classmethod
    def build_decoder(cls, args, tgt_dict, embed_tokens):
        dec = DLCLTransformerDecoder(
            args,
            tgt_dict,
            embed_tokens,
            no_encoder_attn=getattr(args, "no_cross_attention", False),
        )
        ckpt = getattr(args, "load_pretrained_decoder_from", None)
        return cls._maybe_load_pretrained(dec, ckpt, "decoder")
class DLCLTransformerEncoder(TransformerEncoder):
    """
    Transformer encoder consisting of *args.encoder_layers* layers (each a
    :class:`TransformerEncoderLayer`) with dense layer cross-linking: a layer
    history collects every layer's output and provides an aggregate of all
    previous layers as the input to the next one.

    Args:
        args (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): encoding dictionary
        embed_tokens (torch.nn.Embedding): input embedding
    """

    def __init__(self, args, dictionary, embed_tokens):
        # `args` is stored before super().__init__ so it is available while
        # the base class builds the layer stack
        self.args = args
        super().__init__(args, dictionary, embed_tokens)
        # layer history; the forward pass tolerates None
        self.history = CreateLayerHistory(args, is_encoder=True)

    def forward(
        self,
        src_tokens,
        src_lengths: Optional[torch.Tensor] = None,
        return_all_hiddens: bool = False,
        token_embeddings: Optional[torch.Tensor] = None,
    ):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (torch.LongTensor): lengths of each source sentence of
                shape `(batch)`
            return_all_hiddens (bool, optional): also return all of the
                intermediate hidden states (default: False).
            token_embeddings (torch.Tensor, optional): precomputed embeddings
                default `None` will recompute embeddings

        Returns:
            dict:
                - **encoder_out** (Tensor): the last encoder layer's output of
                  shape `(src_len, batch, embed_dim)`
                - **encoder_padding_mask** (ByteTensor): the positions of
                  padding elements of shape `(batch, src_len)`
                - **encoder_embedding** (Tensor): the (scaled) embedding lookup
                  of shape `(batch, src_len, embed_dim)`
                - **encoder_states** (List[Tensor]): all intermediate
                  hidden states of shape `(src_len, batch, embed_dim)`.
                  Only populated if *return_all_hiddens* is True.
        """
        return self.forward_scriptable(src_tokens,
                                       src_lengths,
                                       return_all_hiddens,
                                       token_embeddings)

    # TorchScript doesn't support super() method so that the scriptable Subclass
    # can't access the base class model in Torchscript.
    # Current workaround is to add a helper function with different name and
    # call the helper function from scriptable Subclass.
    def forward_scriptable(
        self,
        src_tokens,
        src_lengths: Optional[torch.Tensor] = None,
        return_all_hiddens: bool = False,
        token_embeddings: Optional[torch.Tensor] = None,
    ):
        """See :meth:`forward`; same arguments and return value."""
        # start each forward pass with an empty layer history
        if self.history is not None:
            self.history.clean()
        # compute padding mask
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        has_pads = (src_tokens.device.type == "xla" or encoder_padding_mask.any())
        x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
        # account for padding while computing the representation
        # FIX: the previous check `if encoder_padding_mask is not None` was
        # always true (the mask is always a tensor); gate on `has_pads` as in
        # upstream fairseq so the multiply is skipped when there is no padding.
        # Values are unchanged: with no pads the mask is all-zero and the
        # multiply was a no-op.
        if has_pads:
            x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        encoder_states = []
        if return_all_hiddens:
            encoder_states.append(x)
        # add emb into history
        if self.history is not None:
            self.history.add(x)
        # encoder layers: each layer consumes the aggregated history instead
        # of the raw previous-layer output
        for layer in self.layers:
            if self.history is not None:
                x = self.history.pop()
            x = layer(
                x, encoder_padding_mask=encoder_padding_mask if has_pads else None
            )
            if return_all_hiddens:
                assert encoder_states is not None
                encoder_states.append(x)
            if self.history is not None:
                self.history.add(x)
        # final output is the aggregate of the whole history
        if self.history is not None:
            x = self.history.pop()
        if self.layer_norm is not None:
            x = self.layer_norm(x)
        # The Pytorch Mobile lite interpreter does not supports returning NamedTuple in
        # `forward` so we use a dictionary instead.
        # TorchScript does not support mixed values so the values are all lists.
        # The empty list is equivalent to None.
        return {
            "encoder_out": [x],  # T x B x C
            "encoder_padding_mask": [encoder_padding_mask],  # B x T
            "encoder_embedding": [encoder_embedding],  # B x T x C
            "encoder_states": encoder_states,  # List[T x B x C]
            "src_tokens": [],
            "src_lengths": [],
        }
class DLCLTransformerDecoder(TransformerDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
        # `args` is stored before super().__init__ so it is available while
        # the base class builds the layer stack (mirrors DLCLTransformerEncoder)
        self.args = args
        super().__init__(args, dictionary, embed_tokens, no_encoder_attn)
        # decoder-side layer history; the forward pass tolerates None
        self.history = CreateLayerHistory(args, is_encoder=False)
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
x = self.output_layer(x)
return x, extra
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
return self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
"""
A scriptable subclass of this class has an extract_features method and calls
super().extract_features, but super() is not supported in torchscript. A copy of
this function is made to be used in the subclass instead.
"""
def extract_features_scriptable(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
if self.history is not None:
self.history.clean()
if alignment_layer is None:
alignment_layer = self.num_layers - 1
# embed positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None and self.attn_type != "rel_selfattn":
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# add emb into history
if self.history is not None:
self.history.add(x)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
| |
<reponame>jojoba106/OpenPype<filename>openpype/modules/default_modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
import collections
import datetime
import ftrack_api
from openpype_modules.ftrack.lib import (
BaseEvent,
query_custom_attributes
)
class PushFrameValuesToTaskEvent(BaseEvent):
    """Sync hierarchical custom attribute values to tasks on changes."""
    # NOTE(review): the original comment here said "Ignore event handler by
    # default" but no ignore flag is present in this span — confirm whether a
    # flag was removed or belongs on the base class.
    # Query for custom attribute configurations; placeholders are filled with
    # joined attribute keys and object type ids.
    cust_attrs_query = (
        "select id, key, object_type_id, is_hierarchical, default"
        " from CustomAttributeConfiguration"
        " where key in ({}) and"
        " (object_type_id in ({}) or is_hierarchical is true)"
    )
    # Query for custom attribute values; placeholders are filled with joined
    # entity ids and configuration ids.
    cust_attr_query = (
        "select value, entity_id from ContextCustomAttributeValue "
        "where entity_id in ({}) and configuration_id in ({})"
    )
    # Lazily-filled caches (per handler instance lifetime)
    _cached_task_object_id = None
    _cached_interest_object_ids = None
    _cached_user_id = None
    # NOTE(review): mutable class attribute — shared across all instances;
    # confirm this sharing is intentional before changing.
    _cached_changes = []
    # Maximum age (seconds, presumably) of cached changes — TODO confirm units
    _max_delta = 30
    # Key of this handler's section in project settings
    settings_key = "sync_hier_entity_attributes"
def session_user_id(self, session):
if self._cached_user_id is None:
user = session.query(
"User where username is \"{}\"".format(session.api_user)
).one()
self._cached_user_id = user["id"]
return self._cached_user_id
def launch(self, session, event):
filtered_entities_info = self.filter_entities_info(event)
if not filtered_entities_info:
return
for project_id, entities_info in filtered_entities_info.items():
self.process_by_project(session, event, project_id, entities_info)
    def filter_entities_info(self, event):
        """Group relevant entity infos from the event by project id.

        Keeps only entity infos with ``entityType == "task"`` that carry
        changes and whose project can be determined from their parents.
        Infos whose concrete entity type is ``Task`` are kept only when
        their ``parent_id`` changed (created/moved tasks).

        Returns:
            dict or None: entity infos by project id; None when the event
                carries no entities.
        """
        # Filter if event contain relevant data
        entities_info = event["data"].get("entities")
        if not entities_info:
            return
        entities_info_by_project_id = {}
        for entity_info in entities_info:
            # Care only about tasks
            if entity_info.get("entityType") != "task":
                continue
            # Skip entities that carry no changes at all
            changes = entity_info.get("changes")
            if not changes:
                continue
            # Get project id from entity info
            project_id = None
            for parent_item in reversed(entity_info["parents"]):
                if parent_item["entityType"] == "show":
                    project_id = parent_item["entityId"]
                    break
            if project_id is None:
                continue
            # Skip `Task` entity type if parent didn't change
            if entity_info["entity_type"].lower() == "task":
                if (
                    "parent_id" not in changes
                    or changes["parent_id"]["new"] is None
                ):
                    continue
            if project_id not in entities_info_by_project_id:
                entities_info_by_project_id[project_id] = []
            entities_info_by_project_id[project_id].append(entity_info)
        return entities_info_by_project_id
def process_by_project(self, session, event, project_id, entities_info):
project_name = self.get_project_name_from_event(
session, event, project_id
)
# Load settings
project_settings = self.get_project_settings_from_event(
event, project_name
)
# Load status mapping from presets
event_settings = (
project_settings
["ftrack"]
["events"]
["sync_hier_entity_attributes"]
)
# Skip if event is not enabled
if not event_settings["enabled"]:
self.log.debug("Project \"{}\" has disabled {}".format(
project_name, self.__class__.__name__
))
return
interest_attributes = event_settings["interest_attributes"]
if not interest_attributes:
self.log.info((
"Project \"{}\" does not have filled 'interest_attributes',"
" skipping."
))
return
interest_entity_types = event_settings["interest_entity_types"]
if not interest_entity_types:
self.log.info((
"Project \"{}\" does not have filled 'interest_entity_types',"
" skipping."
))
return
interest_attributes = set(interest_attributes)
interest_entity_types = set(interest_entity_types)
# Separate value changes and task parent changes
_entities_info = []
task_parent_changes = []
for entity_info in entities_info:
if entity_info["entity_type"].lower() == "task":
task_parent_changes.append(entity_info)
else:
_entities_info.append(entity_info)
entities_info = _entities_info
# Filter entities info with changes
interesting_data, changed_keys_by_object_id = self.filter_changes(
session, event, entities_info, interest_attributes
)
if not interesting_data and not task_parent_changes:
return
# Prepare object types
object_types = session.query("select id, name from ObjectType").all()
object_types_by_name = {}
for object_type in object_types:
name_low = object_type["name"].lower()
object_types_by_name[name_low] = object_type
# NOTE it would be nice to check if `interesting_data` do not contain
# value changs of tasks that were created or moved
# - it is a complex way how to find out
if interesting_data:
self.process_attribute_changes(
session, object_types_by_name,
interesting_data, changed_keys_by_object_id,
interest_entity_types, interest_attributes
)
if task_parent_changes:
self.process_task_parent_change(
session, object_types_by_name, task_parent_changes,
interest_entity_types, interest_attributes
)
    def process_task_parent_change(
        self, session, object_types_by_name, task_parent_changes,
        interest_entity_types, interest_attributes
    ):
        """Push custom attribute values when a task's parent has changed.

        A parent change means the task was created or moved under a
        different entity.  Only tasks whose new parent has an entity type
        listed in the settings (``interest_entity_types``) are handled.

        The task's hierarchical attribute value is set or unset to mirror
        the parent's real hierarchical value, and the task's matching
        non-hierarchical attribute is set to the inherited hierarchical
        value.

        Args:
            session: ftrack session used for queries and commits.
            object_types_by_name (dict): ObjectType entities by lowercase name.
            task_parent_changes (list): entity infos of created/moved tasks.
            interest_entity_types (set): parent entity types to handle.
            interest_attributes (set): custom attribute keys to push.
        """
        # Store task ids which were created or moved under parent with entity
        # type defined in settings (interest_entity_types).
        task_ids = set()
        # Store parent ids of matching task ids
        matching_parent_ids = set()
        # Store all entity ids of all entities to be able query hierarchical
        # values.
        whole_hierarchy_ids = set()
        # Store parent id of each entity id
        parent_id_by_entity_id = {}
        for entity_info in task_parent_changes:
            # Ignore entities with less parents than 2
            # NOTE entity itself is also part of "parents" value
            parents = entity_info.get("parents") or []
            if len(parents) < 2:
                continue
            parent_info = parents[1]
            # Check if parent has entity type we care about.
            if parent_info["entity_type"] not in interest_entity_types:
                continue
            task_ids.add(entity_info["entityId"])
            matching_parent_ids.add(parent_info["entityId"])
            # Store whole hierarchy of task entity
            prev_id = None
            for item in parents:
                item_id = item["entityId"]
                whole_hierarchy_ids.add(item_id)
                if prev_id is None:
                    prev_id = item_id
                    continue
                parent_id_by_entity_id[prev_id] = item_id
                if item["entityType"] == "show":
                    break
                prev_id = item_id
        # Just skip if nothing is interesting for our settings
        if not matching_parent_ids:
            return
        # Query object type ids of parent ids for custom attribute
        # definitions query
        entities = session.query(
            "select object_type_id from TypedContext where id in ({})".format(
                self.join_query_keys(matching_parent_ids)
            )
        )
        # Prepare task object id
        task_object_id = object_types_by_name["task"]["id"]
        # All object ids for which we're querying custom attribute definitions
        object_type_ids = set()
        object_type_ids.add(task_object_id)
        for entity in entities:
            object_type_ids.add(entity["object_type_id"])
        attrs_by_obj_id, hier_attrs = self.attrs_configurations(
            session, object_type_ids, interest_attributes
        )
        # Skip if all task attributes are not available
        task_attrs = attrs_by_obj_id.get(task_object_id)
        if not task_attrs:
            return
        # Skip attributes that are not in both hierarchical and nonhierarchical
        # TODO be able to push values if hierarchical is available
        for key in interest_attributes:
            if key not in hier_attrs:
                task_attrs.pop(key, None)
            elif key not in task_attrs:
                hier_attrs.pop(key)
        # Skip if nothing remained
        if not task_attrs:
            return
        # Do some preparations for custom attribute values query
        attr_key_by_id = {}
        nonhier_id_by_key = {}
        hier_attr_ids = []
        for key, attr_id in hier_attrs.items():
            attr_key_by_id[attr_id] = key
            hier_attr_ids.append(attr_id)
        conf_ids = list(hier_attr_ids)
        for key, attr_id in task_attrs.items():
            attr_key_by_id[attr_id] = key
            nonhier_id_by_key[key] = attr_id
            conf_ids.append(attr_id)
        # Query custom attribute values
        # - result does not contain values for all entities only result of
        #   query callback to ftrack server
        result = query_custom_attributes(
            session, conf_ids, whole_hierarchy_ids
        )
        # Prepare variables where result will be stored
        # - hierarchical values should not contain attribute with value by
        #   default
        hier_values_by_entity_id = {
            entity_id: {}
            for entity_id in whole_hierarchy_ids
        }
        # - real values of custom attributes
        values_by_entity_id = {
            entity_id: {
                attr_id: None
                for attr_id in conf_ids
            }
            for entity_id in whole_hierarchy_ids
        }
        for item in result:
            attr_id = item["configuration_id"]
            entity_id = item["entity_id"]
            value = item["value"]
            values_by_entity_id[entity_id][attr_id] = value
            if attr_id in hier_attr_ids and value is not None:
                hier_values_by_entity_id[entity_id][attr_id] = value
        # Prepare values for all task entities
        # - walk up through parents and store the first set value found
        # - store None to those that are already known that do not have set
        #   value at all
        for task_id in tuple(task_ids):
            for attr_id in hier_attr_ids:
                entity_ids = []
                value = None
                entity_id = task_id
                while value is None:
                    entity_value = hier_values_by_entity_id[entity_id]
                    if attr_id in entity_value:
                        value = entity_value[attr_id]
                        if value is None:
                            break
                    if value is None:
                        entity_ids.append(entity_id)
                    entity_id = parent_id_by_entity_id.get(entity_id)
                    if entity_id is None:
                        break
                # propagate the resolved value down to every entity visited
                for entity_id in entity_ids:
                    hier_values_by_entity_id[entity_id][attr_id] = value
        # Prepare changes to commit
        changes = []
        for task_id in tuple(task_ids):
            parent_id = parent_id_by_entity_id[task_id]
            for attr_id in hier_attr_ids:
                attr_key = attr_key_by_id[attr_id]
                nonhier_id = nonhier_id_by_key[attr_key]
                # Real value of hierarchical attribute on parent
                # - If is none then should be unset
                real_parent_value = values_by_entity_id[parent_id][attr_id]
                # Current hierarchical value of a task
                # - Will be compared to real parent value
                hier_value = hier_values_by_entity_id[task_id][attr_id]
                # Parent value that can be inherited from it's parent entity
                parent_value = hier_values_by_entity_id[parent_id][attr_id]
                # Task value of nonhierarchical custom attribute
                nonhier_value = values_by_entity_id[task_id][nonhier_id]
                if real_parent_value != hier_value:
                    changes.append({
                        "new_value": real_parent_value,
                        "attr_id": attr_id,
                        "entity_id": task_id,
                        "attr_key": attr_key
                    })
                if parent_value != nonhier_value:
                    changes.append({
                        "new_value": parent_value,
                        "attr_id": nonhier_id,
                        "entity_id": task_id,
                        "attr_key": attr_key
                    })
        self._commit_changes(session, changes)
def _commit_changes(self, session, changes):
uncommited_changes = False
for idx, item in enumerate(changes):
new_value = item["new_value"]
attr_id = item["attr_id"]
entity_id = item["entity_id"]
attr_key = item["attr_key"]
entity_key = collections.OrderedDict()
entity_key["configuration_id"] = attr_id
entity_key["entity_id"] = entity_id
self._cached_changes.append({
"attr_key": attr_key,
"entity_id": entity_id,
"value": new_value,
"time": datetime.datetime.now()
})
if new_value is None:
op = ftrack_api.operation.DeleteEntityOperation(
"CustomAttributeValue",
entity_key
)
else:
op = ftrack_api.operation.UpdateEntityOperation(
"ContextCustomAttributeValue",
entity_key,
"value",
ftrack_api.symbol.NOT_SET,
new_value
)
session.recorded_operations.push(op)
self.log.info((
"Changing Custom Attribute \"{}\" to value"
" \"{}\" on entity: {}"
).format(attr_key, new_value, entity_id))
if (idx + 1) % 20 == | |
self._xref_table(model, 'kzz_table', msg=msg)
self._xref_table(model, 'cp_table', msg=msg)
self._xref_table(model, 'hgen_table', msg=msg)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.mid = self.Mid()
self.kxx_table = self.Kxx_table()
self.kxy_table = self.Kxy_table()
self.kxz_table = self.Kxz_table()
self.kyy_table = self.Kyy_table()
self.kyz_table = self.Kyz_table()
self.kzz_table = self.Kzz_table()
self.cp_table = self.Cp_table()
self.hgen_table = self.Hgen_table()
self.mid_ref = None
self.kxx_table_ref = None
self.kxy_table_ref = None
self.kxz_table_ref = None
self.kyy_table_ref = None
self.kyz_table_ref = None
self.kzz_table_ref = None
self.cp_table_ref = None
self.hgen_table_ref = None
    # Table-id accessors: each returns the id of the TABLEMi for the given
    # field, resolved through `_get_table` (which presumably prefers the
    # cross-referenced object when present — defined outside this span).
    def Kxx_table(self):
        return self._get_table('kxx_table')
    def Kxy_table(self):
        return self._get_table('kxy_table')
    def Kxz_table(self):
        return self._get_table('kxz_table')
    def Kyy_table(self):
        return self._get_table('kyy_table')
    def Kyz_table(self):
        return self._get_table('kyz_table')
    def Kzz_table(self):
        return self._get_table('kzz_table')
    def Cp_table(self):
        return self._get_table('cp_table')
    def Hgen_table(self):
        return self._get_table('hgen_table')
    def raw_fields(self):
        """Fields of the MATT5 card in write order (None is a blank field)."""
        list_fields = ['MATT5', self.Mid(),
                       self.Kxx_table(), self.Kxy_table(), self.Kxz_table(),
                       self.Kyy_table(), self.Kyz_table(), self.Kzz_table(),
                       self.Cp_table(), None, self.Hgen_table()]
        return list_fields
    def repr_fields(self):
        """Fields used by repr/write; identical to raw_fields here."""
        return self.raw_fields()
    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """Write the card in 8- or 16-character field format."""
        card = self.repr_fields()
        if size == 8:
            return self.comment + print_card_8(card)
        return self.comment + print_card_16(card)
class MATT8(MaterialDependenceThermal):
"""
Specifies temperature-dependent material properties on MAT2 entry
fields via TABLEMi entries.
+-------+--------+--------+-------+---------+--------+--------+--------+--------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+========+========+=======+=========+========+========+========+========+
| MATT8 | MID | T(E1) | T(E2) | T(Nu12) | T(G12) | T(G1z) | T(G2z) | T(RHO) |
+-------+--------+--------+-------+---------+--------+--------+--------+--------+
| | T(A1) | T(A2) | | T(Xt) | T(Xc) | T(Yt) | T(Yc) | T(S) |
+-------+--------+--------+-------+---------+--------+--------+--------+--------+
| | T(GE) | T(F12) | | | | | | |
+-------+--------+--------+-------+---------+--------+--------+--------+--------+
"""
type = 'MATT8'
@classmethod
def _init_from_empty(cls):
mid = 1
return MATT8(mid, e1_table=None, e2_table=None, nu12_table=None, g12_table=None,
g1z_table=None, g2z_table=None, rho_table=None,
a1_table=None, a2_table=None, xt_table=None, xc_table=None,
yt_table=None, yc_table=None, s_table=None, ge_table=None,
f12_table=None, comment='')
def __init__(self, mid, e1_table=None, e2_table=None, nu12_table=None,
g12_table=None, g1z_table=None, g2z_table=None, rho_table=None,
a1_table=None, a2_table=None,
xt_table=None, xc_table=None, yt_table=None, yc_table=None,
s_table=None, ge_table=None, f12_table=None, comment=''):
MaterialDependenceThermal.__init__(self)
if comment:
self.comment = comment
self.mid = mid
self.e1_table = e1_table
self.e2_table = e2_table
self.nu12_table = nu12_table
self.g12_table = g12_table
self.g1z_table = g1z_table
self.g2z_table = g2z_table
self.rho_table = rho_table
self.a1_table = a1_table
self.a2_table = a2_table
self.xt_table = xt_table
self.xc_table = xc_table
self.yt_table = yt_table
self.yc_table = yc_table
self.s_table = s_table
self.ge_table = ge_table
self.f12_table = f12_table
self.mid_ref = None
self.e1_table_ref = None
self.e2_table_ref = None
self.nu12_table_ref = None
self.g12_table_ref = None
self.g1z_table_ref = None
self.g2z_table_ref = None
self.rho_table_ref = None
self.a1_table_ref = None
self.a2_table_ref = None
self.xt_table_ref = None
self.xc_table_ref = None
self.yt_table_ref = None
self.yc_table_ref = None
self.s_table_ref = None
self.ge_table_ref = None
self.f12_table_ref = None
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a MATT8 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
mid = integer(card, 1, 'mid')
e1_table = integer_or_blank(card, 2, 'T(E1)')
e2_table = integer_or_blank(card, 3, 'T(E2)')
nu12_table = integer_or_blank(card, 3, 'T(Nu12)')
g12_table = integer_or_blank(card, 5, 'T(G12)')
g1z_table = integer_or_blank(card, 6, 'T(G1z)')
g2z_table = integer_or_blank(card, 7, 'T(G2z)')
rho_table = integer_or_blank(card, 8, 'T(Rho)')
a1_table = integer_or_blank(card, 9, 'T(A1)')
a2_table = integer_or_blank(card, 10, 'T(A2)')
xt_table = integer_or_blank(card, 12, 'T(Xt)')
xc_table = integer_or_blank(card, 13, 'T(Xc)')
yt_table = integer_or_blank(card, 14, 'T(Yt)')
yc_table = integer_or_blank(card, 15, 'T(Yc)')
s_table = integer_or_blank(card, 16, 'T(S)')
ge_table = integer_or_blank(card, 17, 'T(GE)')
f12_table = integer_or_blank(card, 18, 'T(F12)')
assert len(card) <= 19, 'len(MATT8 card) = %i\ncard=%s' % (len(card), card)
return MATT8(mid, e1_table, e2_table, nu12_table, g12_table,
g1z_table, g2z_table, rho_table,
a1_table, a2_table, xt_table,
xc_table, yt_table, yc_table,
s_table, ge_table, f12_table,
comment=comment)
def cross_reference(self, model):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by MATT1 mid=%s' % self.mid
self.mid_ref = model.Material(self.mid, msg=msg)
if self.e1_table is not None:
self.e1_table_ref = model.TableM(self.e1_table)
if self.e2_table is not None:
self.e2_table_ref = model.TableM(self.e2_table)
if self.nu12_table is not None:
self.nu12_table_ref = model.TableM(self.nu12_table)
if self.g12_table is not None:
self.g12_table_ref = model.TableM(self.g12_table)
if self.g1z_table is not None:
self.g1z_table_ref = model.TableM(self.g1z_table)
if self.g2z_table is not None:
self.g2z_table_ref = model.TableM(self.g2z_table)
if self.rho_table is not None:
self.rho_table_ref = model.TableM(self.rho_table)
if self.a1_table is not None:
self.a1_table_ref = model.TableM(self.a1_table)
if self.a2_table is not None:
self.a2_table_ref = model.TableM(self.a2_table)
if self.xt_table is not None:
self.xt_table_ref = model.TableM(self.xt_table)
if self.xc_table is not None:
self.xc_table_ref = model.TableM(self.xc_table)
if self.yt_table is not None:
self.yt_table_ref = model.TableM(self.yt_table)
if self.s_table is not None:
self.s_table_ref = model.TableM(self.s_table)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.e1_table = self.E1_table()
self.e2_table = self.E2_table()
self.nu12_table = self.Nu12_table()
self.g12_table = self.G12_table()
self.g1z_table = self.G1z_table()
self.g2z_table = self.G2z_table()
self.rho_table = self.Rho_table()
self.a1_table = self.A1_table()
self.a2_table = self.A2_table()
self.xt_table = self.Xt_table()
self.xc_table = self.Xc_table()
self.yt_table = self.Yt_table()
self.yc_table = self.Yc_table()
self.s_table = self.S_table()
self.ge_table = self.Ge_table()
self.f12_table = self.F12_table()
self.e1_table_ref = None
self.e2_table_ref = None
self.nu12_table_ref = None
self.g12_table_ref = None
self.g1z_table_ref = None
self.g2z_table_ref = None
self.rho_table_ref = None
self.a1_table_ref = None
self.a2_table_ref = None
self.xt_table_ref = None
self.xc_table_ref = None
self.yt_table_ref = None
self.yc_table_ref = None
self.s_table_ref = None
self.ge_table_ref = None
self.f12_table_ref = None
def E1_table(self):
if self.e1_table_ref is not None:
return self.e1_table_ref.tid
return self.e1_table
def E2_table(self):
if self.e2_table_ref is not None:
return self.e2_table_ref.tid
return self.e2_table
def Nu12_table(self):
if self.nu12_table_ref is not None:
return self.nu12_table_ref.tid
return self.nu12_table
def G12_table(self):
if self.g12_table_ref is not None:
return self.g12_table_ref.tid
return self.g12_table
def G1z_table(self):
if self.g1z_table_ref is not None:
return self.g1z_table_ref.tid
return self.g1z_table
def G2z_table(self):
if self.g2z_table_ref is not None:
return self.g2z_table_ref.tid
return self.g2z_table
def Rho_table(self):
if self.rho_table_ref is not None:
return self.rho_table_ref.tid
return self.rho_table
def A1_table(self):
if self.a1_table_ref is not None:
return self.a1_table_ref.tid
return self.a1_table
def A2_table(self):
if self.a2_table_ref is not None:
return self.a2_table_ref.tid
return self.a2_table
def S_table(self):
if self.s_table_ref is not None:
return self.s_table_ref.tid
return self.s_table
def Ge_table(self):
if self.ge_table_ref is not None:
return self.ge_table_ref.tid
return self.ge_table
def F12_table(self):
if self.f12_table_ref is not None:
return self.f12_table_ref.tid
return self.f12_table
def Xt_table(self):
if self.xt_table_ref is not None:
return self.xt_table_ref.tid
return self.xt_table
def Xc_table(self):
if self.xc_table_ref is not None:
return self.xc_table_ref.tid
return self.xc_table
def Yt_table(self):
if self.yt_table_ref is not None:
return self.yt_table_ref.tid
return self.yt_table
def Yc_table(self):
if self.yc_table_ref is not None:
return self.yc_table_ref.tid
return self.yc_table
def raw_fields(self):
list_fields = ['MATT8', self.mid, self.E1_table(), self.E2_table(), self.G12_table(),
self.G1z_table(), self.G2z_table(), self.Rho_table(),
self.A1_table(), self.A2_table(), None,
self.Xt_table(), self.Xc_table(), self.Yt_table(), self.Yc_table(),
self.S_table(), self.Ge_table(), self.F12_table()]
return list_fields
    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """
        Writes the MATT8 card.

        +--------+--------+--------+--------+--------+--------+--------+--------+--------+
        |    1   |    2   |    3   |    4   |    5   |    6   |    7   |    8   |    9   |
        +========+========+========+========+========+========+========+========+========+
        | MATT8  |  MID   | T(E1)  | T(E2)  | T(G12) | T(G1z) | T(G2z) | T(RHO) | T(A1)  |
        +--------+--------+--------+--------+--------+--------+--------+--------+--------+
        |        | T(A2)  |        | T(Xt)  | T(Xc)  | T(Yt)  | T(Yc)  | T(S)   | T(GE)  |
        +--------+--------+--------+--------+--------+--------+--------+--------+--------+
        |        | T(F12) |        |        |        |        |        |        |        |
        +--------+--------+--------+--------+--------+--------+--------+--------+--------+

        NOTE(review): the previous docstring showed the MATT9 layout; this
        method writes MATT8 fields (see raw_fields).  The ``size`` and
        ``is_double`` arguments are accepted for API compatibility but the
        output is always written via print_card_8 (8-field format).
        """
        list_fields = self.raw_fields()
        return self.comment + print_card_8(list_fields)
class MATT9(MaterialDependenceThermal):
type = 'MATT9'
@classmethod
def _init_from_empty(cls):
mid = 1
return MATT9(mid, g11_table=None, g12_table=None, g13_table=None, g14_table=None,
g15_table=None, g16_table=None, g22_table=None, g23_table=None,
g24_table=None, g25_table=None, g26_table=None, g33_table=None,
g34_table=None, g35_table=None, g36_table=None, g44_table=None,
g45_table=None, g46_table=None, g55_table=None, g56_table=None,
g66_table=None, rho_table=None,
a1_table=None, a2_table=None, a3_table=None,
a4_table=None, a5_table=None, a6_table=None, ge_table=None, comment='')
    def __init__(self, mid,
                 g11_table=None, g12_table=None, g13_table=None, g14_table=None,
                 g15_table=None, g16_table=None,
                 g22_table=None, g23_table=None, g24_table=None,
                 g25_table=None, g26_table=None,
                 g33_table=None, g34_table=None, g35_table=None, g36_table=None,
                 g44_table=None, g45_table=None, g46_table=None,
                 g55_table=None, g56_table=None,
                 g66_table=None,
                 rho_table=None,
                 a1_table=None, a2_table=None, a3_table=None,
                 a4_table=None, a5_table=None, a6_table=None,
                 ge_table=None,
                 comment=''):
        """
        Creates a MATT9 card, which stores a table id for the temperature
        dependence of each MAT9 material property.

        Parameters
        ----------
        mid : int
            id of the material this card modifies
        gij_table : int; default=None
            table id for stiffness term Gij (G11..G66; upper triangle)
        rho_table : int; default=None
            table id for the density
        a1_table ... a6_table : int; default=None
            table ids for the thermal expansion coefficients A1..A6
        ge_table : int; default=None
            table id for the damping coefficient GE
        comment : str; default=''
            a comment for the card
        """
        MaterialDependenceThermal.__init__(self)
        if comment:
            self.comment = comment
        # material id
        self.mid = mid
        # stiffness matrix terms (upper triangle of the 6x6 Gij matrix)
        self.g11_table = g11_table
        self.g12_table = g12_table
        self.g13_table = g13_table
        self.g14_table = g14_table
        self.g15_table = g15_table
        self.g16_table = g16_table
        self.g22_table = g22_table
        self.g23_table = g23_table
        self.g24_table = g24_table
        self.g25_table = g25_table
        self.g26_table = g26_table
        self.g33_table = g33_table
        self.g34_table = g34_table
        self.g35_table = g35_table
        self.g36_table = g36_table
        self.g44_table = g44_table
        self.g45_table = g45_table
        self.g46_table = g46_table
        self.g55_table = g55_table
        self.g56_table = g56_table
        self.g66_table = g66_table
        # density
        self.rho_table = rho_table
        # thermal expansion coefficients
        self.a1_table = a1_table
        self.a2_table = a2_table
        self.a3_table = a3_table
        self.a4_table = a4_table
        self.a5_table = a5_table
        self.a6_table = a6_table
        # damping coefficient
        self.ge_table = ge_table
        # set by cross_reference
        self.mid_ref = None
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a MATT8 card from ``BDF.add_card(...)``
Parameters
----------
| |
<filename>AlGDock/argument_parser.py
import os
import multiprocessing
import cPickle as pickle
import gzip
import numpy as np
from collections import OrderedDict
from AlGDock import arguments
from AlGDock import dictionary_tools
from AlGDock import path_tools
from AlGDock.IO import load_pkl_gz
from AlGDock.IO import write_pkl_gz
class SimulationArguments:
"""Stores simulation arguments
...
Attributes
----------
cores : int
The number of CPU cores to use
dir : dict of str
Directories where parameters and data are stored
do_CD : bool
Whether to perform simulation for states CD
params : OrderedDict of OrderedDict
Simulation parameters for BC and CD
FNs : OrderedDict of OrderedDict
Paths of input files
original_Es : list of list of Dictionaries
Energies of the receptor snapshot in different force fields
random_seed : int
The random number seed
toClear : list
Files to remove at the end of the calculation
"""
def __init__(self, **kwargs):
"""Parses the input arguments
"""
# Set undefined keywords to None
for key in arguments.args.keys():
if not key in kwargs.keys():
kwargs[key] = None
if kwargs['dir_grid'] is None:
kwargs['dir_grid'] = ''
# Multiprocessing options.
# Default is to use 1 core.
# If cores is a number, then that number (or the maximum number)
# of cores will be used.
# Default
availablecores = multiprocessing.cpu_count()
if kwargs['cores'] is None:
self.cores = 1
elif (kwargs['cores'] == -1):
self.cores = availablecores
else:
self.cores = min(kwargs['cores'], availablecores)
print "using %d/%d available cores" % (self.cores, availablecores)
if kwargs['rotate_matrix'] is not None:
self._view_args_rotate_matrix = kwargs['rotate_matrix']
if kwargs['random_seed'] is None:
self.random_seed = 0
else:
self.random_seed = kwargs['random_seed']
print 'using random number seed of %d' % self.random_seed
self.dir = {}
self.dir['start'] = os.getcwd()
if kwargs['dir_CD'] is not None:
self.dir['CD'] = os.path.abspath(kwargs['dir_CD'])
else:
self.dir['CD'] = os.path.abspath('.')
if kwargs['dir_BC'] is not None:
self.dir['BC'] = os.path.abspath(kwargs['dir_BC'])
else:
self.dir['BC'] = self.dir['CD'] # Default that may be
# overwritten by stored directory
# Load previously stored file names and arguments
FNs = OrderedDict()
args = OrderedDict()
for p in ['CD', 'BC']:
params = self.load_pkl_gz(p, kwargs['pose'])
if params is not None:
(fn_dict, param_dict) = params
FNs[p] = dictionary_tools.convert_dictionary_relpath(
fn_dict, relpath_o=self.dir[p], relpath_n=None)
args[p] = param_dict
if (p=='CD') and (kwargs['dir_BC'] is None) and \
('dir_BC' in FNs[p].keys()) and \
(FNs[p]['dir_BC'] is not None):
self.dir['BC'] = FNs[p]['dir_BC']
else:
FNs[p] = OrderedDict()
args[p] = OrderedDict()
print '\n*** Directories ***'
print dictionary_tools.dict_view(self.dir)
# Identify tarballs
tarFNs = [kwargs[prefix + '_tarball'] \
for prefix in ['ligand','receptor','complex'] \
if (prefix + '_tarball') in kwargs.keys() and
kwargs[(prefix + '_tarball')] is not None]
for p in ['BC', 'CD']:
if (p in FNs.keys()) and ('tarball' in FNs[p].keys()):
tarFNs += [tarFN for tarFN in FNs[p]['tarball'].values() \
if tarFN is not None]
tarFNs = set([FN for FN in tarFNs if os.path.isfile(FN)])
# Identify files to look for in the tarballs
seekFNs = []
if len(tarFNs) > 0:
# From the keyword arguments
for prefix in ['ligand', 'receptor', 'complex']:
for postfix in ('database', 'prmtop', 'inpcrd', 'mol2', 'fixed_atoms'):
key = '%s_%s' % (prefix, postfix)
if (key in kwargs.keys()) and (kwargs[key] is not None):
FN = os.path.abspath(kwargs[key])
if not os.path.isfile(FN):
seekFNs.append(os.path.basename(FN))
if kwargs['score'] != 'default':
seekFNs.append(kwargs['score'])
# From files in a previous instance
for p in ['BC', 'CD']:
if p in FNs.keys():
for level1 in ['ligand_database','receptor_database', \
'prmtop','inpcrd','fixed_atoms']:
if level1 in FNs[p].keys():
if isinstance(FNs[p][level1], dict):
for level2 in ['L', 'R', 'RL']:
if level2 in FNs[p][level1].keys():
seekFNs.append(os.path.basename(FNs[p][level1][level2]))
else:
seekFNs.append(os.path.basename(FNs[p][level1]))
seekFNs = set(seekFNs)
seek_frcmod = (kwargs['frcmodList'] is None) or \
(not os.path.isfile(kwargs['frcmodList'][0]))
if kwargs['keep_tar']:
print 'Files extracted from tarballs will be kept\n'
# Decompress tarballs into self.dir['CD']
self.toClear = []
if len(seekFNs) > 0:
import tarfile
print ">>> Decompressing tarballs"
print 'looking for:\n ' + '\n '.join(seekFNs)
if seek_frcmod:
print ' and frcmod files'
for tarFN in tarFNs:
print 'reading ' + tarFN
tarF = tarfile.open(tarFN, 'r')
for member in tarF.getmembers():
for seekFN in seekFNs:
if member.name.endswith(seekFN):
tarF.extract(member, path=self.dir['CD'])
if not kwargs['keep_tar']:
self.toClear.append(os.path.join(self.dir['CD'], seekFN))
print ' extracted ' + seekFN
if seek_frcmod and member.name.endswith('frcmod'):
FN = os.path.abspath(os.path.join(self.dir['CD'], member.name))
if not os.path.isfile(FN):
tarF.extract(member, path=self.dir['CD'])
kwargs['frcmodList'] = [FN]
if not kwargs['keep_tar']:
self.toClear.append(FN)
print ' extracted ' + FN
print
# Set up file name dictionary
print '*** Files ***'
for p in ['BC', 'CD']:
if p in FNs.keys():
if FNs[p] != {}:
print 'previously stored in %s directory:' % p
print dictionary_tools.dict_view(FNs[p], relpath=self.dir['start'])
if not (FNs['BC'] == {} and FNs['CD'] == {}):
print 'from arguments and defaults:'
def cdir_or_dir_CD(FN):
if FN is not None:
return path_tools.findPath([FN, os.path.join(self.dir['CD'], FN)])
else:
return None
if kwargs['frcmodList'] is not None:
if isinstance(kwargs['frcmodList'], str):
kwargs['frcmodList'] = [kwargs['frcmodList']]
kwargs['frcmodList'] = [cdir_or_dir_CD(FN) \
for FN in kwargs['frcmodList']]
if 'score' in kwargs.keys() and \
(kwargs['score'] is not None) and \
(kwargs['score'] != 'default'):
kwargs['score'] = path_tools.findPath([kwargs['score'], \
os.path.join(self.dir['CD'],kwargs['score'])])
FFpath = path_tools.search_paths['gaff'] \
if 'gaff' in path_tools.search_paths.keys() else []
FNs['new'] = OrderedDict([
('ligand_database',cdir_or_dir_CD(kwargs['ligand_database'])),
('receptor_database',cdir_or_dir_CD(kwargs['receptor_database'])),
('forcefield',path_tools.findPath(\
[kwargs['forcefield'],'../Data/gaff2.dat'] + FFpath)),
('frcmodList',kwargs['frcmodList']),
('tarball',OrderedDict([
('L',path_tools.findPath([kwargs['ligand_tarball']])),
('R',path_tools.findPath([kwargs['receptor_tarball']])),
('RL',path_tools.findPath([kwargs['complex_tarball']]))])),
('prmtop',OrderedDict([
('L',cdir_or_dir_CD(kwargs['ligand_prmtop'])),
('R',cdir_or_dir_CD(kwargs['receptor_prmtop'])),
('RL',cdir_or_dir_CD(kwargs['complex_prmtop']))])),
('inpcrd',OrderedDict([
('L',cdir_or_dir_CD(kwargs['ligand_inpcrd'])),
('R',cdir_or_dir_CD(kwargs['receptor_inpcrd'])),
('RL',cdir_or_dir_CD(kwargs['complex_inpcrd']))])),
('mol2',OrderedDict([
('L',cdir_or_dir_CD(kwargs['ligand_mol2']))])),
('fixed_atoms',OrderedDict([
('R',cdir_or_dir_CD(kwargs['receptor_fixed_atoms'])),
('RL',cdir_or_dir_CD(kwargs['complex_fixed_atoms']))])),
('grids',OrderedDict([
('LJr',path_tools.findPath([kwargs['grid_LJr'],
os.path.join(kwargs['dir_grid'],'LJr.nc'),
os.path.join(kwargs['dir_grid'],'LJr.dx'),
os.path.join(kwargs['dir_grid'],'LJr.dx.gz')])),
('LJa',path_tools.findPath([kwargs['grid_LJa'],
os.path.join(kwargs['dir_grid'],'LJa.nc'),
os.path.join(kwargs['dir_grid'],'LJa.dx'),
os.path.join(kwargs['dir_grid'],'LJa.dx.gz')])),
('sELE',path_tools.findPath([kwargs['grid_sELE'],
kwargs['grid_ELE'],
os.path.join(kwargs['dir_grid'],'pb.nc'),
os.path.join(kwargs['dir_grid'],'pbsa.nc'),
os.path.join(kwargs['dir_grid'],'direct_ELE.nc')])),
('ELE',path_tools.findPath([kwargs['grid_ELE'],
os.path.join(kwargs['dir_grid'],'direct_ELE.nc'),
os.path.join(kwargs['dir_grid'],'pb.nc'),
os.path.join(kwargs['dir_grid'],'pbsa.nc')])),
('desolv',path_tools.findPath([kwargs['grid_desolv'],
os.path.join(kwargs['dir_grid'],'desolv.nc'),
os.path.join(kwargs['dir_grid'],'desolv.dx'),
os.path.join(kwargs['dir_grid'],'desolv.dx.gz')]))])),
('score',kwargs['score']),
('dir_BC',self.dir['BC'])])
if not (FNs['BC'] == {} and FNs['CD'] == {}):
print dictionary_tools.dict_view(FNs['new'], relpath=self.dir['start'])
print 'to be used:'
self.FNs = dictionary_tools.merge_dictionaries(
[FNs[src] for src in ['new', 'BC', 'CD']])
# Default: a force field modification is in the same directory as the ligand
if (self.FNs['frcmodList'] is None):
if self.FNs['prmtop']['L'] is not None:
dir_lig = os.path.dirname(self.FNs['prmtop']['L'])
frcmodpaths = [os.path.abspath(os.path.join(dir_lig, \
os.path.basename(self.FNs['prmtop']['L'])[:-7]+'.frcmod'))]
else:
dir_lig = '.'
frcmodpaths = []
if kwargs['frcmodList'] is None:
frcmodpaths.extend([\
os.path.abspath(os.path.join(dir_lig,'lig.frcmod')),\
os.path.abspath(os.path.join(dir_lig,'ligand.frcmod'))])
frcmod = path_tools.findPath(frcmodpaths)
self.FNs['frcmodList'] = [frcmod]
elif not isinstance(self.FNs['frcmodList'], list):
self.FNs['frcmodList'] = [self.FNs['frcmodList']]
# Check for existence of required files
do_CD = (hasattr(args,'run_type') and \
(args.run_type not in ['store_params', 'BC']))
for key in ['ligand_database', 'forcefield']:
if (self.FNs[key] is None) or (not os.path.isfile(self.FNs[key])):
raise Exception('File for %s is missing!' % key)
for (key1, key2) in [('prmtop', 'L'), ('inpcrd', 'L')]:
FN = self.FNs[key1][key2]
if (FN is None) or (not os.path.isfile(FN)):
raise Exception('File for %s %s is missing' % (key1, key2))
for (key1,key2) in [\
('prmtop','RL'), ('inpcrd','RL'), \
('grids','LJr'), ('grids','LJa'), ('grids','ELE')]:
FN = self.FNs[key1][key2]
errstring = 'Missing file %s %s required for CD!' % (key1, key2)
if (FN is None) or (not os.path.isfile(FN)):
if do_CD:
raise Exception(errstring)
else:
print errstring
if ((self.FNs['inpcrd']['RL'] is None) and \
(self.FNs['inpcrd']['R'] is None)):
if do_CD:
raise Exception('Receptor coordinates needed for CD!')
else:
print 'Receptor coordinates needed for CD!'
print dictionary_tools.dict_view(self.FNs,
relpath=self.dir['start'],
show_None=True)
args['default_BC'] = OrderedDict([
('protocol', 'Adaptive'), ('therm_speed', 30.0), ('T_HIGH', 600.),
('T_SIMMIN', 300.), ('T_TARGET', 300.),
('H_mass', 4.0), ('delta_t', 4.0), ('sampler', 'NUTS'),
('steps_per_seed', 1000), ('seeds_per_state', 50), ('darts_per_seed', 0),
('repX_cycles', 20), ('min_repX_acc', 0.4), ('sweeps_per_cycle', 1000),
('snaps_per_cycle', 50), ('attempts_per_sweep', 25),
('steps_per_sweep', 50), ('darts_per_sweep', 0),
('phases', ['NAMD_Gas', 'NAMD_OBC']),
('sampling_importance_resampling', False), ('solvation', 'Desolvated'),
('keep_intermediate', False), ('GMC_attempts', 0),
('GMC_tors_threshold', 0.0)
])
args['default_CD'] = OrderedDict(args['default_BC'].items() + [
('temperature_scaling','Linear'),
('site',None),
('site_center',None),
('site_direction',None),
('site_max_Z',None),
('site_max_R',None),
('site_density',50.),
('site_measured',None),
('pose',-1),
('k_pose', 1000.0), # * MMTK.Units.kJ / MMTK.Units.mol / MMTK.Units.K
('MCMC_moves',1),
('rmsd',False)] + \
[('receptor_'+phase,None) for phase in arguments.allowed_phases])
args['default_CD']['snaps_per_cycle'] = 50
# Store passed arguments in dictionary
for p in ['BC', 'CD']:
args['new_' + p] = OrderedDict()
for key in args['default_' + p].keys():
specific_key = p + '_' + key
if (specific_key in kwargs.keys()) and \
(kwargs[specific_key] is not None):
# Use the specific key if it exists
args['new_' + p][key] = kwargs[specific_key]
elif (key in ['site_center', 'site_direction'] +
['receptor_'+phase for phase in arguments.allowed_phases]) and \
(kwargs[key] is not None):
# Convert these to arrays of floats
args['new_' + p][key] = np.array(kwargs[key], dtype=float)
elif key in kwargs.keys():
# Use the general key
args['new_' + p][key] = kwargs[key]
self.params = OrderedDict()
for p in ['BC', 'CD']:
self.params[p] = dictionary_tools.merge_dictionaries(
[args[src] for src in ['new_' + p, p, 'default_' + p]])
# Check that phases are permitted
for phase in (self.params['BC']['phases'] + self.params['CD']['phases']):
if phase not in arguments.allowed_phases:
raise Exception(phase + ' phase is not supported!')
# Make sure prerequistite phases are included:
# sander_Gas is necessary for any sander or gbnsr6 phase
# NAMD_Gas is necessary for APBS_PBSA
for process in ['BC', 'CD']:
phase_list | |
<reponame>Aazhar/biblio-glutton-harvester
import boto3
import botocore
import sys
import os
import shutil
import gzip
import json
import pickle
import lmdb
import uuid
import subprocess
import argparse
import time
import S3
from concurrent.futures import ThreadPoolExecutor
import subprocess
import tarfile
from random import randint
import numpy
import math
# maximum size (in bytes) each LMDB environment may grow to: 100 GiB
map_size = 100 * 1024 * 1024 * 1024
# window of entries considered for the shuffle (1e6); the datasets hold more
shuffle_range = math.pow(10, 6)
'''
This version uses the standard ThreadPoolExecutor for parallelizing the download/processing/upload processes.
Given the limits of ThreadPoolExecutor (input stored in memory, blocking Executor.map until the whole input
is processed and output stored in memory until all input is consumed), it works with batches of PDF of a size
indicated in the config.json file (default is 100 entries). We are moving from first batch to the second one
only when the first is entirely processed.
'''
class OAHarverster(object):
def __init__(self, config_path='./config.json', thumbnail=False, sample=None):
self.config = None
self.size = size
# standard lmdb environment for storing biblio entries by uuid
self.env = None
# lmdb environment for storing mapping between doi/pmcid and uuid
self.env_doi = None
# lmdb environment for keeping track of failures
self.env_fail = None
self._load_config(config_path)
# boolean indicating if we want to generate thumbnails of front page of PDF
self.thumbnail = thumbnail
self._init_lmdb()
# if a sample value is provided, indicate that we only harvest the indicated number of PDF
self.sample = sample
self.s3 = None
if self.config["bucket_name"] is not None and len(self.config["bucket_name"]) is not 0:
self.s3 = S3.S3(self.config)
def _load_config(self, path='./config.json'):
"""
Load the json configuration
"""
config_json = open(path).read()
self.config = json.loads(config_json)
def _init_lmdb(self):
# open in write mode
envFilePath = os.path.join(self.config["data_path"], 'entries')
if not os.path.exists(envFilePath):
os.makedirs(envFilePath)
self.env = lmdb.open(envFilePath, map_size=map_size)
envFilePath = os.path.join(self.config["data_path"], 'doi')
if not os.path.exists(envFilePath):
os.makedirs(envFilePath)
self.env_doi = lmdb.open(envFilePath, map_size=map_size)
envFilePath = os.path.join(self.config["data_path"], 'fail')
if not os.path.exists(envFilePath):
os.makedirs(envFilePath)
self.env_fail = lmdb.open(envFilePath, map_size=map_size)
def harvestUnpaywall(self, filepath):
"""
Main method, use the Unpaywall dataset for getting pdf url for Open Access resources,
download in parallel PDF, generate thumbnails, upload resources on S3 and update
the json description of the entries
"""
batch_size_pdf = self.config['batch_size']
# batch size for lmdb commit
batch_size_lmdb = 10
n = 0
i = 0
j = 0
urls = []
entries = []
filenames = []
selection = None
if self.sample is not None:
# check the overall number of entries based on the line number
with gzip.open(filepath, 'rb') as gz:
count = 0
while 1:
buffer = gz.read(8192*1024)
if not buffer: break
count += buffer.count(b'\n')
# random selection corresponding to the requested sample size
selection = [randint(0, count-1) for p in range(0, sample)]
selection.sort()
gz = gzip.open(filepath, 'rt')
for count, line in enumerate(gz):
if selection is not None and not count in selection:
continue
#if n >= 100:
# break
if i == batch_size_pdf:
failed = self.processBatch(urls, filenames, entries)#, txn, txn_doi, txn_fail)
# reinit
i = 0
urls = []
entries = []
filenames = []
n += batch_size_pdf
# one json entry per line
entry = json.loads(line)
doi = entry['doi']
# check if the entry has already been processed
if self.getUUIDByDoi(doi) is not None:
continue
if 'best_oa_location' in entry:
if entry['best_oa_location'] is not None:
if 'url_for_pdf' in entry['best_oa_location']:
pdf_url = entry['best_oa_location']['url_for_pdf']
if pdf_url is not None:
print(pdf_url)
urls.append(pdf_url)
entry['id'] = str(uuid.uuid4())
entries.append(entry)
filenames.append(os.path.join(self.config["data_path"], entry['id']+".pdf"))
i += 1
gz.close()
# we need to process the latest incomplete batch (if not empty)
if len(urls) >0:
self.processBatch(urls, filenames, entries)#, txn, txn_doi, txn_fail)
n += len(urls)
print("total entries:", n)
def harvestPMC(self, filepath):
"""
Main method for PMC, use the provided PMC list file for getting pdf url for Open Access resources,
or download the list file on NIH server if not provided, download in parallel PDF, generate thumbnails,
upload resources on S3 and update the json description of the entries
"""
batch_size_pdf = self.config['batch_size']
pmc_base = self.config['pmc_base']
# batch size for lmdb commit
batch_size_lmdb = 10
n = 0
i = 0
urls = []
entries = []
filenames = []
selection = None
if self.sample is not None:
# check the overall number of entries based on the line number
with open(filepath, 'rb') as fp:
count = 0
while 1:
buffer = fp.read(8192*1024)
if not buffer: break
count += buffer.count(b'\n')
# random selection corresponding to the requested sample size
selection = [randint(0, count-1) for p in range(0, sample)]
selection.sort()
with open(filepath, 'rt') as fp:
for count, line in enumerate(fp):
if selection is not None and not count in selection:
continue
# skip first line which gives the date when the list has been generated
if count == 0:
continue
#if n >= 100:
# break
if i == batch_size_pdf:
self.processBatch(urls, filenames, entries)#, txn, txn_doi, txn_fail)
# reinit
i = 0
urls = []
entries = []
filenames = []
n += batch_size_pdf
# one PMC entry per line
tokens = line.split('\t')
subpath = tokens[0]
pmcid = tokens[2]
pmid = tokens[3]
ind = pmid.find(":")
if ind != -1:
pmid = pmid[ind+1:]
if pmcid is None:
continue
# check if the entry has already been processed
if self.getUUIDByDoi(pmcid) is not None:
continue
if subpath is not None:
entry = {}
tar_url = pmc_base + subpath
print(tar_url)
urls.append(tar_url)
entry['id'] = str(uuid.uuid4())
entry['pmcid'] = pmcid
entry['pmid'] = pmid
entry['doi'] = pmcid
entry_url = {}
entry_url['url_for_pdf'] = tar_url
entry['best_oa_location'] = entry_url
entries.append(entry)
filenames.append(os.path.join(self.config["data_path"], entry['id']+".tar.gz"))
i += 1
# we need to process the latest incomplete batch (if not empty)
if len(urls) >0:
self.processBatch(urls, filenames, entries)#, txn, txn_doi, txn_fail)
n += len(urls)
print("total entries:", n)
def processBatch(self, urls, filenames, entries):#, txn, txn_doi, txn_fail):
with ThreadPoolExecutor(max_workers=12) as executor:
results = executor.map(download, urls, filenames, entries)
# LMDB write transaction must be performed in the thread that created the transaction, so
# we need to have the following lmdb updates out of the paralell process
entries = []
i = 0
for result in results:
local_entry = result[1]
# conservative check if the downloaded file is of size 0 with a status code sucessful (code: 0),
# it should not happen *in theory*
empty_file = False
local_filename = os.path.join(self.config["data_path"], local_entry['id']+".pdf")
if os.path.isfile(local_filename):
if os.path.getsize(local_filename) == 0:
empty_file = True
local_filename = os.path.join(self.config["data_path"], local_entry['id']+".tar.gz")
if os.path.isfile(local_filename):
if os.path.getsize(local_filename) == 0:
empty_file = True
if result[0] is None or result[0] == "0" and not empty_file:
#update DB
with self.env.begin(write=True) as txn:
txn.put(local_entry['id'].encode(encoding='UTF-8'), _serialize_pickle(local_entry))
#print(" update txn_doi")
with self.env_doi.begin(write=True) as txn_doi:
txn_doi.put(local_entry['doi'].encode(encoding='UTF-8'), local_entry['id'].encode(encoding='UTF-8'))
entries.append(local_entry)
else:
print(" error: " + result[0])
#update DB
with self.env.begin(write=True) as txn:
txn.put(local_entry['id'].encode(encoding='UTF-8'), _serialize_pickle(local_entry))
with self.env_doi.begin(write=True) as txn_doi:
txn_doi.put(local_entry['doi'].encode(encoding='UTF-8'), local_entry['id'].encode(encoding='UTF-8'))
with self.env_fail.begin(write=True) as txn_fail:
txn_fail.put(local_entry['id'].encode(encoding='UTF-8'), result[0].encode(encoding='UTF-8'))
# if an empty pdf or tar file is present, we clean it
local_filename = os.path.join(self.config["data_path"], local_entry['id']+".pdf")
if os.path.isfile(local_filename):
os.remove(local_filename)
local_filename = os.path.join(self.config["data_path"], local_entry['id']+".tar.gz")
if os.path.isfile(local_filename):
os.remove(local_filename)
local_filename = os.path.join(self.config["data_path"], local_entry['id']+".nxml")
if os.path.isfile(local_filename):
os.remove(local_filename)
print("failed documents :", i)
# finally we can parallelize the thumbnail/upload/file cleaning steps for this batch
# with ThreadPoolExecutor(max_workers=12) as executor:
# results = executor.map(self.manageFiles, entries)
return i
def processBatchReprocess(self, urls, filenames, entries):#, txn, txn_doi, txn_fail):
with ThreadPoolExecutor(max_workers=12) as executor:
results = executor.map(download, urls, filenames, entries)
# LMDB write transactions in the thread that created the transaction
entries = []
for result in results:
local_entry = result[1]
if result[0] is None or result[0] == "0":
entries.append(local_entry)
# remove the entry in fail, as it is now sucessful
with self.env_fail.begin(write=True) as txn_fail2:
txn_fail2.delete(local_entry['id'].encode(encoding='UTF-8'))
else:
# still an error
# if an empty pdf file is present, we clean it
local_filename = os.path.join(self.config["data_path"], local_entry['id']+".pdf")
if os.path.isfile(local_filename):
os.remove(local_filename)
local_filename = os.path.join(self.config["data_path"], local_entry['id']+".tar.gz")
if os.path.isfile(local_filename):
os.remove(local_filename)
local_filename = os.path.join(self.config["data_path"], local_entry['id']+".nxml")
if os.path.isfile(local_filename):
os.remove(local_filename)
print("manage files")
# finally we can parallelize the thumbnail/upload/file cleaning steps for this batch
with ThreadPoolExecutor(max_workers=12) as executor:
results = executor.map(self.manageFiles, entries)
def getUUIDByDoi(self, doi):
txn = self.env_doi.begin()
return txn.get(doi.encode(encoding='UTF-8'))
def manageFiles(self, local_entry):
local_filename | |
source_ca_cert
target_api_addrs_ = target_api_addrs
target_ca_cert_ = target_ca_cert
# Validate arguments against known Juju API types.
if attempt_ is not None and not isinstance(attempt_, int):
raise Exception("Expected attempt_ to be a int, received: {}".format(type(attempt_)))
if migration_id_ is not None and not isinstance(migration_id_, (bytes, str)):
raise Exception("Expected migration_id_ to be a str, received: {}".format(type(migration_id_)))
if phase_ is not None and not isinstance(phase_, (bytes, str)):
raise Exception("Expected phase_ to be a str, received: {}".format(type(phase_)))
if source_api_addrs_ is not None and not isinstance(source_api_addrs_, (bytes, str, list)):
raise Exception("Expected source_api_addrs_ to be a Sequence, received: {}".format(type(source_api_addrs_)))
if source_ca_cert_ is not None and not isinstance(source_ca_cert_, (bytes, str)):
raise Exception("Expected source_ca_cert_ to be a str, received: {}".format(type(source_ca_cert_)))
if target_api_addrs_ is not None and not isinstance(target_api_addrs_, (bytes, str, list)):
raise Exception("Expected target_api_addrs_ to be a Sequence, received: {}".format(type(target_api_addrs_)))
if target_ca_cert_ is not None and not isinstance(target_ca_cert_, (bytes, str)):
raise Exception("Expected target_ca_cert_ to be a str, received: {}".format(type(target_ca_cert_)))
self.attempt = attempt_
self.migration_id = migration_id_
self.phase = phase_
self.source_api_addrs = source_api_addrs_
self.source_ca_cert = source_ca_cert_
self.target_api_addrs = target_api_addrs_
self.target_ca_cert = target_ca_cert_
self.unknown_fields = unknown_fields
class MigrationTargetInfo(Type):
    _toSchema = {'addrs': 'addrs', 'auth_tag': 'auth-tag', 'ca_cert': 'ca-cert', 'controller_alias': 'controller-alias', 'controller_tag': 'controller-tag', 'macaroons': 'macaroons', 'password': 'password'}
    _toPy = {'addrs': 'addrs', 'auth-tag': 'auth_tag', 'ca-cert': 'ca_cert', 'controller-alias': 'controller_alias', 'controller-tag': 'controller_tag', 'macaroons': 'macaroons', 'password': 'password'}
    # bug fix: the default for `password` had been replaced by a secret-scrubbing
    # placeholder (`<PASSWORD>`), which is not valid Python; restored to None to
    # match every other optional field of these generated facade types
    def __init__(self, addrs=None, auth_tag=None, ca_cert=None, controller_alias=None, controller_tag=None, macaroons=None, password=None, **unknown_fields):
        '''
        addrs : typing.Sequence[str]
        auth_tag : str
        ca_cert : str
        controller_alias : str
        controller_tag : str
        macaroons : str
        password : str
        '''
        addrs_ = addrs
        auth_tag_ = auth_tag
        ca_cert_ = ca_cert
        controller_alias_ = controller_alias
        controller_tag_ = controller_tag
        macaroons_ = macaroons
        password_ = password
        # Validate arguments against known Juju API types.
        if addrs_ is not None and not isinstance(addrs_, (bytes, str, list)):
            raise Exception("Expected addrs_ to be a Sequence, received: {}".format(type(addrs_)))
        if auth_tag_ is not None and not isinstance(auth_tag_, (bytes, str)):
            raise Exception("Expected auth_tag_ to be a str, received: {}".format(type(auth_tag_)))
        if ca_cert_ is not None and not isinstance(ca_cert_, (bytes, str)):
            raise Exception("Expected ca_cert_ to be a str, received: {}".format(type(ca_cert_)))
        if controller_alias_ is not None and not isinstance(controller_alias_, (bytes, str)):
            raise Exception("Expected controller_alias_ to be a str, received: {}".format(type(controller_alias_)))
        if controller_tag_ is not None and not isinstance(controller_tag_, (bytes, str)):
            raise Exception("Expected controller_tag_ to be a str, received: {}".format(type(controller_tag_)))
        if macaroons_ is not None and not isinstance(macaroons_, (bytes, str)):
            raise Exception("Expected macaroons_ to be a str, received: {}".format(type(macaroons_)))
        if password_ is not None and not isinstance(password_, (bytes, str)):
            raise Exception("Expected password_ to be a str, received: {}".format(type(password_)))
        self.addrs = addrs_
        self.auth_tag = auth_tag_
        self.ca_cert = ca_cert_
        self.controller_alias = controller_alias_
        self.controller_tag = controller_tag_
        self.macaroons = macaroons_
        self.password = password_
        self.unknown_fields = unknown_fields
class MinionReport(Type):
    """Report from a single migration minion: the phase it reached and
    whether it succeeded for a given migration."""

    _toSchema = {'migration_id': 'migration-id', 'phase': 'phase', 'success': 'success'}
    _toPy = {'migration-id': 'migration_id', 'phase': 'phase', 'success': 'success'}

    def __init__(self, migration_id=None, phase=None, success=None, **unknown_fields):
        '''
        migration_id : str
        phase : str
        success : bool
        '''
        # Validate arguments against known Juju API types.
        if migration_id is not None and not isinstance(migration_id, (bytes, str)):
            raise Exception("Expected migration_id_ to be a str, received: {}".format(type(migration_id)))
        if phase is not None and not isinstance(phase, (bytes, str)):
            raise Exception("Expected phase_ to be a str, received: {}".format(type(phase)))
        if success is not None and not isinstance(success, bool):
            raise Exception("Expected success_ to be a bool, received: {}".format(type(success)))
        self.migration_id = migration_id
        self.phase = phase
        self.success = success
        self.unknown_fields = unknown_fields
class MinionReports(Type):
    """Aggregated migration-minion reports for one migration phase:
    failure list plus success/unknown counts and a sample of unknowns."""

    _toSchema = {'failed': 'failed', 'migration_id': 'migration-id', 'phase': 'phase', 'success_count': 'success-count', 'unknown_count': 'unknown-count', 'unknown_sample': 'unknown-sample'}
    _toPy = {'failed': 'failed', 'migration-id': 'migration_id', 'phase': 'phase', 'success-count': 'success_count', 'unknown-count': 'unknown_count', 'unknown-sample': 'unknown_sample'}

    def __init__(self, failed=None, migration_id=None, phase=None, success_count=None, unknown_count=None, unknown_sample=None, **unknown_fields):
        '''
        failed : typing.Sequence[str]
        migration_id : str
        phase : str
        success_count : int
        unknown_count : int
        unknown_sample : typing.Sequence[str]
        '''
        # Validate arguments against known Juju API types.
        if failed is not None and not isinstance(failed, (bytes, str, list)):
            raise Exception("Expected failed_ to be a Sequence, received: {}".format(type(failed)))
        if migration_id is not None and not isinstance(migration_id, (bytes, str)):
            raise Exception("Expected migration_id_ to be a str, received: {}".format(type(migration_id)))
        if phase is not None and not isinstance(phase, (bytes, str)):
            raise Exception("Expected phase_ to be a str, received: {}".format(type(phase)))
        if success_count is not None and not isinstance(success_count, int):
            raise Exception("Expected success_count_ to be a int, received: {}".format(type(success_count)))
        if unknown_count is not None and not isinstance(unknown_count, int):
            raise Exception("Expected unknown_count_ to be a int, received: {}".format(type(unknown_count)))
        if unknown_sample is not None and not isinstance(unknown_sample, (bytes, str, list)):
            raise Exception("Expected unknown_sample_ to be a Sequence, received: {}".format(type(unknown_sample)))
        self.failed = failed
        self.migration_id = migration_id
        self.phase = phase
        self.success_count = success_count
        self.unknown_count = unknown_count
        self.unknown_sample = unknown_sample
        self.unknown_fields = unknown_fields
class Model(Type):
    """Juju 'Model' API record: every schema field is a plain string."""
    _toSchema = {'name': 'name', 'owner_tag': 'owner-tag', 'type_': 'type', 'uuid': 'uuid'}
    _toPy = {'name': 'name', 'owner-tag': 'owner_tag', 'type': 'type_', 'uuid': 'uuid'}

    def __init__(self, name=None, owner_tag=None, type_=None, uuid=None, **unknown_fields):
        '''
        name : str
        owner_tag : str
        type_ : str
        uuid : str
        '''
        # Validate arguments against known Juju API types.  All four fields
        # share the same str contract, so check them in a single pass; the
        # labels reproduce the generated code's error messages exactly.
        for label, value in (('name_', name), ('owner_tag_', owner_tag),
                             ('type__', type_), ('uuid_', uuid)):
            if value is not None and not isinstance(value, (bytes, str)):
                raise Exception("Expected {} to be a str, received: {}".format(label, type(value)))
        self.name = name
        self.owner_tag = owner_tag
        self.type_ = type_
        self.uuid = uuid
        # Preserve any wire fields this client version does not know about.
        self.unknown_fields = unknown_fields
class ModelAccess(Type):
    """Juju 'ModelAccess' API record: an access level paired with a model."""
    _toSchema = {'access': 'access', 'model': 'model'}
    _toPy = {'access': 'access', 'model': 'model'}

    def __init__(self, access=None, model=None, **unknown_fields):
        '''
        access : str
        model : str
        '''
        # Validate arguments against known Juju API types (both are strings).
        for label, value in (('access_', access), ('model_', model)):
            if value is not None and not isinstance(value, (bytes, str)):
                raise Exception("Expected {} to be a str, received: {}".format(label, type(value)))
        self.access = access
        self.model = model
        # Preserve any wire fields this client version does not know about.
        self.unknown_fields = unknown_fields
class ModelArgs(Type):
    """Juju 'ModelArgs' API record: a single model-tag argument."""
    _toSchema = {'model_tag': 'model-tag'}
    _toPy = {'model-tag': 'model_tag'}

    def __init__(self, model_tag=None, **unknown_fields):
        '''
        model_tag : str
        '''
        # Validate the single argument against the known Juju API type.
        if model_tag is not None and not isinstance(model_tag, (bytes, str)):
            raise Exception("Expected model_tag_ to be a str, received: {}".format(type(model_tag)))
        self.model_tag = model_tag
        # Preserve any wire fields this client version does not know about.
        self.unknown_fields = unknown_fields
class ModelBlockInfo(Type):
    """Juju 'ModelBlockInfo' API record: active blocks on a model."""
    _toSchema = {'blocks': 'blocks', 'model_uuid': 'model-uuid', 'name': 'name', 'owner_tag': 'owner-tag'}
    _toPy = {'blocks': 'blocks', 'model-uuid': 'model_uuid', 'name': 'name', 'owner-tag': 'owner_tag'}

    def __init__(self, blocks=None, model_uuid=None, name=None, owner_tag=None, **unknown_fields):
        '''
        blocks : typing.Sequence[str]
        model_uuid : str
        name : str
        owner_tag : str
        '''
        # Validate arguments against known Juju API types.  'blocks' is a
        # sequence; the remaining three fields are plain strings.
        if blocks is not None and not isinstance(blocks, (bytes, str, list)):
            raise Exception("Expected blocks_ to be a Sequence, received: {}".format(type(blocks)))
        for label, value in (('model_uuid_', model_uuid), ('name_', name),
                             ('owner_tag_', owner_tag)):
            if value is not None and not isinstance(value, (bytes, str)):
                raise Exception("Expected {} to be a str, received: {}".format(label, type(value)))
        self.blocks = blocks
        self.model_uuid = model_uuid
        self.name = name
        self.owner_tag = owner_tag
        # Preserve any wire fields this client version does not know about.
        self.unknown_fields = unknown_fields
class ModelBlockInfoList(Type):
    """Juju 'ModelBlockInfoList' API record: a list of ModelBlockInfo entries."""
    _toSchema = {'models': 'models'}
    _toPy = {'models': 'models'}

    def __init__(self, models=None, **unknown_fields):
        '''
        models : typing.Sequence[~ModelBlockInfo]
        '''
        # Deserialize each wire entry; a missing/empty sequence becomes [].
        deserialized = [ModelBlockInfo.from_json(item) for item in (models or [])]
        # Validate against the known Juju API type.  The comprehension above
        # always yields a list, so this guard never fires; it is retained for
        # symmetry with the rest of the generated schema code.
        if deserialized is not None and not isinstance(deserialized, (bytes, str, list)):
            raise Exception("Expected models_ to be a Sequence, received: {}".format(type(deserialized)))
        self.models = deserialized
        # Preserve any wire fields this client version does not know about.
        self.unknown_fields = unknown_fields
class ModelConfigResult(Type):
_toSchema = {'config': 'config'}
_toPy = | |
#-------------------------------------------------------------------------------
# The Blob Test
# Based on the test presented in
# 1. Agertz O., Moore B., Stadel J., Potter D., Miniati F., Read J., et al.
# Fundamental differences between SPH and grid methods. Monthly Notices of
# the Royal Astronomical Society. 2007; 380(3):963-978.
# doi:10.1111/j.1365-2966.2007.12183.x.
#-------------------------------------------------------------------------------
import shutil
from math import *
from Spheral2d import *
from SpheralTestUtilities import *
from SpheralGnuPlotUtilities import *
from findLastRestart import *
from GenerateNodeDistribution2d import *
from CloudMassFraction import *
import mpi
import DistributeNodes
title("3-D integrated hydro test -- Blob Test")
#-------------------------------------------------------------------------------
# Rejecter to help establishing initial conditions.
# This is used by GenerateNodeDistrubtion2d to carve out a spherical region.
#-------------------------------------------------------------------------------
class SphericalRejecterBlob:
    """Position rejecter that KEEPS only points strictly inside a circle.

    Handed to GenerateNodeDistribution2d to carve out the blob region: any
    generated point at or beyond `radius` from `origin` is discarded.
    """

    def __init__(self, origin, radius):
        """origin -- (x, y) center of the blob; radius -- blob radius."""
        self.origin = origin
        self.radius = radius

    def __call__(self, x, y, m, H):
        """Filter the parallel (x, y, m, H) lists, keeping in-circle points.

        x, y -- coordinate lists; m -- masses; H -- smoothing-scale entries.
        Returns four filtered lists with correspondence preserved.
        """
        n = len(x)
        assert (len(y) == n and len(m) == n and len(H) == n)
        xnew, ynew, mnew, Hnew = [], [], [], []
        R2 = self.radius**2
        x0, y0 = self.origin[0], self.origin[1]
        # Fix: use range rather than the Python-2-only xrange so the rejecter
        # also works under Python 3 (behavior is identical here).
        for i in range(n):
            if (x[i] - x0)**2 + (y[i] - y0)**2 < R2:
                xnew.append(x[i])
                ynew.append(y[i])
                mnew.append(m[i])
                Hnew.append(H[i])
        return xnew, ynew, mnew, Hnew
#-------------------------------------------------------------------------------
# Rejecter to help establishing initial conditions.
# This is used by GenerateNodeDistrubtion2d to cut out a spherical region cavity
#-------------------------------------------------------------------------------
class SphericalRejecter:
    """Position rejecter that REMOVES points strictly inside a circle.

    Handed to GenerateNodeDistribution2d to cut a circular cavity out of the
    ambient medium: only points at or beyond `radius` from `origin` survive.
    """

    def __init__(self, origin, radius):
        """origin -- (x, y) center of the cavity; radius -- cavity radius."""
        self.origin = origin
        self.radius = radius

    def __call__(self, x, y, m, H):
        """Filter the parallel (x, y, m, H) lists, keeping out-of-circle points.

        x, y -- coordinate lists; m -- masses; H -- smoothing-scale entries.
        Returns four filtered lists with correspondence preserved.
        """
        n = len(x)
        assert (len(y) == n and len(m) == n and len(H) == n)
        xnew, ynew, mnew, Hnew = [], [], [], []
        R2 = self.radius**2
        x0, y0 = self.origin[0], self.origin[1]
        # Fix: use range rather than the Python-2-only xrange so the rejecter
        # also works under Python 3 (behavior is identical here).
        for i in range(n):
            if (x[i] - x0)**2 + (y[i] - y0)**2 >= R2:
                xnew.append(x[i])
                ynew.append(y[i])
                mnew.append(m[i])
                Hnew.append(H[i])
        return xnew, ynew, mnew, Hnew
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
# commandLine() (Spheral test utility) takes these keyword defaults, overlays
# any command-line overrides, and injects the names as module globals used by
# the rest of the script.
commandLine(
    #Set external state (Internal state is dependent so dont set it)
    gamma = 5.0/3.0,
    rhoext = 1.0,
    Pequi = 1.0,
    mach = 2.7, #mach number
    # Geometry Ambient medium box
    xb0 = 0.0,
    xb1 = 40.0,
    yb0 = 0.0,
    yb1 = 10.0,
    #Blob radius and central location
    br = 1.0,
    bx = 5.0,
    by = 5.0,
    chi = 10.0, # Ratio of rhoblob/rhoext
    goalTKH = 2.5, # Goal time in units of t_KH
    # Resolution and node seeding.
    nx1 = 256,
    ny1 = 64,
    massMatch = True, # If False, match spatial resolution in blob
    nPerh = 1.35,
    # Hydro formulation selectors (mutually exclusive; plain SPH if all False).
    SVPH = False,
    CRKSPH = False,
    PSPH = False,
    ASPH = False, # This just chooses the H algorithm -- you can use this with CRKSPH for instance.
    filter = 0.0, # For CRKSPH (note: shadows the builtin filter() by design of this test harness)
    HopkinsConductivity = False, # For PSPH
    Qconstructor = MonaghanGingoldViscosity,
    #Qconstructor = TensorMonaghanGingoldViscosity,
    KernelConstructor = NBSplineKernel,
    order = 5,
    # Artificial-viscosity limiter options (at most one may be enabled).
    boolReduceViscosity = False,
    nhQ = 5.0,
    nhL = 10.0,
    boolCullenViscosity = False,
    alphMax = 2.0,
    alphMin = 0.02,
    betaC = 0.7,
    betaD = 0.05,
    betaE = 1.0,
    fKern = 1.0/3.0,
    boolHopkinsCorrection = True,
    aMin = 0.1,
    aMax = 2.0,
    linearConsistent = False,
    fcentroidal = 0.0,
    fcellPressure = 0.0,
    Cl = 1.0,
    Cq = 1.0,
    Qlimiter = False,
    balsaraCorrection = False,
    epsilon2 = 1e-2,
    # Smoothing-scale bounds and time-step controls.
    hmin = 1e-5,
    hmax = 0.5,
    hminratio = 0.1,
    cfl = 0.25,
    XSPH = False,
    epsilonTensile = 0.0,
    nTensile = 8,
    IntegratorConstructor = CheapSynchronousRK2Integrator,
    steps = None,
    vizCycle = 20,
    vizTime = 0.1,
    dt = 0.0001,
    dtMin = 1.0e-5,
    dtMax = 0.1,
    dtGrowth = 2.0,
    maxSteps = None,
    statsStep = 10,
    HUpdate = IdealH,
    domainIndependent = False,
    rigorousBoundaries = False,
    dtverbose = False,
    densityUpdate = RigorousSumDensity, # VolumeScaledDensity,
    compatibleEnergy = True,
    evolveTotalEnergy = False,
    gradhCorrection = True,
    correctVelocityGradient = True,
    clearDirectories = False,
    restoreCycle = -1,
    restartStep = 200,
    dataDir = "dumps-blobtest-2d",
    # Parameters for the cloud mass fraction history
    histfilename = "cloud_mass_history.gnu",
    rhoThresholdFrac = 0.64,
    epsThresholdFrac = 0.9,
    massFracFreq = 10,
    )
# Check the input.
# At most one viscosity-limiter scheme may be active at a time.
assert not (boolReduceViscosity and boolCullenViscosity)
# Compatible-energy and total-energy evolution are mutually exclusive modes.
assert not (compatibleEnergy and evolveTotalEnergy)
# Decide on our hydro algorithm.
# The SVPH/CRKSPH/PSPH flags pick the formulation; ASPH only switches to the
# anisotropic-H variant of whichever formulation was chosen.
if SVPH:
    if ASPH:
        HydroConstructor = ASVPHFacetedHydro
    else:
        HydroConstructor = SVPHFacetedHydro
elif CRKSPH:
    # CRKSPH overrides the command-line Qconstructor with its own viscosity.
    Qconstructor = CRKSPHMonaghanGingoldViscosity
    if ASPH:
        HydroConstructor = ACRKSPHHydro
    else:
        HydroConstructor = CRKSPHHydro
elif PSPH:
    if ASPH:
        HydroConstructor = APSPHHydro
    else:
        HydroConstructor = PSPHHydro
else:
    if ASPH:
        HydroConstructor = ASPHHydro
    else:
        HydroConstructor = SPHHydro
# Build our directory paths.
# The run directory encodes every physics/resolution choice so distinct runs
# never clobber each other's restart or viz files.
# NOTE(review): os is used here but "import os, sys" only appears further
# down; it must already arrive via one of the wildcard imports above -- confirm.
densityUpdateLabel = {IntegrateDensity : "IntegrateDensity",
                      SumDensity : "SumDensity",
                      RigorousSumDensity : "RigorousSumDensity",
                      SumVoronoiCellDensity : "SumVoronoiCellDensity"}
baseDir = os.path.join(dataDir,
                       HydroConstructor.__name__,
                       Qconstructor.__name__,
                       KernelConstructor.__name__,
                       densityUpdateLabel[densityUpdate],
                       "compatibleEnergy=%s" % compatibleEnergy,
                       "evolveTotalEnergy=%s" % evolveTotalEnergy,
                       "Cullen=%s" % boolCullenViscosity,
                       "XSPH=%s" % XSPH,
                       "nPerh=%3.1f" % nPerh,
                       "fcentroidal=%1.3f" % fcentroidal,
                       "fcellPressure = %1.3f" % fcellPressure,
                       "massMatch=%s" % massMatch,
                       "%ix%i" % (nx1, ny1))
restartDir = os.path.join(baseDir, "restarts")
restartBaseName = os.path.join(restartDir, "blob-2d-%ix%i" % (nx1, ny1))
vizDir = os.path.join(baseDir, "visit")
if vizTime is None and vizCycle is None:
    vizBaseName = None
else:
    vizBaseName = "blobtest-2d-%ix%i" % (nx1, ny1)
# Figure out the goal time.
# Characteristic blob-test time scales (Agertz et al. 2007): the wind speed
# follows from the ambient sound speed and Mach number, the cloud-crushing
# time from the density contrast chi, and t_KH = 1.6 * tCrush.
csext = sqrt(gamma*Pequi/rhoext)   # ambient sound speed
vext = mach*csext                  # wind speed
tCrush = 2.0*br*sqrt(chi)/vext     # cloud-crushing time
tKH = 1.6*tCrush                   # Kelvin-Helmholtz growth time
goalTime = goalTKH * tKH
print "Computed times (tCrush, tKH, goalTime) = (%g, %g, %g)" % (tCrush, tKH, goalTime)
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist.  If not, create them.
#-------------------------------------------------------------------------------
import os, sys
# Only rank 0 touches the filesystem; everyone else waits at the barrier so
# the directories are guaranteed to exist before any rank proceeds.
if mpi.rank == 0:
    if clearDirectories and os.path.exists(baseDir):
        shutil.rmtree(baseDir)
    if not os.path.exists(restartDir):
        os.makedirs(restartDir)
    if not os.path.exists(vizDir):
        os.makedirs(vizDir)
mpi.barrier()
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
# Same gamma-law EOS for both materials; only the densities differ.
# mu is presumably the mean molecular weight -- confirm against GammaLawGasMKS.
mu = 1.0
eos1 = GammaLawGasMKS(gamma, mu)
eos2 = GammaLawGasMKS(gamma, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
# NBSplineKernel needs its order argument; other kernels take no arguments.
if KernelConstructor==NBSplineKernel:
    WT = TableKernel(NBSplineKernel(order), 1000)
else:
    WT = TableKernel(KernelConstructor(), 1000)
output("WT")
kernelExtent = WT.kernelExtent
#-------------------------------------------------------------------------------
# Make the NodeLists.
#-------------------------------------------------------------------------------
#Ambient Medium nodes
outerNodes = makeFluidNodeList("outer", eos1,
                               hmin = hmin,
                               hmax = hmax,
                               hminratio = hminratio,
                               nPerh = nPerh)
#Blob nodes
innerNodes = makeFluidNodeList("inner", eos2,
                               hmin = hmin,
                               hmax = hmax,
                               hminratio = hminratio,
                               nPerh = nPerh)
nodeSet = (outerNodes, innerNodes)
for nodes in nodeSet:
    output("nodes.name")
    output("  nodes.hmin")
    output("  nodes.hmax")
    output("  nodes.hminratio")
    output("  nodes.nodesPerSmoothingScale")
del nodes
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
#Blob density is defined to be 10 times the external density
rhoblob=rhoext*chi
# Ambient lattice covering the box, with the blob region carved out so the
# inner generator can fill it at the higher density.
generatorOuter = GenerateNodeDistribution2d(nx1, ny1, rhoext,
                                            distributionType = "lattice",
                                            xmin = (xb0, yb0),
                                            xmax = (xb1, yb1),
                                            rejecter = SphericalRejecter(origin = (bx, by),
                                                                         radius = br),
                                            nNodePerh = nPerh,
                                            SPH = (not ASPH))
# Retained 3-D variants of the generators for reference:
#generatorOuter = GenerateNodeDistribution2d(nx1, ny1, nz1, rhoext,
#                                            distributionType = "lattice",
#                                            xmin = (bx-br, by-br, bz-br),
#                                            xmax = (bx+br, by+br, bz+br),
#                                            rmin = br,
#                                            nNodePerh = nPerh,
#                                            SPH = (not ASPH))
#generatorInner = GenerateNodeDistribution2d(nx1, ny1, nz1, rhoblob,
#                                            distributionType = "lattice",
#                                            xmin = (xb0, yb0, zb0),
#                                            xmax = (xb1, yb1, zb1),
#                                            rejecter = SphericalRejecterBlob(origin = (bx, by, bz),
#                                                                             radius = br),
#                                            nNodePerh = nPerh,
#                                            SPH = (not ASPH))
if massMatch:
    # Figure out a mass matched resolution for the blob.
    mouter = (xb1 - xb0)*(yb1 - yb0)*rhoext/(nx1*ny1)
    nxinner = max(2, int(((2*br)**2*rhoblob/mouter)**(1.0/2.0) + 0.5))
    generatorInner = GenerateNodeDistribution2d(nxinner, nxinner, rhoblob,
                                                distributionType = "lattice",
                                                xmin = (bx-br, by-br),
                                                xmax = (bx+br, by+br),
                                                originreject = (bx, by),
                                                rreject = br,
                                                nNodePerh = nPerh,
                                                SPH = (not ASPH))
else:
    # Same spatial resolution as the ambient lattice (unequal node masses).
    generatorInner = GenerateNodeDistribution2d(nx1, ny1, rhoblob,
                                                distributionType = "lattice",
                                                xmin = (xb0, yb0),
                                                xmax = (xb1, yb1),
                                                originreject = (bx, by),
                                                rreject = br,
                                                nNodePerh = nPerh,
                                                SPH = (not ASPH))
# Parallel runs need the Voronoi-based domain decomposition.
if mpi.procs > 1:
    from VoronoiDistributeNodes import distributeNodes2d
else:
    from DistributeNodes import distributeNodes2d
distributeNodes2d((outerNodes, generatorOuter),
                  (innerNodes, generatorInner))
for nodes in nodeSet:
    print nodes.name, ":"
    output("  mpi.reduce(nodes.numInternalNodes, mpi.MIN)")
    output("  mpi.reduce(nodes.numInternalNodes, mpi.MAX)")
    output("  mpi.reduce(nodes.numInternalNodes, mpi.SUM)")
del nodes
# Set node specific thermal energies
# Pressure equilibrium: from the gamma-law EOS P = (gamma - 1)*rho*eps, both
# materials get eps such that their pressure equals Pequi.
for (nodes, rho) in ((outerNodes, rhoext),
                     (innerNodes, rhoblob)):
    eps0 = Pequi/((gamma - 1.0)*rho)
    nodes.specificThermalEnergy(ScalarField("tmp", nodes, eps0))
del nodes
#for nodes in nodeSet:
#    vel = nodes.velocity()
#    for i in xrange(nodes.numInternalNodes):
#        vel[i]=Vector(velx,vely)
# Only the ambient medium moves: a uniform wind in +x at the Mach-derived speed.
vel = outerNodes.velocity() #wind velocity
for i in xrange(outerNodes.numInternalNodes):
    vel[i].x = vext
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node lists
#-------------------------------------------------------------------------------
db = DataBase()
output("db")
for nodes in nodeSet:
    db.appendNodeList(nodes)
del nodes
output("db.numNodeLists")
output("db.numFluidNodeLists")
#-------------------------------------------------------------------------------
# Construct the artificial viscosity.
#-------------------------------------------------------------------------------
# Qconstructor was chosen on the command line (or forced by CRKSPH above);
# Cl/Cq are the linear and quadratic viscosity coefficients.
q = Qconstructor(Cl, Cq)
q.epsilon2 = epsilon2
q.limiter = Qlimiter
q.balsaraShearCorrection = balsaraCorrection
output("q")
output("q.Cl")
output("q.Cq")
output("q.epsilon2")
output("q.limiter")
output("q.balsaraShearCorrection")
#-------------------------------------------------------------------------------
# Construct the hydro physics object.
#-------------------------------------------------------------------------------
if SVPH:
hydro = HydroConstructor(W = WT,
Q = q,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
densityUpdate = densityUpdate,
XSVPH = XSPH,
linearConsistent = linearConsistent,
generateVoid = False,
HUpdate = HUpdate,
fcentroidal = fcentroidal,
fcellPressure = fcellPressure,
xmin = Vector(xb0 - (xb1 - xb0), yb0 - (yb1 - yb0)),
xmax = Vector(xb1 + (xb1 - xb0), yb1 + (yb1 - yb0)))
elif CRKSPH:
hydro = HydroConstructor(W = WT,
Q = q,
filter = filter,
epsTensile = epsilonTensile,
nTensile = nTensile,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
evolveTotalEnergy = evolveTotalEnergy,
XSPH = XSPH,
densityUpdate = densityUpdate,
HUpdate = HUpdate)
elif PSPH:
hydro = HydroConstructor(W = WT,
Q = q,
filter = filter,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
evolveTotalEnergy = evolveTotalEnergy,
HopkinsConductivity = HopkinsConductivity,
densityUpdate = densityUpdate,
HUpdate = HUpdate,
XSPH = XSPH)
else:
hydro = HydroConstructor(W = WT,
Q = | |
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""
This module creates a tree of ConfigurationElements from XML documents or from
elements of ElementTrees.
"""
import xml.etree.ElementTree as ElementTree
import re
import decimal
from warrior.Framework.Utils.print_Utils import print_error
from warrior.Framework import Utils
class ConfigurationElement(object):
"""
Configuration element to support arbitrary xml object depth
"""
def __init__(self, name='base', start_pat="${", end_pat="}"):
""" Constructor """
self.name = name
self.attributes = {}
self.children = {}
self.start_pat = start_pat
self.end_pat = end_pat
def __find_match(self, string):
    """
    Internal helper to locate one embedded variable span in *string*.

    :param string: text that may contain start_pat ... end_pat delimiters
    :return: a regex match object where group(1) is the delimited span
        including the delimiters and group(2) is the text between them,
        or None when no delimited span is present.
    """
    # Create a regex search object which contains
    # a group object with the text within the start and end pattern
    # and another group object with the text and start/end pattern
    # if the regex pattern doesn't match with the string, it return None
    # The leading greedy ".*" anchors on the last start pattern that is
    # followed by an end pattern, and the lazy "(.*?)" stops at the first
    # end pattern after it -- so nested variables match innermost-first.
    text_between_pattern = r"(.*?)"
    return re.search(r".*(" + re.escape(self.start_pat) + text_between_pattern +\
                     re.escape(self.end_pat) + r").*", string)
def expand_variables(self, string):
"""
Replaces embedded variables in the string with their values. Works with $(var) or
${var}. These can be nested to any level
:param string:
:return:
"""
new_string = self.__expand_variables(string)
return new_string
def __expand_variables(self, string):
    """
    Replaces embedded variables in the string with their values. Can handle multiple levels
    of nesting such as ${${node.some_var}.node.${some_other_var}}. All occurrences of
    variables will be replaced.
    :param string:
    :return: the fully substituted string
    """
    # The string that is currently being processed
    return_value = string
    # Get the regex match object of the substring
    # which looks for text between start and end pattern
    match = self.__find_match(return_value)
    # Keep substituting until no embedded variable remains.
    while match is not None:
        # match.group(2) contains the pre-sub value
        # substitued value is the actual value after parsing the pre-sub value
        substitued_value = self.get_value(match.group(2))
        # match.group(1) contains start_pattern, pre-sub value and end_pattern
        # for default pattern, it looks like ${PRESUB_VALUE}
        # this step replace the pre_sub value
        return_value = return_value.replace(match.group(1), substitued_value, 1)
        # Call other substitute functions
        return_value = Utils.data_Utils.sub_from_env_var(
            return_value, self.start_pat, self.end_pat)
        return_value = Utils.data_Utils.sub_from_data_repo(
            return_value, self.start_pat, self.end_pat)
        # Doing another search for the next value to substitue
        match = self.__find_match(return_value)
    return return_value
def get_list_direct(self, string):
    """
    Extract literal value lists embedded between the start/end patterns,
    treating the delimited text itself as the list (no key lookup).
    :param string:
    :return:
        a dictionary
            if nothing wrong with list range
                return a dict with orignal list/range as key "1..4,5,6..7:0.5"
                parsed list as value [1,2,3,4,5,6,6.5,7]
            if invalid range found
                return a dict {'Error':False}
            if no list/range found
                return a dict {'False':False}
    """
    return_value = string
    result = {}
    check = self.end_pat
    # Search only up to (and including) the first end pattern so each
    # delimited list is consumed one at a time, left to right.
    match = self.__find_match(return_value[:return_value.find(check) + len(check)])
    while match is not None:
        try:
            # Re-wrap in braces so __parse_list sees the "{...}" form it expects.
            parsed_value = self.__parse_list("{" + match.group(2) + "}")
        except (ValueError, TypeError, AttributeError):
            print_error("Invalid list range found")
            return {'Error':False}
        if parsed_value:
            result[match.group(2)] = parsed_value
        # Drop the consumed prefix (through the first end pattern) and rescan.
        return_value = return_value.replace(return_value[:return_value.find(check) + len(check)], '')
        match = self.__find_match(return_value[:return_value.find(check) + len(check)])
    if result == {}:
        return {'False':False}
    return result
def get_list(self, string):
"""
entry function for __get_list
:param string:
:return:
a dictionary
if nothing wrong with list range
return a dict with orignal list/range as key "1..4,5,6..7:0.5"
parsed list as value [1,2,3,4,5,6,6.5,7]
if invalid range found
return a dict {'Error':False}
if no list/range found
return a dict {'False':False}
"""
new_string = self.__get_list(string)
return new_string
def __get_list(self, string):
    """
    Get the list of variable from the string (can have multiple ${list} ).
    The delimited text is looked up with get_value() before being parsed.
    :param string:
    :return:
        a dictionary
            if nothing wrong with list range
                return a dict with orignal list/range as key "1..4,5,6..7:0.5"
                parsed list as value [1,2,3,4,5,6,6.5,7]
            if invalid range found
                return a dict {'Error':False}
            if no list/range found
                return a dict {'False':False}
    """
    return_value = string
    result = {}
    check = self.end_pat
    # Search only up to (and including) the first end pattern so each
    # delimited list is consumed one at a time, left to right.
    match = self.__find_match(return_value[:return_value.find(check) + len(check)])
    while match is not None:
        try:
            # Resolve the key first, then parse what it points at.
            parsed_value = self.__parse_list(self.get_value(match.group(2)))
        except (ValueError, TypeError, AttributeError):
            print_error("Invalid list range found")
            return {'Error':False}
        if parsed_value:
            result[match.group(2)] = parsed_value
        # Drop the consumed prefix (through the first end pattern) and rescan.
        return_value = return_value.replace(return_value[:return_value.find(check) + len(check)], '')
        match = self.__find_match(return_value[:return_value.find(check) + len(check)])
    if result == {}:
        return {'False':False}
    return result
def __parse_list(self, string):
"""
Parsed a string of value list, extract values and put it in a list
:param string:
:return:
a list of string - list of values that is extracted from original string or original string
"""
if string.startswith("{") and string.endswith("}"):
result = []
parsed = string[1:-1].split(',')
for var in parsed:
if '..' in var and ':' in var:
indexes = re.split('\.\.|:', var)
x = self.__frange(indexes[0], indexes[1], indexes[2])
for index in x:
result.append(str(index))
elif '..' in var and ':' not in var:
indexes = re.split('\.\.', var)
x = self.__frange(indexes[0], indexes[1])
for index in x:
result.append(str(index))
else:
result.append(var)
return result
else:
return False
def __frange(self, x, y, jump=None):
    """
    Customized range generator supporting floating point steps and either
    direction.  Unlike the builtin range, BOTH endpoints are inclusive.
    :param x: start value (string or number)
    :param y: stop value, inclusive (string or number)
    :param jump: step; defaults to +1 or -1 depending on direction
    :return:
        a generator with each value in the range
    """
    # Find the floating number that has the longest exponents part
    x_dec = abs(decimal.Decimal(x).as_tuple().exponent)
    y_dec = abs(decimal.Decimal(y).as_tuple().exponent)
    if jump is None:
        # Default step walks toward y in whole units.
        jump = 1 if float(x) < float(y) else -1
    jump_dec = abs(decimal.Decimal(jump).as_tuple().exponent)
    dec_offset = max([x_dec, y_dec, jump_dec])
    # Work in ints when every operand is integral, in floats otherwise.
    if dec_offset == 0:
        x, y, jump = (int(x), int(y), int(jump))
    else:
        x, y, jump = (float(x), float(y), float(jump))
    if jump == 0:
        raise ValueError("step value cannot be 0")
    # The step must point from x toward y or the loop would never terminate.
    if (x > y and jump > 0) or (x < y and jump < 0):
        raise ValueError("sign of step value must not be the same as difference between comparing values")
    if x < y:
        while x <= y:
            yield x
            x += jump
            if dec_offset > 0:
                # Round away accumulated float error at the working precision.
                x = round(x, int(dec_offset))
    else:
        while x >= y:
            yield x
            x += jump
            if dec_offset > 0:
                x = round(x, int(dec_offset))
def get_list_of_values(self, *args):
"""
Looks up all values requested and returns a list in the order requested
:param args:
:return:
"""
values = []
for data in args:
values.append(self.get_value(data))
return values
def get_dictionary_of_values(self, *args):
"""
Looks up all values requested and returns a dictionary of key value pairs
:param args:
:return:
"""
values = {}
for data in args:
values[data] = self.get_value(data)
return values
def get_value(self, value):
"""
Returns requested value or None if not found and prints an error
:param value:
:return:
"""
try:
if len(value.split('.')) > 1:
child = value.split('.')[0].strip()
return self.children[child].get_value(value.split('.', 1)[1].strip())
else:
return self.attributes[value]
except KeyError:
print_error("Key error node " + self.name + " does not have sub node "
+ value.split('.')[0] + ". Cannot complete remainder of search for " + value)
return None
def set_value(self, key, value):
"""
Adds or changes the key value pair in the tree
:param key:
:param value:
:return:
"""
try:
if len(key.split('.')) > 1:
child = key.split('.')[0]
self.children[child].set_value(key.split('.', 1)[1], value)
else:
self.attributes[key] = value
except KeyError:
print_error("Key error node " + self.name + " does not have sub node "
+ key.split('.')[0] + ". Cannot complete remainder of search for " + key)
def get_node(self, value):
"""
Returns a ConfigurationElement that is the tree rooted at the requested node
:param value:
:return:
"""
if len(value.split('.')) > 1:
child = value.split('.')[0]
return self.children[child].get_node(value.split('.', 1)[1])
else:
return self.children[value]
def parse_data(self, *arg, **kwargs):
"""
parse_data constructs a tree of ConfigurationElements from a list of xml files or a list
of ElementTrees. The name attribute is the primary key for each node, so if there is any
repetition of names at the same tree level, the two nodes will be merged with the | |
<gh_stars>0
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
# @Time : 2019/9/13 15:36
# @Author : liufang
# @File : model.py
import os
import sys
import cv2
import numpy as np
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
import functools
from models.seg_model import get_segmentation_network
nclass=11
def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize the weights of *net* in place.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal

    'normal' is what the original pix2pix and CycleGAN paper used, but xavier
    and kaiming might work better for some applications.
    """
    def init_func(m):
        # Applied to every submodule by net.apply() below.
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in classname or 'Linear' in classname):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                # kaiming derives its own scale; init_gain is not used here.
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if getattr(m, 'bias', None) is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in classname:
            # BatchNorm's weight is a vector, not a matrix; only a normal
            # distribution around 1.0 applies, regardless of init_type.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(init_func)  # recursively apply <init_func> to every submodule
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Register *net* on GPU(s) if requested, then initialize its weights.

    Parameters:
        net (network)      -- the network to be initialized
        init_type (str)    -- initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal
        gpu_ids (int list) -- which GPUs the network runs on, e.g. [0, 1, 2]

    Return the initialized network (wrapped in DataParallel when multiple
    GPUs are requested).
    """
    if gpu_ids:
        assert torch.cuda.is_available()
        # Move to the first GPU, then wrap for multi-GPU data parallelism.
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)
    init_weights(net, init_type, init_gain=init_gain)
    return net
class UnetSkipConnectionBlock(nn.Module):
    """One U-Net level with a skip connection.

    X -------------------identity----------------------
    |-- downsampling -- |submodule| -- upsampling --|
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet submodule with skip connections.

        Parameters:
            outer_nc (int) -- number of filters in the outer conv layer
            inner_nc (int) -- number of filters in the inner conv layer
            input_nc (int) -- number of channels in input images/features
                              (defaults to outer_nc when None)
            submodule (UnetSkipConnectionBlock) -- previously defined submodules
            outermost (bool) -- whether this is the outermost module
            innermost (bool) -- whether this is the innermost module
            norm_layer -- normalization layer (class or functools.partial)
            use_dropout (bool) -- whether to append a dropout layer
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm has no affine bias by default, so the convs supply
        # their own bias in that case; BatchNorm makes a conv bias redundant.
        norm_func = norm_layer.func if type(norm_layer) == functools.partial else norm_layer
        use_bias = norm_func == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        kernel_size = 4
        # Module creation order matches the reference implementation so that
        # RNG-dependent parameter initialization is reproducible.
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=kernel_size,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)
        if outermost:
            # Outermost level: no norm after the final deconv; tanh output.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=kernel_size, stride=2,
                                        padding=1)
            layers = [downconv] + [submodule] + [uprelu, upconv, nn.Tanh()]
        elif innermost:
            # Innermost level: no submodule; the down path skips the norm.
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=kernel_size, stride=2,
                                        padding=1, bias=use_bias)
            layers = [downrelu, downconv, uprelu, upconv, upnorm]
        else:
            # Intermediate level: the up path sees 2*inner_nc channels
            # because of the submodule's skip concatenation.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=kernel_size, stride=2,
                                        padding=1, bias=use_bias)
            layers = [downrelu, downconv, downnorm] + [submodule] + [uprelu, upconv, upnorm]
            if use_dropout:
                layers = layers + [nn.Dropout(0.5)]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        # Skip connection: concatenate input with the submodule output.
        return torch.cat([x, self.model(x)], 1)
class NetG(nn.Module):
    """Unet-based generator preceded by a small channel-reduction head."""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet generator.

        Parameters:
            input_nc (int)  -- number of channels in input images
            output_nc (int) -- number of channels in output images
            num_downs (int) -- number of downsamplings in the U-Net; with
                               num_downs == 7 a 128x128 image becomes 1x1
                               at the bottleneck
            ngf (int)       -- number of filters in the last conv layer
            norm_layer      -- normalization layer

        The U-Net is built recursively from the innermost layer outward.
        NOTE(review): input_nc/output_nc are not used below -- channel counts
        are hard-coded (9 in, 3 out); confirm this is intentional.
        """
        super(NetG, self).__init__()
        # Channel-reduction head: 9-channel input -> 3 channels for the U-Net.
        self.conv1 = nn.Conv2d(9, 3, 3, 1, 1)
        self.BatchNorm1 = nn.BatchNorm2d(3)
        self.LeakyReLU1 = nn.LeakyReLU(0.2, True)
        self.conv2 = nn.Conv2d(3, 3, 3, 1, 1)
        self.BatchNorm2 = nn.BatchNorm2d(3)
        self.LeakyReLU2 = nn.LeakyReLU(0.2, True)
        self.tanh = nn.Tanh()
        # Innermost bottleneck first, then num_downs-5 constant-width levels...
        block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=block, norm_layer=norm_layer, use_dropout=use_dropout)
        # ...then progressively halve the filter count from ngf*8 down to ngf.
        block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=block, norm_layer=norm_layer)
        block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=block, norm_layer=norm_layer)
        block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=block, norm_layer=norm_layer)
        self.model = UnetSkipConnectionBlock(3, ngf, input_nc=3, submodule=block, outermost=True, norm_layer=norm_layer)

    def forward(self, input):
        """Reduce 9->3 channels, run the U-Net, and squash with tanh."""
        out = self.LeakyReLU1(self.BatchNorm1(self.conv1(input)))
        out = self.LeakyReLU2(self.BatchNorm2(self.conv2(out)))
        return self.tanh(self.model(out))
#TODO modify this network to segmentation network
class NetC(nn.Module):
    """Segmentation-network wrapper.

    All work is delegated to the backbone returned by
    ``get_segmentation_network``; ``input_nc`` is accepted only for
    interface symmetry with the other networks in this module.
    """

    def __init__(self, input_nc, output_nc):
        super(NetC, self).__init__()
        # The backbone is selected purely by the number of output classes.
        self.net = get_segmentation_network(output_nc)

    def forward(self, x):
        """Run the wrapped segmentation backbone on ``x``."""
        return self.net(x)
class NetD(nn.Module):
    """Two-branch discriminator.

    A shared stride-2 conv stem feeds two stacks of stride-2 conv blocks:
    a "local" branch (4 blocks, flattened from an 8x8 map) and a "global"
    branch (5 blocks, flattened from a 4x4 map).  Each branch goes through
    its own dropout-regularized linear layer; the two 1024-d features are
    concatenated, mapped to ``output_nc`` values and squashed with tanh.

    NOTE(review): the flatten sizes (8*8*512 and 4*4*512) imply a fixed
    input resolution (256x256 when ndf=64) -- confirm against callers.
    """

    def __init__(self, input_nc, output_nc, ndf=64, norm_layer=nn.BatchNorm2d):
        super(NetD, self).__init__()
        # Shared stem: one stride-2 conv + leaky ReLU.
        self.conv1 = nn.Conv2d(input_nc, ndf, kernel_size=3, stride=2, padding=1)
        self.LeakyReLU1 = nn.LeakyReLU(0.2, True)

        def down_block(cin, cout):
            # One stride-2 downsampling unit: conv -> norm -> leaky ReLU.
            return [
                nn.Conv2d(cin, cout, kernel_size=3, stride=2, padding=1),
                norm_layer(cout),
                nn.LeakyReLU(0.2, True),
            ]

        local_layers = []
        for cin, cout in [(ndf, ndf * 2), (ndf * 2, ndf * 4),
                          (ndf * 4, ndf * 8), (ndf * 8, ndf * 8)]:
            local_layers += down_block(cin, cout)
        self.localNet = nn.Sequential(*local_layers)

        global_layers = []
        for cin, cout in [(ndf, ndf * 2), (ndf * 2, ndf * 4),
                          (ndf * 4, ndf * 8), (ndf * 8, ndf * 8),
                          (ndf * 8, ndf * 8)]:
            global_layers += down_block(cin, cout)
        self.globalNet = nn.Sequential(*global_layers)

        self.fc1 = nn.Linear(8 * 8 * 512, 1024)
        self.drop1 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(4 * 4 * 512, 1024)
        self.drop2 = nn.Dropout(0.5)
        self.fc3 = nn.Linear(2048, output_nc)
        self.drop3 = nn.Dropout(0.5)
        self.tanh = nn.Tanh()

    def forward(self, input):
        """Run both branches on the shared stem output and fuse them."""
        shared = self.LeakyReLU1(self.conv1(input))
        # Local branch: 4 downsamplings -> 8x8 map -> fc1.
        local_feat = self.localNet(shared).view(-1, 8 * 8 * 512)
        local_feat = self.drop1(F.relu(self.fc1(local_feat)))
        # Global branch: 5 downsamplings -> 4x4 map -> fc2.
        global_feat = self.globalNet(shared).view(-1, 4 * 4 * 512)
        global_feat = self.drop2(F.relu(self.fc2(global_feat)))
        # Fuse the two 1024-d features and classify.
        fused = torch.cat([local_feat, global_feat], -1)
        fused = self.drop3(F.relu(self.fc3(fused)))
        return self.tanh(fused)
def get_norm_layer(norm_type='instance'):
    """Return a normalization-layer factory.

    Parameters:
        norm_type (str) -- 'batch' or 'instance'

    'batch' yields BatchNorm2d with learnable affine parameters and running
    statistics; 'instance' yields InstanceNorm2d with neither.  Any other
    value raises NotImplementedError.
    """
    factories = {
        'batch': functools.partial(nn.BatchNorm2d, affine=True,
                                   track_running_stats=True),
        'instance': functools.partial(nn.InstanceNorm2d, affine=False,
                                      track_running_stats=False),
    }
    if norm_type not in factories:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return factories[norm_type]
def define_Net(input_nc, output_nc, ngf, netType, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[], class_number=7):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which | |
).
The name can be up to 256 characters long.
When using ReceiveMessage , you can send a list of attribute names to receive, or you can return all of the attributes by specifying All or .* in your request. You can also use all message attributes starting with a prefix, for example bar.* .
(string) --
MaxNumberOfMessages (integer) -- The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, fewer messages might be returned). Valid values are 1 to 10. Default is 1.
VisibilityTimeout (integer) -- The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request.
WaitTimeSeconds (integer) -- The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds .
ReceiveRequestAttemptId (string) -- This parameter applies only to FIFO (first-in-first-out) queues.
The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, you can retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired.
You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action.
When you set FifoQueue , a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly.
If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId , Amazon SQS generates a ReceiveRequestAttemptId .
You can retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changes).
During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide .
Warning
If a caller of the ReceiveMessage action is still processing messages when the visibility timeout expires and messages become visible, another worker reading from the same queue can receive the same messages and therefore process duplicates. Also, if a reader whose message processing time is longer than the visibility timeout tries to delete the processed messages, the action fails with an error. To mitigate this effect, ensure that your application observes a safe threshold before the visibility timeout expires and extend the visibility timeout as necessary.
While messages with a particular MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are returned until the visibility timeout expires. You can still receive messages with another MessageGroupId as long as it is also visible.
If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId , no retries work until the original visibility timeout expires. As a result, delays might occur but the messages in the queue remain in a strict order.
The length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z , A-Z , 0-9 ) and punctuation (!"#$%'()*+,-./:;=?@[\]^_`{|}~ ).
For best practices of using ReceiveRequestAttemptId , see Using the ReceiveRequestAttemptId Request Parameter in the Amazon Simple Queue Service Developer Guide .
"""
pass
def remove_permission(QueueUrl=None, Label=None):
    """
    Revokes any permissions in the queue policy that match the specified Label parameter. Only the owner of the queue can remove permissions.
    See also: AWS API Documentation
    :example: response = client.remove_permission(
        QueueUrl='string',
        Label='string'
    )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
    The URL of the Amazon SQS queue from which permissions are removed.
    Queue URLs are case-sensitive.
    :type Label: string
    :param Label: [REQUIRED]
    The identification of the permission to remove. This is the label added using the ``AddPermission`` action.
    """
    # Stub generated from the botocore service model; performs no work here.
    pass
def send_message(QueueUrl=None, MessageBody=None, DelaySeconds=None, MessageAttributes=None, MessageDeduplicationId=None, MessageGroupId=None):
"""
Delivers a message to the specified queue.
See also: AWS API Documentation
:example: response = client.send_message(
QueueUrl='string',
MessageBody='string',
DelaySeconds=123,
MessageAttributes={
'string': {
'StringValue': 'string',
'BinaryValue': b'bytes',
'StringListValues': [
'string',
],
'BinaryListValues': [
b'bytes',
],
'DataType': 'string'
}
},
MessageDeduplicationId='string',
MessageGroupId='string'
)
:type QueueUrl: string
:param QueueUrl: [REQUIRED]
The URL of the Amazon SQS queue to which a message is sent.
Queue URLs are case-sensitive.
:type MessageBody: string
:param MessageBody: [REQUIRED]
The message to send. The maximum string size is 256 KB.
Warning
A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:
#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF
Any characters not included in this list will be rejected. For more information, see the W3C specification for characters .
:type DelaySeconds: integer
:param DelaySeconds: The length of time, in seconds, for which to delay a specific message. Valid values: 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds value become available for processing after the delay period is finished. If you don't specify a value, the default value for the queue applies.
Note
When you set FifoQueue , you can't set DelaySeconds per message. You can set this parameter only on a queue level.
:type MessageAttributes: dict
:param MessageAttributes: Each message attribute consists of a Name , Type , and Value . For more information, see Message Attribute Items and Validation in the Amazon SQS Developer Guide .
(string) --
(dict) --The user-specified message attribute value. For string data types, the Value attribute has the same restrictions on the content as the message body. For more information, see `` SendMessage .``
Name , type , value and the message body must not be empty or null. All parts of the message attribute, including Name , Type , and Value , are part of the message size restriction (256 KB or 262,144 bytes).
StringValue (string) --Strings are Unicode with UTF-8 binary encoding. For a list of code values, see ASCII Printable Characters .
BinaryValue (bytes) --Binary type attributes can store any binary data, such as compressed data, encrypted data, or images.
StringListValues (list) --Not implemented. Reserved for future use.
(string) --
BinaryListValues (list) --Not implemented. Reserved for future use.
(bytes) --
DataType (string) -- [REQUIRED]Amazon SQS supports the following logical data types: String , Number , and Binary . For the Number data type, you must use StringValue .
You can also append custom labels. For more information, see Message Attribute Data Types and Validation in the Amazon SQS Developer Guide .
:type MessageDeduplicationId: string
:param MessageDeduplicationId: This parameter applies only to FIFO (first-in-first-out) queues.
The token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any messages sent with the same MessageDeduplicationId are accepted successfully but aren't delivered during the 5-minute deduplication interval. For more information, see Exactly-Once Processing in the Amazon SQS Developer Guide .
Every message must have a unique MessageDeduplicationId ,
You may provide a MessageDeduplicationId explicitly.
If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).
If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.
If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.
When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.
If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the | |
!= 0:
raise PulpSolverError("Error in CPXmipopt status="
+ str(status))
else:
status = CPLEX_DLL.lib.CPXlpopt(self.env, self.hprob)
if status != 0:
raise PulpSolverError("Error in CPXlpopt status="
+ str(status))
self.cplexTime += clock()
    def actualSolve(self, lp):
        """Solve a well formulated lp problem.

        Builds a fresh CPLEX problem from *lp* (freeing any previous one),
        copies the LP matrix (and column types for MIPs) into it, calls the
        solver and loads the solution back into *lp*.

        :param lp: the pulp LpProblem to solve
        :return: the pulp solution status
        """
        #TODO alter so that msg parameter is handled correctly
        status = ctypes.c_int()
        byref = ctypes.byref   #shortcut to function
        # Free the problem left over from any previous solve.
        if self.hprob is not None:
            CPLEX_DLL.lib.CPXfreeprob(self.env, self.hprob)
        self.hprob = CPLEX_DLL.lib.CPXcreateprob(self.env,
                                                byref(status), lp.name)
        if status.value != 0:
            raise PulpSolverError("Error in CPXcreateprob status="
                                + str(status))
        # Translate the pulp model into CPLEX's column-major ctypes arrays.
        (numcols, numrows, numels, rangeCount,
            objSense, obj, objconst,
            rhs, rangeValues, rowSense, matbeg, matcnt, matind,
            matval, lb, ub, initValues, colname,
            rowname, xctype, n2v, n2c )= self.getCplexStyleArrays(lp)
        status.value = CPLEX_DLL.lib.CPXcopylpwnames (self.env, self.hprob,
                                numcols, numrows,
                                objSense, obj, rhs, rowSense, matbeg, matcnt,
                                matind, matval, lb, ub, None, colname, rowname)
        if status.value != 0:
            raise PulpSolverError("Error in CPXcopylpwnames status=" +
                                    str(status))
        # Column types (continuous/integer) only apply to MIP solves.
        if lp.isMIP() and self.mip:
            status.value = CPLEX_DLL.lib.CPXcopyctype(self.env,
                                                      self.hprob,
                                                      xctype)
            if status.value != 0:
                raise PulpSolverError("Error in CPXcopyctype status=" +
                                      str(status))
        #set the initial solution
        self.callSolver(lp.isMIP())
        #get the solution information
        solutionStatus = self.findSolutionValues(lp, numcols, numrows)
        # Clear modification flags so incremental re-solves start clean.
        for var in lp._variables:
            var.modified = False
        return solutionStatus
    def actualResolve(self, lp, **kwargs):
        """looks at which variables have been modified and changes them

        Assumes every variable flagged as modified is a NEW variable whose
        column must be appended to the existing CPLEX problem; the problem
        is then re-solved and the solution read back into *lp*.
        """
        #TODO: Add changing variables not just adding them
        #TODO: look at constraints
        modifiedVars = [var for var in lp.variables() if var.modified]
        #assumes that all variables flagged as modified
        #need to be added to the problem
        newVars = modifiedVars
        # Extend the variable<->index maps with the new columns.
        self.v2n.update([(var, i+self.addedVars)
                            for i,var in enumerate(newVars)])
        self.n2v.update([(i+self.addedVars, var)
                            for i,var in enumerate(newVars)])
        self.vname2n.update([(var.name, i+self.addedVars)
                            for i,var in enumerate(newVars)])
        oldVars = self.addedVars
        self.addedVars += len(newVars)
        (ccnt,nzcnt,obj,cmatbeg,
        cmatlen, cmatind,cmatval,
        lb,ub, initvals,
        colname, coltype) = self.getSparseCols(newVars, lp, oldVars,
                            defBound = 1e20)
        # NOTE(review): the CPXaddcols/CPXchgctype return codes below are
        # captured but never checked -- errors here pass silently.
        CPXaddcolsStatus = CPLEX_DLL.lib.CPXaddcols(self.env, self.hprob,
                                        ccnt, nzcnt,
                                        obj,cmatbeg,
                                        cmatind,cmatval,
                                        lb,ub,colname)
        #add the column types
        if lp.isMIP() and self.mip:
            indices = (ctypes.c_int * len(newVars))()
            for i,var in enumerate(newVars):
                indices[i] = oldVars +i
            CPXchgctypeStatus = \
                CPLEX_DLL.lib.CPXchgctype(self.env, self.hprob, ccnt, indices, coltype)
        #solve the problem
        self.callSolver(lp.isMIP())
        #get the solution information
        solutionStatus = self.findSolutionValues(lp, self.addedVars,
                                                 self.addedRows)
        for var in modifiedVars:
            var.modified = False
        return solutionStatus
    def getSparseCols(self, vars, lp, offset = 0, defBound = 1e20):
        """
        outputs the variables in var as a sparse matrix,
        suitable for cplex and Coin
        Copyright (c) <NAME> 2007

        Returns ctypes arrays describing the columns for the variables in
        ``vars``: objective coefficients, column start/length/index/value
        arrays, bounds, initial values, names and (for MIPs) column types.
        ``offset`` shifts the stored column indices; unbounded directions
        are encoded as +/- ``defBound``.

        NOTE(review): ``colNames[i] = str(v.name)`` stores a python str
        into a c_char_p array -- on Python 3 ctypes requires bytes here;
        this path is presumably Python 2 only -- confirm.
        """
        numVars = len(vars)
        obj = (ctypes.c_double * numVars)()
        cmatbeg = (ctypes.c_int * numVars)()
        mycmatind = []
        mycmatval = []
        rangeCount = 0
        #values for variables
        colNames = (ctypes.c_char_p * numVars)()
        lowerBounds = (ctypes.c_double * numVars)()
        upperBounds = (ctypes.c_double * numVars)()
        initValues = (ctypes.c_double * numVars)()
        i=0
        for v in vars:
            colNames[i] = str(v.name)
            initValues[i] = v.init
            # Missing bounds are replaced with +/- defBound.
            if v.lowBound != None:
                lowerBounds[i] = v.lowBound
            else:
                lowerBounds[i] = -defBound
            if v.upBound != None:
                upperBounds[i] = v.upBound
            else:
                upperBounds[i] = defBound
            i+= 1
        #create the new variables
        #values for constraints
        #return the coefficient matrix as a series of vectors
        myobjectCoeffs = {}
        numRows = len(lp.constraints)
        sparseMatrix = sparse.Matrix(list(range(numRows)), list(range(numVars)))
        for var in vars:
            for row,coeff in var.expression.items():
                if row.name == lp.objective.name:
                    # Objective coefficients are kept out of the constraint matrix.
                    myobjectCoeffs[var] = coeff
                else:
                    sparseMatrix.add(self.c2n[row.name], self.v2n[var] - offset, coeff)
        #objective values
        # NOTE(review): a variable that never appears in the objective would
        # raise KeyError below -- confirm callers guarantee an objective entry.
        objectCoeffs = (ctypes.c_double * numVars)()
        for var in vars:
            objectCoeffs[self.v2n[var]-offset] = myobjectCoeffs[var]
        (numels, mystartsBase, mylenBase, myindBase,
         myelemBase) = sparseMatrix.col_based_arrays()
        elemBase = ctypesArrayFill(myelemBase, ctypes.c_double)
        indBase = ctypesArrayFill(myindBase, ctypes.c_int)
        startsBase = ctypesArrayFill(mystartsBase, ctypes.c_int)
        lenBase = ctypesArrayFill(mylenBase, ctypes.c_int)
        #MIP Variables
        NumVarCharArray = ctypes.c_char * numVars
        columnType = NumVarCharArray()
        if lp.isMIP():
            CplexLpCategories = {constants.LpContinuous: "C",
                                constants.LpInteger: "I"}
            for v in vars:
                columnType[self.v2n[v] - offset] = CplexLpCategories[v.cat]
        return numVars, numels, objectCoeffs, \
            startsBase, lenBase, indBase, \
            elemBase, lowerBounds, upperBounds, initValues, colNames, \
            columnType
def objSa(self, vars = None):
"""Objective coefficient sensitivity analysis.
Called after a problem has been solved, this function
returns a dict mapping variables to pairs (lo, hi) indicating
that the objective coefficient of the variable can vary
between lo and hi without changing the optimal basis
(if other coefficients remain constant). If an iterable
vars is given, results are returned only for variables in vars.
"""
if vars is None:
v2n = self.v2n
else:
v2n = dict((v, self.v2n[v]) for v in vars)
ifirst = min(v2n.values())
ilast = max(v2n.values())
row_t = ctypes.c_double * (ilast - ifirst + 1)
lo = row_t()
hi = row_t()
status = ctypes.c_int()
status.value = CPLEX_DLL.lib.CPXobjsa(self.env, self.hprob,
ifirst, ilast, lo, hi)
if status.value != 0:
raise PulpSolverError("Error in CPXobjsa, status="
+ str(status))
return dict((v, (lo[i - ifirst], hi[i - ifirst]))
for v, i in v2n.items())
CPLEX = CPLEX_DLL
except (ImportError,OSError):
    class CPLEX_DLL(LpSolver):
        """Placeholder CPLEX_DLL solver installed when the CPLEX shared
        library cannot be loaded (ImportError/OSError); it always reports
        itself as unavailable."""
        def available(self):
            """Return False: the CPLEX DLL could not be loaded."""
            return False
        def actualSolve(self, lp):
            """Always raise: solving is impossible without the CPLEX DLL."""
            raise PulpSolverError("CPLEX_DLL: Not Available")
CPLEX = CPLEX_CMD
try:
import cplex
except (Exception) as e:
    class CPLEX_PY(LpSolver):
        """Placeholder CPLEX_PY solver installed when the `cplex` python
        package cannot be imported; it always reports itself as
        unavailable and re-raises the original import error on solve."""
        def available(self):
            """Return False: the cplex python bindings are missing."""
            return False
        def actualSolve(self, lp):
            """Always raise, carrying the original import error message."""
            raise PulpSolverError("CPLEX_PY: Not Available: " + str(e))
else:
class CPLEX_PY(LpSolver):
"""
The CPLEX LP/MIP solver (via a Python Binding)
This solver wraps the python api of cplex.
It has been tested against cplex 12.3.
For api functions that have not been wrapped in this solver please use
the base cplex classes
"""
        def __init__(self,
                    mip = True,
                    msg = True,
                    timeLimit = None,
                    epgap = None,
                    logfilename = None,
                    mip_start=False):
            """
            Initializes the CPLEX_PY solver.

            @param mip: if False the solver will solve a MIP as an LP
            @param msg: displays information from the solver to stdout
            @param timeLimit: maximum solve time in seconds (None = no limit)
            @param epgap: sets the integer bound gap
            @param logfilename: sets the filename of the cplex logfile
            @param mip_start: if True, supply the current variable values
                to CPLEX as a MIP start
            """
            LpSolver.__init__(self, mip, msg, mip_start=mip_start)
            self.timeLimit = timeLimit
            self.epgap = epgap
            self.logfilename = logfilename
        def available(self):
            """True if the solver is available (the cplex bindings imported)."""
            return True
        def actualSolve(self, lp, callback = None):
            """
            Solve a well formulated lp problem

            creates a cplex model, variables and constraints and attaches
            them to the lp model which it then solves

            NOTE(review): ``callback`` is accepted but never used in this
            method -- confirm whether callers rely on it.
            """
            self.buildSolverModel(lp)
            #set the initial solution
            log.debug("Solve the Model using cplex")
            self.callSolver(lp)
            #get the solution information
            solutionStatus = self.findSolutionValues(lp)
            # Clear modification flags so incremental re-solves start clean.
            for var in lp._variables:
                var.modified = False
            for constraint in lp.constraints.values():
                constraint.modified = False
            return solutionStatus
        def buildSolverModel(self, lp):
            """
            Takes the pulp lp model and translates it into a cplex model.

            Creates the cplex.Cplex() instance (stored on both self and lp),
            adds all variables and linear constraints, configures problem
            type, logging and parameters, and optionally supplies a MIP start.
            """
            model_variables = lp.variables()
            self.n2v = dict((var.name, var) for var in model_variables)
            if len(self.n2v) != len(model_variables):
                raise PulpSolverError(
                        'Variables must have unique names for cplex solver')
            log.debug("create the cplex model")
            self.solverModel = lp.solverModel = cplex.Cplex()
            log.debug("set the name of the problem")
            # NOTE(review): the problem name is only set when mip is False --
            # looks unintentional; confirm whether MIPs should be named too.
            if not self.mip:
                self.solverModel.set_problem_name(lp.name)
            log.debug("set the sense of the problem")
            # cplex minimizes by default, so only maximization needs setting.
            if lp.sense == constants.LpMaximize:
                lp.solverModel.objective.set_sense(
                    lp.solverModel.objective.sense.maximize)
            # Objective coefficients (0.0 for variables absent from the objective).
            obj = [float(lp.objective.get(var, 0.0)) for var in model_variables]
            def cplex_var_lb(var):
                # Map a pulp lower bound to cplex (None -> -infinity).
                if var.lowBound is not None:
                    return float(var.lowBound)
                else:
                    return -cplex.infinity
            lb = [cplex_var_lb(var) for var in model_variables]
            def cplex_var_ub(var):
                # Map a pulp upper bound to cplex (None -> +infinity).
                if var.upBound is not None:
                    return float(var.upBound)
                else:
                    return cplex.infinity
            ub = [cplex_var_ub(var) for var in model_variables]
            colnames = [var.name for var in model_variables]
            def cplex_var_types(var):
                # 'I' for integer variables, 'C' for continuous ones.
                if var.cat == constants.LpInteger:
                    return 'I'
                else:
                    return 'C'
            ctype = [cplex_var_types(var) for var in model_variables]
            ctype = "".join(ctype)
            lp.solverModel.variables.add(obj=obj, lb=lb, ub=ub, types=ctype,
                       names=colnames)
            rows = []
            senses = []
            rhs = []
            rownames = []
            for name,constraint in lp.constraints.items():
                #build the expression
                expr = [(var.name, float(coeff)) for var, coeff in constraint.items()]
                if not expr:
                    #if the constraint is empty
                    rows.append(([],[]))
                else:
                    rows.append(list(zip(*expr)))
                if constraint.sense == constants.LpConstraintLE:
                    senses.append('L')
                elif constraint.sense == constants.LpConstraintGE:
                    senses.append('G')
                elif constraint.sense == constants.LpConstraintEQ:
                    senses.append('E')
                else:
                    raise PulpSolverError('Detected an invalid constraint type')
                rownames.append(name)
                # pulp keeps the constant on the LHS; cplex wants it negated on the RHS.
                rhs.append(float(-constraint.constant))
            lp.solverModel.linear_constraints.add(lin_expr=rows, senses=senses,
                                rhs=rhs, names=rownames)
            log.debug("set the type of the problem")
            if not self.mip:
                self.solverModel.set_problem_type(cplex.Cplex.problem_type.LP)
            log.debug("set the logging")
            if not self.msg:
                # Silence all cplex output streams.
                self.solverModel.set_error_stream(None)
                self.solverModel.set_log_stream(None)
                self.solverModel.set_warning_stream(None)
                self.solverModel.set_results_stream(None)
            if self.logfilename is not None:
                self.setlogfile(self.logfilename)
            if self.epgap is not None:
                self.changeEpgap(self.epgap)
            if self.timeLimit is not None:
                self.setTimeLimit(self.timeLimit)
            if self.mip_start:
                # We assume "auto" for the effort_level
                effort = self.solverModel.MIP_starts.effort_level.auto
                start = [(k, v.value()) for k, v in self.n2v.items() if v.value() is not None]
                # NOTE(review): zip(*start) raises if no variable has a value;
                # confirm mip_start is only used with an initialized solution.
                ind, val = zip(*start)
                self.solverModel.MIP_starts.add(cplex.SparsePair(ind=ind, val=val), effort, '1')
def setlogfile(self, filename):
"""
sets the logfile | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# foo/wx/wx_voucher.py
#
# Copyright 2016 <EMAIL>
# <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tornado.web
import logging
import uuid
import time
import re
import json as JSON  # aliased so it does not clash with local variables named "json" in handlers
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../"))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../dao"))
from tornado.escape import json_encode, json_decode
from tornado.httpclient import HTTPClient
from tornado.httputil import url_concat
from bson import json_util
from comm import *
from dao import budge_num_dao
from dao import category_dao
from dao import activity_dao
from dao import group_qrcode_dao
from dao import cret_template_dao
from dao import bonus_template_dao
from dao import bonus_dao
from dao import apply_dao
from dao import order_dao
from dao import group_qrcode_dao
from dao import vendor_member_dao
from dao import voucher_dao
from dao import insurance_template_dao
from dao import contact_dao
from dao import vendor_hha_dao
from dao import voucher_pay_dao
from dao import vendor_wx_dao
from dao import voucher_order_dao
from dao import trip_router_dao
from dao import triprouter_share_dao
from dao import club_dao
from dao import activity_share_dao
from auth import auth_email
from auth import auth_phone
from wx_wrap import getAccessTokenByClientCredential
from wx_wrap import getJsapiTicket
from wx_wrap import Sign
from wx_wrap import getNonceStr
from wx_wrap import getOrderSign
from wx_wrap import getPaySign
from wx_wrap import getAccessToken
from wx_wrap import getUserInfo
from xml_parser import parseWxOrderReturn, parseWxPayReturn
from global_const import *
# Render the shared voucher page (purchase enabled).
class WxVoucherShareHandler(tornado.web.RequestHandler):
    """Render the shared-voucher page (purchase enabled) for a vendor voucher.

    Looks up the voucher and the vendor's WeChat app configuration, computes
    the WeChat JS-SDK signature for the current URL, and renders the
    pay-info template.
    """
    def get(self, vendor_id, voucher_id):
        logging.info("got vendor_id %r in uri", vendor_id)
        voucher = voucher_pay_dao.voucher_pay_dao().query_not_safe(voucher_id)
        # Stored amounts are integer cents; convert to yuan for display.
        voucher['amount'] = float(voucher['amount']) / 100
        voucher['price'] = float(voucher['price']) / 100
        vendor_wx = vendor_wx_dao.vendor_wx_dao().query(vendor_id)
        wx_app_id = vendor_wx['wx_app_id']
        wx_app_secret = vendor_wx['wx_app_secret']
        # BUG FIX: this previously read from `wx_app_info`, a name never
        # defined in this handler (guaranteed NameError); the WeChat config
        # dict here is `vendor_wx`.
        wx_notify_domain = vendor_wx['wx_notify_domain']
        logging.info("------------------------------------uri: "+self.request.uri)
        # Sign the exact URL this page is served from, per WeChat JS-SDK rules.
        _access_token = getAccessTokenByClientCredential(wx_app_id, wx_app_secret)
        _jsapi_ticket = getJsapiTicket(_access_token)
        _sign = Sign(_jsapi_ticket, wx_notify_domain+self.request.uri).sign()
        logging.info("------------------------------------nonceStr: "+_sign['nonceStr'])
        logging.info("------------------------------------jsapi_ticket: "+_sign['jsapi_ticket'])
        logging.info("------------------------------------timestamp: "+str(_sign['timestamp']))
        logging.info("------------------------------------url: "+_sign['url'])
        logging.info("------------------------------------signature: "+_sign['signature'])
        _account_id = self.get_secure_cookie("account_id")
        self.render('wx/voucher-pay-info.html',
                vendor_id=vendor_id,
                voucher=voucher,
                wx_app_id=wx_app_id,
                wx_notify_domain=wx_notify_domain,
                sign=_sign, account_id=_account_id,
                vendor_wx=vendor_wx)
# WeChat Pay generic result notification endpoint.
# The URL is set via the notify_url parameter of the "unified order" API; if it is unreachable the merchant will not receive WeChat's notifications.
# notify_url must be directly accessible and must not carry query parameters, e.g. notify_url: "https://pay.weixin.qq.com/wxpay/pay.action"
class WxVoucherOrderNotifyHandler(tornado.web.RequestHandler):
    """WeChat Pay result notification callback for voucher orders.

    WeChat POSTs an XML payload describing the payment result; this handler
    parses it and updates the corresponding voucher order's status.

    NOTE(review): WeChat expects an XML acknowledgement in the response
    body; nothing is written back here, so WeChat may keep re-notifying --
    confirm the status guard below is what makes retries harmless.
    """
    def post(self):
        # Sample notification payload from WeChat:
        #<xml>
        # <appid><![CDATA[wxaa328c83d3132bfb]]></appid>\n
        # <attach><![CDATA[Aplan]]></attach>\n
        # <bank_type><![CDATA[CFT]]></bank_type>\n
        # <cash_fee><![CDATA[1]]></cash_fee>\n
        # <fee_type><![CDATA[CNY]]></fee_type>\n
        # <is_subscribe><![CDATA[Y]]></is_subscribe>\n
        # <mch_id><![CDATA[1340430801]]></mch_id>\n
        # <nonce_str><![CDATA[jOhHjqDfx9VQGmU]]></nonce_str>\n
        # <openid><![CDATA[oy0Kxt7zNpZFEldQmHwFF-RSLNV0]]></openid>\n
        # <out_trade_no><![CDATA[e358738e30fe11e69a7e00163e007b3e]]></out_trade_no>\n
        # <result_code><![CDATA[SUCCESS]]></result_code>\n
        # <return_code><![CDATA[SUCCESS]]></return_code>\n
        # <sign><![CDATA[6291D73149D05F09D18C432E986C4DEB]]></sign>\n
        # <time_end><![CDATA[20160613083651]]></time_end>\n
        # <total_fee>1</total_fee>\n
        # <trade_type><![CDATA[JSAPI]]></trade_type>\n
        # <transaction_id><![CDATA[4007652001201606137183943151]]></transaction_id>\n
        #</xml>
        _xml = self.request.body
        logging.info("got return_body %r", _xml)
        _pay_return = parseWxPayReturn(_xml)
        logging.info("got result_code %r", _pay_return['result_code'])
        logging.info("got total_fee %r", _pay_return['total_fee'])
        logging.info("got time_end %r", _pay_return['time_end'])
        logging.info("got transaction_id %r", _pay_return['transaction_id'])
        logging.info("got out_trade_no %r", _pay_return['out_trade_no'])
        # out_trade_no carries our own order id.
        _order_id = _pay_return['out_trade_no']
        _result_code = _pay_return['result_code']
        if _result_code == 'SUCCESS' :
            # Skip if the order has already advanced past payment; this
            # guards against duplicate notifications and the user pressing
            # "back" and re-triggering this page.
            _old_order = voucher_order_dao.voucher_order_dao().query(_order_id)
            if _old_order['status'] > 30:
                return
            else:
                _timestamp = int(time.time())
                json = {'_id':_order_id,
                        'last_update_time': _timestamp, "status": ORDER_STATUS_WECHAT_PAY_SUCCESS,
                        'transaction_id':_pay_return['transaction_id'], 'payed_total_fee':_pay_return['total_fee']}
                voucher_order_dao.voucher_order_dao().update(json)
        else:
            # Payment failed: record the failure status on the order.
            _timestamp = (int)(time.time())
            json = {'_id':_order_id,
                    'last_update_time': _timestamp, "status": ORDER_STATUS_WECHAT_PAY_FAILED}
            voucher_order_dao.voucher_order_dao().update(json)
# Voucher purchase flow: verify the user, create the order, return for confirmation, pay via WeChat, then show success.
class WxVoucherBuyStep0Handler(tornado.web.RequestHandler):
    """Step 0 of the voucher purchase flow.

    Verifies the current user via the local API, ensures a vendor-member
    record exists (creating or refreshing it), then renders the order
    confirmation page.  On a missing access token or any failure along the
    way, the user is sent through the WeChat OAuth flow, which redirects
    back into step 1.
    """
    def get(self, vendor_id, voucher_id):
        wx_app_info = vendor_wx_dao.vendor_wx_dao().query(vendor_id)
        wx_app_id = wx_app_info['wx_app_id']
        wx_notify_domain = wx_app_info['wx_notify_domain']
        logging.info("got wx_app_id %r in uri", wx_app_id)
        redirect_url = "https://open.weixin.qq.com/connect/oauth2/authorize?appid=" + wx_app_id + "&redirect_uri=" + wx_notify_domain + "/bf/wx/vendors/" + vendor_id + "/vouchers/"+voucher_id+"/buy/step1&response_type=code&scope=snsapi_userinfo&state=1#wechat_redirect"
        # FIXME: should consult our own cached access_token and check whether
        # myinfo already holds a wx_openid (/api/myinfo-as-wx-user); only fall
        # back to the WeChat OAuth flow when it does not.
        access_token = self.get_secure_cookie("access_token")
        logging.info("access_token %r======", access_token)
        # Guard clause: no token at all -> straight to WeChat OAuth.
        if not access_token:
            self.redirect(redirect_url)
            return
        try:
            url = API_DOMAIN + "/api/myinfo-as-wx-user"
            http_client = HTTPClient()
            headers = {"Authorization": "Bearer "+access_token}
            response = http_client.fetch(url, method="GET", headers=headers)
            logging.info("got response.body %r", response.body)
            data = json_decode(response.body)
            user = data['rs']
            account_id = user['_id']
            avatar = user['avatar']
            nickname = user['nickname']
            timestamp = time.time()
            vendor_member = vendor_member_dao.vendor_member_dao().query_not_safe(vendor_id, account_id)
            if not vendor_member:
                # First contact with this vendor: create a member record.
                member_id = str(uuid.uuid1()).replace('-', '')
                _json = {'_id':member_id, 'vendor_id':vendor_id,
                        'account_id':account_id, 'account_nickname':nickname, 'account_avatar':avatar,
                        'comment':'...',
                        'bonus':0, 'history_bonus':0, 'vouchers':0, 'crets':0,
                        'rank':0, 'tour_leader':False,
                        'distance':0,
                        'create_time':timestamp, 'last_update_time':timestamp}
                vendor_member_dao.vendor_member_dao().create(_json)
                logging.info("create vendor member %r", account_id)
            else:
                # Refresh the cached profile fields.
                _json = {'vendor_id':vendor_id,
                        'account_id':account_id, 'account_nickname':nickname, 'account_avatar':avatar,
                        'last_update_time':timestamp}
                vendor_member_dao.vendor_member_dao().update(_json)
            _voucher = voucher_pay_dao.voucher_pay_dao().query_not_safe(voucher_id)
            # Stored amounts are integer cents; convert to yuan for display.
            _voucher['amount'] = float(_voucher['amount']) / 100
            _voucher['price'] = float(_voucher['price']) / 100
            vendor_member = vendor_member_dao.vendor_member_dao().query_not_safe(vendor_id, account_id)
            if vendor_member:
                # Backfill optional display fields (replaces two bare
                # try/excepts that probed for missing keys).
                vendor_member.setdefault('account_nickname', '')
                vendor_member.setdefault('account_avatar', '')
            # Render unconditionally: the confirmation page only needs the
            # voucher (previously the response was silently empty when the
            # member lookup failed).
            self.render('wx/voucher-order-confirm.html',
                    vendor_id=vendor_id,
                    voucher=_voucher)
        except Exception:
            # Previously a silent bare `except:`; log before falling back to
            # the OAuth flow so real bugs stay visible.
            logging.exception("voucher buy step0 failed; redirecting to WeChat OAuth")
            self.redirect(redirect_url)
class WxVoucherBuyStep1Handler(tornado.web.RequestHandler):
    """Step 1 of the WeChat voucher purchase flow.

    Flow:
      1. Without an OAuth ``code`` argument, redirect to the WeChat OAuth2
         authorize endpoint (scope ``snsapi_userinfo``); WeChat redirects
         back to this handler with ``?code=...``.
      2. Exchange the code for an access token + openid, fetch the WeChat
         profile, and strip emoji from the nickname (they cannot be stored
         in the database).
      3. Register the WeChat user against the backend API and persist the
         session ticket in secure cookies.
      4. Create the vendor-member record on first visit, otherwise refresh
         its cached nickname/avatar.
      5. Render the voucher order confirmation page.
    """

    def get(self, vendor_id, voucher_id):
        logging.info("got vendor_id %r in uri", vendor_id)
        logging.info("got voucher_id %r in uri", voucher_id)
        # NOTE(review): these two values are unused, but the lookups raise
        # KeyError (-> HTTP 500) when a client omits the header; kept to
        # preserve the original behavior.
        user_agent = self.request.headers["User-Agent"]
        lang = self.request.headers["Accept-Language"]
        wx_code = self.get_argument("code", "")
        logging.info("got wx_code=[%r] from argument", wx_code)
        wx_app_info = vendor_wx_dao.vendor_wx_dao().query(vendor_id)
        wx_app_id = wx_app_info['wx_app_id']
        logging.info("got wx_app_id %r in uri", wx_app_id)
        wx_app_secret = wx_app_info['wx_app_secret']
        wx_notify_domain = wx_app_info['wx_notify_domain']
        if not wx_code:
            # First visit: bounce through the WeChat OAuth2 authorize page.
            redirect_url = "https://open.weixin.qq.com/connect/oauth2/authorize?appid=" + wx_app_id + "&redirect_uri=" + wx_notify_domain + "/bf/wx/vendors/" + vendor_id + "/vouchers/" + voucher_id + "/buy/step1&response_type=code&scope=snsapi_userinfo&state=1#wechat_redirect"
            self.redirect(redirect_url)
            return
        accessToken = getAccessToken(wx_app_id, wx_app_secret, wx_code)
        access_token = accessToken["access_token"]
        logging.info("got access_token %r", access_token)
        wx_openid = accessToken["openid"]
        logging.info("got wx_openid %r", wx_openid)
        wx_userInfo = getUserInfo(access_token, wx_openid)
        nickname = wx_userInfo["nickname"]
        logging.info("got nickname=[%r]", nickname)
        avatar = wx_userInfo['headimgurl']
        logging.info("got avatar=[%r]", avatar)
        # Emoji garble in the database, so replace each with U+25FD
        # (white medium-small square) before persisting.
        try:
            # UCS-4 build: astral code points are single characters.
            Emoji = re.compile(u'[\U00010000-\U0010ffff]')
            nickname = Emoji.sub(u'\u25FD', nickname)
            # UCS-2 build: astral code points appear as surrogate pairs.
            Emoji = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
            nickname = Emoji.sub(u'\u25FD', nickname)
            logging.info("got nickname=[%r]", nickname)
        except re.error:
            logging.error("got nickname=[%r]", nickname)
            nickname = "anonymous"
        # Register (or log in) the WeChat user against the backend API.
        url = API_DOMAIN + "/api/auth/wx/register"
        http_client = HTTPClient()
        random = str(uuid.uuid1()).replace('-', '')
        headers = {"Authorization": "Bearer " + random}
        _json = json_encode({'wx_openid': wx_openid, 'nickname': nickname, 'avatar': avatar})
        response = http_client.fetch(url, method="POST", headers=headers, body=_json)
        logging.info("got response.body %r", response.body)
        data = json_decode(response.body)
        session_ticket = data['rs']
        account_id = session_ticket['account_id']
        self.set_secure_cookie("access_token", session_ticket['access_token'])
        self.set_secure_cookie("expires_at", str(session_ticket['expires_at']))
        self.set_secure_cookie("account_id", account_id)
        # Upsert the vendor-member record for this account.
        timestamp = time.time()
        vendor_member = vendor_member_dao.vendor_member_dao().query_not_safe(vendor_id, account_id)
        if not vendor_member:
            member_id = str(uuid.uuid1()).replace('-', '')  # fix: was misspelled 'memeber_id'
            _json = {'_id': member_id, 'vendor_id': vendor_id,
                     'account_id': account_id, 'account_nickname': nickname, 'account_avatar': avatar,
                     'comment': '...',
                     'bonus': 0, 'history_bonus': 0, 'vouchers': 0, 'crets': 0,
                     'rank': 0, 'tour_leader': False,
                     'distance': 0,
                     'create_time': timestamp, 'last_update_time': timestamp}
            vendor_member_dao.vendor_member_dao().create(_json)
            logging.info("create vendor member %r", account_id)
        else:
            _json = {'vendor_id': vendor_id,
                     'account_id': account_id, 'account_nickname': nickname, 'account_avatar': avatar,
                     'last_update_time': timestamp}
            vendor_member_dao.vendor_member_dao().update(_json)
        # Amounts are stored in cents; convert to yuan for display.
        _voucher = voucher_pay_dao.voucher_pay_dao().query_not_safe(voucher_id)
        _voucher['amount'] = float(_voucher['amount']) / 100
        _voucher['price'] = float(_voucher['price']) / 100
        vendor_member = vendor_member_dao.vendor_member_dao().query_not_safe(vendor_id, account_id)
        if vendor_member:
            # Older records may lack these keys; default them to empty
            # strings (replaces the bare try/except key probes).
            vendor_member.setdefault('account_nickname', '')
            vendor_member.setdefault('account_avatar', '')
        self.render('wx/voucher-order-confirm.html',
                    vendor_id=vendor_id,
                    voucher=_voucher)
class WxVoucherBuyStep2Handler(BaseHandler):
def post(self):
vendor_id = self.get_argument("vendor_id", "")
logging.info("got vendor_id %r", vendor_id)
voucher_id = self.get_argument("voucher_id", "")
account_id = self.get_secure_cookie("account_id")
_timestamp = time.time()
# 一分钟内不能创建第二个订单,
# 防止用户点击回退按钮,产生第二个订单
_old_orders = voucher_order_dao.voucher_order_dao().query_by_account(voucher_id, account_id)
# if len(_old_orders) > 0:
# for _old_order in _old_orders:
# if (_timestamp - _old_order['create_time']) < 60:
# return
# # 订单申报数目
# _applicant_num = self.get_argument("applicant_num", 1)
# 转换成元
_voucher = voucher_pay_dao.voucher_pay_dao().query_not_safe(voucher_id);
_amount = _voucher['amount']
_price = _voucher['price']
_voucher_id = _voucher['_id']
_create_time = _voucher['create_time']
_expired_time = _voucher['expired_time']
_qrcode_url = _voucher['qrcode_url']
_customer = vendor_member_dao.vendor_member_dao().query_not_safe(vendor_id,account_id);
try:
_customer['account_nickname']
except:
_customer['account_nickname'] = ''
try:
_customer['account_avatar']
except:
_customer['account_avatar'] = ''
_nickname = _customer['account_nickname']
_avatar = _customer['account_avatar']
# 创建一个代金券订单
_status = ORDER_STATUS_BF_INIT
if _price == 0:
_status = ORDER_STATUS_WECHAT_PAY_SUCCESS
_order_id = str(uuid.uuid1()).replace('-', '')
_timestamp = time.time()
# 创建订单索引
order_index = {
"_id": _order_id,
"order_tyoe": "buy_voucher",
"club_id": vendor_id,
"item_type": "voucher",
"item_id": _voucher_id,
"item_name": _title,
"distributor_type": "club",
"distributor_id": guest_club_id,
"create_time": _timestamp,
"pay_type": "wxpay",
"pay_status": _status,
"total_amount": _amount, #已经转换为分,注意转为数值
}
self.create_order(order_index)
_order = {"_id":_order_id, "vendor_id":vendor_id,
"account_id":account_id, "account_avatar":_avatar, "account_nickname":_nickname,
"voucher_id":_voucher_id, "voucher_price":_price, "voucher_amount":_amount,
"pay_type":"wxpay","applicant_num":1,
"create_time":_timestamp, "last_update_time":_timestamp,
'status':_status, 'review':False} # status=99, 微信返回的支付状态
voucher_order_dao.voucher_order_dao().create(_order);
num = voucher_order_dao.voucher_order_dao().count_not_review_by_vendor(vendor_id)
budge_num_dao.budge_num_dao().update({"_id":vendor_id, "voucher_order":num})
#创建微信订单
_total_amount = int(_voucher['price'])
_timestamp = (int)(time.time())
if _total_amount != 0:
# wechat 统一下单
# _openid = self.get_secure_cookie("wx_openid")
# logging.info("got _openid %r", _openid)
# 从comm中统一取
myinfo = self.get_myinfo_login()
_openid = myinfo['login']
_store_id = 'Aplan'
logging.info("got _store_id %r", _store_id)
_product_description = "voucher"
logging.info("got _product_description %r", _product_description)
wx_app_info = vendor_wx_dao.vendor_wx_dao().query(vendor_id)
wx_app_id = wx_app_info['wx_app_id']
logging.info("got wx_app_id %r in uri", wx_app_id)
wx_mch_key = wx_app_info['wx_mch_key']
wx_mch_id = wx_app_info['wx_mch_id']
wx_notify_domain = wx_app_info['wx_notify_domain']
key = wx_mch_key
nonceA = getNonceStr();
logging.info("got nonceA %r", nonceA)
#_ip = self.request.remote_ip
_remote_ip = self.request.headers['X-Real-Ip']
logging.info("got _remote_ip %r", _remote_ip)
total_fee = str(_total_amount)
logging.info("got total_fee %r", total_fee)
notify_url = wx_notify_domain + '/bf/wx/voucher-orders/notify'
logging.info("got notify_url %r", notify_url)
signA = getOrderSign(_remote_ip, notify_url, wx_app_id, wx_mch_id, nonceA, _openid, key, _store_id, _order_id, _product_description, total_fee)
logging.info("got signA %r", signA)
_xml = '<xml>' \
+ '<appid>' + wx_app_id + '</appid>' \
+ '<attach>' + _store_id + '</attach>' \
+ '<body>' + _product_description + '</body>' \
+ '<mch_id>' + wx_mch_id + '</mch_id>' \
+ '<nonce_str>' + nonceA + '</nonce_str>' \
+ '<notify_url>' + notify_url + '</notify_url>' \
+ '<openid>' + _openid + '</openid>' \
+ '<out_trade_no>' + _order_id + '</out_trade_no>' \
+ '<spbill_create_ip>' + _remote_ip + '</spbill_create_ip>' \
+ '<total_fee>' + total_fee + '</total_fee>' \
+ '<trade_type>JSAPI</trade_type>' \
+ '<sign>' + signA + '</sign>' \
+ '</xml>'
logging.info("got xml-------- %r", _xml)
url = "https://api.mch.weixin.qq.com/pay/unifiedorder"
http_client = HTTPClient()
response = http_client.fetch(url, method="POST", body=_xml)
logging.info("got response %r", response.body)
_order_return = parseWxOrderReturn(response.body)
logging.info("got _timestamp %r", str(_timestamp))
try:
prepayId = _order_return['prepay_id']
except:
_order_return['prepay_id'] = ''
prepayId = ''
logging.info("got prepayId %r", prepayId)
try:
nonceB = _order_return['nonce_str']
| |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQuAD data parsing module for tf.learn model.
This module loads TFRecord and hyperparameters from a specified directory
(files dumped by `squad_prepro.py`) and provides tensors for data feeding.
This module also provides data-specific functions for evaluation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import json
import os
import re
import string
import numpy as np
import tensorflow as tf
import squad_prepro
def get_input_fn(root_data_dir,
                 glove_dir,
                 data_type,
                 batch_size,
                 glove_size,
                 shuffle_files=True,
                 shuffle_examples=False,
                 queue_capacity=5000,
                 min_after_dequeue=1000,
                 num_epochs=None,
                 oom_test=False):
  """Get input function for the given data type from the given data directory.

  Args:
    root_data_dir: The directory to load data from. Corresponds to `to_dir`
      of `squad_prepro_main.py` file.
    glove_dir: path to the directory that contains GloVe files.
    data_type: `str` object, either `train` or `dev`.
    batch_size: Batch size of the inputs.
    glove_size: size of GloVe vector to load.
    shuffle_files: If `True`, shuffle the queue for the input files.
    shuffle_examples: If `True`, shuffle the queue for the examples.
      This queue comes after the file queue.
    queue_capacity: `int`, maximum number of examples in input queue.
    min_after_dequeue: `int`, for `RandomShuffleQueue`, minimum number of
      examples before dequeueing to ensure randomness.
    num_epochs: Number of epochs on the data. `None` means infinite.
    oom_test: Stress test to see if the current dataset and model causes
      out-of-memory error on GPU.
  Returns:
    Function definition `input_fn` compatible with `Experiment` object.
  """
  filenames = tf.gfile.Glob(
      os.path.join(root_data_dir, data_type, 'data', 'squad_data_*'))
  tf.logging.info('reading examples from following files:')
  for filename in filenames:
    tf.logging.info(filename)
  # Per-example parsing spec for `tf.parse_single_example` below.
  sequence_feature = tf.FixedLenSequenceFeature(
      [], tf.int64, allow_missing=True)
  str_sequence_feature = tf.FixedLenSequenceFeature(
      [], tf.string, allow_missing=True)
  int_feature = tf.FixedLenFeature([], tf.int64)
  str_feature = tf.FixedLenFeature([], tf.string)
  # Let N = batch_size, JX = max num context words, JQ = max num ques words,
  # C = num chars per word (fixed, default = 16)
  features = {
      'indexed_context_words': sequence_feature,  # Shape = [JX]
      'glove_indexed_context_words': sequence_feature,
      'indexed_context_chars': sequence_feature,  # Shape = [JX * C]
      'indexed_question_words': sequence_feature,  # Shape = [JQ]
      'glove_indexed_question_words': sequence_feature,
      'indexed_question_chars': sequence_feature,  # Shape = [JQ * C]
      'word_answer_starts': sequence_feature,  # Answer start index.
      'word_answer_ends': sequence_feature,  # Answer end index.
      'context_num_words':
          int_feature,  # Number of context words in each example. [A]
      'question_num_words':
          int_feature,  # Number of question words in each example. [A]
      'answers': str_sequence_feature,  # List of answers in each example. [A]
      'context_words': str_sequence_feature,  # [JX]
      'question_words': str_sequence_feature,  # [JQ]
      'context': str_feature,
      'id': str_feature,
      'num_answers': int_feature,
      'question': str_feature,
  }
  # Experiment-wide metadata (e.g. max sizes, used by the OOM test) and
  # split-specific metadata (e.g. GloVe vocab, chars per word).
  exp_metadata_path = os.path.join(root_data_dir, 'metadata.json')
  with tf.gfile.GFile(exp_metadata_path, 'r') as fp:
    exp_metadata = json.load(fp)
  metadata_path = os.path.join(root_data_dir, data_type, 'metadata.json')
  with tf.gfile.GFile(metadata_path, 'r') as fp:
    metadata = json.load(fp)
  # GloVe embedding matrix; `glove_indexed_*_words` features index into it.
  emb_mat = squad_prepro.get_idx2vec_mat(glove_dir, glove_size,
                                         metadata['glove_word2idx'])

  def _input_fn():
    """Input function compatible with `Experiment` object.

    Returns:
      A tuple of feature tensors and target tensors.
    """
    # TODO(seominjoon): There is bottleneck in data feeding, slow for N >= 128.
    filename_queue = tf.train.string_input_producer(
        filenames, shuffle=shuffle_files, num_epochs=num_epochs)
    reader = tf.TFRecordReader()
    _, se = reader.read(filename_queue)
    # TODO(seominjoon): Consider moving data filtering to here.
    features_op = tf.parse_single_example(se, features=features)
    names = list(features_op.keys())
    dtypes = [features_op[name].dtype for name in names]
    shapes = [features_op[name].shape for name in names]
    if shuffle_examples:
      # Data shuffling.
      rq = tf.RandomShuffleQueue(
          queue_capacity, min_after_dequeue, dtypes, names=names)
    else:
      rq = tf.FIFOQueue(queue_capacity, dtypes, names=names)
    enqueue_op = rq.enqueue(features_op)
    dequeue_op = rq.dequeue()
    # `tf.train.batch` needs a list of tensors, not a name->tensor dict.
    dequeue_op = [dequeue_op[name] for name in names]
    qr = tf.train.QueueRunner(rq, [enqueue_op])
    tf.train.add_queue_runner(qr)
    batch = tf.train.batch(
        dequeue_op,
        batch_size,
        capacity=queue_capacity,
        dynamic_pad=True,
        shapes=shapes,
        allow_smaller_final_batch=True,
        num_threads=5)
    batch = {name: each for name, each in zip(names, batch)}
    target_keys = [
        'word_answer_starts', 'word_answer_ends', 'answers', 'num_answers'
    ]
    # TODO(seominjoon) For cheating-safe, comment out #.
    features_batch = {
        key: val
        for key, val in batch.items()  # if key not in target_keys
    }
    # `metadata['emb_mat`]` contains GloVe embedding, and `xv` in
    # `features_batch` index into the vectors.
    features_batch['emb_mat'] = tf.constant(emb_mat)
    targets_batch = {key: batch[key] for key in target_keys}
    # Postprocessing for character data.
    # Due to the limitation of the python wrapper for prototxt,
    # the characters (by index) need to be flattened when saving on prototxt.
    # The following 'unflattens' the character tensor.
    actual_batch_size = tf.shape(batch['indexed_context_chars'])[0]
    features_batch['indexed_context_chars'] = tf.reshape(
        features_batch['indexed_context_chars'],
        [actual_batch_size, -1, metadata['num_chars_per_word']])
    features_batch['indexed_question_chars'] = tf.reshape(
        features_batch['indexed_question_chars'],
        [actual_batch_size, -1, metadata['num_chars_per_word']])
    # Make sure answer start and end positions are less than sequence lengths.
    # TODO(seominjoon) This will need to move to a separate test.
    with tf.control_dependencies([
        tf.assert_less(
            tf.reduce_max(targets_batch['word_answer_starts'], 1),
            features_batch['context_num_words'])
    ]):
      targets_batch['word_answer_starts'] = tf.identity(
          targets_batch['word_answer_starts'])
    with tf.control_dependencies([
        tf.assert_less(
            tf.reduce_max(targets_batch['word_answer_ends'], 1),
            features_batch['context_num_words'])
    ]):
      targets_batch['word_answer_ends'] = tf.identity(
          targets_batch['word_answer_ends'])
    # Stress test to ensure no OOM for GPU occurs.
    if oom_test:
      # Overwrite the real data with maximally-sized constant tensors.
      features_batch['indexed_context_words'] = tf.constant(
          np.ones(
              [batch_size, exp_metadata['max_context_size']], dtype='int64'))
      features_batch['glove_indexed_context_words'] = tf.constant(
          np.ones(
              [batch_size, exp_metadata['max_context_size']], dtype='int64'))
      features_batch['indexed_context_chars'] = tf.constant(
          np.ones(
              [
                  batch_size, exp_metadata['max_context_size'], exp_metadata[
                      'num_chars_per_word']
              ],
              dtype='int64'))
      features_batch['indexed_question_words'] = tf.constant(
          np.ones([batch_size, exp_metadata['max_ques_size']], dtype='int64'))
      features_batch['glove_indexed_question_words'] = tf.constant(
          np.ones([batch_size, exp_metadata['max_ques_size']], dtype='int64'))
      features_batch['indexed_question_chars'] = tf.constant(
          np.ones(
              [
                  batch_size, exp_metadata['max_ques_size'], exp_metadata[
                      'num_chars_per_word']
              ],
              dtype='int64'))
      features_batch['question_num_words'] = tf.constant(
          np.ones([batch_size], dtype='int64') * exp_metadata['max_ques_size'])
      features_batch['context_num_words'] = tf.constant(
          np.ones([batch_size], dtype='int64') *
          exp_metadata['max_context_size'])
    return features_batch, targets_batch

  return _input_fn
def get_params(root_data_dir):
  """Load data-specific parameters from `root_data_dir`.

  Args:
    root_data_dir: The data directory to load parameter files from.
      This is equivalent to the `output_dir` of `data/squad_prepro.py`.
  Returns:
    A dict of hyperparameters (`vocab_size` and `char_vocab_size`).
  """
  path = os.path.join(root_data_dir, 'indexer.json')
  with tf.gfile.GFile(path, 'r') as fp:
    indexer = json.load(fp)
  word2idx = indexer['word2idx']
  char2idx = indexer['char2idx']
  return {'vocab_size': len(word2idx), 'char_vocab_size': len(char2idx)}
def get_eval_metric_ops(targets, predictions):
  """Build the dictionary of eval metrics for an `Experiment` object.

  Args:
    targets: `targets` that go into `model_fn` of `Experiment`.
    predictions: Dictionary of predictions, output of `get_preds`.
  Returns:
    A dictionary of eval metrics.
  """
  # TODO(seominjoon): yp should also consider no answer case.
  start_pred = tf.expand_dims(predictions['yp1'], -1)
  end_pred = tf.expand_dims(predictions['yp2'], -1)
  # Mask out padded answer slots; each example may have several answers.
  mask = tf.sequence_mask(targets['num_answers'])
  start_hit = tf.reduce_any(
      tf.equal(targets['word_answer_starts'], start_pred) & mask, 1)
  end_hit = tf.reduce_any(
      tf.equal(targets['word_answer_ends'], end_pred) & mask, 1)
  span_hit = start_hit & end_hit
  # EM and F1 are computed in Python via py_func; both take the same inputs.
  score_inputs = [
      predictions['a'], targets['answers'], predictions['has_answer'], mask
  ]
  em = tf.py_func(
      _enum_fn(_exact_match_score, dtype='float32'), score_inputs, 'float32')
  f1 = tf.py_func(
      _enum_fn(_f1_score, dtype='float32'), score_inputs, 'float32')
  return {
      'acc1': tf.metrics.mean(tf.cast(start_hit, 'float')),
      'acc2': tf.metrics.mean(tf.cast(end_hit, 'float')),
      'acc': tf.metrics.mean(tf.cast(span_hit, 'float')),
      'em': tf.metrics.mean(em),
      'f1': tf.metrics.mean(f1),
  }
def get_answer_op(context, context_words, answer_start, answer_end):
  """Return a `py_func` op that maps each span in the batch to its answer text."""
  span_inputs = [context, context_words, answer_start, answer_end]
  return tf.py_func(_enum_fn(_get_answer), span_inputs, 'string')
def _get_answer(context, context_words, answer_start, answer_end):
"""Get answer given context, context_words, and span.
Args:
context: A list of bytes, to be decoded with utf-8.
context_words: A list of a list of bytes, to be decoded with utf-8.
answer_start: An int for answer start.
answer_end: An int for answer end.
Returns:
A list of bytes, encoded with utf-8, for the answer.
"""
context = context.decode('utf-8')
context_words = [word.decode('utf-8') for word in context_words]
pos = 0
answer_start_char = None
answer_end_char = None
for i, word in enumerate(context_words):
pos = context.index(word, pos)
if answer_start == i:
answer_start_char = pos
pos += len(word)
if answer_end == i:
answer_end_char = pos
break
assert answer_start_char is not None, (
'`answer_start` is not found in context. '
'context=`%s`, context_words=`%r`, '
'answer_start=%d, answer_end=%d') % (context, context_words, answer_start,
answer_end)
assert answer_end_char is not None, (
'`answer_end` is not found in context. '
'context=`%s`, context_words=`%r`, '
'answer_start=%d, answer_end=%d') % (context, context_words, answer_start,
answer_end)
answer = context[answer_start_char:answer_end_char].encode('utf-8')
return answer
def _f1_score(prediction, ground_truths, has_answer, answer_mask):
  """Masked max F1 score of `prediction` against the reference answers."""
  pred_text = prediction.decode('utf-8')
  truths = [truth.decode('utf-8') for truth in ground_truths]
  ref_is_no_answer = truths[0] == squad_prepro.NO_ANSWER
  if not has_answer:
    # Model abstained: full credit iff the reference is also "no answer".
    return float(ref_is_no_answer)
  if ref_is_no_answer:
    return 0.0
  per_truth = np.array(
      [_f1_score_(pred_text, truth) for truth in truths])
  return max(per_truth * answer_mask.astype(float))
def _exact_match_score(prediction, ground_truths, has_answer, answer_mask):
  """Masked max exact-match score of `prediction` against the references."""
  pred_text = prediction.decode('utf-8')
  truths = [truth.decode('utf-8') for truth in ground_truths]
  ref_is_no_answer = truths[0] == squad_prepro.NO_ANSWER
  if not has_answer:
    # Model abstained: full credit iff the reference is also "no answer".
    return float(ref_is_no_answer)
  if ref_is_no_answer:
    return 0.0
  per_truth = np.array(
      [float(_exact_match_score_(pred_text, truth)) for truth in truths])
  return max(per_truth * answer_mask.astype(float))
def _enum_fn(fn, dtype='object'):
def new_fn(*args):
return np.array([fn(*each_args) for each_args in zip(*args)], dtype=dtype)
return new_fn
# Functions below are adapted from the official SQuAD evaluation script.