text stringlengths 38 1.54M |
|---|
import pandas as pd
import numpy as np

# --- Group-wise imputation of missing values -------------------------------
# Three teachers and five students; the last two rows have a missing age so
# we can demonstrate filling NaNs with the median age of the same job group.
school_id_list = [{'name': 'John', 'job': "teacher", 'age': 40},
                  {'name': 'Nate', 'job': "teacher", 'age': 35},
                  {'name': 'Yuna', 'job': "teacher", 'age': 37},
                  {'name': 'Abraham', 'job': "student", 'age': 10},
                  {'name': 'Brian', 'job': "student", 'age': 12},
                  {'name': 'Janny', 'job': "student", 'age': 11},
                  {'name': 'Nate', 'job': "teacher", 'age': None},
                  {'name': 'John', 'job': "student", 'age': None}]
df = pd.DataFrame(school_id_list, columns=['name', 'job', 'age'])
# Direct assignment instead of `df['age'].fillna(..., inplace=True)`:
# in-place fillna on a column selection relies on chained assignment,
# which is deprecated and stops working under pandas Copy-on-Write (3.0).
df['age'] = df['age'].fillna(df.groupby('job')['age'].transform('median'))
# print(df)

# --- Deriving columns with apply -------------------------------------------
date_list = [{'yyyy-mm-dd': '2000-06-27'},
             {'yyyy-mm-dd': '2002-09-24'},
             {'yyyy-mm-dd': '2005-12-20'}]
df2 = pd.DataFrame(date_list, columns=['yyyy-mm-dd'])


def extract_year(column):
    """Return the year part of a 'yyyy-mm-dd' string."""
    return column.split("-")[0]


def get_age(year, current_year):
    """Return the age in `current_year` of someone born in `year`."""
    return current_year - int(year)


def get_introduce(age, prefix, suffix):
    """Return `prefix` + str(age) + `suffix` as a single string."""
    return prefix + str(age) + suffix


df2['year'] = df2['yyyy-mm-dd'].apply(extract_year)
df2['age'] = df2['year'].apply(get_age, current_year=2018)


def get_introduce2(row):
    """Build an introduction sentence from a row's year and age."""
    # Name fixed from the original typo `get_inroduce2`.
    return "I was born in "+str(row.year)+" my age is " + str(row.age)


# df2.introduce is the same as df2['introduce'].
df2['introduce2'] = df2.apply(get_introduce2, axis=1)
df2['introduce'] = df2['age'].apply(get_introduce, prefix="I am ",
                                    suffix=" years old")
# print(df2)

# --- apply on a single column ----------------------------------------------
date_list2 = [{'date': '2000-06-27'},
              {'date': '2002-09-24'},
              {'date': '2005-12-20'}]
df3 = pd.DataFrame(date_list2, columns=['date'])
df3['year'] = df3['date'].apply(extract_year)
# print(df3)

# --- map for categorical encoding ------------------------------------------
job_list = [{'age': 20, 'job': 'student'},
            {'age': 30, 'job': 'developer'},
            {'age': 30, 'job': 'teacher'}]
df4 = pd.DataFrame(job_list)
df4.job = df4.job.map({"student": 1, "developer": 2, "teacher": 3})
# print(df4)

# --- element-wise function over the whole frame ----------------------------
# Use applymap/apply for all columns; plain Series.apply for one column.
x_y = [{'x': 5.5, 'y': -5.6, 'z': -1.1},
       {'x': -5.2, 'y': 5.5, 'z': -2.2},
       {'x': -1.6, 'y': -4.5, 'z': -3.3}]
df5 = pd.DataFrame(x_y)
# `apply` with a NumPy ufunc rounds every column; DataFrame.applymap is
# deprecated (renamed to DataFrame.map in pandas 2.1), so this form works
# on every pandas version.
df5 = df5.apply(np.around)
print(df5)
# Small walkthrough of Python dict operations.
student = {'name': 'john', 'age': 25, 'courses': ['math', 'CompSci']}
print(student)
# Look up values by key (raises KeyError for a missing key).
print(student['name'])
print(student['courses'][1])
# dict.get returns a default instead of raising for missing keys.
print(student.get('courses', 'not found'))
# Add a new key/value pair.  (The key keeps the original tutorial's
# spelling so the printed output is unchanged.)
student['mobile_mumber'] = '555-555-5555'
print(student.get('mobile_mumber', 'not found'))
# Overwrite an existing value.
student['name'] = 'jane'
print(student.get('name', 'not found'))
# Update several entries at once (adds 'address', changes 'name' and 'age').
student.update({'name': 'june', 'age': 21, 'address': '45 jems street'})
print(student)
# Delete an entry.
del student['address']
print(student)
# pop removes an entry and returns its value.
removed_age = student.pop('age')
print(student)
# Number of entries.
print(len(student))
# Views over keys, values, and (key, value) pairs.
print(student.keys())
print(student.values())
print(student.items())
# Iterate over key/value pairs.
for key, value in student.items():
    print(key, value)
import sys
import re
from node import *

# Read a CSV truth-table file whose path is the first CLI argument.
# The first row holds the variable names; the last column of each data row
# is the function's output value.
with open(sys.argv[1]) as value:
    disp = []
    for wall in value:
        disp.append(re.split(",", re.sub(r"\n", "", wall)))

arr = []
arg = disp[0]  # header row: variable names (last column is the output)
for row in disp:
    loc = ""
    # `==` replaces the original `is '1'`: identity comparison on string
    # literals is implementation-dependent and warned about by CPython.
    if row[-1] == '1':
        iterator = 0
        loc += '{'
        # Build a minterm string: plain name for a 1, "-name" for a 0,
        # terms separated by dots.
        for nod in arg[:-1]:
            if row[iterator] == '1':
                loc += nod
            else:
                loc += "-" + nod
            if iterator + 1 < len(arg[:-1]):
                loc += "."
            iterator += 1
        loc += '}'
        arr.append(loc)

# Chain the minterm strings into a linked list of Nodes and print it.
currentroot = Node(arr[0])
root = currentroot
for lis in arr[1:]:
    currentroot.add_child(Node(lis))
    currentroot = currentroot.child
print(root.getstring())
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to handle Hive related utilities like creating connection to Hive
database, executing a query, check whether the provided database & table
exists etc."""
import json
import logging
import os
import subprocess
import time
from uuid import uuid4

from dateutil.parser import parse
from pyhive import exc, hive
from thrift.transport import TTransport

from hive_to_bigquery import custom_exceptions
from hive_to_bigquery.database_component import DatabaseComponent
from hive_to_bigquery.utilities import calculate_time
logger = logging.getLogger('Hive2BigQuery')
class HiveComponent(DatabaseComponent):
"""Hive component to handle functions related to it.
Has utilities which do Hive operations using the Hive connection, such as
creating staging table, loading data into staging table, listing
underlying files, getting information on data to migrate, migrating data
to BigQuery, and checking for new data in the source Hive table etc.
Attributes:
host (str): Hive server host name.
port (int): Port to be used.
user (str): Hive user name.
connection (pyhive.hive.Connection): Hive connection object.
"""
def __init__(self, **kwargs):
    """Initializes the Hive component.

    Args:
        **kwargs: Connection attributes (host, port, user etc.) consumed
            by the :class:`DatabaseComponent` base initializer.
    """
    logger.debug("Initializing Hive Component")
    super(HiveComponent, self).__init__(**kwargs)
def get_connection(self):
    """Opens a connection to the Hive server.

    Returns:
        pyhive.hive.Connection: A live Hive connection.

    Raises:
        custom_exceptions.ConnectionError: If the thrift transport to the
            Hive server cannot be established.
    """
    logger.debug("Getting Hive Connection")
    try:
        return hive.connect(host=self.host, port=self.port,
                            username=self.user)
    except TTransport.TTransportException as error:
        logger.error("Failed to establish Hive connection")
        raise custom_exceptions.ConnectionError from error
def get_cursor(self):
    """Creates a cursor on the current Hive connection.

    Returns:
        pyhive.hive.Cursor: pyhive cursor object.
    """
    logger.debug("Getting Hive cursor")
    return self.connection.cursor()
def execute_query(self, query_cmds):
    """Executes Hive query/queries and returns the results.

    Args:
        query_cmds (Union[List, str]): Query or list of queries to run.

    Returns:
        List: Result rows of the (last) query; an empty list when the
        statement produced no result set (e.g. ``set ...``).

    Raises:
        custom_exceptions.HiveExecutionError: If Hive fails to execute
            any of the queries.
    """
    cursor = self.get_cursor()
    try:
        queries = query_cmds if isinstance(query_cmds, list) else [query_cmds]
        for query in queries:
            cursor.execute(query)
        try:
            results = cursor.fetchall()
        except exc.ProgrammingError:
            # Statements like "set ..." have no result set to fetch.
            results = []
        # BUGFIX: the original returned from inside a ``finally`` block,
        # which silently swallowed any exception raised by fetchall other
        # than ProgrammingError (and then crashed on the unbound
        # ``results``).  Returning here lets real errors reach the
        # OperationalError handler below.
        return results
    except exc.OperationalError as error:
        logger.error("Hive Query {} execution failed".format(
            str(query_cmds)))
        raise custom_exceptions.HiveExecutionError from error
def check_database_exists(self, database_name):
    """Reports whether the given Hive database exists.

    Args:
        database_name (str): Hive database name.

    Returns:
        bool: True if the database is listed by Hive, else False.
    """
    rows = self.execute_query("SHOW DATABASES")
    return any(database_name in row for row in rows)
def check_table_exists(self, database_name, table_name):
    """Reports whether the given Hive table exists in the database.

    Args:
        database_name (str): Hive database name.
        table_name (str): Hive table name.

    Returns:
        bool: True if the table is listed by Hive, else False.
    """
    rows = self.execute_query(
        "SHOW TABLES FROM {}".format(database_name))
    return any(table_name in row for row in rows)
def get_table_location(self, database_name, table_name):
    """Fetches the storage location of a Hive table.

    Switches Hive's DDL output to JSON, runs ``desc extended`` and pulls
    the location out of the parsed table descriptor.

    Args:
        database_name (str): Hive database name.
        table_name (str): Hive table name.

    Returns:
        str: Location of the Hive table.
    """
    results = self.execute_query([
        "set hive.ddl.output.format=json",
        "desc extended {0}.{1}".format(database_name, table_name),
    ])
    table_info = json.loads(results[0][0])['tableInfo']
    return table_info['sd']['location']
@staticmethod
def list_hdfs_files(location):
    """Lists the underlying HDFS files with non-zero size.

    Runs ``hdfs dfs -ls`` on the given location and returns the path
    (last column) of every listed entry whose size column is non-zero.

    Args:
        location (str): Hive table location.

    Returns:
        List: List of the underlying data files.

    Raises:
        custom_exceptions.HDFSCommandError: If the hdfs command fails.
    """
    # Argument-list subprocess call replaces the original
    # ``os.system("hdfs dfs -ls {} > tmpfile")``: no shell interpolation
    # of `location`, and no temporary file to create and delete.
    process = subprocess.run(["hdfs", "dfs", "-ls", location],
                             stdout=subprocess.PIPE,
                             universal_newlines=True)
    if process.returncode:
        logger.error("hdfs command execution failed")
        raise custom_exceptions.HDFSCommandError
    hdfs_files_list = []
    listing_started = False
    for line in process.stdout.splitlines():
        if not listing_started:
            # Everything up to (and including) the "Found N items"
            # banner is preamble; the file rows follow it.
            listing_started = line.startswith("Found ")
            continue
        fields = line.split()
        # fields[4] is the size column; skip zero-byte files and any
        # short/odd lines that do not look like a listing row.
        if len(fields) > 4 and fields[4] != '0':
            hdfs_files_list.append(fields[-1])
    return hdfs_files_list
def list_partitions(self, database_name, table_name):
    """Collects per-partition staging information for a Hive table.

    Args:
        database_name (str): Hive database name.
        table_name (str): Hive table name.

    Returns:
        List: One dict per partition, holding a unique staging table
        name and the WHERE clause selecting that partition.
    """
    result_set = self.execute_query([
        "set hive.ddl.output.format=json",
        "SHOW PARTITIONS {0}.{1}".format(database_name, table_name),
    ])
    partitions = json.loads(result_set[0][0])['partitions']
    tracking_data = []
    for entry in partitions:
        # WHERE clause built by joining the partition column names with
        # their values.
        clause = 'WHERE ' + ' AND '.join(
            part['columnName'] + '=' + '"' + part['columnValue'] + '"'
            for part in entry['values'])
        tracking_data.append({
            'table_name': 'stage__{}__{}'.format(
                table_name.lower(), str(uuid4()).replace("-", "_")),
            'clause': clause,
        })
    return tracking_data
def get_hive_table_row_count(self, hive_table_model, clause=''):
    """Counts rows in the Hive table, optionally filtered by a clause.

    Args:
        hive_table_model (:class:`HiveTableModel`): Wrapper to Hive table
            details.
        clause (str): WHERE clause to filter the table on partitions,
            if any.

    Returns:
        int: Number of rows as reported by the COUNT(*) query.
    """
    count_query = "SELECT COUNT(*) FROM {0}.{1} {2}".format(
        hive_table_model.db_name, hive_table_model.table_name, clause)
    return self.execute_query(count_query)[0][0]
def get_info_on_data_to_migrate(self, hive_table_model):
    """Gathers the data-to-migrate information for a first migration run.

    Args:
        hive_table_model (:class:`HiveTableModel`): Wrapper to Hive table
            details.

    Returns:
        List: Dict elements, each describing a chunk of data that needs
        to be migrated.
    """
    if hive_table_model.is_partitioned is False:
        return self.get_non_partition_table_info(hive_table_model)
    return self.get_partition_table_info(hive_table_model)
def get_non_partition_table_info(self, hive_table_model):
    """Gets information on data to be migrated in case of a non-partition
    table.

    Validates the incremental column (if any provided), queries the Hive
    table to get the minimum and maximum values of the column and sets
    the HiveTableModel attributes related to incremental column.

    Args:
        hive_table_model (:class:`HiveTableModel`): Wrapper to Hive table
            details.

    Returns:
        List: A list of only one dict element containing the information
        of data to migrate.

    Raises:
        custom_exceptions.IncrementalColumnError: If the provided
            incremental column is absent or fails validation.
    """
    tracking_data = list()
    if hive_table_model.inc_col is not None:
        logger.info("Validating given incremental column...")
        # If the provided incremental column is of timestamp/date type,
        # it cannot be validated by counting the number of rows.
        if hive_table_model.inc_col in hive_table_model.timestamp_type_col:
            logger.debug(
                "Fetching minimum and maximum values of the timestamp "
                "incremental column...")
            results = self.execute_query(
                "SELECT MIN({0}),MAX({0}) FROM {1}.{2}".format(
                    hive_table_model.inc_col, hive_table_model.db_name,
                    hive_table_model.table_name))
            col_min, col_max = results[0]
            # Sets incremental attributes of hive_table_model.
            hive_table_model.inc_col_type = 'ts'
            logger.info(
                "Incremental column {} found. Range - {} - {}".format(
                    hive_table_model.inc_col, col_min, col_max))
        # Validates the incremental column of int data type by comparing
        # the number of distinct values and number of rows.
        elif hive_table_model.inc_col in hive_table_model.int_type_col:
            logger.debug("Counting the total number of rows...")
            results = self.execute_query(
                "SELECT COUNT(*) FROM {}.{}".format(
                    hive_table_model.db_name, hive_table_model.table_name))
            n_rows = results[0][0]
            logger.debug("Number of rows in the table: %d", n_rows)
            logger.debug(
                "Fetching maximum value of the incremental column...")
            query = "SELECT COUNT(DISTINCT({0})),MIN({0}),MAX({0}) " \
                    "FROM {1}.{2}".format(
                        hive_table_model.inc_col, hive_table_model.db_name,
                        hive_table_model.table_name)
            results = self.execute_query(query)
            distinct_col_values, col_min, col_max = results[0]
            # Checks if number of distinct values matches the number of
            # rows AND the values form a gapless range — i.e. the column
            # behaves like a dense unique id.
            if n_rows == distinct_col_values and (1 + col_max -
                                                  col_min == n_rows):
                # Sets incremental attributes of hive_table_model.
                hive_table_model.inc_col_type = 'int'
                logger.info(
                    "Incremental column {} valid. Range - {} - {}".format(
                        hive_table_model.inc_col, col_min, col_max))
            else:
                logger.error(
                    "Incremental column {0} not valid. Range - {1} - "
                    "{2}\nTry another incremental column or without "
                    "providing incremental column".format(
                        hive_table_model.inc_col, col_min, col_max))
                raise custom_exceptions.IncrementalColumnError
        else:
            # Column is neither a known timestamp nor a known int column.
            logger.error("Given incremental column is not present.")
            raise custom_exceptions.IncrementalColumnError
    # NOTE(review): col_min/col_max are bound only inside the validation
    # branches above; this assumes is_inc_col_present is True only after
    # a successful validation (inc_col was supplied) — confirm in
    # HiveTableModel.
    if hive_table_model.is_inc_col_present:
        tracking_data.append({
            'table_name': hive_table_model.staging_table_name,
            'id': 1,
            'inc_col_min': col_min,
            'inc_col_max': col_max,
            'clause': ""
        })
    else:
        tracking_data.append({
            'table_name': hive_table_model.staging_table_name,
            'clause': ""
        })
    return tracking_data
def get_partition_table_info(self, hive_table_model):
    """Gets information on data to be migrated in case of a partition table.

    Validates the incremental column (if any provided), queries the Hive
    table to get the minimum and maximum values of the column in every
    partition and sets the HiveTableModel attributes related to
    incremental column.

    Args:
        hive_table_model (:class:`HiveTableModel`): Wrapper to Hive table
            details.

    Returns:
        List: A list of dict elements containing the information of data
        to migrate.

    Raises:
        custom_exceptions.IncrementalColumnError: If the provided
            incremental column is absent or fails validation.
    """
    # Information about partitions.
    tracking_data = self.list_partitions(hive_table_model.db_name,
                                         hive_table_model.table_name)
    for item in tracking_data:
        # Id is set to 1 since the partition is migrated for the first time.
        item['id'] = 1
    if hive_table_model.inc_col is not None:
        logger.info("Validating given incremental column...")
        # If the provided incremental column is of timestamp/date type,
        # it cannot be validated by counting the number of rows.
        if hive_table_model.inc_col in hive_table_model.timestamp_type_col:
            logger.debug(
                "Fetching minimum and maximum values of the timestamp "
                "incremental column...")
            for item in tracking_data:
                clause = item['clause']
                results = self.execute_query(
                    "SELECT MIN({0}),MAX({0}) FROM {1}.{2} {3}".format(
                        hive_table_model.inc_col, hive_table_model.db_name,
                        hive_table_model.table_name, clause))
                col_min, col_max = results[0]
                item['inc_col_min'] = col_min
                item['inc_col_max'] = col_max
                # Setting incremental attributes of hive_table_model.
                hive_table_model.inc_col_type = 'ts'
                logger.info(
                    "Incremental column {} found in table {}. Range - {} "
                    "- {}".format(hive_table_model.inc_col, clause,
                                  col_min, col_max))
        # Validates the incremental column of int data type by comparing
        # the number of distinct values and number of rows.
        elif hive_table_model.inc_col in hive_table_model.int_type_col:
            # Sets incremental attributes of hive_table_model.
            hive_table_model.inc_col_type = 'int'
            # Per-partition row counts keyed by the partition WHERE clause.
            n_rows = {}
            for data in tracking_data:
                clause = data['clause']
                logger.debug("Counting the number of rows %s ...", clause)
                results = self.execute_query(
                    "SELECT COUNT(*) FROM {0}.{1} {2}".format(
                        hive_table_model.db_name,
                        hive_table_model.table_name, clause))
                n_rows[clause] = results[0][0]
                logger.debug("Number of rows in the table %s : %s", clause,
                             n_rows[clause])
            for item in tracking_data:
                clause = item['clause']
                logger.debug(
                    "Fetching maximum value of the incremental column %s "
                    "...", clause)
                query = "SELECT COUNT(DISTINCT({0})),MIN({0}),MAX({0}) " \
                        "FROM {1}.{2} {3}".format(
                            hive_table_model.inc_col,
                            hive_table_model.db_name,
                            hive_table_model.table_name, clause)
                results = self.execute_query(query)
                distinct_col_values, col_min, col_max = results[0]
                # Checks if the number of distinct values matches the
                # number of rows for every partition (dense unique range).
                if n_rows[clause] == distinct_col_values and (
                        1 + col_max - col_min == n_rows[clause]):
                    # Sets incremental attributes of hive_table_model.
                    item['inc_col_min'] = col_min
                    item['inc_col_max'] = col_max
                    logger.debug(
                        "Incremental column %s found in table %s. Range - "
                        "%s - %s", hive_table_model.inc_col, clause,
                        col_min, col_max)
                else:
                    logger.error(
                        "Incremental column {} not valid in partition {}. "
                        "Range - {} - {}\nTry another incremental column "
                        "or without providing incremental column".format(
                            hive_table_model.inc_col, clause, col_min,
                            col_max))
                    raise custom_exceptions.IncrementalColumnError
        # NOTE(review): this final check applies only when an incremental
        # column was supplied; otherwise a non-incremental partitioned
        # migration would be rejected here — confirm intended nesting.
        if hive_table_model.is_inc_col_present:
            logger.info("Incremental column {} found".format(
                hive_table_model.inc_col))
        else:
            logger.error("Given incremental column is not present.")
            raise custom_exceptions.IncrementalColumnError
    return tracking_data
def create_and_load_stage_table(self,
                                hive_table_model,
                                table_name,
                                clause=''):
    """Creates a Hive staging table and fills it from the source table.

    Args:
        hive_table_model (:class:`HiveTableModel`): Wrapper to Hive table
            details.
        table_name (str): Staging table name.
        clause (str): Optional WHERE clause; only matching rows are
            inserted into the staging table.
    """
    logger.info("Staging for table " + table_name + "...")
    # Substitute the staging table name into the saved CREATE statement.
    ddl_statement = hive_table_model.create_statement.replace(
        "TABLE_NAME_HERE", table_name)
    self.execute_query(ddl_statement)
    logger.debug("Table %s created in Hive. Inserting data...", table_name)
    t_start = time.time()
    insert_query = "INSERT OVERWRITE TABLE {} SELECT * FROM {}.{} {}".format(
        table_name, hive_table_model.db_name, hive_table_model.table_name,
        clause)
    logger.info(insert_query)
    self.execute_query(insert_query)
    t_end = time.time()
    time_hive_stage = calculate_time(t_start, t_end)
    logger.debug("Loaded data from %s into %s - Time taken - %s",
                 hive_table_model.table_name, table_name, time_hive_stage)
def migrate_data(self, mysql_component, bq_component, gcs_component,
                 hive_table_model, bq_table_model, gcs_bucket_name,
                 table_data):
    """Routes migration to the partitioned or non-partitioned path.

    Args:
        mysql_component (:class:`MySQLComponent`): Instance of
            MySQLComponent to connect to MySQL.
        bq_component (:class:`BigQueryComponent`): Instance of
            BigQueryComponent to do BigQuery operations.
        gcs_component (:class:`GCSStorageComponent`): Instance of
            GCSStorageComponent to do GCS operations.
        hive_table_model (:class:`HiveTableModel`): Wrapper to Hive table
            details.
        bq_table_model (:class:`BigQueryTableModel`): Wrapper to BigQuery
            table details.
        gcs_bucket_name (str): GCS bucket name.
        table_data (List): Information of data to migrate.
    """
    logger.debug("Populating tracking table..")
    # Pick the handler matching the table layout; both share a signature.
    if hive_table_model.is_partitioned is False:
        handler = self.migrate_non_partition_table
    else:
        handler = self.migrate_partition_table
    handler(mysql_component, bq_component, gcs_component, hive_table_model,
            bq_table_model, gcs_bucket_name, table_data)
def migrate_non_partition_table(self, mysql_component, bq_component,
                                gcs_component, hive_table_model,
                                bq_table_model, gcs_bucket_name,
                                table_data):
    """Migrates Hive data in case of a non-partitioned table.

    Invokes the function to create and load stage table, gets the staging
    table location, and lists down the underneath HDFS files. Updates the
    file paths in the tracking table and calls the function stage_to_gcs
    to copy files to GCS.

    Args:
        mysql_component (:class:`MySQLComponent`): Instance of
            MySQLComponent to connect to MySQL.
        bq_component (:class:`BigQueryComponent`): Instance of
            BigQueryComponent to do BigQuery operations.
        gcs_component (:class:`GCSStorageComponent`): Instance of
            GCSStorageComponent to do GCS operations.
        hive_table_model (:class:`HiveTableModel`): Wrapper to Hive table
            details.
        bq_table_model (:class:`BigQueryTableModel`): Wrapper to BigQuery
            table details.
        gcs_bucket_name (str): GCS bucket name.
        table_data (List): Information of data to migrate (exactly one
            element for a non-partitioned table).
    """
    table_name = table_data[0]['table_name']
    clause = table_data[0]['clause']
    insert_clause = clause
    if hive_table_model.is_inc_col_present:
        identifier = int(table_data[0]['id'])
        inc_col_min = table_data[0]['inc_col_min']
        inc_col_max = table_data[0]['inc_col_max']
        if identifier == 1:
            # Includes lower bound value in the stage table.
            insert_clause = "where {0}>='{1}' and {0}<='{2}'".format(
                hive_table_model.inc_col, inc_col_min, inc_col_max)
        else:
            # Later runs exclude the lower bound (already migrated).
            insert_clause = "where {0}>'{1}' and {0}<='{2}'".format(
                hive_table_model.inc_col, inc_col_min, inc_col_max)
    # Creating staging table and loading data.  Unsupported formats are
    # always staged; supported formats are staged only on incremental
    # (non-first) runs, otherwise the source files are used directly.
    if hive_table_model.is_table_type_supported is False:
        self.create_and_load_stage_table(hive_table_model, table_name,
                                         insert_clause)
        source_location = self.get_table_location("default", table_name)
    else:
        if hive_table_model.is_inc_col_present and \
                hive_table_model.is_first_run is False:
            self.create_and_load_stage_table(hive_table_model, table_name,
                                             insert_clause)
            source_location = self.get_table_location(
                "default", table_name)
        else:
            source_location = self.get_table_location(
                hive_table_model.db_name, hive_table_model.table_name)
    # Lists underlying HDFS files.
    hdfs_files_list = self.list_hdfs_files(source_location)
    logger.info("Updating file paths in the tracking table..")
    # NOTE(review): SQL below is string-built; values originate from the
    # Hive metastore and internal models — confirm they are trusted, or
    # switch to parameterized queries.
    for file_path in hdfs_files_list:
        if hive_table_model.is_inc_col_present:
            query = "INSERT INTO {0} (id,table_name,inc_col_min," \
                    "inc_col_max,clause,file_path,gcs_copy_status," \
                    "bq_job_id,bq_job_retries,bq_job_status) VALUES({1}," \
                    "'{2}','{3}','{4}','{5}','{6}','TODO','TODO',0," \
                    "'TODO')".format(
                        hive_table_model.tracking_table_name, identifier,
                        table_name, inc_col_min, inc_col_max, clause,
                        file_path)
        else:
            query = "INSERT INTO {0} (table_name,clause,file_path," \
                    "gcs_copy_status,bq_job_id,bq_job_retries," \
                    "bq_job_status) VALUES('{1}','{2}','{3}','TODO'," \
                    "'TODO',0,'TODO')".format(
                        hive_table_model.tracking_table_name, table_name,
                        clause, file_path)
        # Commits information about the staging files.
        mysql_component.execute_transaction(query)
    # Copies files from HDFS to GCS.
    gcs_component.stage_to_gcs(mysql_component, bq_component,
                               hive_table_model, bq_table_model,
                               gcs_bucket_name)
def migrate_partition_table(self, mysql_component, bq_component,
                            gcs_component, hive_table_model,
                            bq_table_model, gcs_bucket_name, table_data):
    """Migrates Hive data in case of a partition table.

    Invokes the function to create and load stage table, gets the staging
    table location, and lists down the underneath HDFS files. Updates the
    file paths in the tracking table and calls the function stage_to_gcs
    to copy files to GCS.

    Args:
        mysql_component (:class:`MySQLComponent`): Instance of
            MySQLComponent to connect to MySQL.
        bq_component (:class:`BigQueryComponent`): Instance of
            BigQueryComponent to do BigQuery operations.
        gcs_component (:class:`GCSStorageComponent`): Instance of
            GCSStorageComponent to do GCS operations.
        hive_table_model (:class:`HiveTableModel`): Wrapper to Hive table
            details.
        bq_table_model (:class:`BigQueryTableModel`): Wrapper to BigQuery
            table details.
        gcs_bucket_name (str): GCS bucket name.
        table_data (List): Information of data to migrate, one element
            per partition.
    """
    # Placeholder rows (file_path='TODO') are first written for every
    # partition, then read back, staged, expanded into real file rows and
    # finally the placeholder is deleted.
    for data in table_data:
        if hive_table_model.is_inc_col_present:
            insert_query = "INSERT INTO {0} (id,table_name,inc_col_min," \
                           "inc_col_max,clause,file_path) VALUES('{1}'," \
                           "'{2}','{3}','{4}','{5}','TODO')".format(
                               hive_table_model.tracking_table_name,
                               data['id'], data['table_name'],
                               data['inc_col_min'], data['inc_col_max'],
                               data['clause'])
            select_query = "SELECT id,table_name,inc_col_min,inc_col_max," \
                           "clause FROM {} WHERE file_path='TODO'".format(
                               hive_table_model.tracking_table_name)
        else:
            insert_query = "INSERT INTO {0} (table_name,clause," \
                           "file_path)VALUES('{1}','{2}','TODO')".format(
                               hive_table_model.tracking_table_name,
                               data['table_name'], data['clause'])
            select_query = "SELECT table_name,clause FROM {} WHERE " \
                           "file_path='TODO'".format(
                               hive_table_model.tracking_table_name)
        # Inserts a row in the tracking table for every partition.
        mysql_component.execute_transaction(insert_query)
    results = mysql_component.execute_query(select_query)
    for row in results:
        if hive_table_model.is_inc_col_present:
            identifier, table_name, inc_col_min, inc_col_max, clause \
                = row
            if identifier == 1:
                # First run: include the lower bound value.
                insert_clause = "{0} and {1}>='{2}' and " \
                                "{1}<='{3}'".format(
                                    clause, hive_table_model.inc_col,
                                    inc_col_min, inc_col_max)
            else:
                # Later runs: exclude the already-migrated lower bound.
                insert_clause = "{0} and {1}>'{2}' and " \
                                "{1}<='{3}'".format(
                                    clause, hive_table_model.inc_col,
                                    inc_col_min, inc_col_max)
        else:
            table_name, clause = row
            insert_clause = clause
        # Creates staging table and inserting data.
        self.create_and_load_stage_table(hive_table_model, table_name,
                                         insert_clause)
        # Gets table location
        source_location = self.get_table_location(
            "default", table_name)
        # Lists underlying HDFS files.
        hdfs_files_list = self.list_hdfs_files(source_location)
        logger.info("Updating file paths in the tracking table..")
        for file_path in hdfs_files_list:
            if hive_table_model.is_inc_col_present:
                query = "INSERT INTO {0} (id,table_name,inc_col_min," \
                        "inc_col_max,clause,file_path," \
                        "gcs_copy_status,bq_job_id,bq_job_retries," \
                        "bq_job_status) VALUES('{1}','{2}','{3}'," \
                        "'{4}','{5}','{6}','TODO','TODO',0," \
                        "'TODO')".format(
                            hive_table_model.tracking_table_name,
                            identifier, table_name, inc_col_min,
                            inc_col_max, clause, file_path)
            else:
                query = "INSERT INTO {0} (table_name,clause," \
                        "file_path,gcs_copy_status,bq_job_id," \
                        "bq_job_retries,bq_job_status) VALUES('{1}'," \
                        "'{2}','{3}','TODO','TODO',0,'TODO')".format(
                            hive_table_model.tracking_table_name,
                            table_name, clause, file_path)
            # Commits information about the staging files.
            mysql_component.execute_transaction(query)
        # Removes the placeholder row now that real file rows exist.
        query = "DELETE FROM {0} WHERE table_name='{1}' AND clause " \
                "='{2}' AND file_path='TODO'".format(
                    hive_table_model.tracking_table_name,
                    table_name, clause)
        mysql_component.execute_transaction(query)
    # Copies files from HDFS to GCS.
    gcs_component.stage_to_gcs(mysql_component, bq_component,
                               hive_table_model, bq_table_model,
                               gcs_bucket_name)
@staticmethod
def compare_max_values(hive_table_model, old_max, new_max):
    """Tells whether the incremental column's maximum has advanced.

    Args:
        hive_table_model (:class:`HiveTableModel`): Wrapper to Hive table
            details.
        old_max (str): Maximum value of the incremental column recorded
            in the tracking table.
        new_max (str): Maximum value of the incremental column currently
            in the Hive table.

    Returns:
        boolean: True if new_max is strictly greater than old_max,
        else False.
    """
    if hive_table_model.inc_col_type == "ts":
        try:
            # Parse in the same order as the tracking/Hive reads.
            old_value = parse(old_max)
            new_value = parse(new_max)
        except ValueError as error:
            logger.exception(error)
            logger.info("Failed to detect incremental column type")
            raise
        return new_value > old_value
    # incremental column is of int type.
    return int(new_max) > int(old_max)
def check_inc_data(self, mysql_component, bq_component, gcs_component,
                   hive_table_model, bq_table_model, gcs_bucket_name):
    """Checks for incremental data, dispatching on the table layout.

    Args:
        mysql_component (:class:`MySQLComponent`): Instance of
            MySQLComponent to connect to MySQL.
        bq_component (:class:`BigQueryComponent`): Instance of
            BigQueryComponent to do BigQuery operations.
        gcs_component (:class:`GCSStorageComponent`): Instance of
            GCSStorageComponent to do GCS operations.
        hive_table_model (:class:`HiveTableModel`): Wrapper to Hive table
            details.
        bq_table_model (:class:`BigQueryTableModel`): Wrapper to BigQuery
            table details.
        gcs_bucket_name (str): GCS bucket name.

    Returns:
        List: Dict elements, each describing incremental data that needs
        to be migrated.
    """
    logger.info("Checking for any new data...")
    if hive_table_model.is_partitioned is False:
        return self.check_inc_non_partition_table(
            mysql_component, bq_component, gcs_component, hive_table_model,
            bq_table_model, gcs_bucket_name)
    return self.check_inc_partition_table(mysql_component,
                                          hive_table_model)
def check_inc_non_partition_table(self, mysql_component, bq_component,
                                  gcs_component, hive_table_model,
                                  bq_table_model, gcs_bucket_name):
    """Checks for incremental data in case of a non-partitioned table.

    If there is an incremental column, the function queries the Hive
    table to get the maximum value and compares it with the maximum value
    from the tracking table. If there is no incremental column then
    Case A: If the data format is supported [Avro/ORC/Parquet], it looks
    for new files.
    Case B: If the data format is not supported, it cannot detect new data.

    Args:
        mysql_component (:class:`MySQLComponent`): Instance of
            MySQLComponent to connect to MySQL.
        bq_component (:class:`BigQueryComponent`): Instance of
            BigQueryComponent to do BigQuery operations.
        gcs_component (:class:`GCSStorageComponent`): Instance of
            GCSStorageComponent to do GCS operations.
        hive_table_model (:class:`HiveTableModel`): Wrapper to Hive table
            details.
        bq_table_model (:class:`BigQueryTableModel`): Wrapper to BigQuery
            table details.
        gcs_bucket_name (str): GCS bucket name.

    Returns:
        List: A list of only one dict element, containing information of
        the incremental data (empty when there is nothing to migrate or
        new files were copied directly).
    """
    tracking_data = []
    if hive_table_model.is_inc_col_present:
        # Fetches maximum value of the incremental column for each
        # partition from the tracking table and from Hive table and
        # compare these values to decide whether there is new data.
        results = mysql_component.execute_query(
            "SELECT MAX(id),MAX(inc_col_max) FROM {}".format(
                hive_table_model.tracking_table_name))
        identifier, old_data_max = results[0]
        results = self.execute_query("SELECT MAX({0}) FROM {1}.{2}".format(
            hive_table_model.inc_col, hive_table_model.db_name,
            hive_table_model.table_name))
        new_data_max = results[0][0]
        new_data_exists = self.compare_max_values(hive_table_model,
                                                  old_data_max,
                                                  new_data_max)
        if new_data_exists:
            logger.info("New data found in source table")
            logger.debug(
                "Previously incremental column %s maximum value "
                "%s.Current maximum value %s", hive_table_model.inc_col,
                old_data_max, new_data_max)
            # New run picks up the half-open range (old_max, new_max]
            # under the next run id.
            tracking_data.append({
                'table_name': hive_table_model.staging_table_name,
                'id': identifier + 1,
                'inc_col_min': old_data_max,
                'inc_col_max': new_data_max,
                'clause': ""
            })
        else:
            logger.info("No new data found")
    elif not hive_table_model.is_inc_col_present and \
            hive_table_model.is_table_type_supported is False:
        # Case B: no incremental column and an unsupported (text) format
        # leaves no reliable way to detect new rows.
        logger.info(
            "cannot check for new data in case of Non partitioned - No "
            "Incremental column - Text format table")
    elif not hive_table_model.is_inc_col_present and \
            hive_table_model.is_table_type_supported is True:
        # Case A: Lists HDFS files and compares them with tracking table
        # and migrates files which aren't present in the tracking table.
        results = mysql_component.execute_query(
            "SELECT file_path FROM {}".format(
                hive_table_model.tracking_table_name))
        old_file_paths = [row[0] for row in results]
        new_file_paths = self.list_hdfs_files(
            self.get_table_location(hive_table_model.db_name,
                                    hive_table_model.table_name))
        new_data_exists = False
        for file_path in new_file_paths:
            if file_path not in old_file_paths:
                # Updates the tracking table with new file paths.
                new_data_exists = True
                logger.debug("Found new data at file path %s", file_path)
                query = "INSERT INTO {0} (table_name,file_path," \
                        "gcs_copy_status,bq_job_id,bq_job_retries," \
                        "bq_job_status) VALUES('{1}','{2}','TODO','TODO'," \
                        "0,'TODO')".format(
                            hive_table_model.tracking_table_name,
                            hive_table_model.table_name, file_path)
                mysql_component.execute_transaction(query)
        # Copies the new files to GCS (tracking_data stays empty: the
        # copy happens here rather than in a later migration step).
        if new_data_exists:
            logger.info("New files found in source table")
            gcs_component.stage_to_gcs(mysql_component, bq_component,
                                       hive_table_model, bq_table_model,
                                       gcs_bucket_name)
        else:
            logger.info("No new data found")
    return tracking_data
    def check_inc_partition_table(self, mysql_component, hive_table_model):
        """Checks for incremental data in case of a partition table.

        If there is no incremental column, this function queries the Hive
        table to get the list of new partitions, if any. If there is an
        incremental column, in addition to finding new partitions, this also
        gets the information of the incremental data in existing partitions.

        Args:
            mysql_component (:class:`MySQLComponent`): Instance of
                MySQLComponent to connect to MySQL.
            hive_table_model (:class:`HiveTableModel`): Wrapper to Hive table
                details.
        Returns:
            List: A list of dicts, one per new partition and one per existing
            partition in which new incremental data was found.
        """
        tracking_data = []
        # Checks for any new partitions which haven't been recorded in
        # the tracking table and appends information to the list.
        logger.info("Checking for new partitions...")
        # Partitions already migrated, as recorded in the MySQL tracking table.
        results = mysql_component.execute_query(
            "SELECT DISTINCT(clause) FROM {}".format(
                hive_table_model.tracking_table_name))
        old_partitions_list = [row[0] for row in results]
        # Partitions currently present in the source Hive table.
        partitions_list = self.list_partitions(hive_table_model.db_name,
                                               hive_table_model.table_name)
        present_partitions_list = [item['clause'] for item in partitions_list]
        new_partitions_list = list(
            set(present_partitions_list).difference(old_partitions_list))
        if not hive_table_model.is_inc_col_present:
            # No incremental column: only whole new partitions can be migrated.
            for clause in new_partitions_list:
                logger.info("Found new partition {}".format(clause))
                tracking_data.append({
                    'table_name': hive_table_model.staging_table_name,
                    'clause': clause
                })
        else:
            for clause in new_partitions_list:
                logger.info("Found new partition {}".format(clause))
                # Range of the incremental column inside the new partition,
                # queried from Hive (self.execute_query, not MySQL).
                results = self.execute_query(
                    "SELECT MIN({0}),MAX({0}) FROM {1}.{2} {3}".format(
                        hive_table_model.inc_col, hive_table_model.db_name,
                        hive_table_model.table_name, clause))
                col_min, col_max = results[0]
                # id 1 marks the first incremental load for this partition.
                tracking_data.append({
                    "table_name": hive_table_model.staging_table_name,
                    "id": 1,
                    "inc_col_min": col_min,
                    "inc_col_max": col_max,
                    "clause": clause
                })
            logger.info("Checking for new data in existing partitions...")
            # Fetches maximum value of the incremental column for each
            # partition from the tracking table and from Hive table and
            # compare these values to decide whether there is new data.
            for clause in old_partitions_list:
                results = mysql_component.execute_query(
                    "SELECT MAX(id),MAX(inc_col_max) FROM {0} WHERE "
                    "clause='{1}'".format(hive_table_model.tracking_table_name,
                                          clause))
                identifier, old_data_max = results[0]
                logger.debug("Old maximum value %s - %s", clause, old_data_max)
                results = self.execute_query(
                    "SELECT MAX({0}) FROM {1}.{2} {3}".format(
                        hive_table_model.inc_col, hive_table_model.db_name,
                        hive_table_model.table_name, clause))
                new_data_max = results[0][0]
                logger.debug("New maximum value %s - %s", clause, new_data_max)
                new_data_exists = self.compare_max_values(
                    hive_table_model, old_data_max, new_data_max)
                # Appends information to the list if new data is found.
                if new_data_exists:
                    logger.info(
                        "New data found in partition - {}".format(clause))
                    # id increments per load; (min, max] bounds the new slice.
                    tracking_data.append({
                        "table_name": hive_table_model.staging_table_name,
                        "id": identifier + 1,
                        "inc_col_min": old_data_max,
                        "inc_col_max": new_data_max,
                        "clause": clause
                    })
                else:
                    logger.info(
                        "No New data found in partition - {}".format(clause))
        return tracking_data
|
import socket
remoteip = "misc.chal.csaw.io"
remoteport = 8000
def sock(remoteip, remoteport):
    """Open a TCP connection to (remoteip, remoteport).

    Returns the socket together with an unbuffered read/write file wrapper
    around it (Python 2 makefile signature).
    """
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((remoteip, remoteport))
    fobj = conn.makefile('rw', bufsize=0)
    return conn, fobj
def read_until(f, delim='\n'):
    """Read single characters from file-like *f* until the data read so far
    ends with *delim*; return everything read, delimiter included.

    Improvements over the naive version: characters are collected in a list
    and joined once (the old ``data += c`` loop was quadratic), and an empty
    read (closed stream) raises EOFError instead of spinning forever.
    """
    n = len(delim)
    if n == 0:
        # '' always "ends with" an empty delimiter -- match old behaviour.
        return ''
    chunks = []
    tail = ''  # rolling window of the last len(delim) characters
    while True:
        c = f.read(1)
        if not c:
            raise EOFError('stream closed before delimiter was seen')
        chunks.append(c)
        tail = (tail + c)[-n:]
        if tail == delim:
            return ''.join(chunks)
s, f = sock(remoteip, remoteport)  # module-global connection used by solve() and the main loop
def solve(target):
    """Answer one round: greedily decompose *target* dollars into counts of
    each bill/coin denomination (largest first) and send one count per line
    over the module-global socket ``s``.

    Works entirely in integer cents so no per-denomination float->int
    truncation (e.g. ``int(0.1 * 100)``) can go wrong.
    """
    remaining = int(round(target * 100, 0))
    # Denominations in cents: $100 bill down to the 1-cent coin,
    # matching the order of prompts (1a)...(1o) from the server.
    denoms = [1000000, 500000, 100000, 50000, 10000, 5000, 2000,
              1000, 500, 100, 50, 25, 10, 5, 1]
    lines = []
    for d in denoms:
        lines.append(str(remaining // d))
        remaining %= d
    s.send('\n'.join(lines) + '\n')
# Consume the challenge banner up through the last denomination prompt,
# then echo the remainder of the intro.
read_until(f, "(1c): ")
print read_until(f, "!\n")
count = 0
while True:
    print "[+] Iter:", count
    line = read_until(f, "\n")
    print line,
    # Each round's line carries a dollar amount such as "$123.45".
    target = float(line.replace("$", "").strip())
    print "[+] target:", target
    solve(target)
    count += 1
s.close()  # NOTE(review): unreachable -- the loop above only exits via an exception
# flag: flag{started-from-the-bottom-now-my-whole-team-fucking-here}
# Generated by Django 3.2 on 2021-05-02 17:40
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration: reconciles the two divergent 0008 leaf migrations of
    the 'app' graph into a single history; intentionally performs no schema
    changes itself (operations is empty)."""

    dependencies = [
        ("app", "0008_alter_person_managers"),
        ("app", "0008_rename_plataform_offer_platform"),
    ]

    operations = []
|
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from cuda import cudart
import tensorrt as trt
from img2img_pipeline import Img2ImgPipeline
from utilities import preprocess_image, TRT_LOGGER, add_arguments, download_image
import PIL
from PIL import Image
def parseArgs():
    """Build the CLI for the img2img demo and return the parsed arguments.

    Shared options come from utilities.add_arguments; this adds the
    scheduler selection and the optional input-image path on top.
    """
    arg_parser = argparse.ArgumentParser(description="Options for Stable Diffusion Img2Img Demo")
    arg_parser = add_arguments(arg_parser)
    arg_parser.add_argument('--scheduler', type=str, default="DDIM", choices=["DDIM", "EulerA", "LMSD", "DPM", "PNDM"], help="Scheduler for diffusion process")
    arg_parser.add_argument('--input-image', type=str, default="", help="Path to the input image")
    return arg_parser.parse_args()
if __name__ == "__main__":
print("[I] Initializing StableDiffusion img2img demo using TensorRT")
args = parseArgs()
# Process prompt
if not isinstance(args.prompt, list):
raise ValueError(f"`prompt` must be of type `str` or `str` list, but is {type(args.prompt)}")
prompt = args.prompt * args.repeat_prompt
if not isinstance(args.negative_prompt, list):
raise ValueError(f"`--negative-prompt` must be of type `str` or `str` list, but is {type(args.negative_prompt)}")
if len(args.negative_prompt) == 1:
negative_prompt = args.negative_prompt * len(prompt)
else:
negative_prompt = args.negative_prompt
if args.input_image:
input_image = Image.open(args.input_image)
else:
url = "https://pajoca.com/wp-content/uploads/2022/09/tekito-yamakawa-1.png"
input_image = download_image(url)
image_width, image_height = input_image.size
# Validate image dimensions
if image_height % 8 != 0 or image_width % 8 != 0:
raise ValueError(f"Image height and width have to be divisible by 8 but specified as: {image_height} and {image_width}.")
if isinstance(input_image, PIL.Image.Image):
input_image = preprocess_image(input_image)
# Register TensorRT plugins
trt.init_libnvinfer_plugins(TRT_LOGGER, '')
max_batch_size = 16
if args.build_dynamic_shape:
max_batch_size = 4
batch_size = len(prompt)
if batch_size > max_batch_size:
raise ValueError(f"Batch size {len(prompt)} is larger than allowed {max_batch_size}. If dynamic shape is used, then maximum batch size is 4")
if args.use_cuda_graph and (not args.build_static_batch or args.build_dynamic_shape):
raise ValueError(f"Using CUDA graph requires static dimensions. Enable `--build-static-batch` and do not specify `--build-dynamic-shape`")
# Initialize demo
demo = Img2ImgPipeline(
scheduler=args.scheduler,
denoising_steps=args.denoising_steps,
output_dir=args.output_dir,
version=args.version,
hf_token=args.hf_token,
verbose=args.verbose,
nvtx_profile=args.nvtx_profile,
max_batch_size=max_batch_size)
# Load TensorRT engines and pytorch modules
demo.loadEngines(args.engine_dir, args.onnx_dir, args.onnx_opset,
opt_batch_size=len(prompt), opt_image_height=image_height, opt_image_width=image_width, \
force_export=args.force_onnx_export, force_optimize=args.force_onnx_optimize, \
force_build=args.force_engine_build, \
static_batch=args.build_static_batch, static_shape=not args.build_dynamic_shape, \
enable_refit=args.build_enable_refit, enable_preview=args.build_preview_features, enable_all_tactics=args.build_all_tactics, \
timing_cache=args.timing_cache, onnx_refit_dir=args.onnx_refit_dir)
demo.loadResources(image_height, image_width, batch_size, args.seed)
if args.use_cuda_graph:
# inference once to get cuda graph
images = demo.infer(prompt, negative_prompt, input_image, image_height, image_width, strength=0.75, warmup=True)
print("[I] Warming up ..")
for _ in range(args.num_warmup_runs):
images = demo.infer(prompt, negative_prompt, input_image, image_height, image_width, strength=0.75, warmup=True)
print("[I] Running StableDiffusion pipeline")
if args.nvtx_profile:
cudart.cudaProfilerStart()
images = demo.infer(prompt, negative_prompt, input_image, image_height, image_width, seed=args.seed, strength=0.75)
if args.nvtx_profile:
cudart.cudaProfilerStop()
|
# Generated by Django 2.2 on 2020-02-10 11:35
import DjangoUeditor.models
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration for the volunteer-activity app.

    NOTE(review): 'Ativity' and 'Histroy' are misspelled model names that are
    now baked into migration state -- do not edit them here; rename via a
    follow-up migration if desired. The 'signtime' default below is frozen at
    generation time (2020-02-10), which is the usual pitfall of using
    datetime.datetime.now() instead of a callable in the model definition.
    Keep all field definitions byte-identical to preserve migration history.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Ativity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('createtime', models.DateTimeField(auto_created=True, help_text='创建时间', verbose_name='创建时间')),
                ('title', models.CharField(help_text='标题', max_length=100, verbose_name='标题')),
                ('registration_time', models.DateField(help_text='报名时间', verbose_name='报名时间')),
                ('activity_time', models.IntegerField(help_text='活动时长', verbose_name='活动时长')),
                ('link_phone', models.CharField(help_text='联系电话', max_length=11, verbose_name='联系电话')),
                ('link_pople', models.CharField(help_text='联系人', max_length=20, verbose_name='联系人')),
                ('activity_content', DjangoUeditor.models.UEditorField(blank=True, help_text='活动内容', null=True, verbose_name='活动内容')),
                ('people_count', models.IntegerField(default=100, help_text='招募人数', verbose_name='招募人数')),
                ('activity_starttime', models.DateTimeField(help_text='活动时间', verbose_name='活动时间')),
                ('status', models.IntegerField(choices=[(0, '待审核'), (1, '审核通过'), (2, '审核未通过'), (3, '报名中'), (4, '活动开始'), (5, '活动结束'), (6, '活动取消')], help_text='活动状态', verbose_name='活动状态')),
                ('updatetime', models.DateTimeField(auto_now=True, help_text='最后修改时间', verbose_name='最后修改时间')),
                ('hot', models.IntegerField(default=0, help_text='热度', verbose_name='热度')),
            ],
            options={
                'verbose_name': '义工活动',
                'verbose_name_plural': '义工活动',
            },
        ),
        migrations.CreateModel(
            name='Community',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('createtime', models.DateTimeField(auto_created=True, help_text='创建时间', verbose_name='创建时间')),
                ('title', models.CharField(help_text='标题', max_length=100, verbose_name='标题')),
                ('content', DjangoUeditor.models.UEditorField(blank=True, help_text='活动内容', null=True, verbose_name='活动内容')),
                ('updatetime', models.DateTimeField(auto_now=True, help_text='最后修改时间', verbose_name='最后修改时间')),
            ],
            options={
                'verbose_name': '社区',
                'verbose_name_plural': '社区',
            },
        ),
        migrations.CreateModel(
            name='Communitypic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.FileField(blank=True, help_text='比赛结果', null=True, upload_to='historypic/', verbose_name='比赛结果')),
            ],
        ),
        migrations.CreateModel(
            name='Histroy',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('createtime', models.DateTimeField(auto_created=True, help_text='创建时间', verbose_name='创建时间')),
                ('title', models.CharField(help_text='题目', max_length=20, verbose_name='题目')),
                ('content', DjangoUeditor.models.UEditorField(blank=True, help_text='活动内容', null=True, verbose_name='活动内容')),
                ('updatetime', models.DateTimeField(auto_now=True, help_text='最后修改时间', verbose_name='最后修改时间')),
            ],
            options={
                'verbose_name': '历史活动',
                'verbose_name_plural': '历史活动',
            },
        ),
        migrations.CreateModel(
            name='HPic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.FileField(blank=True, help_text='比赛结果', null=True, upload_to='historypic/', verbose_name='比赛结果')),
            ],
        ),
        migrations.CreateModel(
            name='Type',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='类型名', max_length=100, verbose_name='类型名')),
            ],
            options={
                'verbose_name': '类型',
                'verbose_name_plural': '类型',
            },
        ),
        migrations.CreateModel(
            name='UserandActivity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rank', models.IntegerField(choices=[(0, '差'), (1, '一般'), (2, '优秀')], default=2, help_text='等级', verbose_name='等级')),
                ('activity_time', models.IntegerField(default=5, help_text='义工时', verbose_name='义工时')),
                ('signtime', models.DateTimeField(default=datetime.datetime(2020, 2, 10, 11, 35, 44, 56705), help_text='报名时间', verbose_name='报名时间')),
                ('activity', models.ForeignKey(help_text='活动', on_delete=django.db.models.deletion.CASCADE, related_name='activity', to='activity.Ativity', verbose_name='活动')),
            ],
        ),
    ]
|
from tkinter import *
from frameworkGUI import GUI
import subprocess
import threading
def data_collection():
    """Launch the data collector as a separate windowless Python process and
    block until it exits.

    Passes the command as an argv list: with shell=False a single command
    string is only accepted on Windows, while the list form works on every
    platform.
    """
    subprocess.call(["pythonw.exe", "dataCollector.py"], shell=False)
def gui():
    """Create the Tk root window, build the framework GUI on it, show the
    app buttons with the 'Home' page selected, and enter the Tk main loop
    (blocks until the window is closed)."""
    root_window = Tk()
    app = GUI(root_window)
    app.show_app_buttons()
    app.run_applications('Home')
    root_window.mainloop()
# NOTE: this threading was originally intended to allow passive data
# collection (auto-detecting entry into champion selection while the app
# is running). It will most likely be changed in the future, since the
# feature has not been used.
t2 = threading.Thread(target=data_collection, args=())
t1 = threading.Thread(target=gui, args=())
t1.start()
t2.start()
# Wait for both the GUI and the collector subprocess to finish.
t1.join()
t2.join()
|
import sys
class colCount:
    """Helper that reads the column count from a "<name>.txt" map file."""

    def getColCnt(self, filename):
        """Return the first line of ``filename + ".txt"`` (the column count,
        trailing newline included), or None if the file cannot be opened.

        Fixes the original bug where an IOError left ``colCnt`` unbound and
        the final ``return`` raised NameError; also uses ``with`` so the
        file handle is closed on every path.
        """
        filename = filename + ".txt"
        colCnt = None
        try:
            with open(filename, "r") as fin:
                # First line of the map file holds the column count.
                colCnt = fin.readline()
        except IOError:
            print("Exception: File %s does not exist!" % filename)
        return colCnt
|
from django.utils.translation import ugettext_lazy as _
from django.db import models
from cms.models import CMSPlugin
from os.path import join
from django.conf import settings
from cmsplugin_news_remote.utils import update_cache
# Create your models here.
class LatestNewsRemotePlugin(CMSPlugin):
    # code is partly borrowed from LatestNewsPlugin model of cmsplugin_news
    """
    Model for the settings when using the latest news cms plugin.

    Stores display options plus the remote URL that news items are fetched
    from; fetched data is cached on disk (see get_cache_path).
    """
    # Maximum number of news items rendered by the plugin.
    limit = models.PositiveIntegerField(_('Number of news items to show'),
        help_text=_('Limits the number of items that will be displayed'))
    # Whether the most recent item is rendered in its detailed form.
    last_detailed = models.BooleanField(
        _("Show detailed item"),
        help_text=_("Show detailed version of most recent item"))
    # end of borrowed code
    # Remote endpoint the plugin pulls its news data from.
    source_url = models.CharField(_('URL of the data source'),
        max_length=250,
        help_text=_('specifies address for requesting data'))

    def get_cache_path(self):
        """Return the on-disk cache file path, unique per plugin instance."""
        return join(settings.PROJECT_DIR, "cache/news_remote_%d" % self.id)

    def update_cache(self):
        """Re-fetch source_url and rewrite this instance's cache file."""
        return update_cache(self.get_cache_path(), self.source_url)
from django.contrib import admin
# Register your models here.
from .models import ChatRoom, ChatMessage
# Expose the chat models in the Django admin with default ModelAdmin options.
admin.site.register(ChatRoom)
admin.site.register(ChatMessage)
|
"""Utility functions for running nested cross-validation of sampling methods
"""
# Authors: Lyubomir Danov <->
# License: -
import pandas
import pytest
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, f1_score, make_scorer
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from ..cv_wrappers import basic_cv, nested_cv
from ..param_grid import generate_param_grid
from ..score_grid import ScoreGrid
def get_test1_settings():
    """Assemble the shared fixture for the cv-wrapper tests.

    Returns a kwargs dict suitable for nested_cv() (and, subset, basic_cv()):
    a two-step pipeline (pass-through preprocessor + SVC/RF classifier grid),
    two scorers each selected by minimum rank, two inner-CV seeds, a 2-fold
    outer CV, and the breast-cancer dataset.
    """
    # Step name -> candidate estimators; 'skip' is a pass-through preprocessor.
    pipeline_steps = {
        'preprocessor': {'skip': None},
        'classifier': {
            'svm': SVC(probability=True),
            'rf': RandomForestClassifier()
        }
    }
    # Hyper-parameter grids keyed by the estimator aliases above.
    params_dict = {
        'skip': {},
        'svm': {'C': [1, 10, 100],
                'gamma': [.01, .1],
                'kernel': ['rbf']},
        'rf': {'n_estimators': [1, 10, 100],
               'max_features': [1, 5, 10, 20]}
    }
    # 'min' rank criterion: pick the candidate with the best (lowest) rank.
    scorer_selection_input = [
        {'score_name': 'Accuracy', 'score_key': 'rank_test_Accuracy',
         'score_criteria': 'min', 'scorer': make_scorer(accuracy_score)},
        {'score_name': 'F1-Score', 'score_key': 'rank_test_F1-Score',
         'score_criteria': 'min', 'scorer': make_scorer(f1_score)}
    ]
    pipe = Pipeline([('preprocessor', None), ('classifier', None)])
    param_grid, _ = generate_param_grid(steps=pipeline_steps,
                                        param_dict=params_dict)
    scorer_selection = ScoreGrid(scorer_selection_input)
    # refit=False because the wrappers do their own model selection.
    cv_grid = GridSearchCV(
        estimator=pipe,
        param_grid=param_grid,
        cv=StratifiedKFold(shuffle=True, n_splits=5),
        scoring=scorer_selection.get_sklearn_dict(),
        return_train_score=False,
        refit=False,
        verbose=1
    )
    random_states = [0, 1]
    outer_cv = StratifiedKFold(n_splits=2)
    X, y = load_breast_cancer(return_X_y=True)
    kwargs = {
        'additional_info': {'data_name': "breast_cancer"},
        'cv_grid': cv_grid,
        'X': X,
        'y': y,
        'inner_cv_seeds': random_states,
        'outer_cv': outer_cv,
        'score_selection': scorer_selection
    }
    return kwargs
def test_nested_cv():
    """nested_cv yields exactly the expected result columns and at least one
    row per (classifier, inner seed, score) combination."""
    expected_columns = sorted([
        'data_name', 'estimator', 'inner_cv_random_state', 'outer_fold_n',
        'param_classifier', 'param_classifier__C', 'param_classifier__gamma',
        'param_classifier__kernel', 'param_classifier__max_features',
        'param_classifier__n_estimators', 'param_preprocessor', 'params',
        'score_name', 'score_value', 'scorer', 'type_classifier',
        'type_preprocessor'
    ])
    # two classifiers, two inner seeds and three scores
    min_expected_rows = 2 * 2 * 3
    settings = get_test1_settings()
    result, _ = nested_cv(**settings)
    result_df = pandas.DataFrame(result)
    print(result_df.columns)
    assert sorted(result_df.columns.tolist()) == expected_columns
    assert result_df.shape[0] >= min_expected_rows
def test_basic_cv():
    """basic_cv yields the standard GridSearchCV result columns (plus the
    type/data annotations) and one row per parameter combination."""
    expected_columns = sorted([
        'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time',
        'param_classifier', 'param_classifier__C', 'param_classifier__gamma',
        'param_classifier__kernel', 'param_preprocessor',
        'param_classifier__max_features', 'param_classifier__n_estimators',
        'params', 'split0_test_Accuracy', 'split1_test_Accuracy',
        'split2_test_Accuracy', 'split3_test_Accuracy', 'split4_test_Accuracy',
        'mean_test_Accuracy', 'std_test_Accuracy', 'rank_test_Accuracy',
        'split0_test_F1-Score', 'split1_test_F1-Score', 'split2_test_F1-Score',
        'split3_test_F1-Score', 'split4_test_F1-Score', 'mean_test_F1-Score',
        'std_test_F1-Score', 'rank_test_F1-Score', 'type_preprocessor',
        'type_classifier', 'data_name'
    ])
    # 6 param combinations for SVC, 12 for RF
    min_expected_rows = 18
    settings = get_test1_settings()
    wanted_keys = ['cv_grid', 'X', 'y', 'additional_info']
    settings_sub = {k: v for k, v in settings.items() if k in wanted_keys}
    result, _ = basic_cv(**settings_sub)
    result_df = pandas.DataFrame(result)
    print(result_df.columns)
    assert sorted(result_df.columns) == expected_columns
    assert result_df.shape[0] >= min_expected_rows
|
from django import forms
from django.contrib.auth.models import User
from Jetbrain.user.models import Profile
class SignUp(forms.ModelForm):
    """Registration form for Django's built-in User model.

    NOTE(review): the view must hash the password (user.set_password) before
    saving -- a plain ModelForm.save() would store it as clear text.
    """
    username = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Username'}), label='')
    first_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'First Name'}), label='')
    last_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Last Name'}), label='')
    email = forms.EmailField(widget=forms.TextInput(attrs={'placeholder': 'Email Address'}), label='')
    # PasswordInput masks the entry natively; replaces the previous
    # TextInput(attrs={'type': 'password'}) workaround.
    password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder': 'Password'}), label='')

    class Meta:
        model = User
        fields = ['username', 'first_name', 'last_name', 'email', 'password']
class UpdateUser(forms.ModelForm):
    """Form for editing a user's first and last name."""
    # Both placeholders previously read 'Username' (copy-paste bug).
    first_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'First Name'}))
    last_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Last Name'}))

    class Meta:
        model = User
        fields = ['first_name', 'last_name']
class UpdateProfile(forms.ModelForm):
    """Form for changing the avatar image on the user's Profile."""
    class Meta:
        model = Profile
        fields = ['image']
import sqlite3, os, sys, time, traceback, random, signal
import numpy as np
DB_DIR = "/var/log/rampart/db/"
DATABASE = "stat.db"
WAL = False
RULE_LIFE_SPAN = 30
RULE_EXPIRY_TIME = 10
def getlocaltime():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
def signal_term_handler(sig, frame):
    """SIGTERM handler: delete pending .stat/.rule drop files in the current
    directory, roll back and close the module-global DB connection, flush
    stdout and exit the process."""
    global conn
    current_time = getlocaltime()
    current_pid = os.getpid()
    # NOTE(review): msg is built but never printed or logged on either path
    # below -- confirm whether these were meant to go to stdout/the log file.
    msg = '%s\tPROCESS [%d] received SIGTERM!!! Cleaning up...' % (current_time, current_pid)
    current_time = time.time()
    # Remove unprocessed drop files so a restart begins from a clean state.
    stat_files = [f for f in os.listdir('.') if f.endswith('.stat') or f.endswith('.rule')]
    for f in stat_files:
        try:
            os.remove(f)
        except OSError as e:
            pass
    # Best-effort: discard any in-flight transaction and release the DB.
    try:
        conn.rollback()
        conn.close()
    except Exception as e:
        pass
    current_time = getlocaltime()
    msg = '%s\tPROCESS [%d] terminated' % (current_time, current_pid)
    sys.stdout.flush()
    sys.exit()
def test_file_timestamp(f, end, ext):
result = False
if f.endswith(ext):
parts = f.split('-')
try:
t = int(parts[-3])
except Exception as e:
print(e)
print(parts)
sys.exit(0)
result = (t < end)
return result
def process_rulefile(f, add_list, renew_list, test_list, remove_list):
    """Parse one tab-separated .rule file and add each (uid, uri_key) pair
    to the set matching its rule type. Unknown rule types are ignored and a
    missing/unreadable file is silently skipped."""
    dispatch = {
        'KILL': add_list,
        'RENEW': renew_list,
        'TEST': test_list,
        'REVOKE': remove_list,
    }
    try:
        with open(f, 'r') as rule_file:
            for line in rule_file:
                # Line layout: rule_type \t uid \t uri_key \n
                fields = line[:-1].split('\t')
                rule_type, uid, uri_key = fields[0], fields[1], fields[2]
                target = dispatch.get(rule_type)
                if target is not None:
                    target.add((uid, uri_key))
    except IOError as e:
        pass
def update_rule(cur, add_list, renew_list, test_list, remove_list):
    """Apply one batch of rule changes to the RULES table.

    add_list    -- (uid, uri_key) pairs inserted/replaced as active KILL
                   rules; the expiry interval grows with the stored count.
    renew_list  -- pairs whose expiry is pushed out by RULE_EXPIRY_TIME.
    test_list   -- pairs switched to TEST status (0).
    remove_list -- pairs either deleted outright (if they expired more than
                   RULE_LIFE_SPAN seconds ago) or marked INACTIVE (-1).

    Commits on the module-global connection `conn`.
    """
    global conn
    add_queries = list()
    renew_queries = list()
    update_queries = list()
    remove_queries = list()
    current_time = time.time()
    local_time = getlocaltime()
    for i, v in enumerate(add_list):
        uid, uri_key = v
        status = 1 # KILL, ACTIVE
        count = 1
        # Continue the existing fire-count if this rule was seen before.
        cur.execute('SELECT ct FROM RULES WHERE uid=(?1) AND uri_key = (?2)', (uid, uri_key))
        row = cur.fetchone()
        if row is not None:
            count = int(row[0])
            count += 1
        # Expiry scales linearly with how many times the rule has fired.
        interval = count * RULE_EXPIRY_TIME
        expiry_time = current_time + interval
        print("%s\tSetting KILL rule for (%s, %s), interval=%.2f" % (local_time, uid, uri_key, interval))
        add_queries.append((uid, uri_key, expiry_time, count, status))
    for i, v in enumerate(renew_list):
        uid, uri_key = v
        interval = RULE_EXPIRY_TIME
        expiry_time = current_time + interval
        print("%s\tSetting RENEW rule for (%s, %s), interval=%.2f" % (local_time, uid, uri_key, interval))
        renew_queries.append((expiry_time, uid, uri_key))
    for i, v in enumerate(test_list):
        uid, uri_key = v
        status = 0 # TEST
        print("%s\tSetting TEST rule for (%s, %s)" % (local_time, uid, uri_key))
        update_queries.append((status, uid, uri_key))
    for i, v in enumerate(remove_list):
        uid, uri_key = v
        status = -1 # INACTIVE
        cur.execute('SELECT expiry FROM RULES WHERE uid=(?1) AND uri_key = (?2)', (uid, uri_key))
        row = cur.fetchone()
        timestamp = 0
        expiry_time = 0
        if row is not None:
            expiry_time = float(row[0])
        # Long-expired rules are physically deleted; recent ones are only
        # flagged inactive so their fire-count history survives.
        if expiry_time + RULE_LIFE_SPAN < current_time:
            print("%s\tRemoving rule for (%s, %s)" % (local_time, uid, uri_key))
            remove_queries.append(v)
        else:
            print("%s\tSetting REVOKE rule for (%s, %s)" % (local_time, uid, uri_key))
            update_queries.append((status, uid, uri_key))
    # Transaction is managed automatically, see https://docs.python.org/2/library/sqlite3.html#sqlite3-controlling-transactions
    if len(add_queries) > 0:
        cur.executemany("INSERT OR REPLACE INTO RULES VALUES (?1, ?2, ?3, ?4, ?5)", add_queries)
    if len(renew_queries) > 0:
        cur.executemany("UPDATE RULES SET expiry = (?1) WHERE uid = (?2) AND uri_key = (?3)", renew_queries)
    if len(update_queries) > 0:
        cur.executemany("UPDATE RULES SET status = (?1) WHERE uid = (?2) AND uri_key = (?3)", update_queries)
    if len(remove_queries) > 0:
        cur.executemany("DELETE FROM RULES WHERE uid = (?1) AND uri_key = (?2)", remove_queries)
    conn.commit()
def process_statfile(f, hash2stat):
    """Append the per-request stat samples found in one tab-separated .stat
    file to *hash2stat*, keyed by the integer hash in the first column.
    A missing/unreadable file is silently skipped."""
    try:
        with open(f, 'r') as stat_file:
            for line in stat_file:
                # Line layout: hash \t stat_name \t sample1 \t sample2 ...
                fields = line[:-1].split('\t')
                key = int(fields[0])
                samples = [int(v) for v in fields[2:]]
                hash2stat.setdefault(key, []).append(samples)
    except IOError as e:
        pass
def process_stats(hash2stat, hash2merged_stat):
    """Collapse each hash entry's list of sample lists into a single
    [sample_count, cpu_sum, variance] record in *hash2merged_stat*."""
    for key, sample_lists in hash2stat.items():
        flat = [v for samples in sample_lists for v in samples]
        hash2merged_stat[key] = [len(flat), np.sum(flat), np.var(flat)]
def query_old_stat(cur, hash2stat, hash2old_stat):
    """For every hash key in *hash2stat*, look up the previously stored
    [ct, cpu, cpu_variance] row in PERF_RECORDS and record hits (only) in
    *hash2old_stat*."""
    for key in hash2stat:
        cur.execute('SELECT ct, cpu, cpu_variance FROM PERF_RECORDS WHERE hash=?', (key,))
        record = cur.fetchone()
        if record is not None:
            hash2old_stat[key] = [int(record[0]), int(record[1]), float(record[2])]
def update_new_stat(cur, hash2stat, hash2old_stat):
    """Merge freshly aggregated stats with previously stored ones and upsert
    the result into PERF_RECORDS.

    hash2stat maps hash -> [ct, cpu_sum, variance]; entries also present in
    hash2old_stat are combined with the stored values using the pooled
    mean/variance formulas. Commits on the module-global connection `conn`.
    """
    global conn
    new_stats = list()
    count = len(hash2stat)
    for hash_entry, stat in hash2stat.items():
        ct, cpu, var = stat
        if hash_entry in hash2old_stat:
            old_stat = hash2old_stat[hash_entry]
            ct_p, cpu_p, var_p = old_stat
            # 1.0 * forces float division (file targets Python 2 semantics).
            mean = 1.0 * cpu / ct
            mean_p = 1.0 * cpu_p / ct_p
            mean_n = 1.0 * (cpu_p + cpu) / (ct_p + ct)
            mean_sq_p = mean_p * mean_p
            mean_sq = mean * mean
            mean_sq_n = mean_n * mean_n
            # Pooled variance: weighted E[X^2] of both samples minus the
            # squared combined mean.
            var = (ct_p * (mean_sq_p + var_p) + ct * (mean_sq + var)) / (ct_p + ct) - mean_sq_n
            ct += ct_p
            cpu += cpu_p
        new_stats.append((hash_entry, ct, int(cpu), float(var)))
        #print("Inserting stat of hash=%u, %d" % (hash_entry, hash_entry))
    # Transaction is managed automatically, see https://docs.python.org/2/library/sqlite3.html#sqlite3-controlling-transactions
    if count > 0:
        cur.executemany("INSERT OR REPLACE INTO PERF_RECORDS VALUES (?1, ?2, ?3, ?4)", new_stats)
    conn.commit()
def main(argv):
    """Daemon loop: ingest .rule and .stat drop files from DB_DIR into the
    SQLite stat database, then delete the processed files.

    argv is currently unused. With WAL enabled a fresh connection is opened
    per batch; otherwise one long-lived connection (module-global `conn`)
    is used with in-memory journaling.
    """
    global conn
    signal.signal(signal.SIGTERM, signal_term_handler)
    parent_pid = os.getpid()
    last_time = 0
    log_file = "stat_db.log"
    #log_f = open(log_file, 'w')
    os.chdir(DB_DIR)
    conn = sqlite3.connect(DATABASE)
    cur = conn.cursor()
    cur.execute("CREATE TABLE IF NOT EXISTS PERF_RECORDS (hash INTEGER PRIMARY KEY, ct INTEGER, cpu INTEGER, cpu_variance REAL)")
    # Rules do not survive a restart: drop and recreate the RULES table.
    cur.execute("DROP TABLE IF EXISTS RULES")
    cur.execute("CREATE TABLE IF NOT EXISTS RULES (uid TEXT, uri_key TEXT, expiry REAL, ct INTEGER, status INTEGER, PRIMARY KEY (uid, uri_key))")
    if WAL:
        # WAL mode re-opens a connection inside the loop per batch.
        conn.commit()
        conn.close()
    else:
        cur.execute("PRAGMA synchronous = OFF")
        cur.execute("PRAGMA journal_mode = MEMORY")
        #cur.execute("PRAGMA journal_mode = WAL")
        cur.execute("PRAGMA cache_size = 100000")
    should_sleep = True
    while True:
        try:
            current_time = time.time()
            # Throttle to roughly one pass per second unless the previous
            # pass had to subsample the stat files (i.e. a backlog exists).
            if should_sleep and current_time - last_time < 1:
                time.sleep(min(1-(current_time-last_time), 1))
            else:
                # Only pick files whose embedded timestamp is at least one
                # second old, so their writers are done with them.
                rule_files = [f for f in os.listdir('.') if test_file_timestamp(f, current_time-1, '.rule')]
                kill_list = set()
                renew_list = set()
                test_list = set()
                remove_list = set()
                for f in rule_files:
                    process_rulefile(f, kill_list, renew_list, test_list, remove_list)
                num_rules = len(kill_list) + len(renew_list) + len(test_list) + len(remove_list)
                if num_rules > 0:
                    if WAL:
                        conn = sqlite3.connect(DATABASE)
                        cur = conn.cursor()
                        cur.execute("PRAGMA synchronous = OFF")
                        #cur.execute("PRAGMA journal_mode = MEMORY")
                        cur.execute("PRAGMA journal_mode = WAL")
                        cur.execute("PRAGMA cache_size = 100000")
                    update_rule(cur, kill_list, renew_list, test_list, remove_list)
                    num_rule_files = len(rule_files)
                    print('%s\tProcessed [%d] rule files with [%d] kill_rules, [%d] renew_rules, [%d] test_rules and [%d] remove_rules' % (getlocaltime(), num_rule_files, len(kill_list), len(renew_list), len(test_list), len(remove_list)))
                    if WAL:
                        conn.close()
                stat_files = [f for f in os.listdir('.') if test_file_timestamp(f, current_time-1, '.stat')]
                # Cap the batch at 100 files; if we had to subsample, skip
                # the sleep next pass to drain the backlog faster.
                sample_size = min(len(stat_files), 100)
                should_sleep = sample_size == len(stat_files)
                stat_files = random.sample(stat_files, sample_size)
                sys.stdout.flush()
                hash2stat = dict()
                for f in stat_files:
                    process_statfile(f, hash2stat)
                if len(hash2stat) > 0:
                    hash2merged_stat = dict()
                    process_stats(hash2stat, hash2merged_stat)
                    if WAL:
                        conn = sqlite3.connect(DATABASE)
                        cur = conn.cursor()
                        cur.execute("PRAGMA synchronous = OFF")
                        #cur.execute("PRAGMA journal_mode = MEMORY")
                        cur.execute("PRAGMA journal_mode = WAL")
                        cur.execute("PRAGMA cache_size = 100000")
                    hash2old_stat = dict()
                    num_entries = len(hash2merged_stat)
                    if num_entries > 0:
                        query_old_stat(cur, hash2merged_stat, hash2old_stat)
                        update_new_stat(cur, hash2merged_stat, hash2old_stat)
                    elapse = time.time() - current_time
                    num_stats = sum([len(v) for v in hash2stat.values()])
                    num_files = len(stat_files)
                    print('%s\tProcessed [%d] stat files with [%d] stats of [%d] unique hash entries in [%f] seconds' % (getlocaltime(), num_files, num_stats, num_entries, elapse))
                    sys.stdout.flush()
                    if WAL:
                        conn.close()
                    del hash2stat, hash2merged_stat, hash2old_stat
                # Processed drop files are deleted regardless of content.
                for f in stat_files + rule_files:
                    try:
                        os.remove(f)
                    except OSError as e:
                        print(e)
                        pass
                last_time = current_time
        except (KeyboardInterrupt, Exception) as e:
            # Log any error with its traceback; only Ctrl-C ends the loop.
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            print(type(e))
            print(''.join('!! ' + line for line in lines))
            sys.stdout.flush()
            try:
                conn.rollback()
                if WAL:
                    conn.close()
            except Exception:
                pass
            if isinstance(e, KeyboardInterrupt):
                break
    if not WAL:
        conn.close()
    #log_f.close()
|
# coding = utf-8
# @time : 2019/6/30 6:41 PM
# @author : alchemistlee
# @fileName: filter_long_tk.py
# @abstract:
if __name__ == '__main__':
    # Parallel-corpus filter: drop zh/en sentence pairs whose Chinese side
    # has more than 100 (BPE) tokens; write survivors to the *-filter3 files.
    input_zh = '/root/workspace/translate_data/my_corpus_v6.zh-cut.processed6-bpe-v6-2-.test'
    input_en = '/root/workspace/translate_data/my_corpus_v6.en.tok.processed6-bpe-v6-2-.test'
    out_zh = '/root/workspace/translate_data/my_corpus_v6.zh-cut.processed6-bpe-v6-2-filter3.test'
    out_en = '/root/workspace/translate_data/my_corpus_v6.en.tok.processed6-bpe-v6-2-filter3.test'
    # NOTE(review): zip() stops at the shorter file, so a length mismatch
    # between the two inputs is silently truncated -- confirm inputs align.
    with open(input_zh,'r') as i_zh, open(input_en,'r') as i_en, open(out_zh,'w') as o_zh, open(out_en, 'w') as o_en:
        for row_zh, row_en in zip(i_zh, i_en):
            zh_toks = row_zh.split()
            # Length test is on the Chinese side only.
            if len(zh_toks) > 100:
                continue
            o_zh.write(row_zh.strip()+'\n')
            o_en.write(row_en.strip()+'\n')
|
from flask import (Flask,
make_response,
redirect,
render_template,
request,
Blueprint,
abort)
from ..settings import settings, backend
from .utils import StaticFilesFlask
import os
import math
import urllib
from datetime import date
# Base template context shared by the view handlers below; 'active_page'
# is used to highlight the current item in the navigation.
context = {
    'active_page' : '',
}
def get_app(settings):
    """
    Build and configure the frontend Flask application.

    Chooses raw vs. optimized static folders depending on the 'debug'
    setting, registers the 404 handler, the dynamically-assembled
    /js/boot.js endpoint and the catch-all webapp route, then applies
    configure() and returns the app.
    """
    debug = settings.get('debug')
    # In debug mode serve the raw static folder, otherwise the optimized one.
    static_folder = os.path.join(settings.get('frontend.path'),
                                 settings.get('frontend.static_folder') if debug else
                                 settings.get('frontend.optimized_static_folder')
                                 )
    #we subclass the Flask class to add more than one directory for static files
    app = StaticFilesFlask(
        __name__,
        static_url_path=settings.get('frontend.static_url'),
        static_folder=static_folder,
        template_folder=os.path.join(settings.get('frontend.path'),
                                     settings.get('frontend.template_folder')),
    )
    # Context handed to the single-page app template (app.html).
    webapp_context = {
        'DEBUG' : debug,
        'website_title' : settings.get('website_title','QuantifiedCode'),
        'static_url' : '{}{}'.format(settings.get('frontend.url'), settings.get('frontend.static_url')),
        'app_url' : settings.get('frontend.app_url')
    }
    @app.errorhandler(404)
    def redirect_to_index(e):
        #todo: we should display a normal 404 page instead of a redirect, since redirecting
        #can yield to errors and hard-to-debug output with e.g. missing JS files...
        context['error'] = e
        return render_template("404.html", **context), 404
    @app.route('{}/js/boot.js'.format(settings.get('frontend.static_url')))
    def boot_js():
        """
        Returns the main JS file required to run the frontend.
        For the development version, this just returns the config.js and the environment as well
        as plugin settings. All other modules will then be loaded separately.
        For the production version, this concatenates the plugin boot scripts, the main boot script,
        and all settings files.
        """
        def read_file(filename):
            # NOTE(review): .decode() on a text-mode read implies Python 2;
            # under Python 3 this raises AttributeError -- confirm runtime.
            with open(filename,'r') as input_file:
                return input_file.read().decode('utf-8')
        settings_context = context.copy()
        settings_context['url'] = settings.get('frontend.url')
        env_settings = render_template("settings.js", **settings_context)
        plugins_context = {'plugin_modules' : [], 'plugins' : {}}
        #we add the plugin settings
        for plugin in settings.get('plugins'):
            plugin_config = settings.load_plugin_config(plugin)
            if plugin_config.get('frontend') and plugin_config['frontend'].get('settings'):
                plugins_context['plugins'][plugin] = plugin_config['frontend']['settings']
                plugins_context['plugin_modules'].append(plugin_config['frontend']['settings']['settingsModule'])
        #we add the settings modules of the plugins to the require list for the core settings
        plugins_context['plugin_modules'] = ','.join(['"{}"'.format(module)
                                                      for module in plugins_context['plugin_modules']])
        plugin_settings = u"\n"+render_template("plugins.js", **plugins_context)
        if debug:
            # Development: require.js + config + settings; modules load separately.
            content = "\n".join([
                read_file(os.path.join(static_folder,'bower_components/requirejs/require.js')),
                read_file(os.path.join(static_folder,'js/config.js')),
                env_settings,
                plugin_settings,
                u'\nrequire(["main"],function(main){});\n'
            ])
        else:
            # Production: concatenate each plugin's minified boot script.
            plugin_boot_scripts = ""
            for plugin in settings.get('plugins'):
                plugin_config = settings.load_plugin_config(plugin)
                if plugin_config.get('frontend') and plugin_config['frontend'].get('optimized_static_files'):
                    plugin_boot_scripts += read_file(os.path.join(plugin_config['frontend']['optimized_static_files'],'js/boot.min.js'))
            content = "\n".join([
                read_file(os.path.join(static_folder,'js/require.min.js')),
                plugin_boot_scripts,
                env_settings,
                plugin_settings,
                read_file(os.path.join(static_folder,'js/boot.min.js'))
            ])
        response = make_response(content)
        response.mimetype = 'application/javascript'
        return response
    @app.route('/',defaults = {'path' : ''})
    @app.route('/<path:path>')
    def webapp(path):
        # Catch-all: every non-static path serves the single-page app shell.
        return make_response(render_template("app.html", **webapp_context))
    configure(app, settings)
    return app
def configure(app, settings):
    """
    Apply runtime configuration to the frontend app.

    Sets the Flask DEBUG flag and secret key, then registers each
    plugin's static folder on the app: the raw `static_files` folder in
    debug mode, the `optimized_static_files` folder otherwise.
    """
    debug = settings.get('debug')
    app.config.update(DEBUG = debug)
    app.secret_key = settings.get('frontend.secret_key')
    for plugin_name in settings.get('plugins',{}):
        config = settings.load_plugin_config(plugin_name)
        if debug:
            #in development, we include non-optimized files
            if config.get('frontend') and config['frontend'].get('static_files'):
                app.add_static_folder(config['frontend']['static_files'])
        else:
            #for a production version, we include only optimized files
            #BUG FIX: this previously registered 'static_files' although the
            #guard (and the comment above) ask for 'optimized_static_files'.
            if config.get('frontend') and config['frontend'].get('optimized_static_files'):
                app.add_static_folder(config['frontend']['optimized_static_files'])
if __name__ == '__main__':
    app = get_app(settings)
    # BUG FIX: `debug` was referenced here but is only a local inside
    # get_app()/configure(), so this line raised NameError; read the
    # flag from the settings instead.
    app.run(debug=settings.get('debug'), host='0.0.0.0', port=8000, threaded=False)
|
from django.urls import path
from . import views
# URL routes for the wishlist app.
urlpatterns = [
    # Wishlist overview.
    path('', views.view_wishlist, name='view_wishlist'),
    # Add the car identified by <car_id> to the wishlist.
    path('add_to_wishlist/<car_id>/', views.add_to_wishlist,
         name='add_to_wishlist'),
    # Remove the car identified by <car_id> from the wishlist.
    path('remove_wishlist_item/<car_id>/', views.remove_wishlist_item,
         name='remove_wishlist_item'),
]
|
from unittest import TestCase
import create_graph as cg
class TestCreate_graph(TestCase):
def send_msg(self, channel, msg):
print("TECHIO> message --channel \"{}\" \"{}\"".format(channel, msg))
def success(self):
print("TECHIO> success true")
def fail(self):
print("TECHIO> success false")
def create_graph(self):
graph = {}
for v in self.vertices:
graph[v] = cg.City(v)
for e in self.edges:
graph[e[0]].add_neighbor(e[1], e[2])
graph[e[1]].add_neighbor(e[0], e[2])
return graph
def is_equal(self, v1, vuser):
if v1.name != vuser.name:
return "Name differs", False
set_succ = set(v1.neighbours)
set_user_succ = set(vuser.neighbours)
if len(set_succ) != len(set_user_succ):
return "Different number of neighbours", False
for s in set_user_succ:
if s not in set_succ:
return "Different neighbours", False
return "Perfect", True
def setUp(self):
self.vertices = {'A', 'B', 'C', 'D', 'E', 'F', 'G'}
# Attention : undirected graph
self.edges = [['A', 'B', 5],
['A', 'D', 3],
['A', 'E', 12],
['A', 'F', 5],
['B', 'D', 1],
['B', 'G', 2],
['C', 'E', 1],
['C', 'F', 16],
['D', 'E', 1],
['D', 'G', 1],
['E', 'F', 2]
]
self.correct_graph = self.create_graph()
#self.edges[2] = ['A', 'E', 13]
#self.vertices.add('Z')
self.graph_to_check = cg.create_graph(self.vertices, self.edges)
#self.vertices.remove('Z')
def test_create_graph_all_vertices(self):
#print("Test vertices names")
# retrieve all names
if not self.graph_to_check:
self.fail()
self.send_msg("Oops! ", "It seems the graph is not initialized at all")
return
try:
for key in self.correct_graph:
print(key)
self.assertIn(key, self.graph_to_check.keys(), "It seems the graph does not contain all nodes")
except AssertionError as e:
self.fail()
self.send_msg("Oops! ", e)
return
try:
for node in self.correct_graph.values():
msg, result = self.is_equal(node, self.graph_to_check[node.name])
print(msg)
self.assertTrue(result, "One node has a wrong neighbor")
self.success()
self.send_msg("Congrats! The graph of cities is properly initialized.")
except AssertionError as e:
self.fail()
self.send_msg("Oops! ", e) |
# coding=utf-8
'''
Fetch FMI open data using "fmiopendata"-library
https://github.com/pnuu/fmiopendata
pip install fmiopendata
numpy, requests, pandas, datetime, math, matplotlib, pathlib, os
Configured for 1 Sep - 30 Jun (the part of the year when snow on the ground is possible)!
'''
from createPaths import createPaths
from splitWinters import splitWinters
from checkAvailability import checkAvailability
from fetchFmiData import fetchFmiData
from toPandasDF import toPandasDF
from plotter import plotYear, plotSite, plotTimeseries
from returnMaster import returnMaster
from parameterSpecific import parameterSpecific
from interpolateNaNs import interpolateNaNs
from calcStat import calcRowStat, calcYearStat, deCompose
from createTimeseries import createTimeseries
from pathlib import Path
from datetime import datetime
import numpy as np
import pandas as pd
# Site specific analysis YES/NO
siteAnalysis = True
# Timeseries analysis YES/NO
timeseriesAnalysis = True
# Re-plot YES/NO
rePlotYears = True
rePlotSiteAnalysis = True
rePlotTSAnalysis = True # timeseries
rePlotDC = True # timeseries decomposition
# Ski Centers: Saariselkä, Levi, Ylläs/Pallas/Ollos, Pyhä/Luosto, Ruka, Syöte, Vuokatti, Kilpisjärvi
skiCenters = ['Saariselkä','Levi','Ylläs&Pallas&Ollos','Pyhä&Luosto','Ruka','Syöte','Vuokatti','Kilpisjärvi']
# And closest FMI-sites with snow records
# (index-aligned with skiCenters, est and excl below)
sites = ['Inari Saariselkä matkailukeskus','Kittilä kirkonkylä','Kittilä Kenttärova','Kemijärvi lentokenttä','Kuusamo Kiutaköngäs','Taivalkoski kirkonkylä','Sotkamo Kuolaniemi','Enontekiö Kilpisjärvi kyläkeskus']
# Establishment year / snow record availability of the FMI site
# Sotkamo 1989->2009
# Enontekiö 1951->1979
est = [1976, 2009, 2002, 2006, 1966, 2002, 2009, 1979] # established
# NOTE(review): [0] presumably means "no years excluded" for that site -- confirm.
excl = [[1976], [2009], [2002], [0], [1967], [2002], [0], [1982]] # years to exclude
# Example query timeperiod (for past winters)
# startTime = '2020-09-01T00:00:00'
# endTime = '2021-06-30T00:00:00'
# Example query timeperiod (for ongoing winter)
# startTime = '2020-09-01T00:00:00'
# endTime = '2021-02-20T00:00:00'
# Define timeperiod in winters
startWinter = 2017
endWinter = 2020
# Sanity check of the hand-edited configuration above.
assert endWinter > startWinter
years = str(startWinter)+'-'+str(endWinter)
# Define start and end of winter
startDay = '-09-01T00:00:00'
endDay = '-06-30T00:00:00'
# Generate data, pics, and sites folders
paths = createPaths(years)
pD = paths[0] # data
pP = paths[1] # pics
pS = paths[2] # sites
pSTS = paths[3] # time-series analysis
# Generate timeperiods for winters
[startTimes,endTimes] = splitWinters(startWinter,endWinter,startDay,endDay)
# Define parameter of interest
par = 'snow_aws' # snow cover
# Zip to dictionaries
siteToSki = dict(zip(sites, skiCenters))
siteToEst = dict(zip(sites, est))
# Fetch, transform and save year-by-year data and pics
for startTime,endTime in zip(startTimes,endTimes):
    # Choose sites based on data availability
    [avl_skiCenters,avl_sites] = checkAvailability(skiCenters,sites,startTime,endTime,est,excl)
    # Check if list is empty
    if not avl_sites:
        print("Empty year "+startTime[0:4]+"-"+endTime[0:4])
        continue
    else:
        print(startTime[0:4]+"-"+endTime[0:4])
        print(avl_sites)
    # Create unique name for yearly parameter query
    name = par+'_'+startTime+'_'+endTime
    # ':' is not filesystem-safe in the timestamps, replace it.
    name = name.replace(":","_")
    # If pickle-file exists, skip fetch and save, but recreate plots
    if Path('./'+str(pD)+'/'+name+'.pkl').is_file():
        print(name+' already exists')
        if rePlotYears:
            df = pd.read_pickle(Path('./'+str(pD)+'/'+name+'.pkl'))
            plotYear(df,avl_skiCenters,par,name,years,pP)
        continue
    # Fetch data as a list of queries
    fmiData = fetchFmiData(avl_sites,startTime,endTime)
    # Cumulate all records of parameter into a single pandas dataframe
    df = toPandasDF(fmiData,avl_sites)
    # Save to pickle-files (years/data-folder)
    df.to_pickle('./'+str(pD)+'/'+name+'.pkl')
    # Save yearly plots separately (years/pics-folder)
    # NOTE(review): broad except keeps the fetch loop alive if plotting
    # fails; the error is only printed.
    try:
        plotYear(df,avl_skiCenters,par,name,years,pP)
    except Exception as e:
        print(e)
    # Delete dataframe to free space immediately
    del(df)
if siteAnalysis:
    # Fill sites with yearly (winter) data
    master = returnMaster(sites,startWinter,pD)
    # Do parameter specific tricks (if any)
    master = parameterSpecific(master,par)
    # Fill NaNs using linear interpolation
    master = interpolateNaNs(master)
    # Calculate statistics columns
    master = calcRowStat(master)
    # Plot site specific statistics
    if rePlotSiteAnalysis:
        plotSite(master,par,startWinter,endWinter,siteToSki,siteToEst,pSTS)
    # Delete master file to free space
    del(master)
if timeseriesAnalysis:
    # Create timeseries-dataframes
    ts = createTimeseries(sites,pD)
    # Plot timeseries raw
    if rePlotTSAnalysis:
        plotTimeseries(ts,par,startWinter,endWinter,siteToSki,siteToEst,pSTS,'unprocessed')
    # Interpolate missing values
    ts = interpolateNaNs(ts)
    # Plot timeseries raw interpolated
    if rePlotTSAnalysis:
        plotTimeseries(ts,par,startWinter,endWinter,siteToSki,siteToEst,pSTS,'processed')
    # Decompose timeseries into statsmodels and plot
    models = deCompose(ts,par,pSTS,rePlotDC)
    # Delete timeseries to free space
    # del(ts)
|
import numpy as np
import maps
# ------------------------------
# Obsolete, delete soon (04/24/2014)
# def qe_cov_fill_helper( qe1, qe2, cfft, f1, f2, switch_12=False, switch_34=False, conj_12=False, conj_34=False ):
# lx, ly = cfft.get_lxly()
# l = np.sqrt(lx**2 + ly**2)
# psi = np.arctan2(lx, -ly)
# if f1.shape != l.shape:
# assert( len(f1.shape) == 1 )
# f1 = np.interp( l.flatten(), np.arange(0, len(f1)), f1, right=0 ).reshape(l.shape)
# if f2.shape != l.shape:
# assert( len(f2.shape) == 1 )
# f2 = np.interp( l.flatten(), np.arange(0, len(f2)), f2, right=0 ).reshape(l.shape)
# i0_12 = { False : 0, True : 1 }[switch_12]; i0_34 = { False : 0, True : 1 }[switch_34]
# i1_12 = { False : 1, True : 0 }[switch_12]; i1_34 = { False : 1, True : 0 }[switch_34]
# cfunc_12 = { False : lambda v : v, True : lambda v : np.conj(v) }[conj_12]
# cfunc_34 = { False : lambda v : v, True : lambda v : np.conj(v) }[conj_34]
# fft = cfft.fft
# for i in xrange(0, qe1.ntrm):
# for j in xrange(0, qe2.ntrm):
# fft[:,:] += np.fft.fft2(
# np.fft.ifft2(
# cfunc_12(qe1.wl[i][i0_12](l, lx, ly)) *
# cfunc_34(qe2.wl[j][i0_34](l, lx, ly)) * f1 *
# np.exp(+1.j*((-1)**(conj_12)*qe1.sl[i][i0_12]+(-1)**(conj_34)*qe2.sl[j][i0_34])*psi) ) *
# np.fft.ifft2(
# cfunc_12(qe1.wl[i][i1_12](l, lx, ly)) *
# cfunc_34(qe2.wl[j][i1_34](l, lx, ly)) * f2 *
# np.exp(+1.j*((-1)**(conj_12)*qe1.sl[i][i1_12]+(-1)**(conj_34)*qe2.sl[j][i1_34])*psi) )
# ) * ( cfunc_12(qe1.wl[i][2](l, lx, ly)) *
# cfunc_34(qe2.wl[j][2](l, lx, ly)) *
# np.exp(-1.j*((-1)**(conj_12)*qe1.sl[i][2]+(-1)**(conj_34)*qe2.sl[j][2])*psi) ) * 0.25 / (cfft.dx * cfft.dy)
# return cfft
def pad_ft(a, npad=2):
    '''
    Pad an array in Fourier-space for FFT convolutions.

    Returns an (nx*npad, ny*npad) array: the fftshifted input is placed
    in the low corner and rolled back so the zero-frequency layout
    follows np.fft conventions.  npad=1 returns the input unchanged.
    '''
    if npad == 1: return a
    nx, ny = a.shape
    p = np.zeros([nx * npad, ny * npad], dtype=a.dtype)
    p[0:nx, 0:ny] = np.fft.fftshift(a)
    # BUG FIX: use floor division -- under Python 3 `-nx / 2` is a float
    # and np.roll raises TypeError; `-nx // 2` gives the identical
    # (floored) shift that Python 2 integer division produced.
    p = np.roll(np.roll(p, -nx // 2, axis=0), -ny // 2, axis=1)
    return p
def unpad_ft(a, npad=2):
    '''
    Un-pad an array in Fourier-space.

    Inverse layout operation of pad_ft: rolls the padded spectrum,
    keeps the (nx, ny) block, and rolls it back into np.fft layout.
    npad=1 returns the input unchanged.
    '''
    if npad == 1: return a
    nx_pad, ny_pad = a.shape
    nx = int(nx_pad / npad)
    ny = int(ny_pad / npad)
    # BUG FIX: floor division -- `nx / 2` is a float under Python 3 and
    # np.roll raises TypeError; `//` is identical under Python 2.
    return np.roll(
        np.roll(
            (np.roll(np.roll(a, nx // 2, axis=0), ny // 2, axis=1)[0:nx, 0:ny]),
            nx // 2,
            axis=0),
        ny // 2,
        axis=1)
def iconvolve_padded(f, g, npad=2):
    '''
    Calculate the convolution:
       ret(L) = iint{d^2\vec{l} f(l) \times g(L-l)}
    by multiplying the two fields on an npad-times padded real-space
    grid and transforming back.
    '''
    f_real = np.fft.ifft2(pad_ft(f, npad=npad))
    g_real = np.fft.ifft2(pad_ft(g, npad=npad))
    product_ft = np.fft.fft2(f_real * g_real)
    return unpad_ft(product_ft, npad=npad) * npad**2
def qe_cov_fill_helper(qe1,
                       qe2,
                       cfft,
                       f1,
                       f2,
                       switch_12=False,
                       switch_34=False,
                       conj_12=False,
                       conj_34=False,
                       npad=None):
    '''
    Calculate the covariance between two estimators.
    When used to calculate the response, the return value is half of the full response.

    qe1, qe2     : estimator objects carrying ntrm / wl / sl tables.
    cfft         : Fourier-grid object; its .fft array is accumulated
                   in place and the same object is returned.
    f1, f2       : spectra/filters; 1-D inputs are interpolated onto the
                   2-D |l| grid (zero beyond the tabulated range).
    switch_12/34 : swap which weight leg of qe1/qe2 multiplies f1 vs f2.
    conj_12/34   : conjugate the corresponding weights and flip the sign
                   of their spins (the (-1)**conj factors below).
    npad         : FFT-convolution padding factor (see default logic).
    '''
    if npad == None: # return the maximum npad of qe1 or qe2. If either has no specified npad, default to 2.
        if hasattr(qe1, 'npad_conv'): npad_qe1 = qe1.npad_conv
        else: npad_qe1 = 2
        if hasattr(qe2, 'npad_conv'): npad_qe2 = qe2.npad_conv
        else: npad_qe2 = 2
        npad = max(npad_qe1, npad_qe2)
        if npad < 1: npad = 1 # never less than 1
    if npad != 2:
        print "lensing.qest.qe_cov_fill_helper(): npad is not equal to 2! I hope you know what you are doing..."
    lx, ly = cfft.get_lxly()
    l = np.sqrt(lx**2 + ly**2)
    psi = np.arctan2(lx, -ly)
    nx, ny = l.shape
    # Interpolate 1-D spectra onto the 2-D multipole grid if necessary.
    if f1.shape != l.shape:
        assert (len(f1.shape) == 1)
        f1 = np.interp(
            l.flatten(), np.arange(0, len(f1)), f1, right=0).reshape(l.shape)
    if f2.shape != l.shape:
        assert (len(f2.shape) == 1)
        f2 = np.interp(
            l.flatten(), np.arange(0, len(f2)), f2, right=0).reshape(l.shape)
    # Index selectors: which leg (0 or 1) of each estimator pairs with f1/f2.
    i0_12 = {
        False: 0,
        True: 1
    }[switch_12]
    i0_34 = {
        False: 0,
        True: 1
    }[switch_34]
    i1_12 = {
        False: 1,
        True: 0
    }[switch_12]
    i1_34 = {
        False: 1,
        True: 0
    }[switch_34]
    # Optional complex conjugation of each estimator's weights.
    cfunc_12 = {False: lambda v: v, True: lambda v: np.conj(v)}[conj_12]
    cfunc_34 = {False: lambda v: v, True: lambda v: np.conj(v)}[conj_34]
    fft = cfft.fft
    # Accumulate every (term of qe1) x (term of qe2) pair as a padded
    # FFT convolution weighted by the l-independent H(L) factors.
    for i in xrange(0, qe1.ntrm):
        for j in xrange(0, qe2.ntrm):
            term1 = (cfunc_12(qe1.wl[i][i0_12](l, lx, ly)) * cfunc_34(
                qe2.wl[j][i0_34](l, lx, ly)) * f1 * np.exp(
                    +1.j * ((-1)**(conj_12) * qe1.sl[i][i0_12] + (-1)**
                            (conj_34) * qe2.sl[j][i0_34]) * psi))
            term2 = (cfunc_12(qe1.wl[i][i1_12](l, lx, ly)) * cfunc_34(
                qe2.wl[j][i1_34](l, lx, ly)) * f2 * np.exp(
                    +1.j * ((-1)**(conj_12) * qe1.sl[i][i1_12] + (-1)**
                            (conj_34) * qe2.sl[j][i1_34]) * psi))
            fft[:, :] += (iconvolve_padded(term1, term2, npad=npad) * (
                cfunc_12(qe1.wl[i][2](l, lx, ly)) * cfunc_34(
                    qe2.wl[j][2](l, lx, ly)) * np.exp(-1.j * (
                        (-1)**(conj_12) * qe1.sl[i][2] + (-1)**
                        (conj_34) * qe2.sl[j][2]) * psi)) * 0.25 /
                          (cfft.dx * cfft.dy))
    return cfft
class qest():
def __init__(self):
pass
def eval(self, r1, r2, npad=None):
'''
Evaluate the quadradic estimator of \phi from fields r1 and r2
'''
if hasattr(r1, 'get_cfft'):
r1 = r1.get_cfft()
if hasattr(r2, 'get_cfft'):
r2 = r2.get_cfft()
assert (r1.compatible(r2))
if npad == None:
if self.npad_conv: npad = self.npad_conv
else: npad = 2
cfft = maps.cfft(r1.nx, r1.dx, ny=r1.ny, dy=r1.dy)
lx, ly = cfft.get_lxly()
l = np.sqrt(lx**2 + ly**2)
psi = np.arctan2(lx, -ly)
fft = cfft.fft
for i in xrange(0, self.ntrm):
term1 = self.wl[i][0](l, lx, ly) * r1.fft * np.exp(
+1.j * self.sl[i][0] * psi)
term2 = self.wl[i][1](l, lx, ly) * r2.fft * np.exp(
+1.j * self.sl[i][1] * psi)
fft[:, :] += (iconvolve_padded(term1, term2, npad=npad) * (
self.wl[i][2](l, lx, ly) * np.exp(-1.j * self.sl[i][2] * psi)
) * 0.5 / np.sqrt(cfft.dx * cfft.dy) * np.sqrt(cfft.nx * cfft.ny))
# # Original code ---------------------
# fft_orig = fft*0.
# for i in xrange(0, self.ntrm):
# fft_orig[:,:] += np.fft.fft2(
# np.fft.ifft2(
# self.wl[i][0](l, lx, ly) * r1.fft *
# np.exp(+1.j*self.sl[i][0]*psi) ) *
# np.fft.ifft2(
# self.wl[i][1](l, lx, ly) * r2.fft *
# np.exp(+1.j*self.sl[i][1]*psi) )
# ) * ( self.wl[i][2](l, lx, ly) *
# np.exp(-1.j*self.sl[i][2]*psi) ) * 0.5 / np.sqrt(cfft.dx * cfft.dy) * np.sqrt(cfft.nx * cfft.ny)
# # -----------------------------------
return cfft
def fill_resp(self, qe2, cfft, f1, f2, npad=2):
print "fill_resp", qe2
cfft.fft[:, :] = 0.0
qe_cov_fill_helper(self, qe2, cfft, f1, f2, npad=npad)
cfft.fft[:, :] *= 2.0 # Multiply by 2 because qe_cov_fill_helper returns 1/2 the response.
return cfft
def fill_clqq(self, cfft, f11, f12, f22, npad=2):
cfft.fft[:, :] = 0.0
qe_cov_fill_helper(
self,
self,
cfft,
f11,
f22,
switch_34=False,
conj_34=True,
npad=npad)
qe_cov_fill_helper(
self,
self,
cfft,
f12,
f12,
switch_34=True,
conj_34=True,
npad=npad)
return cfft
def get_sl1(self, i):
return self.sl[i][0]
def get_sl2(self, i):
return self.sl[i][1]
def get_slL(self, i):
return self.sl[i][2]
def get_wl1(self, i, l, lx, ly):
return self.wl[i][0](l, lx, ly)
def get_wl2(self, i, l, lx, ly):
return self.wl[i][1](l, lx, ly)
def get_wlL(self, i, l, lx, ly):
return self.wl[i][2](l, lx, ly)
#############################
# Classes for calculating quadratic estimates.
# In order to write the QE convolutions as FFT's we separate the equations into terms,
# where each term can be expressed as components of the form
# F(l) * G(L-l) * H(L)
# Here, l is the integration variable.
# components self.wl[:][0] correspond to F(l)
# components self.wl[:][1] correspond to G(L-l)
# components self.wl[:][2] are independend of l, corresponding to H(L)
#
# self.npad_conv is the factor for padding the convolution-by-FFT calculations
#############################
class qest_plm_TT_s0(qest):
    '''
    TT lensing-potential estimator in spin-0 (Cartesian) form: all
    spins are zero and the lx/ly components are carried explicitly by
    the weights.  See the block comment above for the F(l)*G(L-l)*H(L)
    term decomposition.
    '''
    def __init__(self, cltt):
        self.cltt = cltt
        self.ntrm = 4
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: [0, 0, 0] for i in xrange(0, self.ntrm)}
        self.wl[0][0] = self.wc_lx
        self.wl[0][1] = self.wo_m1
        self.wl[0][2] = self.wo_lx
        self.wl[1][0] = self.wc_ly
        self.wl[1][1] = self.wo_m1
        self.wl[1][2] = self.wo_ly
        self.wl[2][0] = self.wo_m1
        self.wl[2][1] = self.wc_lx
        self.wl[2][2] = self.wo_lx
        self.wl[3][0] = self.wo_m1
        self.wl[3][1] = self.wc_ly
        self.wl[3][2] = self.wo_ly
        self.npad_conv = 2
    def wo_m1(self, l, lx, ly):
        return 1.0
    def wo_lx(self, l, lx, ly):
        return lx
    def wc_lx(self, l, lx, ly):
        # cltt interpolated at |l| (zero beyond the tabulated range), times lx.
        return np.interp(
            l, np.arange(0, len(self.cltt)), self.cltt, right=0) * lx
    def wo_ly(self, l, lx, ly):
        return ly
    def wc_ly(self, l, lx, ly):
        return np.interp(
            l, np.arange(0, len(self.cltt)), self.cltt, right=0) * ly
class qest_plm_TT(qest):
    '''
    TT lensing-potential estimator in spin form: four terms pairing a
    cltt-weighted leg (wc_ml, spin +/-1) with an unweighted leg.
    '''
    def __init__(self, cltt):
        self.cltt = cltt
        self.ntrm = 4
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        self.wl[0][0] = self.wc_ml
        self.sl[0][0] = +1
        self.wl[0][1] = self.wo_d2
        self.sl[0][1] = +0
        self.wl[0][2] = self.wo_ml
        self.sl[0][2] = +1
        self.wl[1][0] = self.wc_ml
        self.sl[1][0] = -1
        self.wl[1][1] = self.wo_d2
        self.sl[1][1] = +0
        self.wl[1][2] = self.wo_ml
        self.sl[1][2] = -1
        self.wl[2][0] = self.wo_d2
        self.sl[2][0] = +0
        self.wl[2][1] = self.wc_ml
        self.sl[2][1] = +1
        self.wl[2][2] = self.wo_ml
        self.sl[2][2] = +1
        self.wl[3][0] = self.wo_d2
        self.sl[3][0] = +0
        self.wl[3][1] = self.wc_ml
        self.sl[3][1] = -1
        self.wl[3][2] = self.wo_ml
        self.sl[3][2] = -1
        self.npad_conv = 2
    def wo_d2(self, l, lx, ly):
        return -0.5
    def wo_ml(self, l, lx, ly):
        return l
    def wc_ml(self, l, lx, ly):
        # cltt interpolated at |l| (zero beyond the tabulated range), times |l|.
        return np.interp(
            l, np.arange(0, len(self.cltt)), self.cltt, right=0) * l
class qest_xlm_TT(qest):
    '''
    xlm variant of the TT estimator: same table layout as qest_plm_TT
    but with imaginary -/+0.5j prefactors (wo_d2 / wo_n2).
    NOTE(review): unlike most siblings this class sets no
    self.npad_conv; eval()'s padding default depends on it -- confirm.
    '''
    def __init__(self, cltt):
        self.cltt = cltt
        self.ntrm = 4
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        self.wl[0][0] = self.wc_ml
        self.sl[0][0] = +1
        self.wl[0][1] = self.wo_d2
        self.sl[0][1] = +0
        self.wl[0][2] = self.wo_ml
        self.sl[0][2] = +1
        self.wl[1][0] = self.wc_ml
        self.sl[1][0] = -1
        self.wl[1][1] = self.wo_n2
        self.sl[1][1] = +0
        self.wl[1][2] = self.wo_ml
        self.sl[1][2] = -1
        self.wl[2][0] = self.wo_d2
        self.sl[2][0] = +0
        self.wl[2][1] = self.wc_ml
        self.sl[2][1] = +1
        self.wl[2][2] = self.wo_ml
        self.sl[2][2] = +1
        self.wl[3][0] = self.wo_n2
        self.sl[3][0] = +0
        self.wl[3][1] = self.wc_ml
        self.sl[3][1] = -1
        self.wl[3][2] = self.wo_ml
        self.sl[3][2] = -1
    def wo_d2(self, l, lx, ly):
        return -0.5j
    def wo_n2(self, l, lx, ly):
        return +0.5j
    def wo_ml(self, l, lx, ly):
        return l
    def wc_ml(self, l, lx, ly):
        return np.interp(
            l, np.arange(0, len(self.cltt)), self.cltt, right=0) * l
class qest_plm_TE(qest):
    '''
    Lensing-potential estimator from the TE cross-spectrum (clte): four
    "t de" terms (spin +/-3, +/-1 with -0.25 prefactor) plus two
    "dt e" terms (-0.5 prefactor).
    '''
    def __init__(self, clte):
        self.clte = clte
        self.ntrm = 6
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        # t de
        self.wl[0][0] = self.wc_ml
        self.sl[0][0] = +3
        self.wl[0][1] = self.wo_d4
        self.sl[0][1] = -2
        self.wl[0][2] = self.wo_ml
        self.sl[0][2] = +1
        self.wl[1][0] = self.wc_ml
        self.sl[1][0] = -3
        self.wl[1][1] = self.wo_d4
        self.sl[1][1] = +2
        self.wl[1][2] = self.wo_ml
        self.sl[1][2] = -1
        self.wl[2][0] = self.wc_ml
        self.sl[2][0] = -1
        self.wl[2][1] = self.wo_d4
        self.sl[2][1] = +2
        self.wl[2][2] = self.wo_ml
        self.sl[2][2] = +1
        self.wl[3][0] = self.wc_ml
        self.sl[3][0] = +1
        self.wl[3][1] = self.wo_d4
        self.sl[3][1] = -2
        self.wl[3][2] = self.wo_ml
        self.sl[3][2] = -1
        # dt e
        self.wl[4][0] = self.wo_d2
        self.sl[4][0] = +0
        self.wl[4][1] = self.wc_ml
        self.sl[4][1] = +1
        self.wl[4][2] = self.wo_ml
        self.sl[4][2] = +1
        self.wl[5][0] = self.wo_d2
        self.sl[5][0] = +0
        self.wl[5][1] = self.wc_ml
        self.sl[5][1] = -1
        self.wl[5][2] = self.wo_ml
        self.sl[5][2] = -1
        self.npad_conv = 2
    def wo_d2(self, l, lx, ly):
        return -0.50
    def wo_d4(self, l, lx, ly):
        return -0.25
    def wo_ml(self, l, lx, ly):
        return l
    def wc_ml(self, l, lx, ly):
        # clte interpolated at |l| (zero beyond the tabulated range), times |l|.
        return np.interp(
            l, np.arange(0, len(self.clte)), self.clte, right=0) * l
class qest_plm_ET(qest_plm_TE):
    '''
    Same terms as qest_plm_TE with the two input legs swapped
    (wl/sl indices 0 and 1 exchanged); weight methods are inherited.
    '''
    def __init__(self, clte):
        self.clte = clte
        self.ntrm = 6
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        # t de
        self.wl[0][1] = self.wc_ml
        self.sl[0][1] = +3
        self.wl[0][0] = self.wo_d4
        self.sl[0][0] = -2
        self.wl[0][2] = self.wo_ml
        self.sl[0][2] = +1
        self.wl[1][1] = self.wc_ml
        self.sl[1][1] = -3
        self.wl[1][0] = self.wo_d4
        self.sl[1][0] = +2
        self.wl[1][2] = self.wo_ml
        self.sl[1][2] = -1
        self.wl[2][1] = self.wc_ml
        self.sl[2][1] = -1
        self.wl[2][0] = self.wo_d4
        self.sl[2][0] = +2
        self.wl[2][2] = self.wo_ml
        self.sl[2][2] = +1
        self.wl[3][1] = self.wc_ml
        self.sl[3][1] = +1
        self.wl[3][0] = self.wo_d4
        self.sl[3][0] = -2
        self.wl[3][2] = self.wo_ml
        self.sl[3][2] = -1
        # dt e
        self.wl[4][1] = self.wo_d2
        self.sl[4][1] = +0
        self.wl[4][0] = self.wc_ml
        self.sl[4][0] = +1
        self.wl[4][2] = self.wo_ml
        self.sl[4][2] = +1
        self.wl[5][1] = self.wo_d2
        self.sl[5][1] = +0
        self.wl[5][0] = self.wc_ml
        self.sl[5][0] = -1
        self.wl[5][2] = self.wo_ml
        self.sl[5][2] = -1
        self.npad_conv = 2
class qest_plm_TB(qest):
    '''
    Lensing-potential estimator from the TB correlation; clte is the
    1-D T-E spectrum used for the weighted leg (wc_ml).  Four terms
    with imaginary +/-0.25j prefactors.
    '''
    def __init__(self, clte):
        self.clte = clte
        self.ntrm = 4
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        # t de
        self.wl[0][0] = self.wc_ml
        self.sl[0][0] = +3
        self.wl[0][1] = self.wo_di
        self.sl[0][1] = -2
        self.wl[0][2] = self.wo_ml
        self.sl[0][2] = +1
        self.wl[1][0] = self.wc_ml
        self.sl[1][0] = -3
        self.wl[1][1] = self.wo_mi
        self.sl[1][1] = +2
        self.wl[1][2] = self.wo_ml
        self.sl[1][2] = -1
        self.wl[2][0] = self.wc_ml
        self.sl[2][0] = -1
        self.wl[2][1] = self.wo_mi
        self.sl[2][1] = +2
        self.wl[2][2] = self.wo_ml
        self.sl[2][2] = +1
        self.wl[3][0] = self.wc_ml
        self.sl[3][0] = +1
        self.wl[3][1] = self.wo_di
        self.sl[3][1] = -2
        self.wl[3][2] = self.wo_ml
        self.sl[3][2] = -1
        self.npad_conv = 2
    def wo_di(self, l, lx, ly):
        return +0.25j
    def wo_mi(self, l, lx, ly):
        return -0.25j
    def wo_ml(self, l, lx, ly):
        return l
    def wc_ml(self, l, lx, ly):
        return np.interp(
            l, np.arange(0, len(self.clte)), self.clte, right=0) * l
class qest_plm_BT(qest_plm_TB):
    '''
    Same terms as qest_plm_TB with the two input legs swapped
    (wl/sl indices 0 and 1 exchanged); weight methods are inherited.
    '''
    def __init__(self, clte):
        self.clte = clte
        self.ntrm = 4
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        # t de
        self.wl[0][1] = self.wc_ml
        self.sl[0][1] = +3
        self.wl[0][0] = self.wo_di
        self.sl[0][0] = -2
        self.wl[0][2] = self.wo_ml
        self.sl[0][2] = +1
        self.wl[1][1] = self.wc_ml
        self.sl[1][1] = -3
        self.wl[1][0] = self.wo_mi
        self.sl[1][0] = +2
        self.wl[1][2] = self.wo_ml
        self.sl[1][2] = -1
        self.wl[2][1] = self.wc_ml
        self.sl[2][1] = -1
        self.wl[2][0] = self.wo_mi
        self.sl[2][0] = +2
        self.wl[2][2] = self.wo_ml
        self.sl[2][2] = +1
        self.wl[3][1] = self.wc_ml
        self.sl[3][1] = +1
        self.wl[3][0] = self.wo_di
        self.sl[3][0] = -2
        self.wl[3][2] = self.wo_ml
        self.sl[3][2] = -1
        self.npad_conv = 2
class qest_plm_EE(qest):
    '''
    Lensing-potential estimator from the EE spectrum (clee): eight
    terms, each pairing a clee-weighted leg (wc_ml) with a -0.25
    prefactor leg at spins +/-3, +/-1 and +/-2.
    '''
    def __init__(self, clee):
        self.clee = clee
        self.ntrm = 8
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        self.wl[0][0] = self.wo_d4
        self.sl[0][0] = -2
        self.wl[0][1] = self.wc_ml
        self.sl[0][1] = +3
        self.wl[0][2] = self.wo_ml
        self.sl[0][2] = +1
        self.wl[1][0] = self.wo_d4
        self.sl[1][0] = +2
        self.wl[1][1] = self.wc_ml
        self.sl[1][1] = -3
        self.wl[1][2] = self.wo_ml
        self.sl[1][2] = -1
        self.wl[2][0] = self.wc_ml
        self.sl[2][0] = +3
        self.wl[2][1] = self.wo_d4
        self.sl[2][1] = -2
        self.wl[2][2] = self.wo_ml
        self.sl[2][2] = +1
        self.wl[3][0] = self.wc_ml
        self.sl[3][0] = -3
        self.wl[3][1] = self.wo_d4
        self.sl[3][1] = +2
        self.wl[3][2] = self.wo_ml
        self.sl[3][2] = -1
        self.wl[4][0] = self.wo_d4
        self.sl[4][0] = +2
        self.wl[4][1] = self.wc_ml
        self.sl[4][1] = -1
        self.wl[4][2] = self.wo_ml
        self.sl[4][2] = +1
        self.wl[5][0] = self.wo_d4
        self.sl[5][0] = -2
        self.wl[5][1] = self.wc_ml
        self.sl[5][1] = +1
        self.wl[5][2] = self.wo_ml
        self.sl[5][2] = -1
        self.wl[6][0] = self.wc_ml
        self.sl[6][0] = -1
        self.wl[6][1] = self.wo_d4
        self.sl[6][1] = +2
        self.wl[6][2] = self.wo_ml
        self.sl[6][2] = +1
        self.wl[7][0] = self.wc_ml
        self.sl[7][0] = +1
        self.wl[7][1] = self.wo_d4
        self.sl[7][1] = -2
        self.wl[7][2] = self.wo_ml
        self.sl[7][2] = -1
        self.npad_conv = 2
    def wo_d4(self, l, lx, ly):
        return -0.25
    def wo_ml(self, l, lx, ly):
        return l
    def wc_ml(self, l, lx, ly):
        # clee interpolated at |l| (zero beyond the tabulated range), times |l|.
        return np.interp(
            l, np.arange(0, len(self.clee)), self.clee, right=0) * l
class qest_plm_EB(qest):
    '''
    Lensing-potential estimator from the EB correlation; clee weights
    the E leg (wc_ml).  Same table layout as qest_plm_TB but driven by
    the EE spectrum.
    '''
    def __init__(self, clee):
        self.clee = clee
        self.ntrm = 4
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        # t de
        self.wl[0][0] = self.wc_ml
        self.sl[0][0] = +3
        self.wl[0][1] = self.wo_di
        self.sl[0][1] = -2
        self.wl[0][2] = self.wo_ml
        self.sl[0][2] = +1
        self.wl[1][0] = self.wc_ml
        self.sl[1][0] = -3
        self.wl[1][1] = self.wo_mi
        self.sl[1][1] = +2
        self.wl[1][2] = self.wo_ml
        self.sl[1][2] = -1
        self.wl[2][0] = self.wc_ml
        self.sl[2][0] = -1
        self.wl[2][1] = self.wo_mi
        self.sl[2][1] = +2
        self.wl[2][2] = self.wo_ml
        self.sl[2][2] = +1
        self.wl[3][0] = self.wc_ml
        self.sl[3][0] = +1
        self.wl[3][1] = self.wo_di
        self.sl[3][1] = -2
        self.wl[3][2] = self.wo_ml
        self.sl[3][2] = -1
        self.npad_conv = 2
    def wo_di(self, l, lx, ly):
        return +0.25j
    def wo_mi(self, l, lx, ly):
        return -0.25j
    def wo_ml(self, l, lx, ly):
        return l
    def wc_ml(self, l, lx, ly):
        return np.interp(
            l, np.arange(0, len(self.clee)), self.clee, right=0) * l
class qest_plm_BE(qest_plm_EB):
    '''
    Same terms as qest_plm_EB with the two input legs swapped
    (wl/sl indices 0 and 1 exchanged); weight methods are inherited.
    '''
    def __init__(self, clee):
        self.clee = clee
        self.ntrm = 4
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        # t de
        self.wl[0][1] = self.wc_ml
        self.sl[0][1] = +3
        self.wl[0][0] = self.wo_di
        self.sl[0][0] = -2
        self.wl[0][2] = self.wo_ml
        self.sl[0][2] = +1
        self.wl[1][1] = self.wc_ml
        self.sl[1][1] = -3
        self.wl[1][0] = self.wo_mi
        self.sl[1][0] = +2
        self.wl[1][2] = self.wo_ml
        self.sl[1][2] = -1
        self.wl[2][1] = self.wc_ml
        self.sl[2][1] = -1
        self.wl[2][0] = self.wo_mi
        self.sl[2][0] = +2
        self.wl[2][2] = self.wo_ml
        self.sl[2][2] = +1
        self.wl[3][1] = self.wc_ml
        self.sl[3][1] = +1
        self.wl[3][0] = self.wo_di
        self.sl[3][0] = -2
        self.wl[3][2] = self.wo_ml
        self.sl[3][2] = -1
        self.npad_conv = 2
class qest_xlm_EB(qest):
    '''
    xlm variant of the EB estimator: same spins as qest_plm_EB but with
    real -/+0.25 prefactors (wo_di/wo_mi) and a different sign pattern
    across the four terms.
    '''
    def __init__(self, clee):
        self.clee = clee
        self.ntrm = 4
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        # t de
        self.wl[0][0] = self.wc_ml
        self.sl[0][0] = +3
        self.wl[0][1] = self.wo_di
        self.sl[0][1] = -2
        self.wl[0][2] = self.wo_ml
        self.sl[0][2] = +1
        self.wl[1][0] = self.wc_ml
        self.sl[1][0] = -3
        self.wl[1][1] = self.wo_di
        self.sl[1][1] = +2
        self.wl[1][2] = self.wo_ml
        self.sl[1][2] = -1
        self.wl[2][0] = self.wc_ml
        self.sl[2][0] = -1
        self.wl[2][1] = self.wo_mi
        self.sl[2][1] = +2
        self.wl[2][2] = self.wo_ml
        self.sl[2][2] = +1
        self.wl[3][0] = self.wc_ml
        self.sl[3][0] = +1
        self.wl[3][1] = self.wo_mi
        self.sl[3][1] = -2
        self.wl[3][2] = self.wo_ml
        self.sl[3][2] = -1
        self.npad_conv = 2
    def wo_di(self, l, lx, ly):
        return -0.25
    def wo_mi(self, l, lx, ly):
        return +0.25
    def wo_ml(self, l, lx, ly):
        return l
    def wc_ml(self, l, lx, ly):
        return np.interp(
            l, np.arange(0, len(self.clee)), self.clee, right=0) * l
class qest_blm_EP(qest):
    '''
    Estimator combining an EE-weighted leg (wc_ml, clee) with a
    phi-weighted leg (wp_ml, clpp); the l-independent factor carries
    the imaginary -/+0.25j prefactors at spin +/-2.
    '''
    def __init__(self, clee, clpp):
        self.clee = clee
        self.clpp = clpp
        self.ntrm = 4
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        # t de
        self.wl[0][0] = self.wc_ml
        self.sl[0][0] = +3
        self.wl[0][1] = self.wp_ml
        self.sl[0][1] = -1
        self.wl[0][2] = self.wo_di
        self.sl[0][2] = +2
        self.wl[1][0] = self.wc_ml
        self.sl[1][0] = -3
        self.wl[1][1] = self.wp_ml
        self.sl[1][1] = +1
        self.wl[1][2] = self.wo_mi
        self.sl[1][2] = -2
        self.wl[2][0] = self.wc_ml
        self.sl[2][0] = -1
        self.wl[2][1] = self.wp_ml
        self.sl[2][1] = -1
        self.wl[2][2] = self.wo_mi
        self.sl[2][2] = -2
        self.wl[3][0] = self.wc_ml
        self.sl[3][0] = +1
        self.wl[3][1] = self.wp_ml
        self.sl[3][1] = +1
        self.wl[3][2] = self.wo_di
        self.sl[3][2] = +2
        self.npad_conv = 2
    def wo_di(self, l, lx, ly):
        return -0.25j
    def wo_mi(self, l, lx, ly):
        return +0.25j
    def wc_ml(self, l, lx, ly):
        return np.interp(
            l, np.arange(0, len(self.clee)), self.clee, right=0) * l
    def wp_ml(self, l, lx, ly):
        return np.interp(
            l, np.arange(0, len(self.clpp)), self.clpp, right=0) * l
class qest_blm_EX(qest):
    '''
    Companion of qest_blm_EP with real -/+0.25 prefactors and a
    different prefactor/spin-sign pattern across the four terms.
    '''
    def __init__(self, clee, clpp):
        self.clee = clee
        self.clpp = clpp
        self.ntrm = 4
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        # t de
        self.wl[0][0] = self.wc_ml
        self.sl[0][0] = +3
        self.wl[0][1] = self.wp_ml
        self.sl[0][1] = -1
        self.wl[0][2] = self.wo_di
        self.sl[0][2] = +2
        self.wl[1][0] = self.wc_ml
        self.sl[1][0] = -3
        self.wl[1][1] = self.wp_ml
        self.sl[1][1] = +1
        self.wl[1][2] = self.wo_di
        self.sl[1][2] = -2
        self.wl[2][0] = self.wc_ml
        self.sl[2][0] = -1
        self.wl[2][1] = self.wp_ml
        self.sl[2][1] = -1
        self.wl[2][2] = self.wo_mi
        self.sl[2][2] = -2
        self.wl[3][0] = self.wc_ml
        self.sl[3][0] = +1
        self.wl[3][1] = self.wp_ml
        self.sl[3][1] = +1
        self.wl[3][2] = self.wo_mi
        self.sl[3][2] = +2
        self.npad_conv = 2
    def wo_di(self, l, lx, ly):
        return -0.25
    def wo_mi(self, l, lx, ly):
        return +0.25
    def wc_ml(self, l, lx, ly):
        return np.interp(
            l, np.arange(0, len(self.clee)), self.clee, right=0) * l
    def wp_ml(self, l, lx, ly):
        return np.interp(
            l, np.arange(0, len(self.clpp)), self.clpp, right=0) * l
class qest_tlm_TP(qest):
    '''
    Two-term estimator pairing a cltt-weighted leg (wc_ml) with a
    clpp-weighted leg (wp_ml) at spins +/-1; the l-independent factor
    is 1.
    '''
    def __init__(self, cltt, clpp):
        self.cltt = cltt
        self.clpp = clpp
        self.ntrm = 2
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        self.wl[0][0] = self.wc_ml
        self.sl[0][0] = +1
        self.wl[0][1] = self.wp_ml
        self.sl[0][1] = -1
        self.wl[0][2] = self.wo_d2
        self.sl[0][2] = +0
        self.wl[1][0] = self.wc_ml
        self.sl[1][0] = -1
        self.wl[1][1] = self.wp_ml
        self.sl[1][1] = +1
        self.wl[1][2] = self.wo_d2
        self.sl[1][2] = +0
        self.npad_conv = 2
    def wo_d2(self, l, lx, ly):
        return 1.
    def wo_ml(self, l, lx, ly):
        return l
    def wp_ml(self, l, lx, ly):
        return np.interp(
            l, np.arange(0, len(self.clpp)), self.clpp, right=0) * l
    def wc_ml(self, l, lx, ly):
        return np.interp(
            l, np.arange(0, len(self.cltt)), self.cltt, right=0) * l
class qest_tau_TT(qest):
    '''
    tau estimator from TT: two spin-0 terms pairing the cltt-weighted
    leg with a unit leg, overall factor -1.
    NOTE(review): no self.npad_conv is set here, unlike most siblings;
    eval()'s padding default depends on it -- confirm.
    '''
    def __init__(self, cltt):
        self.cltt = cltt
        self.ntrm = 2
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        self.wl[0][0] = self.wc
        self.sl[0][0] = +0
        self.wl[0][1] = self.wo_p1
        self.sl[0][1] = +0
        self.wl[0][2] = self.wo_m1
        self.sl[0][2] = +0
        self.wl[1][0] = self.wo_p1
        self.sl[1][0] = +0
        self.wl[1][1] = self.wc
        self.sl[1][1] = +0
        self.wl[1][2] = self.wo_m1
        self.sl[1][2] = +0
    def wo_p1(self, l, lx, ly):
        return +1.0
    def wo_m1(self, l, lx, ly):
        return -1.0
    def wc(self, l, lx, ly):
        # cltt interpolated at |l| (zero beyond the tabulated range).
        return np.interp(l, np.arange(0, len(self.cltt)), self.cltt, right=0)
class qest_tau_EB(qest):
    # Optical-depth (tau) quadratic estimator from an EE spectrum, for the
    # EB pairing.  Two terms of three legs; the +-2 entries in self.sl are
    # presumably spin indices -- confirm against the qest base class.
    def __init__(self, clee):
        self.clee = clee
        self.ntrm = 2
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        self.wl[0][0] = self.wc
        self.sl[0][0] = -2
        self.wl[0][1] = self.wo_p1
        self.sl[0][1] = +2
        self.wl[0][2] = self.wo_aa
        self.sl[0][2] = +0
        self.wl[1][0] = self.wc
        self.sl[1][0] = +2
        self.wl[1][1] = self.wo_m1
        self.sl[1][1] = -2
        self.wl[1][2] = self.wo_aa
        self.sl[1][2] = +0
    def wo_p1(self, l, lx, ly):
        # Unit leg weight.
        return +1.0
    def wo_m1(self, l, lx, ly):
        # Negative unit leg weight.
        return -1.0
    def wo_aa(self, l, lx, ly):
        # Imaginary overall factor 1/(2i); the commented alternative below
        # is the opposite sign convention.
        return 1.0 / (2j)
        #return -1.0/(2j)
    def wc(self, l, lx, ly):
        # EE spectrum interpolated at multipole l (0 past the table end).
        return np.interp(l, np.arange(0, len(self.clee)), self.clee, right=0)
class qest_tau_TB(qest):
    # Optical-depth (tau) quadratic estimator from a TE spectrum, for the
    # TB pairing.  Same structure as qest_tau_EB but weighted by clte.
    def __init__(self, clte):
        self.clte = clte
        self.ntrm = 2
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        self.wl[0][0] = self.wc
        self.sl[0][0] = -2
        self.wl[0][1] = self.wo_p1
        self.sl[0][1] = +2
        self.wl[0][2] = self.wo_aa
        self.sl[0][2] = +0
        self.wl[1][0] = self.wc
        self.sl[1][0] = +2
        self.wl[1][1] = self.wo_m1
        self.sl[1][1] = -2
        self.wl[1][2] = self.wo_aa
        self.sl[1][2] = +0
    def wo_p1(self, l, lx, ly):
        # Unit leg weight.
        return +1.0
    def wo_m1(self, l, lx, ly):
        # Negative unit leg weight.
        return -1.0
    def wo_aa(self, l, lx, ly):
        # Imaginary overall factor 1/(2i); the commented alternative below
        # is the opposite sign convention.
        return 1.0 / (2j)
        #return -1.0/(2j)
    def wc(self, l, lx, ly):
        # TE spectrum interpolated at multipole l (0 past the table end).
        return np.interp(l, np.arange(0, len(self.clte)), self.clte, right=0)
class qest_tau_EE(qest):
    # Optical-depth (tau) quadratic estimator from an EE spectrum, for the
    # EE pairing.  Four terms covering both (+2,-2) leg orderings with the
    # spectrum weight on either leg.
    def __init__(self, clee):
        self.clee = clee
        self.ntrm = 4
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        self.wl[0][0] = self.wc
        self.sl[0][0] = -2
        self.wl[0][1] = self.wo_p1
        self.sl[0][1] = +2
        self.wl[0][2] = self.wo_aa
        self.sl[0][2] = +0
        self.wl[1][0] = self.wc
        self.sl[1][0] = +2
        self.wl[1][1] = self.wo_p1
        self.sl[1][1] = -2
        self.wl[1][2] = self.wo_aa
        self.sl[1][2] = +0
        self.wl[2][0] = self.wo_p1
        self.sl[2][0] = -2
        self.wl[2][1] = self.wc
        self.sl[2][1] = +2
        self.wl[2][2] = self.wo_aa
        self.sl[2][2] = +0
        self.wl[3][0] = self.wo_p1
        self.sl[3][0] = +2
        self.wl[3][1] = self.wc
        self.sl[3][1] = -2
        self.wl[3][2] = self.wo_aa
        self.sl[3][2] = +0
    def wo_p1(self, l, lx, ly):
        # Unit leg weight.
        return +1.0
    def wo_aa(self, l, lx, ly):
        # Overall factor -1/2 shared by all four terms.
        return -0.5
    def wc(self, l, lx, ly):
        # EE spectrum interpolated at multipole l (0 past the table end).
        return np.interp(l, np.arange(0, len(self.clee)), self.clee, right=0)
class qest_tau_TE(qest):
    # Optical-depth (tau) quadratic estimator from a TE spectrum, for the
    # TE pairing.  Three terms: two spin +-2 terms plus one spin-0 term.
    def __init__(self, clte):
        self.clte = clte
        self.ntrm = 3
        self.wl = {i: {} for i in xrange(0, self.ntrm)}
        self.sl = {i: {} for i in xrange(0, self.ntrm)}
        self.wl[0][0] = self.wc
        self.sl[0][0] = -2
        self.wl[0][1] = self.wo_p1
        self.sl[0][1] = +2
        self.wl[0][2] = self.wo_aa
        self.sl[0][2] = +0
        self.wl[1][0] = self.wc
        self.sl[1][0] = +2
        self.wl[1][1] = self.wo_p1
        self.sl[1][1] = -2
        self.wl[1][2] = self.wo_aa
        self.sl[1][2] = +0
        self.wl[2][0] = self.wo_p1
        self.sl[2][0] = +0
        self.wl[2][1] = self.wc
        self.sl[2][1] = +0
        self.wl[2][2] = self.wo_m1
        self.sl[2][2] = +0
    def wo_p1(self, l, lx, ly):
        # Unit leg weight.
        return +1.0
    def wo_m1(self, l, lx, ly):
        # Negative unit leg weight.
        return -1.0
    def wo_aa(self, l, lx, ly):
        # Overall factor -1/2 for the spin +-2 terms.
        return -0.5
    def wc(self, l, lx, ly):
        # TE spectrum interpolated at multipole l (0 past the table end).
        return np.interp(l, np.arange(0, len(self.clte)), self.clte, right=0)
|
import os
from datetime import datetime
def get_current_time():
    """Read the current time from the system clock.

    Returns:
        str: the current local time formatted as ``%H:%M:%S``.
    """
    now = datetime.now()
    return now.strftime('%H:%M:%S')
def make_datetime_str(time_str):
    """
    Gets the current date (via ``sudo date``) and replaces its time field
    with the given time_str. Returns the combination as a string.
    Args:
        time_str (str): time to be set, in format %H:%M
    Returns:
        str: date string with the given time substituted in, in the shell
        ``date`` layout (e.g. "%a %d %b %H:%M:%S %Z %Y").
    NOTE(review): this assumes the locale-dependent output of ``date`` is
    space-separated with the time as the third-from-last field -- confirm
    on the target system; other locales/formats would corrupt the result.
    """
    date = os.popen('sudo date').read()
    date = [part.strip() for part in date.split(" ")]
    # Overwrite the HH:MM time field, appending ":00" seconds.
    date[-3] = time_str+":00"
    datetime_str = " ".join(date)
    return datetime_str
def set_date(date_str):
    """
    Sets system time to the passed date string via ``date --set``.
    Requires root privileges (invoked through sudo).
    Args:
        date_str (str): datetime string accepted by ``date --set``.
    """
    import subprocess
    # Pass the arguments as a list (no shell) so a malicious or malformed
    # date_str cannot inject extra shell commands; the original used
    # os.system with string interpolation, which is shell-injectable.
    subprocess.call(['sudo', 'date', '--set', date_str])
"""
Collect basic features about the user that can be used to predict attributes.
features collected: # of incoming/outgoing, workday/nonworkday, weekend/weekday calls
Stores features in a csv file with the user id + features
"""
import csv, collections, pickle, os, time
from datetime import datetime
class User:
    """Per-subscriber accumulator of call features.

    Counts calls by direction (incoming/outgoing), by time of day
    (working hours vs. not) and by day of week (weekday vs. weekend).
    """
    def __init__(self, mobnum):
        self.mobnum = mobnum
        # Direction counters.
        self.incoming = 0
        self.outgoing = 0
        # Working-hours counters (9:00-18:59 on a weekday counts as work).
        self.workday = 0
        self.nonworkday = 0
        # Day-of-week counters.
        self.weekend = 0
        self.weekday = 0
    def addData(self, inc, out, dur, cost, date, t):
        """Fold a single call record into the counters.

        Args:
            inc, out: 0/1 flags (or strings) for incoming / outgoing.
            dur, cost: call duration and cost (currently unused).
            date: datetime of the call (only weekday() is used).
            t (str): call time as "HH:MM[:SS]".
        """
        if int(inc) == 1:
            self.incoming += int(inc)
        if int(out) == 1:
            self.outgoing += int(out)
        hour = int(t.split(':')[0])
        on_weekday = date.weekday() < 5
        if 9 <= hour <= 18 and on_weekday:
            self.workday += 1
        else:
            self.nonworkday += 1
        if on_weekday:
            self.weekday += 1
        else:
            self.weekend += 1
def getstats(denom=20, round=1):
    # Python 2 script function: streams a CDR CSV, accumulates per-user
    # call features via User.addData, and writes one feature row per user.
    # NOTE(review): the parameter name `round` shadows the builtin; `denom`
    # and `round` are only referenced from commented-out sampling code.
    users = {}
    f = csv.reader(open("../applab_6.csv", 'rb'), delimiter=',')
    i = 0
    uniquect, actual = 0, 0
    # Unbuffered (Python 2) progress log.
    wf = open("temp", 'w', 0)
    if os.path.isfile("users.p"):
        # Resume from a previously pickled user table.
        users = pickle.load(open("users.p", "rb"))
    else:
        full_users = set()
        for line in f:
            i += 1
            if i % 100000 == 0:
                wf.write("processed " + str(i) + " lines and " + str(uniquect) + " users\n")
            #print "processed", i, "lines"
            #if i == 20000000: break
            try:
                sender, receiver, date, ti, duration, cost, location, region = line
                date = datetime.fromtimestamp(time.mktime(time.strptime(date, "%d-%b-%y")))
                if sender not in full_users:
                    uniquect += 1
                    full_users.add(sender)
                    #if uniquect % denom - round == 0:
                    users[sender] = User(sender)
                if receiver not in full_users:
                    uniquect += 1
                    full_users.add(receiver)
                    #if uniquect % denom - round == 0:
                    users[receiver] = User(receiver)
                if sender in users:
                    users[sender].addData(0, 1, duration, cost, date, ti)
                if receiver in users:
                    users[receiver].addData(1, 0, duration, cost, date, ti)
            except:
                # NOTE(review): bare except silently skips malformed rows
                # (and would also mask real bugs) -- it only echoes the row.
                print line
    #wf.write("total users:" len(users.keys()), uniquect
    wf.close()
    #pickle.dump(users, open("users_%s.p" % str(round), "wb"))
    # Emit one CSV row of features per user.
    w = open("../output/users_features_%s.csv" % round, 'w')
    w.write("mobile,incoming,outgoing,workday,nonworkday,weekday,weekend\n")
    i = 0
    for user in users.values():
        if i % 100000 == 0: print "written line", i
        i += 1 #user.incoming_cost,user.incoming_dur,\
        #user.outgoing_cost,user.outgoing_dur,\
        w.write("%s,%s,%s,%s,%s,%s,%s\n" % (user.mobnum, user.incoming,\
            user.outgoing,\
            user.workday, user.nonworkday, user.weekday, user.weekend))
    w.close()
# Run the extraction when the module is executed.
getstats()
import games.interfaces
import random
class heuristics_c4(games.interfaces.Heuristics):
def generate_score(self, game_state: games.interfaces.InterfaceGames2Player):
if self.check_victory(game_state):
return self.check_victory(game_state)
else:
score_horizontal = self.check_horizontal_strings(game_state, 'X') + self.check_horizontal_strings(game_state, 'O')
score_vertical = self.check_vertical_string(game_state, 'X') + self.check_vertical_string(game_state, 'O')
score_diagonale = self.check_diagonal_string(game_state, 'X') + self.check_diagonal_string(game_state, 'O')
return score_horizontal + score_vertical + score_diagonale + self.reward_middle(game_state)
def reward_middle(self, game_state: games.interfaces.InterfaceGames2Player):
score_middle = 0
for i in range(game_state.n):
for j in range(int(game_state.m/3), int(game_state.m/3)*2):
if game_state.board[i][j] == 'O':
score_middle -= 1
elif game_state.board[i][j] == 'X':
score_middle += 1
return score_middle
    def check_horizontal_strings(self, game_state: games.interfaces.InterfaceGames2Player, jeton : str ):
        # Score horizontal runs of `jeton`: +10 per run cell for 'X',
        # -10 for 'O'.  NOTE(review): runs are re-counted from every
        # starting cell inside them, so a run of length k contributes
        # multiple overlapping scores -- presumably intentional weighting,
        # confirm with the author.
        score_string = 0
        for i in range(game_state.n):
            for j in range(game_state.m-1):
                ref = game_state.board[i][j]
                if (ref == jeton):
                    # Extend rightwards until the run breaks.
                    string_size = 1
                    for k in range(1,game_state.m-j):
                        if game_state.board[i][j+k] == ref:
                            string_size += 1
                        else:
                            break
                    if string_size > 1:
                        if jeton == 'X':
                            score_string += string_size * 10
                        else:
                            score_string -= string_size * 10
        return score_string
    def check_vertical_string(self, game_state: games.interfaces.InterfaceGames2Player, jeton : str ):
        # Score vertical runs of `jeton`: +10 per run cell for 'X',
        # -10 for 'O'.  Same overlapping-count behaviour as the
        # horizontal variant.
        score_string = 0
        for i in range(game_state.n-1):
            for j in range(game_state.m):
                ref = game_state.board[i][j]
                if (ref == jeton):
                    # Extend downwards until the run breaks.
                    string_size = 1
                    for k in range(1, game_state.n - i):
                        if game_state.board[i+k][j] == ref:
                            string_size += 1
                        else:
                            break
                    if string_size > 1:
                        if jeton == 'X':
                            score_string += string_size * 10
                        else:
                            score_string -= string_size * 10
        return score_string
    def check_diagonal_string(self, game_state: games.interfaces.InterfaceGames2Player, jeton : str):
        # Score diagonal runs of `jeton`.  For each starting cell the run
        # length accumulates BOTH the down-right and the down-left
        # extension into one string_size -- these lie on different
        # diagonals, so this values the cell's diagonal reach rather than
        # a single line; presumably intentional, confirm with the author.
        score_string = 0
        for i in range(game_state.n-1):
            for j in range(game_state.m):
                ref = game_state.board[i][j]
                if ref == jeton:
                    string_size = 1
                    # Down-right extension, bounded by both board edges.
                    borne_max = min(game_state.n-i, game_state.m-j)
                    for k in range(1,borne_max):
                        if game_state.board[i+k][j+k] == ref:
                            string_size += 1
                        else:
                            break
                    # Down-left extension bound; NOTE(review): uses n-i for
                    # the row limit but never m -- verify for non-square
                    # boards.
                    if j < game_state.n-i:
                        borne_max = j+1
                    else:
                        borne_max = game_state.n-i
                    for l in range(1,borne_max):
                        if game_state.board[i+l][j-l] == ref:
                            string_size += 1
                        else:
                            break
                    if string_size > 1:
                        if jeton == 'O':
                            score_string -= string_size * 10
                        else:
                            score_string += string_size * 10
        return score_string
    def check_victory(self, game_state: games.interfaces.InterfaceGames2Player):
        # Return +inf if 'X' has four in a row, -inf if 'O' does, None (by
        # falling through) otherwise.  Checks verticals, horizontals, then
        # diagonals.
        # Vertical runs.
        for i in range(game_state.n-1):
            for j in range(game_state.m):
                ref = game_state.board[i][j]
                if ref != '_':
                    string_size = 1
                    for k in range(1, game_state.n - i):
                        if game_state.board[i+k][j] == ref:
                            string_size += 1
                        else:
                            break
                    if string_size >= 4:
                        if ref == 'X':
                            return float('inf')
                        else:
                            return float('-inf')
        # Horizontal runs.
        for i in range(game_state.n):
            for j in range(game_state.m-1):
                ref = game_state.board[i][j]
                if ref != '_':
                    string_size = 1
                    for k in range(1,game_state.m-j):
                        if game_state.board[i][j+k] == ref:
                            string_size += 1
                        else:
                            break
                    if string_size >= 4:
                        if ref == 'X':
                            return float('inf')
                        else:
                            return float('-inf')
        # Diagonal runs (down-right, then down-left).
        for i in range(game_state.n-1):
            for j in range(game_state.m):
                ref = game_state.board[i][j]
                if ref != '_':
                    string_size = 1
                    borne_max = min(game_state.n-i, game_state.m-j)
                    for k in range(1,borne_max):
                        if game_state.board[i+k][j+k] == ref:
                            string_size += 1
                        else:
                            break
                    if string_size >= 4:
                        if ref == 'O':
                            return float('-inf')
                        else:
                            return float('inf')
                    string_size = 1
                    if j < game_state.n-i:
                        borne_max = j+1
                    else:
                        borne_max = game_state.n-i
                    for l in range(1,borne_max):
                        # NOTE(review): BUG -- this loop's variable is `l`
                        # but the board is indexed with the stale `k` from
                        # the previous loop; compare check_diagonal_string,
                        # which correctly uses board[i+l][j-l].
                        if game_state.board[i+k][j-k] == ref:
                            string_size += 1
                        else:
                            break
                    if string_size >= 4:
                        if ref == 'O':
                            return float('-inf')
                        else:
                            return float('inf')
#!/usr/bin/env python
"""
usage is
score [vcmfh] <answer> <key> <sensemap>
-v (or --verbose)
verbose -- print out every entry
-c (or --coarse)
coarse grained scoring
-m (or --mixed)
mixed grain scoring
-f (or --fine)
fine grain scoring (default)
-h (or --help)
print out this message, and some information on file formats
"""
detailed_help = """
This is a scorer for senseval2, 2001. answers are one per line in the form
<item> <instance> <answer> [ <answer> [ ... ]] [<comment>]
<item> is a lexical item, sometimes just a word, sometimes it notes a
word plus a part of speech. for example, "fish" or "fish-v"
<instance> is an arbitrary identitier -- cannot contain whitespace.
<answer> is in the form [^ \\t/]+ followed by an optional "/" plus
weight.
a <comment> is in the form "!![^\\n]*"
answers will be compared against a key. both answers and keys are in
the format described above. additionally, a sense map may be supplied
which provides a subsumption table of the scores.
a sensemap is in the form
<subsummee>[ <numsubsumed> <subsumer>]
HISTORY: a replica of Joseph Rosenzweig's scoring software, rewritten to be
more robust about the format of answers.
Author: Scott Cotton, cotton@linc.cis.upenn.edu
"""
import re
import sys
import getopt
DEBUG=0
#
# answers are in the form (item, instance): answer list
#
INSTANCES_KEYED=0
answer_key = {}
# sum of all the answer weights
answer_ttl_weight = 0.0
def parse_answer_line(ln, key=None):
    # Python 2.  Parse one "<item> <instance> <answer>[/weight] ..." line.
    # With key set, the parsed answers are also stored in the global
    # answer_key and counted in INSTANCES_KEYED.  Returns
    # (item, instance, [(answerid, weight), ...]) or None on a bad line.
    global INSTANCES_KEYED, answer_ttl_weight
    prg = re.compile(r"^(?P<item>[^ \t]+)\s+(?P<instance>[^ \t]+)\s+(?P<answers>.*)")
    match = re.match(prg, ln)
    if not match:
        print >> sys.stderr, "bad answer line", ln
        return None
    str_answers = match.group("answers")
    # chop off comments
    if str_answers.find("!!") != -1:
        str_answers = str_answers[:str_answers.find("!!")]
    item = match.group("item")
    instance = match.group("instance")
    answers = []
    ttl_weight = 0
    weight_specified = 0
    answer_ttl_weight_diff = 0
    for str_answer in str_answers.split():
        if not str_answer: continue
        if str_answer.count('/') > 0:
            # Explicitly weighted answer: "<id>/<weight>".
            weight_specified = 1
            i = str_answer.index('/')
            try:
                answerid, weight = str_answer[:i], float(str_answer[i+1:])
            except ValueError:
                print >> sys.stderr, "bad answer line", ln
                return None
            # take all weights > 1 (usually out of 100) and
            # normalize them to a value between 0 and 1.
            while weight > 1.0: weight /= 100.0
            answers.append((answerid, weight))
            answer_ttl_weight_diff += weight
            ttl_weight += weight
        else:
            # Unweighted answer defaults to weight 1.0.
            answers.append((str_answer, 1.0))
            if not key:
                ttl_weight += 1.0
    # can't deal with these
    if not len(answers):
        print >> sys.stderr, "no answer provided by system for %s, %s" % \
              (item, instance)
        return None
    answer_ttl_weight += answer_ttl_weight_diff
    #
    # If the answer line presents no weights, the
    # total weight tried will be normalized to 1.0
    #
    if not key and not weight_specified:
        answer_ttl_weight += 1.0
    #
    # normalize the weights
    #
    if round(ttl_weight) > 1.0:
        # Equal-split renormalization when weights sum past 1.
        wt = 1.0 / len(answers)
        answers_normalized = []
        for answ, old_wt in answers:
            answers_normalized.append((answ, wt))
            # NOTE(review): ttl_weight is dead after this point; this
            # accumulation has no effect.
            ttl_weight += wt
    else:
        answers_normalized = answers
    if key:
        answer_key[(item, instance)] = answers_normalized
        INSTANCES_KEYED += 1
    return item, instance, answers_normalized
#
# these hold the subsumption table information and just the senses.
#
senses_subsumed = {}
senses_subsuming = {}
subsum_ttl = {}
#
# this adds an entry, coordinating the structure of the data in the
# above dicts
#
def add_entry(subsumed, num=0, subsummer=None):
    # Python 2 (dict.has_key).  Record that `subsummer` subsumes
    # `subsumed` (with `num` total senses subsumed) in the module-level
    # tables senses_subsumed / senses_subsuming / subsum_ttl.  Called with
    # only `subsumed` it is a no-op registration.
    if subsummer is not None:
        if senses_subsumed.has_key(subsumed):
            senses_subsumed[subsumed].append(subsummer)
            subsum_ttl[subsummer] = num
        else:
            senses_subsumed[subsumed] = [subsummer]
            subsum_ttl[subsummer] = num
        if senses_subsuming.has_key(subsummer):
            senses_subsuming[subsummer].append(subsumed)
        else:
            senses_subsuming[subsummer] = [subsumed]
        # Fold in the transitive closure known so far.  NOTE(review): this
        # only captures chains already present at insertion time; entries
        # added later are not propagated upward.
        senses_subsuming[subsummer].extend(senses_subsuming.get(subsumed, []))
#
# this parses a sensemap file line
#
def parse_senses_line(ln):
    # Python 2.  Parse one sensemap line:
    #   <subsumee>[ <numsubsumed> <subsumer>]...
    # A lone token registers the sense with no subsumer; otherwise each
    # (count, subsumer) pair is added via add_entry.
    ln = ln.strip()
    spl = ln.split()
    if len(spl) == 1:
        add_entry(ln)
        return
    for i in range(0, len(spl) - 1, 2):
        try:
            subsumee = spl[i]
            num = int(spl[i+1])
            subsumer = spl[i + 2]
            add_entry(subsumee, num, subsumer)
        except IndexError:
            print >> sys.stderr, "bad subsumption table entry: '%s'" % ln
        except ValueError: # int() failed
            print >> sys.stderr, "bad subsumption table entry: '%s'" % ln
#
# these scores a given answer. XXX note that this assumes
# that the answers have been keyed and the sensemap file has
# been parsed.
#
# for when there's no answer available
# Raised by the score_* functions when an instance has no key entry.
class NoScore(Exception): pass
#
# verbose by instance output
#
def fmt_verbose(item, instance, ttl, keyids, answers):
    # Python 2 prints.  Per-instance verbose report: score, key sense ids,
    # and the system's weighted guesses.
    print 'score for "%s_%s": %0.3f' % (item, instance, ttl)
    print '    key =',
    print ' '.join(keyids)
    print '    guess =',
    outs = []
    for id, wt in answers:
        outs.append("%s/%0.3f" % (id, wt))
    print ' '.join(outs)
    print
#
# fine grained scoring
#
def score_fine(item, instance, answers):
    # Fine-grained score: sum of the weights of answers that exactly
    # match a key sense id.  Raises NoScore when the instance is unkeyed.
    keys = answer_key.get((item, instance))
    if keys is None:
        print >> sys.stderr, "no answer key for item %s instance %s" % (item, instance)
        raise NoScore()
    ttl = 0.0
    keyids = []
    for answerid, weight in answers:
        for keyid, keyweight in keys:
            # NOTE(review): keyids is appended inside the answer loop, so
            # the verbose output repeats each key id once per answer.
            keyids.append(keyid)
            if answerid == keyid:
                ttl += weight
    if VERBOSE:
        fmt_verbose(item, instance, ttl, keyids, answers)
    return ttl
#
# mixed grained scoring
#
def score_mixed(item, instance, answers):
    # Mixed-grained score.  For each answer, credit is given at the best
    # available level: exact match, key-subsumes-answer, or (partially)
    # answer-subsumes-key.  Python 2 only: uses string exceptions
    # (raise "Answered") as a multi-loop break -- illegal in Python 3.
    keys = answer_key.get((item, instance))
    if keys is None:
        print >> sys.stderr, "no answer key for item %s instance %s" % (item, instance)
        raise NoScore()
    ttl = 0.0
    kids = {}
    scored = {}
    # to get the best possible score, check for answers in decreasing level of
    # quality: same, key subsumes answer, and answer subsumes key
    for aid, awt in answers:
        try:
            for kid, unused_kwt in keys:
                kids[kid] = 1
                if aid == kid:
                    ttl += awt
                    if DEBUG:
                        print "k %s = a %s" % (kid, aid)
                    raise "Answered"
            for kid, unused_kwt in keys:
                kids[kid] = 1
                if senses_subsuming.has_key(kid) and \
                   aid in senses_subsuming[kid]:
                    ttl += awt
                    if DEBUG:
                        print "k %s subsumes a %s" % (kid, aid)
                    raise "Answered"
            #
            # in ths case we sum all the keys, as for some reason
            # joseph's does.
            #
            for kid, unused_kwt in keys:
                kids[kid] = 1
                if senses_subsuming.has_key(aid) and \
                   kid in senses_subsuming[aid]:
                    if DEBUG:
                        print "a %s subsumes k %s" % (aid, kid)
                    # Partial credit: split the weight over the number of
                    # senses the answer subsumes.
                    num_subsumed = subsum_ttl[aid]
                    ttl += awt * (1.0 / num_subsumed)
        except "Answered":
            pass
    if VERBOSE:
        fmt_verbose(item, instance, ttl, kids.keys(), answers)
    return ttl
#
# return the set of collected (grouped) answers given a particular one
# the set is in the form of a dict. good thing these relations aren't too
# big
#
def resolve_answer_to_group(answerid):
    # Recursively collect answerid plus everything that subsumes it,
    # returned as a dict used as a set.  NOTE(review): the local name
    # `dict` shadows the builtin within this function.
    dict = {answerid: 1}
    if not senses_subsumed.has_key(answerid):
        return dict
    for k in senses_subsumed[answerid]:
        dict[k] = 1
        dict.update(resolve_answer_to_group(k))
    return dict
#
# coarse grained scoring -- expand both the answer and the
# keys to their respective groups and see if any of the answers
# are in the keys, adding that answers weight as appropriate
#
def score_coarse(item, instance, answers):
    # Coarse-grained score: expand both each answer and each key sense to
    # their subsumption groups; any overlap earns the answer's full
    # weight.  Python 2 only: string exception used as a loop break.
    keys = answer_key.get((item, instance))
    if keys is None:
        print >> sys.stderr, "no answer key for item %s instance %s" % (item, instance)
        raise NoScore()
    ttl = 0.0
    prkeys = {}
    for answerid, weight in answers:
        answ_group = resolve_answer_to_group(answerid)
        try:
            for keyid, keyweight in keys:
                prkeys[keyid] = 1
                key_group = resolve_answer_to_group(keyid)
                for kg_answ in key_group.keys():
                    if answ_group.has_key(kg_answ):
                        ttl += weight
                        raise "Answered"
        except "Answered":
            pass
    if VERBOSE:
        fmt_verbose(item, instance, ttl, prkeys.keys(), answers)
    return ttl
#
# print out the answers ... exactly like the c program
#
def summarize(score_ttl, ihandled, ikeyed):
    # Print precision / recall / attempted in the same layout as the
    # original C scorer.  NOTE(review): `ihandled` is accepted but unused;
    # recall and attempted are computed against the keyed-instance count.
    prec = score_ttl / answer_ttl_weight
    rec = score_ttl / ikeyed
    print " precision: %0.3f (%0.2f correct of %0.2f attempted)" % (prec,
                                                                    score_ttl,
                                                                    answer_ttl_weight)
    print " recall: %0.3f (%0.2f correct of %0.2f in total)" % (rec,
                                                                score_ttl,
                                                                ikeyed)
    attempted = answer_ttl_weight / float(ikeyed) * 100
    print " attempted: %0.3f %% (%0.2f attempted of %0.2f in total)" % (attempted,
                                                                        answer_ttl_weight,
                                                                        ikeyed)
    print
#
# main flow control from here down
#
usage = __doc__
# argument processing (Python 2: comma-form except clauses).
try:
    optlist, args = getopt.getopt(sys.argv[1:], "vcmfh", ["coarse",
                                                          "mixed",
                                                          "fine",
                                                          "help",
                                                          "verbose"])
except getopt.error, rest:
    print >> sys.stderr, "ERROR: ", rest
    print >> sys.stderr, usage
    sys.exit(1)
# defaults: fine-grained, quiet.  The grain flags are mutually exclusive.
VERBOSE=0
FINE=1
MIXED=0
COARSE=0
for opt, val in optlist:
    if opt in ("-v", "--verbose"):
        VERBOSE=1
    elif opt in ("-c", "--coarse"):
        COARSE=1;FINE=0;MIXED=0
    elif opt in ("-m", "--mixed"):
        MIXED=1;COARSE=0;FINE=0
    elif opt in ("-f", "--fine"):
        FINE=1;MIXED=0; COARSE=0
    elif opt in ("-h", "--help"):
        print usage
        print detailed_help
        sys.exit(0)
    else:
        raise "What the hell happened? that's s'poseed to be all the options"
if len(args) != 3:
    sys.exit("ERROR\n" + usage)
answers_f, key_f, sensemap_f = args
try:
    # read in the answer key
    key_in = open(key_f)
    for line in key_in.readlines():
        parse_answer_line(line, key=1)
    key_in.close()
    # read in the sense map
    sense_in = open(sensemap_f)
    for line in sense_in.readlines():
        parse_senses_line(line)
    sense_in.close()
    # read in the answers, keeping track of ttl answers, sum score
    score_sum = 0
    instances_handled = 0
    answer_in = open(answers_f)
    for line in answer_in.readlines():
        p = parse_answer_line(line)
        if not p:
            # Malformed line already reported by the parser.
            continue
        item, instance, answers = p
        instances_handled += 1
        try:
            if FINE:
                score_sum += score_fine(item, instance, answers)
            elif MIXED:
                score_sum += score_mixed(item, instance, answers)
            elif COARSE:
                score_sum += score_coarse(item, instance, answers)
            else:
                print >> sys.stderr, "NO SCORING REQUESTED (impossible!)"
        except NoScore: # error already reported
            continue
    # print things out just like joseph's original
    print
    if FINE:
        print "Fine-grained score for",
    elif MIXED:
        print "Mixed-grained score for",
    elif COARSE:
        print "Coarse-grained score for",
    print '"%s" using key "%s":' % (answers_f, key_f)
    summarize(score_sum, instances_handled, INSTANCES_KEYED)
except Exception, rest:
    print >> sys.stderr, "ERROR: " + str(rest)
    raise
    sys.exit("aborting...")
#!/usr/bin/env python3
import unittest
import numpy as np
import numpy.testing as npt
import math
import functions as F
class Test1DFunctions(unittest.TestCase):
    # Scalar tests of F.approximateJacobian: linear, quadratic,
    # trigonometric and exponential functions, each checking the
    # finite-difference derivative against the analytic one.
    #scalar tests
    def test_ApproxJacobian1(self):
        # d/dx (slope*x + 5) == slope everywhere.
        slope = 3.0
        # Yes, you can define a function inside a function/method. And
        # it has scope only within the method within which it's
        # defined (unless you return it to the outside world, which
        # you can do in Python with no need for anything like C's
        # malloc() or C++'s new() )
        def f(x):
            return slope * x + 5.0
        x0 = 2.0
        dx = 1.e-3
        Df_x = F.approximateJacobian(f, x0, dx)
        # self.assertEqual(Df_x.shape, (1,1)
        # If x and f are scalar-valued, Df_x should be, too
        self.assertTrue(np.isscalar(Df_x))
        self.assertAlmostEqual(Df_x, slope)
    def test_ApproxJacobian1a(self):
        #test for large numbers
        slope = 300000000.0
        def f(x):
            return slope * x + 5000000.0
        x0 = 2.0
        dx = 1.e-3
        Df_x = F.approximateJacobian(f, x0, dx)
        self.assertTrue(np.isscalar(Df_x))
        self.assertAlmostEqual(Df_x, slope)
    def test_ApproxJacobian1b(self):
        #test for second degree polynomials
        # p(x) = 3 + 3x + 3x^2, so p'(2) = 3 + 12 = 15.
        slope = 3.0
        quadratic = F.Polynomial([slope,slope,slope])
        def f(x):
            return quadratic(x)
        x0 = 2.0
        dx = 1.e-7
        Df_x = F.approximateJacobian(f, x0, dx)
        self.assertTrue(np.isscalar(Df_x))
        self.assertAlmostEqual(Df_x, 15)
    def test_ApproxJacobian1bb(self):
        #test for second degree polynomials
        # p(x) = 6 + 3x^2, so p'(0) = 0.
        slope = 3.0
        quadratic = F.Polynomial([6,0,slope])
        def f(x):
            return quadratic(x)
        x0 = 0
        dx = 1.e-7
        Df_x = F.approximateJacobian(f, x0, dx)
        self.assertTrue(np.isscalar(Df_x))
        self.assertAlmostEqual(Df_x, 0)
    def test_ApproxJacobian1c(self):
        #test for second degree polynomials, different x0
        # p(x) = 3 + 3x + 3x^2, so p'(0) = 3.
        slope = 3.0
        quadratic = F.Polynomial([slope,slope,slope])
        def f(x):
            return quadratic(x)
        x0 = 0
        dx = 1.e-007
        Df_x = F.approximateJacobian(f, x0, dx)
        self.assertTrue(np.isscalar(Df_x))
        self.assertAlmostEqual(Df_x, 3)
    def test_ApproxJacobian1d(self):
        #test for trig function
        # d/dx sin(x) at pi is cos(pi) = -1.
        slope = 3.0
        def f(x):
            return math.sin(x)
        x0 = math.pi
        dx = 1.e-003
        Df_x = F.approximateJacobian(f, x0, dx)
        self.assertTrue(np.isscalar(Df_x))
        self.assertAlmostEqual(Df_x, -1)
    def test_ApproxJacobian1e(self):
        #test for exponential function
        # d/dx e^x at 1 is e.
        slope = 3.0
        def f(x):
            return math.e**x
        x0 = 1
        dx = 1.e-005
        Df_x = F.approximateJacobian(f, x0, dx)
        self.assertTrue(np.isscalar(Df_x))
        self.assertAlmostEqual(Df_x, math.e)
    #nonscalar tests
class Test2DFunctions(unittest.TestCase):
    # Vector-valued tests of F.approximateJacobian: the numerical Jacobian
    # of each 2-D map is compared to the exact analytic matrix A.
    def test_ApproxJacobian2(self):
        # numpy matrices can also be initialized with strings. The
        # semicolon separates rows; spaces (or commas) delimit entries
        # within a row.
        A = np.matrix([[1.0, 2.0], [3.0, 4.0]])
        def f(x):
            # The * operator for numpy matrices is overloaded to mean
            # matrix-multiplication, rather than elementwise
            # multiplication as it does for numpy arrays
            return A * x
        # The vector-valued function f defined above is the following:
        # if we let u = f(x), then
        #
        # u1 = x1 + 2 x2
        # u2 = 3 x1 + 4 x2
        #
        # The Jacobian of this function is constant and exactly equal
        # to the matrix A. approximateJacobian should thus return
        # something pretty close to A.
        x0 = np.matrix([[5.0], [6.0]])
        dx = 1.e-6
        Df_x = F.approximateJacobian(f, x0, dx)
        # Make sure approximateJA
        self.assertEqual(Df_x.shape, (2,2))
        # numpy arrays and matrices vectorize comparisons. So if a & b
        # are arrays, the expression a==b will itself be an array of
        # booleans. But an array of booleans does not itself evaluate
        # to a clean boolean (this is an exception to the general
        # Python rule that "every object can be interpreted as a
        # boolean"), so normal assert statements will break. We need
        # array-specific assert statements found in numpy.testing
        npt.assert_array_almost_equal(Df_x, A)
    def test_ApproxJacobian2a(self):
        # Decoupled map: each output depends on one input only, so the
        # Jacobian is diagonal (p1'(2)=10, p2'(3)=21).
        A = np.matrix([[10,0],[0,21]])
        poly1 = F.Polynomial([2,2,2])
        poly2 = F.Polynomial([3,3,3])
        def f(x):
            return np.matrix([[poly1(float(x[0]))],[poly2(float(x[1]))]])
        # print('This is f(x)\n {0}'.format(f(x)))
        x0 = np.matrix([[2.0],[3.0]])
        dx = 1.e-6
        Df_x = F.approximateJacobian(f, x0, dx)
        # print('This is Df_x\n {0}'.format(Df_x))
        self.assertEqual(len(Df_x), 2)
        npt.assert_array_almost_equal(Df_x, A)
    def test_ApproxJacobian2b(self):
        # Coupled map u1 = 2x1^2 + 2x2, u2 = 3x1^3 + 3x2 evaluated at
        # (2, 3): Jacobian [[4x1, 2], [9x1^2, 3]] = [[8, 2], [36, 3]].
        A = np.matrix([[8,2],[36,3]])
        def f(x):
            return np.matrix([[2*(float(x[0]))**2+2*(float(x[1]))],[3*(float(x[0]))**3+ 3*(float(x[1]))]])
        x0 = np.matrix([[2.0] , [3.0]])
        dx = 1.e-6
        Df_x = F.approximateJacobian(f, x0, dx)
        self.assertEqual(len(Df_x), 2)
        npt.assert_array_almost_equal(Df_x, A)
class miscTests(unittest.TestCase):
    # Direct evaluation test of the F.Polynomial callable.
    def test_Polynomial(self):
        # p(x) = x^2 + 5x + 4
        p = F.Polynomial([4, 5, 1])
        # linspace(a, b, N) produces N equally spaced values from a to
        # b, including both a & b.
        for x in np.linspace(-2,2,11):
            self.assertAlmostEqual(p(x), 4 + 5*x + x**2)
        # It would have been more Pythonic to feed the whole array x into p
        # (the way the Polynomial class is written, it's agnostic
        # about whether x is a scalar or an array) and compare with an
        # npt assertion, as follows:
        #
        # p = F.Polynomial([4, 5, 1])
        # npt.assert_array_almost_equal(p(x), 4 + 5*x + x**2)
        #
        # But I though this would be more legible.
# Run all test cases when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
import inspect
import logging
from slack_bolt.kwargs_injection.utils import build_required_kwargs
from slack_bolt.request.request import BoltRequest
from slack_bolt.response.response import BoltResponse
from plugins.shared_links import shared_links
from include.herald import herald
from slack_bolt.context import BoltContext
from slack_bolt.context.ack import Ack
from slack_sdk import WebClient
from slack_sdk.web.slack_response import SlackResponse
import json
class event:
    # Slack Bolt handler (invoked as a class): acknowledges the event,
    # renders the App Home view from the herald blocks plus shared links,
    # and publishes or updates the Home tab.
    def __init__(self, ack:Ack, context: BoltContext, client:WebClient, event:dict, logger:logging.Logger, payload:dict, request:BoltRequest, response:BoltResponse):
        # Acknowledge within Slack's 3-second window before doing work.
        ack()
        view = event.get('view', None)
        view_id = view.get('id') if view is not None else None
        # Call herald with only the kwargs its signature declares.
        herald_function = herald(**build_required_kwargs(
            logger=logger,
            request=request,
            response=response,
            required_arg_names=inspect.getfullargspec(herald).args,
            this_func=herald,
        ))
        app_home_view = {
            "type": "home",
            "blocks": herald_function.blocks()
        }
        app_home_view['blocks'].append({ "type": "divider" })
        app_home_view['blocks'].extend(shared_links().blocks())
        # No existing view id -> first publish; otherwise update in place.
        if view_id is None: client.views_publish(user_id=context.get('user_id'),view=json.dumps(app_home_view))
        else: client.views_update(view_id=view.get('id'), view=json.dumps(app_home_view))
import gymnasium as gym
from stable_baselines3 import DQN
from stable_baselines3.common.vec_env import VecVideoRecorder, DummyVecEnv
import highway_env
def train_env():
    # Build the training environment: highway-fast-v0 with stacked
    # grayscale image observations suitable for a CNN policy.
    env = gym.make('highway-fast-v0')
    env.configure({
        "observation": {
            "type": "GrayscaleObservation",
            "observation_shape": (128, 64),
            "stack_size": 4,
            "weights": [0.2989, 0.5870, 0.1140],  # weights for RGB conversion
            "scaling": 1.75,
        },
    })
    env.reset()
    return env
def test_env():
    # Evaluation environment: same observation setup as training, but with
    # a higher policy frequency and a fixed episode duration for videos.
    env = train_env()
    env.configure({"policy_frequency": 15, "duration": 20})
    env.reset()
    return env
if __name__ == '__main__':
    # Train a DQN with a CNN policy on the image observations, save it,
    # then reload it and record one evaluation episode to video.
    # Train
    model = DQN('CnnPolicy', DummyVecEnv([train_env]),
                learning_rate=5e-4,
                buffer_size=15000,
                learning_starts=200,
                batch_size=32,
                gamma=0.8,
                train_freq=1,
                gradient_steps=1,
                target_update_interval=50,
                exploration_fraction=0.7,
                verbose=1,
                tensorboard_log="highway_cnn/")
    model.learn(total_timesteps=int(1e5))
    model.save("highway_cnn/model")
    # Record video
    model = DQN.load("highway_cnn/model")
    env = DummyVecEnv([test_env])
    # Two recorded frames per simulated second of the episode.
    video_length = 2 * env.envs[0].config["duration"]
    env = VecVideoRecorder(env, "highway_cnn/videos/",
                           record_video_trigger=lambda x: x == 0, video_length=video_length,
                           name_prefix="dqn-agent")
    obs, info = env.reset()
    for _ in range(video_length + 1):
        action, _ = model.predict(obs)
        obs, _, _, _, _ = env.step(action)
    env.close()
|
import urllib
import re
# Python 2 script: scrape .jpg image links from a page and write them,
# prefixed with the page URL, to link1.txt.
url=raw_input("enter url:")
f=urllib.urlopen(url)
d=f.read()
f.close()
# Non-greedy, tag-bounded pattern.  The original '<img.*src="(.*\.jpg)"'
# was greedy: on a line with several tags it could swallow everything up
# to the LAST .jpg and capture garbage spanning multiple attributes.
a=re.compile(r'<img[^>]*src="([^"]*\.jpg)"')
n=re.findall(a,d)
b=open("link1.txt","w")
for i in n:
    # NOTE(review): this assumes src values are relative to the page URL.
    b.write(url+i+"\n")
b.close()
#! /usr/bin/env python
# -*- coding=utf8 -*-
"""
ref:
https://stackoverflow.com/questions/45719176/how-to-display-runtime-statistics-in-tensorboard-using-estimator-api-in-a-distri
"""
import tensorflow as tf
from tensorflow.python.training.session_run_hook import SessionRunHook
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training import training_util
class MetadataHook(SessionRunHook):
    """SessionRunHook that records full run metadata (trace) to an event
    file every ``save_steps`` global steps, for TensorBoard profiling."""

    def __init__(self, save_steps=None, output_dir=""):
        self.output_dir = output_dir
        self.save_steps = save_steps

    def begin(self):
        # One FileWriter for the whole session; logs the graph up front.
        self.writer = tf.summary.FileWriter(self.output_dir,
                                            tf.get_default_graph())

    def before_run(self, run_context):
        self.global_step_tensor = training_util.get_global_step()
        # BUGFIX: validate the global step BEFORE evaluating it.  The
        # original ran session.run(self.global_step_tensor) first, so a
        # missing global step crashed inside session.run instead of
        # raising this explicit error.
        if self.global_step_tensor is None:
            raise RuntimeError(
                "Global step should be created to use MetadataHook.")
        self.global_step = run_context.session.run(self.global_step_tensor)
        self.should_summary = False
        opts = None
        next_step = self.global_step + 1
        # Request a full trace only on the steps we intend to record.
        if next_step % self.save_steps == 0:
            opts = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            self.should_summary = True
        fetches = {}
        return SessionRunArgs(fetches, options=opts)

    def after_run(self, run_context, run_values):
        self.global_step += 1
        if self.should_summary:
            self.writer.add_run_metadata(run_values.run_metadata,
                                         str(self.global_step))
            self.writer.flush()

    def end(self, session):
        self.writer.close()
|
from django.urls import path
from .views import processes_list,process,join_process
# Routes: process list at the root, process detail by primary key, and a
# join action nested under the detail route.
urlpatterns = [
    path('',processes_list),
    path('<int:pk>',process),
    path('<int:pk>/join',join_process)
]
|
#!/usr/bin/python
import random
import sys
# CTF-style quiz: answer 1000 addition questions correctly to reveal the
# flag.  A wrong answer does not reset progress, it just repeats.
# NOTE(review): the original also built an operator list ["+", "-", "*"]
# and drew an op_num -- both unused (only addition was ever asked) -- so
# those dead variables and the dead data/solve initializers are removed.
count = 0
while count < 1000:
    x = random.randint(1, 100)
    j = random.randint(1, 200)
    solve = x + j
    data = ("what is %d + %d\n" % (x, j))
    r = input(data)
    if int(r) == solve:
        count = count + 1
    else:
        print("Wrong... try again\r\n")
if count == 1000:
    print('flag{U7bA-uyNUYw-l1WTwOLWXF4HlyQWke5M}\r\n')
|
from main import count_animals
def test_count_animals(benchmark):
    """count_animals sums every number in the sentence (0 when none)."""
    # `benchmark` (pytest-benchmark fixture) times the call and returns
    # its result, so each line both benchmarks and asserts.
    assert benchmark(count_animals, "I see 3 zebras, 5 lions and 6 giraffes.") == 14, 'Live from the Savannah'
    assert benchmark(count_animals, "Mom, 3 rhinoceros and 6 snakes come to us!") == 9
    assert benchmark(count_animals, "I do not see any animals here!") == 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Login credentials (placeholders -- fill in before running).
username = "user ID"
password = "password"
# Identifiers of the registration target (placeholders).
season = "season ID"
fakulta = "faculty ID"
studium = "studium ID"
# Set the time when the scripts should fire
time_hours = 17
time_minutes = 0
time_seconds = 0
time_microseconds = 0
|
# Advent of Code 2021, day 2: interpret submarine movement commands.
# Part 1: "forward" moves horizontally; "up"/"down" change depth directly.
# Part 2: "up"/"down" change aim; "forward" moves and dives by aim*amount.
# Using a context manager so the input file is closed (the original
# open() leaked the handle).
with open('input-2.txt', 'r') as file:
    lines = file.readlines()
horizontal_pos = 0
depth = 0
for line in lines:
    direction, amount = line.split(" ")
    if direction == "forward":
        horizontal_pos += int(amount)
    elif direction == "up":
        depth -= int(amount)
    elif direction == "down":
        depth += int(amount)
print(horizontal_pos * depth)
horizontal_pos = 0
depth = 0
aim = 0
for line in lines:
    direction, amount = line.split(" ")
    if direction == "forward":
        horizontal_pos += int(amount)
        depth += (aim*int(amount))
    elif direction == "up":
        aim -= int(amount)
    elif direction == "down":
        aim += int(amount)
print(horizontal_pos * depth)
import sys
import pytest
from pytest import mark
from osbrain import run_agent
from osbrain import run_logger
from osbrain import run_nameserver
from osbrain.helper import sync_agent_logger
# Reusable pytest skip markers for platform-specific osBrain tests; each one
# skips the decorated test on Windows for the stated reason.
skip_windows = mark.skipif(sys.platform == 'win32',
                           reason='Not supported on windows')
skip_windows_port_reuse = mark.skipif(sys.platform == 'win32',
                                      reason='Windows allows port reuse')
skip_windows_any_port = mark.skipif(sys.platform == 'win32',
                                    reason='Windows allows binding to well '
                                           'known ports')
skip_windows_spawn = mark.skipif(sys.platform == 'win32',
                                 reason='Windows does not support fork')
skip_windows_ipc = mark.skipif(sys.platform == 'win32',
                               reason='Windows does not support IPC')
def append_received(agent, message, topic=None):
    """Message handler that accumulates every incoming message on the
    agent's ``received`` list; ``topic`` is accepted but ignored."""
    agent.received += [message]
def set_received(agent, message, topic=None):
    # Message handler that overwrites the agent's `received` attribute with
    # the latest message; `topic` is accepted but ignored.
    agent.received = message
@pytest.fixture(scope='function')
def nsproxy(request):
    """Yield a fresh osBrain name server and shut it down after the test."""
    ns = run_nameserver()
    yield ns
    ns.shutdown()
@pytest.fixture(scope='function')
def agent_logger(request):
    """Yield an (agent, logger) pair wired together under a private name
    server; the name server is shut down after the test."""
    ns = run_nameserver()
    agent = run_agent('a0')
    logger = run_logger('logger')
    agent.set_logger(logger)
    # Block until the logger is actually receiving the agent's log messages.
    sync_agent_logger(agent=agent, logger=logger)
    yield agent, logger
    ns.shutdown()
|
# Feito por: Cacatua
# Criado em: 19/05/2021
# Atualizado em: 21/06/2021
"""
Descrição:
Script para ver a quantidade de membros no servidor e mandar uma
mensagem de bom dia dizendo esta quantidade com o horário.
Como Utilizar:
Deixe o Discord aberto no chat e ele pegará e digitará a mensagem
automaticamente.
Obs:
TODO:
- Fazer com que pegue apenas um número ao invés de separar a string;
"""
# Import das bibliotecas necessárias
import pytesseract
import pyautogui
from datetime import datetime
def main():
    """Read the server's online-member count from a Discord screenshot via
    OCR and type a greeting message with the current time into the chat.

    Relies on fixed screen coordinates, so the Discord window must be laid
    out exactly as when the coordinates were captured — TODO confirm on the
    target machine.
    """
    # print(pyautogui.displayMousePosition())
    # Grab only the screen region that shows the member counter.
    pyautogui.screenshot('membros.png', region=(1690, 85, 90, 25))
    pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
    amigosGamersOnline = pytesseract.image_to_string('membros.png')
    # print(amigosGamersOnline)
    # Skip the first 8 OCR characters (label text) and keep the number token.
    amigosGamersOnline = amigosGamersOnline[8:]
    numero = amigosGamersOnline.split()
    data = datetime.now()
    atual = data.strftime("%H:%M %d/%m")
    # Click the chat input box, type the time, then a second line with the count.
    pyautogui.click(x=395, y=995)
    pyautogui.write(atual)
    pyautogui.hotkey('shift', 'enter')
    pyautogui.write(f"{numero[0]} amigos gamers online")

if __name__ == '__main__':
    main()
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchdiffeq
from torchdyn.sensitivity.adjoint import Adjoint
from .defunc import DEFunc
class NeuralDE(pl.LightningModule):
    """General Neural DE class

    :param func: function parametrizing the vector field.
    :type func: nn.Module
    :param order: order of the differential equation (1 for first order).
    :param sensitivity: gradient method, 'autograd' or 'adjoint'.
    :param s_span: integration interval (NOTE: the tensor default is shared
        across instances; it is only read here, never mutated).
    :param solver: ODE solver name passed to `torchdiffeq.odeint`.
    :param atol: absolute solver tolerance.
    :param rtol: relative solver tolerance.
    :param intloss: optional integral-loss module propagated during the solve.
    """
    def __init__(self, func:nn.Module,
                       order=1,
                       sensitivity='autograd',
                       s_span=torch.linspace(0, 1, 2),
                       solver='rk4',
                       atol=1e-4,
                       rtol=1e-4,
                       intloss=None):
        super().__init__()
        # TO DO: remove controlled from input args
        self.defunc, self.order = DEFunc(func, order), order
        self.sensitivity, self.s_span, self.solver = sensitivity, s_span, solver
        self.nfe = self.defunc.nfe
        self.rtol, self.atol = rtol, atol
        self.intloss = intloss
        self.u, self.controlled = None, False  # data-control
        if sensitivity == 'adjoint':
            self.adjoint = Adjoint(self.intloss)

    def _prep_odeint(self, x:torch.Tensor):
        """Move s_span to x's device/dtype and run per-solve setup
        (auxiliary dims for losses / CNF estimators, data-control)."""
        self.s_span = self.s_span.to(x)
        # loss dimension detection routine; for CNF div propagation and integral losses w/ autograd
        excess_dims = 0
        if self.intloss is not None and self.sensitivity == 'autograd':
            excess_dims += 1
        # handle aux. operations required for some jacobian trace CNF estimators e.g Hutchinson's
        # as well as data-control set to DataControl module
        for name, module in self.defunc.named_modules():
            if hasattr(module, 'trace_estimator'):
                if module.noise_dist is not None: module.noise = module.noise_dist.sample((x.shape[0],))
                excess_dims += 1
        # data-control set routine. Is performed once at the beginning of odeint since the control is fixed to IC
        # TO DO: merge the named_modules loop for perf
        for name, module in self.defunc.named_modules():
            if hasattr(module, 'u'):
                self.controlled = True
                module.u = x[:, excess_dims:].detach()
        return x

    def forward(self, x:torch.Tensor):
        """Integrate the DE from s_span[0] to s_span[-1] and return the
        final state, using the configured sensitivity method."""
        x = self._prep_odeint(x)
        switcher = {
            'autograd': self._autograd,
            'adjoint': self._adjoint,
        }
        odeint = switcher.get(self.sensitivity)
        out = odeint(x)
        return out

    def trajectory(self, x:torch.Tensor, s_span:torch.Tensor):
        """Returns a data-flow trajectory at `s_span` points

        :param x: input data
        :type x: torch.Tensor
        :param s_span: collections of points to evaluate the function at e.g torch.linspace(0, 1, 100) for a 100 point trajectory
                       between 0 and 1
        :type s_span: torch.Tensor
        """
        x = self._prep_odeint(x)
        sol = torchdiffeq.odeint(self.defunc, x, s_span,
                                 rtol=self.rtol, atol=self.atol, method=self.solver)
        return sol

    def backward_trajectory(self, x:torch.Tensor, s_span:torch.Tensor):
        raise NotImplementedError

    def reset(self):
        """Reset the number-of-function-evaluations counter."""
        self.nfe, self.defunc.nfe = 0, 0

    def _autograd(self, x):
        """Integrate with plain autograd sensitivity; returns the final state.

        Bug fix: the original branched on `self.intloss == None` but both
        branches issued the identical odeint call; collapsed into one.
        """
        self.defunc.intloss, self.defunc.sensitivity = self.intloss, self.sensitivity
        return torchdiffeq.odeint(self.defunc, x, self.s_span, rtol=self.rtol,
                                  atol=self.atol, method=self.solver)[-1]

    def _adjoint(self, x):
        """Integrate with the adjoint sensitivity method."""
        return self.adjoint(self.defunc, x, self.s_span, rtol=self.rtol, atol=self.atol, method=self.solver)

    @property
    def nfe(self):
        # The DEFunc owns the actual counter; expose it transparently.
        return self.defunc.nfe

    @nfe.setter
    def nfe(self, val):
        self.defunc.nfe = val

    def __repr__(self):
        npar = sum([p.numel() for p in self.defunc.parameters()])
        return f"Neural DE:\n\t- order: {self.order}\
        \n\t- solver: {self.solver}\n\t- integration interval: {self.s_span[0]} to {self.s_span[-1]}\
        \n\t- num_checkpoints: {len(self.s_span)}\
        \n\t- tolerances: relative {self.rtol} absolute {self.atol}\
        \n\t- num_parameters: {npar}\
        \n\t- NFE: {self.nfe}\n\
        \nIntegral loss: {self.intloss}\n\
        \nDEFunc:\n {self.defunc}"
|
from django.conf.urls import url
from django.urls import path
from . import views

# URL routes for the events app: registration, calendar, listing, and
# per-event edit/profile/delete views keyed by the event id.
urlpatterns = [
    path("register/",views.register_event,name="register_event"),
    path("calendar/",views.CalendarView.as_view(),name="calendar"),
    path("event_list/",views.event_list,name="viewEvent"),
    path("event_edit/<int:id>/", views.edit_event, name='edit_event'),
    path("profile/<int:id>/",views.event_profile,name="event_profile"),
    path("delete/<int:id>/",views.delete_event,name="delete_event"),
    # path("edit/<int:id>/",views.event_edit,name="event_edit"),
]
|
from django.contrib import admin
from django.urls import path,include
from .views import (home_page,
                    register,
                    logout_request,
                    login_request,
                    list_exercises,
                    exercise,
                    add_exercise,
                    add_category,saveans)

# Namespace used when reversing these routes (e.g. 'main:...').
app_name = 'main'

# URL routes for the main app; none of these declare a route name, so they
# can only be reversed by view reference.
urlpatterns = [
    path('', home_page),
    path('register/', register),
    path('login/', login_request),
    path('logout/', logout_request),
    path('add-exercise/', add_exercise),
    path('add-category/', add_category),
    path('saveans/', saveans),
    # NOTE(review): '<id>' is an untyped converter, so id arrives as a string.
    path('<id>/exercise/', exercise),
    path('list-exercises/', list_exercises),
]
|
import os

# Scripts to exercise, executed in order; the whole suite is repeated 100x.
files = ['1_requests.py',
         '2_session.py',
         '3_async1.py',
         '5_async3.py',
         '6_action1.py',
         '7_action2.py']

for repetition in range(100):
    for script in files:
        print('\nStarting script ' + script)
        os.system('python3 ' + script)
        print('Ending script ' + script + '\n')
|
from django.http import request
from django.views import View
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.shortcuts import render
from django.views import generic
from boto3.session import Session
from datetime import date, datetime
from .models import Article
from config.settings import AWS_ACCESS_KEY_ID, AWS_S3_REGION_NAME, AWS_SECRET_ACCESS_KEY, AWS_STORAGE_BUCKET_NAME
import boto3
from social.service import ArticleService, CommentService, EditService, LikeService, RelateService
from userinfo.service import UserService
from userinfo.dto import EditDto, RelateDto, CommentDto, ArticleDto, LikeDto
# Create your views here.
class UserlistView(generic.ListView) :
    """List every auth User, rendered with user_list.html as 'userlist'."""
    model = User
    template_name = 'user_list.html'
    context_object_name = 'user'
class UserdetailView(generic.DetailView):
    """User profile page: GET renders the profile, POST uploads an image to
    S3 and creates an Article pointing at the uploaded object."""
    model = User
    template_name = 'user_detail.html'
    context_object_name = 'user'

    def get(self, request, *args, **kwargs) :
        # Resolve the profile owner through the service layer rather than
        # the DetailView machinery.
        context = {'user':UserService.find_by_user(kwargs['pk'])}
        return render(request, 'user_detail.html', context)

    def post(self, request, *args, **kwargs):
        """Upload the posted 'image' file to S3 and create an Article.

        NOTE(review): `file` may be None when no image was posted — the
        `file.name` accesses below would then raise; confirm the form always
        includes an image.
        """
        # article_dto = self._build_article_dto(self, request)
        # ArticleService.article(article_dto)
        file = request.FILES.get('image')
        session = Session(
            aws_access_key_id=AWS_ACCESS_KEY_ID,
            aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
            region_name=AWS_S3_REGION_NAME,
        )
        s3 = session.resource('s3')
        # Timestamp prefix to keep object keys unique per upload.
        now = datetime.now().strftime('%Y%H%M%S')
        img_object = s3.Bucket(AWS_STORAGE_BUCKET_NAME).put_object(
            Key=now+file.name,
            Body=file
        )
        s3_url = 'https://django-s3-cj.s3.ap-northeast-2.amazonaws.com/'
        Article.objects.create(
            title=request.POST['title'],
            user = request.user,
            article= request.POST.get('article'),
            url = s3_url+now+file.name
        )
        return redirect('social:user_detail', kwargs['pk'])

    # NOTE(review): declared @staticmethod yet takes `self` and is invoked as
    # self._build_article_dto(self, request) elsewhere in this file — works,
    # but the decorator is misleading.
    @staticmethod
    def _build_article_dto(self, request) :
        # Build an ArticleDto with defaults for missing title/content.
        return ArticleDto (
            title=request.POST.get('title', 'NO TITLE'),
            user = request.user,
            article=request.POST.get('article', 'NO CONTENT'),
            url=request.FILES.get('image', None)
        )
    # def get_queryset(self) :
    #     return Article.objects.order_by()
class PostDetailView(generic.DetailView) :
    """Article detail page; POST duplicates the S3-upload flow used by
    UserdetailView (candidate for a shared helper)."""
    model = Article
    context_object_name = 'article'
    template_name = 'post_detail.html'

    def post(self, request, *args, **kwargs):
        """Upload the posted 'image' to S3, create an Article, re-render."""
        file = request.FILES.get('image')
        session = Session(
            aws_access_key_id=AWS_ACCESS_KEY_ID,
            aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
            region_name=AWS_S3_REGION_NAME,
        )
        s3 = session.resource('s3')
        # Timestamp prefix to keep object keys unique per upload.
        now = datetime.now().strftime('%Y%H%M%S')
        img_object = s3.Bucket(AWS_STORAGE_BUCKET_NAME).put_object(
            Key=now+file.name,
            Body=file
        )
        s3_url = 'https://django-s3-cj.s3.ap-northeast-2.amazonaws.com/'
        Article.objects.create(
            title=request.POST['title'],
            user = request.user,
            article= request.POST.get('article'),
            url = s3_url+now+file.name
        )
        # NOTE(review): renders without a context, unlike the redirect used
        # by the sibling views — confirm this is intentional.
        return render(request, 'post_detail.html')
class EditView(View) :
    """Profile edit page: GET shows the form, POST applies the edit via
    EditService and returns to the user's detail page."""

    def get(self, request, *args, **kwargs) :
        context = {'user':UserService.find_by_user(kwargs['pk'])}
        return render(request, 'edit.html', context)

    def post(self, request, *args, **kwargs) :
        edit_dto = self._build_edit_dto(self, request.POST)
        EditService.edit(edit_dto)
        return redirect('social:user_detail', kwargs['pk'])

    # NOTE(review): @staticmethod with an explicit `self` argument — called
    # as self._build_edit_dto(self, ...) above, so `self` is passed manually.
    @staticmethod
    def _build_edit_dto(self, post_data) :
        # Collect the editable profile fields plus the target user's pk.
        return EditDto (
            name=post_data['name'],
            introduce=post_data['introduce'],
            address=post_data['address'],
            pk=self.kwargs['pk']
        )
class UploadPostView(View) :
    """Post-upload page: GET shows the upload form, POST pushes the image to
    S3 and creates an Article (same flow as UserdetailView.post)."""

    def get(self, request, *args, **kwargs) :
        return render(request, 'upload_post.html')

    def post(self, request, *args, **kwargs):
        """Upload the posted 'image' to S3, create an Article, then go back
        to the user list."""
        # article_dto = self._build_article_dto(self, request.POST)
        # ArticleService.article(article_dto)
        file = request.FILES.get('image')
        session = Session(
            aws_access_key_id=AWS_ACCESS_KEY_ID,
            aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
            region_name=AWS_S3_REGION_NAME,
        )
        s3 = session.resource('s3')
        # Timestamp prefix to keep object keys unique per upload.
        now = datetime.now().strftime('%Y%H%M%S')
        img_object = s3.Bucket(AWS_STORAGE_BUCKET_NAME).put_object(
            Key=now+file.name,
            Body=file
        )
        s3_url = 'https://django-s3-cj.s3.ap-northeast-2.amazonaws.com/'
        Article.objects.create(
            title=request.POST['title'],
            user = request.user,
            article= request.POST.get('article'),
            url = s3_url+now+file.name
        )
        return redirect('social:user_list', kwargs['pk'])
    # @staticmethod
    # def _build_article_dto(self, request) :
    #     return ArticleDto (
    #         title=request.POST.get('title', 'NO TITLE'),
    #         user = request.user,
    #         article=request.POST.get('article', 'NO CONTENT'),
    #         image=request.FILES.get('image', None)
    #     )
class CommentView(View) :
    """Create a comment on an article and return to that article's page."""

    def get(self, request, *args, **kwargs) :
        return render(request, 'post_detail.html')

    def post(self, request, *args, **kwargs) :
        comment_dto = self._build_comment_dto(self,request)
        CommentService.comment(comment_dto)
        # Redirect to the page of the article's owner, not the commenter.
        owner = CommentService.find_owner(kwargs['pk'])
        return redirect('social:post_detail', owner.pk)

    # NOTE(review): @staticmethod taking `self`; invoked with `self` passed
    # explicitly above.
    @staticmethod
    def _build_comment_dto(self, request) :
        return CommentDto (
            article_pk=self.kwargs['pk'],
            owner=CommentService.find_owner(self.kwargs['pk']),
            writer=request.user,
            content=request.POST['content'],
        )
class CommentLikeView(View) :
    """Toggle the current user's like on a comment, then return to the post."""
    # def get(self, request, *args, **kwargs) :
    #     return render(request, 'post_detail.html')

    def post(self, request, *args, **kwargs) :
        like_dto = self._build_like_dto(self, request)
        LikeService.toggle(like_dto)
        return redirect('social:post_detail', kwargs['pk'])

    # NOTE(review): @staticmethod taking `self`; invoked with `self` passed
    # explicitly above.
    @staticmethod
    def _build_like_dto(self, request) :
        return LikeDto(
            comment_pk=self.kwargs['pk'],
            users=request.user
        )
class RelationshipView(View) :
    """Toggle a follow/relationship between the current user and another."""

    def get(self, request, *args, **kwargs) :
        # NOTE(review): 'social:user_list' is passed where a template name is
        # expected — this looks like it should be a redirect; confirm.
        return render(request, 'social:user_list')

    def post(self, request, *args, **kwargs) :
        relate_dto = self._build_relate_dto(self, request)
        RelateService.toggle(relate_dto)
        return redirect('social:user_detail', kwargs['pk'])

    # NOTE(review): @staticmethod taking `self`; invoked with `self` passed
    # explicitly above.
    @staticmethod
    def _build_relate_dto(self, request) :
        return RelateDto (
            user_pk=self.kwargs['pk'],
            requester=request.user
        )
|
# BundleWrap bundle items; `node` and `files` are injected by BundleWrap at
# evaluation time — TODO confirm against the repo's bundle conventions.
pkg_pip = {
    'mako': {},
    # this needs to be installed, but pip does not state, that it is so use unless instead
    'setuptools': {
        'unless': 'pip list | grep setuptools',
    },
}

if node.has_bundle('apt'):
    # HashiCorp apt repository plus its signing key; the repo file depends on
    # the key and triggers an apt cache refresh when it changes.
    files['/etc/apt/sources.list.d/hashicorp.list'] = {
        'content': 'deb [arch=amd64] https://apt.releases.hashicorp.com {release_name} main\n'.format(
            release_name=node.metadata.get(node.os).get('release_name')
        ),
        'content_type': 'text',
        'needs': ['file:/etc/apt/trusted.gpg.d/hashicorp.gpg', ],
        'triggers': ["action:force_update_apt_cache", ],
    }
    files['/etc/apt/trusted.gpg.d/hashicorp.gpg'] = {
        'content_type': 'binary',
    }
    pkg_apt = {
        'packer': {
            'needs': [
                'file:/etc/apt/trusted.gpg.d/hashicorp.gpg',
                'file:/etc/apt/sources.list.d/hashicorp.list',
            ]
        },
    }
|
"""
Implementation of a RBF kernel PCA for dimensionality reduction using SciPy and NumPy helper functions
"""
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
import numpy as np
def rbf_kernel_pca(x, gamma, n_components):
    """
    RBF Kernel PCA implementation

    :param x: {NumPy Array}, shape = [n_samples, n_features]
    :param gamma: float - Tuning parameter of the RBF Kernel
    :param n_components: int - Number of principal components to return
    :return: x_pc {NumPy Array}, shape = [n_samples, k_features]. Projected Dataset
             lambdas {list} - eigenvalues, largest first.
    """
    # Calculate pairwise squared Euclidean distances in the MxN dimensional dataset.
    sq_dists = pdist(x, 'sqeuclidean')
    # Convert pairwise distances into a square matrix
    mat_sq_dists = squareform(sq_dists)
    # Compute the symmetrical kernel matrix.
    # Fix: use np.exp — `scipy.exp` was deprecated and removed from SciPy.
    k = np.exp(-gamma * mat_sq_dists)
    # Center the kernel matrix
    n = k.shape[0]
    one_n = np.ones((n, n)) / n
    k = k - one_n.dot(k) - k.dot(one_n) + one_n.dot(k).dot(one_n)
    # Obtaining eigenpairs from the centered kernel matrix
    # scipy.linalg.eigh returns them in ascending order
    eig_vals, eig_vecs = eigh(k)
    # Collect the top k eigenvectors (projected samples).
    # Fix: np.column_stack needs a sequence, not a generator (generator
    # support was deprecated and later removed in NumPy).
    x_pc = np.column_stack([eig_vecs[:, -i] for i in range(1, n_components + 1)])
    # Collect the corresponding eigenvalues
    lambdas = [eig_vals[-i] for i in range(1, n_components + 1)]
    return x_pc, lambdas
|
from logic import convert_to_csv

# Manual smoke test for convert_to_csv: converts everything under the input
# folder into a single CSV in the output folder.
# create test variables
input_folder = 'test/downloader_input'
# input_folder = 'test/input/klhs_shino'
output_folder = 'test/ui_output'
output_file = 'test.csv'
# call function
convert_to_csv(input_folder, output_folder, output_file)
|
# Print each fruit, a trailer line, and the list length.
# Cleanup: the original used a for/else with no `break` in the loop, so the
# else branch was unconditional — print it directly instead. The dead `i = 0`
# initializer and commented-out experiments were removed.
lst1 = ["Apple", "Banana", "Orange", "Tomato", "Mango"]
for i in lst1:
    print(i)
print("This is inside of false")
print(len(lst1))
#!/usr/bin/python
# coding:utf-8
# change log list
# 20170209 shaoning (__main__) command line parse method changed
import sys
import commands
import re
import time
import datetime
import OptParser
def usage():
    # Print command-line help for icfs-admin-log (Python 2 print statement);
    # the text is emitted verbatim, including the railroad-style diagrams.
    print """Help(-h|--help)for icfs-admin-log:
Usage:
>> icfs-admin-log ---- --download ---- --config ----><
Functions: Download config file on mon node
Options:
--download: download file
--mon: download monitor log
--config: download config file
--week: recent week number
Exit status:
0 if executed successfully
1 if executed unsuccessfully
Usage:
>> icfs-admin-log ---- --download ---- --message ---- --node node_name ---- --week week ----><
Functions: Download message log on specific nodes
Options:
--download: download file
--mon: download monitor log
--config: download config file
--week: recent week number
Exit status:
0 if executed successfully
1 if executed unsuccessfully
Usage:
>> icfs-admin-log ---- --download --+-- --mon --+-- --node node_name ---- --date date ----><
'-- --osd --'
Functions: Download mon/osd log on specific nodes
Options:
--download: download file
--osd: download osd log
--message: download message log
--node: node name
--week: recent week number
Exit status:
0 if executed successfully
1 if executed unsuccessfully """
def error(num, *description):
    # Print the message for error code `num` (with optional detail args for
    # codes that format them) and terminate the process with status 1.
    if num == 610:
        print "Error(610): Invalid input! "
    elif num == 2100:
        print "Error(2100): week must be number greater than 0"
    elif num == 2101:
        print "Error(2101): Create directory failed"
    elif num == 2102:
        print "Error(2102): Invalid node name %s" % description
    elif num == 2103:
        print "Error(2103): Copy file failed"
    elif num == 2104:
        print "Error(2104): Pack and compress file failed"
    elif num == 2105:
        print "Error(2105): No files found"
    elif num == 2106:
        print "Error(2106): Invalid date format"
    # Always exits: every error here is fatal to the command.
    sys.exit(1)
def log_download_config():
    # Copy the local mon config (/etc/icfs/icfs.conf) into the download
    # staging area and pack it into config.tar.gz.
    # make directory
    status, output = commands.getstatusoutput("mkdir -p /var/log/icfs/download/config")
    if status != 0:
        error(2101)
    # clear directory
    status, output = commands.getstatusoutput("rm -rf /var/log/icfs/download/config/*")
    if status != 0:
        print output
        sys.exit(1)
    # copy file(only mon config)
    status, output = commands.getstatusoutput("cp -f /etc/icfs/icfs.conf /var/log/icfs/download/config/")
    if status != 0:
        error(2103)
    # finish copy, start compress
    status, output = commands.getstatusoutput("tar zcvf /var/log/icfs/download/config.tar.gz"
                                              " -C /var/log/icfs/download config")
    if status != 0:
        error(2104)
def log_download_mon():
    # Collect monitor logs for the module-level `date` from every node in the
    # module-level `node_list` (both set in __main__) and pack them into
    # mon.tar.gz. Current logs match when `date` is today; rotated .gz logs
    # match on the date embedded in their filename.
    # make directory
    status, output = commands.getstatusoutput("mkdir -p /var/log/icfs/download/mon")
    if status != 0:
        error(2101)
    # clear directory
    status, output = commands.getstatusoutput("rm -rf /var/log/icfs/download/mon/*")
    if status != 0:
        print output
        sys.exit(1)
    # check if node is valid
    status, output = commands.getstatusoutput("salt-key -l acc | grep -v 'Accepted Keys:'")
    if status != 0:
        print output
        sys.exit(1)
    nodes = output.split('\n')
    for node in node_list:
        if node not in nodes:
            error(2102, node)
    count = 0
    for node in node_list:
        # make sub directory
        status, output = commands.getstatusoutput("mkdir -p /var/log/icfs/download/mon/%s" % node)
        if status != 0:
            error(2101)
        # filter salt stderror so that output can be parsed correctly
        status, output = commands.getstatusoutput("salt '%s' cmd.run 'ls /var/log/icfs/' 2>/dev/null" % node)
        if status != 0:
            print output
            sys.exit(1)
        file_list = output.split("\n")
        # drop the first line of salt output (header) before parsing filenames
        file_list.pop(0)
        for file_name in file_list:
            file_name = file_name.strip()
            # check whether file need to copy
            need_copy = False
            current_pattern = re.compile(r"^icfs-mon.*log$")
            m = current_pattern.match(file_name)
            if m is not None and date == datetime.date.today():
                need_copy = True
            backup_pattern = re.compile(r"^icfs-mon.*log-(\d*).gz$")
            m = backup_pattern.match(file_name)
            if m is not None and m.group(1) == date.strftime("%Y%m%d"):
                need_copy = True
            if not need_copy:
                continue
            # copy file
            status, output = commands.getstatusoutput("scp %s:/var/log/icfs/%s /var/log/icfs/download/mon/%s/"
                                                      % (node, file_name, node))
            if status != 0:
                error(2103)
            count += 1
    if count == 0:
        error(2105)
    # finish copy, start compress
    status, output = commands.getstatusoutput("tar zcvf /var/log/icfs/download/mon.tar.gz"
                                              " -C /var/log/icfs/download mon")
    if status != 0:
        error(2104)
def log_download_osd():
    # Same flow as log_download_mon but for icfs-osd logs: collect OSD logs
    # for the module-level `date` from every node in `node_list` and pack
    # them into osd.tar.gz.
    # make directory
    status, output = commands.getstatusoutput("mkdir -p /var/log/icfs/download/osd")
    if status != 0:
        error(2101)
    # clear directory
    status, output = commands.getstatusoutput("rm -rf /var/log/icfs/download/osd/*")
    if status != 0:
        print output
        sys.exit(1)
    # check if node is valid
    status, output = commands.getstatusoutput("salt-key -l acc | grep -v 'Accepted Keys:'")
    if status != 0:
        print output
        sys.exit(1)
    nodes = output.split('\n')
    for node in node_list:
        if node not in nodes:
            error(2102, node)
    count = 0
    for node in node_list:
        # make sub directory
        status, output = commands.getstatusoutput("mkdir -p /var/log/icfs/download/osd/%s" % node)
        if status != 0:
            error(2101)
        # filter salt stderror so that output can be parsed correctly
        status, output = commands.getstatusoutput("salt '%s' cmd.run 'ls /var/log/icfs/' 2>/dev/null" % node)
        if status != 0:
            print output
            sys.exit(1)
        file_list = output.split("\n")
        # drop the first line of salt output (header) before parsing filenames
        file_list.pop(0)
        for file_name in file_list:
            file_name = file_name.strip()
            # check whether file need to copy
            need_copy = False
            current_pattern = re.compile(r"^icfs-osd.*log$")
            m = current_pattern.match(file_name)
            if m is not None and date == datetime.date.today():
                need_copy = True
            backup_pattern = re.compile(r"^icfs-osd.*log-(\d*).gz$")
            m = backup_pattern.match(file_name)
            if m is not None and m.group(1) == date.strftime("%Y%m%d"):
                need_copy = True
            if not need_copy:
                continue
            # copy file
            status, output = commands.getstatusoutput("scp %s:/var/log/icfs/%s /var/log/icfs/download/osd/%s/"
                                                      % (node, file_name, node))
            if status != 0:
                error(2103)
            count += 1
    if count == 0:
        error(2105)
    # finish copy, start compress
    status, output = commands.getstatusoutput("tar zcvf /var/log/icfs/download/osd.tar.gz"
                                              " -C /var/log/icfs/download osd")
    if status != 0:
        error(2104)
def log_download_message():
    # Collect /var/log/messages* files from every node in `node_list` whose
    # rotation date falls inside the `week`-th most recent week (both
    # module-level globals set in __main__), then pack into message.tar.gz.
    # make directory
    status, output = commands.getstatusoutput("mkdir -p /var/log/icfs/download/message")
    if status != 0:
        error(2101)
    # clear directory
    status, output = commands.getstatusoutput("rm -rf /var/log/icfs/download/message/*")
    if status != 0:
        print output
        sys.exit(1)
    # pre date string: the half-open window (min_date, max_date] of week N
    min_date = datetime.date.today() - datetime.timedelta(week*7)
    max_date = datetime.date.today() - datetime.timedelta((week-1)*7)
    # check if node is valid
    status, output = commands.getstatusoutput("salt-key -l acc | grep -v 'Accepted Keys:'")
    if status != 0:
        print output
        sys.exit(1)
    nodes = output.split('\n')
    for node in node_list:
        if node not in nodes:
            error(2102, node)
    count = 0
    for node in node_list:
        # make sub directory
        status, output = commands.getstatusoutput("mkdir -p /var/log/icfs/download/message/%s" % node)
        if status != 0:
            error(2101)
        # filter salt stderror so that output can be parsed correctly
        status, output = commands.getstatusoutput("salt '%s' cmd.run 'ls /var/log/' 2>/dev/null" % node)
        if status != 0:
            print output
            sys.exit(1)
        file_list = output.split("\n")
        # drop the first line of salt output (header) before parsing filenames
        file_list.pop(0)
        for file_name in file_list:
            file_name = file_name.strip()
            # check whether file need to copy
            need_copy = False
            if file_name == "messages" and min_date < datetime.date.today() <= max_date:
                need_copy = True
            pattern = re.compile(r"^messages-(\d*)$")
            m = pattern.match(file_name)
            if m is not None and min_date.strftime("%Y%m%d") < m.group(1) <= max_date.strftime("%Y%m%d"):
                need_copy = True
            if not need_copy:
                continue
            # copy file
            status, output = commands.getstatusoutput("scp %s:/var/log/%s /var/log/icfs/download/message/%s/"
                                                      % (node, file_name, node))
            if status != 0:
                error(2103)
            count += 1
    if count == 0:
        error(2105)
    # finish copy, start compress
    status, output = commands.getstatusoutput("tar zcvf /var/log/icfs/download/message.tar.gz"
                                              " -C /var/log/icfs/download message")
    if status != 0:
        error(2104)
if __name__ == "__main__":
    # Globals consumed by the log_download_* functions above.
    node_list = None
    date = None
    week = 1
    try:
        # Each appended rule maps a sub-command name to its required options.
        parser = OptParser.OptParser()
        parser.append("help", "{-h|--help}")
        parser.append("download_config", "--download,--config")
        parser.append("download_mon", "--download,--mon,--node=,--date=")
        parser.append("download_osd", "--download,--osd,--node=,--date=")
        parser.append("download_message", "--download,--message,--node=,--week=")
        m_name, m_opts = parser.parse(sys.argv[1:])
    except Exception, e:
        print e
        print "Error(610): Invalid input! "
        sys.exit(1)
    # Validate and stash option values before dispatching.
    for x, y in m_opts:
        if x == "--node":
            node_list = y.split(",")
        elif x == "--week":
            try:
                week = int(y)
                if week <= 0:
                    error(2100)
            except ValueError, e:
                error(2100)
        elif x == "--date":
            try:
                t = time.strptime(y, "%Y%m%d")
                date = datetime.date(t[0], t[1], t[2])
                # A future date can never match any log file.
                if date > datetime.date.today():
                    error(2105)
            except ValueError, e:
                error(2106)
    # Dispatch to the matched sub-command.
    if m_name == "help":
        usage()
    elif m_name == "download_config":
        log_download_config()
    elif m_name == "download_mon":
        log_download_mon()
    elif m_name == "download_osd":
        log_download_osd()
    elif m_name == "download_message":
        log_download_message()
    else:
        error(610)
|
import os
import simpleaudio as sa
import time
class SOUND:
    """Plays notification sounds bundled next to this module."""

    def onRead(self):
        # Play read.wav (located in this module's directory) without blocking.
        f = os.path.join(os.path.dirname(__file__), "read.wav")
        wave_obj = sa.WaveObject.from_wave_file(f)
        play_obj = wave_obj.play()
        #play_obj.wait_done()

    def onFalse(self):
        # Placeholder: no sound assigned to this event yet.
        pass

    def onError(self):
        # Placeholder: no sound assigned to this event yet.
        pass

if __name__ == '__main__' :
    # Manual check: play the "read" sound once.
    sound = SOUND()
    sound.onRead()
# Read N rows of M integers; count the rows whose weighted sum (weights
# B_lst) plus bias C is strictly positive.
N, M, C = map(int, input().split())
B_lst = list(map(int, input().split()))
lst = [list(map(int, input().split())) for _ in range(N)]
count = sum(
    1
    for row in lst
    if sum(weight * value for weight, value in zip(B_lst, row)) + C > 0
)
print(count)
from django.conf.urls import url
from core.api.routers import PostHackedRouter
from . import views

# DRF router for the word/skintype viewsets; the API root view is disabled.
router = PostHackedRouter()
router.include_root_view = False
# reverse('api:word-list'), reverse('api:word-detail', kwargs={'pk': 1})
router.register(r'word', views.WordViewSet, base_name='word')
router.register(r'skintype', views.SkinTypeViewSet, base_name='skintype')

# Autocomplete endpoint registered before the router-generated routes.
urlpatterns = [
    url(r'^word/autocomplete/$', views.WordAutocompleteAPIView.as_view(), name='word-autocomplete'),
]
urlpatterns += router.urls
|
# Evaluate three propositional formulas for the fixed assignment below and
# print each result. The first two use De Morgan's law:
# (not A or not B) == not (A and B).
A = False
B = False
C = True

a = not (A and B) and not C
b = not (A and B) and (A or B)
c = (A and B) or (A and C) or (not C)

print("a)", a)
print("б)", b)
print("c)", c)
|
import itertools
import numpy as np
import matplotlib
matplotlib.use('TkAgg')  # select the Tk backend before pyplot is imported
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder

# data
# load data ~/dropbox/nasa_stretch/force_features/force_emg_expl.csv
df = pd.read_csv('~/dropbox/nasa_stretch/force_features/force_emg_expl.csv')
# add ratios
df = df.assign(bmg_wav=(df['lmg_airsum'] + df['rmg_airsum'])/(df['lmg_lsrsum'] + df['rmg_lsrsum']),
               bmg_iemg=(df['lmg_iemg_air'] + df['rmg_iemg_air'])/(df['lmg_iemg_lnd'] + df['rmg_iemg_lnd']),
               bta_wav=(df['lta_airsum'] + df['rta_airsum'])/(df['lta_lsrsum'] + df['rta_lsrsum']),
               bta_iemg=(df['lta_iemg_air'] + df['rta_iemg_air'])/(df['lta_iemg_lnd'] + df['rta_iemg_lnd']))
# reorder for i/o paradigm
df = df[['Platform',
         'subjectNo',
         'normTime',
         'jumpNo',
         'lmg_airsum',
         'lmg_lsrsum',
         'rmg_airsum',
         'rmg_lsrsum',
         'lta_airsum',
         'lta_lsrsum',
         'rta_airsum',
         'rta_lsrsum',
         'lmg_iemg_air',
         'rmg_iemg_air',
         'lta_iemg_air',
         'rta_iemg_air',
         'lmg_iemg_lnd',
         'rmg_iemg_lnd',
         'lta_iemg_lnd',
         'rta_iemg_lnd',
         'bmg_wav',
         'bmg_iemg',
         'bta_wav',
         'bta_iemg',
         'F1',
         'T1',
         'W1',
         'F2',
         'T2',
         'W2',
         'F3',
         'T3',
         'W3',
         'F2F1']]
# remove cases where jumpNo > 3
df = df[df.jumpNo <= 3]
# get ISS platform
df_plats = df.groupby('Platform')
df_iss = df_plats.get_group('ISS')
a_iss = df_iss['normTime'] == "A"
b_iss = df_iss['normTime'] == "B"
c_iss = df_iss['normTime'] == "C"
e_iss = df_iss['normTime'] == "E"
f_iss = df_iss['normTime'] == "F"
g_iss = df_iss['normTime'] == "G"
bcefg_iss = df_iss[b_iss | c_iss | e_iss | f_iss | g_iss]
bcefg_iss_predictors = bcefg_iss.loc[:, 'lmg_airsum':'bta_iemg']
X = bcefg_iss_predictors
# Bug fix: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
# documented replacement and yields the same ndarray.
X = X.to_numpy()
scaler = StandardScaler()
X = scaler.fit_transform(X)
y = bcefg_iss['normTime']
y = y.to_numpy()
#le = LabelEncoder()
#le.fit(y)
#y = le.transform(y)
class_names = ['B', 'C', 'E', 'F', 'G']

# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2)

# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='rbf', gamma=0.1, C=21.54)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    cm: square confusion-matrix array; classes: tick labels in matrix order.
    Draws into the current matplotlib figure (call plt.figure() first).
    """
    if normalize:
        # Normalize each row so it sums to 1 (per-true-class proportions).
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Annotate each cell; switch to white text on dark cells for contrast.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)

# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
                      title='Confusion matrix, without normalization')

# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
                      title='Normalized confusion matrix')

# Show both figures (blocks until the windows are closed).
plt.show()
# Generated by Django 2.2.9 on 2020-02-14 09:57
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # Auto-generated: makes Deal.customer nullable while keeping CASCADE
    # deletion to the related transportation.Profile.

    dependencies = [
        ('transportation', '0009_auto_20200214_0807'),
    ]

    operations = [
        migrations.AlterField(
            model_name='deal',
            name='customer',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='transportation.Profile'),
        ),
    ]
|
# coding: utf-8
# Read two integers "a n" from standard input and print their product.
a, n = [int(token) for token in input().split()]
print(n * a)
|
#!/usr/bin/python
import argparse
import os
import sys
class WordFrequencyCount:
    """Count runs of identical words in a *sorted* one-word-per-line file.

    (word, count) pairs are buffered in memory and flushed to the output
    file whenever the estimated memory usage exceeds the configured budget,
    so arbitrarily large inputs can be processed.
    """

    def __init__(self, memoryUsageInMegabytes, inputfilename, outputfilename):
        self.inputFileName = inputfilename
        self.outputFileName = outputfilename
        # Budget is given in megabytes; tracked internally in bytes.
        self.maxMemoryUsage = memoryUsageInMegabytes * 1024 * 1024
        self.wordArray = []

    def count(self):
        """Stream the input file and write "word count" lines to the output.

        Assumes equal words are adjacent (sorted input); reading stops at the
        first empty line (or EOF). Progress (estimated bytes buffered) is
        written to stdout.

        Bug fix: the final run of words and any still-buffered entries were
        previously never written — they are now flushed before returning.
        The files are also closed via context managers instead of leaking.
        """
        memoryUsed = 0
        currWord = None
        currCount = 0
        with open(self.inputFileName, 'r') as inFile, \
                open(self.outputFileName, 'w') as outFile:
            while True:
                line = inFile.readline().rstrip()
                if line == "":
                    break
                if line != currWord:
                    if currWord is not None:
                        entry = (currWord, currCount)
                        self.wordArray.append(entry)
                        memoryUsed += sys.getsizeof(entry) + sys.getsizeof(currWord) + sys.getsizeof(currCount)
                        if memoryUsed > self.maxMemoryUsage:
                            self._flush(outFile)
                            memoryUsed = 0
                    currWord = line
                    currCount = 1
                else:
                    currCount += 1
                sys.stdout.write("\r" + str(memoryUsed))
                sys.stdout.flush()
            # Flush the last word's run and any remaining buffered entries.
            if currWord is not None:
                self.wordArray.append((currWord, currCount))
            self._flush(outFile)

    def _flush(self, outFile):
        # Write all buffered (word, count) pairs and clear the buffer.
        for word, count in self.wordArray:
            outFile.write(word + ' ' + str(count) + '\n')
        self.wordArray = []
def main():
    """Parse <memory> <input_filename> <outputfilename> from the command
    line and run the word-frequency counter."""
    parser = argparse.ArgumentParser()
    # Bug fix: the 'memory' argument passed '<memory>' as a second positional
    # name, which argparse rejects at add_argument time; it was meant to be
    # the display name, i.e. metavar (matching the other two arguments).
    parser.add_argument('memory',
                        metavar='<memory>',
                        nargs=1,
                        help='amount of memory to use')
    parser.add_argument('inputfilename',
                        metavar='<input_filename>',
                        nargs=1,
                        help='name of input file')
    parser.add_argument('outputfilename',
                        metavar='<outputfilename>',
                        nargs=1,
                        help='name of output file')  # fixed copy-pasted help text
    args = parser.parse_args()
    print(args)
    freqCount = WordFrequencyCount(int(args.memory[0]), args.inputfilename[0], args.outputfilename[0])
    freqCount.count()

if __name__ == '__main__':
    main()
# write a function called "partition"
# It accepts a list and a callback function that returns True or False.
# The function iterates over each element, invoking the callback on each one,
# and returns a list of two lists: [truthy values, falsey values].

# sample values for the demo call below
# NOTE(review): this shadows the builtin name 'list'; kept for compatibility
list = [1, 2, 3, 4]

def isEven(num):
    """Return True when num is evenly divisible by 2."""
    return num % 2 == 0

def partition(list=None, fn=isEven):
    """Split the values in `list` into [truthy, falsey] according to `fn`.

    Args:
        list: the values to partition
        fn: predicate applied to each value
    Returns:
        [values where fn(v) is truthy, values where fn(v) is falsey]
    """
    truthy = []
    falsey = []
    result = [truthy, falsey]
    for num in list:
        # fix: call the supplied callback instead of the hard-coded isEven,
        # so custom predicates are actually honoured
        if fn(num):
            truthy.append(num)
        else:
            falsey.append(num)
    return result

print(partition(list, isEven))  # [[2,4],[1,3]]
# Another way of doing it, though it is less readable
def partition_2(lst, fn):
    """Return [values matching predicate fn, values that do not], as one list."""
    matching = []
    rest = []
    for value in lst:
        (matching if fn(value) else rest).append(value)
    return [matching, rest]
#!/usr/bin/env python3
import boto3
from datetime import datetime
class R53:
    """Thin wrapper around the boto3 Route53 client for hosted-zone lookups
    and A-record upserts."""

    def __init__(self):
        self.client = boto3.client('route53')

    @staticmethod
    def __get_domain_from_fqdn(fqdn):
        """Strip the leftmost (host) label from an FQDN and return the parent
        domain, e.g. 'host.example.com' -> 'example.com'."""
        fqdn_as_list = fqdn.split('.')
        del fqdn_as_list[0]
        domain = '.'.join(fqdn_as_list)
        return domain

    @staticmethod
    def __fqdn_with_root_level(fqdn):
        """Ensure the FQDN ends with the root '.' (Route53 zone-name form).

        Fix: the original checked the FIRST character (fqdn[:1] == '.'), so a
        name that already ended with '.' got a second dot appended.  The
        docstring also wrongly claimed the dot was being removed.
        """
        if not fqdn.endswith('.'):
            fqdn += '.'
        return fqdn

    def get_current_zones(self):
        """Return the raw list_hosted_zones() response for the configured
        Amazon account."""
        return self.client.list_hosted_zones()

    def get_private_zones(self) -> dict:
        """Return only the private hosted zones, in the same response shape."""
        return {'HostedZones': [zone for zone in self.get_current_zones()['HostedZones'] if zone['Config']['PrivateZone']]}

    def get_public_zones(self) -> dict:
        """Return only the public hosted zones, in the same response shape."""
        return {'HostedZones': [zone for zone in self.get_current_zones()['HostedZones'] if not zone['Config']['PrivateZone']]}

    def list_zones_by_name(self, name: str) -> dict:
        """Return the hosted zone whose Name exactly matches `name` (dot
        appended automatically), or None if no zone matches."""
        name = self.__fqdn_with_root_level(name)
        response = None
        for zone in self.client.list_hosted_zones_by_name(DNSName=name)['HostedZones']:
            if zone['Name'] == name:
                response = zone
        return response

    def list_public_zones_by_name(self, name):
        """Return the public hosted zone matching `name`, or None."""
        name = self.__fqdn_with_root_level(name)
        print(name)
        response = None
        for zone in self.get_public_zones()['HostedZones']:
            if zone['Name'] == name:
                response = zone
        return response

    def get_public_zone_id_by_name(self, name):
        """Return the bare zone id (last path segment of 'Id') for a public zone."""
        return self.list_public_zones_by_name(name)['Id'].split('/')[2]

    def list_private_zones_by_name(self, name):
        """Return the private hosted zone matching `name`, or None."""
        name = self.__fqdn_with_root_level(name)
        response = None
        for zone in self.get_private_zones()['HostedZones']:
            if zone['Name'] == name:
                response = zone
        return response

    def get_private_zone_id_by_name(self, name):
        """Return the bare zone id (last path segment of 'Id') for a private zone."""
        return self.list_private_zones_by_name(name)['Id'].split('/')[2]

    def update_a_record(self, record_name: str, ip: str, zone_id: str=None, ttl=600,
                        comment: str='Updated by R53_RECORD_UPDATE'):
        """UPSERT an A record in the given (or auto-discovered public) zone.

        When zone_id is omitted, the parent domain of record_name is used to
        look up the public hosted zone id.
        Returns the raw change_resource_record_sets response.
        """
        if not zone_id:
            zone_id = self.get_public_zone_id_by_name(name=self.__get_domain_from_fqdn(record_name))
        response = self.client.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch={
                'Comment': comment,
                'Changes': [
                    {
                        'Action': 'UPSERT',
                        'ResourceRecordSet': {
                            'TTL': ttl,
                            'Name': record_name,
                            'Type': 'A',
                            'ResourceRecords': [
                                {
                                    'Value': ip
                                },
                            ]
                        }
                    },
                ]
            }
        )
        return response
if __name__ == '__main__':
    """ CLI version unbundled from kraken """
    # Usage: script.py <record_name> <ip> -- upserts an A record.
    import json
    from sys import argv
    r53 = R53()
    args = argv
    # NOTE(review): no argument-count validation; missing args raise IndexError
    record_name, ip = args[1], args[2]
    response = r53.update_a_record(record_name=record_name, ip=ip)
    # print(json.dumps(response, indent=4, skipkeys=True))
    print(response)
|
from solver import *
class SolverDFS(UninformedSolver):
    """Depth-first-search solver driving a GameMaster one move at a time."""

    def __init__(self, gameMaster, victoryCondition):
        super().__init__(gameMaster, victoryCondition)

    def solveOneStep(self):
        """
        Go to the next state that has not been explored. If a
        game state leads to more than one unexplored game states,
        explore in the order implied by the GameMaster.getMovables()
        function.
        If all game states reachable from a parent state has been explored,
        the next explored state should conform to the specifications of
        the Depth-First Search algorithm.
        Returns:
            True if the desired solution state is reached, False otherwise
        """
        ### use a stack (only use append and pop)
        if self.gm.getGameState() == self.victoryCondition:
            return True
        movables = self.gm.getMovables()
        if not movables:
            # dead end: back out of the current state
            self.gm.reverseMove(self.currentState.requiredMovable)
        else:
            # expand children: try each move, record the resulting state, undo
            for move in movables:
                self.gm.makeMove(move)
                state = GameState(self.gm.getGameState(), self.currentState.depth + 1, move)
                state.parent = self.currentState
                self.currentState.children.append(state)
                self.gm.reverseMove(move)
        child_to_visit = self.findNextVisitDFS(self.currentState)
        if child_to_visit is None:
            # entire tree exhausted without reaching the goal
            return False
        self.gm.makeMove(child_to_visit.requiredMovable)
        self.visited[child_to_visit] = True
        new_state = self.gm.getGameState()
        if new_state == self.victoryCondition:
            self.currentState = child_to_visit
            return True
        self.currentState = child_to_visit
        return False

    # go to next child, if this has been visited, increment nextChildToVisit
    # if the end of the children is reached, run findNextVisit on the parent node
    def findNextVisitDFS(self, node):
        """Return the next unvisited child of `node`, backtracking (and
        reversing moves on the GameMaster) up the tree when `node` is
        exhausted; None when the whole tree is exhausted."""
        index = node.nextChildToVisit
        if index < len(node.children) and node.children[index] not in self.visited:
            return node.children[index]
        elif index >= len(node.children):
            if node.parent is None:
                return None
            else:
                # undo the move that led into `node`, then continue from its parent
                self.gm.reverseMove(node.requiredMovable)
                return self.findNextVisitDFS(node.parent)
        # current child already visited: advance the cursor and retry
        node.nextChildToVisit += 1
        return self.findNextVisitDFS(node)
class SolverBFS(UninformedSolver):
    """Breadth-first-search solver driving a GameMaster one move at a time."""

    def __init__(self, gameMaster, victoryCondition):
        super().__init__(gameMaster, victoryCondition)
        # FIFO frontier of discovered-but-unvisited states
        self.bfsqueue = Queue()

    def solveOneStep(self):
        """
        Go to the next state that has not been explored. If a
        game state leads to more than one unexplored game states,
        explore in the order implied by the GameMaster.getMovables()
        function.
        If all game states reachable from a parent state has been explored,
        the next explored state should conform to the specifications of
        the Breadth-First Search algorithm.
        Returns:
            True if the desired solution state is reached, False otherwise
        """
        ### use a queue (only use append and popleft)
        if self.gm.getGameState() == self.victoryCondition:
            return True
        current = self.currentState
        movables = self.gm.getMovables()
        if not movables:
            self.gm.reverseMove(current.requiredMovable)
        else:
            # enqueue all unvisited successors of the current state
            for move in movables:
                self.gm.makeMove(move)
                state = GameState(self.gm.getGameState(), self.currentState.depth + 1, move)
                state.parent = current
                if state not in self.visited:
                    self.bfsqueue.enqueue(state)
                self.gm.reverseMove(move)
        # skip over frontier entries that were visited after being enqueued
        child_to_visit = self.bfsqueue.dequeue()
        while child_to_visit in self.visited:
            child_to_visit = self.bfsqueue.dequeue()
        self.visited[child_to_visit] = True
        current_depth = current.depth
        next_depth = child_to_visit.depth
        # replay moves so the GameMaster's physical state matches the target:
        if current.parent is None:
            # at the root: one forward move suffices
            self.gm.makeMove(child_to_visit.requiredMovable)
        elif current.parent == child_to_visit.parent:
            # siblings: undo one move, apply the other
            self.gm.reverseMove(current.requiredMovable)
            self.gm.makeMove(child_to_visit.requiredMovable)
        else:
            # different branches: rewind to the root, then replay the full
            # path down to child_to_visit
            for i in range(current_depth):
                self.gm.reverseMove(current.requiredMovable)
                current = current.parent
            moves = []
            pointer_to_next = child_to_visit
            for i in range(next_depth):
                moves.append(pointer_to_next.requiredMovable)
                pointer_to_next = pointer_to_next.parent
            while moves:
                self.gm.makeMove(moves.pop())
        new_state = self.gm.getGameState()
        if new_state == self.victoryCondition:
            self.currentState = child_to_visit
            return True
        self.currentState = child_to_visit
        return False
class Queue:
    """Simple FIFO queue backed by a list (back at index 0, front at the end).

    NOTE: enqueue is O(n) because of list.insert(0, ...); fine for the small
    frontiers used here.
    """

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the queue holds no items."""
        # idiomatic truthiness test instead of comparing against []
        return not self.items

    def enqueue(self, item):
        """Add item at the back of the queue."""
        self.items.insert(0, item)

    def dequeue(self):
        """Remove and return the oldest item (raises IndexError when empty)."""
        return self.items.pop()

    def size(self):
        """Return the number of queued items."""
        return len(self.items)
def solution(record):
    """Render the chat log of an open chat room.

    Each record entry is "Enter <uid> <nick>", "Leave <uid>" or
    "Change <uid> <nick>".  Enter/Leave events are logged; the final
    nickname of each uid (last Enter/Change wins) is substituted when
    rendering the log.

    Fixes over the original: split('') raised ValueError (separator must
    be non-empty); the log list was written as both `charLog` and
    `chatLog` (NameError); the Leave append had mismatched brackets
    (syntax error) and a stray '[' in the message.
    """
    answer = []
    userDict = dict()          # uid -> current nickname
    chatLog = []               # (message suffix, uid) in event order
    for info in record:
        infoLst = info.split(' ')
        if infoLst[0] == 'Enter':
            userDict[infoLst[1]] = infoLst[2]
            chatLog.append(('님이 들어왔습니다.', infoLst[1]))
        elif infoLst[0] == 'Leave':
            chatLog.append(('님이 나갔습니다.', infoLst[1]))
        elif infoLst[0] == 'Change':
            userDict[infoLst[1]] = infoLst[2]
    for log in chatLog:
        answer.append(userDict[log[1]] + log[0])
    return answer
|
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
# directory
current_dir = os.path.dirname(os.path.abspath(__file__))
categories = {0:'airplane', 1:'automobile', 2:'bird', 3:'cat', 4:'deer',
5:'dog', 6:'frog', 7:'horse', 8:'ship', 9:'truck'}
def unpickle(data_batch_file):
    """Load one CIFAR-10 batch file and return its four fields as arrays.

    Returns:
        (batch_label, labels, data, filenames) as numpy arrays; the batch
        keys are bytes (b'...') because the pickle was written in py2 form.
    """
    # fix: dropped the redundant function-local `import pickle`
    # (pickle is already imported at module level)
    with open(data_batch_file, 'rb') as fo:
        dic1 = pickle.load(fo, encoding='bytes')
    return np.array(dic1[b'batch_label']), np.array(dic1[b'labels']), \
           np.array(dic1[b'data']), np.array(dic1[b'filenames'])
# Load one CIFAR-10 batch at import time (requires the dataset on disk).
batch_labels, labels, data, filenames = \
    unpickle('cifar-10-batches-py/data_batch_2')
# NOTE(review): message says "shape of batch_labels" but prints the value
print('shape of batch_labels =', batch_labels)  # b'training batch i of 5'
print('shape of labels =', labels.shape)        # (10000,)
print('shape of CIFAR data =', data.shape)      # (10000, 3072)
print('shape of filenames =', filenames.shape)  # (10000,)
def CIFAR_image_to_gray_image(cifar_image):
    """Convert one flat CIFAR image to flat grayscale.

    The input is 3072 values: three consecutive 32x32 planes (R, G, B).
    Output is a 1024-value luma image using the BT.601 weights.
    """
    plane = 32 * 32
    red = cifar_image[:plane]
    green = cifar_image[plane:2 * plane]
    blue = cifar_image[2 * plane:3 * plane]
    return red * 0.2989 + green * 0.5870 + blue * 0.1140
def plot_gray_image(gray_image, label_val):
    """Display a flat 1024-value grayscale image with its category title.

    Args:
        gray_image: flat array of 1024 luma values (reshaped to 32x32)
        label_val: one-hot label row; argmax picks the category name
    """
    image = gray_image.reshape(32, 32)
    title = categories[np.argmax([label_val])] + ':' + str(label_val)
    fig = plt.figure(figsize=(3, 2))  # width x height in inches
    subplot = fig.add_subplot(1, 1, 1)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.set_title(title)
    subplot.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
def plot_CIFAR_10_image(index):
    """Display the color CIFAR image at `index` from the module-level batch.

    Reads the module globals `data`, `labels`, `filenames` loaded above.
    """
    img = data[index]
    # split the flat 3072 values into three 32x32 planes, scaled to [0, 1]
    R = img[0:1024].reshape(32, 32) / 255.0
    G = img[1024:2048].reshape(32, 32) / 255.0
    B = img[2048:].reshape(32, 32) / 255.0
    rgb_img = np.dstack((R, G, B))
    category = categories[labels[index]]
    file_name = str(filenames[index])
    fig = plt.figure(figsize=(3, 3))
    ax = fig.add_subplot(111)
    ax.set_title(category + ', ' + file_name, fontsize=13)
    ax.imshow(rgb_img, interpolation='bicubic')
def to_one_hot_encoding_format(labels):
    """Convert integer class labels (0-9) into one-hot rows of length 10."""
    rows = []
    for i in labels:
        row = [0] * 10  # 10 CIFAR-10 categories
        row[i] = 1
        rows.append(row)
    one_hot_encoding_labels = np.array(rows)
    print('shape of one_hot_encoding_lables =', one_hot_encoding_labels.shape)
    # NOTE: the message uses the last loop index but always shows row 0,
    # exactly as the original did
    print('one_hot_encoding_lables[' + str(i) + '] = ', one_hot_encoding_labels[0])
    return one_hot_encoding_labels
def CIFAR_data_to_gray_data(CIFAR_data, labels, pkl_name):
    """Convert all CIFAR images to grayscale and pickle them with labels.

    Writes two consecutive pickle records into current_dir/pkl_name:
    the (n, 1024) grayscale array, then the (n, 10) one-hot label array.
    Returns the grayscale array.
    """
    gray_data = []
    for i in range(len(CIFAR_data)):
        gray_data.append(CIFAR_image_to_gray_image(CIFAR_data[i]))
    r = np.array(gray_data)
    print('shape of grey_data = ', r.shape)
    # fix: context manager closes the file even if pickling fails
    # (the original only closed it on the happy path)
    with open(current_dir + '/' + pkl_name, 'wb') as fp:
        pickle.dump(r, fp)  # r's shape is (10000, 1024)
        one_hot_encoding_format_labels = to_one_hot_encoding_format(labels)
        pickle.dump(one_hot_encoding_format_labels, fp)
    return r
def load_cifar_gray_data_and_one_labels(pkl_name):
    """Load the grayscale images and one-hot labels written by
    CIFAR_data_to_gray_data.

    Returns:
        (grey_data, grey_ohe_labels) with shapes (n, 1024) and (n, 10)
    """
    # fix: context manager closes the file (the original never closed it)
    with open(current_dir + '/' + pkl_name, 'rb') as fp:
        grey_data = pickle.load(fp)
        grey_ohe_labels = pickle.load(fp)
    print('shape of gray data = ', grey_data.shape)
    print('shape of gray one-hot-encoding labels = ',
          grey_ohe_labels.shape)
    # grey_data (10000, 1024), grey_ohe_labels (10000, 10)
    return grey_data, grey_ohe_labels
if __name__ == '__main__':
    # convert one batch to grayscale and plot a few samples
    CIFAR_data_to_gray_data(data, labels, pkl_name='cifar_grey_data_and_labels.pkl')
    for i in range(4):
        plot_CIFAR_10_image(i)
    # fix: call the function by its defined name -- the original called the
    # nonexistent load_cifar_gray_data_and_ohe_labels (NameError at runtime)
    gray_data, gray_labels = load_cifar_gray_data_and_one_labels('cifar_grey_data_and_labels.pkl')
    for i in range(4):
        plot_gray_image(gray_data[i], gray_labels[i])
from flask import Flask, render_template, request
from arbitrage_algos import ArbitrageAlgorithms
from visualization import GraphVisualization
from forex_scraper import ForexScraper
"""
This is the main class that runs the web-app.
"""
app = Flask(__name__)
@app.route('/')
def index():
    """
    Method that returns the initial HTML page to load upon starting the web app.
    """
    return render_template("index.html")
@app.route("/", methods=['POST'])
def run_arbitrage_program():
"""
This method runs when the "calculate" button is clicked on the web app. It
creates instances of the ForexScraper, GraphVisualization, and ArbitrageAlgorithms
classes in order to retrieve all relevant information to display to the user.
"""
# RETRIEVE DATE USER INPUT
date = request.form['date']
if date is None or date == "" or date == "now" or date == "today":
date = "latest"
## RUN THE FOREX SCRAPER AND CREATE ADJACENCY MATRIX & EXCHANGE TABLE WITH CURRENCIES
scraper = ForexScraper(date)
adjacency_matrix = scraper.get_adjacency_matrix()
exchange_table = scraper.get_exchange_table_html()
## CREATE GRAPH VISUALIZATION OF ALL RETRIEVED CURRENCIES/EXCHANGE RATES
visualization = GraphVisualization()
digraph = visualization.create_graph_from_dataframe(adjacency_matrix)
visualization.draw_graph(digraph, output_file="all_vertices_digraph.png",
size="small", edge_weights=False)
## FIND ARBITRAGE OPPORTUNITIES ON THE GRAPH
arbitrage = ArbitrageAlgorithms(digraph)
paths = arbitrage.run_arbitrage()
path_string, percentage_string = format_paths(paths)
## CREATE NEW ADJACENCY MATRIX USING ONLY CURRENCIES INVOLVED IN ARBITRAGE OPPORTUNITIES
# get a list of all currencies involved in one or more arbitrage opportunities
arbitrage_currencies = arbitrage.get_arbitrage_currencies()
# create a list of all currencies NOT involved any arbitrage opportunities
currency_set = set(scraper.get_currency_list())
no_arbitrage_currencies = currency_set.difference(arbitrage_currencies)
# create a new adjancey matrix with only the currencies involved in one or more arbitrage opportunities
filtered_adj_matrix = adjacency_matrix.copy()
filtered_adj_matrix = filtered_adj_matrix.drop(index=no_arbitrage_currencies,
columns=no_arbitrage_currencies)
## CREATE GRAPH VISUALIZATION OF CURRENCIES/EXCHANGE RATES INVOLVED IN ARBITRAGE OPPORTUNITIES
filtered_digraph = visualization.create_graph_from_dataframe(filtered_adj_matrix)
visualization.draw_graph(filtered_digraph, output_file="filtered_digraph.png",
size="large", edge_weights=True)
return render_template('index.html', paths=path_string, percentage_gains=percentage_string,
exchange_table=exchange_table, date="("+date+")");
def format_paths(paths):
    """
    Helper that formats the given (path, percentage) pairs into displayable
    HTML strings with currency flag emojis.

    Returns:
        (path_string, percentage_string)
    """
    path_lines = []
    percent_lines = []
    for path, percentage in paths:
        percent_lines.append("+ " + str(percentage) + "% profit<br/>")
        pieces = [ForexScraper.currency_flags[currency] + " " + currency
                  for currency in path]
        path_lines.append(" ⟶ ".join(pieces) + "<br/>")
    return ("".join(path_lines), "".join(percent_lines))
if __name__ == "__main__":
app.run(debug=True)
|
# Read hours and rate from the user and print the pay, rounded to 2 decimals.
hours = input('Enter hours: ')
rate = input('Enter rate: ')
try:
    hours = int(hours)
    rate = float(rate)
except ValueError:
    # fix: catch only the conversion failure instead of a bare except,
    # which also swallowed KeyboardInterrupt/SystemExit
    print('Please enter a numeric value.')
    quit()
if hours > 40:
    # overtime: the whole shift is paid at 1.5x once hours exceed 40
    # NOTE(review): many payroll exercises apply 1.5x only to the hours
    # above 40 -- confirm this all-hours multiplier is intended
    rate = rate * 1.5
salary = hours * rate
print(round(salary, 2))
|
import tensorflow as tf
import tflearn
import cPickle
import numpy as np
import sys
import os
import argparse
from sklearn import metrics
# os.environ["CUDA_VISIBLE_DEVICES"]="9"
def unpickle(fid):
    """Load and return one pickled CIFAR batch dict from file path `fid`.

    NOTE: uses cPickle, so this module is Python 2 only.
    """
    with open(fid, 'rb') as fo:
        data = cPickle.load(fo)
    return data
def bottleneck_layer(x, filters, scope, keep_prob=1):
    """DenseNet bottleneck: BN-ReLU-1x1 conv (4*filters) then BN-ReLU-3x3 conv.

    Args:
        x: input tensor (NHWC)
        filters: growth rate; the 1x1 conv widens to 4*filters first
        scope: name prefix for the layer variables
        keep_prob: dropout keep probability (1 = no dropout)
    Returns:
        tensor with `filters` output channels
    """
    with tf.name_scope(scope):
        x = tflearn.batch_normalization(x, scope=scope+'_batch1')
        x = tf.nn.relu(x)
        x = tflearn.conv_2d(x, nb_filter=4*filters, filter_size=1, strides=1, padding='same',
                            activation='linear', bias=False, scope=scope+'_conv1',
                            regularizer='L2', weight_decay=1e-4)
        x = tflearn.dropout(x, keep_prob=keep_prob)
        x = tflearn.batch_normalization(x, scope=scope+'_batch2')
        x = tf.nn.relu(x)
        x = tflearn.conv_2d(x, nb_filter=filters, filter_size=3, strides=1, padding='same',
                            activation='linear', bias=False, scope=scope+'_conv2',
                            regularizer='L2', weight_decay=1e-4)
        x = tflearn.dropout(x, keep_prob=keep_prob)
        return x
def dense_block(input_x, filters, nb_layers, layer_name):
    """DenseNet dense block: nb_layers bottlenecks with dense (concat) links.

    Each bottleneck receives the channel-concatenation of the block input
    and all previous bottleneck outputs (concat along axis 3 = channels).
    """
    with tf.name_scope(layer_name):
        layers_concat = list()
        layers_concat.append(input_x)
        x = bottleneck_layer(input_x, filters, scope=layer_name+'_bottleN_'+str(0))
        layers_concat.append(x)
        for i in range(nb_layers - 1):
            x = tf.concat(layers_concat, axis=3)
            x = bottleneck_layer(x, filters, scope=layer_name + '_bottleN_'+str(i+1))
            layers_concat.append(x)
        x = tf.concat(layers_concat, axis=3)
        print(layer_name, x)  # debug: show the block's output tensor
        return x
def transition_layer(x, scope, reduction=0.5, keep_prob=1):
    """DenseNet transition: BN-ReLU-1x1 conv (channel compression) + 2x2 avg pool.

    Args:
        reduction: compression factor applied to the channel count
        keep_prob: dropout keep probability (1 = no dropout)
    """
    out_filters = int(int(x.get_shape()[-1])*reduction)
    with tf.name_scope(scope):
        x = tflearn.batch_normalization(x, scope=scope+'_batch1')
        x = tf.nn.relu(x)
        x = tflearn.conv_2d(x, nb_filter=out_filters, filter_size=1, strides=1, padding='same',
                            activation='linear', bias=False, scope=scope+'_conv1',
                            regularizer='L2', weight_decay=1e-4)
        x = tflearn.dropout(x, keep_prob=keep_prob)
        # halve the spatial resolution
        x = tflearn.avg_pool_2d(x, kernel_size=2, strides=2, padding='valid')
        print(scope, x)  # debug: show the transition's output tensor
        return x
def load_data(args, mode):
    """Load one CIFAR-100 split ('train' or 'test') as NHWC images + labels.

    Returns:
        (feats, labs); labs stay as raw ints when args.onlyevalue == 1,
        otherwise they are one-hot encoded to args.nclass classes.
    """
    fid = args.fid+mode
    data = unpickle(fid)
    n_class = args.nclass
    feats = data["data"].astype(np.float64)
    labs = data["fine_labels"]
    # flat (n, 3072) -> channel-last (n, 32, 32, 3)
    feats = np.reshape(np.transpose(np.reshape(feats, [-1, 3, 1024]), (0, 2, 1)), [-1, 32, 32, 3])
    if args.onlyevalue == 1:
        return feats, labs
    labs = tflearn.data_utils.to_categorical(labs, n_class)
    return feats, labs
def main(args):
    """Build, (optionally) evaluate, and train a DenseNet on CIFAR-100.

    With args.onlyevalue == 1, loads a saved model and reports test metrics;
    otherwise trains (optionally warm-started from args.pre_train) and saves.
    NOTE: this module is Python 2 (cPickle, integer division for batch_size).
    """
    # Data loading
    fid = args.fid+"train"
    data = unpickle(fid)
    n_class = args.nclass
    train_feats, train_labs = load_data(args, "train")
    test_feats, test_labs = load_data(args, "test")
    # Real-time data preprocessing (per-channel CIFAR mean/std constants)
    mean = [129.30416561, 124.0699627, 112.43405006]
    std = [51.20360335, 50.57829831, 51.56057865]
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center(per_channel=True, mean=mean)
    img_prep.add_featurewise_stdnorm(per_channel=True, std=std)
    # Real-time data augmentation
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_crop([32, 32], padding=4)
    # DenseNet
    depth = args.depth
    filters = args.growth_rate
    nb_blocks = 3
    #nb_layers_list = [6,12,48,32]
    # bottlenecks per block, derived from the total depth
    nb_layers_list = [(depth - (nb_blocks + 1)) // (2*nb_blocks) for i in range(nb_blocks)]
    print(nb_layers_list)
    net = tflearn.input_data(shape=[None, 32, 32, 3], data_preprocessing=img_prep, data_augmentation=img_aug)
    print("input", net)
    net = tflearn.conv_2d(net, nb_filter=2*filters, filter_size=3, strides=1, padding='same', activation='linear', bias=False, name='conv0',
                          regularizer='L2', weight_decay=1e-4)
    # net = tflearn.max_pool_2d(net, kernel_size=3, strides=2, padding='valid')
    print("init_layer", net)
    # dense blocks separated by transition layers; no transition after the last
    for i in range(nb_blocks-1):
        net = dense_block(net, filters, nb_layers=nb_layers_list[i], layer_name='dense_'+str(i+1))
        net = transition_layer(net, scope='trans_'+str(i+1))
    net = dense_block(net, filters, nb_layers=nb_layers_list[-1], layer_name='dense_final')
    # Global Avg + FC
    net = tflearn.batch_normalization(net, scope='linear_batch')
    net = tf.nn.relu(net)
    net = tflearn.global_avg_pool(net)
    if args.finetune == 1:
        # restore=False: re-initialize the classifier head when fine-tuning
        net = tflearn.fully_connected(net, n_class, activation='softmax', regularizer='L2', weight_decay=1e-4, restore=False)
    else:
        net = tflearn.fully_connected(net, n_class, activation='softmax', regularizer='L2', weight_decay=1e-4)
    # Optimizer
    opt = tf.train.MomentumOptimizer(learning_rate=args.lr, momentum=0.9, use_nesterov=True)
    epsilon = 1e-4
    learning_rate = 1e-4
    # opt = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=epsilon)
    # Regression
    net = tflearn.regression(net, optimizer=opt, loss='categorical_crossentropy', restore=False)
    # Training
    config = tf.ConfigProto()
    config.allow_soft_placement = True
    config.gpu_options.allow_growth = True
    tf.add_to_collection(tf.GraphKeys.GRAPH_CONFIG, config)
    model = tflearn.DNN(net, checkpoint_path='/data/srd/models/image/model_'+args.model_name+'/model',
                        tensorboard_dir='/data/srd/logs/image/log_'+args.model_name,
                        max_checkpoints=3, tensorboard_verbose=0, clip_gradients=0.0)
    if args.onlyevalue == 1:
        # evaluation-only path: load saved weights and predict in 10 batches
        model.load("/data/srd/models/image/"+args.pre_train+"/model.tfl")
        n_test = len(test_feats)
        n_batch = 10
        # NOTE(review): integer division under py2; would be a float in py3
        batch_size = n_test/10
        labsp = model.predict(test_feats[0:batch_size])
        for i in range(1, 10):
            labsp = np.vstack([labsp, model.predict(test_feats[i*batch_size:(i+1)*batch_size])])
        print(metrics.classification_report(test_labs, np.argmax(labsp, 1)))
        print("acc:", metrics.accuracy_score(test_labs, np.argmax(labsp, 1)))
        np.argmax(labsp, 1).tofile("/data/srd/data/cifar/"+args.pre_train+".bin")
        return
    # pre-train model: warm-start the weights when requested
    if args.pre_train:
        model.load("/data/srd/models/image/"+args.pre_train+"/model.tfl", weights_only=True)
    try:
        model.fit(train_feats, train_labs, n_epoch=args.epoch, validation_set=(test_feats, test_labs),
                  snapshot_epoch=False, snapshot_step=500, show_metric=True, batch_size=64, shuffle=True,
                  run_id=args.model_name)
    except KeyboardInterrupt:
        # allow Ctrl-C to stop training early; the model is still saved below
        print("Keyboard Interrupt")
    model.save("/data/srd/models/image/"+args.model_name+"/model.tfl")
def parse_arguments(argv):
    """Build the CLI parser for the DenseNet trainer and parse argv."""
    parser = argparse.ArgumentParser()
    # (flag, type, default, help) for every option
    flags = [
        ('--depth', int, 190, "DenseNet's Depth"),
        ('--growth_rate', int, 40, "DenseNet's growth_rate"),
        ('--model_name', str, 'test', 'model name'),
        ('--pre_train', str, None, 'pre train model'),
        ('--lr', float, 0.1, 'learning rate'),
        ('--epoch', int, 1000, 'max epoch'),
        ('--onlyevalue', int, 0, 'only evalue or note'),
        ('--fid', str, '/data/srd/data/Image/cifar-100-python/', 'train and test file path'),
        ('--nclass', int, 100, 'number of class'),
        ('--finetune', int, 0, 'finetune or not'),
    ]
    for name, flag_type, default, text in flags:
        parser.add_argument(name, type=flag_type, help=text, default=default)
    return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
import numpy as np
import investor.predict
from investor.predict.numerics import roi_window, roi
class SlidingWindow:
    def __init__(self, win, target, lag):
        '''
        Sliding window prediction.

        For an n-dimensional time series we extract windows
        of a certain size at each time and take the target dimension
        at time + lag as the prediction.

        :param win: window size
        :param target: target dimension (column index into the series)
        :param lag: n steps into the future
        '''
        self.win = win
        self.target = target
        self.lag = lag

    def slide(self, sequence):
        '''
        Slide extracts sliding windows from a sequence,
        and the return for each window.

        :param sequence: input sequence, shape (n, dims)
        :return: (windows, predictions, invest_at) numpy arrays:
                 flattened per-window ROI features, the lagged ROI targets,
                 and the raw target value at each window's last step
        '''
        n = sequence.shape[0]
        invest_at = []
        windows = []
        predictions = []
        # start at win+1 so sequence[t-win-1] (the ROI base) exists;
        # stop at n-lag so the lagged target index stays in range
        for t in range(self.win + 1, n - self.lag):
            sample = sequence[t-self.win:t]
            # per-window returns relative to the value just before the window
            roi_sample = roi_window(sample, sequence[t-self.win-1])
            # target: return of the target dimension `lag` steps ahead
            roi_lag = roi(sample[-1][self.target], sequence[t + self.lag][self.target])
            windows += [roi_sample.flatten()]
            predictions += [roi_lag]
            invest_at += [sample[-1][self.target]]
        invest_at = np.array(invest_at)
        windows = np.array(windows)
        predictions = np.array(predictions)
        return windows, predictions, invest_at
|
import pygame
from Shape import Shape
class Board:
    """Grid board for a pygame-based game; cells with value 1 are drawn
    in the current shape's color."""

    # create the field
    def __init__(self):
        self.width = 500
        self.height = 800
        # board[i][j]: 1 = occupied cell, 0 = empty
        self.board = [[0] * self.width for _ in range(self.height)]
        # default rendering values
        self.left = 10
        self.top = 10
        self.cell_size = 50

    def render(self, screen):
        """Draw every occupied cell as a filled rectangle on `screen`.

        NOTE(review): uses self.shape.color, but self.shape is only
        assigned in start() -- rendering before start() raises AttributeError.
        """
        for i in range(self.height):
            for j in range(self.width):
                if self.board[i][j] == 1:
                    pygame.draw.rect(screen, self.shape.color,
                                     (self.left + self.cell_size * j,
                                      self.top + self.cell_size * i,
                                      self.cell_size, self.cell_size))

    def start(self, mouse_pos):
        """Create a new Shape if the click landed inside the board area.

        Returns True when the click was inside the board, False otherwise.
        """
        if mouse_pos[0] > self.left + self.cell_size * self.width \
                or mouse_pos[1] > self.top + self.cell_size * self.height \
                or mouse_pos[0] < self.left or mouse_pos[1] < self.top:
            return False
        self.shape = Shape()
        return True

    def change_field(self):
        """Rebuild the board, marking exactly the cells of the current shape."""
        self.board = [[0] * self.width for _ in range(self.height)]
        for i in range(self.height):
            for j in range(self.width):
                if (i, j) in self.shape.cells:
                    self.board[i][j] = 1
|
""" Helper functions for the Access Control Queuing problem
This script contains object definitions and herlper functions for the Access Control
Queuing problem, which are useful for the q-learning implementation.
This file should be imported as a module and contains the following functions:
* get_action - returns a greedy or random action, according to the exploration rate
* step - get information about doing action in current state, like reward and nex state
* priority_client - get a random priority for a nwe client
* gets0unif - get a uniformly-selected random initial state
"""
import numpy as np
import itertools
# possible priorities
PRIORITIES = np.arange(0, 4)
# reward for each priority
REWARDS = np.array([1,2,4,8])
# possible actions
REJECT = 0
ACCEPT = 1
ACTIONS = [REJECT, ACCEPT]
# total number of servers
NUM_OF_SERVERS = 10
# at each time step, a busy server will be free w.p. 0.06
PROBABILITY_FREE = 0.06
# learning rate
ALPHA = 0.05
# probability for exploration
EPSILON = 0.1
class QValueFunction:
    """ Class to represent the values of the agent and to apply its learning process following
    the q-learning's update rule

    Attributes
    ----------
    servers - array with servers (NUM_OF_SERVERS=10)
    priorities - customer's priorities
    actions - two posible actions: reject (0) or accept (1)
    alpha - learning rate used in the update rule of Q-values
    rho - gain (average-reward baseline, differential q-learning)
    q_table - array for q-values of pairs (s,a), indexed [free_servers, priority, action]
    vs - array for values of each states
    """
    def __init__(self):
        self.servers = np.arange(NUM_OF_SERVERS+1)
        self.priorities = PRIORITIES
        self.actions = ACTIONS
        self.alpha = ALPHA
        self.rho = 0.0
        self.q_table = np.zeros([NUM_OF_SERVERS+1, len(PRIORITIES), len(ACTIONS)])
        # mask impossible pairs with NaN so nanmax skips them:
        # cannot ACCEPT when 0 servers are free
        self.q_table[0, :, 1] = np.nan
        # NOTE(review): masks REJECT for priorities >= 1 at 3 free servers --
        # the magic index 3 looks experiment-specific; confirm intent
        self.q_table[3, 1:, 0] = np.nan
        self.vs = np.zeros([NUM_OF_SERVERS+1, len(PRIORITIES)])

    def value(self, num_free_servers, priority, action):
        """ Returns the value of a specific pair (s,a) """
        return self.q_table[num_free_servers, priority, action]

    def learn(self, free_servers, priority, action, new_free_servers, new_priority, reward):
        """ Apply update rule for q-learning """
        current_state = (free_servers, priority, action)
        # differential return: reward minus the average-reward estimate rho
        R = reward - self.rho
        if not(reward == 8 and new_free_servers == 0):
            # standard update: bootstrap with the best Q of the next state
            self.q_table[current_state] = (1.0-self.alpha)*self.q_table[current_state] + \
                self.alpha*(R+np.nanmax(self.q_table[new_free_servers, new_priority]))
        else:
            # special case (top reward and no free servers): no bootstrap term
            self.q_table[current_state] = (1.0-self.alpha)*self.q_table[current_state] + self.alpha*R

    def update_vs(self):
        """ Get the value of each state, given their Q values """
        # per free-server count, take the max over actions for every priority
        x = map(lambda x: np.apply_along_axis(max, 1, self.q_table[x, :]), np.arange(NUM_OF_SERVERS+1))
        self.vs = np.array(list(x))
def get_action(free_servers, priority, Qvalue_function, greedy=False):
    """ Returns a greedy or random action, according to the exploration rate and greedy argument

    Args:
        free_servers & priority - current state of the environment
        Qvalue_function - object with Q-values of the agent
        greedy - if true, only explote; if false, balance exploration/explotation
    Returns:
        action - 0 (reject) or 1 (accept)
    """
    # forced moves: no server free -> must reject; top priority -> always accept
    if free_servers == 0:
        return REJECT
    if priority == PRIORITIES[-1]:
        return ACCEPT
    # epsilon-greedy: exploit w.p. 1-EPSILON (or always when greedy=True)
    if (np.random.uniform(0, 1) > EPSILON) or greedy:
        return np.argmax(Qvalue_function.q_table[free_servers, priority])
    else:
        return np.random.choice(ACTIONS)
def step(free_servers, priority, action):
    """ Get information about doing action in state

    Returns:
        fnew - new number of free servers
        cnew - new customer priority
        r - reward for performing action in state
    """
    f = free_servers
    n = NUM_OF_SERVERS
    p = priority
    r = 0.0
    a = action
    if(f == 0):
        # no free servers: the action is forced to REJECT with zero reward
        a = REJECT
        r = 0.0
    # each of the n-f busy servers frees up independently w.p. PROBABILITY_FREE
    fnew = f + len(np.argwhere(np.random.rand(n-f) <= PROBABILITY_FREE))
    if(a == ACCEPT):
        # accepting consumes one server and pays the priority's reward
        fnew = fnew - 1
        r = REWARDS[p]
    cnew = priority_client()
    return fnew, cnew, r
def priority_client():
    """ Get priority of next customer

    Customers of priorities {8, 4, 2, 1} arrive with probabilities {0.4, 0.2, 0.2, 0.2},
    respectively
    """
    # PRIORITIES are indices 0..3 into REWARDS=[1,2,4,8]; p gives the
    # highest-reward priority (index 3) probability 0.4
    return np.random.choice(PRIORITIES, size=1, replace=False, p=[0.2, 0.2, 0.2, 0.4])[0]
def gets0():
    """ Get initial state: w.p. 0.5 start with zero free servers, otherwise
    a uniformly-random free-server count; priority is always random. """
    if(np.random.uniform(0, 1) < 0.5):
        return 0, priority_client()
    else:
        return np.random.choice(NUM_OF_SERVERS+1, size=1, replace=False)[0], priority_client()
def gets0unif():
    """ Get a uniformly-selected random initial state (free servers 0..10, random priority). """
    return np.random.choice(NUM_OF_SERVERS+1, size=1, replace=False)[0], priority_client()
def moving_average(x, w):
    """Simple moving average of x with window size w ('valid' mode, so the
    result has len(x) - w + 1 entries)."""
    window = np.ones(w)
    sums = np.convolve(x, window, 'valid')
    return sums / w
import pygame  # import pygame module
pygame.init()  # initialize all of pygame's submodules
screen = pygame.display.set_mode((400, 400))  # create a surface called 'screen' and give it a size of 400x400
running = True  # used to control while loop
"""Setup for working with text"""
my_font = pygame.font.SysFont("Arial", 48)  # choose a font. can be a list of fonts. pygame will fall back on the default font if none of your fonts can be found
text_surface = my_font.render("Hello World", True, (200, 20, 255))  # render your text as a surface (antialiased, purple-ish RGB)
while running:  # main loop of program. will repeat until the window is closed
    """Below this line is a for loop for handling the event queue.
    The event queue takes every event (user interaction) that happens in the pygame window and
    makes it available in order. the variable 'e' will be each event in order.
    Anything that involves handling events should generally happen in here"""
    for e in pygame.event.get():  # get each event in the event queue...
        if e.type == pygame.QUIT:  # ...and if that event is QUIT...
            running = False  # ......set running to False so the main loop ends
    """It's probably worth noting that we're still in the main loop here, but have left the for loop
    for the event queue. This will run on repeat, but won't repeat for each event."""
    screen.fill((0, 0, 0))  # fill the display with black to clear it
    screen.blit(text_surface, (0, 0))  # display the text at the top-left of the screen
    pygame.display.flip()  # update the display
from django.shortcuts import render
# Create your views here.
def home(request):
    """Render index.html; on POST, evaluate the submitted expression and
    include the result in the template context as "answer".

    SECURITY WARNING: eval() on request.POST["nameC"] executes arbitrary
    user-supplied Python code -- do not deploy as-is. Use
    ast.literal_eval or a real expression parser instead.
    """
    context = {}
    if request.method == 'POST':
        answer = eval(request.POST["nameC"])  # HACK: untrusted input -- see warning above
        context = {"answer": answer}
    return render(request, 'index.html', context)
# -*- coding: utf-8 -*-
"""Top-level package for Log Aggregator Server."""
__author__ = """Trumble0921"""
__email__ = 'joonyeolsim@gmail.com'
__version__ = '0.2.1'
|
# -*- coding:utf8 -*-
from mock import Mock
from collections import defaultdict
try:
# python 2.6
from unittest2 import TestCase, SkipTest
except ImportError:
from unittest import TestCase, SkipTest
from nos import Client
class DummyTransport(object):
    """Test double for the nos transport layer: records every request and
    returns canned responses instead of touching the network."""

    def __init__(self, responses=None, **kwargs):
        # optional list of canned responses, consumed by call order
        self.responses = responses
        self.call_count = 0
        # (method, bucket, key) -> list of (params, body, headers, timeout)
        self.calls = defaultdict(list)

    def perform_request(self, method, bucket='', key='', params=None, body=None,
                        headers=None, timeout=None):
        """Record the call and return the next canned response, or a default
        200 response whose body mock reads '<a></a>'."""
        # fix: mutable default arguments ({} shared across all calls) replaced
        # with the None-sentinel idiom; recorded values are unchanged
        if params is None:
            params = {}
        if headers is None:
            headers = {}
        resp = Mock()
        resp.read = Mock(return_value='<a></a>')
        h = {
            'Last-Modified': 'Fri, 10 Feb 2012 21:34:55 GMT',
            'Content-Length': 1,
            'Content-Range': '0-50'
        }
        resp = 200, h, resp
        if self.responses:
            resp = self.responses[self.call_count]
        self.call_count += 1
        self.calls[(method, bucket, key)].append((params, body, headers, timeout))
        return resp
class ClinetTestCase(TestCase):
    """Base test case wiring a nos Client to the DummyTransport.

    NOTE(review): "Clinet" is a typo for "Client", but the name is kept --
    renaming would break the subclass below and any external references.
    """

    def setUp(self):
        super(ClinetTestCase, self).setUp()
        self.client = Client(transport_class=DummyTransport)

    def assert_call_count_equals(self, count):
        """Assert the dummy transport was invoked exactly `count` times."""
        self.assertEquals(count, self.client.transport.call_count)

    def assert_url_called(self, method, bucket, key, count=1):
        """Assert (method, bucket, key) was requested `count` times and
        return the recorded (params, body, headers, timeout) tuples."""
        self.assertIn((method, bucket, key), self.client.transport.calls)
        calls = self.client.transport.calls[(method, bucket, key)]
        self.assertEquals(count, len(calls))
        return calls
class TestClinetTestCase(ClinetTestCase):
    """Self-tests for the ClinetTestCase helpers and DummyTransport bookkeeping."""

    def test_our_transport_used(self):
        # the client must be wired to the dummy, not a real transport
        self.assertIsInstance(self.client.transport, DummyTransport)

    def test_start_with_0_call(self):
        self.assert_call_count_equals(0)

    def test_each_call_is_recorded(self):
        self.client.transport.perform_request('GET')
        self.client.transport.perform_request(
            'DELETE', 'test', 'object', params={},
            body='body', headers={}, timeout=None
        )
        self.assert_call_count_equals(2)
        # the DELETE call's arguments must be recorded verbatim
        self.assertEquals([({}, 'body', {}, None)], self.assert_url_called(
            'DELETE', 'test', 'object', 1
        ))
|
# Getting input from the user; input() always returns a str, so it must be
# parsed to an integer before arithmetic.
your_weight = input("Enter your Weight in kg: ")
print(type(your_weight))
# To parse the value, convert it on a separate line (or into a new variable).
int_weight_parser = int(your_weight)
print(type(int_weight_parser))
# Formatted string (f-string)
first_name = "pooya"
last_name = "panahandeh"
message = f'mr. {first_name} {last_name}, welcome to the python world.'
print(message)
# print the length of the string
print(len(message))
# find the index of the first occurrence of a character in the string
print(message.find('p'))
# replace a substring with another one (returns a new string)
print(message.replace('python', 'your python'))
# membership test: boolean check whether the string contains the substring.
print('python' in message) # the result will be true.
|
# Numerical Methods II, Courant Institute, NYU, spring 2018
# http://www.math.nyu.edu/faculty/goodman/teaching/NumericalMethodsII2018/index.html
# written by Jonathan Goodman (instructor)
# see class notes Part 2 for more discussion
# Illustrate Fourier interpolation ...
# ... and how the Python FFT works ...
# ... and some Python vector instructions that run faster than loops.
import numpy as np
import matplotlib.pyplot as plt
# Function to be interpolated
# A symmetric hat function with uf(0) = 1 and u(x)=0 for |x|>r
def u1(x):
    """Symmetric hat function: u1(0) = 1, linear decay, 0 for |x| > r.

    Bug fix: an unconditional ``return np.cos(x)`` at the top (apparently
    leftover debugging) made the hat-function code below unreachable; it is
    removed so the function matches its documentation and the "continuous"
    name used in the plots.
    """
    r = 1.
    ax = np.abs(x)
    if ax > r:
        return 0.
    else:  # u(x) = 1 - |x|/r if |x| < r
        return 1. - (ax / r)
u1name = "continuous"
# The hat function for x>0, but discontinuous jump to zero at x=0
def u2(x):
    """Hat ramp on [0, r]: zero for x < 0 or x > r, else 1 - x/r.

    Discontinuous: jumps from 0 (x < 0) up to 1 at x = 0.
    """
    r = 1.
    if x < 0 or x > r:
        return 0.
    return 1. - x / r
u2name = "discontinuous"
# A gaussian that is (almost) smooth if r << L
def u3(x):
    """Gaussian bump of width 0.4 — (almost) smooth-periodic when r << L."""
    width = .4
    z = x / width
    return np.exp(-z * z / 2.)
u3name = "smooth_big"
# A gaussian that is (almost) smooth if r << L
def u4(x):
    """Narrow gaussian bump of width 0.1 — (almost) smooth when r << L."""
    width = .1
    z = x / width
    return np.exp(-z * z / 2.)
u4name = "smooth_small"
# --------------------------------------------------------------------------------
m = 10 # so n is even
n = 2 * m # number of sample points
L = 4. # length of the physical interval
dx = L / n # for comments only, not used in the code
# Uniformly spaced points in [-L/2,L/2], for periodic functions:
# x[0] = -L/2 and x[n-1] = L/2 - dx.
# NOTE(review): the linspace calls below actually sample [0, 2*pi), not
# [-L/2, L/2) of length L = 4 as this comment (and dx above) suggest —
# confirm which interval is intended.
# Could be done with a scalar loop: xa[j] = (-L/2) + j*dx, ...
# ... but that would be slower, and possibly less clear?
xa = np.linspace(0, 2*np.pi, num=n, endpoint=False) # array of x values
mg = 30
ng = 2 * mg # of points for graphics
xg = np.linspace(0, 2*np.pi, num=ng, endpoint=False) # array of x values, for plotting
# Apply the function u1 (or u2 or u3) to every entry in the array xa
# ... and put the result in the array u.
u = list(map(u1, xa)) # choose functions 1, 2, or 3, AND !!!
ug = list(map(u1, xg))
name = u1name # !!! MUST change the name when you change the function!!!
uh = np.fft.fft(u) # "u hat", the Python "forward" FFT routine, ...
# ... produces a length n complex array
# "roll" from numpy does a circular shift every entry of uh moves left (or right") ...
# ... by m-1 and the ones that fall off are copied onto the other end.
uh = np.roll(uh, m - 1) # put frequency k=0 in the middle of the array
# Evaluate the Fourier interpolant using the inverse FFT. Must pad by zeros
# on the left and right to make a longer vector.
uhp = np.zeros(ng, dtype=complex) # "u hat padded", an array with ...
# ... uh padded by zeros left and right.
# "dtype = complex" makes them complex zeros
js = mg - m # "j start", location for first uDft[0] to go
je = js + n # "j end", there are n entries in uDFT
# Vectorized copy commands give ranges using colons.
# You can find this under "slicing" in the documentation.
uhp[js:je] = uh[:] # Also converts the type of uh to the type of uhp ..
# ... that's why we had to make uhp be complex zeros.
# NOTE(review): debug print left in — consider removing before committing.
print(uhp)
uhp = np.roll(uhp, -mg + 1) # Circular shift to get ready for the inverse FFT
ugi = (ng / n) * np.fft.ifft(uhp) # The Python inverse FFT has a factor of (1/n) ...
# ... built in. We want (1/n), not (1/ng).
plt.plot(xa, u, "o", xg, np.real(ugi), '.', xg, ug)
titleString = name + ", n = " + str(n)
plt.title(titleString)
fileName = "FourierInterp_" + name + ".pdf"
plt.savefig(fileName)
plt.show()
|
#####################################################################################
# Name : codestrs_final.py #
# Date : Dec 4, 2016 #
# Description : Solution to updating the Flappy Bike game user interface. #
# The handle_**_arrow functions allow the player to move the bike #
# when he/she clicks the up, down, left and right arrow keys. #
# #
#####################################################################################
stage.set_background("city")
sprite = codesters.Sprite("bike")
sprite.set_size(0.4)
sprite.go_to(-200, 0)
stage.set_gravity(10)
stage.disable_all_walls()
# Arrow-key handlers: give the bike a constant speed in the pressed direction.
def handle_up_arrow():
    sprite.set_y_speed(4)
stage.event_key("up", handle_up_arrow)
def handle_down_arrow():
    sprite.set_y_speed(-4)
stage.event_key("down", handle_down_arrow)
def handle_right_arrow():
    sprite.set_x_speed(4)
stage.event_key("right", handle_right_arrow)
def handle_left_arrow():
    sprite.set_x_speed(-4)
stage.event_key("left", handle_left_arrow)
def interval():
    # Every 5 seconds: spawn a top/bottom pipe pair with a random gap,
    # pinned to the stage edges and scrolling left toward the bike.
    # sprite = codesters.Rectangle(x, y, width, height, "color")
    # NOTE(review): `random` is used but never imported in this file — the
    # codesters platform presumably pre-imports it; confirm.
    top_height = random.randint(50, 300)
    top_block = codesters.Rectangle(300, 0, 100, top_height, "blue")
    top_block.set_gravity_off()
    top_block.set_top(250)
    bot_height = 350 - top_height
    bot_block = codesters.Rectangle(300, 0, 100, bot_height, "blue")
    bot_block.set_gravity_off()
    bot_block.set_bottom(-250)
    top_block.set_x_speed(-2)
    bot_block.set_x_speed(-2)
    # add any other actions...
stage.event_interval(interval, 5)
def collision(sprite, hit_sprite):
    # Any collision resets the bike to the center and shows "Game Over!".
    sprite.go_to(0,0)
    text = codesters.Text("Game Over!", 0, 100, "yellow")
sprite.event_collision(collision)
|
import sys
from solution import Solution
# from classes import ?
class TestSuite:
    # Minimal hand-rolled harness (Python 2: print statements, no unittest).
    def run(self):
        self.test001()
    def test001(self):
        # reverseWords should reverse the word order and trim whitespace.
        print "test 001"
        s = " the sky is blue "
        r = Solution().reverseWords(s)
        print " input:\t", s
        print " expect:\t", "blue is sky the"
        print " output:\t", r
        print
def main(argv):
    # Entry point; argv is accepted for symmetry with sys.argv but unused.
    TestSuite().run()
if __name__ == '__main__':
    main(sys.argv)
|
from odoo import models, fields, api
class accouting_customer(models.Model):
    """Extends hr.expense.sheet; all customization is currently disabled.

    NOTE(review): the commented-out fields_view_get override (plus the
    _check_user_ap/_check_user_ar helpers) look like an abandoned attempt
    to make the expense-sheet form read-only for specific logins — delete
    or finish before release.
    """
    _inherit = 'hr.expense.sheet'
    # def _check_user_ap(self):
    #     user_id = self.env['res.users'].browse(self._uid)
    #     if user_id:
    #         if user_id.login == 'ap_manager':
    #             return True
    #         else:
    #             return False
    # def _check_user_ar(self):
    #     user_id = self.env['res.users'].browse(self._uid)
    #     if user_id:
    #         if user_id.login == 'ar_manager':
    #             return True
    #         else:
    #             return False
    # @api.model
    # def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
    #     res = super(accouting_customer, self).fields_view_get(view_id, view_type, toolbar=toolbar, submenu=False)
    #     hr_sheet = self.env.ref('hr_expense.action_hr_expense_sheet_my_all').id
    #     if self._context.get('params') and self._context.get('params').get('action') and hr_sheet == self._context.get('params').get('action'):
    #         if view_type == 'form':
    #             # if 'arch' in res:
    #             #     data = res.get('arch').split('\n')
    #             #     modify_edit_str = 'edit="0" create="0" copy="0" delete="0"'
    #             #
    #             #     arch_data = '<form %s>' % (modify_edit_str)
    #             #     for n in range(1, len(data)):
    #             #         arch_data += '\n%s' % (data[n])
    #             #     res['arch'] = arch_data
    #             return res
    #
    #     else:
    #         return res
from api.base_model import db
class Business(db.Model):
    """A registered business owned by a user, with a dynamic reviews relation."""
    __tablename__ = 'business'
    id = db.Column(db.Integer, primary_key=True)
    businessname = db.Column(db.String(50), unique=True)
    description = db.Column(db.String(50), nullable=False)
    category = db.Column(db.String(50), nullable=False)
    location = db.Column(db.String(50), nullable=False)
    owner_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
    # date_modified is refreshed by the database on every update.
    date_modified = db.Column(db.DateTime,
                              default=db.func.current_timestamp(),
                              onupdate=db.func.current_timestamp())
    reviews = db.relationship('Reviews', backref='business', lazy='dynamic')
    def obj(self):
        """Return the business as a JSON-serializable dict.

        NOTE(review): relies on an `owner` relationship (self.owner.username)
        that is not declared on this model — presumably a backref defined on
        the User model; confirm.
        """
        return {
            'id': self.id,
            'businessname': self.businessname,
            'description': self.description,
            'category': self.category,
            'location': self.location,
            'owner': self.owner.username,
            'created_at': self.date_created,
            'updated_at': self.date_modified
        }
class Reviews(db.Model):
    """A user-authored review attached to a business."""
    __tablename__ = 'reviews'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(50), nullable=False)
    review = db.Column(db.String(50), nullable=False)
    date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
    owner_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    business_id = db.Column(db.Integer, db.ForeignKey('business.id'))
    # date_modified is refreshed by the database on every update.
    date_modified = db.Column(db.DateTime,
                              default=db.func.current_timestamp(),
                              onupdate=db.func.current_timestamp())
|
# queue.py
# by James Fulford
# for Joyful Love
# implements needed functions from Buffer API
import utilities
from utilities import get_when
from utilities import dtformat
import buffpy
class Queue(utilities.SaveLoad):
    """
    Represents a single buffer.
    .name() returns a pretty string
    Queue.name(profile) returns pretty string for profile too
    .pending_updates() returns list of updates in Buffer and Reserve
    .get_buffer() returns list of updates in Buffer
    .add(update) tries to add update to Reserve
    .sync() sends reserved updates to Buffer, keeps the overflow in reserve
    """
    def __init__(self, profile, path=None):
        # profile: a buffpy profile; path: optional saved-state file name,
        # defaulting to a file named after the profile.
        self._prof = profile
        part = path
        if not path:
            part = Queue.profile_name(profile)
        try:
            self.reserve = utilities.load(part)["reserve"]
        except IOError:
            # No saved state on disk yet: start with an empty reserve.
            self.reserve = []
    def save(self, fp=None):
        # Persist the reserve as JSON; fp, if given, is a directory prefix.
        # NOTE(review): "wb" + json.dump is a Python 2 pattern — on Python 3
        # json.dump requires a text-mode file; confirm target interpreter
        # (the httplib import further down suggests Python 2).
        from json import dump
        part = ""
        if fp:
            part = fp + "/"
        with open(part + self.name(), "wb") as phile:
            dump(self.dictionary(), phile, indent=4, sort_keys=True)
    def dictionary(self):
        # Serializable snapshot of this queue (reserve sorted by post time).
        return {
            "reserve": sorted(self.reserve, key=get_when)
        }
    def name(self):
        return Queue.profile_name(self._prof)
    @staticmethod
    def profile_name(prof):
        # e.g. "Twitter @someuser" — also used as the save-file name.
        return prof["formatted_service"] + " " + prof["formatted_username"]
    __str__ = name
    def pending_updates(self):
        """
        Returns list of posts in Buffer and in reserve
        """
        upd = self.get_buffer()
        upd.extend(self.reserve)
        return sorted(upd, key=get_when)
    def get_buffer(self):
        """
        Returns list of posts in Buffer
        """
        return self._prof.updates.pending
    def add(self, update):
        """
        Adds updates to reserve.
        Raises an Exception (with .error metadata attached) if another
        update is already scheduled for the same time.
        """
        import buffpy
        upds = self.pending_updates()
        if get_when(update) not in map(get_when, upds):
            self.reserve.append(update)
        else:
            msg = "Already a message scheduled for " + get_when(update).strftime(dtformat())
            exp = Exception(msg)
            exp.error = {
                "update": update,
                "queue": self,
            }
            raise exp
    def sync(self):
        """
        Sends reserved updates to Buffer, stores the rest back in reserve
        Returns time of the latest post
        Order matters, so do not do major multithreading if it can be avoided
        """
        upds = self.reserve
        self.reserve = []
        upds.sort(key=get_when)
        # Push earliest-first until Buffer holds 10 pending posts or we run out.
        while len(self.get_buffer()) < 10 and len(upds) > 0:
            update = upds.pop(0)
            try:
                self.post(update)
            except buffpy.exceptions.BuffpyRestException:
                # Best-effort: log the failure and keep going with the rest.
                print(self.name() + " did not post: " + get_when(update).strftime(dtformat()))
        self.reserve.extend(upds)
    def next_time(self):
        # Candidates: latest buffered post time and earliest reserved time;
        # returns the smaller, or None when both are empty.
        to_consider = []
        buff = self.get_buffer()
        if len(buff) > 0:
            to_consider.append(max(map(get_when, buff)))
        if len(self.reserve) > 0:
            to_consider.append(min(map(get_when, self.reserve)))
        if len(to_consider) > 0:
            return min(to_consider)
        return None
    def post(self, update):
        """
        Adds update to profile's buffer.
        Uses AssertionError-driven control flow to fall back from
        photo -> link -> plain text when media is missing or rejected.
        TODO: Allow posting by sending email to secret buffer address
        """
        content = ""
        if "content" in update.keys():
            content = update["content"]
        if "text" in update.keys():
            # "text" wins over "content" when both keys are present.
            content = update["text"]
        if "timezone" not in update.keys():
            update["timezone"] = self._prof["timezone"]
        def post_no_media(update):
            # Plain text post; scheduled when "post_at" is present, else now.
            if "post_at" in update.keys():
                return self._prof.updates.new(content, when=str(get_when(update)))
            else:
                return self._prof.updates.new(content, now=True)
        def post_with_media(update):
            media = update["media"]
            if "post_at" in update.keys():
                when = str(get_when(update))
                return self._prof.updates.new(content, media=media, when=when)
            else:
                return self._prof.updates.new(content, media=media, now=True)
        def post_link(update):
            # NOTE: reads `media` from the enclosing post() scope (closure),
            # assigned below in the "now to post" section before this runs.
            # assert url_exists(media["link"])
            # remove picture or thumbnail if url is bad
            if "picture" in media.keys():
                # if not url_exists(media["picture"]):
                del media["picture"]
            if "thumbnail" in media.keys():
                # if not url_exists(media["thumbnail"]):
                del media["thumbnail"]
            post_with_media(update)
        def url_exists(url):
            # HEAD-request the url; True when the status is below 400.
            # NOTE(review): httplib is Python 2 only (http.client in Python 3).
            import httplib
            conn = httplib.HTTPConnection(url)
            conn.request("HEAD", "/index.html")
            res = conn.getresponse()
            return res.status < 400
        # now to post
        try:
            assert "media" in update.keys()
            # if there is not media, posts without media.
            media = update["media"]
            # if media is a photo
            if "photo" in media.keys():
                import os
                try:
                    # if photo url fails, post link
                    # assert url_exists(media["photo"])
                    # post photo
                    post_with_media(update)
                except AssertionError:
                    del media["photo"]
                    # if post_link fails, posts without media
                    post_link(update)
            else:
                # if there isn't a photo,
                # post link instead.
                # if post_link fails, posts without media
                post_link(update)
        except AssertionError:
            # posts without media
            post_no_media(update)
|
import random
from django import template
from django.utils.safestring import mark_safe
from common.data.greetings import DUMB_GREETINGS
from common.markdown.markdown import markdown_email
register = template.Library()
@register.filter(is_safe=True)
def email_markdown(text):
    """Filter: render *text* as email-flavored markdown, marked HTML-safe."""
    rendered = markdown_email(text)
    return mark_safe(rendered)
@register.simple_tag(takes_context=True)
def render_email(context, post_or_comment):
    """Tag: render the object's ``text`` as email markdown (context unused)."""
    rendered = markdown_email(post_or_comment.text)
    return mark_safe(rendered)
@register.simple_tag()
def random_greeting():
    """Tag: pick one of the canned greetings uniformly at random."""
    index = random.randrange(len(DUMB_GREETINGS))
    return DUMB_GREETINGS[index]
|
import os
import pathlib
import pandas as pd
def compute_speedup_over_rocksdb(results):
    """
    Given a dataframe with the raw results (LLSM and RocksDB), removes all
    RocksDB rows and replaces them with a new column "speedup_over_rocksdb".
    """
    llsm_rows = results[results["db"] == "llsm"]
    baseline = results[results["db"] == "rocksdb"][
        ["benchmark_name", "mops_per_s"]
    ]
    # Deferral benchmarks ("<name>_defer") share the plain "<name>" RocksDB
    # baseline, so duplicate the baseline rows under the suffixed names too.
    deferred = baseline.copy(deep=True)
    deferred["benchmark_name"] = deferred["benchmark_name"] + "_defer"
    baseline = pd.concat([baseline, deferred])
    merged = pd.merge(
        llsm_rows,
        baseline,
        on="benchmark_name",
        how="left",
        suffixes=["_llsm", "_rocksdb"],
    )
    merged["speedup_over_rocksdb"] = (
        merged["mops_per_s_llsm"] / merged["mops_per_s_rocksdb"]
    )
    kept = merged[[
        "benchmark_name",
        "mops_per_s_llsm",
        "mib_per_s",
        "speedup_over_rocksdb",
    ]]
    return kept.rename(columns={"mops_per_s_llsm": "mops_per_s"})
def main():
    """
    This script combines the automated experiment results into one csv file,
    storing each result's name and throughput in both Mops/s and MiB/s.
    This script is meant to be executed by Conductor.
    """
    # Conductor passes dependency output dirs via COND_DEPS (colon-separated)
    # and the destination dir via COND_OUT.
    results_dirs = list(map(pathlib.Path, os.environ["COND_DEPS"].split(":")))
    if any(map(lambda path: not path.is_dir(), results_dirs)):
        raise RuntimeError("Cannot find results!")
    output_dir = pathlib.Path(os.environ["COND_OUT"])
    if not output_dir.is_dir():
        raise RuntimeError("Output directory does not exist!")
    all_results = []
    for results_dir in results_dirs:
        for result_file in results_dir.iterdir():
            # Only CSV result files are combined; everything else is ignored.
            if result_file.suffix != ".csv":
                continue
            all_results.append(pd.read_csv(result_file))
    combined = pd.concat(all_results)
    combined.sort_values(["benchmark_name", "db"], inplace=True)
    # Keep the raw merged data alongside the derived speedup summary.
    combined.to_csv(output_dir / "raw_results.csv", index=False)
    results = compute_speedup_over_rocksdb(combined)
    results.to_csv(output_dir/ "results.csv", index=False)
if __name__ == "__main__":
    main()
|
L = ['Michael', 'Sarah', 'Tracy', 'Bob', 'Jack']
print(L[0:3])
# If the first index is 0, it can be omitted
print(L[:3])
print(L[-2:])
print(L[-2:-1])
L = list(range(100))
print(L[:10])
print(L[-10:])
print(L[10:20])
print(L[:10:2])# first ten items, taking every second one
print(L[::5])# every fifth item of the whole list
# A tuple is also a kind of list; the only difference is that tuples are
# immutable. Tuples can be sliced too — the result is still a tuple.
t = (0,1,2,3,4,5,6,7,8,9)
print(t[:3])
# A string 'xxx' can be seen as a list whose elements are characters, so
# strings support slicing as well — the result is still a string.
print('ABCDEFG'[:3])
list(range(1, 11))
# list comprehensions
print([x*x for x in range(1,11)])
print([x * x for x in range(1, 11) if x % 2 == 0])
# generate all pairings (two-letter "permutations" of ABC x XYZ)
print([m + n for m in 'ABC' for n in 'XYZ'])
import os
print([d for d in os.listdir('.')])# os.listdir lists files and directories
d = {'x': 'A', 'y': 'B', 'z': 'C' }
for k, v in d.items():
    print(k, '=', v)
for i in d:
    print(i)
print([k + '=' + v for k, v in d.items()])
L = ['Hello', 'World', 'IBM', 'Apple']
print([x.lower() for x in L])
# test: skip non-string elements before lowercasing
L1 = ['Hello', 'World', 18, 'Apple', None]
L2 = [x.lower() for x in L1 if isinstance(x, str)]
print(L2)
|
# -*- coding: utf-8 -*-
# Copyright 2009-2019 Yelp and Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for YarnEMRJobRunner."""
import os
import boto3
from mrjob.yarnemr import YarnEMRJobRunner
from mrjob.step import StepFailedException
from tests.mock_boto3 import MockBoto3TestCase
from tests.mr_null_spark import MRNullSpark
from tests.py2 import Mock
from tests.py2 import patch
from tests.py2 import PY2
from tests.sandbox import mrjob_conf_patcher
class YarnEMRJobRunnerTestBase(MockBoto3TestCase):
    """Shared fixture: a mocked boto3 EMR client plus a baseline yarnemr config."""
    def setUp(self):
        super(YarnEMRJobRunnerTestBase, self).setUp()
        self.client = boto3.client('emr')
        # Spark submit args are irrelevant to these tests; stub them out.
        self.start(patch(
            'mrjob.bin.MRJobBinRunner._spark_submit_args',
            return_value=['<spark submit args>']))
    def _set_in_mrjob_conf(self, **kwargs):
        # Patch mrjob.conf so the given options apply to the yarnemr runner.
        emr_opts = {'runners': {'yarnemr': {}}}
        emr_opts['runners']['yarnemr'].update(kwargs)
        patcher = mrjob_conf_patcher(emr_opts)
        patcher.start()
        self.addCleanup(patcher.stop)
    def _default_mrjob_setup(self, **kwargs):
        # Install a minimal working yarnemr config; kwargs add/override options.
        cluster_cache = os.path.join(self.tmp_dir, 'cache')
        self._set_in_mrjob_conf(
            # yarn runner required
            ec2_key_pair_file='meh',
            expected_cores=1,
            expected_memory=10,
            yarn_logs_output_base=self.tmp_dir,
            # others
            cluster_cache_file=cluster_cache,
            pool_clusters=True,
            **kwargs
        )
class YarnEMRJobRunnerEndToEndTestCase(YarnEMRJobRunnerTestBase):
    def test_end_to_end(self):
        """Run a Spark job start-to-finish with ssh and cluster waits mocked."""
        self._default_mrjob_setup()
        input1 = os.path.join(self.tmp_dir, 'input1')
        open(input1, 'w').close()
        input2 = os.path.join(self.tmp_dir, 'input2')
        open(input2, 'w').close()
        job = MRNullSpark(['-r', 'yarnemr', input1, input2])
        job.sandbox()
        with job.make_runner() as runner:
            # skip waiting
            runner._wait_for_cluster = Mock()
            # mock ssh
            runner.fs.ssh._ssh_run = Mock()
            mock_stderr = b'whooo stderr Submitting application application_1'\
                          b'550537538614_0001 to ResourceManager stderr logs'
            runner.fs.ssh._ssh_run.return_value = ('meh stdout', mock_stderr)
            runner._get_application_info = Mock()
            # First poll: job still running; second poll: finished successfully.
            runner._get_application_info.side_effect = [
                {
                    'state': 'NOT FINISHED',
                    'finalStatus': 'NOT SUCCESS',
                    'elapsedTime': 25.12345,
                    'progress': 5000
                },
                {
                    'state': 'FINISHED',
                    'finalStatus': 'SUCCEEDED',
                    'elapsedTime': 50.12345,
                    'progress': 10000
                },
            ]
            runner.run()
class YarnEMRJobRunnerClusterLaunchTestCase(YarnEMRJobRunnerTestBase):
    """Tests for finding/reusing pooled clusters vs. creating new ones."""
    def _create_cluster(self, *args, **kwargs):
        # Spin up a mock pooled cluster and return its job-flow id.
        return self.client.run_job_flow(
            Instances=dict(
                InstanceCount=1,
                KeepJobFlowAliveWhenNoSteps=True,
                MasterInstanceType='m1.medium',
            ),
            JobFlowRole='fake-instance-profile',
            Name='Development Cluster',
            ReleaseLabel='emr-5.0.0',
            ServiceRole='fake-service-role',
            Tags=[
                {'Key': '__mrjob_pool_name',
                 'Value': 'default'},
                {'Key': '__mrjob_pool_hash',
                 'Value': '07b8041374af73b32d93aa6e87213ddf' if PY2 else
                 'a791edd20463b0e10558f9d3884f5b59'}
            ]
        )['JobFlowId']
    def _setup_mocked_runner(self, setup_ret_val, state_ret_val):
        # Build a YarnEMRJobRunner with cluster management stubbed out;
        # setup_ret_val/state_ret_val control whether existing clusters
        # look compatible and available.
        # create yarn runner
        runner = YarnEMRJobRunner()
        # don't try to create/wait for a cluster
        runner._create_cluster = Mock()
        runner._create_cluster.side_effect = self._create_cluster
        runner._wait_for_cluster = Mock()
        # mock out stuff that is run after cluster management we
        # don't care about
        runner._address_of_master = Mock()
        runner.get_image_version = Mock()
        runner.get_image_version.return_value = '5.0.0'
        runner._execute_job = Mock()
        # mock out setup and state methods to always return true
        runner._compare_cluster_setup = Mock()
        runner._compare_cluster_setup.return_value = setup_ret_val
        runner._check_cluster_state = Mock()
        runner._check_cluster_state.return_value = \
            5 if state_ret_val else -1
        # return it
        runner._prepare_for_launch()
        return runner
    def test_valid_cluster_find(self):
        self._default_mrjob_setup()
        # create clusters and manually set them to WAITING
        cluster_ids = []
        for _ in range(2):
            cluster_id = self._create_cluster()
            self.mock_emr_clusters[cluster_id]['Status']['State'] = 'WAITING'
            cluster_ids.append(cluster_id)
        # mark all clusters as valid
        runner = self._setup_mocked_runner(True, True)
        # launch the job
        runner._launch_yarn_emr_job()
        # ensure we found and used this valid cluster
        self.assertIn(runner._cluster_id, cluster_ids)
        self.assertFalse(runner._created_cluster)
    def test_invalid_cluster_find(self):
        self._default_mrjob_setup()
        # create clusters and manually set them to WAITING
        cluster_ids = []
        for _ in range(2):
            cluster_id = self._create_cluster()
            self.mock_emr_clusters[cluster_id]['Status']['State'] = 'WAITING'
            cluster_ids.append(cluster_id)
        # mark all clusters as invalid
        runner = self._setup_mocked_runner(False, True)
        # launch the job
        runner._launch_yarn_emr_job()
        # ensure we created a new cluster
        self.assertTrue(runner._created_cluster)
    def test_under_max_cluster_limit(self):
        self._default_mrjob_setup(max_pool_cluster_count=3)
        # create clusters and manually set them to WAITING
        cluster_ids = []
        for _ in range(2):
            cluster_id = self._create_cluster()
            self.mock_emr_clusters[cluster_id]['Status']['State'] = 'WAITING'
            cluster_ids.append(cluster_id)
        # mark all clusters as valid but in an invalid state
        runner = self._setup_mocked_runner(True, False)
        # launch the job
        runner._launch_yarn_emr_job()
        # ensure we created a new cluster
        self.assertTrue(runner._created_cluster)
    def test_over_max_cluster_limit(self):
        self._default_mrjob_setup(max_pool_cluster_count=2)
        # create clusters and manually set them to WAITING
        cluster_ids = []
        for _ in range(2):
            cluster_id = self._create_cluster()
            self.mock_emr_clusters[cluster_id]['Status']['State'] = 'WAITING'
            cluster_ids.append(cluster_id)
        # mark all clusters as valid but in an invalid state
        runner = self._setup_mocked_runner(True, False)
        # make the existing clusters count against this runner's pool hash
        for cluster in cluster_ids:
            tags = self.mock_emr_clusters[cluster]['Tags']
            for tag in tags:
                if tag['Key'] == '__mrjob_pool_hash':
                    tag['Value'] = runner._pool_hash()
        # launch the job and ensure we hit an exception
        with self.assertRaises(StepFailedException):
            runner._launch_yarn_emr_job()
        # ensure we didn't create a new cluster
        self.assertFalse(runner._created_cluster)
    def test_retry_max_cluster_limit(self):
        self._default_mrjob_setup(max_pool_cluster_count=2)
        # create clusters and manually set them to WAITING
        cluster_ids = []
        for _ in range(2):
            cluster_id = self._create_cluster()
            self.mock_emr_clusters[cluster_id]['Status']['State'] = 'WAITING'
            cluster_ids.append(cluster_id)
        # mark all clusters as valid but in an invalid state
        runner = self._setup_mocked_runner(True, False)
        # return a valid cluster on the second iterations
        runner._check_cluster_state.side_effect = [False]*2 + [True]*2
        # launch the job and ensure we hit an exception
        runner._launch_yarn_emr_job()
        # ensure we didn't create a new cluster
        self.assertFalse(runner._created_cluster)
|
import os
import scipy.io.wavfile as wav
from python_speech_features import mfcc
import numpy as np
# Source and destination directories (Windows paths with Chinese segments).
input_dir = r"C:\Users\a7825\Desktop\工作空间\语音数据\ogvc_16\M2\A\wav_16"
output_dir = r"C:\Users\a7825\Desktop\工作空间\语音数据\ogvc_16\M2\A\mfcc_13"
if __name__ == "__main__":
    # For every wav file: read the audio, compute MFCC frames at the file's
    # native sample rate, and dump them to a CSV next to the original name.
    for ad_file in os.listdir(input_dir):
        fs, audio = wav.read(input_dir+"/"+ad_file)
        feature_mfcc = mfcc(audio, samplerate=fs)
        print(feature_mfcc.shape)
        np.savetxt(output_dir+"/"+ad_file+".csv", feature_mfcc, delimiter = ',')
# (translated) Concatenating the path strings: switching the single quotes
# to double quotes surprisingly made it work.
from django.shortcuts import render
import pyautogui, sys
import pyscreeze
import os
import os, sys, stat
import shutil
from pathlib import Path
class Desktop(object):
    """Organizes the current user's Desktop into a fixed set of folders.

    NOTE(review): relies on the Windows-style HOMEPATH environment variable;
    __init__ raises KeyError on platforms where it is not set.
    """
    def __init__(self):
        self.switch = False  # guards the destructive operations until enabled
        # Bug fix: removed the redundant chained assignment
        # (`self.username = username = os.getlogin()`).
        self.username = os.getlogin()
        #print(os.path.join(os.environ["HOMEPATH"], "Desktop"))
        self.path = os.path.join(os.environ["HOMEPATH"], "Desktop")
        #self.test_and_create_files()
        self.lst = ["programs", "gifs", "pictures", "videos", "projects", "files", "songs", "shortcuts"]
    def switchh(self, switch1):
        """Enable/disable this object from a 'yes'/'no' answer (case-insensitive).

        Numeric input is rejected.  Bug fix: the bare ``except:`` is narrowed
        to the exceptions int() actually raises for non-numeric input.
        """
        try:
            int(switch1)
        except (TypeError, ValueError):
            switch_lower = switch1.lower()
            if switch_lower not in ["yes", "no"]:
                print("Please type in yes or no")
            else:
                self.switch = switch_lower == "yes"
        else:
            # int() succeeded, so the input was a number -> reject it.
            print("Please type in yes or no")
    def test_and_create_files(self):
        """Lowercase known folder names on the Desktop and create missing ones.

        Does nothing (with a warning) until switchh("yes") has been called.
        """
        desk = list(os.listdir(rf"{self.path}"))
        os.chdir(self.path)
        if self.switch == False:
            print("Desktop is off! Use Switch First")
        else:
            # Normalize the case of any managed folder that already exists.
            for a in desk:
                for b in self.lst:
                    if a.lower() == b:
                        os.chmod(f"{a}", stat.S_IWRITE)
                        os.rename(f"{a}", f"{a.lower()}")
            # Create any managed folder that is still missing.
            for b in self.lst:
                if b not in desk:
                    try:
                        os.mkdir(f"{b}")
                    except FileExistsError:
                        # Bug fix: only ignore "already exists" instead of
                        # swallowing every error with a bare except: pass.
                        pass
class Folder(object):
    """A named folder on the current user's Desktop.

    Snapshots the folder's contents into ``self.content`` at creation time.
    """
    def __init__(self, name):
        self.name = name
        # Bug fix: removed the unused local `extensions = []`.
        desktop_obj = Desktop()
        desktop_obj_path = desktop_obj.path
        self.path = rf"{desktop_obj_path}\\{name}"
        self.content = self.list_content()
    def list_content(self):
        """Return the folder's entries as a list (snapshot of os.listdir)."""
        content = list(os.listdir(self.path))
        return content
class Videos(Folder):
    # Placeholder subclass for the Desktop "videos" folder (not yet implemented).
    pass
############################### code this class later
class Sub_Folder(Folder):
    # Placeholder for nested folders inside a managed folder (not yet implemented).
    pass
############################## code this class later
# Create your views here.
def index(request):
    """Render the desk app's button page."""
    return render(request, "desk/button.html")
def action(request):
    """Demo view: list the "videos" folder, then (re)organize the Desktop.

    NOTE(review): performs filesystem side effects (chmod/rename/mkdir) on
    the server's Desktop on every request — confirm this is intended.
    """
    folder_obj = Folder("videos")
    print(folder_obj.content)
    desk_obj = Desktop()
    desk_obj.switchh("yes")
    desk_obj.test_and_create_files()
    print(os.path.join(os.environ["HOMEPATH"], "Desktop"))
    return render(request, "desk/button.html")
|
# Population Class | Genetic Algorithm
from random import randint
from random import choice
class Population:
    """A population of DNA-string members for a grid-walk genetic algorithm."""
    class Member:
        """One individual: a DNA list of moves ("U"/"D"/"L"/"R") plus a fitness."""
        def __init__(self, lifespan=50, write_dna=True):
            self.lifespan = lifespan
            self.fitness = 0
            self.directions = ["U", "D", "L", "R"]
            if write_dna:
                self.DNA = [choice(self.directions) for x in range(self.lifespan)]
            else:
                self.DNA = []  # filled in later by crossover
        # Comparison operators delegate to fitness so members sort by fitness.
        # NOTE(review): `other` is compared directly (not `other.fitness`);
        # member-vs-member comparisons still work via the reflected
        # operators, but this asymmetry is worth confirming.
        def __eq__(self, other):
            return self.fitness == other
        def __ne__(self, other):
            return self.fitness != other
        def __lt__(self, other):
            return self.fitness < other
        def __gt__(self, other):
            return self.fitness > other
        def get_fitness(self, feedback):
            # TODO: score this member from environment feedback.
            pass
        def mutate(self, mutation_rate):
            """Re-roll each gene independently with probability ~mutation_rate."""
            for i in range(len(self.DNA)):
                chance = randint(0, 100)/100
                if chance < mutation_rate:
                    self.DNA[i] = choice(self.directions)
    def __init__(self, spawn, pop_size=10, mutation_rate=0.1):
        self.spawn = spawn  # (x, y) tuple, np format
        self.population_size = pop_size
        self.mutation_rate = mutation_rate
        self.members = [self.Member() for x in range(self.population_size)]
    def batch_fitness(self, feedback=None):
        """Score every member.

        Bug fix: Member.get_fitness() requires a feedback argument, so the
        original no-argument call raised TypeError; feedback is now threaded
        through (the default keeps the old no-argument call working).
        """
        for x in self.members:
            x.get_fitness(feedback)  # TODO: real scoring
    def batch_mutate(self):
        """Apply mutation to every member at the population's mutation rate."""
        for x in self.members:
            x.mutate(self.mutation_rate)
    def crossover(self):
        """Build the next generation by one-point crossover of selected parents."""
        new_pop = []
        parents = self.selection()
        for x in range(self.population_size//2):
            p1 = choice(parents)
            p2 = choice(parents)
            child1 = self.Member(lifespan=p1.lifespan, write_dna=False)
            child2 = self.Member(lifespan=p1.lifespan, write_dna=False)
            pos = randint(0, p1.lifespan)  # crossover point
            child1.DNA = p1.DNA[:pos] + p2.DNA[pos:]
            child2.DNA = p2.DNA[:pos] + p1.DNA[pos:]
            new_pop.append(child1)
            new_pop.append(child2)
        self.members = new_pop
    def selection(self):
        """Keep the fitter half of the population as the parent pool."""
        self.members.sort(reverse=True)
        return self.members[0:self.population_size//2]
|
#!/usr/bin/env python
#_*_ coding:utf-8 _*_
'''
Created on 2018年3月23日
@author: yangxu
'''
from django.core.paginator import Paginator
def page(obj, pagenum, datarow):
    """Paginate *obj* (any iterable of rows) with *datarow* items per page.

    Returns a dict holding the total page count, the requested page number,
    the total item count, and the rows of the requested page.

    Improvements: the manual append loop is replaced by list(obj), and the
    result dict is built in one literal instead of piecemeal assignments.
    """
    items = list(obj)  # materialize once; Paginator needs a sequence
    p = Paginator(items, datarow)
    return {
        'pagecount': p.num_pages,
        'pagenum': pagenum,
        'itemcount': p.count,
        'pagedata': p.page(pagenum).object_list,
    }
from functions import *
from exerciseData import *
from personalData import *
from email_functions import *
from exerciseList import *
from email_functions import *
import os
import sys
import subprocess
from functools import reduce
curWeek, weekSets = findWeekSets()
from exerciseData import warmups
# make a function for each workout
# Build one markdown table per workout set, convert the file to PDF with
# pandoc, e-mail it, then remove the temporary files.
with open('tempFile.md', 'w') as writefile:
    writeline = lambda line : writefile.write(line + '\n')
    for setNum, weekSet in enumerate(weekSets, 1):
        program = getProgram(weekSet)
        writeline("## " + curWeek + str(setNum))
        # Header row: one column per exercise name.
        writeline(reduce(lambda x, y : x + "|" + y.name, program, "") + "|")
        writeline("|" + ":---:|" * len(program))
        # Pad shorter exercises with empty cells so every table row is complete.
        longestExerciseLen = max([len(exercise.sets) for exercise in program])
        for index in range(0, longestExerciseLen):
            exerciseList = []
            for exercise in program:
                if index < len(exercise.sets):
                    exerciseList.append(formatSet(exercise.sets[index]))
                else:
                    exerciseList.append('')
            writeline('|' + '|'.join(exerciseList) + '|')
        writeline('')
    writefile.close()  # NOTE(review): redundant — the with-block already closes the file
subprocess.call('pandoc -s -o weeklyworkout.pdf tempFile.md'.split(' '))
fileencoding = sys.getfilesystemencoding()
# Credentials come from the environment so they never live in the repo.
user_email_password=os.environ["user_email_password"]
user_email=os.environ["user_email"]
user_name = os.environ["user_name"]
emailFile(user_name, user_email, user_email_password, "weeklyworkout.pdf", "Weekly Workout")
subprocess.call('rm tempFile.md weeklyworkout.pdf'.split(' '))
|
#coding:utf-8
from flask import request, jsonify
from app.models.clients import Usuario
from app import app
from app import db
from app.controllers.clients import ControllerClients
@app.route('/usuario/cadastrar', methods=['POST'])
def cadastrar():
    """Create a user from the JSON payload; returns the new user's id."""
    try:
        data = request.get_json()
        controller = ControllerClients()
        controller.cadastrar(data['nome'], data['cpf'], data['celular'], data['email'], data['senha'])
        user_id = Usuario.query.order_by('-id').first().id
        return jsonify({"status": "ok", "msg": "Usuário cadastrado com sucesso!", "extra": user_id})
    except Exception as error:
        # Bug fix: Python 3 exceptions have no .message attribute; str(error)
        # works on both Python 2 and 3.
        return jsonify({"status": "erro", "msg": str(error)})
@app.route('/usuarios', methods=['GET'])
def listar():
    """List every user as a JSON array of field dicts."""
    campos = ("id", "nome", "cpf", "celular", "email", "senha")
    usuarios_all = [
        {campo: getattr(usuario, campo) for campo in campos}
        for usuario in Usuario.query.all()
    ]
    return jsonify(usuarios_all)
@app.route('/usuario/alterar/<int:user_id>', methods=['PUT'])
def alterar(user_id):
    """Update the user identified by user_id with the JSON payload's fields."""
    try:
        data = request.get_json()
        controller = ControllerClients()
        controller.alterar(user_id, data['nome'], data['cpf'], data['celular'], data['email'], data['senha'])
        return jsonify({"status": "ok", "msg": "Usuário alterado com sucesso!"})
    except Exception as error:
        # Bug fix: Python 3 exceptions have no .message attribute; str(error)
        # works on both Python 2 and 3.
        return jsonify({"status": "erro", "msg": str(error)})
@app.route('/usuario/remover/<int:user_id>', methods=['DELETE'])
def remover(user_id):
    """Delete the user identified by user_id."""
    try:
        # Bug fix: removed the unused `data = request.get_json()` — DELETE
        # requests carry no payload here.
        controller = ControllerClients()
        controller.remover(user_id)
        return jsonify({"status": "ok", "msg": "Usuário removido com sucesso!"})
    except Exception as error:
        # Bug fix: Python 3 exceptions have no .message attribute; str(error)
        # works on both Python 2 and 3.
        return jsonify({"status": "erro", "msg": str(error)})
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 23 21:02:12 2017
@author: enriqueareyan
"""
import basic_plots
import marketmaker_plots
import deviation_graphs
import markov_chain
# Regenerate every plot family for each game-count granularity.
for number_of_games in (100, 200, 400):
    print('[Plotting for number of games = ', number_of_games, ']')
    for produce in (basic_plots.produce_all_agents_plots,
                    marketmaker_plots.produce_all_market_maker_plots,
                    deviation_graphs.plot_all_deviation_graphs,
                    deviation_graphs.plot_all_proportion_pure_nash,
                    markov_chain.plot_all_soft_deviation_graph,
                    markov_chain.plot_all_soft_expected_agents_pure_nash):
        produce(number_of_games)
|
import asyncio
async def tick():
    """Print 'Tick', pause one second, then print 'Tock'."""
    for label, delay in (('Tick', 1), ('Tock', 0)):
        print(label)
        if delay:
            await asyncio.sleep(delay)
async def main():
    """Run three tick() coroutines concurrently, then list live tasks.

    asyncio.all_tasks() must be called while the loop is running; after
    the gather completes it normally contains only the task executing
    main() itself.
    """
    await asyncio.gather(tick(), tick(), tick())
    for task in asyncio.all_tasks():  # fixed misspelled local "taks"
        print(task)
if __name__ == '__main__':
    # An explicit loop (instead of asyncio.run) so each shutdown stage
    # can be reported below. new_event_loop() avoids the deprecated
    # implicit-loop behavior of get_event_loop().
    # Fixed: the original created an extra `coroutine = main()` that was
    # never awaited, producing a "coroutine was never awaited" warning.
    loop = asyncio.new_event_loop()
    try:
        loop.run_until_complete(main())
        print('coroutine has finished')  # fixed "finshed" typo
    except KeyboardInterrupt:
        print('Manually closed')
    finally:
        loop.close()
        print('loop is closed')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Validates and parses SPF amd DMARC DNS records"""
import logging
from collections import OrderedDict
from re import compile, IGNORECASE
import json
from csv import DictWriter
from argparse import ArgumentParser
import os
from time import sleep
import socket
import smtplib
import tempfile
import platform
import shutil
import atexit
from ssl import SSLError, CertificateError, create_default_context
from io import StringIO
from expiringdict import ExpiringDict
import publicsuffixlist
import dns
import dns.resolver
import dns.exception
import timeout_decorator
from pyleri import (Grammar,
Regex,
Sequence,
List,
Repeat
)
import ipaddress
"""Copyright 2019 Sean Whalen
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
__version__ = "4.7.0"
# Raw regular-expression strings used to validate and tokenize
# DMARC / BIMI / SPF records.
DMARC_VERSION_REGEX_STRING = r"v *= *DMARC1;"
BIMI_VERSION_REGEX_STRING = r"v=BIMI1;"
DMARC_TAG_VALUE_REGEX_STRING = r"([a-z]{1,5}) *= *([\w.:@/+!,_\- ]+)"
BIMI_TAG_VALUE_REGEX_STRING = r"([a-z]{1}) *= *(.*)"
MAILTO_REGEX_STRING = r"^(mailto):" \
                      r"([\w\-!#$%&'*+-/=?^_`{|}~]" \
                      r"[\w\-.!#$%&'*+-/=?^_`{|}~]*@[\w\-.]+)(!\w+)?"
SPF_VERSION_TAG_REGEX_STRING = "v=spf1"
SPF_MECHANISM_REGEX_STRING = r"([+\-~?])?(mx|ip4|ip6|exists|include|all|a|" \
                             r"redirect|exp|ptr)[:=]?([\w+/_.:\-{%}]*)"
AFTER_ALL_REGEX_STRING = "all .*"
# Patterns are compiled once at import time; record syntax is
# case-insensitive.
DMARC_TAG_VALUE_REGEX = compile(DMARC_TAG_VALUE_REGEX_STRING, IGNORECASE)
BIMI_TAG_VALUE_REGEX = compile(BIMI_TAG_VALUE_REGEX_STRING, IGNORECASE)
MAILTO_REGEX = compile(MAILTO_REGEX_STRING, IGNORECASE)
SPF_MECHANISM_REGEX = compile(SPF_MECHANISM_REGEX_STRING, IGNORECASE)
AFTER_ALL_REGEX = compile(AFTER_ALL_REGEX_STRING, IGNORECASE)
# Identifies this client in outbound HTTP requests.
USER_AGENT = "Mozilla/5.0 (({0} {1})) parsedmarc/{2}".format(
    platform.system(),
    platform.release(),
    __version__
)
# Process-wide lookup caches (30-minute TTL) so identical DNS/TLS
# probes are not repeated.
DNS_CACHE = ExpiringDict(max_len=200000, max_age_seconds=1800)
TLS_CACHE = ExpiringDict(max_len=200000, max_age_seconds=1800)
STARTTLS_CACHE = ExpiringDict(max_len=200000, max_age_seconds=1800)
# Scratch directory; removed by _cleanup() at interpreter exit.
TMPDIR = tempfile.mkdtemp()
def _cleanup():
    """Remove temporary files"""
    # Deletes the module-wide scratch directory created at import time.
    shutil.rmtree(TMPDIR)
# Ensure the scratch directory is removed when the interpreter exits.
atexit.register(_cleanup)
# --- Exception hierarchy ---------------------------------------------------
# Classes named *Error are fatal; the underscore-prefixed *Warning classes
# are caught internally and surfaced as non-fatal findings.
class SMTPError(Exception):
    """Raised when SMTP error occurs"""
class SPFError(Exception):
    """Raised when a fatal SPF error occurs"""
    def __init__(self, msg, data=None):
        """
        Args:
            msg (str): The error message
            data (dict): A dictionary of data to include in the output
        """
        # Optional structured context for callers that build reports.
        self.data = data
        Exception.__init__(self, msg)
class _SPFWarning(Exception):
    """Raised when a non-fatal SPF error occurs"""
class _SPFMissingRecords(_SPFWarning):
    """Raised when a mechanism in a ``SPF`` record is missing the requested
    A/AAAA or MX records"""
class _SPFDuplicateInclude(_SPFWarning):
    """Raised when a duplicate SPF include is found"""
class _DMARCWarning(Exception):
    """Raised when a non-fatal DMARC error occurs"""
class _BIMIWarning(Exception):
    """Raised when a non-fatal BIMI error occurs"""
class _DMARCBestPracticeWarning(_DMARCWarning):
    """Raised when a DMARC record does not follow a best practice"""
class DNSException(Exception):
    """Raised when a general DNS error occurs"""
    def __init__(self, error):
        # BaseException.__new__ already captures the constructor argument
        # in ``args``; this override only rounds dnspython's reported
        # timeout so the rendered message is tidier.
        if isinstance(error, dns.exception.Timeout):
            error.kwargs["timeout"] = round(error.kwargs["timeout"], 1)
class DNSExceptionNXDOMAIN(DNSException):
    """Raised when a NXDOMAIN DNS error (RCODE:3) occurs"""
class DMARCError(Exception):
    """Raised when a fatal DMARC error occurs"""
    def __init__(self, msg, data=None):
        """
        Args:
            msg (str): The error message
            data (dict): A dictionary of data to include in the results
        """
        # Optional structured context for callers that build reports.
        self.data = data
        Exception.__init__(self, msg)
class SPFRecordNotFound(SPFError):
    """Raised when an SPF record could not be found"""
    def __init__(self, error):
        # NOTE(review): SPFError.__init__ is not called here, so ``data``
        # is never set on this subclass -- confirm callers do not rely
        # on it.
        if isinstance(error, dns.exception.Timeout):
            error.kwargs["timeout"] = round(error.kwargs["timeout"], 1)
class MultipleSPFRTXTRecords(SPFError):
    """Raised when multiple TXT spf1 records are found"""
class SPFSyntaxError(SPFError):
    """Raised when an SPF syntax error is found"""
class SPFTooManyDNSLookups(SPFError):
    """Raised when an SPF record requires too many DNS lookups (10 max)"""
    def __init__(self, *args, **kwargs):
        # args[0] is the message; the lookup count travels in ``data``.
        data = dict(dns_lookups=kwargs["dns_lookups"])
        SPFError.__init__(self, args[0], data=data)
class SPFTooManyVoidDNSLookups(SPFError):
    """Raised when an SPF record requires too many void DNS lookups (2 max)"""
    def __init__(self, *args, **kwargs):
        # args[0] is the message; the void-lookup count travels in ``data``.
        data = dict(dns_void_lookups=kwargs["dns_void_lookups"])
        SPFError.__init__(self, args[0], data=data)
class SPFRedirectLoop(SPFError):
    """Raised when an SPF redirect loop is detected"""
class SPFIncludeLoop(SPFError):
    """Raised when an SPF include loop is detected"""
class DMARCRecordNotFound(DMARCError):
    def __init__(self, error):
        """
        Raised when a DMARC record could not be found
        """
        # Round dnspython timeouts for tidier messages (same pattern as
        # DNSException).
        if isinstance(error, dns.exception.Timeout):
            error.kwargs["timeout"] = round(error.kwargs["timeout"], 1)
class DMARCSyntaxError(DMARCError):
    """Raised when a DMARC syntax error is found"""
class InvalidDMARCTag(DMARCSyntaxError):
    """Raised when an invalid DMARC tag is found"""
class InvalidDMARCTagValue(DMARCSyntaxError):
    """Raised when an invalid DMARC tag value is found"""
class InvalidDMARCReportURI(InvalidDMARCTagValue):
    """Raised when an invalid DMARC reporting URI is found"""
class UnrelatedTXTRecordFoundAtDMARC(DMARCError):
    """Raised when a TXT record unrelated to DMARC is found"""
class SPFRecordFoundWhereDMARCRecordShouldBe(UnrelatedTXTRecordFoundAtDMARC):
    """Raised when an SPF record is found where a DMARC record should be;
    most likely, the ``_dmarc`` subdomain
    record does not actually exist, and the request for ``TXT`` records was
    redirected to the base domain"""
class DMARCRecordInWrongLocation(DMARCError):
    """Raised when a DMARC record is found at the root of a domain"""
class DMARCReportEmailAddressMissingMXRecords(_DMARCWarning):
    """Raised when an email address in a DMARC report URI is missing MX
    records"""
class UnverifiedDMARCURIDestination(_DMARCWarning):
    """Raised when the destination of a DMARC report URI does not indicate
    that it accepts reports for the domain"""
class MultipleDMARCRecords(DMARCError):
    """Raised when multiple DMARC records are found, in violation of
    RFC 7489, section 6.6.3"""
class BIMIError(Exception):
    """Raised when a fatal BIMI error occurs"""
    def __init__(self, msg, data=None):
        """
        Args:
            msg (str): The error message
            data (dict): A dictionary of data to include in the results
        """
        # Optional structured context for callers that build reports.
        self.data = data
        Exception.__init__(self, msg)
class BIMIRecordNotFound(BIMIError):
    """Raised when a BIMI record could not be found"""
    def __init__(self, error):
        # Round dnspython timeouts for tidier messages (same pattern as
        # DNSException).
        if isinstance(error, dns.exception.Timeout):
            error.kwargs["timeout"] = round(error.kwargs["timeout"], 1)
class BIMISyntaxError(BIMIError):
    """Raised when a BIMI syntax error is found"""
class InvalidBIMITag(BIMISyntaxError):
    """Raised when an invalid BIMI tag is found"""
class InvalidBIMITagValue(BIMISyntaxError):
    """Raised when an invalid BIMI tag value is found"""
class InvalidBIMIIndicatorURI(InvalidBIMITagValue):
    """Raised when an invalid BIMI indicator URI is found"""
class UnrelatedTXTRecordFoundAtBIMI(BIMIError):
    """Raised when a TXT record unrelated to BIMI is found"""
class SPFRecordFoundWhereBIMIRecordShouldBe(UnrelatedTXTRecordFoundAtBIMI):
    """Raised when an SPF record is found where a BIMI record should be;
    most likely, the ``selector_bimi`` subdomain
    record does not actually exist, and the request for ``TXT`` records was
    redirected to the base domain"""
class BIMIRecordInWrongLocation(BIMIError):
    """Raised when a BIMI record is found at the root of a domain"""
class MultipleBIMIRecords(BIMIError):
    """Raised when multiple BIMI records are found"""
class _SPFGrammar(Grammar):
    """Defines Pyleri grammar for SPF records"""
    # START is the entry-point element pyleri expects on a Grammar.
    version_tag = Regex(SPF_VERSION_TAG_REGEX_STRING)
    mechanism = Regex(SPF_MECHANISM_REGEX_STRING, IGNORECASE)
    START = Sequence(version_tag, Repeat(mechanism))
class _DMARCGrammar(Grammar):
    """Defines Pyleri grammar for DMARC records"""
    # A DMARC record is a version tag followed by ";"-separated tag=value
    # pairs.
    version_tag = Regex(DMARC_VERSION_REGEX_STRING, IGNORECASE)
    tag_value = Regex(DMARC_TAG_VALUE_REGEX_STRING, IGNORECASE)
    START = Sequence(version_tag, List(tag_value, delimiter=";", opt=True))
class _BIMIGrammar(Grammar):
    """Defines Pyleri grammar for BIMI records"""
    # Same shape as DMARC: version tag plus ";"-separated tag=value pairs.
    version_tag = Regex(BIMI_VERSION_REGEX_STRING, IGNORECASE)
    tag_value = Regex(BIMI_TAG_VALUE_REGEX_STRING, IGNORECASE)
    START = Sequence(version_tag, List(tag_value, delimiter=";", opt=True))
tag_values = OrderedDict(adkim=OrderedDict(name="DKIM Alignment Mode",
default="r",
description='In relaxed mode, '
'the Organizational '
'Domains of both the '
'DKIM-authenticated '
'signing domain (taken '
'from the value of the '
'"d=" tag in the '
'signature) and that '
'of the RFC 5322 '
'From domain '
'must be equal if the '
'identifiers are to be '
'considered aligned.'),
aspf=OrderedDict(name="SPF alignment mode",
default="r",
description='In relaxed mode, '
'the SPF-authenticated '
'domain and RFC5322 '
'From domain must have '
'the same '
'Organizational Domain. '
'In strict mode, only '
'an exact DNS domain '
'match is considered to '
'produce Identifier '
'Alignment.'),
fo=OrderedDict(name="Failure Reporting Options",
default="0",
description='Provides requested '
'options for generation '
'of failure reports. '
'Report generators MAY '
'choose to adhere to the '
'requested options. '
'This tag\'s content '
'MUST be ignored if '
'a "ruf" tag (below) is '
'not also specified. '
'The value of this tag is '
'a colon-separated list '
'of characters that '
'indicate failure '
'reporting options.',
values={
"0": 'Generate a DMARC failure '
'report if all underlying '
'authentication mechanisms '
'fail to produce an aligned '
'"pass" result.',
"1": 'Generate a DMARC failure '
'report if any underlying '
'authentication mechanism '
'produced something other '
'than an aligned '
'"pass" result.',
"d": 'Generate a DKIM failure '
'report if the message had '
'a signature that failed '
'evaluation, regardless of '
'its alignment. DKIM-'
'specific reporting is '
'described in AFRF-DKIM.',
"s": 'Generate an SPF failure '
'report if the message '
'failed SPF evaluation, '
'regardless of its alignment.'
' SPF-specific reporting is '
'described in AFRF-SPF'
}
),
p=OrderedDict(name="Requested Mail Receiver Policy",
description='Specifies the policy to '
'be enacted by the '
'Receiver at the '
'request of the '
'Domain Owner. The '
'policy applies to '
'the domain and to its '
'subdomains, unless '
'subdomain policy '
'is explicitly described '
'using the "sp" tag.',
values={
"none": 'The Domain Owner requests '
'no specific action be '
'taken regarding delivery '
'of messages.',
"quarantine": 'The Domain Owner '
'wishes to have '
'email that fails '
'the DMARC mechanism '
'check be treated by '
'Mail Receivers as '
'suspicious. '
'Depending on the '
'capabilities of the '
'MailReceiver, '
'this can mean '
'"place into spam '
'folder", '
'"scrutinize '
'with additional '
'intensity", and/or '
'"flag as '
'suspicious".',
"reject": 'The Domain Owner wishes '
'for Mail Receivers to '
'reject '
'email that fails the '
'DMARC mechanism check. '
'Rejection SHOULD '
'occur during the SMTP '
'transaction.'
}
),
pct=OrderedDict(name="Percentage",
default=100,
description='Integer percentage of '
'messages from the '
'Domain Owner\'s '
'mail stream to which '
'the DMARC policy is to '
'be applied. '
'However, this '
'MUST NOT be applied to '
'the DMARC-generated '
'reports, all of which '
'must be sent and '
'received unhindered. '
'The purpose of the '
'"pct" tag is to allow '
'Domain Owners to enact '
'a slow rollout of '
'enforcement of the '
'DMARC mechanism.'
),
rf=OrderedDict(name="Report Format",
default="afrf",
description='A list separated by '
'colons of one or more '
'report formats as '
'requested by the '
'Domain Owner to be '
'used when a message '
'fails both SPF and DKIM '
'tests to report details '
'of the individual '
'failure. Only "afrf" '
'(the auth-failure report '
'type) is currently '
'supported in the '
'DMARC standard.',
values={
"afrf": ' "Authentication Failure '
'Reporting Using the '
'Abuse Reporting Format", '
'RFC 6591, April 2012,'
'<https://www.rfc-'
'editor.org/info/rfc6591>'
}
),
ri=OrderedDict(name="Report Interval",
default=86400,
description='Indicates a request to '
'Receivers to generate '
'aggregate reports '
'separated by no more '
'than the requested '
'number of seconds. '
'DMARC implementations '
'MUST be able to provide '
'daily reports and '
'SHOULD be able to '
'provide hourly reports '
'when requested. '
'However, anything other '
'than a daily report is '
'understood to '
'be accommodated on a '
'best-effort basis.'
),
rua=OrderedDict(name="Aggregate Feedback Addresses",
description=' A comma-separated list '
'of DMARC URIs to which '
'aggregate feedback '
'is to be sent.'
),
ruf=OrderedDict(name="Forensic Feedback Addresses",
description=' A comma-separated list '
'of DMARC URIs to which '
'forensic feedback '
'is to be sent.'
),
sp=OrderedDict(name="Subdomain Policy",
description='Indicates the policy to '
'be enacted by the '
'Receiver at the request '
'of the Domain Owner. '
'It applies only to '
'subdomains of the '
'domain queried, and not '
'to the domain itself. '
'Its syntax is identical '
'to that of the "p" tag '
'defined above. If '
'absent, the policy '
'specified by the "p" '
'tag MUST be applied '
'for subdomains.'
),
v=OrderedDict(name="Version",
description='Identifies the record '
'retrieved as a DMARC '
'record. It MUST have the '
'value of "DMARC1". The '
'value of this tag MUST '
'match precisely; if it '
'does not or it is absent, '
'the entire retrieved '
'record MUST be ignored. '
'It MUST be the first '
'tag in the list.')
)
# Maps an SPF mechanism qualifier prefix to its result name; a mechanism
# with no qualifier prefix defaults to "pass".
spf_qualifiers = {
    "": "pass",
    "?": "neutral",
    "+": "pass",
    "-": "fail",
    "~": "softfail"
}
# Metadata (name/description) for each known BIMI tag, keyed by tag name.
bimi_tags = OrderedDict(
    v=OrderedDict(name="Version",
                  description='Identifies the record '
                              'retrieved as a BIMI '
                              'record. It MUST have the '
                              'value of "BIMI1". The '
                              'value of this tag MUST '
                              'match precisely; if it '
                              'does not or it is absent, '
                              'the entire retrieved '
                              'record MUST be ignored. '
                              'It MUST be the first '
                              'tag in the list.')
)
def get_base_domain(domain):
    """
    Gets the base domain name for the given domain
    .. note::
        Results are based on a list of public domain suffixes at
        https://publicsuffix.org/list/public_suffix_list.dat.
    Args:
        domain (str): A domain or subdomain
    Returns:
        str: The base domain of the given domain
    """
    # Constructing a PublicSuffixList parses the entire public suffix
    # list; build it once and reuse it across calls instead of paying
    # that cost on every lookup.
    psl = getattr(get_base_domain, "_psl", None)
    if psl is None:
        psl = publicsuffixlist.PublicSuffixList()
        get_base_domain._psl = psl
    return psl.privatesuffix(domain)
def _query_dns(domain, record_type, nameservers=None, resolver=None,
               timeout=2.0, cache=None):
    """
    Queries DNS, returning cached answers when available
    Args:
        domain (str): The domain or subdomain to query about
        record_type (str): The record type to query for
        nameservers (list): A list of one or more nameservers to use
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): Sets the DNS timeout in seconds
        cache (ExpiringDict): Cache storage
    Returns:
        list: A list of answers
    """
    domain = str(domain).lower()
    record_type = record_type.upper()
    cache_key = "{0}_{1}".format(domain, record_type)
    if cache is None:
        cache = DNS_CACHE
    # isinstance (rather than ``type() is``) so ExpiringDict subclasses
    # also benefit from caching.
    if isinstance(cache, ExpiringDict):
        records = cache.get(cache_key, None)
        if records:
            return records
    if not resolver:
        # Note: nameservers/timeout are only applied to a resolver built
        # here; a caller-supplied resolver is used as configured.
        resolver = dns.resolver.Resolver()
        timeout = float(timeout)
        if nameservers is not None:
            resolver.nameservers = nameservers
        resolver.timeout = timeout
        resolver.lifetime = timeout
    if record_type == "TXT":
        # A TXT answer arrives as a tuple of <=255-byte character
        # strings; join the chunks of each answer, then decode.
        resource_records = list(map(
            lambda r: r.strings,
            resolver.resolve(domain, record_type, lifetime=timeout)))
        _resource_record = [
            resource_record[0][:0].join(resource_record)
            for resource_record in resource_records if resource_record]
        records = [r.decode() for r in _resource_record]
    else:
        records = list(map(
            lambda r: r.to_text().replace('"', '').rstrip("."),
            resolver.resolve(domain, record_type, lifetime=timeout)))
    # Only successful lookups are cached; failures raise above.
    if isinstance(cache, ExpiringDict):
        cache[cache_key] = records
    return records
def _get_nameservers(domain, nameservers=None, resolver=None, timeout=2.0):
    """
    Queries DNS for a domain's NS records
    Args:
        domain (str): A domain name
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for an answer from DNS
    Returns:
        list: The domain's nameserver hostnames (empty when the domain
              has no NS records)
    Raises:
        :exc:`checkdmarc.DNSException`
    """
    try:
        return _query_dns(domain, "NS", nameservers=nameservers,
                          resolver=resolver, timeout=timeout)
    except dns.resolver.NXDOMAIN:
        raise DNSExceptionNXDOMAIN(
            "The domain {0} does not exist".format(domain))
    except dns.resolver.NoAnswer:
        # No NS records is not an error; report an empty list.
        return []
    except Exception as error:
        raise DNSException(error)
def _get_mx_hosts(domain, nameservers=None, resolver=None, timeout=2.0):
    """
    Queries DNS for a list of Mail Exchange hosts
    Args:
        domain (str): A domain name
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for an answer from DNS
    Returns:
        list: A list of ``OrderedDicts``; each containing a ``preference``
              integer and a ``hostname``
    Raises:
        :exc:`checkdmarc.DNSException`
    """
    hosts = []
    try:
        logging.debug("Checking for MX records on {0}".format(domain))
        answers = _query_dns(domain, "MX", nameservers=nameservers,
                             resolver=resolver, timeout=timeout)
        # Each answer is "<preference> <exchange hostname>".
        for answer in answers:
            preference, _, exchange = answer.partition(" ")
            hosts.append(OrderedDict([
                ("preference", int(preference)),
                ("hostname", exchange.rstrip(".").strip().lower())]))
        hosts.sort(key=lambda host: (host["preference"], host["hostname"]))
    except dns.resolver.NXDOMAIN:
        raise DNSExceptionNXDOMAIN(
            "The domain {0} does not exist".format(domain))
    except dns.resolver.NoAnswer:
        # A domain without MX records yields an empty host list.
        pass
    except Exception as error:
        raise DNSException(error)
    return hosts
def _get_a_records(domain, nameservers=None, resolver=None, timeout=2.0):
    """
    Queries DNS for A and AAAA records
    Args:
        domain (str): A domain name
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for an answer from DNS
    Returns:
        list: A sorted list of IPv4 and IPv6 addresses
    Raises:
        :exc:`checkdmarc.DNSException`
    """
    addresses = []
    for record_type in ("A", "AAAA"):
        try:
            addresses.extend(_query_dns(domain, record_type,
                                        nameservers=nameservers,
                                        resolver=resolver, timeout=timeout))
        except dns.resolver.NXDOMAIN:
            raise DNSExceptionNXDOMAIN("The domain {0} does not exist".format(
                domain))
        except dns.resolver.NoAnswer:
            # Sometimes a domain will only have A or AAAA records, but not both
            pass
        except Exception as error:
            raise DNSException(error)
    return sorted(addresses)
def _get_reverse_dns(ip_address, nameservers=None, resolver=None, timeout=2.0):
    """
    Queries for an IP addresses reverse DNS hostname(s)
    Args:
        ip_address (str): An IPv4 or IPv6 address
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for an answer from DNS
    Returns:
        list: A list of reverse DNS hostnames
    Raises:
        :exc:`checkdmarc.DNSException`
    """
    try:
        # Convert the address to its in-addr.arpa / ip6.arpa name, then
        # look up the PTR records for it.
        name = dns.reversename.from_address(ip_address)
        hostnames = _query_dns(name, "PTR", nameservers=nameservers,
                               resolver=resolver, timeout=timeout)
    except dns.resolver.NXDOMAIN:
        # A missing PTR record is common; report "no hostnames" rather
        # than an error.
        return []
    except Exception as error:
        raise DNSException(error)
    return hostnames
def _get_txt_records(domain, nameservers=None, resolver=None, timeout=2.0):
    """
    Queries DNS for TXT records
    Args:
        domain (str): A domain name
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for an answer from DNS
    Returns:
        list: A list of TXT records
    Raises:
        :exc:`checkdmarc.DNSException`
    """
    try:
        return _query_dns(domain, "TXT", nameservers=nameservers,
                          resolver=resolver, timeout=timeout)
    except dns.resolver.NXDOMAIN:
        raise DNSExceptionNXDOMAIN("The domain {0} does not exist".format(
            domain))
    except dns.resolver.NoAnswer:
        raise DNSException(
            "The domain {0} does not have any TXT records".format(domain))
    except Exception as error:
        raise DNSException(error)
def _query_dmarc_record(domain, nameservers=None, resolver=None, timeout=2.0):
    """
    Queries DNS for a DMARC record
    Args:
        domain (str): A domain name
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for a record from DNS
    Returns:
        str: A record string or None
    """
    # Per RFC 7489, the record lives at the "_dmarc" subdomain.
    target = "_dmarc.{0}".format(domain.lower())
    dmarc_record = None
    dmarc_record_count = 0
    unrelated_records = []
    try:
        records = _query_dns(target, "TXT", nameservers=nameservers,
                             resolver=resolver, timeout=timeout)
        for record in records:
            if record.startswith("v=DMARC1"):
                dmarc_record_count += 1
            else:
                unrelated_records.append(record)
        if dmarc_record_count > 1:
            raise MultipleDMARCRecords(
                "Multiple DMARC policy records are not permitted - "
                "https://tools.ietf.org/html/rfc7489#section-6.6.3")
        if len(unrelated_records) > 0:
            raise UnrelatedTXTRecordFoundAtDMARC(
                "Unrelated TXT records were discovered. These should be "
                "removed, as some receivers may not expect to find "
                "unrelated TXT records "
                "at {0}\n\n{1}".format(target, "\n\n".join(unrelated_records)))
        dmarc_record = records[0]
    except dns.resolver.NoAnswer:
        # No TXT data at _dmarc: check whether the record was
        # (incorrectly) published at the domain root instead.
        try:
            records = _query_dns(domain.lower(), "TXT",
                                 nameservers=nameservers, resolver=resolver,
                                 timeout=timeout)
            for record in records:
                if record.startswith("v=DMARC1"):
                    raise DMARCRecordInWrongLocation(
                        "The DMARC record must be located at "
                        "{0}, not {1}".format(target, domain.lower()))
        except dns.resolver.NoAnswer:
            pass
        except dns.resolver.NXDOMAIN:
            raise DMARCRecordNotFound(
                "The domain {0} does not exist".format(domain))
        except Exception as error:
            raise DMARCRecordNotFound(error)
    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
        # A missing record is reported by returning None, not raising.
        pass
    except Exception as error:
        # NOTE(review): exceptions raised in the try body above (e.g.
        # MultipleDMARCRecords) are re-wrapped here as
        # DMARCRecordNotFound -- confirm that is intended.
        raise DMARCRecordNotFound(error)
    return dmarc_record
def _query_bmi_record(domain, selector="default", nameservers=None,
                      resolver=None, timeout=2.0):
    """
    Queries DNS for a BIMI record
    Args:
        domain (str): A domain name
        selector: the BIMI selector
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for a record from DNS
    Returns:
        str: A record string or None
    """
    # BIMI records live at "<selector>._bimi.<domain>".
    target = "{0}._bimi.{1}".format(selector, domain.lower())
    bimi_record = None
    bimi_record_count = 0
    unrelated_records = []
    try:
        records = _query_dns(target, "TXT", nameservers=nameservers,
                             resolver=resolver, timeout=timeout)
        for record in records:
            if record.startswith("v=BIMI1"):
                bimi_record_count += 1
            else:
                unrelated_records.append(record)
        if bimi_record_count > 1:
            raise MultipleBIMIRecords(
                "Multiple BMI records are not permitted")
        if len(unrelated_records) > 0:
            # Fixed: raise the BIMI-specific exception; the DMARC variant
            # was raised here by mistake.
            raise UnrelatedTXTRecordFoundAtBIMI(
                "Unrelated TXT records were discovered. These should be "
                "removed, as some receivers may not expect to find "
                "unrelated TXT records "
                "at {0}\n\n{1}".format(target, "\n\n".join(unrelated_records)))
        bimi_record = records[0]
    except dns.resolver.NoAnswer:
        # No TXT data at the selector: check whether the record was
        # (incorrectly) published at the domain root instead.
        try:
            records = _query_dns(domain.lower(), "TXT",
                                 nameservers=nameservers, resolver=resolver,
                                 timeout=timeout)
            for record in records:
                if record.startswith("v=BIMI1"):
                    raise BIMIRecordInWrongLocation(
                        "The BIMI record must be located at "
                        "{0}, not {1}".format(target, domain.lower()))
        except dns.resolver.NoAnswer:
            pass
        except dns.resolver.NXDOMAIN:
            raise BIMIRecordNotFound(
                "The domain {0} does not exist".format(domain))
        except Exception as error:
            # Fixed: the exception object was constructed but never
            # raised, silently swallowing DNS failures (matches the
            # _query_dmarc_record sibling now).
            raise BIMIRecordNotFound(error)
    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
        # A missing record is reported by returning None, not raising.
        pass
    except Exception as error:
        raise BIMIRecordNotFound(error)
    return bimi_record
def query_dmarc_record(domain, nameservers=None, resolver=None, timeout=2.0):
    """
    Queries DNS for a DMARC record
    Args:
        domain (str): A domain name
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for a record from DNS
    Returns:
        OrderedDict: An ``OrderedDict`` with the following keys:
                     - ``record`` - the unparsed DMARC record string
                     - ``location`` - the domain where the record was found
                     - ``warnings`` - warning conditions found
    Raises:
        :exc:`checkdmarc.DMARCRecordNotFound`
        :exc:`checkdmarc.DMARCRecordInWrongLocation`
        :exc:`checkdmarc.MultipleDMARCRecords`
        :exc:`checkdmarc.SPFRecordFoundWhereDMARCRecordShouldBe`
    """
    logging.debug("Checking for a DMARC record on {0}".format(domain))
    warnings = []
    lowered_domain = domain.lower()
    base_domain = get_base_domain(domain)
    location = lowered_domain
    record = _query_dmarc_record(domain, nameservers=nameservers,
                                 resolver=resolver, timeout=timeout)
    # A DMARC record published at the domain root is ignored by
    # receivers; surface it as a warning, best-effort.
    try:
        for txt_record in _query_dns(lowered_domain, "TXT",
                                     nameservers=nameservers,
                                     resolver=resolver, timeout=timeout):
            if txt_record.startswith("v=DMARC1"):
                warnings.append("DMARC record at root of {0} "
                                "has no effect".format(lowered_domain))
    except Exception:
        pass
    # Fall back to the organizational (base) domain per RFC 7489.
    if record is None and domain != base_domain:
        record = _query_dmarc_record(base_domain, nameservers=nameservers,
                                     resolver=resolver, timeout=timeout)
        location = base_domain
    if record is None:
        raise DMARCRecordNotFound(
            "A DMARC record does not exist for this domain or its base domain")
    return OrderedDict([("record", record), ("location", location),
                        ("warnings", warnings)])
def query_bimi_record(domain, selector="default", nameservers=None,
                      resolver=None, timeout=2.0):
    """
    Queries DNS for a BIMI record
    Args:
        domain (str): A domain name
        selector (str): The BIMI selector
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for a record from DNS
    Returns:
        OrderedDict: An ``OrderedDict`` with the following keys:
                     - ``record`` - the unparsed BIMI record string
                     - ``location`` - the domain where the record was found
                     - ``warnings`` - warning conditions found
    Raises:
        :exc:`checkdmarc.BIMIRecordNotFound`
        :exc:`checkdmarc.BIMIRecordInWrongLocation`
        :exc:`checkdmarc.MultipleBIMIRecords`
    """
    logging.debug("Checking for a BIMI record on {0}".format(domain))
    warnings = []
    base_domain = get_base_domain(domain)
    location = domain.lower()
    record = _query_bmi_record(domain, selector=selector,
                               nameservers=nameservers, resolver=resolver,
                               timeout=timeout)
    # A BIMI record published at the domain root is ignored by
    # receivers; surface it as a warning, best-effort.
    try:
        root_records = _query_dns(domain.lower(), "TXT",
                                  nameservers=nameservers, resolver=resolver,
                                  timeout=timeout)
        for root_record in root_records:
            if root_record.startswith("v=BIMI1"):
                warnings.append("BIMI record at root of {0} "
                                "has no effect".format(domain.lower()))
    except Exception:
        pass
    # NOTE(review): the base-domain fallback only runs when a
    # *non*-default selector was requested, yet it then queries the
    # "default" selector -- confirm this condition is intended.
    if record is None and domain != base_domain and selector != "default":
        record = _query_bmi_record(base_domain, selector="default",
                                   nameservers=nameservers, resolver=resolver,
                                   timeout=timeout)
        location = base_domain
    if record is None:
        raise BIMIRecordNotFound(
            "A BIMI record does not exist for this domain or its base domain")
    return OrderedDict([("record", record), ("location", location),
                        ("warnings", warnings)])
def get_dmarc_tag_description(tag, value=None):
    """
    Get the name, default value, and description for a DMARC tag, and/or a
    description for a tag value
    Args:
        tag (str): A DMARC tag
        value (str): An optional value
    Returns:
        OrderedDict: An ``OrderedDict`` with the following keys:
                     - ``name`` - the tag name
                     - ``default``- the tag's default value
                     - ``description`` - A description of the tag or value
    """
    details = tag_values[tag]
    name = details["name"]
    description = details["description"]
    default = details.get("default")
    allowed_values = details.get("values")
    # Fixed: the original tested ``value in tag_values[tag]["values"][value]``,
    # which checked membership in the *description string* and raised
    # KeyError for any value that was not already a key.
    if isinstance(value, str) and allowed_values and value in allowed_values:
        # Replace the generic tag description with the one for this
        # specific value.
        description = allowed_values[value]
    elif isinstance(value, list) and allowed_values:
        described = ["{0}: {1}".format(item, allowed_values[item])
                     for item in value if item in allowed_values]
        new_description = "\n\n".join(described)
        if new_description != "":
            description = new_description
    return OrderedDict(
        [("name", name), ("default", default), ("description", description)])
def parse_dmarc_report_uri(uri):
    """
    Parses a DMARC Reporting (i.e. ``rua``/``ruf``) URI
    .. note::
        ``mailto`` is the only reporting URI scheme supported in DMARC1
    Args:
        uri: A DMARC URI
    Returns:
        OrderedDict: An ``OrderedDict`` of the URI's components:
                     - ``scheme``
                     - ``address``
                     - ``size_limit``
    Raises:
        :exc:`checkdmarc.InvalidDMARCReportURI`
    """
    uri = uri.strip()
    matches = MAILTO_REGEX.findall(uri)
    # Exactly one mailto match is required for a valid URI.
    if len(matches) != 1:
        raise InvalidDMARCReportURI(
            "{0} is not a valid DMARC report URI".format(uri))
    scheme, email_address, size_limit = matches[0]
    # The optional size limit arrives as "!<limit>"; an absent limit
    # becomes None.
    size_limit = size_limit.lstrip("!") or None
    return OrderedDict([("scheme", scheme.lower()),
                        ("address", email_address),
                        ("size_limit", size_limit)])
def check_wildcard_dmarc_report_authorization(domain,
                                              nameservers=None, resolver=None,
                                              timeout=2.0):
    """
    Checks for a wildcard DMARC report authorization record, e.g.:
    ::
        *._report.example.com IN TXT "v=DMARC1"
    Args:
        domain (str): The domain to check
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for an answer from DNS
    Returns:
        bool: An indicator of the existence of a valid wildcard DMARC report
              authorization record
    """
    wildcard_target = "*._report._dmarc.{0}".format(domain)
    authorization_count = 0
    unrelated_records = []
    try:
        txt_records = _query_dns(wildcard_target, "TXT",
                                 nameservers=nameservers, resolver=resolver,
                                 timeout=timeout)
        for txt_record in txt_records:
            if txt_record.startswith("v=DMARC1"):
                authorization_count += 1
            else:
                unrelated_records.append(txt_record)
        # Unrelated records invalidate the authorization; the raise below
        # is caught by the handler and reported as "not authorized".
        if unrelated_records:
            raise UnrelatedTXTRecordFoundAtDMARC(
                "Unrelated TXT records were discovered. "
                "These should be removed, as some "
                "receivers may not expect to find unrelated TXT records "
                "at {0}\n\n{1}".format(wildcard_target,
                                       "\n\n".join(unrelated_records)))
        return authorization_count > 0
    except Exception:
        # Best-effort check: any lookup failure means no authorization.
        return False
def verify_dmarc_report_destination(source_domain, destination_domain,
                                    nameservers=None, resolver=None,
                                    timeout=2.0):
    """
    Checks if the report destination accepts reports for the source domain
    per RFC 7489, section 7.1

    Args:
        source_domain (str): The source domain
        destination_domain (str): The destination domain
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for an answer from DNS

    Returns:
        bool: Indicates if the report domain accepts reports from the given
              domain

    Raises:
        :exc:`checkdmarc.UnverifiedDMARCURIDestination`
        :exc:`checkdmarc.UnrelatedTXTRecordFound`
    """
    source_domain = source_domain.lower()
    destination_domain = destination_domain.lower()
    # Only third-party destinations (different base domain) need an
    # explicit authorization record
    if get_base_domain(source_domain) != get_base_domain(destination_domain):
        # Bug fix: propagate the caller's timeout to the wildcard check
        # instead of silently falling back to its default
        if check_wildcard_dmarc_report_authorization(destination_domain,
                                                     nameservers=nameservers,
                                                     resolver=resolver,
                                                     timeout=timeout):
            return True
        target = "{0}._report._dmarc.{1}".format(source_domain,
                                                 destination_domain)
        message = "{0} does not indicate that it accepts DMARC reports " \
                  "about {1} - " \
                  "Authorization record not found: " \
                  '{2} IN TXT "v=DMARC1"'.format(destination_domain,
                                                 source_domain,
                                                 target)
        dmarc_record_count = 0
        unrelated_records = []
        try:
            records = _query_dns(target, "TXT",
                                 nameservers=nameservers, resolver=resolver,
                                 timeout=timeout)
            for record in records:
                if record.startswith("v=DMARC1"):
                    dmarc_record_count += 1
                else:
                    unrelated_records.append(record)
            if len(unrelated_records) > 0:
                raise UnrelatedTXTRecordFoundAtDMARC(
                    "Unrelated TXT records were discovered. "
                    "These should be removed, as some "
                    "receivers may not expect to find unrelated TXT records "
                    "at {0}\n\n{1}".format(target,
                                           "\n\n".join(unrelated_records)))
            if dmarc_record_count < 1:
                return False
        except Exception:
            # NOTE: this also converts the UnrelatedTXTRecordFoundAtDMARC
            # raised above into an UnverifiedDMARCURIDestination
            raise UnverifiedDMARCURIDestination(message)
    return True
def parse_dmarc_record(record, domain, parked=False,
                       include_tag_descriptions=False,
                       nameservers=None, resolver=None, timeout=2.0):
    """
    Parses a DMARC record

    Args:
        record (str): A DMARC record
        domain (str): The domain where the record is found
        parked (bool): Indicates if a domain is parked
        include_tag_descriptions (bool): Include descriptions in parsed results
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for an answer from DNS

    Returns:
        OrderedDict: An ``OrderedDict`` with the following keys:
                     - ``tags`` - An ``OrderedDict`` of DMARC tags
                       - ``value`` - The DMARC tag value
                       - ``explicit`` - ``bool``: A value is explicitly set
                       - ``default`` - The tag's default value
                       - ``description`` - A description of the tag/value
                     - ``warnings`` - A ``list`` of warnings

    .. note::
        ``default`` and ``description`` are only included if
        ``include_tag_descriptions`` is set to ``True``

    Raises:
        :exc:`checkdmarc.DMARCSyntaxError`
        :exc:`checkdmarc.InvalidDMARCTag`
        :exc:`checkdmarc.InvalidDMARCTagValue`
        :exc:`checkdmarc.InvalidDMARCReportURI`
        :exc:`checkdmarc.UnverifiedDMARCURIDestination`
        :exc:`checkdmarc.UnrelatedTXTRecordFound`
        :exc:`checkdmarc.DMARCReportEmailAddressMissingMXRecords`
    """
    logging.debug("Parsing the DMARC record for {0}".format(domain))
    spf_in_dmarc_error_msg = "Found a SPF record where a DMARC record " \
                             "should be; most likely, the _dmarc " \
                             "subdomain record does not actually exist, " \
                             "and the request for TXT records was " \
                             "redirected to the base domain"
    warnings = []
    record = record.strip('"')
    if record.lower().startswith("v=spf1"):
        raise SPFRecordFoundWhereDMARCRecordShouldBe(spf_in_dmarc_error_msg)
    dmarc_syntax_checker = _DMARCGrammar()
    parsed_record = dmarc_syntax_checker.parse(record)
    if not parsed_record.is_valid:
        expecting = list(
            map(lambda x: str(x).strip('"'), list(parsed_record.expecting)))
        raise DMARCSyntaxError("Error: Expected {0} at position {1} in: "
                               "{2}".format(" or ".join(expecting),
                                            parsed_record.pos, record))
    pairs = DMARC_TAG_VALUE_REGEX.findall(record)
    tags = OrderedDict()
    # Find explicit tags
    for pair in pairs:
        tags[pair[0].lower()] = OrderedDict(
            [("value", str(pair[1])), ("explicit", True)])
    # Include implicit tags and their defaults
    for tag in tag_values.keys():
        if tag not in tags and "default" in tag_values[tag]:
            tags[tag] = OrderedDict(
                [("value", tag_values[tag]["default"]), ("explicit", False)])
    if "p" not in tags:
        raise DMARCSyntaxError(
            'The record is missing the required policy ("p") tag')
    tags["p"]["value"] = tags["p"]["value"].lower()
    # The subdomain policy defaults to the domain policy
    if "sp" not in tags:
        tags["sp"] = OrderedDict([("value", tags["p"]["value"]),
                                  ("explicit", False)])
    if list(tags.keys())[1] != "p":
        raise DMARCSyntaxError("the p tag must immediately follow the v tag")
    tags["v"]["value"] = tags["v"]["value"].upper()
    # Validate tag values
    for tag in tags:
        if tag not in tag_values:
            raise InvalidDMARCTag("{0} is not a valid DMARC tag".format(tag))
        if tag == "fo":
            # fo accepts a colon-separated list of options
            tags[tag]["value"] = tags[tag]["value"].split(":")
            if "0" in tags[tag]["value"] and "1" in tags[tag]["value"]:
                warnings.append("Including 0 and 1 fo tag values is redundant")
            for value in tags[tag]["value"]:
                if value not in tag_values[tag]["values"]:
                    raise InvalidDMARCTagValue(
                        "{0} is not a valid option for the DMARC "
                        "fo tag".format(value))
        elif tag == "rf":
            tags[tag]["value"] = tags[tag]["value"].lower().split(":")
            for value in tags[tag]["value"]:
                if value not in tag_values[tag]["values"]:
                    raise InvalidDMARCTagValue(
                        "{0} is not a valid option for the DMARC "
                        "rf tag".format(value))
        elif "values" in tag_values[tag] and tags[tag]["value"] not in \
                tag_values[tag]["values"]:
            raise InvalidDMARCTagValue(
                "Tag {0} must have one of the following values: "
                "{1} - not {2}".format(tag,
                                       ",".join(tag_values[tag]["values"]),
                                       tags[tag]["value"]))
    try:
        tags["pct"]["value"] = int(tags["pct"]["value"])
    except ValueError:
        raise InvalidDMARCTagValue(
            "The value of the pct tag must be an integer")
    try:
        tags["ri"]["value"] = int(tags["ri"]["value"])
    except ValueError:
        raise InvalidDMARCTagValue(
            "The value of the ri tag must be an integer")
    if "rua" in tags:
        parsed_uris = []
        uris = tags["rua"]["value"].split(",")
        for uri in uris:
            try:
                uri = parse_dmarc_report_uri(uri)
                parsed_uris.append(uri)
                email_address = uri["address"]
                email_domain = email_address.split("@")[-1]
                # External destinations must publish an authorization record
                if email_domain.lower() != domain.lower():
                    verify_dmarc_report_destination(domain, email_domain,
                                                    nameservers=nameservers,
                                                    resolver=resolver,
                                                    timeout=timeout)
                try:
                    _get_mx_hosts(email_domain, nameservers=nameservers,
                                  resolver=resolver, timeout=timeout)
                except _DMARCWarning:
                    raise DMARCReportEmailAddressMissingMXRecords(
                        "The domain for rua email address "
                        "{0} has no MX records".format(
                            email_address)
                    )
                except DNSException as warning:
                    raise DMARCReportEmailAddressMissingMXRecords(
                        "Failed to retrieve MX records for the domain of "
                        "rua email address "
                        "{0} - {1}".format(email_address, str(warning))
                    )
            except _DMARCWarning as warning:
                warnings.append(str(warning))
        tags["rua"]["value"] = parsed_uris
        if len(parsed_uris) > 2:
            warnings.append(str(_DMARCBestPracticeWarning(
                "Some DMARC reporters might not send to more than two rua URIs"
            )))
    else:
        warnings.append(str(_DMARCBestPracticeWarning(
            "rua tag (destination for aggregate reports) not found")))
    if "ruf" in tags.keys():
        parsed_uris = []
        uris = tags["ruf"]["value"].split(",")
        for uri in uris:
            try:
                uri = parse_dmarc_report_uri(uri)
                parsed_uris.append(uri)
                email_address = uri["address"]
                email_domain = email_address.split("@")[-1]
                if email_domain.lower() != domain.lower():
                    verify_dmarc_report_destination(domain, email_domain,
                                                    nameservers=nameservers,
                                                    resolver=resolver,
                                                    timeout=timeout)
                try:
                    _get_mx_hosts(email_domain, nameservers=nameservers,
                                  resolver=resolver, timeout=timeout)
                except _SPFWarning:
                    raise DMARCReportEmailAddressMissingMXRecords(
                        "The domain for ruf email address "
                        "{0} has no MX records".format(
                            email_address)
                    )
                except DNSException as warning:
                    raise DMARCReportEmailAddressMissingMXRecords(
                        "Failed to retrieve MX records for the domain of "
                        "ruf email address "
                        "{0} - {1}".format(email_address, str(warning))
                    )
            except _DMARCWarning as warning:
                warnings.append(str(warning))
        tags["ruf"]["value"] = parsed_uris
        if len(parsed_uris) > 2:
            warnings.append(str(_DMARCBestPracticeWarning(
                "Some DMARC reporters might not send to more than two ruf URIs"
            )))
    if tags["pct"]["value"] < 0 or tags["pct"]["value"] > 100:
        warnings.append(str(InvalidDMARCTagValue(
            "pct value must be an integer between 0 and 100")))
    elif tags["pct"]["value"] < 100:
        warning_msg = "pct value is less than 100. This leads to " \
                      "inconsistent and unpredictable policy " \
                      "enforcement. Consider using p=none to " \
                      "monitor results instead"
        warnings.append(str(_DMARCBestPracticeWarning(warning_msg)))
    # Bug fix: compare the tag's *value*; previously the whole tag
    # OrderedDict was compared against "reject", which is never equal,
    # so these warnings fired for every parked domain regardless of the
    # actual policy
    if parked and tags["p"]["value"] != "reject":
        warning_msg = "Policy (p=) should be reject for parked domains"
        warnings.append(str(_DMARCBestPracticeWarning(warning_msg)))
    if parked and tags["sp"]["value"] != "reject":
        warning_msg = "Subdomain policy (sp=) should be reject for " \
                      "parked domains"
        warnings.append(str(_DMARCBestPracticeWarning(warning_msg)))
    # Add descriptions if requested
    if include_tag_descriptions:
        for tag in tags:
            details = get_dmarc_tag_description(tag, tags[tag]["value"])
            tags[tag]["name"] = details["name"]
            if details["default"]:
                tags[tag]["default"] = details["default"]
            tags[tag]["description"] = details["description"]
    return OrderedDict([("tags", tags), ("warnings", warnings)])
def get_dmarc_record(domain, include_tag_descriptions=False, nameservers=None,
                     resolver=None, timeout=2.0):
    """
    Retrieves a DMARC record for a domain and parses it

    Args:
        domain (str): A domain name
        include_tag_descriptions (bool): Include descriptions in parsed results
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for an answer from DNS

    Returns:
        OrderedDict: An ``OrderedDict`` with the following keys:
                     - ``record`` - The DMARC record string
                     - ``location`` - Where the DMARC record was found
                     - ``parsed`` - See :meth:`checkdmarc.parse_dmarc_record`

    Raises:
        :exc:`checkdmarc.DMARCRecordNotFound`
        :exc:`checkdmarc.DMARCRecordInWrongLocation`
        :exc:`checkdmarc.MultipleDMARCRecords`
        :exc:`checkdmarc.SPFRecordFoundWhereDMARCRecordShouldBe`
        :exc:`checkdmarc.DMARCSyntaxError`
        :exc:`checkdmarc.InvalidDMARCTag`
        :exc:`checkdmarc.InvalidDMARCTagValue`
        :exc:`checkdmarc.InvalidDMARCReportURI`
        :exc:`checkdmarc.UnverifiedDMARCURIDestination`
        :exc:`checkdmarc.UnrelatedTXTRecordFound`
        :exc:`checkdmarc.DMARCReportEmailAddressMissingMXRecords`
    """
    query_results = query_dmarc_record(domain, nameservers=nameservers,
                                       resolver=resolver, timeout=timeout)
    record = query_results["record"]
    location = query_results["location"]
    # Parse against the location where the record was actually found, which
    # may differ from the domain that was queried
    parsed_record = parse_dmarc_record(
        record, location,
        include_tag_descriptions=include_tag_descriptions,
        nameservers=nameservers, resolver=resolver, timeout=timeout)
    return OrderedDict([("record", record),
                        ("location", location),
                        ("parsed", parsed_record)])
def query_spf_record(domain, nameservers=None, resolver=None, timeout=2.0):
    """
    Queries DNS for an SPF record

    Args:
        domain (str): A domain name
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for an answer from DNS

    Returns:
        OrderedDict: An ``OrderedDict`` with the following keys:
                     - ``record`` - The SPF record string
                     - ``warnings`` - A ``list`` of warnings

    Raises:
        :exc:`checkdmarc.SPFRecordNotFound`
        :exc:`checkdmarc.MultipleSPFRTXTRecords`
    """
    logging.debug("Checking for a SPF record on {0}".format(domain))
    warnings = []
    spf_type_records = []
    spf_txt_records = []
    try:
        # Deprecated type-99 SPF records; failures here are non-fatal
        # because the authoritative copy lives in TXT records
        spf_type_records += _query_dns(domain, "SPF", nameservers=nameservers,
                                       resolver=resolver, timeout=timeout)
    except Exception:
        # Fix: was ``except (dns.resolver.NoAnswer, Exception)`` — listing a
        # subclass next to ``Exception`` is redundant; behavior is unchanged
        pass
    if len(spf_type_records) > 0:
        message = "SPF type DNS records found. Use of DNS Type SPF has been " \
                  "removed in the standards " \
                  "track version of SPF, RFC 7208. These records should " \
                  "be removed and replaced with TXT records: " \
                  "{0}".format(",".join(spf_type_records))
        warnings.append(message)
    warnings_str = ""
    if len(warnings) > 0:
        warnings_str = ". {0}".format(" ".join(warnings))
    try:
        answers = _query_dns(domain, "TXT", nameservers=nameservers,
                             resolver=resolver, timeout=timeout)
        spf_record = None
        # Collect every v=spf1 TXT record so duplicates can be detected
        for record in answers:
            if record.startswith("v=spf1"):
                spf_txt_records.append(record)
        if len(spf_txt_records) > 1:
            raise MultipleSPFRTXTRecords(
                "{0} has multiple SPF TXT records{1}".format(
                    domain, warnings_str))
        elif len(spf_txt_records) == 1:
            spf_record = spf_txt_records[0]
        if spf_record is None:
            raise SPFRecordNotFound(
                "{0} does not have a SPF TXT record{1}".format(
                    domain, warnings_str))
    except dns.resolver.NoAnswer:
        raise SPFRecordNotFound(
            "{0} does not have a SPF TXT record{1}".format(
                domain, warnings_str))
    except dns.resolver.NXDOMAIN:
        raise SPFRecordNotFound("The domain {0} does not exist".format(domain))
    except Exception as error:
        raise SPFRecordNotFound(error)
    return OrderedDict([("record", spf_record), ("warnings", warnings)])
def parse_spf_record(record, domain, parked=False, seen=None,
                     nameservers=None, resolver=None,
                     recursion=None, timeout=2.0):
    """
    Parses an SPF record, including resolving ``a``, ``mx``, and ``include``
    mechanisms

    Args:
        record (str): An SPF record
        domain (str): The domain that the SPF record came from
        parked (bool): indicated if a domain has been parked
        seen (list): A list of domains seen in past loops
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        recursion (list): The chain of domains for the current
                          include/redirect traversal (used to detect loops)
        timeout (float): number of seconds to wait for an answer from DNS

    Returns:
        OrderedDict: An ``OrderedDict`` with the following keys:
                     - ``dns_lookups`` - Number of DNS lookups required by
                       the record
                     - ``dns_void_lookups`` - Number of void DNS lookups
                       (lookups that returned no usable records) required
                       by the record
                     - ``parsed`` - An ``OrderedDict`` of a parsed SPF record
                       values
                     - ``warnings`` - A ``list`` of warnings

    Raises:
        :exc:`checkdmarc.SPFIncludeLoop`
        :exc:`checkdmarc.SPFRedirectLoop`
        :exc:`checkdmarc.SPFSyntaxError`
        :exc:`checkdmarc.SPFTooManyDNSLookups`
        :exc:`checkdmarc.SPFTooManyVoidDNSLookups`
    """
    logging.debug("Parsing the SPF record on {0}".format(domain))
    # Mechanisms that each cost one DNS lookup toward the RFC 7208
    # section 4.6.4 limit of 10
    lookup_mechanisms = ["a", "mx", "include", "exists", "redirect"]
    # ``seen`` accumulates every domain resolved during the entire parse
    # (used to flag duplicate includes); ``recursion`` holds only the
    # current include/redirect chain (used to detect loops)
    if seen is None:
        seen = [domain]
    if recursion is None:
        recursion = [domain]
    # Joined TXT strings may still carry quote characters; remove them
    record = record.replace('" ', '').replace('"', '')
    warnings = []
    spf_syntax_checker = _SPFGrammar()
    if parked:
        correct_record = "v=spf1 -all"
        if record != correct_record:
            warnings.append("The SPF record for parked domains should be: "
                            "{0} not: {1}".format(correct_record, record))
    # Receivers ignore anything after the all mechanism; normalize it away
    # before syntax checking
    if len(AFTER_ALL_REGEX.findall(record)) > 0:
        warnings.append("Any text after the all mechanism is ignored")
        record = AFTER_ALL_REGEX.sub("all", record)
    parsed_record = spf_syntax_checker.parse(record)
    if not parsed_record.is_valid:
        pos = parsed_record.pos
        expecting = list(
            map(lambda x: str(x).strip('"'), list(parsed_record.expecting)))
        expecting = " or ".join(expecting)
        raise SPFSyntaxError(
            "{0}: Expected {1} at position {2} in: {3}".format(domain,
                                                               expecting,
                                                               pos,
                                                               record))
    matches = SPF_MECHANISM_REGEX.findall(record.lower())
    parsed = OrderedDict([("pass", []),
                          ("neutral", []),
                          ("softfail", []),
                          ("fail", []),
                          ("include", []),
                          ("redirect", None),
                          ("exp", None),
                          ("all", "neutral")])
    lookup_mechanism_count = 0
    void_lookup_mechanism_count = 0
    # First pass: count lookup mechanisms so the 10-lookup limit can be
    # enforced before any DNS resolution is performed
    for match in matches:
        mechanism = match[1].lower()
        if mechanism in lookup_mechanisms:
            lookup_mechanism_count += 1
    if lookup_mechanism_count > 10:
        raise SPFTooManyDNSLookups(
            "Parsing the SPF record requires {0}/10 maximum DNS lookups - "
            "https://tools.ietf.org/html/rfc7208#section-4.6.4".format(
                lookup_mechanism_count),
            dns_lookups=lookup_mechanism_count)
    # Second pass: validate and resolve each mechanism
    for match in matches:
        result = spf_qualifiers[match[0]]
        mechanism = match[1]
        value = match[2]
        try:
            if mechanism == "ip4":
                try:
                    # ip_network() accepts both address families, so check
                    # the resulting type to catch an IPv6 value in an ip4 term
                    if not isinstance(ipaddress.ip_network(value,
                                                           strict=False),
                                      ipaddress.IPv4Network):
                        raise SPFSyntaxError("{0} is not a valid ipv4 value. "
                                             "Looks like ipv6".format(value))
                except ValueError:
                    raise SPFSyntaxError("{0} is not a valid ipv4 "
                                         "value".format(value))
            elif mechanism == "ip6":
                try:
                    if not isinstance(ipaddress.ip_network(value,
                                                           strict=False),
                                      ipaddress.IPv6Network):
                        raise SPFSyntaxError("{0} is not a valid ipv6 value. "
                                             "Looks like ipv4".format(value))
                except ValueError:
                    raise SPFSyntaxError("{0} is not a valid ipv6 "
                                         "value".format(value))
            if mechanism == "a":
                # A bare "a" mechanism refers to the record's own domain
                if value == "":
                    value = domain
                a_records = _get_a_records(value, nameservers=nameservers,
                                           resolver=resolver, timeout=timeout)
                if len(a_records) == 0:
                    raise _SPFMissingRecords(
                        "{0} does not have any A/AAAA records".format(
                            value.lower()))
                # NOTE: the loop variable reuses (shadows) the ``record``
                # parameter name here; the parameter is not read again below
                for record in a_records:
                    parsed[result].append(OrderedDict(
                        [("value", record), ("mechanism", mechanism)]))
            elif mechanism == "mx":
                # A bare "mx" mechanism refers to the record's own domain
                if value == "":
                    value = domain
                mx_hosts = _get_mx_hosts(value, nameservers=nameservers,
                                         resolver=resolver, timeout=timeout)
                if len(mx_hosts) == 0:
                    raise _SPFMissingRecords(
                        "{0} does not have any MX records".format(
                            value.lower()))
                if len(mx_hosts) > 10:
                    url = "https://tools.ietf.org/html/rfc7208#section-4.6.4"
                    raise SPFTooManyDNSLookups(
                        "{0} has more than 10 MX records - "
                        "{1}".format(value, url), dns_lookups=len(mx_hosts))
                for host in mx_hosts:
                    parsed[result].append(OrderedDict(
                        [("value", host["hostname"]),
                         ("mechanism", mechanism)]))
            elif mechanism == "redirect":
                if value.lower() in recursion:
                    raise SPFRedirectLoop(
                        "Redirect loop: {0}".format(value.lower()))
                seen.append(value.lower())
                try:
                    redirect_record = query_spf_record(value,
                                                       nameservers=nameservers,
                                                       resolver=resolver,
                                                       timeout=timeout)
                    redirect_record = redirect_record["record"]
                    # Recurse into the redirect target, extending the
                    # current chain for loop detection
                    redirect = parse_spf_record(redirect_record, value,
                                                seen=seen,
                                                recursion=recursion + [
                                                    value.lower()],
                                                nameservers=nameservers,
                                                resolver=resolver,
                                                timeout=timeout)
                    # Lookups performed by the redirected record count
                    # against this record's limits too
                    lookup_mechanism_count += redirect["dns_lookups"]
                    void_lookup_mechanism_count += redirect["dns_void_lookups"]
                    if lookup_mechanism_count > 10:
                        raise SPFTooManyDNSLookups(
                            "Parsing the SPF record requires {0}/10 maximum "
                            "DNS lookups - "
                            "https://tools.ietf.org/html/rfc7208"
                            "#section-4.6.4".format(
                                lookup_mechanism_count),
                            dns_lookups=lookup_mechanism_count)
                    if void_lookup_mechanism_count > 2:
                        raise SPFTooManyVoidDNSLookups(
                            "Parsing the SPF record has {0}/2 maximum void "
                            "DNS lookups - "
                            "https://tools.ietf.org/html/rfc7208#section-4.6.4"
                            .format(
                                void_lookup_mechanism_count),
                            dns_void_lookups=void_lookup_mechanism_count)
                    parsed["redirect"] = OrderedDict(
                        [("domain", value), ("record", redirect_record),
                         ("dns_lookups", redirect["dns_lookups"]),
                         ("dns_void_lookups", redirect["dns_void_lookups"]),
                         ("parsed", redirect["parsed"]),
                         ("warnings", redirect["warnings"])])
                    warnings += redirect["warnings"]
                except DNSException as error:
                    if isinstance(error, DNSExceptionNXDOMAIN):
                        void_lookup_mechanism_count += 1
                    # Downgrade DNS failures on the redirect target to a
                    # warning (handled by the outer except below)
                    raise _SPFWarning(str(error))
            elif mechanism == "exp":
                # NOTE(review): this lookup does not forward nameservers/
                # resolver/timeout like the other lookups do — confirm
                # whether _get_txt_records should receive them
                parsed["exp"] = _get_txt_records(value)[0]
            elif mechanism == "all":
                parsed["all"] = result
            elif mechanism == "include":
                if value.lower() in recursion:
                    raise SPFIncludeLoop("Include loop: {0}".format(
                        " -> ".join(recursion + [value.lower()])))
                # Duplicate includes are reported as a warning via the
                # outer except handler
                if value.lower() in seen:
                    raise _SPFDuplicateInclude(
                        "Duplicate include: {0}".format(value.lower()))
                seen.append(value.lower())
                try:
                    include_record = query_spf_record(value,
                                                      nameservers=nameservers,
                                                      resolver=resolver,
                                                      timeout=timeout)
                    include_record = include_record["record"]
                    include = parse_spf_record(include_record, value,
                                               seen=seen,
                                               recursion=recursion + [
                                                   value.lower()],
                                               nameservers=nameservers,
                                               resolver=resolver,
                                               timeout=timeout)
                    # Lookups performed inside the include count against
                    # this record's limits too
                    lookup_mechanism_count += include["dns_lookups"]
                    void_lookup_mechanism_count += include["dns_void_lookups"]
                    if lookup_mechanism_count > 10:
                        raise SPFTooManyDNSLookups(
                            "Parsing the SPF record requires {0}/10 maximum "
                            "DNS lookups - "
                            "https://tools.ietf.org/html/rfc7208"
                            "#section-4.6.4".format(
                                lookup_mechanism_count),
                            dns_lookups=lookup_mechanism_count)
                    if void_lookup_mechanism_count > 2:
                        raise SPFTooManyVoidDNSLookups(
                            "Parsing the SPF record has {0}/2 maximum void "
                            "DNS lookups - "
                            "https://tools.ietf.org/html/rfc7208#section-4.6.4"
                            .format(
                                void_lookup_mechanism_count),
                            dns_void_lookups=void_lookup_mechanism_count)
                    include = OrderedDict(
                        [("domain", value), ("record", include_record),
                         ("dns_lookups", include["dns_lookups"]),
                         ("dns_void_lookups", include["dns_void_lookups"]),
                         ("parsed", include["parsed"]),
                         ("warnings", include["warnings"])])
                    parsed["include"].append(include)
                    warnings += include["warnings"]
                except DNSException as error:
                    if isinstance(error, DNSExceptionNXDOMAIN):
                        void_lookup_mechanism_count += 1
                    raise _SPFWarning(str(error))
                except SPFRecordNotFound as error:
                    # A missing record on the include target is a void
                    # lookup, but still a hard failure
                    void_lookup_mechanism_count += 1
                    raise error
            elif mechanism == "ptr":
                parsed[result].append(
                    OrderedDict([("value", value), ("mechanism", mechanism)]))
                raise _SPFWarning("The ptr mechanism should not be used - "
                                  "https://tools.ietf.org/html/rfc7208"
                                  "#section-5.5")
            else:
                parsed[result].append(
                    OrderedDict([("value", value), ("mechanism", mechanism)]))
        except (_SPFWarning, DNSException) as warning:
            # Warnings and DNS failures are accumulated rather than fatal;
            # missing records and NXDOMAIN count as void lookups
            if isinstance(warning, (_SPFMissingRecords, DNSExceptionNXDOMAIN)):
                void_lookup_mechanism_count += 1
                if void_lookup_mechanism_count > 2:
                    raise SPFTooManyVoidDNSLookups(
                        "Parsing the SPF record has {0}/2 maximum void DNS "
                        "lookups - "
                        "https://tools.ietf.org/html/rfc7208#section-4.6.4"
                        .format(
                            void_lookup_mechanism_count),
                        dns_void_lookups=void_lookup_mechanism_count)
            warnings.append(str(warning))
    return OrderedDict(
        [('dns_lookups', lookup_mechanism_count),
         ('dns_void_lookups', void_lookup_mechanism_count),
         ("parsed", parsed), ("warnings", warnings)])
def get_spf_record(domain, nameservers=None, resolver=None, timeout=2.0):
    """
    Retrieves and parses an SPF record

    Args:
        domain (str): A domain name
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): Number of seconds to wait for an answer from DNS

    Returns:
        OrderedDict: An SPF record parsed by result

    Raises:
        :exc:`checkdmarc.SPFRecordNotFound`
        :exc:`checkdmarc.SPFIncludeLoop`
        :exc:`checkdmarc.SPFRedirectLoop`
        :exc:`checkdmarc.SPFSyntaxError`
        :exc:`checkdmarc.SPFTooManyDNSLookups`
    """
    query_results = query_spf_record(domain, nameservers=nameservers,
                                     resolver=resolver, timeout=timeout)
    spf_record = query_results["record"]
    results = parse_spf_record(spf_record, domain,
                               nameservers=nameservers,
                               resolver=resolver, timeout=timeout)
    # Include the raw record alongside the parsed results
    results["record"] = spf_record
    return results
@timeout_decorator.timeout(5, timeout_exception=SMTPError,
                           exception_message="Connection timed out")
def test_tls(hostname, ssl_context=None, cache=None):
    """
    Attempt to connect to an SMTP server port 465 and validate TLS/SSL support

    Args:
        hostname (str): The hostname
        cache (ExpiringDict): Cache storage
        ssl_context: A SSL context

    Returns:
        bool: TLS supported

    Raises:
        SMTPError: If the connection or the TLS negotiation fails
    """
    tls = False

    def _fail(error):
        # Cache the failure (when caching is enabled), then raise it
        if cache:
            cache[hostname] = dict(tls=False, error=error)
        raise SMTPError(error)

    if cache:
        cached_result = cache.get(hostname, None)
        if cached_result is not None:
            if cached_result["error"] is not None:
                raise SMTPError(cached_result["error"])
            return cached_result["tls"]
    if ssl_context is None:
        ssl_context = create_default_context()
    logging.debug("Testing TLS/SSL on {0}".format(hostname))
    try:
        server = smtplib.SMTP_SSL(hostname, context=ssl_context)
        server.ehlo_or_helo_if_needed()
        tls = True
        try:
            server.quit()
            server.close()
        except Exception as e:
            logging.debug(e)
        finally:
            # Bug fix: the success result used to be cached in an outer
            # ``finally`` that also ran after failures, overwriting the
            # cached error with ``error=None`` — and its ``return tls``
            # swallowed every SMTPError raised by the handlers below.
            # Cache the success result here, on the success path only
            # (mirroring test_starttls).
            if cache:
                cache[hostname] = dict(tls=tls, error=None)
            return tls
    except socket.gaierror:
        _fail("DNS resolution failed")
    except ConnectionRefusedError:
        _fail("Connection refused")
    except ConnectionResetError:
        _fail("Connection reset")
    except ConnectionAbortedError:
        _fail("Connection aborted")
    except TimeoutError:
        _fail("Connection timed out")
    except BlockingIOError as e:
        _fail(e.__str__())
    except SSLError as e:
        _fail("SSL error: {0}".format(e.__str__()))
    except CertificateError as e:
        _fail("Certificate error: {0}".format(e.__str__()))
    except smtplib.SMTPConnectError as e:
        message = e.__str__()
        error_code = int(message.lstrip("(").split(",")[0])
        if error_code == 554:
            message = " SMTP error code 554 - Not allowed"
        else:
            message = " SMTP error code {0}".format(error_code)
        _fail("Could not connect: {0}".format(message))
    except smtplib.SMTPHeloError as e:
        _fail("HELO error: {0}".format(e.__str__()))
    except smtplib.SMTPException as e:
        error_code = e.__str__().lstrip("(").split(",")[0]
        _fail("SMTP error code {0}".format(error_code))
    except OSError as e:
        _fail(e.__str__())
    except Exception as e:
        _fail(e.__str__())
@timeout_decorator.timeout(5, timeout_exception=SMTPError,
                           exception_message="Connection timed out")
def test_starttls(hostname, ssl_context=None, cache=None):
    """
    Attempt to connect to an SMTP server and validate STARTTLS support

    Args:
        hostname (str): The hostname
        cache (ExpiringDict): Cache storage
        ssl_context: A SSL context

    Returns:
        bool: STARTTLS supported
    """
    starttls = False

    def _fail(error):
        # Record the failure (when caching is enabled), then raise it
        if cache:
            cache[hostname] = dict(starttls=False, error=error)
        raise SMTPError(error)

    if cache:
        previous_result = cache.get(hostname, None)
        if previous_result is not None:
            if previous_result["error"] is not None:
                raise SMTPError(previous_result["error"])
            return previous_result["starttls"]
    if ssl_context is None:
        ssl_context = create_default_context()
    logging.debug("Testing STARTTLS on {0}".format(hostname))
    try:
        server = smtplib.SMTP(hostname)
        server.ehlo_or_helo_if_needed()
        if server.has_extn("starttls"):
            server.starttls(context=ssl_context)
            server.ehlo()
            starttls = True
        try:
            server.quit()
            server.close()
        except Exception as e:
            logging.debug(e)
        finally:
            # Cache and return the result on the success path only
            if cache:
                cache[hostname] = dict(starttls=starttls, error=None)
            return starttls
    except socket.gaierror:
        _fail("DNS resolution failed")
    except ConnectionRefusedError:
        _fail("Connection refused")
    except ConnectionResetError:
        _fail("Connection reset")
    except ConnectionAbortedError:
        _fail("Connection aborted")
    except TimeoutError:
        _fail("Connection timed out")
    except BlockingIOError as e:
        _fail(e.__str__())
    except SSLError as e:
        _fail("SSL error: {0}".format(e.__str__()))
    except CertificateError as e:
        _fail("Certificate error: {0}".format(e.__str__()))
    except smtplib.SMTPConnectError as e:
        message = e.__str__()
        error_code = int(message.lstrip("(").split(",")[0])
        if error_code == 554:
            message = " SMTP error code 554 - Not allowed"
        else:
            message = " SMTP error code {0}".format(error_code)
        _fail("Could not connect: {0}".format(message))
    except smtplib.SMTPHeloError as e:
        _fail("HELO error: {0}".format(e.__str__()))
    except smtplib.SMTPException as e:
        error_code = e.__str__().lstrip("(").split(",")[0]
        _fail("SMTP error code {0}".format(error_code))
    except OSError as e:
        _fail(e.__str__())
    except Exception as e:
        _fail(e.__str__())
def get_mx_hosts(domain, skip_tls=False,
                 approved_hostnames=None, parked=False,
                 nameservers=None, resolver=None, timeout=2.0):
    """
    Gets MX hostnames and their addresses, with TLS/STARTTLS testing

    Args:
        domain (str): A domain name
        skip_tls (bool): Skip STARTTLS testing
        approved_hostnames (list): A list of approved MX hostname substrings
        parked (bool): Indicates that the domains are parked
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for a record from DNS

    Returns:
        OrderedDict: An ``OrderedDict`` with the following keys:
                     - ``hosts`` - A ``list`` of ``OrderedDict`` with keys of
                       - ``preference`` - The MX preference value
                       - ``hostname`` - A hostname
                       - ``addresses`` - A ``list`` of IP addresses
                       - ``tls``/``starttls`` - ``bool`` TLS support results
                         (only present when TLS testing is not skipped)
                     - ``warnings`` - A ``list`` of MX resolution warnings
    """
    hosts = []
    warnings = []
    hostnames = set()
    # Hostnames already reported as duplicates, so each duplicate is only
    # warned about once
    dupe_hostnames = set()
    mx_records = _get_mx_hosts(domain, nameservers=nameservers,
                               resolver=resolver, timeout=timeout)
    for record in mx_records:
        hosts.append(OrderedDict([("preference", record["preference"]),
                                  ("hostname", record["hostname"].lower()),
                                  ("addresses", [])]))
    if parked and len(hosts) > 0:
        warnings.append("MX records found on parked domains")
    elif not parked and len(hosts) == 0:
        warnings.append("No MX records found. Is the domain parked?")
    if approved_hostnames:
        # Approval matching is case-insensitive
        approved_hostnames = list(map(lambda h: h.lower(),
                                      approved_hostnames))
    for host in hosts:
        # Skip hostnames listed in multiple MX records (warn once each)
        if host["hostname"] in hostnames:
            if host["hostname"] not in dupe_hostnames:
                warnings.append(
                    "Hostname {0} is listed in multiple MX records".format(
                        host["hostname"]))
                dupe_hostnames.add(host["hostname"])
            continue
        hostnames.add(host["hostname"])
        if approved_hostnames:
            # Approval is a substring match against the MX hostname
            approved = False
            for approved_hostname in approved_hostnames:
                if approved_hostname in host["hostname"]:
                    approved = True
                    break
            if not approved:
                warnings.append("Unapproved MX hostname: {0}".format(
                    host["hostname"]
                ))
        try:
            # Assign an empty list first so ``addresses`` stays well-formed
            # if the lookup below raises
            host["addresses"] = []
            host["addresses"] = _get_a_records(host["hostname"],
                                               nameservers=nameservers,
                                               resolver=resolver,
                                               timeout=timeout)
            if len(host["addresses"]) == 0:
                warnings.append(
                    "{0} does not have any A or AAAA DNS records".format(
                        host["hostname"]
                    ))
        except Exception as e:
            # .msv1.invalid hostnames come from Office 365 domain validation
            if host["hostname"].lower().endswith(".msv1.invalid"):
                warnings.append("{0}. Consider using a TXT record to validate "
                                "domain ownership in Office 365 instead."
                                "".format(e.__str__()))
            else:
                warnings.append(e.__str__())
        # Verify forward-confirmed reverse DNS for each resolved address
        for address in host["addresses"]:
            try:
                reverse_hostnames = _get_reverse_dns(address,
                                                     nameservers=nameservers,
                                                     resolver=resolver,
                                                     timeout=timeout)
            except DNSException:
                reverse_hostnames = []
            if len(reverse_hostnames) == 0:
                warnings.append(
                    "{0} does not have any reverse DNS (PTR) "
                    "records".format(address))
            for hostname in reverse_hostnames:
                try:
                    _addresses = _get_a_records(hostname, resolver=resolver)
                except DNSException as warning:
                    warnings.append(str(warning))
                    _addresses = []
                if address not in _addresses:
                    warnings.append("The reverse DNS of {1} is {0}, but "
                                    "the A/AAAA DNS records for "
                                    "{0} do not resolve to "
                                    "{1}".format(hostname, address))
        # TLS testing is unsupported on Windows; once skip_tls flips here,
        # the log warning is not repeated for the remaining hosts
        if not skip_tls and platform.system() == "Windows":
            logging.warning("Testing TLS is not supported on Windows")
            skip_tls = True
        if skip_tls:
            logging.debug("Skipping TLS/SSL tests on {0}".format(
                host["hostname"]))
        else:
            try:
                # test_starttls/test_tls consult shared caches so repeated
                # hosts across domains are only probed once
                starttls = test_starttls(host["hostname"],
                                         cache=STARTTLS_CACHE)
                if not starttls:
                    warnings.append("STARTTLS is not supported on {0}".format(
                        host["hostname"]))
                tls = test_tls(host["hostname"], cache=TLS_CACHE)
                if not tls:
                    warnings.append("SSL/TLS is not supported on {0}".format(
                        host["hostname"]))
                host["tls"] = tls
                host["starttls"] = starttls
            except DNSException as warning:
                warnings.append(str(warning))
                tls = False
                starttls = False
                host["tls"] = tls
                host["starttls"] = starttls
            except SMTPError as error:
                tls = False
                starttls = False
                warnings.append("{0}: {1}".format(host["hostname"], error))
                host["tls"] = tls
                host["starttls"] = starttls
    return OrderedDict([("hosts", hosts), ("warnings", warnings)])
def get_nameservers(domain, approved_nameservers=None,
                    nameservers=None, resolver=None, timeout=2.0):
    """
    Gets a list of nameservers for a given domain

    Args:
        domain (str): A domain name
        approved_nameservers (list): A list of approved nameserver substrings
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for a record from DNS

    Returns:
        Dict: A dictionary with the following keys:
              - ``hostnames`` - A list of nameserver hostnames
              - ``warnings`` - A list of warnings
    """
    logging.debug("Getting NS records on {0}".format(domain))
    warnings = []
    ns_records = _get_nameservers(domain, nameservers=nameservers,
                                  resolver=resolver, timeout=timeout)
    if approved_nameservers:
        approved_nameservers = [h.lower() for h in approved_nameservers]
    for ns_hostname in ns_records:
        if approved_nameservers:
            # A nameserver counts as approved when any approved substring
            # occurs anywhere in its lower-cased hostname.
            is_approved = any(candidate in ns_hostname.lower()
                              for candidate in approved_nameservers)
            if not is_approved:
                warnings.append("Unapproved nameserver: {0}".format(
                    ns_hostname
                ))
    return OrderedDict([("hostnames", ns_records), ("warnings", warnings)])
def test_dnssec(domain, nameservers=None, timeout=2.0):
    """
    Check for DNSSEC on the given domain

    Args:
        domain (str): The domain to check
        nameservers (list): A list of nameservers to query
        timeout (float): Timeout in seconds

    Returns:
        bool: DNSSEC status
    """
    if nameservers is None:
        nameservers = dns.resolver.Resolver().nameservers
    # Ask for the NS RRset of the base domain with the DNSSEC OK bit set.
    query = dns.message.make_query(get_base_domain(domain),
                                   dns.rdatatype.NS,
                                   want_dnssec=True)
    for nameserver in nameservers:
        try:
            answer = dns.query.udp(query, nameserver, timeout=timeout)
            if answer is None:
                continue
            for rrset in answer.answer:
                # DNSSEC is considered active when an RRSIG record comes
                # back and the resolver set the Authenticated Data flag.
                if rrset.rdtype == dns.rdatatype.RRSIG:
                    if answer.flags & dns.flags.AD:
                        return True
        except Exception as e:
            logging.debug("DNSSEC query error: {0}".format(e))
    return False
def check_domains(domains, parked=False,
                  approved_nameservers=None,
                  approved_mx_hostnames=None,
                  skip_tls=False,
                  include_dmarc_tag_descriptions=False,
                  nameservers=None, resolver=None, timeout=2.0, wait=0.0):
    """
    Check the given domains for SPF and DMARC records, parse them, and return
    them

    Args:
        domains (list): A list of domains to check
        parked (bool): Indicates that the domains are parked
        approved_nameservers (list): A list of approved nameservers
        approved_mx_hostnames (list): A list of approved MX hostname
        skip_tls (bool): Skip STARTTLS testing
        include_dmarc_tag_descriptions (bool): Include descriptions of DMARC
                                               tags and/or tag values in the
                                               results
        nameservers (list): A list of nameservers to query
        resolver (dns.resolver.Resolver): A resolver object to use for DNS
                                          requests
        timeout (float): number of seconds to wait for an answer from DNS
        wait (float): number of seconds to wait between processing domains

    Returns:
        An ``OrderedDict`` (single domain) or ``list`` of ``OrderedDict``
        (multiple domains) with the following keys

        - ``domain`` - The domain name
        - ``base_domain`` The base domain
        - ``dnssec`` - See :func:`checkdmarc.test_dnssec`
        - ``ns`` - See :func:`checkdmarc.check_ns`
        - ``mx`` - See :func:`checkdmarc.get_mx_hosts`
        - ``spf`` -  A ``valid`` flag, plus the output of
          :func:`checkdmarc.parse_spf_record` or an ``error``
        - ``dmarc`` - A ``valid`` flag, plus the output of
          :func:`checkdmarc.parse_dmarc_record` or an ``error``
    """
    # Normalise input: strip trailing dots/newlines, lower-case, keep only
    # the first CSV column, then de-duplicate and sort.
    domains = sorted(list(set(
        map(lambda d: d.rstrip(".\r\n").strip().lower().split(",")[0],
            domains))))
    # Drop entries that cannot be domains (no dot) and empty strings.
    not_domains = []
    for domain in domains:
        if "." not in domain:
            not_domains.append(domain)
    for domain in not_domains:
        domains.remove(domain)
    while "" in domains:
        domains.remove("")
    results = []
    for domain in domains:
        domain = domain.lower()
        logging.debug("Checking: {0}".format(domain))
        # Seed the result with placeholders so key order is stable even if
        # a later check fails.
        domain_results = OrderedDict(
            [("domain", domain), ("base_domain", get_base_domain(domain)),
             ("dnssec", None), ("ns", []), ("mx", [])])
        domain_results["dnssec"] = test_dnssec(
            domain,
            nameservers=nameservers,
            timeout=timeout
        )
        domain_results["ns"] = check_ns(
            domain,
            approved_nameservers=approved_nameservers,
            nameservers=nameservers,
            resolver=resolver, timeout=timeout
        )
        domain_results["mx"] = check_mx(
            domain,
            approved_mx_hostnames=approved_mx_hostnames,
            skip_tls=skip_tls,
            nameservers=nameservers,
            resolver=resolver,
            timeout=timeout
        )
        domain_results["spf"] = check_spf(
            domain,
            parked=parked,
            nameservers=nameservers,
            resolver=resolver,
            timeout=timeout
        )
        domain_results["dmarc"] = check_dmarc(
            domain,
            parked=parked,
            include_dmarc_tag_descriptions=include_dmarc_tag_descriptions,
            nameservers=nameservers,
            resolver=resolver,
            timeout=timeout
        )
        results.append(domain_results)
        # Optional rate limiting between domains.
        if wait > 0.0:
            logging.debug("Sleeping for {0} seconds".format(wait))
            sleep(wait)
    # A single domain is returned unwrapped rather than as a 1-element list.
    if len(results) == 1:
        results = results[0]

    return results
def check_ns(domain, approved_nameservers=None,
             nameservers=None, resolver=None, timeout=2.0):
    """Return nameserver results for *domain*, converting DNS failures into
    an ``error`` entry instead of raising."""
    try:
        return get_nameservers(
            domain,
            approved_nameservers=approved_nameservers,
            nameservers=nameservers, resolver=resolver,
            timeout=timeout)
    except DNSException as error:
        return OrderedDict([("hostnames", []),
                            ("error", error.__str__())])
def check_mx(domain, approved_mx_hostnames=None, skip_tls=False,
             nameservers=None, resolver=None, timeout=2.0):
    """Return MX host results for *domain*, converting DNS failures into an
    ``error`` entry instead of raising."""
    try:
        return get_mx_hosts(
            domain,
            skip_tls=skip_tls,
            approved_hostnames=approved_mx_hostnames,
            nameservers=nameservers, resolver=resolver,
            timeout=timeout)
    except DNSException as error:
        return OrderedDict([("hosts", []),
                            ("error", error.__str__())])
def check_dmarc(domain, parked=False,
                include_dmarc_tag_descriptions=False,
                nameservers=None, resolver=None, timeout=2.0):
    """Query and parse the DMARC record for *domain*.

    On a DMARC failure the result is flagged invalid and carries the error
    message plus any extra data attached to the exception.
    """
    results = OrderedDict([("record", None), ("valid", True),
                           ("location", None)])
    try:
        query = query_dmarc_record(domain,
                                   nameservers=nameservers,
                                   resolver=resolver,
                                   timeout=timeout)
        results["record"] = query["record"]
        results["location"] = query["location"]
        parsed = parse_dmarc_record(
            query["record"],
            query["location"],
            parked=parked,
            include_tag_descriptions=include_dmarc_tag_descriptions,
            nameservers=nameservers, resolver=resolver,
            timeout=timeout)
        # Combine warnings from the query and the parse phases.
        results["warnings"] = query["warnings"]
        results["tags"] = parsed["tags"]
        results["warnings"] += parsed["warnings"]
    except DMARCError as error:
        results["error"] = str(error)
        results["valid"] = False
        if hasattr(error, "data") and error.data:
            for key in error.data:
                results[key] = error.data[key]
    return results
def check_spf(domain, parked=False, nameservers=None, resolver=None,
              timeout=2.0):
    """Query and parse the SPF record for *domain*.

    On an SPF failure the result is flagged invalid and carries the error
    message plus any extra data attached to the exception.
    """
    results = OrderedDict(
        [("record", None), ("valid", True), ("dns_lookups", None),
         ("dns_void_lookups", None)])
    try:
        query = query_spf_record(
            domain,
            nameservers=nameservers, resolver=resolver,
            timeout=timeout)
        results["record"] = query["record"]
        results["warnings"] = query["warnings"]
        parsed = parse_spf_record(results["record"],
                                  domain,
                                  parked=parked,
                                  nameservers=nameservers,
                                  resolver=resolver,
                                  timeout=timeout)
        results["dns_lookups"] = parsed["dns_lookups"]
        results["dns_void_lookups"] = parsed["dns_void_lookups"]
        results["parsed"] = parsed["parsed"]
        results["warnings"] += parsed["warnings"]
    except SPFError as error:
        results["error"] = str(error)
        # Only the lookup counter is dropped on error; dns_void_lookups
        # stays at None, matching the original output shape.
        del results["dns_lookups"]
        results["valid"] = False
        if hasattr(error, "data") and error.data:
            for key in error.data:
                results[key] = error.data[key]
    return results
def results_to_json(results):
    """
    Converts a dictionary of results to a JSON string

    Args:
        results (dict): A dictionary of results

    Returns:
        str: Results in JSON format (pretty-printed, UTF-8 text kept as-is)
    """
    serialized = json.dumps(results, ensure_ascii=False, indent=2)
    return serialized
def results_to_csv_rows(results):
    """
    Converts a dictionary of results list of CSV row dicts

    Args:
        results (dict): A dictionary of results

    Returns:
        list: A list of CSV row dicts
    """
    rows = []
    if type(results) is OrderedDict:
        # A single-domain result arrives as one OrderedDict; wrap it so the
        # loop below can treat both shapes uniformly.
        results = [results]
    for result in results:
        row = dict()
        ns = result["ns"]
        mx = result["mx"]
        spf = result["spf"]
        dmarc = result["dmarc"]
        row["domain"] = result["domain"]
        row["base_domain"] = result["base_domain"]
        row["dnssec"] = result["dnssec"]
        row["ns"] = "|".join(ns["hostnames"])
        if "error" in ns:
            row["ns_error"] = ns["error"]
        else:
            row["ns_warnings"] = "|".join(ns["warnings"])
        row["mx"] = "|".join(list(
            map(lambda r: "{0} {1}".format(r["preference"], r["hostname"]),
                mx["hosts"])))
        tls = None
        try:
            # Bug fix: this column previously read each host's "starttls"
            # value and stringified it, so the `is False` short-circuit
            # could never fire. Read the boolean "tls" value directly and
            # report False as soon as any host lacks TLS.
            for host in mx["hosts"]:
                tls = host["tls"]
                if tls is False:
                    break
        except KeyError:
            # The user might opt to skip the STARTTLS test
            pass
        finally:
            row["tls"] = tls
        starttls = None
        try:
            for host in mx["hosts"]:
                starttls = host["starttls"]
                if starttls is False:
                    # Bug fix: stop at the first failing host instead of
                    # letting a later host overwrite the False result.
                    break
        except KeyError:
            # The user might opt to skip the STARTTLS test
            pass
        finally:
            row["starttls"] = starttls
        if "error" in mx:
            row["mx_error"] = mx["error"]
        else:
            row["mx_warnings"] = "|".join(mx["warnings"])
        row["spf_record"] = spf["record"]
        row["spf_valid"] = spf["valid"]
        if "error" in spf:
            row["spf_error"] = spf["error"]
        else:
            row["spf_warnings"] = "|".join(spf["warnings"])
        row["dmarc_record"] = dmarc["record"]
        row["dmarc_record_location"] = dmarc["location"]
        row["dmarc_valid"] = dmarc["valid"]
        if "error" in dmarc:
            row["dmarc_error"] = dmarc["error"]
        else:
            # Flatten the parsed DMARC tags into scalar CSV columns.
            row["dmarc_adkim"] = dmarc["tags"]["adkim"]["value"]
            row["dmarc_aspf"] = dmarc["tags"]["aspf"]["value"]
            row["dmarc_fo"] = ":".join(dmarc["tags"]["fo"]["value"])
            row["dmarc_p"] = dmarc["tags"]["p"]["value"]
            row["dmarc_pct"] = dmarc["tags"]["pct"]["value"]
            row["dmarc_rf"] = ":".join(dmarc["tags"]["rf"]["value"])
            row["dmarc_ri"] = dmarc["tags"]["ri"]["value"]
            row["dmarc_sp"] = dmarc["tags"]["sp"]["value"]
            if "rua" in dmarc["tags"]:
                addresses = dmarc["tags"]["rua"]["value"]
                addresses = list(map(lambda u: "{}:{}".format(
                    u["scheme"],
                    u["address"]), addresses))
                row["dmarc_rua"] = "|".join(addresses)
            if "ruf" in dmarc["tags"]:
                addresses = dmarc["tags"]["ruf"]["value"]
                addresses = list(map(lambda u: "{}:{}".format(
                    u["scheme"],
                    u["address"]), addresses))
                row["dmarc_ruf"] = "|".join(addresses)
            row["dmarc_warnings"] = "|".join(dmarc["warnings"])
        rows.append(row)

    return rows
def results_to_csv(results):
    """
    Converts a dictionary of results to CSV

    Args:
        results (dict): A dictionary of results

    Returns:
        str: A CSV of results
    """
    # Fixed column order for the CSV header.
    fields = ["domain", "base_domain", "dnssec", "spf_valid", "dmarc_valid",
              "dmarc_adkim", "dmarc_aspf",
              "dmarc_fo", "dmarc_p", "dmarc_pct", "dmarc_rf", "dmarc_ri",
              "dmarc_rua", "dmarc_ruf", "dmarc_sp",
              "mx", "tls", "starttls", "spf_record", "dmarc_record",
              "dmarc_record_location", "mx_error",
              "mx_warnings", "spf_error",
              "spf_warnings", "dmarc_error", "dmarc_warnings",
              "ns", "ns_error", "ns_warnings"]
    buffer = StringIO(newline="\n")
    writer = DictWriter(buffer, fieldnames=fields)
    writer.writeheader()
    writer.writerows(results_to_csv_rows(results))
    buffer.flush()
    return buffer.getvalue()
def output_to_file(path, content):
    """
    Write given content to the given path

    Args:
        path (str): A file path
        content (str): JSON or CSV text
    """
    # UTF-8 with Unix newlines; undecodable characters are dropped rather
    # than raising.
    with open(path, mode="w", newline="\n", encoding="utf-8",
              errors="ignore") as handle:
        handle.write(content)
def _main():
    """Called when the module in executed"""
    arg_parser = ArgumentParser(description=__doc__)
    arg_parser.add_argument("domain", nargs="+",
                            help="one or more domains, or a single path to a "
                                 "file containing a list of domains")
    arg_parser.add_argument("-p", "--parked", help="indicate that the "
                                                   "domains are parked",
                            action="store_true", default=False)
    arg_parser.add_argument("--ns", nargs="+",
                            help="approved nameserver substrings")
    arg_parser.add_argument("--mx", nargs="+",
                            help="approved MX hostname substrings")
    arg_parser.add_argument("-d", "--descriptions", action="store_true",
                            help="include descriptions of DMARC tags in "
                                 "the JSON output")
    arg_parser.add_argument("-f", "--format", default="json",
                            help="specify JSON or CSV screen output format")
    arg_parser.add_argument("-o", "--output", nargs="+",
                            help="one or more file paths to output to "
                                 "(must end in .json or .csv) "
                                 "(silences screen output)")
    arg_parser.add_argument("-n", "--nameserver", nargs="+",
                            help="nameservers to query")
    arg_parser.add_argument("-t", "--timeout",
                            help="number of seconds to wait for an answer "
                                 "from DNS (default 2.0)",
                            type=float,
                            default=2.0)
    arg_parser.add_argument("-v", "--version", action="version",
                            version=__version__)
    # NOTE(review): the stray trailing comma below makes this statement a
    # 1-tuple expression — harmless, but it can be removed.
    arg_parser.add_argument("-w", "--wait", type=float,
                            help="number of seconds to wait between "
                                 "checking domains (default 0.0)",
                            default=0.0),
    arg_parser.add_argument("--skip-tls", action="store_true",
                            help="skip TLS/SSL testing")
    arg_parser.add_argument("--debug", action="store_true",
                            help="enable debugging output")

    args = arg_parser.parse_args()

    logging_format = "%(asctime)s - %(levelname)s: %(message)s"
    logging.basicConfig(level=logging.WARNING, format=logging_format)
    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)
        logging.debug("Debug output enabled")
    domains = args.domain
    # A single argument that names an existing file is treated as a file
    # containing one domain per line (first CSV column only).
    if len(domains) == 1 and os.path.exists(domains[0]):
        with open(domains[0]) as domains_file:
            domains = sorted(list(set(
                map(lambda d: d.rstrip(".\r\n").strip().lower().split(",")[0],
                    domains_file.readlines()))))
            # Drop entries without a dot — they cannot be domains.
            not_domains = []
            for domain in domains:
                if "." not in domain:
                    not_domains.append(domain)
            for domain in not_domains:
                domains.remove(domain)

    results = check_domains(domains, skip_tls=args.skip_tls,
                            parked=args.parked,
                            approved_nameservers=args.ns,
                            approved_mx_hostnames=args.mx,
                            include_dmarc_tag_descriptions=args.descriptions,
                            nameservers=args.nameserver, timeout=args.timeout,
                            wait=args.wait)

    if args.output is None:
        # No output paths: print to the screen in the requested format.
        if args.format.lower() == "json":
            results = results_to_json(results)
        elif args.format.lower() == "csv":
            results = results_to_csv(results)
        print(results)
    else:
        # Write each output path in the format implied by its extension.
        for path in args.output:
            json_path = path.lower().endswith(".json")
            csv_path = path.lower().endswith(".csv")

            if not json_path and not csv_path:
                logging.error(
                    "Output path {0} must end in .json or .csv".format(path))
            else:
                if path.lower().endswith(".json"):
                    output_to_file(path, results_to_json(results))
                elif path.lower().endswith(".csv"):
                    output_to_file(path, results_to_csv(results))


if __name__ == "__main__":
    _main()
|
import numpy as np

# Load the obstacle list; each row is presumably (x, y, radius) — see the
# collision_check call sites below. TODO confirm against the CSV.
obstacles = np.loadtxt("obstacles - first.csv", delimiter=',')
#print (obstacles)

# Sampling window and start/goal configuration for the PRM planner.
start_distr = -0.5
end_distr = 0.5
start_x = -0.5
start_y = -0.5
goal_x = 0.5
goal_y = 0.5
k_nn = 3

# Node rows are (id, x, y, straight-line distance to the goal).
nodes = np.array([[1,-0.5,-0.5,np.sqrt((goal_x-start_x)**2 + (goal_y-start_y)**2)]])

# Draw 30 uniformly random sample points inside the window.
sample_x = np.random.uniform(start_distr, end_distr, 30)
sample_y = np.random.uniform(start_distr, end_distr, 30)

for i in range(len(sample_x)):
    nodes = np.concatenate((nodes, np.array([[i+2, sample_x[i], sample_y[i], np.sqrt((goal_x-sample_x[i])**2 + (goal_y-sample_y[i])**2)]])), axis=0)

nodes = np.concatenate((nodes, np.array([[len(sample_x)+2, goal_x, goal_y, np.sqrt((goal_x-goal_x)**2 + (goal_y-goal_y)**2)]])))

# NOTE(review): this hard-coded node table overwrites everything built
# above, so the random sampling loop is effectively dead code — confirm
# whether the fixed table is intentional (e.g. for reproducible testing).
nodes = np.array([[1,-0.5,-0.5,1.4142],[2,-0.09,-0.4,1.0762],[3,-0.285,-0.305,1.1244],[4,0.0575,-0.225,0.8494],[5,-0.0525,-0.0175,0.7604],[6,-0.37,0.3,0.8927],[7,0.3525,-0.0525,0.5719],[8,0.0625,0.255,0.5014],[9,-0.1,0.3725,0.6134],[10,0.4275,0.195,0.3135],[11,0.345,0.3525,0.214],[12,0.5,0.5,0]])
#nodes = np.loadtxt("nodes_test.csv", delimiter=',')
#print (nodes)

np.savetxt("results/nodes.csv", nodes, fmt=['%d','%1.6f','%1.6f','%1.6f'], delimiter = ",")

# Earlier collision-check implementation, kept disabled for reference.
'''
def collision_check(ax,bx,ay,by,cx,cy,r):
    #compute euclidean distance of the two points
    eucl_dist_p = np.sqrt((bx-ax)**2 + (by-ay)**2)
    #compute direction vector
    dx = (bx-ax) / eucl_dist_p
    dy = (by-ay) / eucl_dist_p
    #the point of line ab closest to the obstacle (circle) center needs to be found
    closest_to_center = dx*(cx-ax) + dy*(cy-ay)
    #compute the coordinates of the point closest_to_center
    closest_x = closest_to_center * dx + ax
    closest_y = closest_to_center * dy + ay
    #compute the euclidean distance between closest_to_center and the circle center
    eucl_dist_c = np.sqrt((closest_x-cx)**2 + (closest_y-cy)**2)
    if eucl_dist_c < r:
        dt = np.sqrt(r**2 - eucl_dist_c**2)
        t1 = closest_to_center - dt
        t2 = closest_to_center + dt
        if t1 < 0 or t1 > 1 and t2 < 0 or t2 > 1:
            return False
        else:
            return True
    else:
        return False
'''
#collision check new
def collision_check(x2, x1, y2, y1, cx, cy, r):
    """Segment/circle intersection test.

    Returns True when the segment from (x1, y1) to (x2, y2) touches or
    crosses the circle centred at (cx, cy) with radius r.
    """
    # Either endpoint lying inside the circle is an immediate hit.
    if pointCircle(x1, y1, cx, cy, r) or pointCircle(x2, y2, cx, cy, r):
        return True
    # Project the circle centre onto the segment's carrier line.
    seg_len = np.sqrt(((x1 - x2) * (x1 - x2)) + ((y1 - y2) * (y1 - y2)))
    t = (((cx - x1) * (x2 - x1)) + ((cy - y1) * (y2 - y1))) / seg_len ** 2
    foot_x = x1 + (t * (x2 - x1))
    foot_y = y1 + (t * (y2 - y1))
    # The projection must actually fall on the segment, not its extension.
    if linePoint(x1, y1, x2, y2, foot_x, foot_y) == False:
        #print (onSegment)
        return False
    # Hit when the foot of the projection is within the radius.
    gap = np.sqrt(((foot_x - cx) * (foot_x - cx)) + ((foot_y - cy) * (foot_y - cy)))
    if gap <= r:
        return True
    return False
def pointCircle(px, py, cx, cy, r):
    """Return True when point (px, py) lies on or inside the circle centred
    at (cx, cy) with radius r."""
    offset_x = px - cx
    offset_y = py - cy
    if np.sqrt((offset_x * offset_x) + (offset_y * offset_y)) <= r:
        return True
    return False
def linePoint(x1, y1, x2, y2, px, py):
    """Return True when point (px, py) lies on the segment (x1, y1)-(x2, y2).

    Bug fix: the original mixed x and y coordinates inside its distance
    formulas (e.g. ``sqrt((px-py)**2 + (x1-y1)**2)``) and declared its
    parameters in a scrambled order relative to the call site, so the
    on-segment test was meaningless. The distances are now the true
    Euclidean distances from the point to each segment endpoint.
    """
    # Distance from the point to each endpoint.
    d1 = np.sqrt((px - x1) ** 2 + (py - y1) ** 2)
    d2 = np.sqrt((px - x2) ** 2 + (py - y2) ** 2)
    lineLen = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
    # On the segment, d1 + d2 equals the segment length; allow a small
    # tolerance for floating-point error.
    buffer = 0.01
    if d1 + d2 >= lineLen - buffer and d1 + d2 <= lineLen + buffer:
        return True
    return False
# Spot-check of the collision test against the first obstacle.
print (collision_check(-0.052500,-0.5,-0.017500,-0.5,-0.285,-0.075,0.33))
#print (collision_check(-0.052500,-0.5,-0.017500,-0.5,0.365,-0.295,0.27))
#print (collision_check(-0.052500,-0.5,-0.017500,-0.5,0.205,0.155,0.15))
cx = 0
cy = 0
r = 0.3
# Edge rows are (node_a_id, node_b_id, cost); the first row is a dummy
# seed removed after the build loop.
edges = np.array([[0, 0, 0]])
i=0
'''
while i < len(nodes):
    for k in range(len(nodes)):
        for o in range(len(obstacles)):
            if collision_check(nodes[k][1], nodes[i][1], nodes[k][2], nodes[i][2], obstacles[o][0], obstacles[o][1], obstacles[o][2]):
                edges = np.concatenate((edges, np.array([[nodes[k][0], nodes[i][0], 1000]])))
            else:
                edges = np.concatenate((edges, np.array([[nodes[k][0], nodes[i][0], np.sqrt((nodes[k][1]-nodes[i][1])**2 + (nodes[k][2]-nodes[i][2])**2)]])))
    i=i+1
'''
# Build a complete graph: one edge per node pair with its Euclidean cost,
# then add a 1000 penalty for each obstacle the edge collides with.
while i < len(nodes):
    for k in range(len(nodes)):
        edges = np.concatenate((edges, np.array([[nodes[k][0], nodes[i][0], np.sqrt((nodes[k][1]-nodes[i][1])**2 + (nodes[k][2]-nodes[i][2])**2)]])))
        #print (edges[1])
        for o in range(len(obstacles)):
            #print (collision_check(nodes[k][1], nodes[i][1], nodes[k][2], nodes[i][2], obstacles[o][0], obstacles[o][1], obstacles[o][2]))
            if collision_check(nodes[k][1], nodes[i][1], nodes[k][2], nodes[i][2], obstacles[o][0], obstacles[o][1], obstacles[o][2]):
                #print (edges[-1])
                edges[-1][2] = edges[-1][2] + 1000
                #print (edges[-1])
    i=i+1
'''
edges_new = np.array([[0, 0, 0]])
while i < len(nodes):
    for k in range(len(nodes)):
        collision_sum = 0
        edges = np.concatenate((edges, np.array([[nodes[k][0], nodes[i][0], np.sqrt((nodes[k][1]-nodes[i][1])**2 + (nodes[k][2]-nodes[i][2])**2)]])))
        #print (edges[1])
        for o in range(len(obstacles)):
            if collision_check(nodes[k][1], nodes[i][1], nodes[k][2], nodes[i][2], obstacles[o][0], obstacles[o][1], obstacles[o][2]):
                collision_sum = collision_sum + 1000
        #print (edges_new)
        if collision_sum < 1000:
            edges_new = np.concatenate((edges_new, np.array([[nodes[k][0], nodes[i][0], np.sqrt((nodes[k][1]-nodes[i][1])**2 + (nodes[k][2]-nodes[i][2])**2)]])))
    i=i+1
'''
#print (edges)
# Drop the dummy seed row, then view the edge list as an NxNx3 adjacency
# structure.
edges = np.delete(edges, 0, 0)
edges = np.reshape(edges, (len(nodes), len(nodes), 3))
#print (edges)
edges1 = np.array([[0,0,0]])
#for e in range(len(edges)):
    #edges[e] = edges[e][edges[e][:,2].argsort()]
    #edges1 = np.concatenate((edges1, np.array(edges[e][0:k_nn + 1])))
    #edges1 = np.concatenate((edges1, np.array(edges[e])))
    #edges1 = np.concatenate((edges1, np.array(edges[e][1:len(edges[e])])))
#print (nodes[int(edges[5][2][1])-1][1])
#print (edges[5][2][1])
'''
for e in range(len(edges)):
    for o in range(len(obstacles)):
        for p in range(len(edges[e])):
            if collision_check(nodes1[int(edges[e][p][1])-1][1], nodes1[int(edges[e][p][0])-1][1], nodes1[int(edges[e][p][1])-1][2], nodes1[int(edges[e][p][0])-1][2], edges[e][p][2], obstacles[o][0], obstacles[o][1], obstacles[o][2]):
                edges[e][p][2] = 1000
    edges[e] = edges[e][edges[e][:,2].argsort()]
    print (edges)
    edges1 = np.concatenate((edges1, np.array(edges[e][1:k_nn + 1])))
    #print (edges1)
'''
#eliminate doubles
#print (edges1)
#edges2 = np.array([[0,0,0]])
edges3 = np.array([[0,0,0]])
#for d in range(len(edges1)):
    #if edges1[d][0] > edges1[d][1] and edges1[d][2] < 1000:
    #if edges1[d][2] < 1000:
        #edges2 = np.concatenate((edges2, np.array([edges1[d]])))
# NOTE(review): this loop iterates over range(len(edges)) but indexes
# edges1, which starts with a single row — edges1[d] will raise an
# IndexError as soon as d exceeds the rows appended so far. The intent
# (keep one direction of each non-colliding edge?) needs confirming.
for d in range(len(edges)):
    if edges1[d][0] > edges1[d][1] and edges1[d][2] < 1000:
    #if edges[d][0] != edges[d][1] and edges[d][0] > edges[d][1]:
        edges1 = np.concatenate((edges1, np.array([edges1[d]])))
edges3 = np.delete(edges3, 0, 0)
#print (edges2[1][1])
#print (nodes1[int(edges2[17][1])][1])
#print (edges2[1][2])
#print (collision_check(0.5, 0.3, 0.4, -0.2, 0.42585, 0.5, 0.3, 0.2))
#print (len(edges2))
#print (edges2)
#bx ax by ay
#2,0.023011,0.325946,0.507753
#12,0.500000,0.500000,0.000000
'''
print (collision_check(0.063701,0.5,0.325946,0.5,0.0, 0.0, 0.2))
print (collision_check(0.023011,0.5,0.325946,0.5,0.0, 0.1, 0.2))
print (collision_check(0.023011,0.5,0.325946,0.5,0.3, 0.2, 0.2))
print (collision_check(0.023011,0.5,0.325946,0.5,-0.3, -0.2, 0.2))
print (collision_check(0.023011,0.5,0.325946,0.5,-0.1, -0.4, 0.2))
print (collision_check(0.023011,0.5,0.325946,0.5,-0.2, 0.3, 0.2))
print (collision_check(0.023011,0.5,0.325946,0.5,0.3, -0.3, 0.2))
print (collision_check(0.023011,0.5,0.325946,0.5,0.1, 0.4, 0.2))
print (collision_check(0.063701,-0.5,-0.438482,-0.5,0.0, 0.0, 0.2))
print (collision_check(0.063701,-0.5,-0.438482,-0.5,0.0, 0.1, 0.2))
print (collision_check(0.063701,-0.5,-0.438482,-0.5,0.3, 0.2, 0.2))
print (collision_check(0.063701,-0.5,-0.438482,-0.5,-0.3, -0.2, 0.2))
print (collision_check(0.063701,-0.5,-0.438482,-0.5,-0.1, -0.4, 0.2))
print (collision_check(0.063701,-0.5,-0.438482,-0.5,-0.2, 0.3, 0.2))
print (collision_check(0.063701,-0.5,-0.438482,-0.5,0.3, -0.3, 0.2))
print (collision_check(0.063701,-0.5,-0.438482,-0.5,0.1, 0.4, 0.2))
j=0
while j < len(edges2):
    #print (nodes[int(edges2[j][0])-1][1], nodes[int(edges2[j][1])-1][1], nodes[int(edges2[j][0])-1][2], nodes[int(edges2[j][1])-1][2], edges2[j][2], obstacles[o][0], obstacles[o][1], obstacles[o][2])
    for o in range(len(obstacles)):
        if collision_check(nodes[int(edges2[j][1])][1], nodes[int(edges2[j][0])][1], nodes[int(edges2[j][1])][2], nodes[int(edges2[j][0])][2], edges[j][2], obstacles[o][0], obstacles[o][1], obstacles[o][2]):
            #print (nodes[int(edges2[j][0])-1][1], nodes[int(edges2[j][1])-1][1], nodes[int(edges2[j][0])-1][2], nodes[int(edges2[j][1])-1][2], edges2[j][2], obstacles[o][0], obstacles[o][1], obstacles[o][2])
            edges2[j][2] = 1000
    j=j+1
'''
np.savetxt("results/edges.csv", edges1, fmt=['%d','%d','%1.6f'], delimiter = ",")
#print (edges2)
#nodes1[edges[e][1]]
# alligator
import re
from util import hook
# dub_url = "http://tubedubber.com/#%s:%s:0:100:0:%s:1"
# URL template: {0} = video id, {1} = audio id (audio always starts at 0).
dub_url = "http://www.youdubber.com/index.php?video={}&audio={}&audio_start=0"
# Stock YouTube clips used as canned video tracks by the commands below.
whale_url = "http://www.youtube.com/watch?v=ZS_6-IwMPjM"
cow_url = "http://www.youtube.com/watch?v=lXKDu6cdXLI"
lawn_url = "http://www.youtube.com/watch?v=r6FpEjY1fg8"

# sw8 regex by commonwealth bro lilpp
# Captures the video id from youtube.com/watch?v=... or youtu.be/... URLs.
yre = "(?:http|https)://(?:www\.)?(?:youtube\.com/watch\?v=|youtu\.be/)([^&\n]+)"
@hook.command
def lawnmower(inp):
    """Dub: lawnmower clip as the video track, the given link as audio."""
    return dub(" ".join([lawn_url, inp]))
@hook.command
def whale(inp):
    """Dub: whale clip as the video track, the given link as audio."""
    return dub(" ".join([whale_url, inp]))
@hook.command
def cow(inp):
    """Dub: cow clip as the video track, the given link as audio."""
    return dub(" ".join([cow_url, inp]))
@hook.command
def dub(inp):
    '.dub <vid> <audio> [audio start time] -- tubedubber'
    # Expected input: "<video url> <audio url> [start seconds]".
    ar = inp.split(" ")
    time = 0
    if len(ar) == 3:
        time = ar[2]
    # NOTE(review): `time` is parsed but never used — dub_url hard-codes
    # audio_start=0, so the optional start time is silently ignored.
    # TODO confirm whether audio_start should be wired to `time`.
    vid = re.match(yre, ar[0]).group(1)
    audio = re.match(yre, ar[1]).group(1)
    return dub_url.format(vid, audio)
@hook.command
def worldstar(inp):
    """Build a youdubber URL playing the given video (optionally from a
    start offset in seconds) over the Worldstar audio clip."""
    parts = inp.split(' ')
    if len(parts) > 1:
        # "<video url> <start seconds>"
        vid = re.match(yre, parts[0]).group(1)
        try:
            time = int(parts[1])
        except ValueError:
            return 'that is not a time'
    else:
        # Bare video URL: start from the beginning.
        vid = re.match(yre, inp).group(1)
        time = 0
    return 'http://www.youdubber.com/index.php?video={}&video_start={}&audio=uEgtNSBa4Zk&audio_start=0'.format(vid, time)
|
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
# from .models import Profile
class UserRegisterForm(UserCreationForm):
    """Registration form based on Django's built-in UserCreationForm.

    All fields keep the form-field default of ``required = True``.
    """
    class Meta:
        # Binding the form to the User model means form.save() creates a
        # User row.
        model = User
        fields = ['username', 'password1', 'password2']
import FWCore.ParameterSet.Config as cms
from ..modules.ecalRecHit_cfi import *
# Task that runs only the ecalRecHit module ("NoTP" presumably means
# without the trigger-primitive step — inferred from the name; confirm).
ecalRecHitNoTPTask = cms.Task(
    ecalRecHit
)
|
# -*- coding: utf-8 -*-
"""
824. Goat Latin
@link https://leetcode.com/problems/goat-latin/
"""
class Solution:
    def toGoatLatin(self, S: str) -> str:
        """Translate a sentence into Goat Latin.

        Rules per word i (1-based): a word starting with a vowel gets
        'ma' appended; otherwise its first letter moves to the end before
        'ma'; then i copies of 'a' are appended.

        Improvements over the original: the five chained ``startswith``
        calls collapse into a single vowel-set membership test, and the
        quadratic ``result += ...`` string build becomes a ``join``.
        """
        words = S.split(' ')
        pieces = []
        for i, word in enumerate(words, start=1):
            if word[0].lower() in 'aeiou':
                latin = word + 'ma'
            else:
                # Move the leading consonant to the end.
                latin = word[1:] + word[0] + 'ma'
            pieces.append(latin + 'a' * i)
        return ' '.join(pieces)
if __name__ == '__main__':
    # Smoke-test with the LeetCode sample sentence.
    print(Solution().toGoatLatin(S="Each word consists of lowercase and uppercase letters only"))
    # print('abdfd'.startswith('a'))
|
# Generated by Django 3.0.7 on 2020-07-16 09:03
from django.db import migrations
class Migration(migrations.Migration):
    """Renames ``part_costing.nre_amortizing_cost`` to ``total_nre_cost``."""

    dependencies = [
        ('rfqsite', '0012_forecast_aeqfc'),
    ]

    operations = [
        migrations.RenameField(
            model_name='part_costing',
            old_name='nre_amortizing_cost',
            new_name='total_nre_cost',
        ),
    ]
|
#I pledge on my honor that I have not given or received
# any unauthorized assistance on this project.
# William Sentosatio UID 114545749
# Project 1 CMSC421
import math, racetrack
import sys
# borrowing heuristics'
infinity = float('inf') # alternatively, we could import math.inf

# global variables (module-level cache shared by the heuristic functions)
g_fline = False   # finish line the cached grid was computed for
g_walls = False   # wall list the cached grid was computed for
grid = []         # per-cell heuristic distance values (built by edist_edited)
grids = []        # "important" gridpoints near wall corners (corner_grids)
around_fline = [] # gridpoints within 5 cells of the finish line (area_fline)
xmax = 0          # largest wall x coordinate (set by maximum_walls)
ymax = 0          # largest wall y coordinate (set by maximum_walls)
def h_proj1(state, fline, walls):
    """Heuristic for racetrack search: cached grid distance to the finish
    line plus penalties for states that cannot stop cleanly.

    state is ((x, y), (u, v)): position and velocity.
    """
    # edited h_walldist
    global g_fline, g_walls, around_fline
    # Rebuild the cached distance grid whenever the problem changes.
    if fline != g_fline or walls != g_walls or grid == []:
        edist_edited(fline, walls)
    ((x,y),(u,v)) = state
    hval = float(grid[x][y])

    # add a small penalty to favor short stopping distances
    au = abs(u); av = abs(v);
    # Stopping distance per axis when decelerating by 1 each step:
    # u + (u-1) + ... + 1 = u*(u+1)/2.
    sdu = au*(au+1)/2.0
    sdv = av*(av+1)/2.0
    sd = math.sqrt(sdu**2 + sdv**2)
    if near_fline(x,y,sdu,sdv,fline) == False:
        penalty = sd/10.0
    else:
        penalty = 0

    # compute location after fastest stop, and add a penalty if it goes through a wall
    if u < 0: sdu = -sdu
    if v < 0: sdv = -sdv
    sx = x + sdu
    sy = y + sdv
    if racetrack.crash([(x,y),(sx,sy)],walls) or going_away_from_fline((x,y), (sx,sy), fline):
        penalty += math.sqrt(au**2 + av**2)
    # Stopping on an "important" corner gridpoint (or very close to the
    # start) keeps the bare grid value; otherwise apply the penalties.
    if (sx,sy) in grids:
        return hval
    elif near_startpoint(sd):
        return hval
    else:
        hval = max(hval+penalty,sd)
    return hval
def near_startpoint(sd):
    """True when the stopping distance *sd* is small (strictly below 3)."""
    return sd < 3
def going_away_from_fline(c_point,f_point,fline):
    """Rough test of whether moving from c_point to f_point moves away from
    the finish line, by comparing lists of sampled distances to the line.

    NOTE(review): both branches test ``x1 == x2`` (the travel direction),
    yet the branch decides whether to sweep the finish line in y or in x.
    It looks like the finish line's own orientation (``xf1 == xf2``) was
    intended — confirm.
    NOTE(review): ``a <= b`` compares the two lists lexicographically
    element-by-element, not their minimum distances; presumably
    ``min(a) <= min(b)`` was intended — confirm.
    """
    x1,y1 = c_point
    x2,y2 = f_point
    ((xf1,yf1),(xf2,yf2)) = fline
    a = 0
    b = 0
    # for c_point
    if x1 == x2:
        a = [math.sqrt((xf1-x1)**2 + (y3-y1)**2) for y3 in range(min(yf1,yf2),max(yf1,yf2)+1)]
    else:
        a = [math.sqrt((x3-x1)**2 + (yf1-y1)**2) for x3 in range(min(xf1,xf2),max(xf1,xf2)+1)]

    # for f_point
    if x1 == x2:
        b = [math.sqrt((xf1-x2)**2 + (y3-y2)**2) for y3 in range(min(yf1,yf2),max(yf1,yf2)+1)]
    else:
        b = [math.sqrt((x3-x2)**2 + (yf1-y2)**2) for x3 in range(min(xf1,xf2),max(xf1,xf2)+1)]
    if a <= b:
        return True
    else:
        return False
def near_fline(x, y, u, v, fline):
    """Return True when the point (x, y) displaced by (u, v) lands within
    5 cells of the finish line's span (clamped to the track bounds).

    Bug fix: the y-axis upper bound was clamped with ``xmax`` instead of
    ``ymax``, so on tracks wider than they are tall, points well past the
    finish line vertically were still accepted.
    """
    global xmax, ymax
    ((x1, y1), (x2, y2)) = fline
    if max(0, x1 - 5) <= x + u <= min(x2 + 5, xmax) or \
            max(0, y1 - 5) <= y + v <= min(y2 + 5, ymax):
        return True
    else:
        return False
def edist_edited(fline, walls):
    """Build the global heuristic distance grid for the given finish line
    and walls, caching the inputs in g_fline/g_walls."""
    global grid, grids, g_fline, g_walls, xmax, ymax, around_fline
    # Establish track bounds from the wall coordinates.
    maximum_walls(walls)
    # get the list of gridpoints around the wall corners.
    area_fline(fline, walls)
    corner_grids(fline, walls)
    # calculate the heuristics value for each gridpoint based on the edistw_to_finish
    # grids has the list of "important" coordinates
    grid = [[edistw_to_finish((x,y), fline, walls) for y in range(ymax+1)] for x in range(xmax+1)]
    # update the heuristic values for the "important" grids that does not intersect wall
    # by increasing the other gridpoints heuristic values
    if len(walls) > 4:
        for x in range(xmax+1):
            for y in range(ymax+1):
                if (x,y) not in grids:
                    for (xg,yg) in grids:
                        # NOTE(review): `copy` is assigned but never used.
                        copy = grid[x][y]
                        if edistw_to_finish((x,y), fline, walls) > edistw_to_finish((xg,yg), fline, walls):
                            grid[x][y] = grid[x][y] + edistw_to_finish((x,y), fline, walls)
                            break
                        elif going_away_from_fline((xg,yg), (x,y), fline):
                            grid[x][y] = grid[x][y] + edistw_to_finish((x,y), fline, walls)
                            break

    #borrowing heuristics
    # Relaxation pass: repeatedly propagate distances between neighbouring
    # cells (1 for orthogonal, sqrt(2) for diagonal steps) until no cell
    # improves, skipping moves that crash into walls.
    flag = True
    print('computing edist grid', end=' ');sys.stdout.flush()
    while flag:
        print('.', end='');sys.stdout.flush()
        flag = False
        for x in range(xmax+1):
            for y in range(ymax+1):
                for y1 in range(max(0,y-1),min(ymax+1,y+2)):
                    for x1 in range(max(0,x-1),min(xmax+1,x+2)):
                        if grid[x1][y1] != infinity and not racetrack.crash(((x,y),(x1,y1)),walls):
                            if (x == x1 or y == y1):
                                d = grid[x1][y1] + 1
                            else:
                                # In this case, the move is diagonal, so d = sqrt(2)
                                d = grid[x1][y1] + 1.4142135623730951
                            if d < grid[x][y] and ((x,y) in grids):
                                # Corner gridpoints may instead take their
                                # distance to the nearest corner when smaller.
                                selection = distance_to_corner((x,y))
                                for calc in selection:
                                    if calc < d and calc < grid[x][y]:
                                        grid[x][y] = calc
                                    else:
                                        grid[x][y] = d
                                flag = True
    print(' done')
    g_fline = fline
    g_walls = walls
    return grid
def distance_to_corner(point):
    """Euclidean distance from *point* to every stored corner gridpoint."""
    global grids
    px, py = point
    return [math.sqrt((cy - py)**2 + (cx - px)**2) for (cx, cy) in grids]
def maximum_walls(walls):
    """Record the largest wall x and y coordinates in the globals
    xmax/ymax (used as the track bounds)."""
    global xmax, ymax
    xmax = max(max(x0, x1) for ((x0, y0), (x1, y1)) in walls)
    ymax = max(max(y0, y1) for ((x0, y0), (x1, y1)) in walls)
def area_fline(fline, walls):
    """Append every gridpoint within 5 cells of the finish line span
    (clamped to the track bounds) to the global around_fline list."""
    global around_fline, xmax, ymax
    ((x1, y1), (x2, y2)) = fline
    xs = range(max(0, x1 - 5), min(xmax + 1, x2 + 5))
    ys = range(max(0, y1 - 5), min(ymax + 1, y2 + 5))
    around_fline.extend((gx, gy) for gx in xs for gy in ys)
def corner_grids(fline, walls):
    """Collect the free in-bounds gridpoints adjacent to each wall's first
    endpoint into the global grids list."""
    global grids, gfline, xmax, ymax
    ((x1, y1), (x2, y2)) = fline
    # boundary of the racetrack
    # inserting into grids
    for ((wx, wy), (x1, y1)) in walls:
        # Scan the 3x3 neighbourhood of the endpoint, clipped to bounds.
        for gx in range(max(wx - 1, 0), min(wx + 2, xmax + 1)):
            for gy in range(max(wy - 1, 0), min(wy + 2, ymax + 1)):
                if not in_walls((gx, gy), walls) and in_boundary((gx, gy), xmax, ymax):
                    grids.append((gx, gy))
def in_walls(point, walls):
    """True when *point* lies on some wall segment; otherwise falls off the
    end and returns None (falsy for the callers)."""
    for wall in walls:
        if racetrack.collinear_point_in_edge(point, wall) == True:
            return True
def in_boundary(point, x, y):
    """True when *point* lies strictly inside the (0, 0)-(x, y) rectangle."""
    (xa, ya) = point
    return 0 < xa < x and 0 < ya < y
# exactly touching the finish line with 0,0 velocity
def touch_fline(sd, point, fline, walls):
    """True when the stopping distance equals the wall-aware straight-line
    distance from *point* to the finish line."""
    return sd == edistw_to_finish(point, fline, walls)
#borrowing edistw_to_finish
def edistw_to_finish(point, fline, walls):
    """
    straight-line distance from (x,y) to the finish line ((x1,y1),(x2,y2)).
    Return infinity if there's no way to do it without intersecting a wall
    """
    (x, y) = point
    ((x1, y1), (x2, y2)) = fline
    # Collect the distance to every finish-line cell reachable without
    # crossing a wall.
    if x1 == x2:
        # fline is vertical, so iterate over y
        candidates = [math.sqrt((x1 - x)**2 + (fy - y)**2)
                      for fy in range(min(y1, y2), max(y1, y2) + 1)
                      if not racetrack.crash(((x, y), (x1, fy)), walls)]
    else:
        # fline is horizontal, so iterate over x
        candidates = [math.sqrt((fx - x)**2 + (y1 - y)**2)
                      for fx in range(min(x1, x2), max(x1, x2) + 1)
                      if not racetrack.crash(((x, y), (fx, y1)), walls)]
    # infinity covers the case where no finish cell is reachable.
    candidates.append(infinity)
    return min(candidates)
|
#!/usr/bin/env python3
import sys
# Emit one marker line on each standard stream so a parent process can
# verify how it wired up this child's stdout and stderr.
print('= data from cmd_2 to stdout=', file=sys.stdout)
print('- data from cmd_2 to stderr-', file=sys.stderr)
|
"""
public static TreeNode mirrorTree1(TreeNode root)
{
if(root==null)
return null;
//对左右孩子镜像处理
TreeNode left=mirrorTree1(root.left);
TreeNode right=mirrorTree1(root.right);
//对当前节点进行镜像处理。
root.left=right;
root.right=left;
return root;
}
"""
class Solution:
    def mirrorBST(self, root):
        """Recursively mirror a binary tree in place and return its root."""
        if not root:
            return None
        # Mirror both subtrees first ...
        mirrored_left = self.mirrorBST(root.left)
        mirrored_right = self.mirrorBST(root.right)
        # ... then swap them on the current node.
        root.left = mirrored_right
        root.right = mirrored_left
        return root

    def mirrorBST2(self, root):
        """Iterative variant: preorder traversal, swapping each visited
        node's children — not implemented."""
        pass
|
import uuid
from django.db import models
from applications.base.model_mixins import UserTenantModel
from applications.feed.models import Feed, Item
class Subscription(UserTenantModel, models.Model):
    """A user's subscription to a feed — unique per (user, feed) pair."""
    # Random UUID primary key instead of an auto-increment integer.
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    # PROTECT: a Feed with live subscriptions cannot be deleted.
    feed = models.ForeignKey(
        Feed,
        on_delete=models.PROTECT
    )
    # Nullable timestamp; presumably set when the subscription's content
    # was last refreshed — confirm against the updating code.
    updated_at = models.DateTimeField(
        null=True
    )
    # Set automatically when the row is first created.
    subscribed_at = models.DateTimeField(
        auto_now_add=True
    )
    class Meta:
        # Newest subscriptions first.
        ordering = ('-subscribed_at', )
        # `user` comes from UserTenantModel.
        unique_together = (
            ('user', 'feed'),
        )
class Post(UserTenantModel, models.Model):
    """A feed item as seen by one subscriber, with per-user read/favourite
    state and an optional comment."""
    # Random UUID primary key instead of an auto-increment integer.
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    # CASCADE: deleting the subscription removes its posts.
    subscription = models.ForeignKey(
        Subscription,
        on_delete=models.CASCADE,
        related_name='posts'
    )
    # CASCADE: deleting the underlying feed item removes its posts.
    item = models.ForeignKey(
        Item,
        on_delete=models.CASCADE,
        related_name='posts'
    )
    is_read = models.BooleanField(
        default=False
    )
    is_favourite = models.BooleanField(
        default=False
    )
    # Free-form user note; optional.
    comment = models.TextField(
        null=True,
        blank=True
    )
    class Meta:
        # Most recently published items first (ordered via the related Item).
        ordering = ('-item__published_at', )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.