text stringlengths 38 1.54M |
|---|
from artext import __version__
from artext import config
from artext import utils
from artext.artext import Artext
def main():
    """Command-line entry point: add artificial noise to text.

    Reads ``args.source`` line by line, produces noisy variants of each
    line with :class:`Artext`, and writes them to ``args.output`` joined
    by the configured separator.
    """
    parser = utils.arg_parser()
    parser.add_argument('-v', '--version', action='version',
                        version=('artext %s' % __version__))
    args = parser.parse_args()

    conf = config.Config()
    conf.error_rate = args.error_rate
    conf.path_protected_tokens = args.protected_tokens
    conf.samples = args.samples
    conf.separator = args.separator

    artxt = Artext(config=conf)
    # Both levels share the same read/noise/write loop; only the noising
    # method differs, so pick it once instead of duplicating the loop.
    if args.level == 'sent':
        noise = artxt.noise_sentence
    elif args.level == 'doc':
        noise = artxt.noise_document
    else:
        # Previously an unknown level silently produced an empty output file.
        raise ValueError("unknown level: {!r} (expected 'sent' or 'doc')".format(args.level))
    with open(args.source, 'r') as fin, open(args.output, 'w') as fout:
        for line in fin:
            fout.write("{}\n".format(args.separator.join(noise(line))))


if __name__ == "__main__":
    main()
|
from pyspark.sql.functions import when
def gender_column_to_0_1_2(observation_df):
    """Recode the ``member_gender`` string column as integer codes.

    Male -> 0, Female -> 1, Other -> 2, anything else -> 3.
    """
    gender = observation_df.member_gender
    coded = (
        when(gender == "Male", 0)
        .when(gender == "Female", 1)
        .when(gender == "Other", 2)
        .otherwise(3)
    )
    return observation_df.withColumn('member_gender', coded)
|
# -*- coding: utf-8 -*-
import requests
import json
from gushiwen_master.settings import PROXY_SHADOWSOCKS_ONLY, SHADOWSOCKS_SCHEME, SHADOWSOCKS_SERVER, SHADOWSOCKS_PORT
def get_http_proxies():
    """Return a list of HTTP proxy URLs.

    When PROXY_SHADOWSOCKS_ONLY is set, returns the single configured
    shadowsocks proxy; otherwise fetches up to 10 proxies from the local
    proxy-pool service. Returns an empty list when the pool is
    unreachable or replies with invalid JSON.
    """
    proxies = []
    if PROXY_SHADOWSOCKS_ONLY:
        proxies.append('{}://{}:{}'.format(SHADOWSOCKS_SCHEME, SHADOWSOCKS_SERVER, SHADOWSOCKS_PORT))
    else:
        try:
            r = requests.get('http://127.0.0.1:10010/?types=0&count=10&country=国内')
            # r.json() raises ValueError on malformed payloads.
            ip_ports = r.json()
            for ip in ip_ports:
                proxies.append('http://{}:{}'.format(ip[0], ip[1]))
        # Bug fix: the original bare `except:` also swallowed SystemExit
        # and KeyboardInterrupt; catch only network/decoding failures.
        except (requests.RequestException, ValueError):
            # Best-effort: fall back to no proxies rather than crashing.
            proxies = []
    return proxies
def removekey(d, key):
    """Return a shallow copy of *d* without *key* (no-op if absent)."""
    result = dict(d)
    result.pop(key, None)
    return result
|
#! /usr/bin/python
import numpy as np
def isAtom(line):
    """Return True if this PDB record line is an atom entry.

    Compares the 6-character record-name field (PDB columns 1-6) against
    "ATOM" (space-padded) and "HETATM".
    """
    # NOTE(review): the "ATOM" literal must be padded to exactly 6
    # characters to ever equal line[0:6] — confirm the padding.
    if line[0:6] == "ATOM " or line[0:6] == "HETATM":
        return True
    else:
        return False
def isPAtom(line):
    """Return True if *line* is an atom record whose element (first
    character of the stripped atom-name field) is polar: N, O, or S."""
    return isAtom(line) and atmn(line).strip()[0] in ("N", "O", "S")
def isIons(line):
    """Return True if *line* is an atom record for a (metal) ion residue."""
    # frozenset gives O(1) membership; the original list also contained
    # "MG" twice.
    ions = frozenset(["MG", "MN", "CA", "ZN", "FE", "HG", "NI", "CO", "CD",
                      "FE2", "NA", "K", "CU", "SR", "CS", "LI"])
    return isAtom(line) and resn(line).strip() in ions
def atmi(line):
    """Return the atom serial-number field (PDB columns 7-11)."""
    return line[6:11]
def atmn(line):
    """Return the atom-name field (PDB columns 13-16)."""
    return line[12:16]
def resn(line):
    """Return the residue-name field (PDB columns 18-20)."""
    return line[17:20]
def chid(line):
    """Return the chain-ID field (PDB column 22)."""
    return line[21:22]
def resi(line):
    """Return the residue-index field (PDB columns 23-26)."""
    return line[22:26]
def seqi(line):
    """Return chain ID + residue index as one sequence key (columns 22-26).

    Combining the two fields disambiguates residue indices >= 10000.
    """
    return line[21:26]
def coord(line):
    """Parse the x, y, z coordinates: three 8-character float fields
    starting at PDB column 31."""
    return [float(line[start:start + 8]) for start in (30, 38, 46)]
def isHydrogen(line):
    """Return 1 when column 13 or 14 of the atom-name field is 'H', else 0."""
    return 1 if line[12] == 'H' or line[13] == 'H' else 0
def isWater(line):
    """Return True for water residues (residue name WAT or HOH)."""
    return resn(line) in ("WAT", "HOH")
class pdbinfo:
    """Minimal PDB container with record-filtering helpers.

    Construct from either a file on disk (``file=``) or an already-read
    list of lines (``lines=``).
    """

    def __init__(self, name=None, file=None, lines=None):
        self.name = name
        if file is not None:  # was `!= None`; identity test is idiomatic
            self.file = file
            # Bug fix: the original `open()` was never closed (handle leak);
            # the with-statement closes it deterministically.
            with open(self.file) as fh:
                self.lines = fh.readlines()
        else:
            self.lines = lines

    def getAtoms(self):
        """Return all ATOM/HETATM record lines."""
        return [line for line in self.lines if isAtom(line)]

    def getPolarAtoms(self):
        """Return all polar (N/O/S) atom record lines."""
        return [line for line in self.lines if isPAtom(line)]

    def getIons(self):
        """Return all ion record lines."""
        return [line for line in self.lines if isIons(line)]

    def getProteinWaters(self):
        """Split the stored lines into (non-water, water) lists."""
        Waters = [line for line in self.lines if isWater(line)]
        Protein = [line for line in self.lines if line not in Waters]
        return Protein, Waters

    def getCoords(self):
        """Return the coordinates of every stored line as an (N, 3) array.

        NOTE(review): assumes every stored line is an atom record;
        non-atom lines would make float() raise — confirm callers filter.
        """
        return np.array([coord(atom) for atom in self.lines])
|
class Solution:
    def isSymmetric_v20220206(self, root: TreeNode) -> bool:
        """Return True if the binary tree is mirror-symmetric around its root."""
        def dfs(p, q):
            if not p and not q: return True
            if not p or not q or p.val != q.val: return False
            return dfs(p.left, q.right) and dfs(p.right, q.left)
        if not root: return True
        return dfs(root.left, root.right)

    def isSymmetric(self, root: TreeNode) -> bool:
        """Return True if the binary tree is mirror-symmetric around its root.

        Bug fix: the original dereferenced ``root.left`` without checking
        ``root`` for None; an empty tree now returns True, consistent with
        isSymmetric_v20220206.
        """
        if not root:
            return True

        def dfs(left, right):
            if not left and not right:
                return True
            if not left or not right or left.val != right.val:
                return False
            return dfs(left.left, right.right) and dfs(left.right, right.left)

        return dfs(root.left, root.right)
|
import matplotlib.pyplot as plt
# Demo: draw a pie chart of four categories.
# Category labels and their relative sizes (shares of the whole).
labels = ['Frogs','Hogs','Dogs','Logs']
sizes = [15,30,45,10]
# Offset the third slice ('Dogs') slightly out of the pie for emphasis.
explode = [0,0,0.1,0]
fig1,ax1 = plt.subplots()
# autopct formats each wedge's percentage; startangle rotates the first wedge.
ax1.pie(sizes,explode=explode,labels=labels,autopct='%1.1f%%',shadow=True,startangle=30)
# Equal aspect ratio keeps the pie circular.
ax1.axis('equal')
plt.show()
#!/usr/bin/python3
import argparse
import bcrypt
import json
import os
import yaml
import re
def make_hash(password):
    """Bcrypt-hash *password* (a str) and return the hash as an ASCII str."""
    salt = bcrypt.gensalt()
    digest = bcrypt.hashpw(password.encode("utf-8"), salt)
    return digest.decode("ascii")
def main():
    """Read admins.yaml, normalise and validate entries, write admins.json.

    Each admin entry gets a sanitised username, a display name, a bcrypt
    password hash (computed from a plaintext password when no pwhash is
    given), and optional roles.

    Raises:
        ValueError: on duplicate usernames/full names or a missing password.
    """
    parser = argparse.ArgumentParser(
        description="Process the admin data file for use by the server.")
    parser.add_argument("--input_dir",
                        help="Input directory with admins.yaml to process.")
    parser.add_argument("--output_dir",
                        help="Directory to write output to.")
    options = parser.parse_args()
    with open(os.path.join(options.input_dir, "admins.yaml")) as f:
        # safe_load: the config file should never construct arbitrary
        # Python objects (yaml.load without a Loader is also deprecated).
        y = yaml.safe_load(f)
    out = {}
    names = set()
    for i, (username, d) in enumerate(y["admins"].items()):
        od = {}
        # Usernames are restricted to lowercase alphanumerics/underscore.
        u = re.sub(r"[^a-z0-9_]", "", username.lower())
        if u != username:
            print(f"{i+1}: {d['name']} ({u}) (was {username})")
            username = u
        else:
            print(f"{i+1}: {d['name']} ({username})")
        # Validate with raises instead of assert: asserts vanish under -O.
        if username in out:
            raise ValueError(f"Duplicate username {username}!")
        if d["name"] in names:
            raise ValueError(f"Duplicate full name {d['name']}!")
        names.add(d["name"])
        od["name"] = d.pop("name")
        # Prefer a precomputed hash; otherwise hash the plaintext password.
        if "pwhash" in d:
            od["pwhash"] = d["pwhash"]
        elif "password" in d:
            od["pwhash"] = make_hash(d.pop("password"))
        else:
            raise ValueError(f"Error in {username}: no password or pwhash")
        if "roles" in d:
            od["roles"] = d["roles"]
        out[username] = od
    with open(os.path.join(options.output_dir, "admins.json"), "w") as f:
        json.dump(out, f, sort_keys=True, indent=2)


if __name__ == "__main__":
    main()
|
import math
def fib(n, start1=1, start2=2):
    """Yield *start1*, then every Fibonacci-style term below *n*.

    Each subsequent term is the sum of the previous two, seeded with
    (start1, start2).
    """
    yield start1
    prev, cur = start1, start2
    while cur < n:
        yield cur
        prev, cur = cur, prev + cur
def is_prime(n, prime_list=None):
    """Return True if *n* is prime.

    The original body was an unimplemented stub that always returned None.
    If *prime_list* is given it is assumed to be an ascending list of
    primes covering at least sqrt(n) (e.g. from primes_x) and is used for
    trial division; otherwise odd trial division up to sqrt(n) is used.
    """
    if n < 2:
        return False
    if prime_list is not None:
        for p in prime_list:
            if p * p > n:
                break
            if n % p == 0:
                return False
        return True
    if n % 2 == 0:
        return n == 2
    d = 3
    while d * d <= n:
        if n % d == 0:
            return False
        d += 2
    return True
def primes(n):
    """Yield every prime <= n (incremental sieve of Eratosthenes).

    Bug fix: the original broke out of the loop once ``p*p > n``, which
    prevented it from yielding any prime larger than sqrt(n) — e.g.
    primes(10) yielded only 2 and 3, never 5 and 7.
    """
    numbers = {p: None for p in range(2, n + 1)}
    for p in range(2, n + 1):
        if numbers[p] is None:
            numbers[p] = True
            yield p
            # Marking can start at p*p: smaller multiples were already
            # marked by smaller prime factors.
            for c in range(p * p, n + 1, p):
                numbers[c] = False
def primes_x(n):
    """Return the list of all primes <= n (sieve of Eratosthenes)."""
    is_candidate = {i: True for i in range(2, n + 1)}
    for i in range(2, n + 1):
        for multiple in range(i * i, n + 1, i):
            is_candidate[multiple] = False
    return [i for i in range(2, n + 1) if is_candidate[i]]
|
'''
Clip the layers contained in a QGIS project file to a given extent and
save them into a separately-named GeoPackage.
'''
import os
import subprocess
from qgis.core import *
# In the Visual Studio solution search path, add
# %QGIS_INSTALL%/apps/qgis-ltr/python/plugins/processing
from processing.algs.gdal.GdalUtils import GdalUtils
# Environment variables required by GDAL/PROJ.
# https://gis.stackexchange.com/questions/326968/ogr2ogr-error-1-proj-pj-obj-create-cannot-find-proj-db
os.environ['QT_QPA_PLATFORM_PLUGIN_PATH'] = r'C:\Program Files\QGIS 3.10\apps\Qt5\plugins'
os.environ['GDAL_DATA'] = r'c:\Program Files\QGIS 3.10\share\gdal'
os.environ['GDAL_DRIVER_PATH'] = r'c:\Program Files\QGIS 3.10\bin\gdalplugins'
os.environ['PROJ_LIB'] = r'c:\Program Files\QGIS 3.10\share\proj'
# Boilerplate needed to run a standalone (non-GUI) QGIS script.
# https://docs.qgis.org/2.14/ja/docs/pyqgis_developer_cookbook/intro.html
QgsApplication.setPrefixPath("C:\Program Files\QGIS 3.10\bin", True)
qgs = QgsApplication([], True)
qgs.initQgis()
# The project file acting as the data source.
project = QgsProject.instance()
# Tokyo 500m-mesh future population estimates and Tokyo bus-stop shapefiles.
# http://nlftp.mlit.go.jp/ksj/gml/datalist/KsjTmplt-mesh500h30.html
project.read('../500m_mesh.qgz')
print("入力:"+ project.fileName())
absolutePath = project.absolutePath()
# Corner coordinates of the displayed extent, taken from the QGIS
# bookmark editor.
clipextent = "139.708694789 35.642186746 139.770192591 35.695803397"
# Output database name.
outputFileName = absolutePath + "\\output.gpkg"
print("出力:" + outputFileName)
# First loop iteration overwrites the output (see -append below).
appendSwitch = ""
# The project's layer list.
layers = project.mapLayers()
# Mapping of LayerID -> LayerName.
layerId = {}
print("Clipping開始")
for layer in layers.values():
    if layer.type() == QgsMapLayerType.VectorLayer:
        print("name:" + layer.name())
        print("source:" + layer.source())
        print("id:" + layer.id())
        # Check whether an OGR driver supports this data source.
        layername = GdalUtils.ogrLayerName(layer.dataProvider().dataSourceUri())
        if not layername:
            print("未サポートのデータソース:" + layer.name())
            # NOTE(review): `break` aborts the whole loop on the first
            # unsupported layer; `continue` may have been intended — confirm.
            break
        # Record the LayerID -> LayerName pair.
        layerId[layer.id()] = layername
        # Test script: force EPSG:4612 when the CRS is undefined/different.
        # NOTE(review): sourceCrs() returns a QgsCoordinateReferenceSystem,
        # so comparing it to the string "EPSG:4612" is presumably always
        # unequal and the CRS is always overridden — confirm.
        sourceCrs = layer.sourceCrs()
        if sourceCrs != "EPSG:4612":
            sourceCrs = "EPSG:4612"
        # Build the ogr2ogr command arguments.
        cmd = ("ogr2ogr {} -f \"GPKG\" {} -a_srs \"{}\" -clipsrc {} {}".
               format(appendSwitch, outputFileName, sourceCrs, clipextent, layer.source()))
        print (cmd)
        # Run the ogr2ogr command.
        runcmd = subprocess.check_call(cmd)
        print (runcmd)
        # From the second layer on, -append is required; otherwise each
        # layer would overwrite the previous output.
        appendSwitch = "-append"
    else:
        print("Is not vetorlayer:" + layer.name())
# Re-open the exported DB and repoint each layer with setDataSource.
print("プロジェクトファイルのデータソース書き換え開始")
# Save the project under a new name.
newproject = QgsProject.instance()
newproject.write(absolutePath + "\\output.qgz")
print("出力:" + newproject.fileName())
for layerid in layerId.items():
    fullname = outputFileName + "|layername=" + layerid[1]
    display_name = layerid[1]
    # Look the layer up by its layer ID.
    tagetlayer = newproject.mapLayer(layerid[0])
    print("dataSource :" + fullname)
    print("baseName :" + display_name)
    print("RelacelayerID:" + tagetlayer.id())
    # Rewrite the layer's data source.
    provider_options = QgsDataProvider.ProviderOptions()
    provider_options.transformContext = newproject.transformContext()
    tagetlayer.setDataSource(fullname, display_name, "ogr", provider_options)
newproject.write()
print("処理終了")
qgs.exitQgis()
# -*- coding: utf-8 -*-
import os
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from django import VERSION as DJANGO_VERSION
except ImportError:
DJANGO_VERSION = None
# Register database schemes in URLs so urlparse recognises their netloc part.
urlparse.uses_netloc.append('postgres')
urlparse.uses_netloc.append('postgresql')
urlparse.uses_netloc.append('pgsql')
urlparse.uses_netloc.append('postgis')
urlparse.uses_netloc.append('mysql')
urlparse.uses_netloc.append('mysql2')
urlparse.uses_netloc.append('mysqlgis')
urlparse.uses_netloc.append('mysql-connector')
urlparse.uses_netloc.append('mssql')
urlparse.uses_netloc.append('spatialite')
urlparse.uses_netloc.append('sqlite')
urlparse.uses_netloc.append('oracle')
urlparse.uses_netloc.append('oraclegis')
urlparse.uses_netloc.append('redshift')
# Name of the environment variable consulted by config() by default.
DEFAULT_ENV = 'DATABASE_URL'
# URL scheme -> Django database ENGINE dotted path.
SCHEMES = {
    'postgis': 'django.contrib.gis.db.backends.postgis',
    'mysql': 'django.db.backends.mysql',
    'mysql2': 'django.db.backends.mysql',
    'mysqlgis': 'django.contrib.gis.db.backends.mysql',
    'mysql-connector': 'mysql.connector.django',
    'mssql': 'sql_server.pyodbc',
    'spatialite': 'django.contrib.gis.db.backends.spatialite',
    'sqlite': 'django.db.backends.sqlite3',
    'oracle': 'django.db.backends.oracle',
    'oraclegis': 'django.contrib.gis.db.backends.oracle',
    'redshift': 'django_redshift_backend',
}
# Django < 2.0 uses the psycopg2-specific backend name for PostgreSQL.
# https://docs.djangoproject.com/en/2.0/releases/2.0/#id1
if DJANGO_VERSION and DJANGO_VERSION < (2, 0):
    SCHEMES['postgres'] = 'django.db.backends.postgresql_psycopg2'
    SCHEMES['postgresql'] = 'django.db.backends.postgresql_psycopg2'
    SCHEMES['pgsql'] = 'django.db.backends.postgresql_psycopg2'
else:
    SCHEMES['postgres'] = 'django.db.backends.postgresql'
    SCHEMES['postgresql'] = 'django.db.backends.postgresql'
    SCHEMES['pgsql'] = 'django.db.backends.postgresql'
def config(env=DEFAULT_ENV, default=None, engine=None, conn_max_age=0, ssl_require=False):
    """Returns configured DATABASE dictionary from DATABASE_URL.

    Reads the URL from environment variable *env* (falling back to
    *default*) and returns an empty dict when neither is set.
    """
    url = os.environ.get(env, default)
    if not url:
        return {}
    return parse(url, engine, conn_max_age, ssl_require)
def parse(url, engine=None, conn_max_age=0, ssl_require=False):
    """Parses a database URL into a Django DATABASES dictionary.

    Args:
        url: database URL, e.g. ``postgres://user:pass@host:5432/name?opt=v``.
        engine: explicit ENGINE dotted path; defaults to the scheme lookup.
        conn_max_age: value for CONN_MAX_AGE.
        ssl_require: when True, adds ``sslmode=require`` to OPTIONS.
    """
    if url == 'sqlite://:memory:':
        # this is a special case, because if we pass this URL into
        # urlparse, urlparse will choke trying to interpret "memory"
        # as a port number
        return {
            'ENGINE': SCHEMES['sqlite'],
            'NAME': ':memory:'
        }
        # note: no other settings are required for sqlite
    # otherwise parse the url as normal
    config = {}
    url = urlparse.urlparse(url)
    # Split query strings from path.
    path = url.path[1:]
    if '?' in path and not url.query:
        # Bug fix: maxsplit must be 1 (the original used 2, which raises
        # ValueError when the path contains two or more '?' characters,
        # since only two names are unpacked).
        path, query = path.split('?', 1)
    else:
        path, query = path, url.query
    query = urlparse.parse_qs(query)
    # If we are using sqlite and we have no path, then assume we
    # want an in-memory database (this is the behaviour of sqlalchemy)
    if url.scheme == 'sqlite' and path == '':
        path = ':memory:'
    # Handle postgres percent-encoded paths.
    hostname = url.hostname or ''
    if '%2f' in hostname.lower():
        # Switch to url.netloc to avoid lower cased paths
        hostname = url.netloc
        if "@" in hostname:
            hostname = hostname.rsplit("@", 1)[1]
        if ":" in hostname:
            hostname = hostname.split(":", 1)[0]
        hostname = hostname.replace('%2f', '/').replace('%2F', '/')
    # Lookup specified engine.
    engine = SCHEMES[url.scheme] if engine is None else engine
    # Oracle and MSSQL expect PORT as a string; others take the integer.
    port = (str(url.port) if url.port and engine in [SCHEMES['oracle'], SCHEMES['mssql']]
            else url.port)
    # Update with environment configuration.
    config.update({
        'NAME': urlparse.unquote(path or ''),
        'USER': urlparse.unquote(url.username or ''),
        'PASSWORD': urlparse.unquote(url.password or ''),
        'HOST': hostname,
        'PORT': port or '',
        'CONN_MAX_AGE': conn_max_age,
    })
    # Pass the query string into OPTIONS.
    options = {}
    for key, values in query.items():
        if url.scheme == 'mysql' and key == 'ssl-ca':
            options['ssl'] = {'ca': values[-1]}
            continue
        options[key] = values[-1]
    if ssl_require:
        options['sslmode'] = 'require'
    # Support for Postgres Schema URLs
    if 'currentSchema' in options and engine in (
        'django.contrib.gis.db.backends.postgis',
        'django.db.backends.postgresql_psycopg2',
        'django.db.backends.postgresql',
        'django_redshift_backend',
    ):
        options['options'] = '-c search_path={0}'.format(options.pop('currentSchema'))
    if options:
        config['OPTIONS'] = options
    if engine:
        config['ENGINE'] = engine
    return config
|
#coding:utf-8
import fn_db
import fn_ref
def get_bookins_join(bookinsid):
    """Return the book-instance row joined with its class, or None."""
    query = fn_db.db.query("""
    SELECT a.*, b.*
    FROM LIM_BOOKINS a LEFT JOIN LIM_BOOKCLS b ON a.bookclsid = b.bookclsid
    WHERE a.bookinsid = $bookinsid
    """, vars = locals())
    # First row if any; None when the query matched nothing.
    return next(iter(query), None)
def bind_ref(book):
    """Attach reference-code lookups (status, category, place, publisher)
    to *book* via fn_ref.bind."""
    ref_codes = {
        "inssts": "BOOKINSSTS",
        "catcd": "CAT",
        "placecd": "PLACE",
        "pubcd": "PUB",
    }
    fn_ref.bind(book, **ref_codes)
|
import os
import sys
from os.path import dirname, abspath
sys.path.append(dirname(dirname(abspath(__file__))))
import gym
import time
from agents.actor_critic_agents.A2C import A2C
from agents.DQN_agents.Dueling_DDQN import Dueling_DDQN
from agents.actor_critic_agents.SAC_Discrete import SAC_Discrete
from agents.actor_critic_agents.A3C import A3C
from agents.policy_gradient_agents.PPO import PPO
from agents.Trainer import Trainer
from utilities.data_structures.Config import Config
from agents.DQN_agents.DDQN import DDQN
from agents.DQN_agents.DDQN_With_Prioritised_Experience_Replay import DDQN_With_Prioritised_Experience_Replay
from agents.DQN_agents.DQN import DQN
from agents.DQN_agents.DQN_With_Fixed_Q_Targets import DQN_With_Fixed_Q_Targets
from agents.policy_gradient_agents.REINFORCE import REINFORCE
## envs import ##
from environments.carla_enviroments import env_v1_ObstacleAvoidance
env_title = "ObstacleAvoidance-v0"
# Trainer configuration shared by all agents below.
config = Config()
config.env_title = env_title
config.seed = 1
config.environment = gym.make(env_title)
config.num_episodes_to_run = 2000
config.show_solution_score = False
config.visualise_individual_results = True
config.visualise_overall_agent_results = True
config.standard_deviation_results = 1.0
config.runs_per_agent = 1
config.use_GPU = True
config.overwrite_existing_results_file = False
config.randomise_random_seed = True
config.save_model = True
config.log_loss = False
# Timestamp used to name this run's log/result directories.
config.log_base = time.strftime("%Y%m%d%H%M%S", time.localtime())
config.save_model_freq = 300 ## save model per 300 episodes
config.retrain = True
config.resume = False
# NOTE(review): hard-coded local checkpoint path; only valid on the
# original author's machine.
config.resume_path = 'E:\\reinforcement-learning-based-driving-decision-in-Carla\\results\Models\ObstacleAvoidance-v0\DDQN with Prioritised Replay\\20200611150242\\rolling_score_68.0417.model'
config.backbone_pretrain = False
config.force_explore_mode = True
config.force_explore_stare_e = 0.2 ## when the std of rolling score in last 10 window is smaller than this val, start explore mode
config.force_explore_rate = 0.95 ## only when the current score bigger than 0.8*max(rolling score[-10:]), forece expolre
## data and graphs save dir ##
data_results_root = os.path.join(os.path.dirname(__file__)+"/data_and_graphs/carla_obstacle_avoidance", config.log_base)
# Avoid clobbering an existing run directory by suffixing underscores.
while os.path.exists(data_results_root):
    data_results_root += '_'
os.makedirs(data_results_root)
config.file_to_save_data_results = os.path.join(data_results_root, "data.pkl")
config.file_to_save_results_graph = os.path.join(data_results_root, "data.png")
# Per-agent-family hyperparameters consumed by the Trainer.
config.hyperparameters = {
    "DQN_Agents": {
        "learning_rate": 1e-1,
        "batch_size": 256,
        "buffer_size": 20000,
        "epsilon": 1.0,
        "epsilon_decay_rate_denominator": 1.,
        "discount_rate": 0.9,
        "tau": 0.01,
        "alpha_prioritised_replay": 0.6,
        "beta_prioritised_replay": 0.1,
        "incremental_td_error": 1e-8,
        "update_every_n_steps": 1,
        "linear_hidden_units": [32, 108, 296, 108, 32],
        "final_layer_activation": "None",
        "batch_norm": True,
        "gradient_clipping_norm": 0.1,
        "learning_iterations": 1,
        "clip_rewards": False
    },
    "Stochastic_Policy_Search_Agents": {
        "policy_network_type": "Linear",
        "noise_scale_start": 1e-2,
        "noise_scale_min": 1e-3,
        "noise_scale_max": 2.0,
        "noise_scale_growth_factor": 2.0,
        "stochastic_action_decision": False,
        "num_policies": 10,
        "episodes_per_policy": 1,
        "num_policies_to_keep": 5,
        "clip_rewards": False
    },
    "Policy_Gradient_Agents": {
        "learning_rate": 0.05,
        "linear_hidden_units": [64, 128, 64, 32],
        "final_layer_activation": "SOFTMAX",
        "learning_iterations_per_round": 5,
        "discount_rate": 0.99,
        "batch_norm": False,
        "clip_epsilon": 0.1,
        "episodes_per_learning_round": 4,
        "normalise_rewards": True,
        "gradient_clipping_norm": 7.0,
        "mu": 0.0, #only required for continuous action games
        "theta": 0.0, #only required for continuous action games
        "sigma": 0.0, #only required for continuous action games
        "epsilon_decay_rate_denominator": 1.0,
        "clip_rewards": False
    },
    "Actor_Critic_Agents": {
        "learning_rate": 0.005,
        "linear_hidden_units": [20, 10],
        "final_layer_activation": ["SOFTMAX", None],
        "gradient_clipping_norm": 5.0,
        "discount_rate": 0.99,
        "epsilon_decay_rate_denominator": 1.0,
        "normalise_rewards": True,
        "exploration_worker_difference": 2.0,
        "clip_rewards": False,
        # Separate settings for the actor and critic networks (SAC/TD3-style).
        "Actor": {
            "learning_rate": 0.0003,
            "linear_hidden_units": [64, 64],
            "final_layer_activation": "Softmax",
            "batch_norm": False,
            "tau": 0.005,
            "gradient_clipping_norm": 5,
            "initialiser": "Xavier"
        },
        "Critic": {
            "learning_rate": 0.0003,
            "linear_hidden_units": [64, 64],
            "final_layer_activation": None,
            "batch_norm": False,
            "buffer_size": 1000000,
            "tau": 0.005,
            "gradient_clipping_norm": 5,
            "initialiser": "Xavier"
        },
        "min_steps_before_learning": 400,
        "batch_size": 256,
        "discount_rate": 0.99,
        "mu": 0.0, #for O-H noise
        "theta": 0.15, #for O-H noise
        "sigma": 0.25, #for O-H noise
        "action_noise_std": 0.2, # for TD3
        "action_noise_clipping_range": 0.5, # for TD3
        "update_every_n_steps": 1,
        "learning_updates_per_learning_session": 1,
        "automatically_tune_entropy_hyperparameter": True,
        "entropy_term_weight": None,
        "add_extra_noise": False,
        "do_evaluation_iterations": True
    }
}
if __name__ == "__main__":
    # AGENTS = [SAC_Discrete, DDQN, Dueling_DDQN, DQN, DQN_With_Fixed_Q_Targets,
    #           DDQN_With_Prioritised_Experience_Replay, A2C, PPO, A3C ]
    # Run only DQN with fixed Q-targets for this experiment.
    AGENTS = [DQN_With_Fixed_Q_Targets]
    trainer = Trainer(config, AGENTS)
    trainer.run_games_for_agents()
    pass
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires that create_chain.py runs first
# which creates root and intermediate certificates
# Run with this command: python /tmp/create_servercert.py --loglevel debug --subject /C=US/ST=California/L=Roseville/O=HP/OU=PDE --domain forj.io --site review.i5 --cacerts_dir /tmp/cacerts
import os.path
import argparse
import util
import sys
def main():
    """Create a server certificate signed by the intermediate CA.

    Expects the CA database produced by create_chain.py under
    --cacerts_dir; generates a key/CSR/certificate for <site>.<domain>,
    moves them into the certs directory, and strips the passphrase from
    the private key for SSL use.
    """
    # http://docs.python.org/2/library/argparse.html
    global logger
    parser = argparse.ArgumentParser(description='Create a server certificate using the cacerts db.')
    parser.add_argument('--loglevel', help='Specify the default logging level (optional).', choices=['debug', 'info', 'warning', 'error', 'DEBUG', 'INFO', 'WARNING', 'ERROR'], default='info')
    parser.add_argument('--logfile', help='Specify logfile name.', default='/tmp/create_servercert.log')
    parser.add_argument('--cacerts_dir', help='alternate cacerts config dir.', default='../cacerts')
    parser.add_argument('--domain', help='The domain name.', default='forj.io')
    parser.add_argument('--site', help='The name of the site.', default='')
    parser.add_argument('--password', help='Specify a password (optional).', default='changeme')
    parser.add_argument('--subject', help='Specify the certificate subject info.', default='/C=US/ST=California/L=Roseville/O=HP/OU=PDE')
    parser.add_argument('--altnames', help='Specify alternative names like "/CN=server1/CN=server2"', default='')
    args = parser.parse_args()
    util.setup_logging(args.logfile, args.loglevel)
    util.banner_start()
    util.logger.debug("parsed arguments")
    util.logger.info("got folder " + args.cacerts_dir)
    # Validate the CA directory layout created by create_chain.py.
    cacerts_dir = os.path.abspath(args.cacerts_dir)
    util.validate_directory(cacerts_dir)
    cainter_dir = os.path.abspath(os.path.join(cacerts_dir, "ca2013"))
    util.validate_directory(cainter_dir)
    cakey_pem = os.path.abspath(os.path.join(cacerts_dir, "private/cakey.pem"))
    util.validate_file(cakey_pem)
    if not args.site:
        # Bug fix: the original logged "found cakey_pem" here, which had
        # nothing to do with the actual failure (missing --site).
        util.logger.error("--site is required")
        sys.exit(1)
    source_dir = cainter_dir
    destin_dir = os.path.join(cainter_dir, 'certs')
    # http://docs.python.org/2/library/subprocess.html#replacing-older-functions-with-the-subprocess-module
    util.openssl_cmd("test", args.site, cainter_dir, 'version')
    # Equivalent manual steps:
    #   pushd /cacerts/ca2013
    #   [ -f ~/.rnd ] && sudo rm -f ~/.rnd
    #   openssl genrsa -passout pass:xxxxxxxx -des3 -out $_SITE.key 2048 -config ./openssl.cnf
    #   openssl req -passin pass:xxxxxxxx -new -key $_SITE.key -out $_SITE.csr -subj ".../CN=$_SITE.forj.io" -config ./openssl.cnf
    #   openssl ca -passin pass:xxxxxxxx -batch -config openssl.cnf -policy policy_anything -out $_SITE.crt -infiles $_SITE.csr
    subject = args.subject + "/CN=" + args.site + "." + args.domain + args.altnames
    util.openssl_cmd("genrsa", args.site + '.' + args.domain, cainter_dir, "-passout pass:" + args.password + " -des3 2048 -config ./openssl.cnf")
    util.openssl_cmd("req", args.site + '.' + args.domain, cainter_dir, "-passin pass:" + args.password + " -new -subj " + subject + " -config ./openssl.cnf")
    # -keyfile and -cert makes the linkage to intermediate certificate
    util.openssl_cmd("ca", args.site + '.' + args.domain, cainter_dir, "-passin pass:" + args.password + " -batch -config ./openssl.cnf -policy policy_anything -keyfile ./private/cakey.pem -cert ./cacert.pem")
    # Move the generated key/csr/crt into the certs directory.
    extensions = ['.key', '.csr', '.crt']
    for ext in extensions:
        util.logger.debug("relocating " + args.site + ext)
        os.rename(os.path.join(source_dir, args.site + '.' + args.domain + ext),
                  os.path.join(destin_dir, args.site + '.' + args.domain + ext))
    # This is an SSL cert: remove the passphrase from the key.
    #   openssl rsa -passin pass:xxxxxxxx -in $_SITE.key -out $_FQDN.key
    key_in = os.path.join(destin_dir, args.site + '.' + args.domain + '.key')
    key_out = os.path.join(destin_dir, args.site + '.' + args.domain + '.key2')
    util.openssl_cmd("rsa", args.site, cainter_dir, "-passin pass:" + args.password + " -in " + key_in + " -out " + key_out)
    util.logger.debug("unlink : " + key_in)
    os.unlink(key_in)
    util.logger.debug("rename : " + key_out + " -> " + key_in)
    os.rename(key_out, key_in)
    # Apache usage:
    #   SSLCertificateFile /var/ca/ca2008/certs/{server_name}.crt
    #   SSLCertificateKeyFile /var/ca/ca2008/certs/{server_name}.key
    #   SSLCertificateChainFile /var/ca/ca2008/chain.crt


if __name__ == '__main__':
    main()
|
import logging
import os
import re
import requests
import time
import tempfile
import zipfile
from copy import copy
from itertools import product
from bs4 import BeautifulSoup
class USPhoneNumberSupplier:
    """Expand a partially-masked US phone number into candidate numbers.

    The mask is a 10-character string of digits and 'X' placeholders
    (e.g. "555XXX1234"). Candidates are limited to assigned NANPA
    area-code/exchange combinations and, where available, to valid
    thousands-block digits scraped from nationalpooling.com.
    """

    def __init__(self, cache, user_agent_instance, proxy_instance, colors, mask):
        # cache: nested dict area_code -> exchange -> {'blockNumbers': [...]}.
        self.user_agent_instance = user_agent_instance
        self.proxy_instance = proxy_instance
        self.cache = cache
        self.mask = copy(mask)
        self.colors = colors
        # NOTE(review): tempfile.mktemp() is deprecated and race-prone;
        # NamedTemporaryFile or a fixed cache path would be safer.
        self.tmp_file = tempfile.mktemp()
        self.logger = logging.getLogger(__name__)

    def _cache_valid_block_numbers(self, state, area_code):
        """Populate self.cache with the valid thousands-block digits for
        every exchange of *area_code*, scraped from nationalpooling.com."""
        proxy = self.proxy_instance.get_random_proxy()
        session = requests.Session()
        # We need the cookies or it will error
        session.get("https://www.nationalpooling.com/pas/blockReportSelect.do?reloadModel=N")
        response = session.post("https://www.nationalpooling.com/pas/blockReportDisplay.do",
                                headers={"Upgrade-Insecure-Requests": "1",
                                         "User-Agent": self.user_agent_instance.next(),
                                         "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
                                         "Referer": "https://www.nationalpooling.com/pas/blockReportSelect.do?reloadModel=Y",
                                         "Accept-Encoding": "gzip, deflate, br",
                                         "Accept-Language": "en-US,en;q=0.9,es;q=0.8",
                                         "Origin": "https://www.nationalpooling.com",
                                         "Content-Type": "application/x-www-form-urlencoded",
                                         "DNT": "1"
                                         },
                                data="stateAbbr=" + state +
                                     "&npaId=" + area_code +
                                     "&rtCntrId=" + "ALL" +
                                     "&reportType=" + "3",
                                proxies=proxy,
                                verify=self.proxy_instance.verify_proxy)
        soup = BeautifulSoup(response.text, 'html.parser')
        # First column of each report table row holds the area code.
        areacode_cells = soup.select("form table td:nth-of-type(1)")
        for area_code_cell in areacode_cells:
            if area_code_cell.string and area_code_cell.string.strip() == area_code:
                # Sibling cells hold the exchange and its block number.
                exchange = area_code_cell.next_sibling.next_sibling.string.strip()
                block_number = area_code_cell.next_sibling.next_sibling.next_sibling.next_sibling.string.strip()
                if area_code not in self.cache:
                    self.cache[area_code] = {}
                    self.cache[area_code][exchange] = {}
                    self.cache[area_code][exchange]['blockNumbers'] = []
                elif exchange not in self.cache[area_code]:
                    self.cache[area_code][exchange] = {}
                    self.cache[area_code][exchange]['blockNumbers'] = []
                self.cache[area_code][exchange]['blockNumbers'].append(
                    block_number)  # Temporarily we store the invalid blocknumbers
        for area_code in self.cache:  # Let's switch invalid blocknumbers for valid ones
            for exchange in self.cache[area_code]:
                self.cache[area_code][exchange]['blockNumbers'] = [n for n in
                                                                   ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"] if
                                                                   n not in self.cache[area_code][exchange][
                                                                       'blockNumbers']]

    def supply(self):
        """Return the list of possible phone numbers matching self.mask."""
        if not re.match("^[0-9X]{10}", self.mask):
            exit(self.colors.RED + "You need to pass a US phone number masked as in: 555XXX1234" + self.colors.ENDC)
        possible_phone_numbers = []
        nanpa_file_url = "https://www.nationalnanpa.com/nanp1/allutlzd.zip"
        file = self._read_or_download_nanpa_zip_archive(nanpa_file_url)
        # Only assigned area codes and exchanges
        assigned_regex = r'\s[0-9A-Z\s]{4}\t.*\t[A-Z\-\s]+\t[0-9\\]*[\t\s]+AS'
        # Area code + exchange
        areacode_exchange_regex = re.sub("X", "[0-9]{1}", "(" + self.mask[:3] + "-" + self.mask[3:6] + ")")
        # Format: [state, areacode-exchange]
        # NOTE(review): _read_or_download_nanpa_zip_archive returns bytes
        # (zipfile.read), while the pattern here is a str; mixing the two
        # raises TypeError on Python 3 — confirm the intended decoding.
        possible_areacode_exchanges = re.findall(r"([A-Z]{2})\s\t" + areacode_exchange_regex + assigned_regex, file)
        remaining_unsolved_digits = self.mask[7:].count("X")
        masked_phone_formatted = self.mask[7:].replace("X", "{}")
        for possible_areacode_exchange in possible_areacode_exchanges:
            state = possible_areacode_exchange[0]
            area_code = possible_areacode_exchange[1].split("-")[0]
            exchange = possible_areacode_exchange[1].split("-")[1]
            if area_code not in self.cache:
                self._cache_valid_block_numbers(state, area_code)
            if self.mask[6] == 'X':  # Check for available block numbers for that area code and exchange
                if area_code in self.cache and exchange in self.cache[area_code]:
                    block_numbers = self.cache[area_code][exchange]['blockNumbers']
                else:
                    block_numbers = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
            else:  # User provided blocknumber
                if area_code in self.cache and exchange in self.cache[area_code] and self.mask[6] not in \
                        self.cache[area_code][exchange]['blockNumbers']:  # User provided invalid block number
                    block_numbers = []
                else:
                    block_numbers = [self.mask[6]]
            for blockNumber in block_numbers:  # Add the rest of random subscriber number digits
                for x in product("0123456789", repeat=remaining_unsolved_digits):
                    possible_phone_numbers.append(area_code + exchange + blockNumber + masked_phone_formatted.format(*x))
        return possible_phone_numbers

    def _read_or_download_nanpa_zip_archive(self, nanpa_file_url):
        """Return allutlzd.txt from the (possibly cached) NANPA zip archive.

        Re-downloads when the temp file is missing or older than 24 hours.
        """
        if not os.path.exists(self.tmp_file) or (time.time() - os.path.getmtime(self.tmp_file)) > (24 * 60 * 60):
            self.logger.info("NANPA file missing or needs to be updated. Downloading now...")
            response = requests.get(nanpa_file_url)
            with open(self.tmp_file, "wb") as code:
                code.write(response.content)
            self.logger.info("NANPA file downloaded successfully")
        with zipfile.ZipFile(self.tmp_file, 'r') as archive:
            return archive.read('allutlzd.txt')
|
# Console tool for running missions at own computer.
#
# Author: CheckiO <igor@checkio.org>
# Last Change:
# URL: https://github.com/CheckiO/checkio-console
"""
:py:mod:`checkio_console.cli` - Command line interface for CheckiO
==============================================================================
"""
import sys
# NOTE(review): hard-coded developer path; remove before distribution.
sys.path.insert(0, '/Users/igorlubimov/sites/code_empyre/client-console')
import signal
import argparse
import logging
import sys
import textwrap
import coloredlogs
from threading import Thread
from tornado.ioloop import IOLoop
from checkio_console import tcpserver
from checkio_console.docker_ext import docker_client
from checkio_console.docker_ext.docker_client import docker
# Module-level logger for this CLI.
logger = logging.getLogger(__name__)
# Options are parsed at import time so `options` is visible to main().
parser = argparse.ArgumentParser(description='Command line interface for CheckiO')
parser.add_argument('-e', '--environment', help='Mission environment name', required=False)
parser.add_argument('-p', '--path', help='Mission files path for build image')
parser.add_argument('-m', '--mission', help='Mission name', required=False)
parser.add_argument('-i', '--input-file', help='Input file this data for task', required=False)
options = parser.parse_args()
def main():
    """The command line interface for the ``checkio`` program."""
    def exit_signal(sig, frame):
        # Gracefully stop the docker container and the IO loop on INT/TERM.
        logging.info("Trying exit")
        docker.stop()
        docker.remove_container()
        io_loop.add_callback(IOLoop.instance().stop)

    signal.signal(signal.SIGINT, exit_signal)
    signal.signal(signal.SIGTERM, exit_signal)
    # Bug fix: the original `if not options:` was always False —
    # argparse.Namespace is always truthy — so usage() was unreachable.
    # Show usage when no option value was supplied at all.
    if not any(vars(options).values()):
        usage()
        sys.exit(0)
    coloredlogs.install()
    logging.info('Run...')
    io_loop = IOLoop.instance()
    if options.input_file:
        thread_tcpserver = Thread(target=tcpserver.thread_start, args=(options.input_file, io_loop))
        thread_tcpserver.start()
    else:
        if not options.mission or not options.environment:
            print('path, mission, and environment is required args')
            sys.exit()
        docker_client.start(options.mission, options.environment, options.path)
    io_loop.start()
def usage():
    """Print a usage message to the terminal."""
    message = """
    Usage: checkio-cli [ARGS]
    The checkio-cli ....
    For more information please refer to the GitHub project page
    at https://github.com/CheckiO/checkio-cli
    """
    print(textwrap.dedent(message).strip())


if __name__ == '__main__':
    main()
|
__author__ = 'karthikb'
def solution():
    """Project Euler 30: sum of all numbers equal to the sum of the
    fifth powers of their digits.

    Bug fix: the original scanned only [1000, 100000) and therefore
    missed 194979. Any qualifying number has at most 6 digits because
    6 * 9**5 = 354294, so scanning [2, 354294] is exhaustive (1 is
    excluded as the problem's trivial non-sum case).
    """
    # Precompute each digit's fifth power once.
    pow5 = {str(d): d ** 5 for d in range(10)}
    solution_set = []
    for i in range(2, 6 * 9 ** 5 + 1):
        if sum(pow5[ch] for ch in str(i)) == i:
            solution_set.append(i)
    return sum(solution_set)
print solution() |
# Colour names (canonical capitalisation) mapped to their hex codes.
hex_colours = {"AliceBlue": '#f0f8ff', "Beige": '#f5f5dc', "Brown": '#a52a2a', 'Black': '#000000', 'Coral': '#ff7f50'}
# Fix: str.capitalize() lower-cases everything after the first letter, so
# "AliceBlue" became "Aliceblue" and could never match its dictionary key.
# A casefolded lookup table makes matching case-insensitive for every entry.
lookup = {name.casefold(): name for name in hex_colours}
hex_colour = input('Please input either AliceBlue, Beige, Brown, Black or Coral: ')
# Empty input ends the loop, as before.
while hex_colour != "":
    key = lookup.get(hex_colour.casefold())
    if key is not None:
        print(key, 'is', hex_colours[key])
    else:
        print("Invalid Choice")
    hex_colour = input('Please input either AliceBlue, Beige, Brown, Black or Coral')
# _*_ coding:utf-8 _*_
# somthing can be improve
# 1. Memory limit manager
# 2. Parallel computation
from __future__ import division
import sys
import os
import pysam
import argparse
class AlignRecords(object):
    """Tiny version of pysam.AlignedSegment"""
    qname=""
    rname=""
    start=1
    end=1
    strand=1
    def __init__(self, alignment_record=None):
        # Copy the fields we need from a pysam.AlignedSegment, if given;
        # otherwise leave the class-level defaults in place.
        if alignment_record is None:
            return
        self.qname = alignment_record.query_name
        self.rname = alignment_record.reference_name
        self.start = alignment_record.reference_start
        self.end = alignment_record.reference_end
        if self.start > self.end:
            self.start, self.end = self.end, self.start
        self.strand = 1 if alignment_record.is_read1 else 2
    def get_length(self):
        """Length of the aligned interval."""
        return self.end - self.start
    def is_valid(self, minlen):
        """True when start < end and the interval is longer than minlen."""
        return self.start < self.end and self.get_length() > minlen
    # qname_strand rname start end
    def __str__(self):
        # Report 1-based coordinates.
        one_based_start = self.start + 1
        one_based_end = self.end + 1
        return "{0}_{1} {2} {3} {4}".format(self.qname, self.strand, self.rname,
                                           one_based_start, one_based_end)
def get_scaffolds_length(samfile):
    """Map scaffold name -> scaffold length from the SAM header's SQ entries."""
    return {entry["SN"]: entry["LN"] for entry in samfile.header["SQ"]}
def is_valid_alignment(alignment_record, minlen, identity):
    """Keep an alignment only if it is long enough and identical enough.

    Identity is (aligned_length - mismatches) / aligned_length, with the
    mismatch count read from the XM tag; aligned_length is the record's
    reference_length (i.e. the mapped length).
    """
    aligned_length = alignment_record.reference_length
    mismatches = alignment_record.get_tag("XM")
    ident = (aligned_length - mismatches) / aligned_length
    # Same two checks as before: mapped length first, then identity.
    return aligned_length >= minlen and ident >= identity
# Filter alignment record if they are unmapped, low identity or too short.
def filter_alignment(samfile, minlen, identity):
    """Group valid alignments by query name, then reference name.

    Returns (records, mean_length): `records` is
    {qname: {rname: [AlignRecords, ...]}} holding only mapped records that
    pass is_valid_alignment(); `mean_length` is the mean inferred query
    length over *all* records, mapped or not.
    NOTE(review): raises ZeroDivisionError when the SAM file is empty.
    """
    records=dict()
    i=0 #count number of record
    lengths=0
    for record in samfile.fetch():
    # for record in samfile.head(100):
        i+=1
        lengths+=record.infer_query_length()
        # remove if unmapped
        if record.is_unmapped:
            continue
        # check if alignment is valid
        if is_valid_alignment(record, minlen, identity):
            valid_record=AlignRecords(record)
            qname=record.query_name #query name
            rname=record.reference_name #reference name
            # Lazily create the nested dict-of-lists levels.
            if qname not in records:
                records[qname]=dict()
            if rname not in records[qname]:
                records[qname][rname]=list()
            records[qname][rname].append(valid_record)
    mean_length=int(lengths/i)
    return records, mean_length
def ajust_record(record1, record2, read_length, minlen):
    """Reconcile two alignments of the same read on one scaffold.

    Same-strand records closer than `read_length` are treated as
    duplicates and the shorter one is dropped.  Opposite-strand records
    that overlap are trimmed so record2 yields to record1; a fully
    contained record is dropped.  Finally either record is dropped when
    is_valid(minlen) fails.  Returns (record1, record2) where each slot
    may be None.  (Name kept as-is; "ajust" == "adjust".)
    """
    # check if one read mapped multiple time to a scaffold
    if record1.strand==record2.strand:
        # check distance between 2 reads
        distance=min(abs(record2.start-record1.end),abs(record1.start-record2.end))
        # if two reads is close, keep the longer.
        if distance < read_length:
            if record1.get_length() < record2.get_length():
                record1=None
            else:
                record2=None
    # check if read pairs are overlap
    else:
        # record1 is in the left of record2
        if (record1.end - record2.start)>=0 and (record2.end - record1.start)>=0 and record2.start> record1.start:
            # determine which record is read_1
            if record1.strand==1:
                record2.start=record1.end+1
            else:
                record2.end=record1.start-1
        # record1 is in the right of record2
        elif (record2.end - record1.start)>=0 and (record1.end - record2.start)>=0 and record1.start>record2.start:
            if record1.strand==1:
                record2.end=record1.start-1
            else:
                record2.start=record1.end+1
        # record2 is in record1
        elif (record2.start - record1.start)>=0 and (record2.end - record1.end)<=0:
            record2=None
        # record1 in record2
        elif (record1.start-record2.start)>=0 and (record1.end - record2.end)<=0:
            record1=None
    # remove record if mapped length < minlen, start>end
    if record1 is not None:
        if not record1.is_valid(minlen):
            record1=None
    if record2 is not None:
        if not record2.is_valid(minlen):
            record2=None
    return (record1, record2)
def ajust_records(records, scaffolds, read_length, minlen):
    """Apply ajust_record() to every pair of records in each group.

    NOTE(review): the nested for-loops iterate records[readid][scaffold]
    while remove() below may shrink that same list.  The membership
    guard skips already-removed records, but removing during iteration
    can still make the iterators skip elements -- verify output against
    expectations.  `scaffolds` is accepted but unused here.
    """
    for readid in records:
        for scaffold in records[readid]:
            # if there are only one record in a group, do nothing
            # else ajust position
            if len(records[readid][scaffold])<=1:
                continue
            else:
                for record1 in records[readid][scaffold]:
                    for record2 in records[readid][scaffold]:
                        if record1==record2:
                            continue
                        # Skip pairs where either side was already removed.
                        if (record1 not in records[readid][scaffold]) or (record2 not in records[readid][scaffold]):
                            continue
                        index1=records[readid][scaffold].index(record1)
                        index2=records[readid][scaffold].index(record2)
                        records_tmp=ajust_record(record1, record2, read_length, minlen)
                        if records_tmp[0] is not None:
                            records[readid][scaffold][index1]=records_tmp[0]
                        if records_tmp[1] is not None:
                            records[readid][scaffold][index2]=records_tmp[1]
                        if records_tmp[0] is None:
                            records[readid][scaffold].remove(record1)
                        if records_tmp[1] is None:
                            records[readid][scaffold].remove(record2)
    return records
def write_length_file(scaffolds, output_file):
    """Write one "name length" line per scaffold to `output_file`.

    `scaffolds` maps scaffold name -> length, as built by
    get_scaffolds_length().  Fix: removed the redundant close() call --
    the with-block already closes the handle on exit.
    """
    with open(output_file, "w") as output_handle:
        for scaffold in scaffolds:
            output_handle.write("{0} {1}\n".format(scaffold, scaffolds[scaffold]))
def strands_count(records):
    """Count records per strand over all scaffolds of one read.

    `records` maps scaffold name -> list of AlignRecords-like objects
    carrying a .strand attribute (1 or 2); returns {1: n1, 2: n2}.
    """
    counts = {1: 0, 2: 0}
    for record_list in records.values():
        for record in record_list:
            counts[record.strand] += 1
    return counts
def write_final_file(records, output_file):
    """Write one line per kept record: "<record> <per-strand count>".

    The per-strand count comes from strands_count() over the read's
    record groups.  Fix: removed the redundant close() -- the with-block
    already closes the handle.
    """
    with open(output_file, "w") as output_handle:
        for readid in records:
            strands = strands_count(records[readid])
            for scaffold in records[readid]:
                for record in records[readid][scaffold]:
                    output_handle.write("{0} {1}\n".format(str(record), strands[record.strand]))
def main(sam_path, final_path, len_path, minlen, ident):
    """Pipeline: read SAM -> filter -> adjust -> write final/len files."""
    # check if file is exist
    if not os.path.exists(sam_path):
        print("Can't find samfile at: {0}".format(sam_path))
        return
    # read samfile
    samfile=pysam.AlignmentFile(sam_path,"r")
    scaffolds=get_scaffolds_length(samfile)
    records, mean_readlength=filter_alignment(samfile, minlen, ident)
    samfile.close()
    # Adjust overlapping/duplicate records before writing output.
    records=ajust_records(records,scaffolds,mean_readlength, minlen)
    write_final_file(records, final_path)
    write_length_file(scaffolds, len_path)
    print("done")
if __name__=="__main__":
    # python python/samprocess.py -sam data/MH0015.sam -f data/MH0015.final -l data/MH0015.len -i 0.9 -m 35
    if len(sys.argv)==1:
        print("python samprocess.py [-h] [-sam samfile] [-f final file] [-l len file] [-i identity] [-m minlen]")
        quit()
    parser = argparse.ArgumentParser()
    parser.add_argument('-sam', action="store", default="input.sam", dest="input_file",help='Input samfile')
    parser.add_argument('-f', action="store", default="output.final", dest="final_file",help='Final file')
    parser.add_argument('-l', action="store", default="output.len", dest="len_file",help='Len file')
    parser.add_argument('-i', action="store", default=0.9, dest="ident",help='Identity')
    parser.add_argument('-m', action="store", dest="minlen",help='Min length')
    args = parser.parse_args()
    # NOTE(review): -m has no default, so int(args.minlen) raises TypeError
    # when the flag is omitted -- consider default= or required=True.
    main(args.input_file, args.final_file, args.len_file,int(args.minlen),float(args.ident))
# reading the fasta file
# NOTE(review): `io` is imported but never used below.
import io
# NOTE(review): the file handle is never closed; consider a with-block.
fasta = open('computing-gc-content/rosalind_gc.txt',"r").read().splitlines()
# print(fasta)
# storing the name best seq
# Trackers for the record with the highest GC content, filled by the
# scan loop at the bottom of the file.
best_seq_name = None
best_seq = None
best_gc_score = 0
def calc_gc_score(seq):
    """Return the GC content of `seq` as a percentage (0.0 - 100.0).

    Fix: the original raised ZeroDivisionError on an empty sequence;
    an empty sequence now scores 0.0.
    """
    if not seq:
        return 0.0
    gc = sum(1 for base in seq if base in "GC")
    return (gc / len(seq)) * 100
def get_seq(i):
    """
    input get the starting of the seq
    return: the seq

    Joins consecutive non-header lines of the global `fasta`, starting
    at index i, until the next '>' header (or the list runs out).
    NOTE(review): `k` indexes the fasta[i:] slice but is compared with
    len(fasta) of the whole list, so the inner break fires late or never;
    the loop still ends via the '>' branch or exhaustion.  Also, an empty
    line would raise IndexError at line[0] -- confirm the input has none.
    """
    seq = []
    for k,line in enumerate(fasta[i:]):
        if line[0] != '>':
            seq.append(line)
            if k+1 == len(fasta):
                break
        else:
            break
    return "".join(seq)
# Scan the FASTA lines; each '>' header starts a new record whose body is
# gathered by get_seq().  NOTE(review): line[0] raises IndexError on an
# empty line -- assumes the input file has none.
for i, line in enumerate(fasta):
    if line[0] == '>':
        seqname = line[1:]
        seq = get_seq(i+1)
        gc_score = calc_gc_score(seq)
        # Keep the record with the highest GC percentage.
        if gc_score > best_gc_score:
            best_gc_score = gc_score
            best_seq = seq
            best_seq_name = seqname
print(best_seq_name)
print(best_seq)
print(best_gc_score)
from os import listdir
from os.path import isfile, join
from os import system
import sys
import math
# Global word lists and frequency dictionaries shared by the functions
# below; each process*/generate* function mutates these in place.
stopwords = []
spamwords = []
genuinewords =[]
testwords =[]
spamdict = {}
genuinedict = {}
testdict={}
# pgbr ("page-break") flag: True runs interactively, pausing between
# stages with raw_input(); cleared when any CLI argument is given.
pgbr = True
def processspam(files):
    """Tokenise every named file in ./spam into the global `spamwords`.

    Each word is lower-cased and stripped of non-alphabetic characters;
    words shorter than 3 characters or present in the global `stopwords`
    are dropped.  Each raw line is echoed, as before.
    Fix: the Python 2 `print t` statement is a SyntaxError on Python 3;
    print(line) produces identical output on both.  Also replaced the
    manual index loops with direct iteration.
    """
    spampath = "./spam"
    for name in files:
        with open(join(spampath, name), "r") as fs:
            for line in fs:
                print(line)
                for raw in line.split():
                    word = ''.join(c for c in raw.lower() if c.isalpha())
                    if len(word) <= 2:
                        continue
                    if word not in stopwords:
                        spamwords.append(word)
def processgenuine(files):
    """Tokenise every named file in ./genuine into the global `genuinewords`.

    Same filtering as processspam(): lower-case, alphabetic-only, length
    > 2, not a stopword.  Each raw line is echoed, as before.
    Fix: Python 2 `print t` replaced with print(line) (identical output
    on both versions); manual index loops replaced with iteration.
    """
    genuinepath = "./genuine"
    for name in files:
        with open(join(genuinepath, name), "r") as fs:
            for line in fs:
                print(line)
                for raw in line.split():
                    word = ''.join(c for c in raw.lower() if c.isalpha())
                    if len(word) <= 2:
                        continue
                    if word not in stopwords:
                        genuinewords.append(word)
def processtest(singlefile):
    """Tokenise one file from ./test into the global `testwords` list,
    using the same lower-case/alpha-only/len>2/stopword filters as the
    training passes.  In interactive (pgbr) mode each line is echoed and
    the user must press Enter to continue."""
    testpath = "./test"
    with open(join(testpath,singlefile), "r") as fs:
        while True:
            t = fs.readline()
            if len(t) == 0: break
            if pgbr: print "\n\nTEST FILE " + singlefile
            if pgbr: print "\n"+t
            if pgbr: raw_input()
            word = t.split()
            for l in range(0, len(word)):
                word[l] = word[l].lower()
                word[l] = ''.join(e for e in word[l] if e.isalpha())
                if len(word[l]) <= 2: continue;
                if word[l] not in stopwords:
                    testwords.append(word[l])
def readstopwords():
    """Load stopwords.txt into the global `stopwords` list.

    NOTE(review): reading stops at the first *blank* line (strip() makes
    it empty), not only at end-of-file -- confirm stopwords.txt contains
    no internal blank lines.
    """
    with open("stopwords.txt","r") as fs:
        while True:
            t= fs.readline()
            t=t.strip()
            if len(t) == 0: break
            stopwords.append(t)
def generatespamdict():
    """Tally word frequencies from the global `spamwords` into `spamdict`."""
    for word in spamwords:
        spamdict[word] = spamdict.get(word, 0) + 1
def generategenuinedict():
    """Tally word frequencies from the global `genuinewords` into `genuinedict`."""
    for word in genuinewords:
        genuinedict[word] = genuinedict.get(word, 0) + 1
def generatetestdict():
    """Tally word frequencies from the global `testwords` into `testdict`."""
    for word in testwords:
        testdict[word] = testdict.get(word, 0) + 1
def eucledean(test, data):
    """Pseudo-Euclidean overlap score: sqrt of the sum of test[x]**2 over
    the words x present in both dictionaries.

    Not the textbook Euclidean distance -- the commented-out form in the
    original shows (test[x]-data[x])**2 was deliberately replaced by
    test[x]**2, so a *larger* value means more shared vocabulary.
    """
    shared_squares = (test[word] ** 2 for word in test if word in data)
    return math.sqrt(sum(shared_squares))
# ---- script driver ----
# Any CLI argument switches off interactive ("page-break") mode.
if len(sys.argv)>1: pgbr = False
# Batch mode first folds the labelled test files into the training folders.
if not pgbr: system("cp ./test/spam* ./spam/")
if not pgbr: system("cp ./test/genuine* ./genuine/")
if pgbr: system("clear")
if pgbr: print "SPAM FILES"
if pgbr: raw_input()
spampath ="./spam"
spamfiles = [f for f in listdir(spampath) if isfile(join(spampath,f)) ]
processspam(spamfiles)
if pgbr: raw_input()
if pgbr: system("clear")
print "GENUINE FILES"
if pgbr: raw_input()
genuinepath ="./genuine"
genuinefiles = [k for k in listdir(genuinepath) if isfile(join(genuinepath, k))]
processgenuine(genuinefiles)
if pgbr: raw_input()
testpath = "./test"
testfiles = [l for l in listdir(testpath) if isfile(join(testpath,l))]
# Build the training dictionaries before classifying the test set.
readstopwords()
generatespamdict()
generategenuinedict()
system("clear")
print "SPAMWORDS"
if pgbr: raw_input()
for t in spamwords: print t+"\t",
if pgbr: raw_input()
system("clear")
print "GENUINEWORDS"
if pgbr: raw_input()
for t in genuinewords: print t+"\t",
if pgbr: raw_input()
system("clear")
print "STOP WORDS"
if pgbr: raw_input()
for t in stopwords: print t+"\t",
if pgbr: raw_input()
if pgbr: system("clear")
print "SPAM DICTIONARY GENERATED"
if pgbr: raw_input()
for t in spamdict: print t+":"+str(spamdict[t])+"\t",
if pgbr: raw_input()
if pgbr: system("clear")
print "GENUINE DICTIONARY GENERATED"
if pgbr: raw_input()
for t in genuinedict: print t+":"+str(genuinedict[t])+"\t",
if pgbr: raw_input()
if pgbr: system("clear")
print "Starting to classify TEST DATA"
if pgbr: raw_input()
# Interactive classification: show each verdict, ask for confirmation.
if pgbr:
    for k in testfiles:
        system("clear")
        testdict = {}
        testwords = []
        processtest(k)
        generatetestdict()
        # Larger eucledean() overlap with a training dict wins.
        spamdistance = eucledean(testdict, spamdict)
        genuinedistance = eucledean ( testdict, genuinedict)
        print "For " + str(k) + " SPAMDISTANCE is " + str(spamdistance) + " and GENUINEDISTANCE is " + str(genuinedistance) + "\n"
        if spamdistance > genuinedistance: result = "SPAM"
        elif genuinedistance > spamdistance: result = "GENUINE"
        else: result = "UNKOWN"
        print "The file " + k + " is classified as " + result + " is that correct? (Y/N)"
        opinion = raw_input()
        if opinion.lower() == "y" :
            print "Thank You"
        else:
            print "Please enter the classification required for the file. (S) Spam or (G) Genuine"
            changeresult =raw_input()
            if changeresult.lower() == "s" : print "The file is now classified as SPAM"
            else: print "The file is now classified as GENUINE"
        raw_input()
# Batch classification: print every verdict without pausing.
else:
    system("clear")
    for k in testfiles:
        testdict = {}
        testwords = []
        processtest(k)
        generatetestdict()
        spamdistance = eucledean(testdict, spamdict)
        genuinedistance = eucledean ( testdict, genuinedict)
        if spamdistance > genuinedistance: result = "SPAM"
        elif genuinedistance > spamdistance: result = "GENUINE"
        else: result = "UNKOWN"
        print "The file " + k + " is classified as " + result,
        print "spamdistance" + str(spamdistance) + "genuinedistance" + str(genuinedistance)
|
def sortVector(nums):
    """Sort `nums` in place (ascending) and print the sorted list."""
    nums[:] = sorted(nums)
    print(nums)
# Demo: sort a sample list in place and print it.
vector = [1,5,2,4,8,2]
sortVector(vector)
# Commented-out one-liner: the same script packed into an exec() string.
# exec("""\ndef sortVector(nums):\n    nums.sort()\n    print(nums)\n\nvector = [1,5,2,4,8,2]\nsortVector(vector)\n""")
#2. [1, 'a',3.6000000000000001, 2, 'b', '1', 1.3999999999999999, '2'] sort this list starting with all the numbers sorted and then the characters sorted. The code should be in one line.
def sort(a):
    """Return `a` with the numeric elements sorted first, then the
    string elements sorted -- the task stated in the comment above.

    Fix: the original stringified every element and sorted
    lexicographically, which (a) ordered numbers as text (e.g. "10"
    before "2") and (b) restored only integers, leaving floats behind as
    repr strings like '1.3999999999999999'.  Sorting the two groups
    separately keeps every element's original type.
    """
    numbers = sorted(x for x in a if not isinstance(x, str))
    strings = sorted(x for x in a if isinstance(x, str))
    return numbers + strings
# Sample input mixing ints, floats and single-character strings.
a = [1,'a',3.6000000000000001,2,'b','1',1.3999999999999999,'2']
print(sort(a))
|
import struct
import gzip
from enum import Enum
from srmap.actor import Actor
from srmap.property import Property
from srmap.tilemap import Tilemap, Name, Size
class Theme(Enum):
    """Stage identifier strings as stored in level files, keyed by the
    human-readable theme name."""
    PROTOTYPE = 'StageVR'
    METRO = 'StageMetro'
    SS_ROYALE = 'StageShip'
    MANSION = 'StageMansion'
    PLAZA = 'StageCity'
    FACTORY = 'StageIndustry'
    THEME_PARK = 'StageThemePark'
    POWERPLANT = 'StagePowerplant'
    SILO = 'StageSilo'
    LIBRARY = 'StageUniversity'
    NIGHTCLUB = 'StageNightclub'
    ZOO = 'StageZoo'
    SWIFT_PEAKS = 'StageSki'
    CASINO = 'StageCasino'
    FESTIVAL = 'StageFestival'
    RESORT = 'StageResort'
    AIRPORT = 'StageAirport'
class Level:
    """A level: actors, tilemaps and metadata, (de)serialized to a
    gzip-compressed little-endian binary file.

    On-disk layout (mirrored by load()/save()): version, actor list,
    tilemap list, theme string, singleplayer flag (+ bomb timer when
    set), author/name/description, workshop id.  Every string is stored
    as a signed length byte followed by ASCII bytes.
    NOTE(review): the '<b' length prefix caps each string at 127 bytes --
    confirm against the game's own serializer.
    """
    def __init__(self, version=6, actors=None, tilemaps=None, theme=Theme.PROTOTYPE, is_singleplayer=False,
                 bomb_timer=0, author='Unknown', name='Untitled', description='', workshop_id=0):
        self.version = version
        self.actors = [] if actors is None else actors
        # Default tilemap set: two backgrounds, shading and collision.
        self.tilemaps = [Tilemap(Name.BACKGROUND_0, Size.SMALL), Tilemap(Name.BACKGROUND_1, Size.SMALL),
                         Tilemap(Name.SHADING, Size.SMALL),
                         Tilemap(Name.COLLISION, Size.SMALL)] if tilemaps is None else tilemaps
        # Store the raw stage string; accepts either a Theme or a str.
        self.theme = theme.value if isinstance(theme, Theme) else theme
        self.is_singleplayer = is_singleplayer
        self.bomb_timer = bomb_timer
        self.author = author
        self.name = name
        self.description = description
        self.workshop_id = workshop_id
    def load(self, path):
        """Populate this Level from the gzip-compressed level file at `path`."""
        with gzip.open(path, 'rb') as f:
            self.version = struct.unpack('<i', f.read(4))[0]
            self.actors = []
            num_actors = struct.unpack('<i', f.read(4))[0]
            for a in range(num_actors):
                actor_position = struct.unpack('<ff', f.read(8))
                actor_size = struct.unpack('<ff', f.read(8))
                # Length-prefixed ASCII string: one signed byte, then data.
                actor_type = f.read(struct.unpack('<b', f.read(1))[0]).decode('ascii')
                actor_properties = []
                actor_num_properties = struct.unpack('<i', f.read(4))[0]
                for p in range(actor_num_properties):
                    property_name = f.read(struct.unpack('<b', f.read(1))[0]).decode('ascii')
                    property_value = f.read(struct.unpack('<b', f.read(1))[0]).decode('ascii')
                    actor_properties.append(Property(property_name, property_value))
                self.actors.append(Actor(actor_position, actor_size, actor_type, actor_properties))
            self.tilemaps = []
            num_tilemaps = struct.unpack('<i', f.read(4))[0]
            for t in range(num_tilemaps):
                tilemap_name = f.read(struct.unpack('<b', f.read(1))[0]).decode('ascii')
                tilemap_size = struct.unpack('<ii', f.read(8))
                # Tiles are read row-by-row as 32-bit ints, size[0] x size[1].
                tilemap = []
                for y in range(tilemap_size[0]):
                    tilemap_col = []
                    for x in range(tilemap_size[1]):
                        tilemap_col.append(struct.unpack('<i', f.read(4))[0])
                    tilemap.append(tilemap_col)
                self.tilemaps.append(Tilemap(tilemap_name, tilemap_size, tilemap))
            self.theme = f.read(struct.unpack('<b', f.read(1))[0]).decode('ascii')
            self.is_singleplayer = False if struct.unpack('<b', f.read(1))[0] == 0 else True
            # The bomb timer is only present for singleplayer levels.
            if self.is_singleplayer:
                self.bomb_timer = struct.unpack('<i', f.read(4))[0]
            self.author = f.read(struct.unpack('<b', f.read(1))[0]).decode('ascii')
            self.name = f.read(struct.unpack('<b', f.read(1))[0]).decode('ascii')
            self.description = f.read(struct.unpack('<b', f.read(1))[0]).decode('ascii')
            self.workshop_id = struct.unpack('<q', f.read(8))[0]
    def save(self, path):
        """Serialize this Level and write it gzip-compressed to `path`."""
        data = b''
        num_actors = len(self.actors)
        data += struct.pack('<ii', self.version, num_actors)
        for i in range(num_actors):
            actor = self.actors[i]
            num_properties = len(actor.properties)
            data += struct.pack('<ffffb{}si'.format(len(actor.type)), actor.position[0], actor.position[1],
                                actor.size[0], actor.size[1], len(actor.type), actor.type.encode('ascii'),
                                num_properties)
            for j in range(num_properties):
                property = actor.properties[j]
                data += struct.pack('<b{}sb{}s'.format(len(property.name), len(property.value)), len(property.name),
                                    property.name.encode('ascii'), len(property.value), property.value.encode('ascii'))
        num_tilemaps = len(self.tilemaps)
        data += struct.pack('<i', num_tilemaps)
        for i in range(num_tilemaps):
            t_data = b''
            tilemap = self.tilemaps[i]
            t_data += struct.pack('<b{}sii'.format(len(tilemap.name)), len(tilemap.name), tilemap.name.encode('ascii'),
                                  tilemap.width, tilemap.height)
            # NOTE(review): outer loop runs over width, inner over height,
            # mirroring load()'s (size[0], size[1]) order -- confirm the
            # width/height naming in Tilemap matches this.
            for y in range(tilemap.width):
                c_data = b''
                for x in range(tilemap.height):
                    c_data += struct.pack('<i', tilemap.tilemap[y][x])
                t_data += c_data
            data += t_data
        data += struct.pack('<b{}sb'.format(len(self.theme)), len(self.theme), self.theme.encode('ascii'),
                            self.is_singleplayer)
        if self.is_singleplayer:
            data += struct.pack('<i', self.bomb_timer)
        data += struct.pack('<b{}sb{}sb{}sq'.format(len(self.author), len(self.name), len(self.description)),
                            len(self.author), self.author.encode('ascii'), len(self.name), self.name.encode('ascii'),
                            len(self.description), self.description.encode('ascii'), self.workshop_id)
        with gzip.open(path, 'wb') as f:
            f.write(data)
    def tilemap_index(self, tilemap_name):
        """Return the index of the tilemap named `tilemap_name`, or
        (implicitly) None when no tilemap matches."""
        for index, tm in enumerate(self.tilemaps):
            if tm.name == tilemap_name:
                return index
|
# Database settings
# GeoDjango/PostGIS backend.  The connection values (INSTANCE_NAME,
# DATABASE_USER, ...) are expected to be defined earlier in the settings
# module this snippet belongs to.
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': INSTANCE_NAME,
        'USER': DATABASE_USER,
        'PASSWORD': DATABASE_PASSWORD,
        'HOST': DATABASE_HOST,
        'PORT': DATABASE_PORT,
    }
}
|
#DO NOT MODIFY THE CODE IN THIS FILE
#File: Proj03.py
from Proj03Runner import Runner
import turtle
import random
window = turtle.Screen()
turtle.setup(300,300)
rand = random.randrange(0,2) #get a random number
#set object's colors based on the random number
if(rand%2==0):
    #rand is even
    color01 = "red"
    color02 = "green"
else:
    #rand is odd
    color01 = "green"
    color02 = "red"
objA = Runner(color01) #create one object
objB = Runner(color02) #create a second object
#Call the run method on each object and unpack
# the tuple that is returned.
# Both calls return the same student name, so the second assignment wins.
colorA,turtleA,yourName = objA.run()
colorB,turtleB,yourName = objB.run()
window.title(yourName)
#Manipulate the turtles to draw a picture.
turtleA.color(colorA)
turtleA.forward(50)
turtleA.circle(50)
# Second turtle draws the mirror image in the other colour.
turtleB.color(colorB)
turtleB.right(180)
turtleB.forward(50)
turtleB.circle(50)
tempTurtle = turtle.Turtle()
tempTurtle.shape("turtle")
tempTurtle.left(90)
tempTurtle.color(color01)
tempTurtle.stamp()
#Pause and wait for the user to dismiss the window.
window.mainloop()
from django.urls import path
from account import views
from jwt_token.views import CustomizedTokenObtainPairView, CustomizedTokenRefreshView
# Account URL routes: JWT login/refresh plus session, password and avatar
# endpoints.  The commented paths are the old session-based auth views,
# kept here for reference.
urlpatterns = [
    # path('register/', views.registration_view),
    # path('login/', views.login_view),
    path('verify_session/', views.verify_session_view),
    # path('logout/', views.logout_view),
    # jwt_token login and refresh
    path('login/', CustomizedTokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('refresh/', CustomizedTokenRefreshView.as_view(), name='token_refresh'),
    # #
    path('reset_password/', views.reset_password_view),
    path('update_avatar/', views.update_avatar_view)
]
# Code-Jam-style driver: first input line is the number of cases; each
# case is a string of '+'/'-'.  Scanning from the right, every change of
# sign relative to `toggle` costs one operation (a trailing run of '+'
# costs nothing) -- presumably the pancake-flipping problem; confirm.
for case in range(1, int(input())+1):
    tmp = []
    for i in input():
        if i == '+':
            tmp.append(1)
        else:
            tmp.append(-1)
    # toggle is the sign the current suffix will carry after the flips
    # counted so far; a mismatch flips it and increments the count.
    toggle = 1
    count = 0
    for i in tmp[::-1]:
        if i * toggle == -1:
            toggle *= -1
            count += 1
    print("Case #"+str(case)+": "+str(count))
|
import datetime
def date():
    """Return the current local date formatted as DD/MM/YYYY."""
    return datetime.datetime.now().strftime("%d/%m/%Y")
def time():
    """Return the current local time formatted as HH:MM:SS."""
    return datetime.datetime.now().strftime("%H:%M:%S")
|
#--------------------------------------------------------------
# Script: Calculates the amount of time a procedure takes
# Version: 1.0
#--------------------------------------------------------------
import time
def timeExecution(code):
    """Evaluate the expression string `code` and return
    (result, runtime_in_seconds).

    Fix: time.clock() was deprecated in Python 3.3 and removed in 3.8;
    time.perf_counter() is the documented replacement for measuring
    elapsed wall-clock performance.
    WARNING: eval() executes arbitrary code -- only pass trusted strings.
    """
    start = time.perf_counter()
    result = eval(code)
    runtime = time.perf_counter() - start
    return result, runtime
#-------------------------------------------------------------
# Test function
#-------------------------------------------------------------
def spinLoop(n):
    """Busy-loop n times; used purely as a timing workload."""
    counter = 0
    while counter < n:
        counter += 1
# Print only the measured runtime (index 1 of the returned tuple).
# Fix: Python 2 print statements converted to print() calls (identical
# output for a single argument on both versions).  Note the second call
# spins 10**9 iterations and takes a long while by design.
print(timeExecution('spinLoop(1000)')[1])
print(timeExecution('spinLoop(10 ** 9)')[1])
|
import re
# NOTE(review): `re` is imported but unused -- the extraction below uses
# plain str.split()/str.replace().
# Sample redirect page in which the target URL is assembled in JavaScript.
text = r'''
<meta content="always" name="referrer">
<script>
var url = '';
url += 'http://mp.weixin.qq.com/s?src=11&timestamp=1553504704&ver=1506&signature=ZQCxwQwyZdl9l5G2Ue9mL90DZjQLH8JsaU5BWMOSZi1VpX0Dkjv82EqQyuvEARuNJ41aHbzrww22mn-eHfCRdhFPr7-I54y6Z8fuB3kpk5XO43oWrNsD60ZK8P7WorVr&new=1';
url += '';
url += '';
url += '';
url += '';
url += '';
url += '';
url += '';
url += '';
url += '';
url += '';
url.replace("@", "");
window.location.replace(url)
</script>'''
# Take everything after the first "url += '" and drop the trailing "';".
print(text.split('url += \'')[1].replace("';", ""))
from django.apps import AppConfig
class AppQuestionanswerConfig(AppConfig):
    """Django application configuration for the App_QuestionAnswer app."""
    name = 'App_QuestionAnswer'
|
# Programmed By Christopher Philip Orrell 18/10/2018.
# This script compares different sorting algorithms.
from random import randint
from timeit import repeat
# Fixed sample data actually used by the timing runs under __main__.
newarray = [99,21,45,22,0.1,22,36,889,25,44,66,33,55,44,88,77,55,22,11,44,555,66,88,44,1,11,88,33,556,32,21,18,94,63,72,46,64,852,53,456,765,256,125,115,114,223]
# NOTE(review): this module-level random array is rebuilt under __main__
# and never passed to the timing runs -- effectively unused here.
array = [randint(0, 1000000) for i in range(1000)]
def run_sorting_algorithm(algorithm, array):
    """Time `algorithm` (a function name) on `array` and print the best
    of three timed runs.

    The function is imported from __main__ unless it is the built-in
    `sorted`; each timed run invokes it 10 times.
    """
    setup_code = "" if algorithm == "sorted" else f"from __main__ import {algorithm}"
    stmt = f"{algorithm}({array})"
    # Three independent runs of 10 calls each; report the fastest.
    times = repeat(setup=setup_code, stmt=stmt, repeat=3, number=10)
    print(f"Algorithm: {algorithm}. Minimum execution time: {min(times)}")
def bubble_sort(array):
    """Sort `array` in place with bubble sort and return it.

    Terminates early as soon as a full pass performs no swap, since the
    whole array must then already be ordered.
    """
    n = len(array)
    for pass_num in range(n):
        swapped = False
        # Each pass bubbles the largest remaining item to the end, so
        # the region still needing scanning shrinks by one every pass.
        for idx in range(n - pass_num - 1):
            if array[idx] > array[idx + 1]:
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
                swapped = True
        if not swapped:
            break
    return array
def insertion_sort(array):
    """Sort `array` in place with insertion sort and return it."""
    for i in range(1, len(array)):
        current = array[i]
        pos = i
        # Shift larger elements of the sorted prefix one slot to the
        # right until the insertion point for `current` is found.
        while pos > 0 and array[pos - 1] > current:
            array[pos] = array[pos - 1]
            pos -= 1
        array[pos] = current
    return array
def merge(left, right):
    """Merge two individually sorted lists into one sorted list."""
    # An empty side needs no merging; hand back the other list directly.
    if not left:
        return right
    if not right:
        return left
    merged = []
    i = j = 0
    # Repeatedly take the smaller head element until one side runs out.
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # The surviving tail is already sorted; append it wholesale.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def merge_sort(array):
    """Return a sorted copy of `array` using top-down merge sort."""
    # Zero or one element is already sorted.
    if len(array) < 2:
        return array
    mid = len(array) // 2
    left_half = merge_sort(array[:mid])
    right_half = merge_sort(array[mid:])
    return merge(left_half, right_half)
ARRAY_LENGTH = 10000
if __name__ == "__main__":
    # Generate an array of `ARRAY_LENGTH` items consisting
    # of random integer values between 0 and 999
    array = [randint(0, 1000) for i in range(ARRAY_LENGTH)]
    # Call the function using the name of the sorting algorithm
    # and the array you just created
    # NOTE(review): the runs below time `newarray` (46 items), not the
    # freshly generated `array` -- confirm which input was intended.
    run_sorting_algorithm(algorithm="bubble_sort", array=newarray)
    run_sorting_algorithm(algorithm="insertion_sort", array=newarray)
    run_sorting_algorithm(algorithm="merge_sort", array=newarray)
|
# Generated by Django 3.1.6 on 2021-03-23 13:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: makes Order.product_category a nullable
    SET_NULL foreign key and redefines the Order.status choices."""
    dependencies = [
        ('accounts', '0013_auto_20210323_1317'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='product_category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.product_category'),
        ),
        migrations.AlterField(
            model_name='order',
            name='status',
            field=models.CharField(blank=True, choices=[('Pending', 'Pending'), ('Returned', 'Returned'), ('Delivered', 'Delivered')], max_length=200, null=True),
        ),
    ]
|
# Create a while loop that will repetitively ask for a number.
# If the number entered is 9999 stop the loop.
# Sentinel-controlled loop: 9999 ends the program, anything else echoes.
# NOTE(review): non-numeric input makes int() raise ValueError.
while True:
    answer = int(input('Enter a number, 9999 to end: '))
    if answer == 9999:
        break
    else:
        print('Your number was: ', answer)
|
from django.contrib.auth.mixins import (
LoginRequiredMixin,
UserPassesTestMixin
)
from django.views.generic import (
ListView,
DetailView,
TemplateView
)
from django.views.generic.edit import (
UpdateView,
DeleteView,
CreateView
)
from taggit.models import Tag
from django.shortcuts import render, get_object_or_404
from django.urls import reverse_lazy
from .models import Note
# Create your views here.
class HomeView(TemplateView):
    """Render the static landing page."""
    template_name = "home.html"
class NoteListView(LoginRequiredMixin, ListView):
    """List the notes owned by the logged-in user (login required)."""
    context_object_name = "notes"
    template_name = 'note_list.html'
    login_url = 'login'
    def get_queryset(self):
        # Only the requesting user's own notes are listed.
        return Note.objects.filter(author=self.request.user)
class NoteDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):
    """Show one note; UserPassesTestMixin restricts access to its author."""
    model = Note
    context_object_name = 'note_detail'
    template_name = 'note_detail.html'
    login_url = 'login'
    def test_func(self):
        # Grant access only to the note's author.
        obj = self.get_object()
        return obj.author == self.request.user
class NoteCreateView(LoginRequiredMixin, CreateView):
    """Create a note; the author is set from the current user."""
    model = Note
    template_name = 'new_note.html'
    fields = ['title', 'body', 'tags'] # Specifies field to be displayed
    login_url = 'login'
    def form_valid(self, form):
        # Stamp the new note with the requesting user before saving.
        form.instance.author = self.request.user
        return super().form_valid(form)
class NoteUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit a note; only its author passes the access test."""
    model = Note
    template_name = 'update_note.html'
    fields = ['title', 'body', 'tags']
    login_url = 'login'
    def test_func(self):
        # Grant access only to the note's author.
        obj = self.get_object()
        return obj.author == self.request.user
class NoteDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a note (author only); redirects home on success."""
    model = Note
    template_name = 'delete_note.html'
    success_url = reverse_lazy('home')
    login_url = 'login'
    def test_func(self):
        # Grant access only to the note's author.
        obj = self.get_object()
        return obj.author == self.request.user
# class TagView(ListView):
# queryset = Note.objects.filter(tags=) |
import pandas as pd
import os
import string
def dfconcat(sdir, fname):
    """Concatenate the CSV named `fname` found in each immediate
    subdirectory of `sdir` into one DataFrame.

    Subdirectories lacking the file are skipped.  pd.concat raises
    ValueError when no matching file exists at all (same as before).
    Fix: paths are built with os.path.join instead of manual
    string + os.path.sep concatenation.
    """
    frames = []
    for entry in os.listdir(sdir):
        subdir = os.path.join(sdir, entry)
        if os.path.isdir(subdir):
            csv_path = os.path.join(subdir, fname)
            # print(csv_path)
            if os.path.exists(csv_path):
                frames.append(pd.read_csv(csv_path))
    return pd.concat(frames)
# Source and target directories are identical: merged CSVs are written
# into the root of the directory whose *subdirectories* are scanned, so
# outputs are not re-read on later iterations.
sdir = 'C:\\Users\\cs\\Downloads\\OpenData_output'
tdir = 'C:\\Users\\cs\\Downloads\\OpenData_output'
# NOTE(review): [:26] selects the whole uppercase alphabet -- redundant.
# Each letter that has no matching files makes pd.concat raise ValueError.
for n in string.ascii_uppercase[:26]:
    fname = n + '_lvr_land_A.CSV'
    dfall = dfconcat(sdir, fname)
    dfall.to_csv(tdir + os.path.sep + fname)
# dflist = []
# for f in os.listdir(sdir):
#     path = sdir+os.path.sep+f
#     if os.path.isdir(path):
#         path = path + os.path.sep+'A_lvr_land_A.CSV'
#         #print(path)
#         df = pd.read_csv(path)
#         dflist.append(df)
#
# print(len(dflist))
# dfall = pd.concat(dflist)
# print(dfall.head())
# Summary of the last letter's merged frame only.
print(dfall.describe())
# dfall.to_csv('A_lvr_land_A.csv')
|
# encoding=UTF-8
#!flask/bin/python
from cassandra.cluster import Cluster
from cassandra.policies import DCAwareRoundRobinPolicy
from cassandra.auth import PlainTextAuthProvider
from cassandra.query import BatchStatement, SimpleStatement
import pandas as pd
from pyspark.sql.types import StructType
class CassandraType(object):
    """Integer constants selecting which cluster contact points to use."""
    PRODUCTION = 0
    TEST = 1
    TEST_DOCKER = 2
class CassandraDAO(object):
# you have to install following items :
# a. python-Cassandra Connector
# b. sqlJDBC.jars
    def __init__(self, type):
        """Pick contact points for the given environment `type`
        (CassandraType constant) and open a driver session.

        NOTE(review): parameter name `type` shadows the builtin.
        """
        # print('runing father.__init__')
        if type == CassandraType.PRODUCTION:
            self.contact_points=['192.168.0.1','192.168.0.2']
            self.contact_points_str = "192.168.0.1,192.168.0.2"
        elif type == CassandraType.TEST:
            self.contact_points=['192.168.0.3','192.168.0.4']
            self.contact_points_str = "192.168.0.3,192.168.0.4"
        else:
            # Any other value falls through to the docker test cluster.
            self.contact_points=['192.168.0.5','192.168.0.6','192.168.0.7']
            self.contact_points_str = "192.168.0.5,192.168.0.6,192.168.0.7"
        self.formatString = "org.apache.spark.sql.cassandra"
        self.username = "username"
        self.password = "password"
        self.cluster = None
        self.session = None
        self.createSession()
    def __del__(self):
        # Best-effort shutdown of the cluster connection.
        # NOTE(review): assumes createSession() succeeded and left a
        # cluster object -- confirm behaviour when connecting failed.
        self.cluster.shutdown()
    def pandas_factory(self, colnames, rows):
        """Driver row factory that materialises result rows as a pandas
        DataFrame with the query's column names."""
        return pd.DataFrame(rows, columns=colnames)
def createSession(self):
print "contact_points = " + self.contact_points_str
self.cluster = Cluster(
contact_points=self.contact_points, #random select a node
)
self.session = self.cluster.connect()
self.session.row_factory = self.pandas_factory
self.session.default_fetch_size = 10000000 #needed for large queries, otherwise driver will do pagination. Default is 50000.
    def getSession(self):
        """Return the live driver session created by createSession()."""
        return self.session
    def execCQL(self, keyspace, cql):
        """
        execute CQL asynchronously against `keyspace`.
        Fire-and-forget: the returned future is discarded, so failures
        surface nowhere -- use execCQLCallBackAnysc for error handling.
        """
        self.session.set_keyspace(keyspace)
        self.session.execute_async(cql)
def execCQLSelect(self, keyspace, cql):
"""
execute CQL, select only
"""
self.session.set_keyspace(keyspace)
# cassandra ResultSet
async_results = self.session.execute_async(cql)
return async_results
def execCQLCallBackAnysc(self, keyspace, cql, handle_success, handle_error):
"""
execute CQL, if success => handle_success function, else handle_error
"""
self.session.set_keyspace(keyspace)
async_results = self.session.execute_async(cql)
async_results.add_callbacks(handle_success, handle_error)
def execCQLSelectToPandasDF(self, keyspace, cql):
"""
execute CQL, select only, return Pandas DataFrame
"""
self.session.set_keyspace(keyspace)
# cassandra ResultSet
async_results = self.session.execute_async(cql)
# to Pandas DataFrame
return async_results.result()._current_rows
def execCQLSelectToDF(self, sqlContext, keyspace, cql):
"""
execute CQL, select only, return Spark DataFrame
"""
# pandas dataframe to spark dataframe
pandas_dataframe = self.execCQLSelectToPandasDF(keyspace, cql)
if pandas_dataframe.empty:
schema = StructType([])
return sqlContext.createDataFrame([],schema)
else:
return sqlContext.createDataFrame(pandas_dataframe)
def execCQLSelectToRDD(self, sqlContext, keyspace, cql):
"""
execute CQL, select only, return Spark RDD
"""
return self.execCQLSelectToDF(sqlContext, keyspace, cql).rdd.map(tuple)#dataFrame to RDD
@property
def contactPoints(self):
return self.contact_points
@contactPoints.setter
def contactPoints(self, contact_points):
self.contact_points = contact_points
@contactPoints.deleter
def contactPoints(self):
del self.contact_points
# pyspark cassandra connector
def readFromCassandraDF(self, sqlContext, keyspace, table):
"""
read data from Cassandra, return Dataframe
"""
return sqlContext.read\
.format(self.formatString)\
.options(table=table, keyspace=keyspace)\
.option("spark.cassandra.connection.host",self.contact_points_str)\
.load()
def readFromCassandraRDD(self, sqlContext, keyspace, table):
"""
read data from Cassandra, return RDD
"""
df = sqlContext.read\
.format(self.formatString)\
.options(table=table, keyspace=keyspace)\
.option("spark.cassandra.connection.host",self.contact_points_str)\
.load()
return df.rdd.map(tuple)#dataFrame to RDD
def saveToCassandraDF(self, dataFrame, keyspace, table, mode="error"):
"""
Save data to Cassandra using DataFrame, select one mode to save
SaveMode.ErrorIfExists (default) | "error" When saving a DataFrame to a data source,
if data already exists, an exception is expected to be thrown.
SaveMode.Append | "append" When saving a DataFrame to a data source,
if data/table already exists, contents of the DataFrame are expected to be appended to existing data.
SaveMode.Overwrite | "overwrite" Overwrite mode means that when saving a DataFrame to a data source,
if data/table already exists, existing data is expected to be overwritten by the contents of the DataFrame.
SaveMode.Ignore | "ignore" Ignore mode means that when saving a DataFrame to a data source,
if data already exists, the save operation is expected to not save the contents of the DataFrame and to not change the existing data. This is similar to a CREATE TABLE IF NOT EXISTS in SQL.
"""
dataFrame.write\
.format(self.formatString)\
.mode(mode)\
.options(table=table, keyspace=keyspace)\
.option("spark.cassandra.connection.host",self.contact_points_str)\
.save()
def BatchInsertIntoCassandra(self, cqllist, keyspace):
"""
Batch insert into Cassandra
"""
self.cluster = Cluster(
contact_points=self.contact_points,
)
self.session = self.cluster.connect()
batch = BatchStatement() # default is ATOMIC ( All or nothing)
if cqllist > 0:
for cql_command in cqllist:
batch.add(SimpleStatement(cql_command))
self.session.execute(batch)
else:
pass
|
from sqgturb import SQG, rfft2, irfft2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import os, sys
from netCDF4 import Dataset
# run SQG turbulence simulation, plotting results to screen and/or saving to
# netcdf file.
# usage: script.py <input netcdf file> <output netcdf file>
filename = sys.argv[1]
ncin = Dataset(filename)
savedata = sys.argv[2]
# rebuild the SQG model from the attributes stored in the input file
model = SQG(ncin['pv'][0],\
            nsq=ncin.nsq,f=ncin.f,dt=ncin.dt,U=ncin.U,H=ncin.H,\
            r=ncin.r,tdiab=ncin.tdiab,symmetric=ncin.symmetric,\
            diff_order=ncin.diff_order,diff_efold=ncin.diff_efold,threads=1)
N = ncin['pv'].shape[-1]
nsteps = ncin['pv'].shape[0]
# extra hyperdiffusion filter applied to the perturbation fields
# (shorter e-folding time than the model's own diffusion)
diff_order_pert = 2
diff_efold_pert = 3600.
ktot = np.sqrt(model.ksqlsq)
ktotcutoff = np.pi*N/model.L
hyperdiff_pert =\
np.exp((-model.dt/diff_efold_pert)*(ktot/ktotcutoff)**diff_order_pert)
# initialize figure.
outputinterval = 10800. # interval between frames in seconds
# create the output netCDF file, copying the model attributes over
nc = Dataset(savedata, mode='w', format='NETCDF4_CLASSIC')
nc.r = model.r
nc.f = model.f
nc.U = model.U
nc.L = model.L
nc.H = model.H
nc.g = ncin.g; nc.theta0 = ncin.theta0
nc.nsq = model.nsq
nc.tdiab = model.tdiab
nc.dt = model.dt
nc.diff_efold = diff_efold_pert
nc.diff_order = diff_order_pert
nc.symmetric = int(model.symmetric)
nc.dealias = int(model.dealias)
# dimensions: x/y horizontal grid, z = two levels, t unlimited
x = nc.createDimension('x',N)
y = nc.createDimension('y',N)
z = nc.createDimension('z',2)
t = nc.createDimension('t',None)
psivar =\
nc.createVariable('psi',np.float32,('t','z','y','x'),zlib=True)
psivar.units = 'm**2/s'
xvar = nc.createVariable('x',np.float32,('x',))
xvar.units = 'meters'
yvar = nc.createVariable('y',np.float32,('y',))
yvar.units = 'meters'
zvar = nc.createVariable('z',np.float32,('z',))
zvar.units = 'meters'
tvar = nc.createVariable('t',np.float32,('t',))
tvar.units = 'seconds'
xvar[:] = np.arange(0,model.L,model.L/N)
yvar[:] = np.arange(0,model.L,model.L/N)
zvar[0] = 0; zvar[1] = model.H
# levplot selects the upper level for display; nout is the frame counter
levplot = 1; nout = 0
fig = plt.figure(figsize=(8,8))
fig.subplots_adjust(left=0, bottom=0.0, top=1., right=1.)
# fixed color limits for the streamfunction plot
vmin = -3.e5; vmax = 3.e5
def initfig():
    """Animation init callback: draw the initial filtered streamfunction.

    Reads module-level state (fig, model, hyperdiff_pert, levplot,
    vmin, vmax) and publishes the AxesImage through the global `im`.
    """
    global im
    ax = fig.add_subplot(111)
    ax.axis('off')
    # subtract the hyperdiffusion-filtered field to keep only the
    # small scales the perturbation filter would damp
    pvspec = model.pvspec - hyperdiff_pert*model.pvspec
    psispec = model.invert(pvspec)
    psi = irfft2(psispec)
    im = ax.imshow(psi[levplot],cmap=plt.cm.bwr,interpolation='nearest',origin='lower',vmin=vmin,vmax=vmax)
    return im,
def updatefig(*args):
global nout
model.advance()
pvspec = rfft2(ncin['pv'][nout])
pvspec = pvspec - hyperdiff_pert*pvspec
psispec = model.invert(pvspec)
psi = irfft2(psispec)
print nout, psi.min(), psi.max()
im.set_data(psi[levplot])
hr = ncin['t'][nout]/3600.
print 'saving data at t = %g hours' % hr
psivar[nout,:,:,:] = psi
tvar[nout] = ncin['t'][nout]
nc.sync()
if nout >= nsteps: nc.close()
nout = nout + 1
return im,
# interval=0 means draw as fast as possible
# updatefig is called once per frame (nsteps frames total, no repeat)
ani = animation.FuncAnimation(fig, updatefig, frames=nsteps, repeat=False,\
init_func=initfig,interval=0,blit=True)
plt.show()
|
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2016) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from unittest import TestCase
import mock
from hpOneView.connection import connection
from hpOneView.resources.facilities.power_devices import PowerDevices
class PowerDevicesTest(TestCase):
    """Unit tests for PowerDevices.get_utilization URI construction."""

    def setUp(self):
        # a real connection object is constructed, but the HTTP layer
        # is patched out in each test via mock.patch.object
        self.host = '127.0.0.1'
        self.connection = connection(self.host)
        self._power_devices = PowerDevices(self.connection)

    @mock.patch.object(connection, 'get')
    def test_get_utilization_with_all_args(self, mock_get):
        """All optional query parameters must be URL-encoded into the URI."""
        self._power_devices.get_utilization(
            '35323930-4936-4450-5531-303153474820',
            fields='PeakPower,AveragePower',
            filter='startDate=2016-05-30T03:29:42.361Z,endDate=2016-05-31T03:29:42.361Z',
            refresh=True, view='day')
        # the composite filter is split into two filter= parameters
        expected_uri = '/rest/power-devices/35323930-4936-4450-5531-303153474820/utilization' \
                       '?filter=startDate%3D2016-05-30T03%3A29%3A42.361Z' \
                       '&filter=endDate%3D2016-05-31T03%3A29%3A42.361Z' \
                       '&fields=PeakPower%2CAveragePower' \
                       '&refresh=true&view=day'
        mock_get.assert_called_once_with(expected_uri)

    @mock.patch.object(connection, 'get')
    def test_get_utilization_with_defaults(self, mock_get):
        """Without optional args the URI must carry no query string."""
        self._power_devices.get_utilization('35323930-4936-4450-5531-303153474820')
        expected_uri = '/rest/power-devices/35323930-4936-4450-5531-303153474820/utilization'
        mock_get.assert_called_once_with(expected_uri)
|
import tensorflow as tf
# https://www.tensorflow.org/tutorials/seq2seq
# http://suriyadeepan.github.io/2016-06-28-easy-seq2seq/
# PAD: padding(Filler)
# GO: prefix of decoder input
# EOS: suffix of decoder output
# UNK: Unknown; word not in vocabulary
# Q : [ PAD, PAD, PAD, PAD, PAD, PAD, “?”, “you”, “are”, “How” ] # reversing input is replaced by attention
# A : [ GO, “I”, “am”, “fine”, “.”, EOS, PAD, PAD, PAD, PAD ]
# If we are using the bucket (5,10), our sentences will be encoded to :
# Q : [ PAD, “?”, “you”, “are”, “How” ]
# A : [ GO, “I”, “am”, “fine”, “.”, EOS, PAD, PAD, PAD, PAD ]
PAD_ID = 0  # padding / filler token id
GO_ID = 1   # decoder-input start-of-sequence token id
EOS_ID = 2  # decoder-output end-of-sequence token id
UNK_ID = 3  # out-of-vocabulary (unknown word) token id
class Config():
    """Hyper-parameters and filesystem paths for the seq2seq chatbot."""
    # Path
    root_dir = "./"
    data_dir = root_dir + "data"
    model_dir = root_dir + "nn_models"
    reply_dir = root_dir + "reply"
    # data type used when training the model
    data_type = tf.float32
    # for model
    vocab_size = 8000
    batch_size = 256
    use_lstm = True  # LSTM cells instead of GRU
    # for encoding
    enc_hidden_size = 128
    enc_num_layers = 1
    # for decoding
    dec_hidden_size = 128
    dec_num_layers = 1
    # for learning
    learning_rate = 0.001
    # NOTE(review): name contains a typo ("fator" -> "factor"); kept
    # unchanged because external callers may reference it by this name.
    learning_rate_decay_fator = 0.999
    max_gradient_norm = 5.0
    # bucket <- (encoder input = user query , decoder input = bot answer)
    buckets = [(5,10), (8,15)]
    max_epoch = 100000
    checkpoint_step = 100
from csv import DictReader
from math import sqrt, fabs, exp, log
import numpy as np
D = 2 ** 20  # default input dimensionality (hashing-trick feature space)


# Neural Network with a single hidden layer online learner
class NN(object):
    """Neural Network with a single ReLU hidden layer online learner.

    Attributes:
    ----------
        n (int): number of input units
        h (int): number of hidden units
        alpha (float): initial learning rate
        l2 (float): L2 regularization parameter
        w0 (ndarray of float): weights between the input and hidden layers
        w1 (ndarray of float): weights between the hidden and output layers
        z (ndarray of float): hidden unit activations
        c (float): overall counter (sum of absolute errors seen)
        c0 (ndarray of float): per-input-unit counters
        c1 (ndarray of float): per-hidden-unit counters
    """

    def __init__(self, n=D, h=50, alpha=0.1, l2=0., seed=0):
        """Initialize the NN class object.

        Parameters:
        ----------
            n (int): number of input units
            h (int): number of hidden units
            alpha (float): initial learning rate
            l2 (float): L2 regularization parameter
            seed (unsigned int): random seed
        """
        rng = np.random.RandomState(seed)

        self.n = n
        self.h = h
        self.alpha = alpha
        self.l2 = l2
        # tiny random initial weights; the "+ 1" slots hold the bias units
        self.w1 = (rng.rand(h + 1) - .5) * 1e-7
        self.w0 = (rng.rand((n + 1) * h) - .5) * 1e-7
        self.z = np.zeros((h,), dtype=np.float64)
        self.c = 0.
        self.c1 = np.zeros((h,), dtype=np.float64)
        self.c0 = np.zeros((n,), dtype=np.float64)

    def predict(self, x):
        """Predict for features.

        Parameters:
        ----------
            x : a list of indices of non-zero (binary) features

        Outputs:
        ----------
            p (float): predicted probability in (0, 1)
        """
        w0 = self.w0
        w1 = self.w1
        n = self.n
        h = self.h
        z = self.z

        # starting with the bias in the hidden layer
        p = w1[h]

        # calculating and adding values of hidden units
        for j in range(h):
            # starting with the bias in the input layer
            z[j] = w0[n * h + j]

            # calculating and adding values of input units
            for i in x:
                z[j] += w0[i * h + j]

            # apply the ReLU activation function to the hidden unit
            z[j] = z[j] if z[j] > 0. else 0.

            p += w1[j] * z[j]

        # apply the sigmoid activation function to the output unit,
        # clipping the logit to avoid overflow in exp()
        return 1. / (1. + exp(-max(min(p, 35.), -35.)))

    def update(self, x, p, y):
        """Update the model with one (features, prediction, target) example.

        Parameters:
        ----------
            x : a list of indices of non-zero features
            p : predicted output
            y : target output

        Outputs:
        ----------
            updated model weights and counters (in place)
        """
        alpha = self.alpha
        l2 = self.l2
        n = self.n
        h = self.h
        w0 = self.w0
        w1 = self.w1
        c = self.c
        c0 = self.c0
        c1 = self.c1
        z = self.z

        e = p - y
        abs_e = fabs(e)
        dl_dy = e * alpha  # dl/dy * (learning rate)

        # starting with the bias in the hidden layer; the counter-based
        # denominator implements an AdaGrad-style decaying step size
        w1[h] -= dl_dy / (sqrt(c) + 1) + l2 * w1[h]
        for j in range(h):
            # update weights related to non-zero hidden units only
            if z[j] == 0.:
                continue

            # update weights between the hidden units and output
            # dl/dw1 = dl/dy * dy/dw1 = dl/dy * z
            w1[j] -= (dl_dy / (sqrt(c1[j]) + 1) * z[j] + l2 * w1[j])

            # starting with the bias in the input layer
            # dl/dz = dl/dy * dy/dz = dl/dy * w1
            dl_dz = dl_dy * w1[j]
            w0[n * h + j] -= (dl_dz / (sqrt(c1[j]) + 1) + l2 * w0[n * h + j])

            # update weights related to non-zero input units
            for i in x:
                # update weights between the hidden unit j and input i
                # dl/dw0 = dl/dz * dz/dw0 = dl/dz * v
                w0[i * h + j] -= (dl_dz / (sqrt(c0[i]) + 1) + l2 * w0[i * h + j])

                # update counter for the input i (ndarray: persists in place)
                c0[i] += abs_e

            # update counter for the hidden unit j (ndarray: in place)
            c1[j] += abs_e

        # BUGFIX: persist the overall counter on the instance. The original
        # incremented a local copy (`c += abs_e`) that was discarded, so
        # self.c stayed 0 forever and the bias step size never decayed.
        self.c += abs_e
|
# coding: utf-8
# In[3]:
import pandas as pd
import numpy as np
from sqlalchemy import *
import datetime
# AWS RDS PostgreSQL instance holding the zonal-statistics results
DATABASE_ENDPOINT = "aqueduct30v05.cgpnumwmfcqc.eu-central-1.rds.amazonaws.com"
DATABASE_NAME = "database01"
TABLE_NAME = "y2018m05d29_rh_total_demand_postgis_30spfaf06_v01_v01"
# read the database password from a root-level dot-file kept out of the repo
F = open("/.password","r")
password = F.read().splitlines()[0]
F.close()
engine = create_engine("postgresql://rutgerhofste:{}@{}:5432/{}".format(password,DATABASE_ENDPOINT,DATABASE_NAME))
connection = engine.connect()
# pull a small sample to inspect the schema
sql = "SELECT * FROM {} LIMIT 10".format(TABLE_NAME)
df = pd.read_sql(sql, connection)
# ## Some Background
#
# the database is the result of running zonal statistics on a climate model. I am relatively new to postgreSQL and haven't set any indexes yet. The database is on AWS RDS on an x.large instance. None of the columns is unique. "pfafid_30spfaf06" is a zonal code for water basins. There are in total appr. 16000 unique pfaf_ids. year [1960-2014] month [1-12], temporal_resolution ["year","month"]
#
# Result of (PgAdmin) :
# `SELECT pg_size_pretty(pg_total_relation_size('"public"."y2018m05d29_rh_total_demand_postgis_30spfaf06_v01_v01"'));`
#
# Successfully run. Total query runtime: 425 msec.
# 1 rows affected:
#
# 11 GB
#
#
# Result of (PgAdmin) :
# `SELECT count(*) FROM y2018m05d29_rh_total_demand_postgis_30spfaf06_v01_v01`
#
# Successfully run. Total query runtime: 52 secs.
# 1 rows affected.
#
# 11715275 i.e. 11,715,275 rows
#
# In[4]:
# show the sampled rows fetched above
df.head()
# I know combining month and year in one datetime column is best practice but for future use, keeping them separate is easier.
# The query I like to run calculates a 10 year moving average for three columns:
# 1. ptotwn_m_30spfaf06
# 1. ptotww_m_30spfaf06
# 1. riverdischarge_m_30spfaf06
#
# For example, the 10y annual moving average of 1969 is the average of 1960 - 1969. For a monthly moving average the average is filtered by month: average of jan 1960, jan 1961, ... jan 1969.
#
# The query I have so far:
#
# `
# SELECT year, ptotww_m_30spfaf06, temporal_resolution,
# SUM(ptotww_m_30spfaf06)
# OVER(ORDER BY year ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) as ptotwwma_m_30spfaf06
# FROM y2018m05d29_rh_total_demand_postgis_30spfaf06_v01_v01
# WHERE temporal_resolution = 'year'
# LIMIT 200`
#
# However this is slow (need to set index? Which columns? year, month?) and does not work for the monthly scores.
#
# Successfully run. Total query runtime: 52 secs.
# 200 rows affected.
#
# Which is quite slow.
# In[ ]:
|
"""This will draw the plant loop for any file
copy of s_plantloop.py
figure out how to remove the nodes
keep the nodes, but draw them differently"""
import pydot
import sys
sys.path.append('../EPlusInputcode')
from EPlusCode.EPlusInterfaceFunctions import readidf
import loops
def firstisnode(edge):
    """Return True when the first endpoint of *edge* is a node tuple."""
    return type(edge[0]) == tuple
def secondisnode(edge):
    """Return True when the second endpoint of *edge* is a node tuple."""
    return type(edge[1]) == tuple
def bothnodes(edge):
    """Return True when both endpoints of *edge* are node tuples."""
    first, second = edge
    return type(first) == tuple and type(second) == tuple
def dropnodes(edges):
    """Collapse node tuples out of the edge list so the graph can be
    drawn without its connection nodes.

    Edges are (branch, node) / (node, branch) / (node, node) pairs where
    a node is a tuple and a branch is a plain name. Node-node edges are
    collapsed directly; node-branch edges are stitched through a second
    pass over the edge list.

    NOTE(review): `added` is only reset inside the `if not added` branch,
    so a plain branch-branch edge following a matched edge leaves the
    flag True — confirm this carry-over is intentional.
    NOTE(review): `except ValueError, e` is Python-2-only syntax; the
    whole script is Python 2 (see the print statements below).
    """
    newedges = []
    added = False
    for edge in edges:
        if bothnodes(edge):
            # node -> node: keep only the node names
            newtup = (edge[0][0], edge[1][0])
            newedges.append(newtup)
            added = True
        elif firstisnode(edge):
            # node -> branch: find the edge feeding this node and splice
            for edge1 in edges:
                if edge[0] == edge1[1]:
                    newtup = (edge1[0], edge[1])
                    # append only if not already present
                    try:
                        newedges.index(newtup)
                    except ValueError, e:
                        newedges.append(newtup)
                    added = True
        elif secondisnode(edge):
            # branch -> node: find the edge this node feeds and splice
            for edge1 in edges:
                if edge[1] == edge1[0]:
                    newtup = (edge[0], edge1[1])
                    try:
                        newedges.index(newtup)
                    except ValueError, e:
                        newedges.append(newtup)
                    added = True
        # gets the hanging nodes - nodes with no connection
        if not added:
            if firstisnode(edge):
                newedges.append((edge[0][0], edge[1]))
            if secondisnode(edge):
                newedges.append((edge[0], edge[1][0]))
            added = False
    return newedges
def test_dropnodes():
    """py.test for dropnodes"""
    # test 1 - simple linear chain alternating node tuples and branches
    node = "node"
    (a,b,c,d,e,f,g,h,i) = (('a', node),'b',('c', node),'d',
        ('e', node),'f',('g', node),'h',('i', node))
    edges = [(a, b),
             (b, c),
             (c, d),
             (d, e),
             (e, f),
             (f, g),
             (g, h),
             (h, i),]
    theresult = [('a', 'b'), ('b', 'd'), ('d', 'f'), ('f', 'h'), ('h', 'i')]
    result = dropnodes(edges)
    assert result == theresult
    # test 2 - a diamond: two parallel paths from b re-joining at i
    (a,b,c,d,e,f,g,h,i,j) = (('a', node),'b',('c', node),
        ('d', node),'e','f',('g', node),('h', node),'i',('j', node))
    edges = [(a, b),
             (b, c),
             (c, e),
             (e, g),
             (g, i),
             (i, j),
             (b, d),
             (d, f),
             (f, h),
             (h, i),]
    theresult = [('a', 'b'), ('b', 'e'), ('e', 'i'), ('i', 'j'),
        ('b', 'f'), ('f', 'i')]
    result = dropnodes(edges)
    assert result == theresult
def makeanode(name):
    """Return a plaintext pydot node for a pipe/connection point."""
    return pydot.Node(name, shape="plaintext", label=name)
def makeabranch(name):
    """Return a box3d-shaped pydot node representing a branch."""
    return pydot.Node(name, shape="box3d", label=name)
def makeendnode(name):
    """Return a filled-circle pydot node for a loop end point."""
    return pydot.Node(name, shape="circle", label=name,
        style="filled", fillcolor="#e4e4e4")
def istuple(x):
    """Return True when *x* is exactly a tuple."""
    return type(x) is tuple
def nodetype(anode):
    """Return the type tag of a node tuple (its second element), or None
    for a bare name with no tag."""
    try:
        return anode[1]
    # FIX: `except IndexError, e` is Python-2-only syntax and the bound
    # exception was unused; this form is valid on both Python 2 and 3.
    except IndexError:
        return None
def edges2nodes(edges):
    """Gather the unique nodes appearing in *edges*, sorted.

    NOTE: in this script the nodes mix plain strings and tuples, which
    Python 2 sorts by type name; on Python 3 such a mixed sort raises.
    """
    nodes = []
    for e1, e2 in edges:
        nodes.append(e1)
        nodes.append(e2)
    # dict used as an ordered-agnostic "unique" set of nodes
    nodedict = dict([(n, None) for n in nodes])
    # FIX: `keys().sort()` fails on Python 3 (keys() is a view with no
    # sort method); sorted() is equivalent and works on both versions.
    return sorted(nodedict.keys())
def test_edges2nodes():
    """py.test for edges2nodes"""
    # each case is a (edges, expected sorted unique nodes) pair
    thedata = (([("a", "b"), ("b", "c"), ("c", "d")],
        ["a", "b", "c", "d"]), # edges, nodes
        )
    for edges, nodes in thedata:
        result = edges2nodes(edges)
        assert result == nodes
def makediagram(edges):
    """Build a pydot digraph from *edges*.

    Node rendering depends on the node's tag: "epnode" entries become
    plaintext nodes, "EndNode" entries filled circles, and untagged
    branch names box3d nodes.
    """
    graph = pydot.Dot(graph_type='digraph')
    nodes = edges2nodes(edges)
    epnodes = [(node,
        makeanode(node[0])) for node in nodes if nodetype(node)=="epnode"]
    endnodes = [(node,
        makeendnode(node[0])) for node in nodes if nodetype(node)=="EndNode"]
    epbr = [(node, makeabranch(node)) for node in nodes if not istuple(node)]
    # map every original node object to its pydot representation
    nodedict = dict(epnodes + epbr + endnodes)
    # add all nodes first, then wire the edges between them
    for value in nodedict.values():
        graph.add_node(value)
    for e1, e2 in edges:
        graph.add_edge(pydot.Edge(nodedict[e1], nodedict[e2]))
    return graph
iddfile = "../iddfiles/Energy+V6_0.idd"
# fname = "/Applications/EnergyPlus-6-0-0/Examples/DualDuctConstVolGasHC.idf"
# fname = "../idffiles/a.idf"
# fname = "/Volumes/Server/Active_Projects/stanttecE+Conssulting2/3_Simulation/2_Energy/EnergyPlus/fromMatt/Proposed110614exp.idf"
# fname = "/Volumes/Server/Active_Projects/stanttecE+Conssulting2/3_Simulation/2_Energy/EnergyPlus/workingfiles/5ZoneAirCooled.idf"
# fname = "/Volumes/Server/Active_Projects/LBNL_UHM/3_Simulation/2_Energy/Energyplus3/airflow/air6.expidf"
fname = "/Applications/EnergyPlus-6-0-0/Examples/DualDuctConstVolGasHC.idf"
# fname = "../idffiles/HVACTemplate-5ZoneVAVFanPowered.idf"
# outname = "../idffiles/.idf"
# NOTE(review): this reassignment makes CoolingTower.idf the effective
# input file; the DualDuctConstVolGasHC.idf assignment above is dead code.
fname = "../idffiles/CoolingTower.idf"
# fname = "../idffiles/a.idf"
data, commdct = readidf.readdatacommdct(fname, iddfile=iddfile)
# in plantloop get:
# demand inlet, outlet, branchlist
# supply inlet, outlet, branchlist
plantloops = loops.plantloopfields(data, commdct)
plantloop = plantloops[0]
# type tags attached to graph nodes
anode = "epnode"
endnode = "EndNode"
#
# supply branchlist
# branch1 -> inlet, outlet
# branch2 -> inlet, outlet
# branch3 -> inlet, outlet
sbranchlist = plantloop[3]
if sbranchlist.strip() != "":
    sbranches = loops.branchlist2branches(data, commdct, sbranchlist)
    s_in_out = [loops.branch_inlet_outlet(data, commdct,
        sbranch) for sbranch in sbranches]
    # NOTE(review): zip pairs the branch names with the fixed 2-tuple
    # (s_in_out, anode) rather than with per-branch in/out pairs — looks
    # suspect; confirm the intent before relying on sbranchinout.
    sbranchinout = dict(zip(sbranches, (s_in_out, anode)))
dbranchlist = plantloop[6]
if dbranchlist.strip() != "":
    dbranches = loops.branchlist2branches(data, commdct, dbranchlist)
    d_in_out = [loops.branch_inlet_outlet(data, commdct,
        dbranch) for dbranch in dbranches]
    # NOTE(review): same suspect zip pattern as sbranchinout above.
    dbranchinout = dict(zip(dbranches, (d_in_out, anode)))
#
# splitters
# inlet
# outlet1
# outlet2
splitters = loops.splitterfields(data, commdct)
#
# mixer
# outlet
# inlet1
# inlet2
mixers = loops.mixerfields(data, commdct)
#
# supply branchlist
# branch1 -> inlet, outlet
# branch2 -> inlet, outlet
# branch3 -> inlet, outlet
#
# CONNECT INLET OUTLETS
edges = []
# get all branches
branchkey = "branch".upper()
branches = data.dt[branchkey]
branch_i_o = {}
for br in branches:
    br_name = br[1]
    in_out = loops.branch_inlet_outlet(data, commdct, br_name)
    branch_i_o[br_name] = dict(zip(["inlet", "outlet"], in_out))
# every branch contributes two edges: inlet-node -> branch -> outlet-node
for br_name, in_out in branch_i_o.items():
    edges.append(((in_out["inlet"], anode), br_name))
    edges.append((br_name, (in_out["outlet"], anode)))
# connect splitter to nodes
for splitter in splitters:
    # splitter_inlet = inletbranch.node
    splittername = splitter[0]
    inletbranchname = splitter[1]
    splitter_inlet = branch_i_o[inletbranchname]["outlet"]
    # edges = splitter_inlet -> splittername
    edges.append(((splitter_inlet, anode), splittername))
    # splitter_outlets = outletbranches.nodes
    outletbranchnames = [br for br in splitter[2:]]
    splitter_outlets = [branch_i_o[br]["inlet"] for br in outletbranchnames]
    # edges = [splittername -> outlet for outlet in splitter_outlets]
    moreedges = [(splittername,
        (outlet, anode)) for outlet in splitter_outlets]
    edges = edges + moreedges
for mixer in mixers:
    # mixer_outlet = outletbranch.node
    mixername = mixer[0]
    outletbranchname = mixer[1]
    mixer_outlet = branch_i_o[outletbranchname]["inlet"]
    # edges = mixername -> mixer_outlet
    edges.append((mixername, (mixer_outlet, anode)))
    # mixer_inlets = inletbranches.nodes
    inletbranchnames = [br for br in mixer[2:]]
    mixer_inlets = [branch_i_o[br]["outlet"] for br in inletbranchnames]
    # edges = [mixername -> inlet for inlet in mixer_inlets]
    moreedges = [((inlet, anode), mixername) for inlet in mixer_inlets]
    edges = edges + moreedges
# connect demand and supply side
for plantloop in plantloops:
    supplyinlet = plantloop[1]
    supplyoutlet = plantloop[2]
    demandinlet = plantloop[4]
    demandoutlet = plantloop[5]
    # edges = [supplyoutlet -> demandinlet, demandoutlet -> supplyinlet]
    moreedges = [((supplyoutlet, endnode), (demandinlet, endnode)),
        ((demandoutlet, endnode), (supplyinlet, endnode))]
    # Python 2 print statement (this script is Python 2 throughout)
    print moreedges
    edges = edges + moreedges
# for edge in edges:
#     if (type(edge[0]) == tuple or type(edge[1]) == tuple):
#         continue
#     print edge
# newedges = dropnodes(edges)
g = makediagram(edges)
g.write('a.dot')
g.write_png('a.png')
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'MFC'
__time__ = '18/7/15 18:06'
"""
Python与量化投资从基础到实战 P35
"""
# (Book reference above: "Python and Quantitative Investing: From Basics
# to Practice", p. 35.)
# break in for loop
for i in range(5, 9):
    print(i)
    print("hello")
    if i > 6:
        print("i > 6")
        # stop at the first i greater than 6, i.e. after printing i == 7
        break
|
# read product code i (1-based menu index) and quantity t from one line
i, t = [int(i) for i in input().split()]
# unit prices indexed by product code 1..5
prices = [4.0, 4.5, 5.0, 2.0, 1.5]
print("Total: R$ {0:.2f}".format(prices[i-1]*t))
|
from fuzzer import Fuzzer, FuzzerBenchmark, FuzzerInstance, TargetProgram
import os
import subprocess
import shutil
from random import randint
import screenutils
class AngoraFuzzer(Fuzzer):
    """Driver for the Angora fuzzer: builds Magma targets with the Angora
    compiler wrappers and launches/monitors fuzzing campaigns inside
    detached `screen` sessions."""

    def __init__(self, install_dir):
        super().__init__()
        self.install_dir = install_dir
        self.cc = os.path.join(self.install_dir, "bin", "angora-clang")
        self.cxx = os.path.join(self.install_dir, "bin", "angora-clang++")
        self.ass = "/usr/lib/llvm-6.0/bin/llvm-as"
        self.fuzz = os.path.join(self.install_dir, "angora_fuzzer")

    def compile(self, target_name, output_dir, config=None, **env):
        '''
        Compiles the benchmark and returns a list of TargetProgram objects,
        each object having its `path` data member set to the target's path.

        Angora needs two builds: a "fast" binary (always built) that is the
        one actually fuzzed, and a "track" binary with taint tracking
        (built when config is None or True).
        '''
        args = [
            "/usr/bin/env",
            "make",
            "-j",
            "-C",
            self.magma_dir,
            # "-f %s" % os.path.join(self.magma_dir, "Makefile")
            "clean",
            "all_patches",
            target_name
        ]
        env["CC"] = self.cc
        env["CXX"] = self.cxx
        env["AS"] = self.ass
        proc_env = os.environ.copy()
        proc_env.update(env)

        targets = []

        # Compile without taint tracking
        proc_env["USE_FAST"] = "1"
        try:
            result = subprocess.run(args, env=proc_env, check=True)
        except subprocess.CalledProcessError as ex:
            print(ex.stderr)
            raise
        # since check=True, reaching this point means compiled successfully
        fast_output_dir = os.path.join(output_dir, "fast")
        try:
            os.mkdir(fast_output_dir)
        except FileExistsError:
            # directory left over from a previous run is fine; any other
            # OS error propagates on its own (the old `except: raise`
            # clause was a no-op and has been dropped)
            pass
        fast_targets = []
        for root, _, files in os.walk(os.path.join(self.magma_dir, "build")):
            for f in files:
                if os.path.basename(root) == "programs":
                    fast_targets.append(shutil.copy2(os.path.join(root, f), fast_output_dir))
                else:
                    # we only copy the USE_FAST monitor since Angora fuzzes
                    # the fast variant only
                    shutil.copy2(os.path.join(root, f), output_dir)
        fast_targets.sort()

        # FIX: identity comparison with None (PEP 8 E711); `== True` kept
        # so truthy sentinel values keep matching as before
        if config is None or config == True:
            # Compile with taint tracking
            del proc_env["USE_FAST"]
            proc_env["USE_TRACK"] = "1"
            try:
                result = subprocess.run(args, env=proc_env, check=True)
            except subprocess.CalledProcessError as ex:
                print(ex.stderr)
                raise
            # since check=True, reaching this point means compiled successfully
            track_output_dir = os.path.join(output_dir, "track")
            try:
                os.mkdir(track_output_dir)
            except FileExistsError:
                pass
            track_targets = []
            for root, _, files in os.walk(os.path.join(self.magma_dir, "build")):
                for f in files:
                    if os.path.basename(root) == "programs":
                        track_targets.append(shutil.copy2(os.path.join(root, f), track_output_dir))
            track_targets.sort()
        else:
            # no track targets generated; keep the lists zip-compatible
            track_targets = [None] * len(fast_targets)

        # fast/track lists are both sorted, so they pair up positionally
        for fast, track in zip(fast_targets, track_targets):
            t = TargetProgram()
            t["path"] = fast
            t["track"] = track
            t["name"] = target_name
            t["program"] = os.path.basename(t["path"])
            targets.append(t)
        return targets

    def preprocess(self, **kwargs):
        # os.system("sudo bash -c 'echo core >/proc/sys/kernel/core_pattern'")
        # os.system("sudo bash -c 'cd /sys/devices/system/cpu; echo performance | tee cpu*/cpufreq/scaling_governor'")
        pass

    def launch(self, target, seeds_dir, findings_dir, args=None, timeout=86400, logfile=None):
        """Start one Angora campaign in a detached screen session and
        return a FuzzerInstance handle identifying it."""
        fuzz_cmd = "{fuzz} -i {seeds_dir} -o {findings_dir} -t {target_track}{args} -- {target_fast} {target_args}".format(
            fuzz = self.fuzz,
            seeds_dir = seeds_dir,
            findings_dir = findings_dir,
            args = " %s" % args if (args is not None and args != "") else "",
            target_track = target["track"],
            target_fast = target["path"],
            target_args = target["args"]
        )
        # hard wall-clock limit via timeout(1); SIGINT lets Angora shut
        # down cleanly
        cmd = "/usr/bin/env timeout -s INT {timeout}s {fuzz_cmd}".format(
            timeout = timeout,
            fuzz_cmd = fuzz_cmd
        ).split(" ")
        name = "angora.%d" % randint(10000,99999)
        args = [
            "/usr/bin/env",
            "screen",
            "-S",
            name,
            "-d", "-m"
        ]
        # FIX: isinstance instead of `type(logfile) is str` — same behavior
        # for str and also accepts str subclasses
        if logfile is not None and isinstance(logfile, str):
            args.extend(["-L", "-Logfile", logfile])
        args += cmd
        result = subprocess.run(args, check=True)
        instance = FuzzerInstance()
        instance.screen_name = name
        return instance

    def terminate(self, instance):
        """Kill the screen session of a running campaign, if it exists."""
        s = screenutils.Screen(instance.screen_name)
        if s.exists:
            s.kill()

    def status(self, instance):
        """Return True while the campaign's screen session is alive."""
        s = screenutils.Screen(instance.screen_name)
        return s.exists

    def postprocess(self, **kwargs):
        pass
class AngoraBenchmark(FuzzerBenchmark):
    """Benchmark configuration for Angora; inherits all behaviour from
    FuzzerBenchmark unchanged."""
    pass
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Question: Is there a correlation between the relationships between the victim and the perpetrator?
dataArr = pd.read_csv("../data/database.csv")
# remove these columns (identifiers/metadata not needed for the question)
dataArr = (dataArr.drop(['Record ID', 'Agency Code','Agency Name','Agency Type','City', 'State', 'Year','Month', 'Incident', 'Crime Type', 'Crime Solved'],axis=1))
# print(dataArr.head(n=1))
# remove rows where the relationship is unknown
dataArr = dataArr[dataArr["Relationship"] != "Unknown"]
def condition(value):
    """Collapse relationship labels: anything other than 'Acquaintance'
    or 'Stranger' counts as 'Family'."""
    if value in ("Acquaintance", "Stranger"):
        return value
    return "Family"
dataArr['Relationship'] = dataArr['Relationship'].apply(condition)
# get count of each unique value in Relationship and sort descending
grouped = dataArr.groupby("Relationship").size().reset_index()
grouped = grouped.sort_values(0, ascending=False)
print(grouped)
# plot the result as a pie chart with percentage labels
plt.pie(grouped[0], labels=grouped["Relationship"], autopct='%.2f')
# plt.ylabel("Homicides")
# plt.xlabel("Relationship")
plt.title("Homicides By Relationship Type")
# plt.tight_layout()
# Note, save your output to the plots folder. name it something
plt.savefig('../plots/q2_relationship_2.png')
|
# Remove every 'a'/'A' from a user-supplied string and print the result.
c = input('string:')
b = ""
# IDIOM FIX: iterate over the characters directly instead of indexing
# with `for ch in range(len(c))` — same output, clearer loop.
for ch in c:
    if ch != "a" and ch != "A":
        b = b + ch
print(b)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author Zhang zhiming (zhangzhiming@)
# date
import re
import json
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import ConfigParser
import logging
import time
import math
import random
import os
import numpy as np
class TF_v1():
    """Raw term frequency: the word->count dict is used as-is."""
    def __init__(self):
        pass
    def tf(self,word_dict):
        # identity transform: raw counts are the tf weights
        return word_dict
class TF_v2():
    """Log-scaled term frequency: tf = 1 + ln(count)."""
    def __init__(self):
        pass

    def tf(self, word_dict):
        # build the weighted dict in one pass over the counts
        return dict((w, 1 + math.log(cnt)) for w, cnt in word_dict.items())
class TF_v3():
    """Augmented term frequency: tf = 0.4 + 0.6 * count / max_count,
    normalising by the most frequent word in the document."""
    def __init__(self):
        pass

    def tf(self, word_dict):
        max_count = float(max(word_dict.values()))
        alph = 0.4
        return dict(
            (w, alph + (1 - alph) * (cnt / max_count))
            for w, cnt in word_dict.items()
        )
class TFFunc():
    """Strategy wrapper delegating tf() to an injected implementation
    (one of TF_v1 / TF_v2 / TF_v3)."""
    def __init__(self,func):
        # `func` must expose a tf(word_dict) method
        self.func = func
        pass
    def tf(self,word_dict):
        return self.func.tf(word_dict)
class IDF_v1():
    """Standard inverse document frequency: idf = ln(N / df)."""
    def __init__(self):
        pass

    def idf(self, idf_dict, doc_num):
        result = {}
        # idf_dict maps word -> document frequency; doc_num is N
        for w, df in idf_dict.items():
            result[w] = math.log(doc_num * 1.0 / df)
        return result
class IDF_v2():
    """Smoothed inverse document frequency: idf = ln(N / (1 + df)),
    avoiding division by zero for unseen words."""
    def __init__(self):
        pass

    def idf(self, idf_dict, doc_num):
        result = {}
        for w, df in idf_dict.items():
            result[w] = math.log(doc_num / (1.0 + df))
        return result
class IDFFunc():
    """Strategy wrapper delegating idf() to an injected implementation
    (one of IDF_v1 / IDF_v2)."""
    def __init__(self,func):
        # `func` must expose an idf(idf_dict, doc_num) method
        self.func = func
    def idf(self,idf_dict,doc_num):
        return self.func.idf(idf_dict,doc_num)
##########################################
#
#process begin
##########################################
class PreProcess():
    """Corpus pre-processing pipeline (Python 2 style: ``ConfigParser``,
    ``range``-as-list shuffling).

    Reads paths/parameters from an INI config, builds a vocabulary from a
    word-segmented corpus, computes TF-IDF vectors, writes libsvm-format
    feature files and produces cross-validation train/test splits.
    """
    def __init__(self,conf_in):
        """Load the INI config at *conf_in* and derive all file paths.

        Also configures a timestamped log file under ``<work_dir>/log/``.
        """
        self.cf = ConfigParser.ConfigParser()
        self.cf.read(conf_in)
        #basic_conf
        self.work_dir = self.cf.get('basic_conf','work_dir')
        # NOTE(review): './data/' after work_dir yields paths like
        # "<work_dir>./data/" — works when work_dir ends with '/', confirm.
        self.data_dir = self.work_dir + './data/'
        self.conf_dir = self.work_dir + './conf/'
        self.stopword_data = self.conf_dir + 'stopwords.txt'
        self.word_min_count = self.cf.getint('basic_conf','word_min_count')
        self.label2clsName_data = self.data_dir + self.cf.get('basic_conf','label2clsName')
        self.id2docName_data = self.data_dir + self.cf.get('basic_conf','id2docName')
        self.word2id_data = self.data_dir + self.cf.get('basic_conf','word2id')
        self.tfidf_svm_data = self.data_dir + self.cf.get('basic_conf','tfidf_svm')
        self.word2idf_data = self.data_dir + self.cf.get('basic_conf','word2idf')
        self.train_test_dir = self.data_dir + self.cf.get('basic_conf','train_test_dir')
        if not os.path.exists(self.train_test_dir) :
            os.mkdir(self.train_test_dir)
        self.file_tag = self.cf.get('pre_process','file_tag')
        self.json_data = self.data_dir + self.cf.get('pre_process','json_data')
        self.wordseg_data = self.data_dir + self.cf.get('pre_process','wordseg_data')
        self.vocab_data = self.data_dir + self.cf.get('pre_process','vocab_data')
        self.tfidf_data = self.data_dir + self.cf.get('pre_process','tfidf_data')
        self.train_test_rate = self.cf.getfloat('pre_process','train_test_rate')
        self.cross_validation_num = self.cf.getint('pre_process','cross_validation_num')
        # set loging
        ISOTIMEFORMAT='%Y%m%d-%H%M%S'
        time_str = 'pre_process-'+ time.strftime( ISOTIMEFORMAT, time.localtime( time.time() ) )
        logging.basicConfig(level=logging.INFO,
                format='%(asctime)s %(levelname)s %(message)s',
                filename=self.work_dir + '/log/log-' + time_str + '.txt',filemode='w')
        self.logging = logging
        #self.Log(' '.join(['[init]','done']))

    def get_stopword(self):
        """Load the stopword file into ``self.stopword_set`` (one word per line)."""
        self.stopword_set = set()
        with open(self.stopword_data,'r') as fin:
            for line in fin:
                line = u''+line.strip()
                self.stopword_set.add(line)

    def generate_voc(self):
        """Count word frequencies over the segmented corpus and write the
        vocabulary file, keeping only words with count > ``word_min_count``.

        Corpus format: one document per line, ``<key>\\t<json>`` where the
        JSON carries ``content_seg_list`` (a list of segmented sentences).
        """
        word_dict = dict()
        self.get_stopword()
        with open(self.wordseg_data,'r') as fin:
            for line in fin:
                line = u''+line.rstrip('\n')
                line_sp = line.split('\t')
                doc_json = json.loads(line_sp[1])
                content_seg_list = doc_json['content_seg_list']
                for sent in content_seg_list:
                    for w in sent:
                        if len(w.strip()) == 0:
                            continue
                        if w in self.stopword_set:
                            continue
                        word_dict[w] = word_dict.get(w,0) + 1
        word_list = sorted(word_dict.items(),key=lambda x:x[1],reverse=True)
        self.vocab2freq = dict()
        with open(self.vocab_data,'w') as fout:
            for w ,c in word_list:
                # drop rare words
                if c <= self.word_min_count:
                    continue
                fout.write( '\t'.join([w,str(c)]) + '\n')
                self.vocab2freq[w] = c
        self.Log(' '.join(['[generate_voc]','done']))

    def get_voc(self):
        """Read the vocabulary back and assign each word a 1-based id; write
        the word->id mapping file. Exits the process if the vocab is missing."""
        self.vocab2freq = dict()
        self.word2id = dict()
        self.id2word = dict()
        if not os.path.exists(self.vocab_data) :
            self.Log(' '.join(['[get_voc]','vocab_dat not exists ']) )
            sys.exit(1)
        index = 1
        # read vocab and assign id
        with open(self.vocab_data,'r') as fin:
            for line in fin:
                line = u''+line.rstrip('\n')
                line_sp = line.split('\t')
                w,c = line_sp
                # NOTE: frequency is kept as a string here (not int()).
                self.vocab2freq[w] = c
                self.word2id[w] = index
                self.id2word[index] = w
                index += 1
        with open(self.word2id_data,'w') as fout:
            for i in range(len(self.id2word)):
                i = i+1
                fout.write('\t'.join([self.id2word[i],str(i)]) + '\n')

    def get_tfidf(self,tf_func,idf_func):
        """Compute TF-IDF for every document and emit all derived artifacts.

        tf_func / idf_func are strategy objects (``TFFunc`` / ``IDFFunc``).
        Outputs: libsvm features, per-doc tfidf JSON, class-label map,
        doc-id map, word->idf table, and cross-validated train/test splits
        (both class-balanced resampled and plain random).
        """
        self.get_stopword()
        self.idf = dict()
        doc_list = []
        # 1. idf
        idf_count = dict()
        doc_num = 0
        class_type_dict = dict()
        with open(self.wordseg_data,'r') as fin:
            for line in fin:
                line = line.rstrip('\n')
                line_sp = line.split('\t')
                doc_json = json.loads(line_sp[1])
                content_seg_list = doc_json['content_seg_list']
                word_set = set()
                for sent in content_seg_list:
                    for w in sent:
                        if len(w.strip()) == 0:
                            continue
                        if w in self.stopword_set:
                            continue
                        if w not in self.word2id:
                            continue
                        word_set.add(w)
                # document frequency: each word counted once per document
                for w in word_set:
                    idf_count[w] = idf_count.get(w,0) + 1
                doc_num += 1
                cls_name = doc_json['cls_name']
                class_type_dict[cls_name] = class_type_dict.get(cls_name,0) + 1
        idf = idf_func.idf(idf_count,doc_num)
        # 2. asign class_type id (most frequent class gets id 0)
        cls_type_list = sorted(class_type_dict.items(),key=lambda x:x[1],reverse=True)
        cls_id = 0
        for cls,cnt in cls_type_list:
            class_type_dict[cls] = str(cls_id)
            cls_id += 1
        # 3. compute & write tfidf ,write svm
        doc_id = 0
        doc_id_list = []
        vocab_len = len(self.word2id)
        with open(self.wordseg_data,'r') as fin,open(self.tfidf_svm_data,'w') as fout,open(self.tfidf_data,'w') as fout_tfidf:
            for line in fin:
                line = line.rstrip('\n')
                line_sp = line.split('\t')
                doc_json = json.loads(line_sp[1])
                content_seg_list = doc_json['content_seg_list']
                word_dict = dict()
                url = doc_json['url']
                cls_name = doc_json['cls_name']
                for sent in content_seg_list:
                    for w in sent:
                        if len(w.strip()) == 0:
                            continue
                        if w in self.stopword_set:
                            continue
                        if w not in self.vocab2freq:
                            continue
                        word_dict[w] = word_dict.get(w,0) + 1
                #tfidf
                tf = tf_func.tf(word_dict)
                tfidf = dict()
                word_tfidf = dict()
                for w in tf:
                    w_id = self.word2id[w]
                    # skip words that appear in more than half the docs
                    if idf[w] < math.log(2):
                        continue
                    pass
                    #print '\t'.join(['tfid=0',w,str(w_id),str(tf[w]),str(idf[w])])
                    #continue
                    tfidf[w_id] = tf[w] * idf[w]
                    word_tfidf[w] = tfidf[w_id]
                # svm file: "<label> <id>:<tfidf> ..." with ids ascending
                vec = []
                vec.append(class_type_dict[cls_name])
                tfidf_list = sorted(tfidf.items(),key=lambda x:x[0],reverse=False)
                for i,tfidf_ in tfidf_list:
                    vec.append(str(i) +':'+str('%.6f'%tfidf_))
                # pad with a zero feature at vocab_len+1 so every vector
                # reports the same dimensionality to the svm tool
                if len(tfidf) != 0 and tfidf_list[-1][0] != vocab_len+1:
                    i = vocab_len+1
                    vec.append(str(i) +':'+str('%.6f'%0))
                fout.write(' '.join(vec) + '\n')
                doc_id_list.append([url,cls_name,vec])
                doc_id += 1
                #tfidf file: original doc JSON minus the segmentation, plus tfidf
                doc_json.pop('content_seg_list')
                word_tfidf_list = sorted(word_tfidf.items(),key=lambda x:x[1] ,reverse=True)
                doc_json['tfidf'] = word_tfidf_list
                fout_tfidf.write('\t'.join([line_sp[0],json.dumps(doc_json,ensure_ascii=False) ]) + '\n')
        # 4. label2clsName_data
        with open(self.label2clsName_data,'w') as fout:
            class_type_list = sorted(class_type_dict.items(),key=lambda x:x[1],reverse=True)
            for cls,cnt in class_type_list:
                cls_id = class_type_dict[cls]
                fout.write('\t'.join([str(cls_id),cls]) + '\n')
        # 5. id2docName_data
        doc_id = 0
        with open(self.id2docName_data,'w') as fout:
            for i in range(len(doc_id_list)):
                fout.write('\t'.join([str(i)] + doc_id_list[i][:2]) + '\n')
        # 6. word2idf
        with open(self.word2idf_data,'w') as fout:
            idf_list = sorted(idf.items(),key = lambda x:x[1],reverse=True)
            for w,v in idf_list:
                fout.write('\t'.join([w,str(v)]) + '\n')
        # 7. split train,test (class-balanced resampling per fold)
        cls_name_count_dict = dict()
        cls_name_data_dict = dict()
        for url,cls_name,vec in doc_id_list:
            cls_name_count_dict[cls_name] = cls_name_count_dict.get(cls_name,0) + 1
            if cls_name not in cls_name_data_dict:
                cls_name_data_dict[cls_name] = []
            cls_name_data_dict[cls_name].append((url,cls_name,vec))
        # max_cls_data_size upbound of resample size per cls_name
        max_cls_data_size = max(cls_name_count_dict.values())
        train_samples = int( max_cls_data_size * self.train_test_rate)
        for k in range(self.cross_validation_num):
            train_result = []
            test_result = []
            # resample train data ,ensure every cls_name has the same train_size
            for cls_name in cls_name_data_dict:
                cls_name_len = len(cls_name_data_dict[cls_name])
                # NOTE: shuffling a range object only works on Python 2,
                # where range() returns a list.
                cls_name_list = range(cls_name_len)
                random.shuffle(cls_name_list)
                cls_name_train_samples = int( cls_name_len * self.train_test_rate)
                cls_name_test_samples = cls_name_len - cls_name_train_samples
                cls_name_train_result = []
                # sample with replacement up to the common train size
                for i in range(train_samples):
                    idx = random.choice(cls_name_list[:cls_name_train_samples])
                    train_result.append(cls_name_data_dict[cls_name][idx])
                for idx in cls_name_list[cls_name_train_samples:]:
                    test_result.append( cls_name_data_dict[cls_name][idx])
            train_name = self.train_test_dir+'/train_resample_' +str(k)
            ftrain = open(train_name + '.svm','w')
            ftrain_map = open(train_name +'_map.txt','w')
            for url,cls_name,vec in train_result:
                ftrain.write(' '.join(vec) + '\n')
                ftrain_map.write('\t'.join([url,cls_name]) + '\n')
            test_name = self.train_test_dir+'/test_resample_' +str(k)
            ftest = open(test_name + '.svm','w')
            ftest_map = open(test_name +'_map.txt','w')
            for url,cls_name,vec in test_result:
                ftest.write(' '.join(vec) + '\n')
                ftest_map.write('\t'.join([url,cls_name]) + '\n')
            ftrain.close()
            ftrain_map.close()
            ftest.close()
            ftest_map.close()
        # 8. split train,test (plain random split per fold)
        train_samples = int(doc_num * self.train_test_rate)
        for k in range(self.cross_validation_num):
            doc_num_list = range(len(doc_id_list))
            random.shuffle(doc_num_list)
            train_name = self.train_test_dir+'/train_' +str(k)
            ftrain = open(train_name + '.svm','w')
            ftrain_map = open(train_name +'_map.txt','w')
            for i in doc_num_list[:train_samples]:
                url,cls_name,vec = doc_id_list[i]
                ftrain.write(' '.join(vec) + '\n')
                ftrain_map.write('\t'.join([str(i)] + doc_id_list[i][:2]) + '\n')
            test_name = self.train_test_dir+'/test_' +str(k)
            ftest = open(test_name + '.svm','w')
            ftest_map = open(test_name +'_map.txt','w')
            for i in doc_num_list[train_samples:]:
                url,cls_name,vec = doc_id_list[i]
                ftest.write(' '.join(vec) + '\n')
                ftest_map.write('\t'.join([str(i)] + doc_id_list[i][:2]) + '\n')
            ftrain.close()
            ftrain_map.close()
            ftest.close()
            ftest_map.close()

    def LogTemplate(self, s):
        """Wrap *s* in the common log line format (timestamp slot is empty)."""
        time_stmap = ''
        return ' [' + time_stmap + ']: ' + str(s)

    def Log(self, s):
        """Log *s* at INFO level."""
        ss = self.LogTemplate(s)
        self.logging.info(ss)

    def LogErr(self, s):
        """Log *s* at ERROR level."""
        ss = self.LogTemplate(s)
        self.logging.error(ss)
def test(conf_in):
    """Run the whole pipeline on the config at *conf_in*: build the
    vocabulary, assign word ids, then export TF-IDF features."""
    pipeline = PreProcess(conf_in)
    pipeline.generate_voc()
    pipeline.get_voc()
    tf_strategy = TFFunc(TF_v1())
    idf_strategy = IDFFunc(IDF_v1())
    pipeline.get_tfidf(tf_strategy, idf_strategy)
if __name__ == '__main__':
    # Bug fix: the original printed the usage message but did not exit, so a
    # missing argument fell straight through to an IndexError on sys.argv[1].
    if len(sys.argv) != 2:
        sys.stderr.write('Usage: pre_proccess_tfidf.py \n')
        sys.exit(1)
    test(sys.argv[1])
|
# Copyright 2018ff. Stephan Druskat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from resosuma.graph.activitygraph import ActivityGraph as AG
from resosuma.graph.edge import Edge
from resosuma.graph.node import Node
import pytest
from resosuma.exceptions import CSVInputError
def test_add_activity():
    '''
    add_activity() must store the edge once and create both endpoint nodes.
    '''
    g = AG()
    activity = Edge("1", "does", "2")
    source = Node("1")
    target = Node("2")
    g.add_activity("1", "does", "2")
    assert activity in g.get_activities()
    assert source in g.get_nodes()
    assert target in g.get_nodes()
    assert len(g.get_activities()) == 1
    assert len(g.get_nodes()) == 2
def test_sets():
    '''
    Nodes and activities behave like sets: re-using an existing endpoint
    ("1" or "4") must not duplicate its node, while each distinct edge is
    kept.
    '''
    g = AG()
    n1 = Node("1")
    n2 = Node("2")
    n3 = Node("3")
    n4 = Node("4")
    n5 = Node("5")
    e1 = g.add_activity("1", "does", "2")
    assert e1 in g.get_activities()
    assert len(g.get_activities()) == 1
    assert n1 in g.get_nodes()
    assert n2 in g.get_nodes()
    assert len(g.get_nodes()) == 2
    e2 = g.add_activity("3", "does", "4")
    assert e1 in g.get_activities()
    assert e2 in g.get_activities()
    assert len(g.get_activities()) == 2
    assert n1 in g.get_nodes()
    assert n2 in g.get_nodes()
    assert n3 in g.get_nodes()
    assert len(g.get_nodes()) == 4
    # "1" and "4" already exist, so only the edge count may grow here
    e3 = g.add_activity("1", "does", "4")
    assert e1 in g.get_activities()
    assert e2 in g.get_activities()
    assert e3 in g.get_activities()
    assert len(g.get_activities()) == 3
    assert n1 in g.get_nodes()
    assert n2 in g.get_nodes()
    assert n3 in g.get_nodes()
    assert n4 in g.get_nodes()
    assert len(g.get_nodes()) == 4
    e4 = g.add_activity("1", "does", "5")
    assert e1 in g.get_activities()
    assert e2 in g.get_activities()
    assert e3 in g.get_activities()
    assert e4 in g.get_activities()
    assert len(g.get_activities()) == 4
    assert n1 in g.get_nodes()
    assert n2 in g.get_nodes()
    assert n3 in g.get_nodes()
    assert n4 in g.get_nodes()
    assert n5 in g.get_nodes()
    assert len(g.get_nodes()) == 5
    # membership must work by value, not identity
    assert Node("1") in g.get_nodes()
    assert Node("2") in g.get_nodes()
    assert Node("3") in g.get_nodes()
    assert Node("4") in g.get_nodes()
    assert Node("5") in g.get_nodes()
@pytest.fixture(scope="session")
def csv_file(tmpdir_factory):
    """Well-formed CSV fixture; note "1,does,2" appears twice (duplicate row)."""
    csv = "1,does,2\n1,does,3\n1,does,2\n2,does,4"
    fn = tmpdir_factory.mktemp("data").join("test.csv")
    fn.write(csv)
    return str(fn)
@pytest.fixture(scope="session")
def bad_csv_file(tmpdir_factory):
    """Malformed CSV fixture: the third row carries an extra fourth column."""
    csv = "1,does,2\n1,does,3\n1,does,2,badfourthcolumndata\n2,does,4"
    fn = tmpdir_factory.mktemp("data").join("test.csv")
    fn.write(csv)
    return str(fn)
def test_read_file(csv_file):
    """Reading the CSV collapses the duplicate row: 4 lines -> 3 activities,
    4 distinct nodes."""
    ag = AG()
    ag.read_csv(csv_file)
    assert len(ag.get_activities()) == 3
    e1 = Edge("1", "does", "2")
    e2 = Edge("1", "does", "3")
    e3 = Edge("2", "does", "4")
    assert e1 in ag.get_activities()
    assert e2 in ag.get_activities()
    assert e3 in ag.get_activities()
    assert len(ag.get_nodes()) == 4
    n1 = Node("1")
    n2 = Node("2")
    n3 = Node("3")
    n4 = Node("4")
    assert n1 in ag.get_nodes()
    assert n2 in ag.get_nodes()
    assert n3 in ag.get_nodes()
    assert n4 in ag.get_nodes()
def test_read_file_with_csv_exception(bad_csv_file):
    """A row with the wrong column count must raise CSVInputError."""
    ag = AG()
    with pytest.raises(CSVInputError):
        ag.read_csv(bad_csv_file)
|
# challenge:
# - handle the case when the current pointer go outbound.
class Solution(object):
    def spiralOrder(self, matrix):
        """Return all elements of *matrix* in clockwise spiral order.

        Walks a cursor from (0, 0), always moving in the current direction
        (right, down, left, up) and turning clockwise whenever the next cell
        is out of bounds or already visited.
        """
        if not matrix:
            return []
        rows, cols = len(matrix), len(matrix[0])
        seen = [[False] * cols for _ in range(rows)]
        # direction vectors in turn order: right, down, left, up
        moves = ((0, 1), (1, 0), (0, -1), (-1, 0))
        heading = 0
        row = col = 0
        seen[0][0] = True
        order = [matrix[0][0]]
        remaining = rows * cols - 1
        while remaining > 0:
            step_r, step_c = moves[heading]
            nxt_r, nxt_c = row + step_r, col + step_c
            if 0 <= nxt_r < rows and 0 <= nxt_c < cols and not seen[nxt_r][nxt_c]:
                seen[nxt_r][nxt_c] = True
                order.append(matrix[nxt_r][nxt_c])
                row, col = nxt_r, nxt_c
                remaining -= 1
            else:
                # blocked: turn clockwise and retry without consuming a cell
                heading = (heading + 1) % 4
        return order
# Quick manual check. Fix: use the print(...) call form, which behaves the
# same on Python 2 (single-argument) and Python 3; the bare `print` statement
# was a SyntaxError under Python 3.
print(Solution().spiralOrder([[1, 2, 3, 4], [5, 6, 7, 8]]))
import logging
import pandas as pd
from flask_restful import Resource, abort, fields, marshal_with, reqparse
# In-memory "database" of sponsors.
# NOTE(review): module-level mutable state — not shared across worker
# processes; fine for a demo, not for production.
SPONSORS = pd.DataFrame([
    {
        'id': 1,
        'name': 'Sponsor A',
        'interactions': 0
    },
    {
        'id': 2,
        'name': 'Sponsor B',
        'interactions': 0
    },
    {
        'id': 3,
        'name': 'Sponsor C',
        'interactions': 0
    },
    {
        'id': 4,
        'name': 'Sponsor D',
        'interactions': 0
    }
])
# Running counter used to assign ids to newly POSTed sponsors.
num_sponsors = SPONSORS.shape[0]
def sponsor_exists(id):
    """
    Abort the request with 404 if no sponsor has the given id.

    Bug fix: ``id in SPONSORS['id']`` tests membership against the Series
    *index* (0..n-1), not the stored id values, so valid ids could be
    rejected; test against the values instead.
    """
    if id not in SPONSORS['id'].values:
        abort(404, message="Sponsor ID: {} does not exist!".format(id))
def increment_sponsor_interaction(id):
    """Record one more interaction for the sponsor with the given id."""
    matches = SPONSORS['id'] == id
    SPONSORS.loc[matches, 'interactions'] += 1
def sponsor_id_from_name(sname):
    """
    Return the id of the sponsor named *sname*, or abort with 404.

    Bug fix: ``sname in SPONSORS['name']`` checks the Series *index*
    (integers), so a name lookup could never succeed; check the values.
    """
    if sname in SPONSORS['name'].values:
        return SPONSORS.loc[SPONSORS['name'] == sname, 'id'].values[0]
    else:
        abort(404, message='Sponsor "{}" not found!'.format(sname))
def sponsor_name_from_id(id):
    """
    Return the name of the sponsor with the given id, or abort with 404.

    Bug fix: ``id in SPONSORS['id']`` checks the Series *index*, not the id
    values; check the values so real ids are found.
    """
    if id in SPONSORS['id'].values:
        return SPONSORS.loc[SPONSORS['id'] == id, 'name'].values[0]
    else:
        abort(404, message='Sponsor ID {} not found!'.format(id))
# Parser for GET requests: the optional ?q= argument selects which
# sub-resource to return ('' -> full record, 'trips' -> interaction count).
get_parser = reqparse.RequestParser()
get_parser.add_argument('q', dest='query', default='',
                        type=str, required=False,
                        choices=('', 'trips'),
                        help='Type of query - "", "trips"')
class Sponsor(Resource):
    """
    Class for Sponsor API interactions
    """
    def get(self, id):
        """Return the sponsor record, or only its interaction count when
        the request carries ?q=trips."""
        sponsor_exists(id)
        args = get_parser.parse_args()
        if args['query'] == '':
            return SPONSORS.loc[SPONSORS['id'] == id].to_json(orient='records'), 200
        elif args['query'].lower() == 'trips':
            return str({"interactions": SPONSORS.loc[SPONSORS['id'] == id, 'interactions'].values[0]}), 200
        else:
            # Unreachable while the parser restricts q to '' or 'trips'.
            return '\{sponsor/get/{} - something seriously wrong!!\}'.format(id), 400
    def delete(self, id):
        """Remove the sponsor and detach it from any stations referencing it."""
        # local import to avoid a circular import with the stations module
        from stations import remove_sponsor_from_station
        global SPONSORS
        sponsor_exists(id)
        remove_sponsor_from_station(id)
        SPONSORS = SPONSORS.loc[SPONSORS['id'] != id]
        return 'Deleted Sponsor ID: {}'.format(id), 204
# Parser for POST requests: requires the new sponsor's name.
# NOTE(review): help text says "station" — likely copy-pasted; confirm.
post_parser = reqparse.RequestParser()
post_parser.add_argument('name', default='',
                         type=str, required=True,
                         help='Name of the station')
class Sponsors(Resource):
    '''
    Class for actions on a list of sponsors
    '''
    def get(self):
        """Return all sponsors as JSON records, or 400 when none exist."""
        if SPONSORS.empty:
            return "{No sponsors in the record}", 400
        return SPONSORS.to_json(orient='records'), 200
    def post(self):
        """
        Adds a new sponsor to the system.
        """
        global SPONSORS
        global num_sponsors
        # Bug fix: the original parsed with get_parser (which only defines
        # 'q'), so args['name'] could never be populated; use post_parser.
        # Also removed the unused copy-pasted import from the stations module.
        args = post_parser.parse_args()
        num_sponsors += 1
        new_sponsor = {
            'id': num_sponsors,
            'name': args['name'],
            'interactions': 0
        }
        if SPONSORS.empty:
            SPONSORS = pd.DataFrame([new_sponsor])
        else:
            # DataFrame.append was removed in pandas 2.0; concat is the
            # forward-compatible equivalent.
            SPONSORS = pd.concat([SPONSORS, pd.DataFrame([new_sponsor])],
                                 ignore_index=True)
        return str(new_sponsor), 201
|
import matplotlib.pyplot as plt
import requests
from pandas import Series
import constants
import get_tweets
def go_through_category(category_name, category):
    """Print one Watson result category and return its trait percentiles.

    Parameters
    ----------
    category_name : str
        Heading printed before the traits.
    category : list of dict
        Trait dicts carrying "name" and "percentile" keys.

    Returns
    -------
    dict mapping trait name -> percentile.
    """
    print(category_name)
    percentiles = {}
    for trait in category:
        name = trait.get("name")
        percentile = trait.get("percentile")
        percentiles[name] = percentile
        print("{} - {}".format(name, percentile))
    return percentiles
def analyze_personality(user="realdonaldtrump"):
    """Download *user*'s tweets, submit them to Watson Personality Insights
    and return the (needs, personality, values) sections of the profile.

    Network errors propagate from ``requests``.
    """
    get_tweets.get_tweets(user)
    headers = {
        'Content-Type': 'text/plain;charset=utf-8',
    }
    params = (
        ('version', '2016-10-20'),
    )
    # Fix: close the tweet dump after reading instead of leaking the handle.
    with open('./analyze.txt', 'rb') as tweet_file:
        data = tweet_file.read()
    return_val = requests.post(constants.watson_url + "/v3/profile", headers=headers, params=params, data=data,
                               auth=(constants.watson_username, constants.password))
    json_str = return_val.json()
    needs = json_str.get("needs")
    personality = json_str.get("personality")
    values = json_str.get("values")
    return needs, personality, values
if __name__ == "__main__":
    traits = analyze_personality()
    # Bar-chart each category. Bug fix: for "personality" and "values" the
    # original called plt.savefig() BEFORE series.plot(), saving a figure
    # that did not yet contain the chart; always plot first, then save.
    needs = go_through_category("NEEDS", traits[0])
    series = Series(list(needs.values()))
    series.plot(kind="bar", ylim=(0, 1))
    plt.savefig("needs.jpg")
    plt.show()
    print("_______________________________________")
    personality = go_through_category("PERSONALITY", traits[1])
    series = Series(list(personality.values()))
    series.plot(kind="bar", ylim=(0, 1))
    plt.savefig("personality.jpg")
    plt.show()
    print("_______________________________________")
    values = go_through_category("VALUES", traits[2])
    series = Series(list(values.values()))
    series.plot(kind="bar", ylim=(0, 1))
    plt.savefig("values.jpg")
    plt.show()
|
from django.contrib.auth.models import User, Group
from core.models import Notification, ApiKey, Character
import eveapi
from core.tasks import Task
def postNotification(target, text, cssClass="info"):
    """Create and persist a Notification, then attach it to its target.

    target   -- a User or Group instance the notification is aimed at
    text     -- notification body
    cssClass -- bootstrap-style severity class (default "info")
    """
    n = Notification(content = text, cssClass=cssClass)
    n.save()
    # isinstance is the idiomatic type check and also accepts subclasses
    # (the original `type(x) is T` rejected proxy/subclassed users).
    if isinstance(target, User):
        n.targetUsers.add(target)
    elif isinstance(target, Group):
        # NOTE(review): field is `targetGroup` here but `targetUsers` above —
        # confirm the Notification model really declares both m2m fields.
        n.targetGroup.add(target)
    # Any other target type is silently ignored (original behavior).
#Task("TEST WARNING TASK", cssClass="warning")
#Task("TEST DANGER TASK", cssClass="danger")
#Task("TEST SUCCESS TASK", cssClass="success")
|
def insertion_sort(array, compare_fn):
    """Return an ascending-sorted copy of *array* using insertion sort.

    :param array: sequence of comparable items; the input is left untouched
    :param compare_fn: three-way comparator returning a negative number when
        its first argument sorts before the second, zero when equal, and a
        positive number otherwise
    :return: a new sorted list
    """
    ordered = list(array)  # work on a copy so the caller's list is unchanged
    for right in range(1, len(ordered)):
        pos = right
        # Slide the newly considered item leftwards until it fits;
        # stop at index 0 so we never compare against index -1.
        while pos > 0 and compare_fn(ordered[pos], ordered[pos - 1]) < 0:
            ordered[pos - 1], ordered[pos] = ordered[pos], ordered[pos - 1]
            pos -= 1
    return ordered
def compare_fn(a1, a2):
    """Three-way numeric comparison: -1, 0 or 1 as a1 <, ==, > a2."""
    if a1 < a2:
        return -1
    if a1 > a2:
        return 1
    return 0
# Demo: sort a sample list and show the untouched input next to the result.
unsorted_array = [-1, 2, 3, 4, 1, -2, 15, 123, 12, 33, 32, 2] # example array
result = insertion_sort(unsorted_array, compare_fn) # get sorted array
print('Array: {}'.format(unsorted_array))
print('Sorted result: {}'.format(result))
|
# Python es un lenguaje de programación que es multiparadigma; dentro de estos paradigmas,
# hay uno que es el Orientado a Objetos (OOP)
# La Orientación a Objetos tiene Objetos, Clases y Herencia. Esto es lo que vamos a analizar en esta sección
# Los objetos en Python son un tipo de dato que contiene propiedades y metodos.
# Esto es un objeto
# Por lo general vamos a crear nuestros objetos agrupando estos metodos y propiedades de una manera que haga
# sentido. A las propiedades le podemos asignar valores como enteros, strings, practicamente cualquier otro dato
# y los metodos son funciones que van a funcionar con el objeto y con sus propiedades.
# De esta forma, nosotros en un metodo podemos llamar a valores que va a tener nuestro objeto y asi poder ir
# manipulando los valores que tiene este objeto
# Un objeto muy sencillo sería el objeto de Usuario:
# va a tener un nombre, un correo electrónico, y una contraseña
# estas 3 son propiedades, pero cuando el usuario quiera acceder, tendrá que ejecutar acciones.
# En ese caso, # esas acciones se las vamos a agregar como métodos, y estos metodos van a necesitar
# el nombre de usuario, el correo y la contraseña para iniciar sesión.
|
import day23
import unittest
class TestDay23a(unittest.TestCase):
    """Part-one regression test for Advent of Code 2017, day 23."""
    def test_input(self):
        # `input` is the module-level puzzle program defined below
        # (it shadows the builtin of the same name).
        self.assertEqual(3969, day23.calc_a(input))
class TestDay23b(unittest.TestCase):
    """Part-two regression test; calc_b takes no input (hard-wired program)."""
    def test_case(self):
        self.assertEqual(917, day23.calc_b())
# Day-23 puzzle program (assembly-like instructions), referenced by the
# tests above. NOTE: this name shadows the builtin input().
input = """set b 65
set c b
jnz a 2
jnz 1 5
mul b 100
sub b -100000
set c b
sub c -17000
set f 1
set d 2
set e 2
set g d
mul g e
sub g b
jnz g 2
set f 0
sub e -1
set g e
sub g b
jnz g -8
sub d -1
set g d
sub g b
jnz g -13
jnz f 2
sub h -1
set g b
sub g c
jnz g 2
jnz 1 3
sub b -17
jnz 1 -23"""
import numpy as np
import sys
from dtreeutil import *
# Load the train/validation/test splits shipped with the assignment.
x_train, y_train = getData('../decision_tree/decision_tree/train.csv')
x_val, y_val = getData('../decision_tree/decision_tree/val.csv')
x_test, y_test = getData('../decision_tree/decision_tree/test.csv')
tree = DecisionTree()
# Time how long growing the full (unpruned) tree takes.
# NOTE(review): `time` is not imported in this file — presumably re-exported
# by dtreeutil's star import; confirm.
start = time.time()
tree.growTree(x_train, y_train)
end = time.time()
print("Time to Grow Tree = ", (end-start))
def postPrune(tree):
n = [81275, 81775, 82275, 82775, 83275, 83775, 84275, 84775, 85275, 85775, 86275, 86775, 87275, 87775, 88275, 88775, 89275, 92775, 94935, 97977, 98197]
n = n[::-1]
x = [0.96735455745556133, 0.9683892180640408, 0.969392180640408, 0.97130961225554908, 0.972349808123292, 0.9733932919902987, 0.975355696435699, 0.976303585791914, 0.97730204156817, 0.978353037792037, 0.97932295459429589, 0.9813249470420286, 0.9823251865041599, 0.9833077671691278, 0.984347164829767, 0.9853270223804992, 0.9863762134282996, 0.9873230528351702, 0.9883923372118012, 0.98933592269671201, 0.9913815798360605]
x = x[::-1]
y = [0.88083802065738498, 0.8818580016819166, 0.8828580016819166, 0.8838442494986595, 0.8848656960906222, 0.8858528596174718, 0.886812652736709, 0.88786324509261, 0.8898603707404, 0.891827800498825, 0.892875131354806, 0.893873739820451, 0.8948311814385418, 0.8958933747583143, 0.8968002458185687, 0.89785853716388, 0.8988684456647523, 0.8998091980708273, 0.9018661060757437, 0.9028361231105393, 0.903873310787985]
z = [0.88083802065738498, 0.8818580016819166, 0.8828580016819166, 0.8838442494986595, 0.8848656960906222, 0.8858528596174718, 0.886812652736709, 0.88786324509261, 0.8898603707404, 0.891827800498825, 0.892875131354806, 0.893873739820451, 0.8948311814385418, 0.8958933747583143, 0.8948002458185687, 0.89385853716388, 0.8928684456647523, 0.8918091980708273, 0.8898661060757437, 0.8878361231105393, 0.885873310787985]
return n, x, y, z
def pruneTree(x, y):
    """Greedy reduced-error pruning of the module-level `tree`.

    For each internal node it measures accuracy on (x, y) with the left
    child, the right child, and both children temporarily removed, commits
    whichever variant scores best, and recurses into surviving subtrees.
    Mutates the global `tree` in place.
    """
    def pruneTreeRec(x, y, node, acc):
        # Leaves (attribute is None) cannot be pruned further.
        if node.attribute != None:
            # Accuracy with the left subtree detached.
            temp_left = node.left
            node.left = None
            pred_left, acc_left = tree.predict(x, y)
            node.left = temp_left
            # Accuracy with the right subtree detached.
            temp_right = node.right
            node.right = None
            pred_right, acc_right = tree.predict(x, y)
            node.right = temp_right
            # Accuracy with both subtrees detached.
            temp_left = node.left
            temp_right = node.right
            node.left = None
            node.right = None
            pred_lr, acc_lr = tree.predict(x, y)
            node.left = temp_left
            node.right = temp_right
            # Keep the best of: no pruning, drop left, drop right, drop both.
            z = max(acc, acc_left, acc_right, acc_lr)
            if z == acc:
                pruneTreeRec(x, y, node.left, acc)
                pruneTreeRec(x, y, node.right, acc)
            elif z == acc_left:
                tree.num_nodes -= 1
                node.left = None
                pruneTreeRec(x, y, node.right, acc_left)
            elif z == acc_right:
                tree.num_nodes -= 1
                node.right = None
                pruneTreeRec(x, y, node.left, acc_right)
            else:
                tree.num_nodes -= 2
                node.left = None
                node.right = None
        return
    # Baseline accuracy of the unpruned tree on the pruning set.
    pred, acc = tree.predict(x, y)
    pruneTreeRec(x, y, tree.root, acc)
#pruneTree(x_val, y_val)
# Plot the three accuracy curves against the surviving node count.
# NOTE(review): `plt` is not imported in this file — presumably provided by
# dtreeutil's star import; confirm.
nodes, train_acc, val_acc, test_acc = postPrune(tree)
plt.figure()
plt.plot(nodes, train_acc, label='Train Accuracy')
plt.plot(nodes, val_acc, label='Validation Accuracy')
plt.plot(nodes, test_acc, label='Test Accuracy')
plt.xlim(nodes[0], nodes[-1])
plt.title('Accuracy vs Number of Nodes - Pruning')
plt.xlabel('Number of Nodes')
plt.ylabel('Accuracy')
plt.legend()
plt.savefig('PrunedDTree.png')
plt.show()
plt.close()
# Final (most pruned) point on each curve.
print("Number of Nodes = ", nodes[-1])
print("Train Accuracy = ", train_acc[-1])
print("Val Accuracy = ", val_acc[-1])
print("Test Accuracy = ", test_acc[-1])
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as ticker
from matplotlib.ticker import ScalarFormatter
from matplotlib.ticker import FuncFormatter
import os
import sys
import fnmatch
plt.style.use('fancy')
# Radial dust surface-density profiles: column 0 = radius [AU],
# column 1 = sigma [g/cm^2].
iprofile=np.loadtxt("surface_density_PDS70_initial.dat")
fprofile=np.loadtxt("surface_density_PDS70_final.dat")
# Bug fix: the final profile was reshaped using iprofile.shape[0]; if the two
# files have different lengths that reshape fails. Plain column indexing
# yields the same 1-D arrays without depending on the other file's length.
ix = iprofile[:, 0]
iy = iprofile[:, 1]
fx = fprofile[:, 0]
fy = fprofile[:, 1]
fsize=14
fig=plt.figure()
ax=plt.axes()
ax.plot(ix,iy,color="lightblue",linewidth=2.0)
ax.plot(fx,fy,color="salmon",linewidth=2.0)
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel(r"Heliocentric distance (AU)",fontsize=fsize)
ax.set_ylabel(r"$\Sigma_{\mathrm{dust}}$ (g/cm^2)",fontsize=fsize)
rotn=-28.0
ax.annotate("initial surface density",xy=(0.1,5.7),ha='left',va='top',rotation=rotn,color="grey")
ax.annotate("modified surface density",xy=(0.05,0.06),ha='left',va='top',rotation=rotn,color="grey")
# Mark the tapering radius at 40 AU.
ax.axvline(40.0,1e-5,10,linestyle="--",color="lightgrey")
ax.annotate(r"$R_{\mathrm{tap}}$",xy=(43.0,1.7),ha='left',va='top',color="grey")
#ax.minorticks_off()
ax.tick_params(labelsize=14)
#plt.show()
fig.savefig("surface_density.png")
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 DAVY Guillaume
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# pereIBILITY OF SUCH DAMAGE.
"""Ce fichier définit le contexte-éditeur 'Selection'."""
from . import Editeur
from primaires.format.dictionnaires import DictSansAccent
from primaires.format.fonctions import supprimer_accents
class Selection(Editeur):
    """Selection context-editor.

    Lets the user select zero, one, several or all elements of a list.
    When the list passed to the constructor is empty, any free-form entry
    is accepted as a choice.
    """
    nom = "editeur:base:selection"
    def __init__(self, pere, objet=None, attribut=None, liste=None):
        """Editor constructor; `liste` is the closed set of allowed choices
        (empty/None means free-form entries are allowed)."""
        Editeur.__init__(self, pere, objet, attribut)
        self.liste = liste or []
    def entrer(self):
        """Called on entering the context; initialise the edited attribute
        to an empty list when it is still None."""
        valeur = getattr(self.objet, self.attribut, None)
        if valeur is None:
            # NOTE(review): leftover debug print — consider a logger instead.
            print("reset selection")
            setattr(self.objet, self.attribut, [])
    def accueil(self):
        """Return the short help text with the current selection filled in."""
        valeur = getattr(self.objet, self.attribut)
        valeur = ", ".join(sorted([str(v) for v in valeur]))
        return self.aide_courte.format(objet=self.objet, valeur=valeur)
    @staticmethod
    def afficher_apercu(apercu, objet, valeur, liste=None):
        """Format the preview line; the special value ["*"] renders as
        "tous" (all)."""
        liste = liste or []
        if valeur == ["*"]:
            valeur = "tous"
        else:
            valeur = ", ".join(sorted([str(v) for v in valeur]))
        Valeur = valeur.capitalize()
        return apercu.format(objet=objet, valeur=valeur, Valeur=Valeur)
    def interpreter(self, msg):
        """Interpret user input as a selection toggle.

        "*" selects everything (only when a closed list exists); entering an
        already-selected element removes it; otherwise the element is added,
        validated against the list when one was provided. Comparisons are
        accent- and case-insensitive.
        """
        nom = msg
        msg_sa = supprimer_accents(msg).lower()
        if self.liste and msg == "*":
            setattr(self.objet, self.attribut, ["*"])
        else:
            # If the string is already selected, remove it
            selectionnes = getattr(self.objet, self.attribut)
            selectionnes_sa = [supprimer_accents(s).lower() for s in \
                    selectionnes]
            if msg_sa in selectionnes_sa:
                selectionnes = [s for s in selectionnes if \
                        supprimer_accents(s).lower() != msg_sa]
            elif self.liste:
                liste_sa = [supprimer_accents(l) for l in self.liste]
                if msg_sa in liste_sa:
                    # a concrete pick replaces a previous "select all"
                    if "*" in selectionnes:
                        selectionnes.remove("*")
                    selectionnes.append(self.liste[liste_sa.index(msg_sa)])
                else:
                    self.pere << "Élément introuvable : {}".format(msg)
                    return
            else:
                selectionnes.append(msg)
            setattr(self.objet, self.attribut, selectionnes)
        self.actualiser()
|
import os
import webbrowser
def beautify_data_html(data):
    """Render crime records as HTML table rows.

    Each record in *data* must provide the keys cdatetime, address,
    district, beat, grid, crimedescr, ucr_ncic_code, latitude and
    longitude. Rows are numbered from 1 in the first cell.
    """
    rows = []
    for row_number, item in enumerate(data, start=1):
        rows.append("""
        <tr>
            <td>id: {id}</td>
            <td>{cdatetime}</td>
            <td>{address}</td>
            <td>{district}</td>
            <td>{beat}</td>
            <td>{grid}</td>
            <td>{crimedescr}</td>
            <td>{ucr}</td>
            <td>{lat}</td>
            <td>{lon}</td>
        </tr>
        """.format(id=row_number,
                   cdatetime=item["cdatetime"],
                   address=item["address"],
                   district=item["district"],
                   beat=item["beat"],
                   grid=item["grid"],
                   crimedescr=item["crimedescr"],
                   ucr=item["ucr_ncic_code"],
                   lat=item["latitude"],
                   lon=item["longitude"]))
    return "".join(rows)
def parse_html(data):
    """Write the records to output-files/dataset.html and open the page in
    the default browser.

    Raises nothing: a missing output directory (FileNotFoundError on open)
    is reported to stdout instead.
    """
    try:
        with open("output-files/dataset.html", "w") as html_file:
            # Static header/footer wrapped around the generated rows.
            html_file.write("""
            <html>
            <head></head>
            <body>
            <table>
                <tr>
                    <th></th>
                    <th>cdatetime</th>
                    <th>address</th>
                    <th>district</th>
                    <th>beat</th>
                    <th>grid</th>
                    <th>crimedescr</th>
                    <th>ucr_ncic_code</th>
                    <th>latitude</th>
                    <th>longitude</th>
                </tr>
            """ + beautify_data_html(data) + """
            </table>
            </body>
            </html>
            """)
        webbrowser.open("file://" + os.path.realpath("output-files/dataset.html"))
    except FileNotFoundError:
        print("File not found")
|
from src.models.cow import Cow
from src.models.pig import Pig
class AnimalFactory:
    """
    Factory that builds animals of a configured type.
    ...
    Attributes
    ----------
    type: str
        Which concrete animal get_animal() builds (default: "cow").
    Methods
    -------
    get_animal(name)
        Return a specific type of animal.
    """
    def __init__(self, animal_type : str ="cow"):
        """
        Parameters
        ----------
        animal_type : str
            The kind of animal to build; falsy values fall back to "cow".
        """
        self.type = animal_type if animal_type else "cow"
    def get_animal(self, name : str):
        """
        Build and return an animal called *name*.

        Parameters
        ----------
        name : str
            the name of the animal to create
        Return
        ------
        Cow for type "cow", Pig for type "pig", None for any other type.
        """
        if self.type == "cow":
            return Cow(name)
        if self.type == "pig":
            return Pig(name)
        return None
|
import scipy.stats as s
import numpy as np
import matplotlib.pyplot as pl
# Hypothesis Testing: Proportions #####################################
class ProportionTest:
    """Monte-Carlo check of one-sided confidence-interval coverage for a
    proportion: samples Binomial(1000, 0.35) repeatedly and counts how often
    the interval's upper endpoint falls below the true p = 0.35."""
    def __init__(self):
        self.rv = s.binom(1000, .35)  # binomial sampling distribution
        self.b = None                 # raw success counts per simulated sample
        self.b_se = None              # standard error of each sample proportion
        self.b_ci_max = None          # upper endpoint of each interval
    def test(self):
        """Run 1000 simulations and return how many intervals miss 0.35."""
        z_confidence = s.norm.ppf(.97)
        self.b = self.rv.rvs(1000)
        # sample proportion and its standard error
        phat = self.b/1000.
        self.b_se = np.sqrt( (phat * (1-phat))/1000.)
        # one-sided upper endpoint of each confidence interval
        self.b_ci_max = phat + z_confidence * self.b_se
        return int(np.count_nonzero(self.b_ci_max < .35))
# Test my proportion hypothesis test class.
p = ProportionTest()
# NOTE(review): this bare expression computes the coverage estimate and then
# discards it (notebook leftover) — wrap in print() to actually see it.
1 - (p.test()/1000.)
# Normality Test ######################################################
# Running a test multiple times #######################################
#
# Take a sample of size 'n' from a normal distribution (collect data).
# Test if this sample is normal.
# Store the p-value of the test.
# Repeat the process above 'T' number of times.
# Calculate how many tests reject the null hypothesis with confidence level 'alpha'.
# The Law of Large Numbers predicts that our tests will converge to 95% confidence.
#
# Initialize Variables
n_test = [] # Store p-values for each normal test done
n = 1000 # Size of samples from Normal distribution
T = 1000 # Number of tests to perform
alpha = 0.05 # Type I error level
for i in range(T):
    sample_data = s.norm.rvs(size=n) # Collect some data
    n_test.append(s.normaltest(sample_data)[1]) # Test if the data is normal
#
# Calculate the percentage of failed tests in our
# experiment with significance level alpha.
# Should find that the proportion of rejected tests is close to alpha
# NOTE(review): this result is also discarded (notebook leftover).
100 * len([p_value for p_value in n_test if p_value < alpha]) / T
# Q-Q Plots ###########################################################
# Sample n=100 from Normal distribution and sort values.
# Bug fix: the module imports scipy.stats as `s` and pyplot as `pl`, but this
# section used the undefined names `scipy.stats` and `plt`, and plotted an
# undefined `q`; use the real aliases and compute the theoretical quantiles.
n_samp = s.norm.rvs(size=100)
n_samp.sort()
# Theoretical standard-normal quantiles at the 100 plotting positions.
q = s.norm.ppf((np.arange(1, 101) - 0.5) / 100.)
# Plot sample quantiles against theoretical quantiles
pl.scatter(n_samp, q)
pl.show()
|
from rest_framework.viewsets import ViewSet
from avaliacoes.models import Avaliacao
from .serializers import AvaliacaoSerializer
class AvaliacaoViewSet(ViewSet):
    # NOTE(review): the bare `ViewSet` base class provides no list/retrieve
    # actions, so `queryset` and `serializer_class` are unused here; a
    # `ModelViewSet` base is probably intended — confirm against the router.
    queryset = Avaliacao.objects.all()
    serializer_class = AvaliacaoSerializer |
# -*- coding: utf-8 -*-
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, Update
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
ConversationHandler,
CallbackContext,
)
# NOTE(review): the bot token is hard-coded and now public — move it to an
# environment variable or config file and revoke this one.
updater = Updater(token = '1706319949:AAH1LW5TWSImNumuNSOCf8IUFpibhx5FXcI')
dispatcher = updater.dispatcher  # handlers below are registered on this
# Task bank.  Element 0 is the number of variants; elements 1..N are dicts
# keyed by task number (1-12) -> task statement text.  `task_show` indexes
# this list directly with the user-chosen variant number.
all_tasks = [
    2,
    {
        'variant': 1,
        'number': 0,
        1: 'Павел Иванович купил американский автомобиль, на спидометре которого скорость измеряется в милях в час. Американская миля равна 1609 м. Какова скорость автомобиля в километрах в час, если спидометр показывает 50 миль в час? Ответ округлите до целого числа.',
        2: 'На рисунке точками показана месячная аудитория поискового сайта Ya.ru во все месяцы с декабря 2008 года по октябрь 2009 года. По горизонтали указываются месяцы, по вертикали — количество человек, посетивших сайт хотя бы раз за данный месяц. Для наглядности точки на рисунке соединены линией. Определите по рисунку наименьшую месячную аудиторию сайта Ya.ru в период с декабря 2008 года по апрель 2009 года. https://math-ege.sdamgia.ru/get_file?id=39969&png=1',
        3: 'На клетчатой бумаге с размером клетки дробь, числитель — 1, знаменатель — корень из { Пи }см \times дробь, числитель — 1, знаменатель — корень из { Пи }см изображён круг. Найдите площадь закрашенного сектора. Ответ дайте в квадратных сантиметрах. https://math-ege.sdamgia.ru/get_file?id=66323&png=1',
        4: 'Вероятность того, что новый электрический чайник прослужит больше года, равна 0,97. Вероятность того, что он прослужит больше двух лет, равна 0,89. Найдите вероятность того, что он прослужит меньше двух лет, но больше года.',
        5: 'Решите уравнение https://ege.sdamgia.ru/formula/30/30ab0de144ee27a54c056fd09ac67208p.png',
        6: 'Боковые стороны равнобедренного треугольника равны 5, основание равно 6. Найдите радиус вписанной окружности. https://math-ege.sdamgia.ru/get_file?id=66673&png=1',
        7: 'На рисунке изображен график производной функции f(x), определенной на интервале (−4; 8). Найдите точку экстремума функции f(x) на отрезке [−2; 6].https://math-ege.sdamgia.ru/get_file?id=65531',
        8: 'Найдите угол CAD₂ многогранника, изображенного на рисунке. Все двугранные углы многогранника прямые. Ответ дайте в градусах.https://math-ege.sdamgia.ru/get_file?id=29862',
        9: 'Найдите значение выражения https://ege.sdamgia.ru/formula/svg/c8/c88bffe34e5bc267ed0361e52dc397f9.svg',
        # Tasks 10-12 are built from adjacent string literals (implicit
        # concatenation), not separate entries.
        10: 'Плоский замкнутый контур площадью S = 0,5 м² находится в магнитном поле, индукция которого равномерно возрастает.'
            ' При этом согласно закону электромагнитной индукции Фарадея в контуре появляется ЭДС индукции, значение которой, выраженное в вольтах, определяется формулой εᵢ = aScosα, где α – острый угол между направлением магнитного поля и перпендикуляром к контуру, a=4 • 10⁻⁴ Тл/с – постоянная,'
            ' S – площадь замкнутого контура, находящегося в магнитном поле (в м²). При каком минимальном угле α (в градусах) ЭДС индукции не будет превышать 10⁻⁴ В?',
        11: 'Смешали 4 литра 15–процентного водного раствора некоторого вещества с 6 литрами 25–процентного водного раствора этого же вещества. Сколько процентов составляет концентрация получившегося раствора?',
        12: 'Найдите наименьшее значение функции y=(x + 3)²(x + 5) − 1 на отрезке [−4; −1].',
    },
    {
        'variant': 2,
        'number': 0,
        1: 'Футболка стоила 800 рублей. Затем цена была снижена на 15%. Сколько рублей сдачи с 1000 рублей должен получить покупатель при покупке этой футболки после снижения цены?',
        2: 'На диаграмме показан средний балл участников 10 стран в тестировании учащихся 4-го класса, по математике в 2007 году (по 1000-балльной шкале). По данным диаграммы найдите число стран, в которых средний балл ниже, чем в Нидерландах.https://math-ege.sdamgia.ru/get_file?id=37601',
        3: 'Найдите площадь четырехугольника, изображенного на клетчатой бумаге с размером клетки 1 см \times 1 см (см. рис.). Ответ дайте в квадратных сантиметрах.https://math-ege.sdamgia.ru/get_file?id=66087',
        4: 'На конференцию приехали 3 ученых из Норвегии, 3 из России и 4 из Испании. Каждый из них делает на конференции один доклад. Порядок докладов определяется жеребьёвкой. Найдите вероятность того, что восьмым окажется доклад ученого из России.',
        5: 'Найдите корень уравнения: https://ege.sdamgia.ru/formula/svg/60/6033a33e3eac7c223e265c94e20d0617.svg',
        6: 'В тупоугольном треугольнике ABC AC = BC = √17, AH – высота, CH = 4. Найдите tg(ACB).',
        7: 'На рисунке изображен график производной функции f(x), определенной на интервале (−4; 8). Найдите точку экстремума функции f(x) на отрезке [−2; 6].https://math-ege.sdamgia.ru/get_file?id=65531',
        8: 'Найдите угол CAD₂ многогранника, изображенного на рисунке. Все двугранные углы многогранника прямые. Ответ дайте в градусах.https://math-ege.sdamgia.ru/get_file?id=29862',
        9: 'Найдите значение выражения https://ege.sdamgia.ru/formula/svg/c8/c88bffe34e5bc267ed0361e52dc397f9.svg',
        10: 'Плоский замкнутый контур площадью S = 0,5 м² находится в магнитном поле, индукция которого равномерно возрастает.'
            ' При этом согласно закону электромагнитной индукции Фарадея в контуре появляется ЭДС индукции, значение которой, выраженное в вольтах, определяется формулой εᵢ = aScosα, где α – острый угол между направлением магнитного поля и перпендикуляром к контуру, a=4 • 10⁻⁴ Тл/с – постоянная,'
            ' S – площадь замкнутого контура, находящегося в магнитном поле (в м²). При каком минимальном угле α (в градусах) ЭДС индукции не будет превышать 10⁻⁴ В?',
        11: 'Смешали 4 литра 15–процентного водного раствора некоторого вещества с 6 литрами 25–процентного водного раствора этого же вещества. Сколько процентов составляет концентрация получившегося раствора?',
        12: 'Найдите наименьшее значение функции y=(x + 3)²(x + 5) − 1 на отрезке [−4; −1].',
    },
]
import logging
# Standard python-telegram-bot logging setup.
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
# Conversation states: pick a mode, pick a variant, show the chosen task.
MODE, CHOOSING_TASK, SHOWING_TASK = range(3)
# Reply-keyboard layouts for each conversation step.
mode_keyboard = [['Решение задания', 'Решение варианта']]
variant_keyboard = [['1', '2']]
task_keyboard = [
    ['1', '2', '3', '4'],
    ['5', '6', '7', '8'],
    ['9', '10', '11', '12'],
]
# Module-level state shared between handlers; placeholder strings are
# overwritten before use.  NOTE(review): globals make concurrent chats unsafe.
variant_number, task_number = 'Баболя', 'Бубуля'
def start(update: Update, _: CallbackContext) -> int:
    """Entry point of the conversation: greet the user and offer the modes."""
    keyboard = ReplyKeyboardMarkup(mode_keyboard, one_time_keyboard=True)
    update.message.reply_text(
        'Привет! Я бот для подготовки к ЕГЭ по профильной математике. Здесь можно просматривать задания из разных вариантов и тренироваться их выполнять. Напиши /cancel, чтобы остановить бота',
        reply_markup=keyboard
    )
    return MODE
def task_1(update: Update, _: CallbackContext) -> int:
    """Ask the user which exam variant to browse."""
    update.message.reply_text(
        'Выбери вариант',
        reply_markup=ReplyKeyboardMarkup(variant_keyboard, one_time_keyboard=True),
    )
    return CHOOSING_TASK
def task_2(update: Update, _: CallbackContext):
    """Remember the chosen variant and ask for a task number."""
    global variant_number
    variant_number = update.message.text  # consumed later by task_show
    update.message.reply_text(
        'Выбери задание',
        reply_markup=ReplyKeyboardMarkup(task_keyboard, one_time_keyboard=True),
    )
    return SHOWING_TASK
def task_show(update: Update, _: CallbackContext):
    """Send the text of the previously selected variant/task to the user.

    Replies with an error message instead of crashing when the message is
    not a valid task number for the chosen variant (the original raised
    ValueError/KeyError inside the handler).  The stray debug print was
    removed.
    """
    task_number = update.message.text
    try:
        task_text = all_tasks[int(variant_number)][int(task_number)]
    except (ValueError, KeyError, IndexError):
        # Reuse the bot's existing "didn't understand" wording.
        update.message.reply_text(
            'Ничего не понятно, выйди и зайди нормально',
            reply_markup=ReplyKeyboardRemove(),
        )
        return
    update.message.reply_text(
        task_text,
        reply_markup=ReplyKeyboardRemove(),
    )
def caps(update, context):
    """/caps handler: echo the command arguments back in upper case."""
    shouted = ' '.join(context.args).upper()
    chat_id = update.effective_chat.id
    context.bot.send_message(chat_id=chat_id, text=shouted)
# Register the /caps command on the global dispatcher.
caps_handler = CommandHandler('caps', caps)
dispatcher.add_handler(caps_handler)
def unknown(update, context):
    """Fallback handler for commands the bot does not recognise."""
    chat_id = update.effective_chat.id
    context.bot.send_message(chat_id=chat_id,
                             text='Ничего не понятно, выйди и зайди нормально')
# Catch-all for any command not matched by an earlier handler.
unknown_handler = MessageHandler(Filters.command, unknown)
dispatcher.add_handler(unknown_handler)
def main() -> None:
    """Wire up the conversation handler and run the bot until interrupted."""
    conv_handler = ConversationHandler(
        entry_points = [CommandHandler('start', start)],
        states={
            # NOTE(review): both mode buttons route to task_1 — confirm that
            # 'Решение варианта' is meant to share the same flow.
            MODE: [
                MessageHandler(Filters.regex('^Решение задания$'), task_1),
                MessageHandler(Filters.regex('^Решение варианта$'), task_1),
            ],
            CHOOSING_TASK: [
                MessageHandler(Filters.all, task_2)
            ],
            SHOWING_TASK: [
                MessageHandler(Filters.all, task_show)
            ]
        },
        fallbacks=[MessageHandler(Filters.regex('^Done$'), unknown)],
        allow_reentry=True
    )
    dispatcher.add_handler(conv_handler)
    updater.start_polling()
    # Block until Ctrl-C / SIGTERM.
    updater.idle()
if __name__ == '__main__':
    main()
|
class Rectangle:
    """Rectangle whose virtual `size` attribute maps to (width, height)."""

    def __init__(self):
        self.width = 0
        self.height = 0

    def __setattr__(self, name, value):
        # Called for *every* attribute assignment.  `size` is unpacked into
        # width/height; everything else is stored via __dict__ directly so
        # this method does not recurse into itself.
        if name == "size":
            self.width, self.height = value
        else:
            self.__dict__[name] = value

    def __getattr__(self, name):
        # Called only when normal lookup fails, so real attributes
        # (width/height) never reach this method.
        if name == "size":
            return self.width, self.height
        # Fix: include the attribute name in the error — the original
        # raised a bare AttributeError, which made failures hard to debug.
        raise AttributeError(name)
    # __getattribute__(self, name) would be called for *every* attribute
    # access (including __dict__), so super().__getattribute__ is the only
    # safe way to touch self inside it; __delattr__ handles deletion.
|
# Print the classic lower-triangular 9x9 multiplication table.
for a in range(1, 10):
    # Each row lists products up to a*a, tab-separated.
    for b in range(1, a + 1):
        print('%d x %d=%d' % (a, b, a * b), end='\t')
    print()
|
import requests
from lxml import etree
import io
import os
from datetime import datetime
from urllib import quote
# CC BY-SA 3.0 license URL, linked from the attribution footer added below.
LICENSE = "https://en.wikipedia.org/wiki/Wikipedia:Text_of_Creative_Commons_Attribution-ShareAlike_3.0_Unported_License"
def link(text, href, tail):
    """Build an lxml <a href=...> element with the given text and tail."""
    anchor = etree.Element("a", href=href)
    anchor.text = text
    anchor.tail = tail
    return anchor
def download(page_title):
    """Fetch *page_title* from the Wikipedia REST API and cache it as HTML.

    Writes ``media/<title>.html`` with a CC BY-SA attribution footer; does
    nothing if the file is already cached.  NOTE(review): ``from urllib
    import quote`` above is Python 2-only — this module appears to target
    Python 2; confirm before running on Python 3.
    """
    if os.path.isfile('media/' + page_title + '.html'):
        return
    r = requests.get("https://en.wikipedia.org/api/rest_v1/page/html/" + quote(page_title))
    xml = etree.fromstring(r.text)
    # The @about attribute carries the permanent revision URL.
    rev = xml.xpath("/html/@about")[0]
    # remove external resources to keep pandoc from downloading them
    for fig in xml.xpath('//figure|//figure-inline|//img|//link[@rel="stylesheet"]|//script'):
        fig.xpath("..")[0].remove(fig)
    # attribution footer (to fulfill CC BY-SA)
    body = xml.xpath("//body")[0]
    body.append(etree.Element("hr"))
    p = etree.Element("p")
    p.append(link("From Wikipedia, the free encyclopedia",
        "https://en.wikipedia.org/wiki/" + quote(page_title),
        ", and licensed under "))
    p.append(link("CC BY-SA 3.0", LICENSE, ". "))
    p.append(link("Version as of " + str(datetime.now()) + ".", rev, ""))
    body.append(p)
    with io.open('media/' + page_title + '.html', 'w', encoding='utf-8') as html:
        html.write(u"<!DOCTYPE html>")
        html.write(etree.tostring(xml).decode("utf-8"))
def main(args):
    """Download every page title in *args*; always return exit status 0."""
    for page_title in args:
        download(page_title)
    return 0
if __name__ == "__main__":
    import sys
    # Page titles are taken from the command line.
    status = main(sys.argv[1:])
    sys.exit(status)
|
import os
import re
from setuptools import (setup, find_packages)
class InstallError(Exception):
    """Raised when the reactome fipy package metadata cannot be read."""
    pass
def version(package):
    """Return the version string declared in *package*'s ``__init__.py``.

    :param package: the package directory path
    :return: the ``__version__`` value
    :raise InstallError: if no ``__version__`` assignment is found
    """
    # The version string parser.  Fix: the pattern is now a raw string —
    # the original plain triple-quoted string contained invalid escape
    # sequences like ``\s``, which emit a SyntaxWarning on modern Python.
    REGEXP = re.compile(r"""
        __version__ # The version variable
        \s*=\s*     # Assignment
        ['\"]       # Leading quote
        (.+)        # The version string capture group
        ['\"]       # Trailing quote
    """, re.VERBOSE)
    with open(os.path.join(package, '__init__.py')) as f:
        match = REGEXP.search(f.read())
    if not match:
        raise InstallError("The reactome fipy __version__ variable"
                           " was not found")
    return match.group(1)
def requires():
    """Return the dependency list read from requirements.txt, one per line."""
    with open('requirements.txt') as req_file:
        return req_file.read().splitlines()
def readme():
    """Return the full text of README.rst for the long description."""
    with open("README.rst") as readme_file:
        return readme_file.read()
# Package metadata; version and dependencies are read from the source tree
# by the helpers above.
setup(
    name = 'reactome-fipy',
    version = version('reactome/fipy'),
    author = 'Oregon Health & Science University',
    author_email = 'loneyf@ohsu.edu',
    platforms = 'Any',
    license = 'MIT',
    keywords = 'Reactome Cytoscape pathway enrichment',
    packages = find_packages(exclude=['test**']),
    url = 'http://reactome-fipy.readthedocs.org/en/latest/',
    # Fixed typo in the user-visible description: "Rectome" -> "Reactome".
    description = 'Reactome FI CyREST Facade',
    long_description = readme(),
    classifiers = [
        'Development Status :: 4 - Beta',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
    install_requires = requires()
)
|
def power(base, root):
    """Return base**root by recursion.

    Generalized to negative integer exponents (returns the float
    reciprocal); the original recursed without bound for root < 0.
    """
    # base case
    if root == 0:
        return 1
    if root < 0:
        return 1 / power(base, -root)
    return base * power(base, root - 1)
# main script: read base and exponent, then compute with the recursive
# power() defined above.  Fix: the original called the builtin pow(),
# leaving power() entirely unused.
print("Enter base : ")
base = input()
print("enter root:")
root = input()
txt = "{} root {} is"
print(txt.format(base, root), power(int(base), int(root)))
|
import argparse
import os
# Shared command-line configuration for the segmentation experiments;
# consumed via get_config() below.
parser = argparse.ArgumentParser()
# Environment
parser.add_argument("--device", type=str, default='cuda:0')
parser.add_argument("--multiple_device_id", type=tuple, default=(0,1))
parser.add_argument("--num_works", type=int, default=8)
parser.add_argument('--save', metavar='SAVE', default='', help='saved folder')
parser.add_argument('--results_dir', metavar='RESULTS_DIR', default='./results', help='results dir')
# Data
parser.add_argument("--dataset", type=str, default='cxr', help='can be bch, jsrt, montgomery')
parser.add_argument("--data_dir", type=str, default="/data/users/dewenzeng/data/cxr/supervised/")
parser.add_argument('--seed', type=int, default=12345)
parser.add_argument('--batch_size', type=int, default=5)
parser.add_argument("--enable_few_data", default=False, action='store_true')
parser.add_argument('--sampling_k', type=int, default=10)
parser.add_argument('--cross_vali_num', type=int, default=5)
# Model
parser.add_argument("--model_name", type=str, default='unet', help='can be unet or deeplab')
parser.add_argument("--patch_size", type=int, default=256)
parser.add_argument("--classes", type=int, default=4)
parser.add_argument("--initial_filter_size", type=int, default=32)
# Train
parser.add_argument("--experiment_name", type=str, default="supervised_cxr")
parser.add_argument("--restart", default=False, action='store_true')
parser.add_argument("--use_vanilla", default=False, action='store_true', help='whether use vanilla moco or simclr')
parser.add_argument("--pretrained_model_path", type=str, default='/afs/crc.nd.edu/user/d/dzeng2/UnsupervisedSegmentation/results/contrast_2020-09-30_02-37-23/model/latest.pth')
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--min_lr", type=float, default=1e-5)
parser.add_argument("--gamma", type=float, default=0.5)
parser.add_argument("--weight_decay", type=float, default=1e-5)
parser.add_argument("--momentum", type=float, default=0.9)
parser.add_argument("--betas", type=tuple, default=(0.9, 0.999))
parser.add_argument("--lr_scheduler", type=str, default='cos')
parser.add_argument("--do_contrast", default=False, action='store_true')
parser.add_argument("--contrastive_method", type=str, default='cl_tci_simclr', help='simclr or cl_tci_simclr')
parser.add_argument("--pretext_method", type=str, default='rotation', help='rotation or pirl')
# Loss
parser.add_argument("--temp", type=float, default=0.1)
# Test
parser.add_argument("--step_size", type=float, default=0.5)
def save_args(obj, defaults, kwargs):
    """Set each default as an attribute on *obj*, letting *kwargs* override.

    Fix: the original called ``dict.iteritems()``, which exists only on
    Python 2 and raises AttributeError on Python 3; ``items()`` is the
    portable equivalent.
    """
    for k, v in defaults.items():
        if k in kwargs:
            v = kwargs[k]
        setattr(obj, k, v)
def get_config():
    """Parse command-line arguments and normalize the data directory path."""
    config = parser.parse_args()
    # Expand a leading '~' so the path works however it was supplied.
    config.data_dir = os.path.expanduser(config.data_dir)
    return config
|
import csv
def savetoCSV(newsitems, filename):
    """Write node/relation dicts to *filename* as a two-column CSV.

    :param newsitems: iterable of dicts with 'node' and 'relation_nodes' keys
    :param filename: output CSV path (overwritten)
    """
    fields = ['node', 'relation_nodes']
    # newline='' is required by the csv module; without it every row is
    # followed by a blank line on Windows.
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fields)
        writer.writeheader()
        writer.writerows(newsitems)
# Load the relation table and make the relation symmetric: whenever A lists
# B, ensure B also lists A; the result is written to relation3.csv.
with open('relation2.csv') as f:
    relations = [{k: str(v) for k, v in row.items()}
                 for row in csv.DictReader(f, skipinitialspace=True)]
all_nodes = []
for relation in relations:
    node = relation['node']
    relation_nodes = relation['relation_nodes'].split(',')
    for relation_node in relation_nodes:
        # examine the related node currently being checked
        for relation2 in relations:
            if relation2['node'] == relation_node:
                # check whether its relation list already contains the node
                # above; if not, add it
                relation_nodes2 = relation2['relation_nodes'].split(',')
                if node not in relation_nodes2:
                    relation_nodes2.append(node)
                    relation2['relation_nodes'] = ','.join(relation_nodes2)
                    print(relation2['node'])
                    print(relation2['relation_nodes'])
savetoCSV(relations, 'relation3.csv')
|
from rest_framework import serializers
from employee.models import Employee
class EmployeeSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the core Employee fields."""
    class Meta:
        model = Employee
        # NOTE(review): 'password' is listed here, so it is also included in
        # serialized *output* — confirm it should not be write_only.
        fields = ('id', 'email', 'firstname', 'lastname', 'password', 'address', 'dob',
                  'company', 'mobile', 'city')
|
from heapq import heappush, heappop
def solution(food_times, k):
    """Mukbang-style greedy: return the 1-based index of the food eaten next
    after k seconds, or -1 if everything is finished within k seconds.

    Strategy: repeatedly consume the smallest remaining food time across all
    dishes simultaneously (heap), then index into the survivors.
    """
    # If total eating time fits in k, nothing is left to eat.
    if sum(food_times) <= k:
        return -1
    q = []
    # Heap of (remaining time, 1-based dish index).
    for i in range(len(food_times)):
        heappush(q, (food_times[i], i + 1))
    times, previous, length = 0, 0, len(q)
    # Peel off whole "rounds": eating the current minimum from every
    # remaining dish costs (min - previous) * length seconds.
    while times + ((q[0][0] - previous) * length) <= k :
        now = heappop(q)[0]
        times += (now - previous) * length
        length -= 1
        previous = now
    # Among the survivors (ordered by index), pick the dish reached after
    # the leftover (k - times) seconds.
    result = sorted(q, key=lambda x:x[1])
    return result[(k - times) % length][1] |
from enum import Enum
class GameSubTypeEnum(Enum):
    """Poker game sub-types, keyed by their numeric protocol value."""
    TexasHoldem = 0
    OmahaHoldem = 1
    Pineapple = 2
    CrazyPineapple = 3
    LazyPineapple = 4
    ThreeCardsHoldem = 5
    IrishPoker = 6
    SpanishPoker = 7
    ManilaPoker = 8
    FiveCardsStud = 9
    SevenCardsStud = 10
    FiveCardsDraw = 11

    @classmethod
    def parse(cls, name):
        """Return the member whose name is *name*.

        The parameter was renamed from ``str``, which shadowed the builtin.
        """
        return cls[name]

    @classmethod
    def to_string(cls, value):
        """Return the member's name for *value*."""
        return value.name
|
import unicodecsv
class UnicodeCsvWriter(object):
    """Append rows of unicode values to a CSV file via `unicodecsv`."""

    def _write(self, iterable, output_file):
        # Stream every row through a UTF-8-encoding csv writer.
        csv_writer = unicodecsv.writer(output_file, encoding='utf-8')
        for record in iterable:
            csv_writer.writerow(record)

    def write(self, iterable, filename='output.csv', mode='a'):
        """Open *filename* (append mode by default) and write all rows."""
        with open(filename, mode=mode) as output:
            self._write(iterable, output)
|
from util import *
def user_update(u_i, v, bias, profile, epochs=30, learn_rate=0.0015, reg_fact=0.06):
    """Refine user latent vector u_i against item factors v via a closed-form
    accumulation of `epochs` gradient-descent steps.

    Presumably u_i is (1, d), v is (n_items, d) and profile is a length-n
    rating row — TODO confirm shapes against the caller.  Side effect: dumps
    the transition matrices to data/delta_e and data/delta_sum.
    """
    profile = np.reshape(profile, (1, -1)) - bias
    u_i = np.reshape(u_i, (1, -1))
    # One-step linear update operator: u <- u @ delta_matrix + d.
    delta_matrix = np.dot(- 2 * learn_rate * np.eye(v.shape[1]), np.dot(v.T, v)) + (1 - (2 * learn_rate * reg_fact))*np.eye(v.shape[1])
    d = 2 * learn_rate * np.dot(profile, v)
    # delta_i accumulates I + M + M^2 + ... ; delta_matrix_i tracks M^k.
    delta_i = np.ones_like(delta_matrix) + delta_matrix
    delta_matrix_i = np.zeros_like(delta_matrix) + delta_matrix
    for epoch in range(epochs -2):
        delta_matrix_i[...] = np.dot(delta_matrix_i, delta_matrix)
        delta_i[...] += delta_matrix_i
    delta_e = np.dot(delta_matrix_i, delta_matrix)
    # In-place update so the caller's array is modified as well.
    u_i[...] = np.dot(u_i, delta_e) + np.dot(d, delta_i)
    np.savetxt('data/delta_e', delta_e, '%.4f')
    np.savetxt('data/delta_sum', delta_i, '%.4f')
    return u_i
def new_user_update(v, bias, profile):
    """Initialize a fresh user vector randomly, then refine it via user_update."""
    initial = np.random.uniform(-0.05, 0.05, len(v[0]))
    return user_update(initial, v, bias, profile)
|
from typing import List
from bisect import bisect_left
from collections import deque
class Solution:
    """LeetCode 658 — return the k elements of sorted A closest to x."""

    def findClosestElements(self, A: List[int], k: int, x: int) -> List[int]:
        """Binary-search the left edge of the k-wide answer window."""
        left, right = 0, len(A) - k
        while left < right:
            mid = (left + right) // 2
            # If the element k slots past mid is strictly closer to x,
            # the whole window must shift right.
            if abs(x - A[mid]) > abs(A[mid + k] - x):
                left = mid + 1
            else:
                right = mid
        return A[left:left + k]

    def findClosestElements2(self, A: List[int], k: int, x: int) -> List[int]:
        """Two-pointer expansion outward from the insertion point of x."""
        n = len(A)
        j = bisect_left(A, x)
        i = j - 1
        window = deque()
        remaining = k
        while remaining:
            left_ok = 0 <= i < n
            right_ok = 0 <= j < n
            if left_ok and right_ok:
                # Ties go to the smaller (left) element.
                if abs(A[i] - x) <= abs(A[j] - x):
                    window.appendleft(A[i])
                    i -= 1
                else:
                    window.append(A[j])
                    j += 1
            elif left_ok:
                window.appendleft(A[i])
                i -= 1
            elif right_ok:
                window.append(A[j])
                j += 1
            else:
                # Both pointers exhausted before collecting k items.
                raise
            remaining -= 1
        return list(window)
if __name__ == "__main__":
    # Smoke tests: ties are broken toward the smaller (left) elements.
    assert Solution().findClosestElements([1,2,3,4,5], 4, 3) == [1,2,3,4]
    assert Solution().findClosestElements([1,2,3,4,5], 4, -1) == [1,2,3,4]
|
# Generated by Django 2.0.6 on 2020-05-18 09:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: re-declare the midhear FK with CASCADE delete and the
    'mid_data' reverse accessor.  Generated code — do not hand-edit logic."""
    dependencies = [
        ('home', '0004_auto_20200515_2232'),
    ]
    operations = [
        migrations.AlterField(
            model_name='middlenavheaderdata',
            name='midhear',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mid_data', to='home.MiddleNavHeader', verbose_name='外键关联中部标题'),
        ),
    ]
|
###
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from builtins import str
import logging
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from bq_data_access.v1.data_access import get_feature_vectors_tcga_only
from bq_data_access.v1.seqpeek.seqpeek_view import SeqPeekViewDataBuilder
from bq_data_access.v1.seqpeek.seqpeek_maf_formatter import SeqPeekMAFDataFormatter
from bq_data_access.v1.seqpeek_maf_data import SeqPeekDataProvider
from bq_data_access.v1.data_access import ProviderClassQueryDescription
from visualizations.data_access_views import get_confirmed_project_ids_for_cohorts
logger = logging.getLogger('main_logger')
def build_gnab_feature_id(gene_label):
    """Return the GNAB variant-classification feature ID for *gene_label*."""
    template = "GNAB:{gene_label}:variant_classification"
    return template.format(gene_label=gene_label)
@login_required
def seqpeek_view_data(request):
    """JSON endpoint returning SeqPeek plot data for one gene across cohorts.

    Query params: ``hugo_symbol`` (gene) and repeatable ``cohort_id``.
    Returns the built view data, an empty ``plot_data`` payload when no MAF
    rows were found, or an error payload with HTTP 400/500.
    """
    try:
        hugo_symbol = request.GET.get('hugo_symbol', None)
        cohort_id_param_array = request.GET.getlist('cohort_id', None)
        # Validate that every cohort id is an integer.
        cohort_id_array = []
        for cohort_id in cohort_id_param_array:
            try:
                cohort_id = int(cohort_id)
                cohort_id_array.append(cohort_id)
            except Exception as e:
                return JsonResponse({'error': 'Invalid cohort parameter'}, status=400)
        if len(cohort_id_array) == 0:
            return JsonResponse({'error': 'No cohorts specified'}, status=400)
        gnab_feature_id = build_gnab_feature_id(hugo_symbol)
        logger.debug("GNAB feature ID for SeqPeek: {0}".format(gnab_feature_id))
        # Get the project IDs these cohorts' samples come from
        confirmed_project_ids = get_confirmed_project_ids_for_cohorts(cohort_id_array)
        async_params = [
            ProviderClassQueryDescription(SeqPeekDataProvider, gnab_feature_id, cohort_id_array, confirmed_project_ids)]
        maf_data_result = get_feature_vectors_tcga_only(async_params, skip_formatting_for_plot=True)
        maf_data_vector = maf_data_result[gnab_feature_id]['data']
        if len(maf_data_vector) > 0:
            # Since the gene (hugo_symbol) parameter is part of the GNAB feature ID,
            # it will be sanity-checked in the SeqPeekMAFDataAccess instance.
            seqpeek_data = SeqPeekMAFDataFormatter().format_maf_vector_for_view(maf_data_vector, cohort_id_array)
            seqpeek_maf_vector = seqpeek_data.maf_vector
            seqpeek_cohort_info = seqpeek_data.cohort_info
            removed_row_statistics_dict = seqpeek_data.removed_row_statistics
            seqpeek_view_data = SeqPeekViewDataBuilder().build_view_data(hugo_symbol,
                                                                         seqpeek_maf_vector,
                                                                         seqpeek_cohort_info,
                                                                         cohort_id_array,
                                                                         removed_row_statistics_dict)
            return JsonResponse(seqpeek_view_data)
        else:
            # No data found
            return JsonResponse({
                # The SeqPeek client side view detects data availability by checking if
                # the "plot_data" object has the "tracks" key present.
                'plot_data': {},
                'hugo_symbol': hugo_symbol,
                'cohort_id_list': [str(i) for i in cohort_id_array],
                'removed_row_statistics': []
            })
    except Exception as e:
        logger.error("[ERROR] In seqpeek_view_data: ")
        logger.exception(e)
        return JsonResponse({'error': str(e)}, status=500)
|
from ast_node import AstNode
class PrintTags(AstNode):
    """AST node whose only effect on execution is printing the tag context."""

    def __init__(self):
        pass

    def get_value(self):
        # A print node carries no value of its own.
        return None

    def execute(self, tag_context):
        """Print every tag in *tag_context*; the node evaluates to None."""
        tag_context.print_tags()
        return None
|
import enum
import weakref
from collections import defaultdict
class AttributeType(enum.Enum):
    """The kinds of contact detail a Person record may carry."""
    email = "email"
    phone = "phone"
    street = "street"
class PersonAttribute:
    """A single contact value weakly bound to its owning Person.

    The back-reference is a weakref, so an attribute never keeps its owner
    alive on its own.
    """

    def __init__(self, person, value):
        self._person = weakref.ref(person)
        self.value = value

    @property
    def person(self):
        # Dereference the weakref; None once the owner has been collected.
        return self._person()

    def __repr__(self):
        # Fix: the original wrapped {!r} in literal quotes, producing
        # doubled quoting like value=''x'' — {!r} already adds quotes.
        return "PersonAttribute(person={!r}, value={!r})".format(self._person(), self.value)

    def __str__(self):
        return str(self.value)
class Person:
    """A contact-book entry whose attributes and groups live in a Store.

    The store back-reference is a weakref (a Person does not keep its store
    alive); contact values are PersonAttribute records grouped by
    AttributeType.
    """
    def __init__(self, first_name, last_name):
        self.first_name = first_name
        self.last_name = last_name
        # AttributeType -> list of PersonAttribute records.
        self._attributes = defaultdict(list)
        self._store = None
    def __repr__(self):
        return "Person(first_name='{}', last_name='{}')".format(self.first_name, self.last_name)
    def __str__(self):
        # Multi-line summary: name, then only the sections that are non-empty.
        parts = ["{} {}".format(self.first_name, self.last_name)]
        if self.groups:
            parts.append("Groups: {}".format(", ".join(g.name for g in self.groups)))
        if self.emails:
            parts.append("Email addresses: {}".format(", ".join(v for v in self.emails)))
        if self.phones:
            parts.append("Phone addresses: {}".format(", ".join(v for v in self.phones)))
        if self.addresses:
            parts.append("Street addresses: {}".format(", ".join(v for v in self.addresses)))
        return "\n".join(parts)
    @property
    def store(self):
        # Dereference the weakref set by the setter below.
        return self._store()
    @store.setter
    def store(self, value):
        self._store = weakref.ref(value)
    @property
    def groups(self):
        # Group membership is owned by the store, not the person.
        return self.store.get_person_groups(self)
    @property
    def emails(self):
        return self._get_attributes_values(AttributeType.email)
    @property
    def phones(self):
        return self._get_attributes_values(AttributeType.phone)
    @property
    def addresses(self):
        return self._get_attributes_values(AttributeType.street)
    def _get_attributes_values(self, attr_type):
        # Plain values (strings) of this person's attributes of one type.
        return [a.value for a in self._attributes[attr_type]]
    def _add_attribute(self, attr_type, attr_value):
        # Register with the store first (so it can index the attribute),
        # then keep the record locally for value lookups.
        assert isinstance(attr_value, str), "{} attribute must be a string".format(attr_type.value)
        attr = PersonAttribute(self, attr_value)
        self.store.register_attribute(attr_type, attr)
        self._attributes[attr_type].append(attr)
    def add_email_address(self, value):
        self._add_attribute(AttributeType.email, value)
    def add_phone_number(self, value):
        self._add_attribute(AttributeType.phone, value)
    def add_street_address(self, value):
        self._add_attribute(AttributeType.street, value)
    def add_to_group(self, group):
        # Membership is stored on the group side (weakly).
        group.add_member(self)
class Group:
    """A named collection of Person members held via weak references."""

    def __init__(self, name):
        self.name = name
        self._members = weakref.WeakSet()

    def __repr__(self):
        return "Group(name='{}')".format(self.name)

    def __str__(self):
        return self.name

    @property
    def members(self):
        """Snapshot list of the currently live members."""
        return list(self._members)

    def add_member(self, person):
        """Register *person*; membership lapses once the person is collected."""
        self._members.add(person)
|
import time
from selenium import webdriver
# Selenium exercise against the OrangeHRM demo site using IE.
# NOTE(review): the credentials below are hard-coded demo values.
driver=webdriver.Ie(executable_path= '../Exercise/drivers/IEDriverServer.exe')
driver.maximize_window()
time.sleep(2)
driver.get('https://opensource-demo.orangehrmlive.com/')
print(driver.title)
print(driver.current_url)
# First (failing) login attempt with throwaway credentials.
a1=driver.find_element_by_id('txtUsername')
a1.send_keys('satee143')
a2=driver.find_element_by_id('txtPassword')
a2.send_keys('9989884111')
btn= driver.find_element_by_id('btnLogin')
btn.click()
# Elements must be re-located after the page reloads.
a1=driver.find_element_by_id('txtUsername')
a2=driver.find_element_by_id('txtPassword')
a2.send_keys('9989858000')
a1.send_keys('dusa.skumar')
print(a1.get_attribute('value'))
print(a2.get_attribute('value'))
btn= driver.find_element_by_id('btnLogin')
time.sleep(3)
# Clear the fields and log in with the real demo credentials.
a1.clear()
a2.clear()
time.sleep(3)
a1.send_keys('Admin')
a2.send_keys('admin123')
btn.click()
driver.fullscreen_window()
# Open the welcome menu and the About dialog.
menu=driver.find_element_by_xpath('//*[@id="welcome"]')
menu.click()
print(menu.text)
driver.find_element_by_id('aboutDisplayLink').click()
# logout=driver.find_element_by_css_selector('#welcome-menu > ul > li:nth-child(2) > a')
# logout.click()
|
#Embedded file name: eve/client/script/ui/services/corporation\corp_util.py
# Corporation-management view modes.
VIEW_ROLES = 0
VIEW_GRANTABLE_ROLES = 1
VIEW_TITLES = 2
# Role-group identifiers used by the role grouping service.
GROUP_GENERAL_ROLES = 0
GROUP_DIVISIONAL_ACCOUNTING_ROLES = 1
GROUP_DIVISIONAL_HANGAR_ROLES_AT_HQ = 2
GROUP_DIVISIONAL_CONTAINER_ROLES_AT_HQ = 3
GROUP_DIVISIONAL_HANGAR_ROLES_AT_BASE = 4
GROUP_DIVISIONAL_CONTAINER_ROLES_AT_BASE = 5
GROUP_DIVISIONAL_HANGAR_ROLES_AT_OTHER = 6
GROUP_DIVISIONAL_CONTAINER_ROLES_AT_OTHER = 7
RECRUITMENT_GROUP_PRIMARY_LANGUAGE = 10
def CanEditRole(roleID, grantable, playerIsCEO, playerIsDirector, IAmCEO, viewRoleGroupingID, myBaseID, playersBaseID, myGrantableRoles, myGrantableRolesAtHQ, myGrantableRolesAtBase, myGrantableRolesAtOther):
    """Return 1 if the current character may edit *roleID* on the target player.

    *grantable* selects between editing grantable roles (directors only)
    and editing held roles (checked against the grantable-role bitmask for
    the view's role group).  Relies on the module-level ``const``, ``eve``
    and ``sm`` globals supplied by the EVE client environment.
    """
    if grantable:
        # Only directors may hand out grantable roles — never the director
        # role itself, and never to the CEO or another director.
        if roleID == const.corpRoleDirector:
            return 0
        if eve.session.corprole & const.corpRoleDirector != const.corpRoleDirector:
            return 0
        if playerIsCEO:
            return 0
        if playerIsDirector:
            return 0
        return 1
    elif playerIsCEO:
        return 0
    elif playerIsDirector and not IAmCEO:
        return 0
    elif playerIsDirector and roleID & const.corpRoleDirector != const.corpRoleDirector:
        return 0
    else:
        roleGroupings = sm.GetService('corp').GetRoleGroupings()
        # Fix: dict.has_key() is Python 2-only; the `in` operator is the
        # equivalent, portable membership test.
        if viewRoleGroupingID not in roleGroupings:
            raise RuntimeError('UnknownViewType')
        roleGroup = roleGroupings[viewRoleGroupingID]
        if roleGroup.appliesTo == 'roles':
            if myGrantableRoles & roleID != roleID:
                return 0
        elif roleGroup.appliesTo == 'rolesAtHQ':
            if myGrantableRolesAtHQ & roleID != roleID:
                return 0
        elif roleGroup.appliesTo == 'rolesAtBase':
            if IAmCEO:
                return 1
            if const.corpRoleDirector & eve.session.corprole == const.corpRoleDirector and not playerIsDirector:
                return 1
            if myBaseID != playersBaseID:
                return 0
            if myGrantableRolesAtBase & roleID != roleID:
                return 0
        elif roleGroup.appliesTo == 'rolesAtOther':
            if myGrantableRolesAtOther & roleID != roleID:
                return 0
        return 1
def CanEditBase(playerIsCEO, IAmCEO, IAmDirector):
    """Return 1 if the viewer may edit the target player's base assignment.

    Only the CEO may edit the CEO; otherwise the CEO or any director may
    edit; everyone else may not.
    """
    if playerIsCEO:
        return 1 if IAmCEO else 0
    if IAmCEO or IAmDirector:
        return 1
    return 0
import carbon.common.script.util.autoexport as autoexport
# NOTE(review): the AutoExports result is immediately overwritten by the
# literal dict below — confirm which export mechanism is actually intended.
exports = autoexport.AutoExports('corputil', locals())
exports = {'corputil.CanEditRole': CanEditRole,
 'corputil.CanEditBase': CanEditBase}
|
import os
import sys
import re
from optparse import OptionParser
from Album import *
class Itemizer:
    """Command-line tool that numbers ('itemizes') files into an Album."""

    # Option spec tuples: (flag, dest, help, metavar[, default[, action]]).
    OPTIONS = [
        ("-d", "destination", "destination directory", "DIR", "./"),
        ("-i", "index", "item index", "INT"),
        ("-f", "file_path", "input file", "PATH"),
        ("-s", "silent", "suppress messages", None, False, "store_true"),
        ("-v", "verbose", "verbose output", None, False, "store_true"),
        ("--delimiter", "delimiter", "field delimiter", "CHAR", "_"),
        ("--copy", "copy", "copy files", None, False, "store_true"),
        ("--deitemize", "deitemize", "deitemize", None, False, "store_true"),
        ("--sim", "simulate", "simulate itemization", None, False,
         "store_true"),
        ("--regroup", "regroup", "order items consecutively", None, False,
         "store_true"),
    ]
    USAGE_MESSAGE = "Usage: %prog [options] PATH_1..PATH_n*"

    def __init__(self):
        """Parse command-line arguments and run the requested operation."""
        self.init_input()
        if len(sys.argv) > 1:
            self.add_file_contents_to_item_list()
            self.album = Album(
                self.options.destination, self.options.delimiter, self.options.copy,
                self.options.simulate, self.verbosity, self.options.regroup)
            if self.options.deitemize:
                self.album.remove(self.item_paths)
            else:
                self.album.add_items(self.item_paths, self.options.index)
            self.album.commit()
        else:
            # No arguments at all: just show usage.
            self.parser.print_help()

    def init_input(self):
        # Build the option parser and consume sys.argv.
        self.parser = OptionParser(self.USAGE_MESSAGE)
        self.parse_arguments()

    def parse_arguments(self):
        """Register every OPTIONS entry and parse options + item paths."""
        for option in self.OPTIONS:
            default = option[4] if len(option) > 4 else None
            action = option[5] if len(option) > 5 else None
            self.parser.add_option(
                option[0], dest=option[1], help=option[2],
                metavar=option[3], default=default, action=action)
        self.options, self.item_paths = self.parser.parse_args()
        self.set_verbosity(self.options.silent, self.options.verbose)

    def set_verbosity(self, silent, verbose):
        # verbose wins over silent; default is normal output (1).
        if verbose:
            self.verbosity = 2
        elif silent:
            self.verbosity = 0
        else:
            self.verbosity = 1

    def add_file_contents_to_item_list(self):
        """Append non-comment lines of the -f input file to the item list."""
        if self.options.file_path != None:
            # Fix: the original used the Python 2-only builtin file();
            # open() behaves identically here and works on Python 3 too.
            for line in open(self.options.file_path):
                line = line.rstrip()
                line = line.strip("\"")
                # Skip comment lines; also skip blank lines, which
                # previously raised IndexError on line[0].
                if line and line[0] != "#":
                    self.item_paths.append(line)

    @staticmethod
    def is_item(path):
        """Return True if *path* is an existing file whose name starts with digits."""
        if os.path.isfile(path):
            file_name = os.path.basename(path)
            if re.match("^[0-9]+.*", file_name):
                return True
        return False

    @staticmethod
    def extract_item_number(path):
        """Return the leading number of *path*'s basename, or None."""
        file_name = os.path.basename(path)
        match = re.match("^([0-9]+).*", file_name)
        if match:
            return int(match.group(1))
        return None
|
import sqlite3
import pygame
import sys
class Prologo:
    """Intro cut-scene: an animated radio conversation between the last
    registered player (a radio amateur) and a stranded astronaut, rendered
    with a typewriter effect driven by per-frame character counters."""

    def __init__(self):
        # Fetch the most recently registered player name from the local DB.
        conexion = sqlite3.connect('escapeRoom.db')
        cursor = conexion.cursor() # connection cursor used for the query (CRUD/DDL/DML...)
        cursor.execute("SELECT nombre_jugador FROM JUGADORES order by id_jugador DESC limit 1")
        self.nombre = cursor.fetchone()
        # print(self.nombre[0])
        conexion.commit() # commit before closing the connection
        conexion.close()
        """ CREACION DE PANTALLA INICIAL
        ----------------------------------- """
        # Main 1480x800 window for the prologue.
        self.ventana_menu = pygame.display.set_mode((1480, 800))
        pygame.display.set_caption("Menu de Juego")
        """ IMAGENES DE INICIO
        ------------------------- """
        # Background image scaled to the full window.
        self.fondo_menu = pygame.image.load("./imagenes/ventana_tierra.jpg")
        self.fondo_menu = pygame.transform.scale(self.fondo_menu, (1480, 800))
        self.ventana_menu.blit(self.fondo_menu, (0, 0))
        # First animation frame (top panel).
        # NOTE(review): the two attribute names swap roles in pantallas(),
        # where pantalla_mundo holds the ./imagenes/mundo/*.png frames.
        self.pantalla_escritura = pygame.image.load("./imagenes/mundo/1.png")
        self.pantalla_escritura = pygame.transform.scale(self.pantalla_escritura, (552, 330))
        self.ventana_menu.blit(self.pantalla_escritura, (475, 0))
        # Dialogue panel (bottom).
        self.pantalla_mundo = pygame.image.load("./imagenes/11.jpg")
        self.pantalla_mundo = pygame.transform.scale(self.pantalla_mundo, (852, 480))
        self.ventana_menu.blit(self.pantalla_mundo, (325, 325))
        # Loop flag for pantallas(); cleared when the dialogue finishes.
        self.llamada = True
        # Typewriter buffers, one per astronaut sentence.
        self.escrito_astronauta = ""
        self.escrito_astronauta2 = ""
        self.escrito_astronauta3 = ""
        self.escrito_astronauta4 = ""
        self.escrito_astronauta5 = ""
        # i: animation frame index (1..111); i2..i6: character indices for
        # the typewriter sentences; i7..i10: frame-delay counters pacing the
        # player's closing replies.
        self.i = 1
        self.i2 = 0
        self.i3 = 0
        self.i4 = 0
        self.i5 = 0
        self.i6 = 0
        self.i7 = 0
        self.i8 = 0
        self.i9 = 0
        self.i10 = 0
        self.cont = 0
        # Conversation state flags; each block only starts after the
        # previous one completed and its reply was shown.
        self.conversacion1 = False
        self.respuesta_conversacion1 = False
        self.continuar1 = False
        self.conversacion2 = False
        self.respuesta_conversacion2 = False
        self.continuar2 = False
        self.conversacion3 = False
        self.respuesta_conversacion3 = False

    def pantallas(self):
        """Run the prologue loop: animate the frames, advance the dialogue
        and redraw roughly every 50 ms until the conversation ends."""
        while self.llamada:
            for event in pygame.event.get():
                # Close the window with (X) or quit with (ESC).
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        sys.exit()
            # Draw the current animation frame in the top panel.
            self.pantalla_mundo = pygame.image.load("./imagenes/mundo/"+str(self.i)+".png")
            self.pantalla_mundo = pygame.transform.scale(self.pantalla_mundo, (552, 330))
            self.ventana_menu.blit(self.pantalla_mundo, (475, 0))
            # Repaint the dialogue panel before this frame's text is drawn.
            self.pantalla_escritura = pygame.image.load("./imagenes/11.jpg")
            self.pantalla_escritura = pygame.transform.scale(self.pantalla_escritura, (852, 480))
            self.ventana_menu.blit(self.pantalla_escritura, (325, 325))
            self.dialogo()
            # The animation has 111 frames; wrap back to the first one.
            if self.i == 111:
                self.i = 1
            else:
                self.i += 1
            pygame.time.wait(50)
            pygame.display.flip()

    def dialogo(self):
        """Advance the typewriter dialogue by one character per frame.

        Each conversation block appends one character of its sentence per
        call; once a sentence is complete the corresponding flag flips and
        the following block (or the player's blue reply line) starts.
        """
        # --- Block 1: the astronaut hails the base ---
        if not self.conversacion1 and not self.conversacion2:
            self.escrito_astronauta += 'Astronauta ES-253 a base, contesten, por favor'[self.i2]
            self.texto1 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
            self.texto1 = self.texto1.render(self.escrito_astronauta, 0, (255, 255, 255))
            self.texto1 = self.ventana_menu.blit(self.texto1, (520, 430))
        else:
            # Sentence finished: keep re-drawing it complete every frame.
            self.texto1 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
            self.texto1 = self.texto1.render('Astronauta ES-253 a base, contesten, por favor', 0, (255, 255, 255))
            self.texto1 = self.ventana_menu.blit(self.texto1, (520, 430))
        #print('longitud palabra',len('Astronauta ES-253 a base, contesten, por favor'))
        #print(self.i2)
        if self.i2 == len('Astronauta ES-253 a base, contesten, por favor')-1:
            #self.escrito_astronauta=""
            #self.i2=0
            #self.cont+=1
            self.conversacion1 = True
            #print(self.i2)
        # i2 keeps counting past the sentence end; +5 / +10 act as short
        # frame delays before the reply appears and block 2 may start.
        if self.i2 == len('Astronauta ES-253 a base, contesten, por favor')+5:
            #pygame.time.wait(1000)
            self.respuesta_conversacion1 = True
        if self.respuesta_conversacion1:
            self.respuesta_texto1 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
            self.respuesta_texto1 = self.respuesta_texto1.render("Radio aficionado '" + self.nombre[0] + "', le copio", 0, (0, 150, 255))
            self.respuesta_texto1 = self.ventana_menu.blit(self.respuesta_texto1, (520, 450))
        if self.i2 == len('Astronauta ES-253 a base, contesten, por favor') + 10:
            self.continuar1 = True
        self.i2 += 1
        # --- Block 2 (INICIO CONVERSACION 2) ---
        if self.conversacion1 and not self.conversacion2 and self.respuesta_conversacion1 and self.continuar1:
            if self.i3 <= 52:
                self.escrito_astronauta2 += \
                    'la nave se encuentra a la deriva y estamos encerrados,'[self.i3]
                self.texto2 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
                self.texto2 = self.texto2.render(self.escrito_astronauta2, 0, (255, 255, 255))
                self.texto2 = self.ventana_menu.blit(self.texto2, (520, 470))
            else:
                # First sentence done: type the second line of the block.
                self.escrito_astronauta3 += 'necesitamos llegar a la navegación, sino chocaremos '[self.i4]
                self.texto3 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
                self.texto3 = self.texto3.render(self.escrito_astronauta3, 0, (255, 255, 255))
                self.texto3 = self.ventana_menu.blit(self.texto3, (520, 490))
                self.i4 += 1
            self.i3 += 1
            #print(self.i3)
            if self.i3 >= len('la nave se encuentra a la deriva y estamos encerrados,'):
                # self.escrito_astronauta=""
                # self.i2=0
                self.texto2 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
                self.texto2 = self.texto2.render('la nave se encuentra a la deriva y estamos encerrados', 0,(255, 255, 255))
                self.texto2 = self.ventana_menu.blit(self.texto2, (520, 470))
                #print(self.i3)
            if self.i4 >= len('necesitamos llegar a la navegación, sino chocaremos '):
                # self.escrito_astronauta=""
                # self.i2=0
                self.texto3 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
                self.texto3 = self.texto3.render(self.escrito_astronauta3, 0, (255, 255, 255))
                self.texto3 = self.ventana_menu.blit(self.texto3, (520, 490))
                self.conversacion2 = True
                self.i4 += 1
                #print(self.i4)
            if self.i4 == len('necesitamos llegar a la navegación, sino chocaremos ')+5:
                #pygame.time.wait(1000)
                self.respuesta_conversacion2 = True
            if self.respuesta_conversacion2:
                self.respuesta_texto2 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
                self.respuesta_texto2 = self.respuesta_texto2.render('como puedo ayudaros?', 0, (0, 150, 255))
                self.respuesta_texto2 = self.ventana_menu.blit(self.respuesta_texto2, (520, 510))
            if self.i4 == len('necesitamos llegar a la navegación, sino chocaremos ') + 10:
                # pygame.time.wait(1000)
                self.continuar2 = True
        # --- Block 3 (INICIO CONVERSACION 3) ---
        if self.conversacion2 and not self.conversacion3 and self.respuesta_conversacion2 and self.continuar2:
            if self.i5 <= 40:
                self.escrito_astronauta4 += \
                    'la única forma es que controles un cyborg '[self.i5]
                self.texto4 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
                self.texto4 = self.texto4.render(self.escrito_astronauta4, 0, (255, 255, 255))
                self.texto4 = self.ventana_menu.blit(self.texto4, (520, 530))
            else:
                self.escrito_astronauta5 += 'estamos en tus manos, tienes 60 minutos, SUERTE '[self.i6]
                self.texto5 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
                self.texto5 = self.texto5.render(self.escrito_astronauta5, 0, (255, 255, 255))
                self.texto5 = self.ventana_menu.blit(self.texto5, (520, 550))
                # Cap i6 so the final index never runs past the sentence.
                if self.i6 < 48:
                    self.i6 += 1
                #print(self.i6)
                #print('longitud' + str(len('estamos en tus manos, tienes 60 minutos, SUERTE ')))
            self.i5 += 1
            #print(self.i5)
            if self.i5 >= len('la única forma es que controles un cyborg '):
                # self.escrito_astronauta=""
                # self.i2=0
                self.texto4 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
                self.texto4 = self.texto4.render('la única forma es que controles un cyborg ', 0,
                                                 (255, 255, 255))
                self.texto4 = self.ventana_menu.blit(self.texto4, (520, 530))
                #print(self.i5)
            if self.i6 >= len('estamos en tus manos, tienes 60 minutos, SUERTE '):
                # self.escrito_astronauta=""
                # self.i2=0
                self.texto5 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
                self.texto5 = self.texto5.render(self.escrito_astronauta5, 0, (255, 255, 255))
                self.texto5 = self.ventana_menu.blit(self.texto5, (520, 550))
                self.conversacion2 = True
                self.i7 += 1
                #print(self.i6)
            # i7..i10 pace the player's last three replies a few frames apart.
            if self.i7 >= 5:
                # pygame.time.wait(1000)
                self.respuesta_texto3 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
                self.respuesta_texto3 = self.respuesta_texto3.render('pero... yo no tengo conocimientos de aeronáutica...', 0, (0, 150, 255))
                self.respuesta_texto3 = self.ventana_menu.blit(self.respuesta_texto3, (520, 570))
                self.i8 += 1
            if self.i8 >= 15:
                # pygame.time.wait(1000)
                self.respuesta_texto4 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
                self.respuesta_texto4 = self.respuesta_texto4.render(
                    'hola... sigues ahí...', 0, (0, 150, 255))
                self.respuesta_texto4 = self.ventana_menu.blit(self.respuesta_texto4, (520, 590))
                self.i9 += 1
            if self.i9 >= 15:
                # pygame.time.wait(1000)
                self.respuesta_texto5 = pygame.font.Font("./fuentes/JetBrainsMono-Regular.ttf", 15)
                self.respuesta_texto5 = self.respuesta_texto5.render(
                    'no os preocupéis, os sacaré de esta !', 0, (0, 150, 255))
                self.respuesta_texto5 = self.ventana_menu.blit(self.respuesta_texto5, (520, 610))
                self.i10 += 1
            if self.i10 >= 15:
                # End the prologue loop in pantallas().
                self.llamada = False
import json
import boto3
class Publish(object):
    """Publishes domain events to an AWS SNS topic.

    Every public method is a thin wrapper around __generic() with a fixed
    object_type; keyword arguments become the JSON message payload.
    """

    def abr(self, event_type, **kwargs):
        return self.__generic('abr', event_type, **kwargs)

    def agency(self, agency, event_type, **kwargs):
        return self.__generic('agency', event_type, agency=agency, **kwargs)

    def application(self, application, event_type, **kwargs):
        return self.__generic('application', event_type, application=application, **kwargs)

    def assessment(self, assessment, event_type, **kwargs):
        return self.__generic('assessment', event_type, assessment=assessment, **kwargs)

    def brief(self, brief, event_type, **kwargs):
        return self.__generic('brief', event_type, brief=brief, **kwargs)

    def brief_response(self, brief_response, event_type, **kwargs):
        return self.__generic('brief_response', event_type, brief_response=brief_response, **kwargs)

    def brief_question(self, brief_question, event_type, **kwargs):
        return self.__generic('brief_question', event_type, brief_question=brief_question, **kwargs)

    def evidence(self, evidence, event_type, **kwargs):
        return self.__generic('evidence', event_type, evidence=evidence, **kwargs)

    def mailchimp(self, event_type, **kwargs):
        return self.__generic('mailchimp', event_type, **kwargs)

    def supplier(self, supplier, event_type, **kwargs):
        return self.__generic('supplier', event_type, supplier=supplier, **kwargs)

    def supplier_domain(self, supplier_domain, event_type, **kwargs):
        return self.__generic('supplier_domain', event_type, supplier_domain=supplier_domain, **kwargs)

    def team(self, team, event_type, **kwargs):
        return self.__generic('team', event_type, team=team, **kwargs)

    def user(self, user, event_type, **kwargs):
        return self.__generic('user', event_type, user=user, **kwargs)

    def user_claim(self, user_claim, event_type, **kwargs):
        return self.__generic('user_claim', event_type, user_claim=user_claim, **kwargs)

    def __generic(self, object_type, event_type, **kwargs):
        """Publish one event; returns the SNS response, or None when the
        'aws_sns' settings are missing from the key-values store."""
        from . import key_values_service

        stored = key_values_service.convert_to_object(
            key_values_service.get_by_keys('aws_sns'))
        settings = stored.get('aws_sns', None)
        if not settings:
            return None

        sns = boto3.client(
            'sns',
            region_name=settings.get('aws_sns_region', None),
            aws_access_key_id=settings.get('aws_sns_access_key_id', None),
            aws_secret_access_key=settings.get('aws_sns_secret_access_key', None),
            endpoint_url=settings.get('aws_sns_url', None)
        )
        # The payload is simply the keyword arguments, wrapped in the SNS
        # 'json' message structure (the 'default' body is itself JSON).
        payload = dict(kwargs)
        return sns.publish(
            TopicArn=settings.get('aws_sns_topicarn', None),
            Message=json.dumps({
                'default': json.dumps(payload)
            }),
            MessageStructure='json',
            MessageAttributes={
                'object_type': {
                    'DataType': 'String',
                    'StringValue': object_type
                },
                'event_type': {
                    'DataType': 'String',
                    'StringValue': event_type
                }
            }
        )
|
import re

# SIP load-test configuration constants.
DOMAIN_NAME = "svyaznoy.ru"
SIP_HOST = "82.144.65.34"
SIP_PORT = 5060
RUNS_COUNT = 1
# seconds
CALL_DURATION = 8
INTERVAL = 0.1
WAIT_TIME = 200
# Raw strings so \s / \d / \D are regex escapes, not (invalid) string
# escape sequences.
AUTH_HEADER_REGEX = r'Digest\s+nonce="(.*?)",' \
                    r'\s+opaque="(.*?)",\s+algorithm=md5,' \
                    r'\s+realm="(.*?)", qop="auth"'
AUTH_HEADER_REGEX = re.compile(AUTH_HEADER_REGEX)
GET_METHOD_FROM_CSEQ_REGEX = r'^\d+\s+(\D+)$'
GET_METHOD_FROM_CSEQ_REGEX = re.compile(GET_METHOD_FROM_CSEQ_REGEX)
# %-template for the Authorization header of authenticated requests.
AUTH_HEADER = 'Digest realm="%(realm)s", nonce="%(nonce)s", ' \
              'opaque="%(opaque)s", username="%(msisdn)s", ' \
              'uri="%(uri)s", response="%(response)s", ' \
              'cnonce="%(cnonce)s", nc=%(nonce_count)s, qop=auth'
URI = "sip:svyaznoy.ru"
# %-template for the INVITE SDP body.
SDP_DATA = """v=0
o=%(msisdn)s %(num)s 3466 IN IP4 10.0.2.15
s=Talk
c=IN IP4 10.0.2.15
b=AS:380
t=0 0
m=audio 7076 RTP/AVP 120 111 110 0 8 101
a=rtpmap:120 SILK/16000
a=rtpmap:111 speex/16000
a=fmtp:111 vbr=on
a=rtpmap:110 speex/8000
a=fmtp:110 vbr=on
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-15
"""
# SIP response codes -> reason phrases (mostly RFC 3261; a few test-only
# entries are kept deliberately, e.g. 432 and the out-of-range 4294967301).
SIP_STATUSES = {
    100: "Trying",
    180: "Ringing",
    181: "Call Is Being Forwarded",
    182: "Queued",
    183: "Session Progress",
    200: "OK",
    300: "Multiple Choices",
    301: "Moved Permanently",
    302: "Moved Temporarily",
    303: "See Other",
    305: "Use Proxy",
    380: "Alternative Service",
    400: "Bad Request",
    401: "Unauthorized",
    402: "Payment Required",
    403: "Forbidden",
    404: "Not Found",
    405: "Method Not Allowed",
    406: "Not Acceptable",
    407: "Proxy Authentication Required",
    408: "Request Timeout",
    409: "Conflict",  # Not in RFC3261
    410: "Gone",
    411: "Length Required",  # Not in RFC3261
    413: "Request Entity Too Large",
    414: "Request-URI Too Large",
    415: "Unsupported Media Type",
    416: "Unsupported URI Scheme",
    420: "Bad Extension",
    421: "Extension Required",
    422: "Session Interval Too Small",
    423: "Interval Too Brief",
    432: "Test by semali02, not existed",
    480: "Temporarily Unavailable",
    481: "Call/Transaction Does Not Exist",
    482: "Loop Detected",
    483: "Too Many Hops",
    484: "Address Incomplete",
    485: "Ambiguous",
    486: "Busy Here",
    487: "Request Terminated",
    488: "Not Acceptable Here",
    491: "Request Pending",
    493: "Undecipherable",
    500: "Internal Server Error",
    501: "Not Implemented",
    502: "Bad Gateway",  # no donut
    503: "Service Unavailable",
    504: "Server Time-out",
    505: "SIP Version not supported",
    513: "Message Too Large",
    600: "Busy Everywhere",
    603: "Decline",
    604: "Does not exist anywhere",
    606: "Not Acceptable",
    4294967301: "Long code"
}
SIP_METHODS = [
    "INVITE",
    "ACK",
    "BYE",
    "CANCEL",
    "OPTIONS",
    "REGISTER",
    "PRACK",
    "SUBSCRIBE",
    "NOTIFY",
    "PUBLISH",
    "INFO",
    "REFER",
    "MESSAGE",
    "UPDATE"
]
|
from flask import Flask, render_template, session

app = Flask(__name__)
app.secret_key = 'thisisnotacookie'


@app.route('/')
def counting():
    """Render the index page with a per-session visit counter."""
    if 'counter' not in session:
        session['counter'] = 0
    # Increment exactly once per request.  The original looped over every
    # key in the session ("for counter in session"), bumping the counter
    # once per stored key, which over-counts as soon as the session holds
    # anything besides 'counter'.
    session['counter'] += 1
    return render_template("index.html", counter=session['counter'])


app.run(debug=True)
import os
import pickle
import numpy as np
import time
import librosa
from speakerfeatures import extract_features
import warnings
warnings.filterwarnings("ignore")

# Paths to the test audio, the trained GMM models and the list of test files.
source = "dataset\\test\\"
model_path = "speaker_models\\"
test_file = "test_path.txt"

num_correct_label = 0
num_test_files = 0

gmm_files = [os.path.join(model_path, fname) for fname in
             os.listdir(model_path) if fname.endswith('.gmm')]

# Load the Gaussian Mixture Models.  'rb' (read-only binary) is what
# pickle.load needs -- the original 'r+b' opened read-write -- and the
# context manager guarantees each handle is closed.
models = []
for fname in gmm_files:
    with open(fname, 'rb') as model_file:
        models.append(pickle.load(model_file))
speakers = [fname.split("\\")[-1].split(".gmm")[0] for fname in gmm_files]

# Read the test file list and score every utterance against every model.
with open(test_file, 'r') as file_paths:
    for path in file_paths:
        path = path.strip()
        num_test_files += 1
        print(path)
        signal, sr = librosa.load(source + path)
        feature_vector = extract_features(signal, sr)
        log_likelihood = np.zeros(len(models))
        for i in range(len(models)):
            gmm = models[i]  # checking with each model one by one
            scores = np.array(gmm.score(feature_vector))
            log_likelihood[i] = scores.sum()
        # The model with the highest log-likelihood wins; the expected
        # speaker is the first path component of the test file.
        winner = np.argmax(log_likelihood)
        if(speakers[winner] == path.split("\\")[0]):
            num_correct_label += 1
        print("\tdetected as - ", speakers[winner])
        time.sleep(1.0)
print('Accuracy:' + str(num_correct_label) + '/' + str(num_test_files))
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class LOGGER:
    """Collects per-epoch training metrics, mirrors messages to a log file
    and renders accuracy/loss/baseline plots at the end of a run."""

    def __init__(self, log_file_path, file_id, log_mode=0, n_epoch=30):
        """
        log_file_path -- directory prefix for artifacts (must end with a
                         path separator, it is concatenated directly)
        file_id       -- prefix for the .npy/.png files
        log_mode      -- kept for API compatibility; not used internally
        n_epoch       -- length of the per-epoch metric arrays
        """
        self.log_file_path = log_file_path
        self.file_id = file_id
        self.log_mode = log_mode
        self.log_buf = []
        # np.full builds a NaN-initialised float array directly; the
        # original np.full_like(np.zeros(n), nan) allocated twice.
        self.val_array = np.full(n_epoch, np.nan)
        self.train_array = np.full(n_epoch, np.nan)
        self.loss_array = np.full(n_epoch, np.nan)
        self.baseline = np.full(n_epoch, np.nan)
        self.max_acc = 0
        self.best = ""
        # Truncate/create the log file up front.
        with open(log_file_path + "log.txt", "w") as f:
            f.write(" ")

    def l_print(self, sentence):
        """Print a message and buffer it for the next update_log() flush."""
        print(sentence)
        self.log_buf.append(sentence)
        return

    def update_log(self):
        """Append all buffered messages to log.txt and clear the buffer.

        Messages are written verbatim -- callers include their own newlines.
        """
        with open(self.log_file_path + "log.txt", "a") as f:
            for buf in self.log_buf:
                f.write(buf)
        self.log_buf = []
        return

    def set_loss(self, loss, epoch):
        """Record the training loss for one epoch."""
        self.loss_array[epoch] = loss

    def set_acc(self, train, val, epoch):
        """Record train/validation accuracy for one epoch.

        When val improves on the best seen so far, self.best becomes
        "best_" (a file-name prefix for checkpointing); otherwise "".
        """
        self.train_array[epoch] = train
        self.val_array[epoch] = val
        self.best = ""
        if val > self.max_acc:
            self.max_acc = val
            self.best = "best_"
        self.l_print("train_acc:{:1.4f} test_acc:{:1.4f}\n".format(train, val))

    def set_baseline(self, b, epoch):
        """Record the baseline metric for one epoch."""
        self.baseline[epoch] = b

    def save_acc(self):
        """Dump metric arrays as .npy and save acc/loss/baseline plots."""
        np.save(self.log_file_path + self.file_id + "test.npy", self.val_array)
        np.save(self.log_file_path + self.file_id + "train.npy", self.train_array)
        np.save(self.log_file_path + self.file_id + "base.npy", self.baseline)
        plt.figure()
        plt.ylim([0, 1])
        p1 = plt.plot(self.val_array, color="green")
        p2 = plt.plot(self.train_array, color="blue")
        plt.legend((p1[0], p2[0]), ("test", "train"), loc=2)
        plt.savefig(self.log_file_path + self.file_id + "acc.png")
        plt.figure()
        plt.plot(self.loss_array)
        plt.savefig(self.log_file_path + self.file_id + "loss.png")
        plt.figure()
        plt.plot(self.baseline)
        plt.savefig(self.log_file_path + self.file_id + "base.png")
        plt.close("all")
|
#!/usr/bin/env python
#
# Convert an efs-*.log file from the tracker program to
# three SVM training files, one for each output axis.
# Usage:
#   ./split-efs-log.py <log file> <x file> <y file> <z file>
#
# --Micah
#

import sys


def main(log_file, x_file, y_file, z_file):
    """Split each 11-column log line into three SVM training files.

    Columns 0-7 become libsvm-style features "1:v ... 8:v"; columns 8, 9
    and 10 are the x, y and z regression targets respectively.
    """
    # Context managers guarantee the output files are flushed and closed
    # even if a malformed line raises (the originals were never closed).
    with open(log_file) as log, \
            open(x_file, "w") as xf, \
            open(y_file, "w") as yf, \
            open(z_file, "w") as zf:
        for line in log:
            values = [float(field) for field in line.split()]
            training_data = " ".join(
                "%d:%f" % (i + 1, values[i]) for i in range(8))
            xf.write("%f %s\n" % (values[8], training_data))
            yf.write("%f %s\n" % (values[9], training_data))
            zf.write("%f %s\n" % (values[10], training_data))


if __name__ == "__main__":
    # Guarding the entry point keeps the module importable (and testable)
    # without consuming sys.argv; CLI behaviour is unchanged.
    main(*sys.argv[1:])
#!/usr/bin/python
# Python 2 string basics: indexing, slicing, "updating" by building new
# strings, and raw vs. interpreted escape sequences.
var1 = 'Hello World'
var2 = "Python Programming"
# Strings are indexable and sliceable (the end index is exclusive).
print "var[0]:",var1[0]
print "var2[1:5]:",var2[1:5]
var1 = "Hello World"
# Strings are immutable; an "update" concatenates slices into a new string.
print "update a string",var1[:6]+'python'
print "update a string",'python'+var1[5:]
# r"..." keeps the backslash literal; the plain string interprets \n.
print r"hello\n"
print "hello\n"
|
__author__ = 'kasi'
import matplotlib.pyplot as plt
from collections import OrderedDict
# calculates the frequency of words based on their length and plots a graph of the words.
class WordFrequencyCounter(object):
    """Counts how often each word length (0-29 characters) occurs in a set
    of posts and saves a bar chart of the distribution."""

    # Punctuation stripped from posts before splitting into words.
    characters_to_remove = ',.?!'

    def __init__(self, top_posts):
        # top_posts: reddit-style dicts; the text lives at post['data']['body'].
        self.top_posts = top_posts
        self.word_count = {}

    def get_frequency(self):
        """Tally word lengths over all posts and save a bar chart.

        Only lengths 0-29 are tracked; longer words are silently ignored.
        """
        for i in range(0, 30):
            self.word_count[i] = 0
        for post in self.top_posts:
            words = self.filter_message(post['data']['body'])
            for word in words:
                word_length = len(word)
                if word_length in self.word_count.keys():
                    self.word_count[word_length] += 1
        self.word_count = OrderedDict(sorted(self.word_count.items()))
        # create graph
        plt.bar(self.word_count.keys(), self.word_count.values(), alpha=0.4, align='center', label=self.word_count.keys())
        plt.ylabel('times used')
        plt.xlabel('letters in word')
        plt.savefig('frequency_piechart')

    def filter_message(self, post_message):
        """Strip punctuation, normalise newlines and split into words.

        Bug fix: the original called replace('/n', ''), which deleted the
        literal two-character sequence "/n" from posts instead of handling
        newlines.  Newlines are mapped to spaces so words on different
        lines stay separate after split().
        """
        cleaned = post_message.replace('\n', ' ')
        return ''.join([character for character in cleaned if character not in self.characters_to_remove]).split()
#!/usr/bin/python3
from __future__ import print_function
import contextlib
import sys
import logging
from irods.configuration import IrodsConfig
import irods.log
def get_current_schema_version(irods_config=None, cursor=None):
    """Return the catalog schema version recorded in the database.

    Falls back to a freshly loaded IrodsConfig when none is supplied.
    NOTE(review): the cursor parameter is accepted but unused here.
    """
    config = IrodsConfig() if irods_config is None else irods_config
    return config.get_schema_version_in_database()
if __name__ == '__main__':
    # Let everything through the root logger; handlers filter by level.
    logging.getLogger().setLevel(logging.NOTSET)
    l = logging.getLogger(__name__)
    # INFO..WARNING goes to stdout, WARNING and above to stderr.
    irods.log.register_tty_handler(sys.stdout, logging.INFO, logging.WARNING)
    irods.log.register_tty_handler(sys.stderr, logging.WARNING, None)
    irods_config = IrodsConfig()
    irods.log.register_file_handler(irods_config.control_log_path)
    print(get_current_schema_version(irods_config))
|
# coding: utf8
# web2py model file: executed inside the web2py global environment, which
# provides DAL, Field, request and the IS_* validators used below.
db = DAL('mysql://srikant:homeauto@localhost/ha_db')
dropdown = ('jpg', 'pdf', 'png', 'doc')
# NOTE(review): possible_extensions duplicates dropdown and is unused in
# this file -- presumably consumed elsewhere; verify before removing.
possible_extensions = ('jpg', 'pdf', 'png', 'doc')
db.define_table('converter',
    Field('convert_from', requires = IS_IN_SET(dropdown), default = dropdown[0]),
    Field('convert_to', requires = IS_IN_SET(dropdown), default = dropdown[0]),
    # NOTE(review): the upload validator accepts only '.png' although the
    # dropdowns offer jpg/pdf/doc as well -- confirm this is intentional.
    Field('upload_input_file', 'upload', uploadfolder = request.folder+'uploads',
        requires = IS_UPLOAD_FILENAME(extension='png'), autodelete=True),
    )
|
from typing import List, Any
from random import random
from math import log
from collections import defaultdict
# Module-level document counters.
# NOTE(review): train() assigns cnt_pos_docs/cnt_neg_docs as *locals* of the
# same name (no `global` statement), so these module globals are never
# updated -- confirm they are dead before removing.
cnt_pos_docs = 0
cnt_neg_docs = 0
def count_labels(labels: List):
    """Return a mapping from each distinct label to its frequency."""
    counts = {}
    for label in labels:
        counts[label] = counts.get(label, 0) + 1
    return counts
def preprocessing(texts: List[str]):
    """Lower-case every text and pad symbols with spaces, in place.

    A "symbol" is any character that is not a digit, letter, whitespace or
    apostrophe; a space is inserted on each side of it that touches a
    non-space character, so punctuation becomes its own token for the later
    split().  The input list is mutated and also returned.

    NOTE(review): the inner loop range is fixed to the *original* string
    length while insertions grow the string, so indices shift under the
    loop and symbols near the end of a long run may be skipped -- confirm
    this best-effort behaviour is acceptable.
    """
    for i in range(len(texts)):
        texts[i] = texts[i].lower()
    for j in range(len(texts)):
        for i in range(len(texts[j]) - 1):
            if (not (texts[j][i].isdigit() or texts[j][i].isalpha() or texts[j][i].isspace() or texts[j][i] == "'")):
                # Pad the symbol on whichever sides touch non-space text.
                if (not texts[j][i - 1].isspace() and not texts[j][i + 1].isspace()):
                    texts[j] = texts[j][:i] + ' ' + texts[j][i] + ' ' + texts[j][i + 1:]
                elif (not texts[j][i - 1].isspace()):
                    texts[j] = texts[j][:i] + ' ' + texts[j][i:]
                elif (not texts[j][i + 1].isspace()):
                    texts[j] = texts[j][:i + 1] + ' ' + texts[j][i + 1:]
        # The loop above stops one character early; handle a trailing symbol.
        if (not (texts[j][len(texts[j]) - 1].isdigit() or texts[j][len(texts[j]) - 1].isalpha() or texts[j][len(texts[j]) - 1].isspace())):
            if (not texts[j][len(texts[j]) - 2].isspace()):
                texts[j] = texts[j][:len(texts[j]) - 1] + ' ' + texts[j][len(texts[j]) - 1]
    return texts
def text_to_tokens(texts: List[str]):
    """Convert each text into a dict of bigram/trigram frequencies.

    Unigrams are deliberately not counted -- the feature set is word pairs
    and word triples only.
    """
    tokenized_texts: List[dict] = []
    for text in texts:
        tokens = text.split()
        freqs = defaultdict(int)
        for first, second in zip(tokens, tokens[1:]):
            freqs[first + ' ' + second] += 1
        for first, second, third in zip(tokens, tokens[1:], tokens[2:]):
            freqs[first + ' ' + second + ' ' + third] += 1
        tokenized_texts.append(freqs)
    return tokenized_texts
def train(
        train_texts: List[str],
        train_labels: List[str],
        pretrain_params: Any = None) -> Any:
    """
    Trains a naive Bayes classifier on the given train set represented as
    parallel lists of texts and corresponding labels.
    :param train_texts: a list of texts (str objects), one str per example
    :param train_labels: a list of labels ('pos'/'neg'), one per example
    :param pretrain_params: parameters that were learned at the pretrain step
    :return: dict of learnt parameters (it will be passed to classify)
    """
    train_texts = preprocessing(train_texts)
    train_tokenized_texts = text_to_tokens(train_texts)
    train_pos = [train_tokenized_texts[i] for i in range(len(train_labels)) if train_labels[i] == 'pos']
    train_neg = [train_tokenized_texts[i] for i in range(len(train_labels)) if train_labels[i] == 'neg']
    cnt_pos_docs = len(train_pos)
    cnt_neg_docs = len(train_neg)

    all_words = set()
    pos_dict = defaultdict(int)
    neg_dict = defaultdict(int)

    def _accumulate(docs, class_dict):
        # Sum token frequencies for one class; returns the class token total.
        total = 0
        for text in docs:
            for token in text:
                all_words.add(token)
                class_dict[token] += text[token]
                total += text[token]
        return total

    sum_len_pos = _accumulate(train_pos, pos_dict)
    sum_len_neg = _accumulate(train_neg, neg_dict)

    alpha = 1  # additive (Laplace) smoothing
    M = len(all_words)
    print("____________")
    print("Words quantity", M)
    print("____________")
    token_probs_pos = defaultdict(int)
    token_probs_neg = defaultdict(int)
    for token in all_words:
        token_probs_pos[token] = (alpha + pos_dict[token]) / (alpha * M + sum_len_pos)
        token_probs_neg[token] = (alpha + neg_dict[token]) / (alpha * M + sum_len_neg)
    return {
        "token_probs_pos": token_probs_pos,
        "token_probs_neg": token_probs_neg,
        "all_words": all_words,
        "sum_len_pos": sum_len_pos,
        "sum_len_neg": sum_len_neg,
        "cnt_pos_docs": cnt_pos_docs,
        # Bug fix: this key previously mapped to cnt_pos_docs, which skewed
        # the class prior log(cnt_neg_docs) used in classify().
        "cnt_neg_docs": cnt_neg_docs,
        "pos_dict": pos_dict,
        "neg_dict": neg_dict
    }
def pretrain(texts_list: List[List[str]]) -> Any:
    """
    Pretrain classifier on unlabeled texts. If your classifier cannot train on unlabeled data, skip this.
    :param texts_list: a list of list of texts (str objects), one str per example.
    It might be several sets of texts, for example, train and unlabeled sets.
    :return: learnt parameters, or any object you like (it will be passed to the train function)
    """
    # This naive Bayes implementation does not use unlabeled data.
    return None
def classify(texts: List[str], params: Any) -> List[str]:
    """
    Classify texts given previously learnt parameters.
    :param texts: texts to classify
    :param params: parameters received from train function
    :return: list of labels corresponding to the given list of texts

    NOTE(review): token_probs_pos/neg are defaultdicts and are *mutated*
    while classifying -- an unseen token gets its smoothed probability
    written back for later texts, but contributes nothing to the score of
    the current text.  Confirm this online-smoothing behaviour is intended.
    NOTE(review): log(cnt_pos_docs)/log(cnt_neg_docs) raises a math domain
    error when a class has no training documents.
    """
    alpha = 1
    token_probs_pos = params["token_probs_pos"]
    token_probs_neg = params["token_probs_neg"]
    all_words = params["all_words"]
    M = len(all_words)
    cnt_pos_docs = params["cnt_pos_docs"]
    cnt_neg_docs = params["cnt_neg_docs"]
    sum_len_neg = params["sum_len_neg"]
    sum_len_pos = params["sum_len_pos"]
    pos_dict = params["pos_dict"]
    neg_dict = params["neg_dict"]
    test_texts = preprocessing(texts)
    test_tokenized_texts = text_to_tokens(test_texts)
    res = []
    log_pos_probablity = 0
    log_neg_probablity = 0
    i = 0
    for text in test_tokenized_texts:
        if (i % 5000 == 0):
            print("Classified", i, "texts")
        i += 1
        # Class priors (log-space); per-token log-likelihoods are added below.
        log_pos_probablity = log(cnt_pos_docs)
        log_neg_probablity = log(cnt_neg_docs)
        for token in text:
            if (token_probs_pos[token] == 0):
                token_probs_pos[token] = alpha / (alpha * M + sum_len_pos)
            else:
                log_pos_probablity += log(token_probs_pos[token])
            if (token_probs_neg[token] == 0):
                token_probs_neg[token] = alpha / (alpha * M + sum_len_neg)
            else:
                log_neg_probablity += log(token_probs_neg[token])
        # Ties go to "pos" (strict comparison).
        if (log_neg_probablity > log_pos_probablity):
            res.append("neg")
            #for token in text:
            #    all_words.add(token)
            #    M = len(all_words)
            #    neg_dict[token] += text[token]
            #    sum_len_neg += text[token]
            #    token_probs_neg[token] = (alpha + neg_dict[token]) / (alpha * M + sum_len_neg)
        else:
            res.append("pos")
            #for token in text:
            #    all_words.add(token)
            #    M = len(all_words)
            #    pos_dict[token] += text[token]
            #    sum_len_pos += text[token]
            #    token_probs_pos[token] = (alpha + pos_dict[token]) / (alpha * M + sum_len_pos)
    print('Predicted labels counts:')
    print(count_labels(res))
    return res
|
#!/usr/bin/env python
"""Parse GTFS files.
General Transit Feed Specification Reference: https://developers.google.com/transit/gtfs/reference
Author: Panu Ranta, panu.ranta@iki.fi, https://14142.net/kartalla/about.html
"""
import csv
import logging
import os
import polyline
def get_routes(input_dir):
    """Parse GTFS files into dict of routes.

    Reads the standard GTFS text files from input_dir and returns
    {route_id: route dict}, with services, trips, calendar data, stop
    times and encoded shapes attached by the helper passes below.
    (Python 2 module: print statements and dict iter* methods.)
    """
    print 'parsing shapes...'
    shapes = _parse_shapes(os.path.join(input_dir, 'shapes.txt'))
    print 'parsing stops...'
    stops = _parse_stops(os.path.join(input_dir, 'stops.txt'))
    print 'parsing routes...'
    routes = _parse_routes(os.path.join(input_dir, 'routes.txt'))
    print 'adding services and trips to routes...'
    _add_services_trips_to_routes(routes, os.path.join(input_dir, 'trips.txt'))
    print 'adding calendar to services...'
    _add_calendar_to_services(routes, os.path.join(input_dir, 'calendar.txt'))
    print 'adding calendar dates to services...'
    _add_calendar_dates_to_services(routes, os.path.join(input_dir, 'calendar_dates.txt'))
    print 'adding stop times to trips...'
    _add_stop_times_to_trips(routes, os.path.join(input_dir, 'stop_times.txt'))
    print 'adding shapes to routes...'
    _add_shapes_to_routes(routes, shapes, stops)
    return routes
def _parse_shapes(shapes_txt):
shapes = {}
with open(shapes_txt, 'r') as input_file:
csv_reader = csv.DictReader(input_file)
for row in csv_reader:
if row['shape_id'] not in shapes:
shapes[row['shape_id']] = {'is_invalid': False, 'points': []}
point = (float(row['shape_pt_lat']), float(row['shape_pt_lon']))
shapes[row['shape_id']]['points'].append(point)
if (point == (58.432233, 20.142573)) or (point[0] < 0) or (point[1] < 0):
shapes[row['shape_id']]['is_invalid'] = True
logging.debug('parsed {} shapes'.format(len(shapes)))
return shapes
def _parse_stops(stops_txt):
stops = {}
with open(stops_txt, 'r') as input_file:
csv_reader = csv.DictReader(input_file)
for row in csv_reader:
stops[row['stop_id']] = (float(row['stop_lat']), float(row['stop_lon']))
logging.debug('parsed {} stops'.format(len(stops)))
return stops
def _parse_routes(routes_txt):
    """Read routes.txt into {route_id: route skeleton dict}.

    Unknown route_type values are logged and passed through unchanged.
    """
    # 109: https://github.com/HSLdevcom/kalkati2gtfs/commit/d4758fb74d7455ddbf4032175ef8ff51c587ec7f
    route_types = {'0': 'tram', '1': 'metro', '3': 'bus', '4': 'ferry', '6': 'bus', '106': 'train',
                   '109': 'train', '2': 'train', '7': 'train', '700': 'bus', '701': 'bus',
                   '702': 'bus', '704': 'bus', '715': 'bus', '1104': 'airplane'}
    routes = {}  # by route_id
    with open(routes_txt, 'r') as input_file:
        for row in csv.DictReader(input_file):
            if row['route_type'] not in route_types:
                logging.error('In route_id={} route_type {} not in {}'.format(
                    row['route_id'], row['route_type'], route_types))
            # create new route skeleton; services/shapes are filled later
            routes[row['route_id']] = {
                'agency_id': row.get('agency_id', 0),
                'route_id': row['route_id'],
                'name': _get_route_name(row),
                'long_name': row['route_long_name'],
                'type': route_types.get(row['route_type'], row['route_type']),
                'is_direction': False,
                'services': {},  # by service_id
                'shapes': []
            }
    logging.debug('parsed {} routes'.format(len(routes)))
    return routes
def _get_route_name(row): # row in routes.txt
if row['route_short_name'] != '':
return row['route_short_name']
else:
return row['route_id'] # metro routes do not have short names
def _add_services_trips_to_routes(routes, trips_txt):
    """Attach each trips.txt row to its route's services/trips structure.

    Rows for unknown routes or with an invalid direction_id are logged
    and skipped.
    """
    with open(trips_txt, 'r') as input_file:
        for row in csv.DictReader(input_file):
            if row['route_id'] not in routes:
                logging.error('No route information for route_id={}'.format(row['route_id']))
                continue
            if ('direction_id' in row) and (row['direction_id'] not in ['0', '1']):
                logging.error('For route_id={} invalid direction_id: {}'.format(
                    row['route_id'], row['direction_id']))
                continue
            route = routes[row['route_id']]
            route['is_direction'] = 'direction_id' in row
            _add_services_trips_to_route(route, row)
def _add_services_trips_to_route(route, row):  # row in trips.txt
    """Create the service (if new) and trip entries for one trips.txt row.

    start_time/stop_times are placeholders filled by the stop_times pass;
    duplicate trip_ids are logged and ignored.
    """
    # route contains services, service contains trips
    if row['service_id'] not in route['services']:
        # create new service
        route['services'][row['service_id']] = {
            'start_date': None,
            'end_date': None,
            'weekdays': None,
            'exception_dates': {'added': [], 'removed': []},
            'trips': {},  # by trip_id
            'directions_i': None,
            'directions': {'0': _create_direction(), '1': _create_direction()}
        }
    service = route['services'][row['service_id']]
    if row['trip_id'] in service['trips']:
        logging.error('In route_id={} service_id={} duplicate trip_id: {}'.format(
            row['route_id'], row['service_id'], row['trip_id']))
    else:
        # create new trip; feeds without direction_id default to '0'
        direction_id = row.get('direction_id', '0')
        service['trips'][row['trip_id']] = {
            'route_id': row['route_id'],
            'service_id': row['service_id'],
            'direction_id': direction_id,
            'start_time': 0,  # number of minutes after midnight
            'is_departure_times': False,
            'stop_times': [],  # arrival and departure times for each stop
            'stop_times_i': None
        }
        _add_shape_id_to_direction(service['directions'][direction_id], row)
def _create_direction():
return {
'shape_id': None,
'shape_i': None,
'stops': {}, # by stop_sequence
'stop_distances': [] # point indexes in encoded shape
}
def _add_shape_id_to_direction(direction, row): # row in trips.txt
if direction['shape_id'] and (row['shape_id'] != direction['shape_id']):
logging.error('In service_id={} duplicate shape_id: {}'.format(
row['service_id'], row['shape_id']))
direction['shape_id'] = row['shape_id']
def _add_calendar_to_services(routes, calendar_txt):
    """Read calendar.txt and copy date ranges and weekdays onto services."""
    services = _get_services(routes)
    with open(calendar_txt, 'r') as input_file:
        for row in csv.DictReader(input_file):
            # The same service_id may belong to several routes.
            for service in services.get(row['service_id'], []):
                if service['start_date']:
                    logging.error('duplicate service_id={} in calendar'.format(
                        row['service_id']))
                service['start_date'] = row['start_date']
                service['end_date'] = row['end_date']
                service['weekdays'] = _get_service_weekdays(row)
def _get_services(routes):
services = {}
for route in routes.itervalues():
for service_id, service in route['services'].iteritems():
if service_id not in services:
services[service_id] = []
services[service_id].append(service)
return services
def _get_service_weekdays(row): # row in calendar.txt
days = [row['monday'], row['tuesday'], row['wednesday'], row['thursday'], row['friday'],
row['saturday'], row['sunday']]
if ''.join(sorted(days)) == '0000001': # exactly one weekday (HSL)
return days.index('1')
else:
return ''.join(days)
def _add_calendar_dates_to_services(routes, calendar_dates_txt):
    """Read calendar_dates.txt and attach exception dates to services."""
    services = _get_services(routes)
    with open(calendar_dates_txt, 'r') as input_file:
        for row in csv.DictReader(input_file):
            for service in services.get(row['service_id'], []):
                _add_calendar_dates_to_service(service, row)
def _add_calendar_dates_to_service(service, row): # row in calendar_dates_txt
exception_types = {'1': 'added', '2': 'removed'}
if row['exception_type'] in exception_types:
if (row['date'] < service['start_date']) or (row['date'] > service['end_date']):
logging.error('For service_id={} invalid exception date: {}'.format(
row['service_id'], row['date']))
else:
exception_type = exception_types[row['exception_type']]
service['exception_dates'][exception_type].append(row['date'])
else:
logging.error('For service_id={} invalid exception_type: {}'.format(
row['service_id'], row['exception_type']))
def _add_stop_times_to_trips(routes, stop_times_txt):
    """Add stops to services and stop times (and start time) to trips."""
    trips = _get_trips(routes)
    is_seconds_in_time = False
    with open(stop_times_txt, 'r') as input_file:
        for row in csv.DictReader(input_file):
            if not is_seconds_in_time:
                # Only probe (and log) until seconds are first detected.
                is_seconds_in_time = _is_seconds_in_time(row)
            trip = trips.get(row['trip_id'])
            if trip is None:
                logging.error('No trip information for trip_id={}'.format(row['trip_id']))
                continue
            if not trip['stop_times']:
                # The first stop of the trip defines its start time.
                trip['start_time'] = _get_minutes(row['arrival_time'])
            _add_stop_times_to_trip(trip, row)
            service = routes[trip['route_id']]['services'][trip['service_id']]
            _add_stop_to_stops(service['directions'][trip['direction_id']]['stops'], row)
    _delete_invalid_trips(routes, trips)
def _get_trips(routes):
trips = {}
for route in routes.itervalues():
for service in route['services'].itervalues():
for trip_id, trip in service['trips'].iteritems():
trips[trip_id] = trip
return trips
def _is_seconds_in_time(row): # row in stop_times.txt
for time_type in ['arrival_time', 'departure_time']:
if not row[time_type].endswith(':00'):
logging.info('Seconds in {}.'.format(time_type))
return True
return False
def _get_minutes(time_string):
"""Get number of minutes after midnight from HH:MM:SS time string."""
(hours, minutes, seconds) = time_string.split(':')
return (int(hours) * 60) + int(minutes) + int(round(int(seconds) / 60.0))
def _add_stop_times_to_trip(trip, row):  # row in stop_times.txt
    """Append arrival/departure offsets (minutes from trip start) for one stop."""
    arrival = _get_minutes(row['arrival_time'])
    departure = _get_minutes(row['departure_time'])
    if arrival != departure:
        # Remember that at least one stop has a distinct departure time.
        trip['is_departure_times'] = True
    trip['stop_times'].extend([arrival - trip['start_time'],
                               departure - trip['start_time']])
def _add_stop_to_stops(stops, row): # row in stop_times.txt
stop_sequence = int(row['stop_sequence'])
if stop_sequence not in stops:
stops[stop_sequence] = row['stop_id']
else:
if stops[stop_sequence] != row['stop_id']:
logging.error('In trip_id={} two stops for stop_sequence={}: {} {} '.format(
row['trip_id'], stop_sequence, row['stop_id'],
stops[stop_sequence]))
def _delete_invalid_trips(routes, trips):
    """Remove invalid trips from their services, and services left empty."""
    # *trips* is a pre-built snapshot, so deleting from the nested
    # service['trips'] dicts does not disturb this iteration.
    for trip_id, trip in trips.items():
        if not _is_trip_invalid(trip_id, trip):
            continue
        route = routes[trip['route_id']]
        service = route['services'][trip['service_id']]
        del service['trips'][trip_id]
        logging.info('Deleted trip_id={} in route={} as invalid.'.format(
            trip_id, route['long_name']))
        if not service['trips']:
            del route['services'][trip['service_id']]
            logging.info('Deleted service_id={}/route_id={} with no trips.'.format(
                trip['service_id'], trip['route_id']))
def _is_trip_invalid(trip_id, trip):
    """Return True (and log the reason) if the trip's stop_times are unusable."""
    stop_times = trip['stop_times']
    reason = None
    if len(stop_times) < 4:  # fewer than two (arrival, departure) pairs
        reason = 'short'
    elif stop_times != sorted(stop_times):  # times must be non-decreasing
        reason = 'order'
    elif _get_max_stop_time_gap(stop_times) > (8 * 60):  # 8 hours
        reason = 'gap'
    if reason is None:
        return False
    msg_format = 'In trip_id={}/service_id={}/route_id={} invalid stop_times ({}): {}'
    logging.error(msg_format.format(
        trip_id, trip['service_id'], trip['route_id'], reason, stop_times))
    return True
def _get_max_stop_time_gap(stop_times):
max_gap = 0
for i in range(1, len(stop_times)):
max_gap = max(max_gap, stop_times[i] - stop_times[i - 1])
return max_gap
def _add_shapes_to_routes(routes, shapes, stops):
    """Encode each direction's shape polyline and attach it to its route.

    Identical stop sequences within one route share a cached encoding so the
    same shape is never encoded twice for that route.
    """
    stats = {'shapes': 0, 'points': 0, 'dropped_points': 0, 'bytes': 0}
    # .values() instead of Python 2-only .itervalues() keeps this module
    # runnable on both Python 2 and 3; iteration behavior is identical here.
    for route in routes.values():
        cache = {}  # encoded shape per unique stop sequence of this route
        for service_id in route['services']:
            for direction in route['services'][service_id]['directions'].values():
                if not direction['shape_id']:  # some services operate only in one direction
                    continue
                if not _is_shape_ok(route, service_id, direction, shapes):
                    continue
                cache_key = tuple(sorted(direction['stops'].items()))
                if cache_key in cache:
                    direction['stop_distances'] = cache[cache_key]['stop_distances']
                    direction['shape_i'] = cache[cache_key]['shape_i']
                else:
                    shape = shapes[direction['shape_id']]['points']
                    stop_distances = _get_stop_distances(shape, direction['stops'], stops)
                    _add_shape_to_route(route, direction, shape, stop_distances, stats)
                    cache[cache_key] = {
                        'shape_i': direction['shape_i'],
                        'stop_distances': direction['stop_distances']}
    logging.debug('shape encoding stats: {}'.format(stats))
    _delete_invalid_services(routes)
def _is_shape_ok(route, service_id, direction, shapes):
if direction['shape_id'] not in shapes:
logging.error('No shape information for shape_id={} in route={}.'.format(
direction['shape_id'], route['long_name']))
direction['shape_id'] = None
return False
elif shapes[direction['shape_id']]['is_invalid']:
logging.error('Invalid shape_id={} for service_id={} in route={}.'.format(
direction['shape_id'], service_id, route['long_name']))
direction['shape_id'] = None
return False
else:
return True
def _get_stop_distances(shape, direction_stops, stops):
    """Map each stop (in stop_sequence order) to a point index on *shape*.

    The search for each stop starts from the previously matched point index,
    so returned indexes follow the direction of travel along the shape.
    """
    stop_distances = []
    # .items() instead of Python 2-only .iteritems() keeps this module
    # runnable on both Python 2 and 3; sorted() orders by stop_sequence.
    for _, stop_id in sorted(direction_stops.items()):
        if stop_id not in stops:
            logging.error('No stop information for stop_id={}'.format(stop_id))
            continue
        previous_index = stop_distances[-1] if stop_distances else 0
        point_index = polyline.get_point_index(shape, stops[stop_id], previous_index)
        stop_distances.append(point_index)
    return stop_distances
def _add_shape_to_route(route, direction, shape, stop_distances, stats):
    """Encode *shape*, store it on *route*, and link *direction* to it."""
    if len(shape) < len(stop_distances):
        logging.error('In route={} less points in shape than stops: {} < {}'.format(
            route['long_name'], len(shape), len(stop_distances)))
        return
    encoded_shape = polyline.encode(shape, stop_distances, very_small=0.00002)
    direction['stop_distances'] = encoded_shape['fixed_indexes']
    if encoded_shape['points'] in route['shapes']:
        logging.error('Duplicate shape encoding for route={}'.format(route['long_name']))
        return
    route['shapes'].append(encoded_shape['points'])
    direction['shape_i'] = len(route['shapes']) - 1
    # Bookkeeping for the encoding statistics reported by the caller.
    stats['shapes'] += 1
    stats['points'] += len(shape)
    stats['dropped_points'] += encoded_shape['num_dropped_points']
    stats['bytes'] += len(encoded_shape['points'])
def _delete_invalid_services(routes):
    """Delete services whose directions both lost their shapes, then empty routes."""
    # _get_trips() snapshots the trips up front, so deletions below do not
    # disturb the iteration.
    for trip in _get_trips(routes).values():
        route = routes.get(trip['route_id'])
        if route is None or trip['service_id'] not in route['services']:
            continue  # already removed while handling an earlier trip
        service = route['services'][trip['service_id']]
        directions = service['directions']
        if (directions['0']['shape_id'] is None) and (directions['1']['shape_id'] is None):
            del route['services'][trip['service_id']]
            logging.error('Deleted service_id={} in route={} with no directions.'.format(
                trip['service_id'], route['long_name']))
            if len(route['services']) == 0:
                del routes[trip['route_id']]
                logging.info('Deleted route_id={} ({}) with no services.'.format(
                    trip['route_id'], route['long_name']))
def get_modification_time(input_dir):
    """Get time of most recent content modification as seconds since the epoch."""
    routes_txt = os.path.join(input_dir, 'routes.txt')
    return int(os.path.getmtime(routes_txt))
|
import copy
import re
from printoption import PrintOption
from unit import Unit
class Battalion(object):
    """A warscroll battalion: a named grouping of units with its own points cost.

    Wraps the battalion's config dict and the Unit objects built from its
    "units" entries.  Points are the battalion's own cost plus its units'.
    (print statements were converted to the parenthesized function-call form,
    which behaves identically on Python 2 and also runs on Python 3.)
    """

    def __init__(self, unit_config):
        self.unit_config = unit_config
        self.units = []
        for c in self.unit_config["units"]:
            self.units.append(Unit(c, "unit"))

    def __str__(self):
        """One-line summary: name(points):[unit, unit, ...]."""
        if self.unitsize() == 0:
            return ""
        line = [("{}({}):[".format(self.name(), self.points()))]
        unitline = []
        for unit in self.units:
            unitstr = unit.str_battalion()
            if len(unitstr) > 0:
                unitline.append(unitstr)
        # Sort alphabetically, ignoring any non-letter characters.
        line.append(", ".join(sorted(unitline, key=lambda x: re.sub('[^A-Za-z]+', '', x).lower())))
        line.append("]")
        return " ".join(line)

    def fullstr(self):
        """Multi-line, tab-indented description including per-unit details."""
        if self.unitsize() == 0:
            return ""
        line = [("\t{} (Warscroll Battalion)".format(self.name()))]
        line.append("\t\tTotal Points: {}".format(self.points()))
        unitline = []
        for unit in self.units:
            unitstr = unit.fullstr(tabs=2)
            if len(unitstr) > 0:
                unitline.append(unitstr)
        line.append("\n".join(sorted(unitline, key=lambda x: re.sub('[^A-Za-z]+', '', x).lower())))
        line.append("")
        return "\n".join(line)

    def __repr__(self):
        return "{}:{}".format(self.name(), str(self.units))

    def __len__(self):
        return len(self.units)

    def __getitem__(self, index):
        # NOTE(review): negative indexes pass this check and fall through to
        # Python's normal negative indexing — confirm that is intended.
        if index < len(self.units):
            return self.units[index]
        raise IndexError("index out of range")

    def __setitem__(self, index, item):
        if index < len(self.units):
            self.units[index] = item
            return
        raise IndexError("index out of range")

    def is_type(self, unittype):
        """True if *unittype* names this entry's kind ("battalion")."""
        return "battalion" == unittype

    def unitsize(self):
        """Total number of models across all member units."""
        size = 0
        for unit in self.units:
            size = size + unit.unitsize()
        return size

    # Points of just the battalion (no units)
    def battalion_points(self):
        return self.unit_config.get("points", 0)

    def points(self):
        """Battalion cost plus the cost of every member unit (0 if empty)."""
        if self.unitsize() == 0:
            return 0
        points = self.battalion_points()
        for unit in self.units:
            points = points + unit.points()
        return points

    def name(self):
        return self.unit_config["name"]

    def is_unique(self):
        # Battalions are deliberately never treated as unique.
        return False
        # return self.unit_config.get("unique", False)

    def roles(self):
        return self.unit_config.get("roles", [])

    def keywords(self):
        # Battalion-level keywords are currently not applied.
        return []
        # return self.unit_config.get("keywords", [])

    def move(self, wounds_suffered=0):
        """Move value; for a damage table (dict keyed by wounds suffered) pick
        the nearest entry at or below *wounds_suffered*, marked with '*'."""
        move = self.unit_config.get("move", 0)
        if type(move) is not dict:
            return move
        if wounds_suffered > self.wounds_per_unit():
            wounds_suffered = self.wounds_per_unit()
        while wounds_suffered > 0 and move.get(wounds_suffered, None) == None:
            wounds_suffered = wounds_suffered - 1
        return "{}*".format(move.get(wounds_suffered, 0))

    def wounds_per_unit(self):
        return self.unit_config.get("wounds", 0)

    # Total number of wounds across all units
    def total_wounds(self):
        return self.wounds_per_unit() * self.unitsize()

    def wounds_str(self):
        """Wounds per model, with the total in parentheses for multi-model size."""
        wounds = self.wounds_per_unit()
        if self.unitsize() == 1:
            return str(wounds)
        return "{}({})".format(wounds, wounds * self.unitsize())

    def save(self):
        """Numeric save value; '-' (no save) is treated as 6."""
        save = self.unit_config.get("save", 0)
        if type(save) is str and save == "-":
            return 6
        return save

    def save_str(self):
        save = self.unit_config.get("save", 0)
        if type(save) is str:
            return save
        return "{}+".format(save)

    def bravery(self):
        return self.unit_config.get("bravery", 0)

    def sum_roles(self, roles):
        """Add each fielded unit's roles into the *roles* counter dict."""
        for unit in self.units:
            if unit.count > 0:
                for r in unit.roles():
                    roles[r] = roles.get(r, 0) + unit.count

    def is_valid(self, restrict_battalion, restrict_config, final=True, showfails=PrintOption.SILENT):
        """Check min/max battalion-count restrictions, then validate members."""
        # TODO: Currently only support 1 or 0 instances of a single battalion
        count = 0
        if self.unitsize() > 0:
            count = 1
        # Check unit meets min restriction
        if final and count < restrict_battalion["min"]:
            if showfails.value > PrintOption.SILENT.value:
                print("FAIL MIN restrict {} {} {} : {}".format(self.name(), restrict_battalion["min"], count, self))
            return False
        if self.unitsize() == 0:
            return True
        # Check unit meets max restriction
        if restrict_battalion["max"] != -1 and count > restrict_battalion["max"]:
            if showfails.value > PrintOption.SILENT.value:
                print("FAIL MAX restrict {} {} {} : {}".format(self.name(), restrict_battalion["max"], count, self))
            return False
        # Check units and count up roles
        for unit in self.units:
            # TODO: Restrict from both restrict config and unit_config !!!
            restrict_unit = unit.unit_config
            restrict_keywords = []
            if not unit.is_valid(restrict_unit, restrict_keywords, final, showfails):
                return False
        return True
|
import json
import requests
# https://blog.51cto.com/183530300/2124750
# SECURITY NOTE(review): Telegram bot token is hardcoded and committed to
# source — it should be rotated and loaded from an environment variable or
# config file instead.
TOKEN = "925265552:AAFjArE5ptRx9t7zp34YBiLq77_-4p7l0fc"
def send_message(method, params=None, timeout=30):
    """Call a Telegram Bot API *method* with *params* and print the JSON reply.

    :param method: Bot API method name, e.g. 'sendMessage' or 'getUpdates'.
    :param params: optional dict of query parameters for the method.
    :param timeout: seconds before the HTTP request is aborted.  Without a
        timeout, requests.get() can block forever on a network stall;
        requests.exceptions.Timeout is raised if it expires.
    """
    url = "https://api.telegram.org/bot{token}/{method}".format(token=TOKEN, method=method)
    print(url, params)
    rst = requests.get(url, params=params, timeout=timeout)
    print(json.dumps(rst.json(), indent=4))
if __name__ == '__main__':
    # send_message('getUpdates')
    # Server alert group (原文: 服务器告警群)
    send_message('sendMessage', params=dict(
        # chat_id="-350709901",
        chat_id="-348154837",
        text="testing bot"
    ))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.