text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python
"""
Generate weights for linear regression based on POP history file and region mask
1. Initially just mimic what is done in Loess step by step notebook
2. Rather than taking the "n nearest points", look for all neighboring points within
some pre-defined area (maybe elipse parameterized by x- and y-axis lengths)
3. Use region mask: don't allow non-zero weights from Pacific for points in Atlantic
(and vice versa)
"""
import numpy as np
import xarray as xr
import dask.bag
class LoessWeightGenClass(object): # pylint: disable=useless-object-inheritance,too-few-public-methods
    """
    A class to wrap function calls required to generate weights for LOESS regression
    """
    def __init__(self, hist_file, mask_file):
        self._read_files(hist_file, mask_file)
        self._gen_weights()
        # self.to_netcdf()
    #####################
    def _read_files(self, hist_file, mask_file):
        """Read grid info from a POP history file and region IDs from a binary mask."""
        # Read netcdf file (first time slice only)
        self._hist_ds = xr.open_dataset(hist_file, decode_times=False).isel(time=0)
        # Read binary file: >i4 => big-endian (>) and 4-byte integer (i4)
        self._region_mask = np.fromfile(mask_file, dtype=">i4")
    #####################
    def _gen_weights(self, ndims=3, num_nearest_pts=384): # pylint: disable=too-many-locals
        """
        Main subroutine to generate weights for linear regression step of LOESS
        Region masks come from POP log:
                                       +-----------------------+
                                       |  Marginal Seas Only   |
          Region    Region             |    Area       Volume  |
          Number    Name               |   (km^2)      (km^3)  |
          ------  -------------------  ----------- -----------
              1   Southern Ocean
              2   Pacific Ocean
              3   Indian Ocean
             -4   Persian Gulf          3.21703E+05 2.57362E+04
             -5   Red Sea               5.55613E+05 5.56528E+04
              6   Atlantic Ocean
              7   Mediterranean Sea
              8   Labrador Sea
              9   GIN Sea
             10   Arctic Ocean
             11   Hudson Bay
            -12   Baltic Sea            3.70458E+05 1.74489E+04
            -13   Black Sea             4.50883E+05 3.29115E+05
            -14   Caspian Sea           2.50748E+05 7.52244E+03
        """
        # Pull necessary data from files
        npts = self._hist_ds['KMT'].data.size
        ds_coords = dict()
        ds_coords['ndims'] = ndims
        ds_coords['npts'] = npts
        ds_coords['num_nearest_pts'] = num_nearest_pts
        # KMT == 0 marks land; everything else is an ocean point
        land_mask = self._hist_ds['KMT'].data.reshape(npts) != 0
        atlantic_mask = (self._region_mask == 6).reshape(npts)
        pacific_mask = (self._region_mask == 2).reshape(npts)
        lat = self._hist_ds['TLAT'].data.reshape(npts)
        lon = self._hist_ds['TLONG'].data.reshape(npts)
        # Convert (lat, lon) -> (x, y, z) on the unit sphere
        deg2rad = np.pi/180.
        lon_loc = lon * deg2rad
        lat_loc = lat * deg2rad
        x3d = np.cos(lat_loc)*np.cos(lon_loc)
        y3d = np.cos(lat_loc)*np.sin(lon_loc)
        z3d = np.sin(lat_loc)
        self.grid = xr.Dataset(coords=ds_coords)
        self.grid['ndims'] = xr.DataArray(np.arange(ndims)+1, dims='ndims')
        self.grid['npts'] = xr.DataArray(np.arange(npts)+1, dims='npts')
        self.grid['num_nearest_pts'] = xr.DataArray(np.arange(num_nearest_pts)+1,
                                                    dims='num_nearest_pts')
        self.grid['coord_matrix'] = xr.DataArray(np.array([x3d, y3d, z3d]), dims=['ndims', 'npts'])
        self.grid['included_pts'] = xr.DataArray(land_mask, dims=['npts'])
        self.grid['norm'] = xr.DataArray(np.empty((npts, num_nearest_pts), dtype=np.float64),
                                         dims=['npts', 'num_nearest_pts'])
        # Fix: np.int was deprecated and removed in NumPy >= 1.24; use np.int64
        self.grid['norm_jind'] = xr.DataArray(np.empty((npts, num_nearest_pts), dtype=np.int64),
                                              dims=['npts', 'num_nearest_pts'])
        # from scipy.sparse import lil_matrix
        # norms_mat = lil_matrix((npts, npts))
        import time
        loop1_start = time.time()
        for i in np.where(self.grid['included_pts'])[0]:
            # _weight_gen_loop only reads from the grid, so there is no need to
            # deep-copy the whole Dataset on every iteration (was a large,
            # accidental per-iteration cost)
            self.grid['norm_jind'].data[i, :], self.grid['norm'].data[i, :] = \
                _weight_gen_loop(i, self.grid, num_nearest_pts,
                                 atlantic_mask, pacific_mask)
        loop1_end = time.time()
        print("Loop1 time: {}".format(loop1_end-loop1_start))
        # for j, jj in enumerate(norm_jind):
        #     norms_mat[i, jj] = norms_loc[j]
        # self.grid['norm_csr'] = norms_mat.tocsr()
    #####################
    def to_netcdf(self, out_file):
        """
        Dump self['LOESS_weights'] to netcdf
        """
        if hasattr(self, 'grid'):
            print("Dump to netcdf file: {}".format(out_file))
            self.grid.to_netcdf(out_file)
class LoessWeightGenParallelClass(LoessWeightGenClass): # pylint: disable=useless-object-inheritance,too-few-public-methods
    """
    A class to wrap function calls required to generate weights for LOESS regression
    (Parallelized via dask)
    """
    def __init__(self, hist_file, mask_file, npart=50):
        self._read_files(hist_file, mask_file)
        self._gen_weights(npart=npart)
        # self.to_netcdf()
    def _gen_weights(self, ndims=3, num_nearest_pts=384, npart=50): # pylint: disable=too-many-locals
        """
        Main subroutine to generate weights for linear regression step of LOESS
        Region masks come from POP log:
                                       +-----------------------+
                                       |  Marginal Seas Only   |
          Region    Region             |    Area       Volume  |
          Number    Name               |   (km^2)      (km^3)  |
          ------  -------------------  ----------- -----------
              1   Southern Ocean
              2   Pacific Ocean
              3   Indian Ocean
             -4   Persian Gulf          3.21703E+05 2.57362E+04
             -5   Red Sea               5.55613E+05 5.56528E+04
              6   Atlantic Ocean
              7   Mediterranean Sea
              8   Labrador Sea
              9   GIN Sea
             10   Arctic Ocean
             11   Hudson Bay
            -12   Baltic Sea            3.70458E+05 1.74489E+04
            -13   Black Sea             4.50883E+05 3.29115E+05
            -14   Caspian Sea           2.50748E+05 7.52244E+03
        """
        # Pull necessary data from files
        npts = self._hist_ds['KMT'].data.size
        ds_coords = dict()
        ds_coords['ndims'] = ndims
        ds_coords['npts'] = npts
        ds_coords['num_nearest_pts'] = num_nearest_pts
        # KMT == 0 marks land; everything else is an ocean point
        land_mask = self._hist_ds['KMT'].data.reshape(npts) != 0
        atlantic_mask = (self._region_mask == 6).reshape(npts)
        pacific_mask = (self._region_mask == 2).reshape(npts)
        lat = self._hist_ds['TLAT'].data.reshape(npts)
        lon = self._hist_ds['TLONG'].data.reshape(npts)
        # Convert (lat, lon) -> (x, y, z) on the unit sphere
        deg2rad = np.pi/180.
        lon_loc = lon * deg2rad
        lat_loc = lat * deg2rad
        x3d = np.cos(lat_loc)*np.cos(lon_loc)
        y3d = np.cos(lat_loc)*np.sin(lon_loc)
        z3d = np.sin(lat_loc)
        self.grid = xr.Dataset(coords=ds_coords)
        self.grid['ndims'] = xr.DataArray(np.arange(ndims)+1, dims='ndims')
        self.grid['npts'] = xr.DataArray(np.arange(npts)+1, dims='npts')
        self.grid['num_nearest_pts'] = xr.DataArray(np.arange(num_nearest_pts)+1,
                                                    dims='num_nearest_pts')
        self.grid['coord_matrix'] = xr.DataArray(np.array([x3d, y3d, z3d]), dims=['ndims', 'npts'])
        self.grid['included_pts'] = xr.DataArray(land_mask, dims=['npts'])
        self.grid['norm'] = xr.DataArray(np.empty((npts, num_nearest_pts), dtype=np.float64),
                                         dims=['npts', 'num_nearest_pts'])
        # Fix: np.int was deprecated and removed in NumPy >= 1.24; use np.int64
        self.grid['norm_jind'] = xr.DataArray(np.empty((npts, num_nearest_pts), dtype=np.int64),
                                              dims=['npts', 'num_nearest_pts'])
        import time
        loop1_start = time.time()
        # Distribute the ocean points over npart dask partitions; each worker
        # gets one copy of the (read-only) grid
        ocn_pts = dask.bag.from_sequence(np.where(self.grid['included_pts'])[0].tolist(),
                                         npartitions=npart)
        dask_out = ocn_pts.map(_weight_gen_loop, self.grid.copy(), num_nearest_pts,
                               atlantic_mask, pacific_mask).compute()
        loop1_end = time.time()
        loop2_start = time.time()
        # Scatter the per-point results back into the output arrays
        for n, i in enumerate(np.where(self.grid['included_pts'])[0]):
            self.grid['norm_jind'].data[i, :] = dask_out[n][0]
            self.grid['norm'].data[i, :] = dask_out[n][1]
        loop2_end = time.time()
        print("Loop1 time: {}".format(loop1_end-loop1_start))
        print("Loop2 time: {}".format(loop2_end-loop2_start))
#######################
def _weight_gen_loop(i, grid_obj, num_nearest_pts, atlantic_mask, pacific_mask):
# Find num_nearest_pts smallest values
# https://stackoverflow.com/questions/5807047/efficient-way-to-take-the-minimum-maximum-n-values-and-indices-from-a-matrix-usi
# Using home-spun norm
temp_array = np.linalg.norm(grid_obj['coord_matrix'].data[:,i].reshape(3,1) - grid_obj['coord_matrix'].data, axis=0)
norms_loc = np.partition(temp_array, num_nearest_pts-1)[:num_nearest_pts]
norm_jind = np.argpartition(temp_array, num_nearest_pts-1)[:num_nearest_pts]
if atlantic_mask[i]:
norms_loc = np.where(pacific_mask[norm_jind], 0, norms_loc)
elif pacific_mask[i]:
norms_loc = np.where(atlantic_mask[norm_jind], 0, norms_loc)
# grid_obj['norm_jind'].data[i, :] = norm_jind
# grid_obj['norm'].data[i, :] = norms_loc
return norm_jind, norms_loc
#####################
# Can run as script #
#####################
def _parse_args():
import argparse
parser = argparse.ArgumentParser(description="Generate a netCDF file containing linear " +
"regression weights for LOESS")
# Command line argument to point to land mask file
parser.add_argument('-f', '--history-file', action='store', dest='hist_file', required=True,
help='Location of POP history file (netCDF format) containing grid info')
parser.add_argument('-m', '--region-mask-file', action='store', dest='mask_file',
required=True, help='Location of binary file containing POP regions')
parser.add_argument('-o', '--output-file', action='store', dest='out_file', required=False,
default='weights.nc', help='Name of netCDF file to write weights in')
return parser.parse_args()
##############################
# __name__ == __main__ block #
##############################
if __name__ == "__main__":
    # Serial driver: read CLI options, build the weights, write them out.
    # (The dask-parallel variant, LoessWeightGenParallelClass, is not wired
    # up to the CLI here.)
    args = _parse_args() # pylint: disable=invalid-name
    wgts = LoessWeightGenClass(args.hist_file, args.mask_file) # pylint: disable=invalid-name
    wgts.to_netcdf(args.out_file)
|
# Generated by Django 3.2.18 on 2023-06-20 08:20
from django.db import connection, migrations
def cleanup_old_database_tables_and_schema(apps, schema_editor):
    """
    This migration removes the `old` database schema
    and all celery-related tables from the database.

    Uses a raw DB cursor (not the ORM) because the tables being dropped are
    legacy artifacts with no Django models. `apps` and `schema_editor` are
    required by the RunPython signature but unused.
    """
    with connection.cursor() as cursor:
        # `\_` escapes the LIKE wildcard so e.g. 'celeryx...' does not match
        cursor.execute(
            "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND (table_name LIKE 'celery\_%' OR table_name LIKE 'djcelery\_%' OR table_name LIKE 'djkombu\_%')"
        )
        tables = cursor.fetchall()
        # Table names come from information_schema (not user input), so the
        # f-string interpolation here is safe from injection
        for table in tables:
            cursor.execute(f"DROP TABLE IF EXISTS {table[0]} CASCADE;")
        cursor.execute("DROP SCHEMA IF EXISTS old CASCADE;")
class Migration(migrations.Migration):
    """One-off cleanup migration: drop legacy celery tables and the `old` schema."""

    dependencies = [
        ("torchbox", "0146_alter_standardpage_additional_content"),
    ]
    operations = [
        # The cleanup is destructive and cannot be undone; provide an explicit
        # no-op reverse so the migration can still be unapplied (previously it
        # was irreversible and would block `migrate` backwards).
        migrations.RunPython(cleanup_old_database_tables_and_schema,
                             migrations.RunPython.noop),
    ]
|
import os
import json
import logging
import requests
import numpy as np
from pipeline_monitor import prometheus_monitor as monitor
from pipeline_logger import log
# Module-level logger that echoes INFO+ messages to stderr via a stream handler
_logger = logging.getLogger('pipeline-logger')
_logger.setLevel(logging.INFO)
_logger_stream_handler = logging.StreamHandler()
_logger_stream_handler.setLevel(logging.INFO)
_logger.addHandler(_logger_stream_handler)

# Only `invoke` is part of this module's public API
__all__ = ['invoke']

# Metadata labels attached to every monitor/log call in this pipeline
_labels = {
    'name': 'transfer',
    'tag': 'ensemble',
    'runtime': 'python',
    'chip': 'cpu',
    'resource_type': 'model',
    'resource_subtype': 'keras',
}

# There is no model to import.
# This function is merely calling other functions/models
# and aggregating the results.
# def _initialize_upon_import():
#     return
# This is called unconditionally at *module import time*...
# _model = _initialize_upon_import()
@log(labels=_labels, logger=_logger)
def invoke(request):
    """Fan the request out to two downstream model services and combine replies.

    There is no local model here: the (transformed) request is POSTed to two
    prediction endpoints and their JSON payloads are merged by
    _transform_response.
    """
    with monitor(labels=_labels, name="transform_request"):
        transformed_request = _transform_request(request)

    with monitor(labels=_labels, name="invoke"):
        # TODO: Handle any failure responses such as Fallback/Circuit-Breaker, etc
        timeout_seconds = 1200

        # TODO: Can we use internal dns name (predict-mnist)
        # TODO: Pass along the request-tracing headers
        url_model_a = 'http://predict-83f05e58transfer-v1pythoncpu1b79207e:8080/invoke'
        response_a = requests.post(
            url=url_model_a,
            data=transformed_request,
            timeout=timeout_seconds
        )

        url_model_b = 'http://predict-83f05e58transfer-v1pythoncpu40c1d1f5:8080/invoke'
        response_b = requests.post(
            url=url_model_b,
            data=transformed_request,
            timeout=timeout_seconds
        )

        # TODO: Aggregate the responses into a single response
        # * Classification: Return the majority class from all predicted classes
        # * Regression: Average the result
        # TODO: Include all models that participated in the response (including confidences, timings, etc)
        response = [response_a.json(), response_b.json()]

    with monitor(labels=_labels, name="transform_response"):
        # Fix: the original called an undefined name (_transform_responsei)
        # and passed raw requests.Response objects; _transform_response
        # expects the decoded JSON dicts built above.
        transformed_response = _transform_response(response[0], response[1])

    return transformed_response
def _transform_request(request):
# request_str = request.decode('utf-8')
# request_json = json.loads(request_str)
# request_np = (np.array(request_json['image'], dtype=np.float32) / 255.0).reshape(1, 28, 28)
# return {"image": json.load(request_np)}
return request
def _transform_response(response_a, response_b):
# return json.dumps({"classes": response['classes'].tolist(),
# "probabilities": response['probabilities'].tolist(),
# })
# TODO: Apply quorum aggregator function vs. hard-coding to class_c
class_a_list = response_a['classes']
class_b_list = response_b['classes']
# TODO: Aggregate probabilities?
probabilities_a_list_list = response_a['probabilities']
probabilities_b_list_list = response_b['probabilities']
# TODO:
return json.dumps({"classes": class_b_list,
"probabilities": probabilities_c_list_list})
# Note: This is a mini test
if __name__ == '__main__':
    # Smoke test: replay a canned request from disk through the full invoke
    # path (requires the downstream model services to be reachable).
    with open('./pipeline_test_request.json', 'rb') as fb:
        request_bytes = fb.read()
    response_bytes = invoke(request_bytes)
    print(response_bytes)
|
import pymysql
import requests
import json
# Session cookie for kd.nsfc.gov.cn; must be refreshed when it expires
Cookie = "JSESSIONID=BAAE61D4FBECC955F6E7E42CAB1AA76B"

# Common request headers for the NSFC conclusion-query API.
# Fix: the original dict literal listed "Origin" twice (a silent duplicate
# key); the redundant entry is removed.
headers = {
    "Content-Type": "application/json",
    "Origin": "http://kd.nsfc.gov.cn",
    "Cookie": Cookie,
    "Referer": "http://kd.nsfc.gov.cn/baseQuery/conclusionQuery",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"
}

# Default payload template for project queries (pageNum is 0-based)
data = {
    "adminID": "",
    "beginYear": "",
    "checkDep": "",
    "checkType": "",
    "code": "",
    "complete": "true",
    "conclusionYear": "",
    "dependUnit": "",
    "endYear": "",
    "keywords": "",
    "pageNum": 0,
    "pageSize": 50,
    "personInCharge": "",
    "projectName": "",
    "projectType": "",
    "psPType": "",
    "queryType": "input",
    "quickQueryInput": "",
    "ratifyNo": "",
    "ratifyYear": "",
    "subPType": "",
    "type": ""
}

# Payload template for person-scoped queries (adds "id", sets type="person")
person_data = {
    "adminID": "",
    "beginYear": "",
    "checkDep": "",
    "checkType": "",
    "code": "",
    "complete": "true",
    "conclusionYear": "",
    "dependUnit": "",
    "endYear": "",
    "id": "",
    "keywords": "",
    "pageNum": 0,
    "pageSize": 50,
    "personInCharge": "",
    "projectName": "",
    "projectType": "",
    "psPType": "",
    "queryType": "input",
    "quickQueryInput": "",
    "ratifyNo": "",
    "ratifyYear": "",
    "subPType": "",
    "type": "person"
}
def get_db():
    """Open and return a new connection to the local `sci` MySQL database."""
    connection = pymysql.connect(
        host='127.0.0.1',
        user='root',
        password='123456',
        database='sci',
        charset='utf8mb4',
    )
    return connection
def get_db1():
    """Open and return a new connection to the local `sci` MySQL database.

    Byte-for-byte duplicate of get_db(); kept for backward compatibility with
    existing callers and now simply delegates to it so the connection settings
    live in one place.
    """
    return get_db()
def insert_project(*args, **kwargs):
    """Insert one record into `project`, `part_person` or `rela_project`.

    args[0] selects the target table ('project' / 'part_person' /
    'rela_project'); kwargs carry the column values. Each branch first checks
    for an existing row by its natural key and skips the insert if found.

    NOTE(review): SQL is built with str.format, which is open to SQL injection
    and breaks on values containing quotes — should use parameterized queries
    (cursor.execute(sql, params)). Connections are also never closed.
    """
    db = get_db1()
    conn = db.cursor()
    if args[0] == 'project':
        sql = "insert into project(`number`,`position`,`study_duration`,`study_result`,`admin_id`," \
              "`title`,`group`,`catetory`,`money`,`master`,`company`,`year`) value ('{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}')".format(
            kwargs['number'], kwargs['position'], kwargs['study_duration'], kwargs['study_result'], kwargs['admin_id'],
            kwargs['title'], kwargs['group'], kwargs['catetory'], kwargs['money'], kwargs['master'], kwargs['company'],
            kwargs['year']
        )
        # Dedupe on project number before inserting
        sql_exeist = "select count(id) from project where number='{}'".format(kwargs['number'])
        print(sql_exeist)
        conn.execute(sql_exeist)
        res = conn.fetchall()
        print(res)
        if res[0][0] > 0:
            print('已经存在')
            return
        try:
            conn.execute(sql)
            db.commit()
            print('插入成功项目')
        except Exception as e:
            # Roll back the failed insert; error is printed, not re-raised
            db.rollback()
            print(str(e))
            print('失败', sql)
    elif args[0] == 'part_person':
        # Dedupe on (number_key, name)
        sql_exist = "select count(id) from part_person where `number_key`='{}' and `name`='{}'".format(kwargs['number_key'],
                                                                                                       kwargs['name'])
        print(sql_exist)
        conn.execute(sql_exist)
        res = conn.fetchall()
        if res[0][0] > 0:
            print('已经存在')
            return
        sql = "insert into part_person(`name`,`position`,`company`,`number_key`,`person_id`) value('{}','{}','{}','{}','{}')".format(
            kwargs['name'], kwargs['position'], kwargs['company'], kwargs['number_key'], kwargs['person_id']
        )
        print(sql)
        try:
            conn.execute(sql)
            db.commit()
            print('插入项目参与人', kwargs['name'])
        except Exception as e:
            db.rollback()
            print('失败', sql)
    elif args[0] == 'rela_project':
        # Dedupe on (number_key, number)
        sql_exist = "select count(id) from rela_project where `number_key`='{}' and `number`='{}'".format(kwargs['number_key'], kwargs['number'])
        print(sql_exist)
        conn.execute(sql_exist)
        res = conn.fetchall()
        if res[0][0] > 0:
            print('已经存在')
            return
        sql = "insert into rela_project(`title`,`number`,`year`,`catetory`,`money`,`master`,`company`,`number_key`,`is_master`) value('{}','{}','{}','{}','{}','{}','{}','{}','{}')".format(
            kwargs['title'], kwargs['number'], kwargs['year'], kwargs['catetory'], kwargs['money'], kwargs['master'], kwargs['company'],
            kwargs['number_key'], kwargs['is_master']
        )
        print(sql)
        try:
            conn.execute(sql)
            db.commit()
            print('插入关联项目', kwargs['number'])
        except Exception as e:
            db.rollback()
            print('失败', sql)
def get_content(*args, **kwargs):
    """Issue one query against the NSFC API and return its parsed payload.

    args: (HTTP method, url). kwargs: payload overrides, plus a 'person' flag
    ('True' as a string) selecting the person_data template over data.
    Returns {'status': 1, 'data': ...} on success, {'status': 0, ...} on error.

    NOTE(review): the module-level template dicts (data / person_data) are
    mutated in place, so overrides leak into later calls — presumably
    unintended; verify. The 'person' key itself is also copied into the
    payload, and a missing 'person' kwarg raises KeyError.
    """
    for k, v in kwargs.items():
        # Template re-selected every iteration; only the last json_data matters
        if kwargs['person'] == 'True':
            temp_data = person_data
        else:
            temp_data = data
        temp_data[k] = v
        json_data = json.dumps(temp_data)
    if args[0] == 'POST':
        rsp = requests.request(args[0], url=args[1], data=json_data, headers=headers)
    else:
        rsp = requests.request(args[0], url=args[1], headers=headers)
    # A response without 'data' means the query failed (or cookie expired)
    if 'data' not in rsp.text:
        print('请求出错', rsp.text)
        return {'status': 0, 'messae': '请求出错{}'.format(str(kwargs))}
    rsp = rsp.content.decode('utf8')
    rsp = json.loads(rsp)
    if rsp['code'] != 200:
        return {'status': 0, 'messae': '请求出错,cookie过期{}'.format(str(kwargs))}
    return {'status': 1, 'data': rsp['data']}
|
# -*- encoding: utf-8 -*-
from bnf import Group, NamedToken, TokenFunctor
from tl import ast
class AttributeAccessSubExpr(Group):
    """Grammar group matching chained attribute access on a sub-expression.

    Matches `subexpr (. attribute [ '(' params ')' ])*` — i.e. an arbitrary
    run of member accesses and method calls — and rebuilds the parsed chain
    into the context's expression stack via the pushMember/pushMethod
    callbacks.
    """
    def __init__(self, subexpr):
        self._subexpr = subexpr
        # Imports are local to avoid circular imports between grammar modules
        from tl.bnf.function_call import FunctionParam
        # from tl.bnf.variable_value import VariableValue
        from tl.bnf.operators import BinaryInfixOperator
        from tl.bnf.variable import Variable
        Group.__init__(self, [
            subexpr,
            # Zero or more `.attr` or `.method(args...)` suffixes
            Group([
                BinaryInfixOperator("."),
                NamedToken('attribute', Variable),
                Group([
                    '(',
                    # Optional comma-separated parameter list
                    Group([
                        FunctionParam,
                        Group([
                            ',',
                            FunctionParam
                        ], min=0, max=-1)
                    ], min=0, max=1),
                    ')',
                    TokenFunctor(self.pushMethod),
                ])
                | TokenFunctor(self.pushMember)
            ], min=0, max=-1)
        ])
    def clone(self):
        # Fresh instance wrapping the same sub-expression grammar
        return AttributeAccessSubExpr(self._subexpr)
    def pushMember(self, context):
        """Emit `<base>.member` (first link) or `.member` (later links)."""
        member = self.getByName('attribute').getToken().id
        context.endExpression()
        if self._is_first:
            # First link carries the base expression itself
            context.getCurrentExpression().extend([self._expr[0], '.', member])
        else:
            context.getCurrentExpression().extend(['.', member])
        # Open a new expression for the next chain link
        self._expr = context.beginExpression()
        self._is_first = False
        return True
    def pushMethod(self, context):
        """Emit a method call link, wrapping parsed params in a FunctionCall."""
        method = self.getByName('attribute').getToken().id
        context.endExpression()
        if self._is_first:
            # self._expr[0] is the base; [2:] skips base and '.' to the params
            context.getCurrentExpression().extend([
                self._expr[0], '.', ast.FunctionCall(method, self._expr[2:])
            ])
        else:
            context.getCurrentExpression().extend([
                '.', [ast.FunctionCall(method, self._expr[1:])]
            ])
        self._expr = context.beginExpression()
        self._is_first = False
        return True
    def match(self, context):
        # Track chain state across the functor callbacks fired during matching
        self._is_first = True
        main = context.beginExpression()
        self._expr = context.beginExpression()
        res = Group.match(self, context)
        context.endExpression()
        if res == True and len(self._expr) > 0:
            # Flush any trailing, un-pushed chain link
            main.append(self._expr)
        context.endExpression()
        context.getCurrentExpression().append(main)
        return res
|
# Read a person's name, birth year and work-card (CTPS) number and register
# them (plus computed age) in a dictionary. If the CTPS is non-zero, also read
# the hiring year and salary, and compute the retirement age assuming 35 years
# of service counted from the hiring year.
from datetime import datetime

# ANSI escape sequences for colored terminal output
cores = {'limpa': '\033[m',
         'bverde': '\033[1;32m',
         'bvermelho': '\033[1;31m',
         # Fix: SGR parameters are separated by ';', not ':' — the original
         # '\033[7:30m' was not a valid escape on most terminals
         # (7 = reverse video, 30 = black foreground)
         'pretoebranco': '\033[7;30m'}

print('-=-'*10)
print(cores['pretoebranco']+'_____INICIO_____'+cores['limpa'])
print('-=-'*10)

dado = dict()
dado['nome'] = str(input('Nome: '))
nasc = int(input('Ano de Nascimento: '))
dado['idade'] = datetime.today().year - nasc
dado['ctps'] = int(input('Carteira de trabalho (0 não tem): '))
if dado['ctps'] != 0:
    dado['contratação'] = int(input('Ano de Contratação: '))
    dado['salário'] = float(input('Salário: R$'))
    # Years until retirement = (hiring year + 35) - current year, added to age
    dado['aposentadoria'] = dado['idade'] + ( (dado['contratação'] + 35) - datetime.today().year )

print(f'{"VALORES CALCULADOS":=^40}')
for k, v in dado.items():
    print(f'- {k} tem o valor {v}')
print('')
print('-=-'*10)
print(cores['pretoebranco']+'______FIM_______'+cores['limpa'])
print(cores['pretoebranco']+'_Code by Rafael_'+cores['limpa'])
print('-=-'*10)
#-*-coding:utf-8 -*-
from __future__ import with_statement
from urllib.request import urlretrieve
from selenium import webdriver
from contextlib import contextmanager
from contextlib import contextmanager
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import signal
import logging
import os
import time
import re
import json
# NOTE(review): this local class shadows selenium.common.exceptions.
# TimeoutException imported above; time_limit() below raises this one.
class TimeoutException(Exception): pass
class Naver_DN(webdriver.Firefox, webdriver.Chrome, webdriver.Ie):
    """Selenium-driven downloader for attachments on a Naver cafe board.

    Inherits several webdriver classes but initializes only the one selected
    by `browser`, so an instance behaves as that single driver.
    """
    def __init__(self, browser, CNF_JSON):
        '''
        browser: browser Name : ie, Firefox, chrome
        CNF_JSON_OBJ: site1['cafe_name']['menuID']['searchType']['searchKeyword']
        '''
        if browser.lower() == "ie":
            webdriver.Ie.__init__(self)
        elif browser.lower() == "chrome":
            webdriver.Chrome.__init__(self)
        elif browser.lower() == "phantomjs":
            webdriver.PhantomJS.__init__(self)
        else:
            webdriver.Firefox.__init__(self)
        self.implicitly_wait(5)
        # self.logger = logger.getLogger('Naver dn logger')
        self.base_URL = 'http://cafe.naver.com/'
        self.cafe_name = CNF_JSON['cafe_name']
        self.menuID = CNF_JSON['menuID']
        self.searchType = CNF_JSON['searchType']
        self.searchKeyword = CNF_JSON['searchKeyword']
        self.menu_iframe = CNF_JSON['id_iframe']
        self.log_ID = CNF_JSON['log_id']
        self.log_pw = CNF_JSON['log_pw']
        # XPath snippets shared by the scraping methods below
        self._txt_box = "//div[@class='atch_file_area']/a[@class='atch_view m-tcol-c']"
        self._txt_cafe_main = "//div[@class='cafe_main']"
        self._txt_files = "//div[@id='attachLayer']/ul/li/span[@class='file_name']"
        self._txt_dn_links = "//div[@id='attachLayer']/ul/li/div[@id='attahc']/a"
        self._txt_title = ".//td/span/span[@class='aaa']"
        self._txt_href = ".//td[@class='board-list']/span/span[@class='aaa']/a[@class='m-tcol-c']"
    def __del__(self):
        # Best-effort cleanup of any orphaned browser processes
        logging.warning(" CLASS OJBECT KILLED")
        os.system('pkill -f firefox')
    def __switch_to_iFrame__(self, _iframe_id):
        """Switch the driver into the 'cafe_main' iframe.

        NOTE(review): _iframe_id is accepted but the iframe id is hard-coded
        to 'cafe_main' — presumably intentional for this cafe; verify.
        """
        self.switch_to_default_content()
        try:
            _iframe = self.find_element_by_xpath("//iframe[@id='cafe_main']")
        except NoSuchElementException as ex:
            # Fix: Python 3 exceptions have no .message attribute
            print(ex)
        self.switch_to_frame(_iframe)
    def __make_folder__(self, _folder):
        '''
        make a folder and create IF NOT EXISTS
        Returns True if the folder was created, False if it already existed.
        '''
        try:
            os.makedirs(_folder)
            return True
        except OSError:
            return False
    def goTomenu(self, _menuID):
        '''
        move to menu by clicking menuID
        '''
        try:
            _menu = self.find_element_by_id(self.menuID)
        except NoSuchElementException as ex:
            # Fix: Python 3 exceptions have no .message attribute
            print(ex)
        _menu.click()
        self.__switch_to_iFrame__(self.menu_iframe)
        logging.debug('SUCCESS TO CHANGE IFRAME')
    def search(self, _type, _kw):
        '''
        search keyword by
        1: title+ content
        2: title
        3: id
        4: content of comment
        5: commentator
        22
        '''
        try:
            # Open the search-type dropdown and arrow down to the wanted entry
            qs = self.find_element_by_xpath("//form[@name='frmSearch']/span[2]/input[1]")
            qs.click()
            for i in range(0, _type):
                qs.send_keys(Keys.ARROW_DOWN)
                qs.send_keys(Keys.ENTER)
            qs.send_keys(Keys.ENTER)
            # Search by keyword
            # time.sleep(1)
            qn = self.find_element_by_id('query')
            qn.send_keys(self.searchKeyword)
            qbtn = self.find_element_by_class_name('btn-search-green')
            qbtn.click()
        except NoSuchElementException as ex:
            # Fix: Python 3 exceptions have no .message attribute
            print(ex)
    def log_in(self, _id, _pw):
        """Log into Naver with the given credentials via the login form."""
        self.find_element_by_xpath("//a[@id = 'gnb_login_button']").click()
        time.sleep(3)
        self.find_element_by_xpath("//input[@id='id']").send_keys(_id)
        _a = self.find_element_by_xpath("//input[@id='pw']")
        _a.send_keys(_pw)
        _a.submit()
        # self.find_element_by_class_name("btn").click()
        time.sleep(2)
        # Dismiss the "keep me logged in" prompt
        self.find_element_by_xpath("//span[@class='btn_cancel']/a").click()
        # self.find_element_by_xpath("//div[@class='login_maintain']/a[2]").click()
        time.sleep(2)
    def __check_exists_by_xpath__(self, xpath):
        """Return True if an element matching xpath exists on the page."""
        try:
            self.find_element_by_xpath(xpath)
        except NoSuchElementException:
            return False
        return True
    def __isExist_Next_page__(self):
        '''
        move to Next page in iFrmae(id='cafe_main') if next page is available
        Returns True when a pagination link after the current page exists.
        '''
        _t = self.find_elements(By.XPATH, "//table[@class='Nnavi']/tbody/tr/td[@class='on']/following-sibling::td/a")
        return len(_t) > 0
    def _get_b_titles(self):
        """Return board titles with trailing comment counts (e.g. '[3]') removed."""
        return [re.sub('\[[0-9]*\]', '', i.text).strip() for i in self.find_elements(By.XPATH, ".//td/span/span[@class='aaa']")]
    def _get_b_numbers(self):
        """Return the bulletin numbers in the current article list."""
        return [i.text for i in self.find_elements(By.XPATH, "//form[@name='ArticleList']/table[@class='board-box']/tbody/tr[@align='center']/td[1]/span")]
    def _get_b_hrefs(self, sw='list'):
        '''
        sw ='list': returns list of Href link addresses
        sw= 'href': returns list Href clickalbe a addresses
        '''
        if sw.lower() == 'list':
            return [i.get_attribute('href') for i in self.find_elements(By.XPATH, ".//td[@class='board-list']/span/span[@class='aaa']/a[@class='m-tcol-c']")]
        else:
            return [i for i in self.find_elements(By.XPATH, ".//td[@class='board-list']/span/span[@class='aaa']/a[@class='m-tcol-c']")]
    def _get_download_links(self):
        """Return the download URLs in the attachment layer."""
        # Fix: 'Bety' was a typo for 'By' (NameError at call time)
        return [i.get_attribute('href') for i in self.find_elements(By.XPATH, "//div[@id='attachLayer']/ul/li/div[@id='attahc']/a[1]")]
    def _goTo_nextPage(self):
        """Click the pagination link for the page after the current one."""
        self.find_element(By.XPATH, "//table[@class='Nnavi']/tbody/tr/td[@class='on']/following-sibling::td/a").click()
    def _get_download_file_nameNlinks_(self):
        '''
        RETURN a list : [(file1, link1),(file2, link2), (file3,link3)...]
        '''
        _txt_dn_arrow = "//div[@class='atch_file_area']/a[@class='atch_view m-tcol-c']"
        _txt_dn_box = "//div[@class='atch_file_area']/div[@id='attachLayer']"
        _txt_cafe_main = "//div[@class='cafe_main']"
        _txt_files = "//div[@id='attachLayer']/ul/li/span[@class='file_name']"
        _txt_dn_links = "//div[@id='attachLayer']/ul/li/div[@id='attahc']/a"
        logging.info("getting nameNlinks")
        # self.refresh()
        # self.__switch_to_iFrame__('cafe_main')
        time.sleep(1)
        while self.__check_exists_by_xpath__(_txt_cafe_main) is True:
            self.__switch_to_iFrame__('cafe_main')
            logging.info("***** Switched to cafe_main")
        self.__switch_to_iFrame__('cafe_main')
        while self.__check_exists_by_xpath__(_txt_dn_arrow) is False:
            # Fix: was 'na.refresh()' — referenced the module-level instance
            # instead of self
            self.refresh()
            while self.__check_exists_by_xpath__(_txt_cafe_main) is True:
                self.__switch_to_iFrame__('cafe_main')
                logging.info("***** Switched to cafe_main")
        print("dn_arrow Founded")
        _dn_box = self.find_element(By.XPATH, _txt_dn_arrow)
        _dn_box.click()
        logging.info("***** dn_arrow_ Clicked")
        # Check whether the option box is clicked and opened.
        _dn_box = self.find_element(By.XPATH, _txt_dn_box)
        while _dn_box.is_displayed() is False:
            logging.info("Box is not displayed")
            self.find_element(By.XPATH, _txt_dn_arrow).click()
            if self.find_element(By.XPATH, _txt_dn_arrow).is_displayed():
                print("***** Box CLICKED")
        _links_ = self.find_elements(By.XPATH, _txt_dn_links)
        _files_ = self.find_elements(By.XPATH, _txt_files)
        time.sleep(1)
        _dn_links = [i.get_attribute('href') for i in _links_]
        _dn_files = [i.text for i in _files_]
        return list(zip(_dn_links, _dn_files))
    def make_b_data_lst(self):
        ''' Return a list of number and Title of bulletin from search Result'''
        _b_lst = []
        while True:
            # time.sleep(2)
            _lst = list(zip(self._get_b_numbers(), self._get_b_titles(), self._get_b_hrefs()))
            _b_lst.extend(_lst)
            _t = self.__isExist_Next_page__()
            if _t:
                print("{0}: {1}".format(_t, len(_b_lst)))
                # Fix: was 'na._goTo_nextPage()' — referenced the module-level
                # instance instead of self
                self._goTo_nextPage()
                time.sleep(1)
            else:
                print("{0}: {1}".format(_t, len(_b_lst)))
                break
        return _b_lst
    def download_files(self, _title='', _lst=''):
        '''
        Download All files from current Opened Download page
        to CURRENT FOLDER
        _lst : list of file and download links
        '''
        logging.info("start DOWNLOADFILE")
        if _lst == '':
            _lst = self._get_download_file_nameNlinks_()
        # _lst = (link, file_nmae)
        print("{0}: founded".format(len(_lst)))
        for a in _lst:
            if self.__make_folder__('./' + _title):
                logging.info("The folder created")
            urlretrieve(a[0], './' + _title + '/' + a[1])
            print("{0}: downloaded ".format(a[1]))
            time.sleep(1)
    def page_has_loaded(self):
        """Return True once the document's readyState is 'complete'."""
        print("Checking if {} page is loaded.".format(self.current_url))
        page_state = self.execute_script('return document.readyState;')
        return page_state == 'complete'
    def page_has_loaded2(self, old_page=None):
        """Return True if the <html> element differs from `old_page`.

        Fix: the original referenced an undefined global `old_page`
        (NameError); it is now an optional argument. With no reference
        element a fresh page cannot be distinguished, so True is returned.
        """
        print("Checking if {} page is loaded.".format(self.current_url))
        try:
            new_page = self.find_element_by_tag_name('html')
            if old_page is None:
                return True
            return new_page.id != old_page.id
        except NoSuchElementException:
            return False
    @contextmanager
    def wait_for_page_load3(self, timeout=3):
        """Context manager: run the body, then wait for the old page to go stale."""
        print("Waiting for page to load self.assertTrue() {}.".format(self.current_url))
        old_page = self.find_element(By.TAG_NAME, 'html')
        yield
        # Fix: staleness_of lives in expected_conditions (imported as EC);
        # the bare name was undefined
        WebDriverWait(self, timeout).until(EC.staleness_of(old_page))
def read_json(f_name):
    """Load and return the JSON configuration stored at f_name."""
    with open(f_name, 'r') as config_file:
        parsed = json.load(config_file)
    return parsed
@contextmanager
def time_limit(seconds):
    """Raise TimeoutException if the managed body runs longer than `seconds`.

    Unix-only: relies on SIGALRM. The pending alarm is always cancelled on
    exit, even when the body raises.
    """
    def _on_alarm(signum, frame):
        raise TimeoutException
    signal.signal(signal.SIGALRM, _on_alarm)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # Driver settings (cafe name, menu id, credentials, ...) come from a
    # local JSON config file
    cnf_jsn = read_json('config.json')
    na = Naver_DN('Firefox', cnf_jsn)
    cafe_url = 'http://cafe.naver.com/violin79/'
    # Log in, open the configured cafe menu and run the configured search
    na.get('https://nid.naver.com/nidlogin.login')
    na.get(na.base_URL + na.cafe_name)
    na.log_in(na.log_ID, na.log_pw)
    na.goTomenu(na.menuID)
    time.sleep(2)
    na.search(na.searchType, na.searchKeyword)
    time.sleep(2)
    # b_num_title : list of na.make_b_data_lst which returns the [{number, title, href}...]
    b_num_title = [x for x in na.make_b_data_lst()]
    time.sleep(1)
    # The download loop below is disabled; it visited each bulletin under a
    # time limit, restarting Firefox on hangs, then downloaded attachments.
    # (NOTE(review): it references `na.log_pin`, which does not exist —
    # presumably log_in was meant.)
    # for n,i in enumerate(b_num_title):
    #     number,title, addr = i
    #     print("go To bulletin body")
    #     print("========== {0} :{1}".format(n, i[1]))
    #     # na.get with the running time limitation
    #     try:
    #         with time_limit(5):
    #             time.sleep(2)
    #             na.get(cafe_url+i[0])
    #     except TimeoutException:
    #         logging.critical('Firefox crushed')
    #         print("{0} xxxxxxxxxxxxxxxxxxxxTimed out!xxxxxxxxxx".format(cafe_url+i[0]))
    #         na.__del__()
    #         # os.system('pkill -f firefox')
    #         time.sleep(2)
    #         na = Naver_DN('Firefox', cnf_jsn)
    #         na.get(cafe_url)
    #         na.log_pin(na.log_ID, na.log_pw)
    #         na.get(cafe_url+i[0])
    #         time.sleep(3)
    #         try:
    #             na.download_files(title)
    #         except:
    #             pass
    #         continue
    #     print("STARTS DOWNLOADFILE")
    #     try:
    #         na.download_files(title)
    #     except:
    #         continue
|
# Imports
import sys
import os
import argparse
import json
import random
import time
import datetime
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertTokenizer, DistilBertTokenizer, ElectraTokenizer, DPRConfig
from datasets import Dataset
# Index for models
model_index = {
'bert': ('bert-base-uncased', BertTokenizer),
'distilbert': ('distilbert-base-uncased', DistilBertTokenizer),
'electra': ('google/electra-small-discriminator', ElectraTokenizer),
'tinybert': ('huawei-noah/TinyBERT_General_4L_312D', BertTokenizer)
}
def preprocess_dataset(dataset_instance, tokenizer, content):
    """
    Function for tokenizing the dataset, used with Huggingface map function.
    Inputs:
        dataset_instance - Instance from the Huggingface dataset
        tokenizer - Tokenizer instance to use for tokenization
        content - Attribute that contains the content of the instance
    Outputs:
        dict - Dict containing the new added columns
    """
    # Tokenize the instance's text field
    encoded = tokenizer(dataset_instance[content], return_tensors='pt')
    # Drop the batch dimension and count the remaining tokens
    ids = encoded['input_ids'].squeeze()
    num_tokens = ids.size()[0]
    # Return the new columns
    return {'length': num_tokens}
def load_dataset(args, path):
    """
    Function for loading the given dataset.
    Inputs:
        args - Namespace object from the argument parser
        path - String representing the location of the .json file
    Outputs:
        questions_dataset - Dataset containing the questions
        passage_dataset - Dataset containing all passages
    """
    # Read the file
    with open(path, 'rb') as f:
        dataset = json.load(f)
    questions = []
    all_passages = []
    # Get all instances from the dataset; passages optionally get their title
    # prepended unless --dont_embed_title was passed
    for instance in dataset:
        questions.append(instance['question'])
        if args.dont_embed_title:
            all_passages.append(instance['positive_ctxs'][0]['text'])
        else:
            all_passages.append(instance['positive_ctxs'][0]['title'] + ' ' + instance['positive_ctxs'][0]['text'])
        for neg_context in (instance['hard_negative_ctxs'] + instance['negative_ctxs']):
            if args.dont_embed_title:
                all_passages.append(neg_context['text'])
            else:
                all_passages.append(neg_context['title'] + ' ' + neg_context['text'])
    # Create a pandas DataFrame for the questions
    question_df = pd.DataFrame(questions, columns=['question'])
    # Create a pandas DataFrame from the passages and remove duplicates.
    # Fix: drop_duplicates returns a new frame (it is not in-place by
    # default); the original discarded the result so duplicates survived.
    passage_df = pd.DataFrame(all_passages, columns=['passage'])
    passage_df = passage_df.drop_duplicates(subset=['passage'])
    # Create Huggingface Dataset from the dataframes
    question_dataset = Dataset.from_pandas(question_df)
    passage_dataset = Dataset.from_pandas(passage_df)
    # Return the datasets
    return question_dataset, passage_dataset
def perform_analysis(args, device):
    """
    Function for performing data analysis: tokenizes train/dev questions and
    passages and stores their token lengths as csv files.
    Inputs:
        args - Namespace object from the argument parser
        device - PyTorch device to train on (currently unused here)
    """
    def compute_lengths(dataset, tokenizer, content):
        # Add a 'length' column per instance and return the result as a DataFrame.
        mapped = dataset.map(
            lambda x: preprocess_dataset(
                x,
                tokenizer=tokenizer,
                content=content,
            ),
            batched=False,
        )
        return mapped.to_pandas()

    train_filename = 'nq-train.json'
    dev_filename = 'nq-dev.json'
    # Load the dataset
    print('Loading data..')
    train_questions, train_passages = load_dataset(args, args.data_dir + train_filename)
    # BUG FIX: this was unpacked into 'dev_question' (singular) while the
    # code below referenced 'dev_questions', raising a NameError at runtime.
    dev_questions, dev_passages = load_dataset(args, args.data_dir + dev_filename)
    print('Data loaded')
    # Load the tokenizer (same pretrained weights for questions and contexts)
    print('Loading tokenizer..')
    model_location, tokenizer_class = model_index[args.model]
    question_tokenizer = tokenizer_class.from_pretrained(model_location)
    context_tokenizer = tokenizer_class.from_pretrained(model_location)
    print('Tokenizer loaded')
    # Encode the training data
    print('Calculating length of training data..')
    train_questions_dataframe = compute_lengths(train_questions, question_tokenizer, 'question')
    train_passages_dataframe = compute_lengths(train_passages, context_tokenizer, 'passage')
    print('Training data lengths calculated')
    # Encode the dev data
    print('Calculating length of dev data..')
    dev_questions_dataframe = compute_lengths(dev_questions, question_tokenizer, 'question')
    dev_passages_dataframe = compute_lengths(dev_passages, context_tokenizer, 'passage')
    print('Dev data lengths calculated')
    # Save all dataframes to csv files
    # NOTE(review): assumes the 'length_csv' directory exists - TODO confirm
    prefix = "length_csv/" + str(args.model)
    train_questions_dataframe.to_csv(prefix + "train_questions_dataframe.csv", index=False)
    train_passages_dataframe.to_csv(prefix + "train_passages_dataframe.csv", index=False)
    dev_questions_dataframe.to_csv(prefix + "dev_questions_dataframe.csv", index=False)
    dev_passages_dataframe.to_csv(prefix + "dev_passages_dataframe.csv", index=False)
def main(args):
    """
    Entry point: seeds the RNGs, picks a device, echoes the run
    configuration and launches the data analysis.
    Inputs:
        args - Namespace object from the argument parser
    """
    # Seed both PyTorch and Python's RNG for reproducibility
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    # Prefer the GPU whenever one is available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Echo the run configuration (one print keeps the output identical)
    banner = [
        '-----TRAINING PARAMETERS-----',
        'Device: {}'.format(device),
        'Model: {}'.format(args.model),
        'Embed title: {}'.format(not args.dont_embed_title),
        'Seed: {}'.format(args.seed),
        '-----------------------------',
    ]
    print('\n'.join(banner))
    # Start analysis
    perform_analysis(args, device)
# Command line arguments parsing
if __name__ == '__main__':
    # Build the CLI; defaults are shown in --help via the formatter class.
    cli = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Model hyperparameters
    cli.add_argument('--model', default='bert', type=str,
                     help='What model to use. Default is bert.',
                     choices=['bert', 'distilbert', 'electra', 'tinybert'])
    # DPR hyperparameters
    cli.add_argument('--dont_embed_title', action='store_true',
                     help='Do not embed titles. Titles are embedded by default.')
    # Training hyperparameters
    cli.add_argument('--data_dir', default='downloads/data/retriever/', type=str,
                     help='Directory where the data is stored. Default is downloads/data/retriever/.')
    cli.add_argument('--seed', default=1234, type=int,
                     help='Seed to use during training. Default is 1234.')
    # Parse the arguments and run the data analysis
    main(cli.parse_args())
|
from devMod import ExtraInfo
class Patient:
    """Per-patient EEG session metadata.

    Class attributes are shared across all instances:
        countx   - running total of seizure sessions registered
        countn   - running total of seizure-free sessions registered
        manifest - maps patient id -> set of history snippets
        dataset  - dataset name used for the ExtraInfo lookup
    """
    countx = 0
    countn = 0
    manifest = {}
    dataset = ''

    def __init__(self, patientID, count, sessionID, EEG):
        self.count = count
        self.id = patientID
        self.fileName = None
        self.fileNameList = []
        # Base session metadata; age/gender are filled in below when the
        # external lookup table knows this patient.
        self.info = {"EEG": EEG, "sessionID": sessionID, "seizureCount": count}
        self.seizure = []
        self.noSeizure = []
        try:
            extra = ExtraInfo.info[Patient.dataset][self.id]
            self.info["age"] = extra['age']
            self.info["gender"] = extra['gender']
        except Exception:
            # Patient absent from the lookup table (or table unavailable)
            self.info["age"] = 'unknown'
            self.info["gender"] = 'unknown'

    def addSession(self, fileName, start, stop, seizureType, edf):
        """Register one recording; seizure sessions carry timing info."""
        if fileName:
            self.fileName = fileName
        if not start:
            # Seizure-free recording: only keep the edf reference.
            self.noSeizure.append(edf)
            Patient.countn += 1
            return
        Patient.countx += 1
        self.seizure.append({
            "fileName": self.fileName,
            "edf": edf,
            "seizure": {
                "start": start,
                "stop": stop,
                "seizureType": seizureType,
            },
        })

    def addInfo(self, description):
        """Store up to the first 50 chars after 'HISTORY:' from a file."""
        try:
            with open(description) as f:
                content = f.read().split('HISTORY:')[-1].strip(' ')[:50]
            existing = Patient.manifest.get(self.id, set())
            Patient.manifest[self.id] = existing.union([content])
        except Exception:
            # Best effort - missing/unreadable files are silently skipped.
            pass
|
from openerp.osv import fields, osv
class voucher_inh(osv.osv):
    """Extend the 'account.voucher' model (legacy OpenERP osv API) with a
    link to a deposit record."""
    _inherit="account.voucher"
    _columns={
        # Many2one link to a 'deposit' record; the second argument is the
        # column label shown in the UI.
        'deposit_id':fields.many2one('deposit','deposit_id')
    }
|
from .blob import Blob, BYTE
from debug import validate
class Raw:
    """Represents a raw data packet (16 DWORDS = 64 bytes).

    This is only used during development.

    The first four bytes form the header (mode, operation, index, flags,
    one byte each); the rest is payload/padding.

    Parameters
    ----------
    mode, operation, index, flags : int, optional
        Header bytes.  Any left as None are taken from the first four
        bytes of ``data`` when ``data`` holds at least 4 bytes.
    data : bytes, optional
        The raw data read from the USB; parsed via ``update``.
    padChar : bytes
        Byte value used to pad the packet when re-encoding.
    """
    # Class-level defaults let readHeader() distinguish "not configured"
    # (None -> learn the byte from the stream) from an expected value.
    mode = None
    operation = None
    def __init__(self, mode: int = None, operation: int = None, index: int = None, flags: int = None,
                 data: bytes = None, padChar: bytes = b'\x00'):
        self.padChar = padChar
        self.padding = b''
        # Fill any missing header field from the first four bytes of data.
        if data is not None and len(data) >= 4:
            if mode is None:
                mode = data[0]
            if operation is None:
                operation = data[1]
            if index is None:
                index = data[2]
            if flags is None:
                flags = data[3]
        assert mode is not None, "Expected 'mode' to be configured"
        assert operation is not None, "Expected 'operation' to be configured"
        assert index is not None, "Expected 'index' to be configured"
        assert flags is not None, "Expected 'flags' to be configured"
        self.mode = BYTE(mode)
        self.operation = BYTE(operation)
        self.index = BYTE(index)
        self.flags = BYTE(flags)
        # When raw bytes were supplied, parse the full packet immediately.
        if data is not None:
            self.update(data)
    def setMode(self, mode: int) -> None:
        # Override the expected mode byte.
        self.mode = BYTE(mode)
    def readHeader(self, blob: Blob) -> None:
        """Consume the 4 header bytes: validate each one against its expected
        value when configured, otherwise learn it from the blob."""
        if self.mode is not None:
            validate(blob.readByte(), self.mode, 'Mode missmatch - expected $1, got $0')
        else:
            self.mode = blob.readByte()
        if self.operation is not None:
            validate(blob.readByte(), self.operation,
                     'Operation missmatch - expected $1, got $0')
        else:
            self.operation = blob.readByte()
        if self.index is not None:
            validate(blob.readByte(), self.index, 'Index missmatch - expected $1, got $0')
        else:
            self.index = blob.readByte()
        if self.flags is not None:
            validate(blob.readByte(), self.flags, 'Flags missmatch - expected $1, got $0')
        else:
            self.flags = blob.readByte()
    def encode(self, blob: Blob) -> None:
        # Default payload encoder is a no-op; derived classes override this.
        return
    def decode(self, blob: Blob) -> None:
        """Reads the packet payload as 15 DWORDs (60 bytes) into attributes
        dword00..dword14; the 4-byte header has already been consumed.
        (The original comment said 62 bytes - the loop reads 15 * 4 = 60.)
        Derived classes should implement this to populate fields.
        Parameters
        ----------
        blob : Blob
            Buffer positioned just past the header.
        Raises
        -------
        AssertionError
            If trying to read more than 64 bytes.
        """
        c = 0
        while c < 15:
            setattr(self, "dword%02d" % c, blob.readDword())
            c += 1
    def to_blob(self) -> Blob:
        """Serialize header + payload (+ any captured padding) into a Blob."""
        blob = Blob(debug=False, padChar=self.padChar)
        blob.writeBytes(self.mode)
        blob.writeBytes(self.operation)
        blob.writeBytes(self.index)
        blob.writeBytes(self.flags)
        # derived classes should implement this
        self.encode(blob)
        if len(self.padding) > 0:
            blob.writeBytes(self.padding)
        return blob
    def dump(self):
        # Hex-dump the serialized packet (delegates to Blob.dump).
        self.to_blob().dump()
    def update(self, byteStr: bytes):
        """Re-parse this packet from byteStr.

        Returns self on success.  If header validation/decoding raises an
        AssertionError, the blob is dumped and an ErrorResponse built from
        the same bytes is returned instead (note the changed return type).
        """
        blob = Blob(byteStr, padChar=self.padChar)
        try:
            self.readHeader(blob)
            self.decode(blob)
            if blob.remain() > 0:
                self.padding = blob.readBytes(blob.remain())
        except AssertionError as error:
            print(error)
            blob.debug = False # suppress the __del__ error checking
            blob.dump()
            return ErrorResponse(byteStr)
        return self
    def displayName(self):
        # Human-readable packet name used by __repr__.
        return 'Packet.Raw'
    def __bytes__(self):
        return bytes(self.to_blob())
    def __getitem__(self, i):
        # Positional access over the instance __dict__ insertion order.
        return getattr(self, list(self.__dict__)[i])
    def __len__(self):
        # Number of instance attributes (not the packet byte length).
        return len(self.__dict__)
    def __repr__(self):
        # str = 'Packet.%s(' % self.__class__.__name__
        str = '%s(' % self.displayName()
        for field in self.__dict__:
            # padChar/padding are bookkeeping, not protocol fields.
            if field in ['padChar', 'padding']:
                continue
            str += "\n    %s: %s" % (field, getattr(self, field))
        return str + "\n)"
class ErrorResponse(Raw):
    """Packet returned when parsing a Raw packet fails; its payload starts
    with an echo of the offending request's header bytes."""

    def __init__(self, data: bytes):
        super().__init__(data=data)

    def displayName(self):
        return 'Packet.ErrorResponse'

    def decode(self, blob: Blob) -> None:
        # The first four payload bytes echo the failed request's header.
        for field in ('prev_mode', 'prev_operation', 'prev_index', 'prev_flags'):
            setattr(self, field, blob.readByte())
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# In[ ]:
get_ipython().magic(u'matplotlib inline')
get_ipython().magic(u"config InlineBackend.figure_format = 'retina'")
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pandas as pd
from pprint import pprint
import seaborn as sns
import itertools
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# In[ ]:
def easy_drop(tdf: pd.DataFrame, *columns):
    """Drop the given columns from *tdf* in place, ignoring missing names.

    Columns may be passed individually or as one list/tuple.  Returns *tdf*
    to allow chaining.
    """
    # Accept easy_drop(df, ['a', 'b']) as well as easy_drop(df, 'a', 'b')
    if len(columns) == 1 and isinstance(columns[0], (list, tuple)):
        columns = columns[0]
    present = [name for name in columns if name in tdf.columns]
    for name in present:
        tdf.drop(name, axis=1, inplace=True)
    return tdf
def as_categorical(tdf, *columns):
    """Convert the given columns of *tdf* to the 'category' dtype in place.

    Columns may be given individually or as a single list/tuple; names not
    present in the frame are silently skipped.
    """
    if len(columns) == 1 and isinstance(columns[0], (list, tuple)):
        columns = columns[0]
    for name in (c for c in columns if c in tdf.columns):
        tdf[name] = tdf[name].astype('category')
def load(filepath):
    """Read a Titanic csv, lower-case the headers and index by passenger id."""
    frame = pd.read_csv(filepath)
    frame.columns = frame.columns.str.lower()
    # verify_integrity guards against duplicated passenger ids
    return frame.set_index('passengerid', verify_integrity=True)
# In[ ]:
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    # Draw the matrix itself plus axis labelling
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Optionally convert counts to per-true-class fractions
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    # Annotate every cell, flipping the text colour on dark cells
    thresh = cm.max() / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(col, row, cm[row, col],
                 horizontalalignment="center",
                 color="white" if cm[row, col] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# In[ ]:
# Load the train/test splits and a combined frame for whole-dataset stats
raw = load('../input/train.csv')
test_df = load('../input/test.csv')
full_df = pd.concat([raw, test_df])
# In[ ]:
# Ages sorted within each survival class; the vertical line marks the
# boundary between non-survivors and survivors
ldf = raw.sort_values(['survived', 'age']).reset_index(drop=1)
xline = ldf[ldf.survived == 0].survived.count()
ldf.age.plot()
plt.axvline(xline, c='g');
# In[ ]:
raw.head()
# In[ ]:
# Age distribution, with missing ages shown as 0
sns.distplot(full_df.age.fillna(0));
# In[ ]:
# Extract the honorific (Mr./Mrs./...) from "Lastname, Title rest" names
full_df['title'] = full_df.name.str.extract(r'^[^,]+, ([\S]*) .*$', expand=False).astype('category')
full_df.groupby('title').survived.mean().to_frame()
# # Base model
# In[ ]:
# Majority-class baseline accuracy
allways_yes = raw.survived.sum() / raw.survived.count()
print('Base accuracy: {:.2f}'.format(max(allways_yes, 1-allways_yes)))
# # Feature engineering
# In[ ]:
# Surname, and the number of same-surname relatives who survived
# (the transform('sum') minus the passenger's own survival flag)
full_df['lastname'] = full_df.name.str.extract("^([a-zA-Z \-']+),", expand=False)
full_df['relatives_survived'] = full_df.groupby('lastname', as_index=False) .survived.transform('sum').subtract(full_df.survived, axis=0)
# In[ ]:
def generate_features(tdf: pd.DataFrame):
    """Build the feature matrix used by the classifier.

    Works on a copy, so *tdf* itself is left untouched; returns only the
    engineered feature columns.

    NOTE(review): reads the module-level globals ``raw`` (age bin upper
    bound) and ``full_df`` (relatives alignment) - call only after those
    are loaded.
    """
    features = []
    features.append('pclass')
    features.append('sibsp')
    features.append('parch')
    features.append('age')
    features.append('fare')
    # features.append('survived')
    tdf = tdf.copy()
    tdf['age'] = tdf.age.fillna(tdf.age.median())
    # BUG FIX: missing fares were previously filled with the AGE median
    # (tdf.age.median()); use the fare median instead.
    tdf['fare'] = tdf.fare.fillna(tdf.fare.median())
    tdf['third_class'] = tdf.pclass == 3
    features.append('third_class')
    tdf['male'] = (tdf.sex == 'male')
    features.append('male')
    # First letter of the cabin as a categorical code (-1 when missing)
    tdf['cabin_letter_cat'] = tdf.cabin.str[0].astype('category').cat.codes
    features.append('cabin_letter_cat')
    tdf['embarked_code'] = tdf.embarked.astype('category').cat.codes
    features.append('embarked_code')
    # Fare rounded to the nearest 10 (fillna is a no-op after the fill above)
    tdf['rounded_fare'] = tdf.fare.fillna(tdf.fare.mean()).round(decimals=-1).astype(np.int32)
    features.append('rounded_fare')
    # +0.1 keeps log() finite for zero fares
    tdf['fare_log'] = np.log(tdf.fare.fillna(tdf.fare.median())+0.1)
    features.append('fare_log')
    tdf['age_f'] = tdf.age.fillna(tdf.age.mean())
    features.append('age_f')
    # 5-year age buckets spanning the training-set age range
    tdf['age_cat'] = pd.cut(tdf.age, np.arange(0, raw.age.max()+1, 5)).cat.codes
    features.append('age_cat')
    tdf['words_in_name'] = tdf.name.str.split().apply(lambda x: len(x))
    features.append('words_in_name')
    # Same-surname relatives who survived, excluding the passenger
    tdf['lastname'] = tdf.name.str.extract("^([a-zA-Z \-']+),", expand=False)
    tdf['relatives_survived'] = tdf.groupby('lastname', as_index=False) .survived.transform('sum').subtract(full_df.survived.fillna(0), axis=0)
    features.append('relatives_survived')
    tdf['title_cat'] = tdf.name.str.extract(r'^[^,]+, ([\S]*) .*$', expand=False).astype('category').cat.codes
    features.append('title_cat')
    return tdf[features].copy()
# # Training
# In[ ]:
# Rebuild the combined frame, then take only the training rows as X
test_df = load('../input/test.csv')
full_df = pd.concat([raw, test_df])
X = generate_features(full_df).loc[raw.index, :]
y = raw.survived
# scaler = StandardScaler()
# X = scaler.fit_transform(X)
model = RandomForestClassifier(max_depth=10,
                               n_estimators=50,
                               max_features='auto',
                               criterion='gini',
                               random_state=42, n_jobs=-1)
# Stratified 70/30 split keeps the class balance in both halves
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=42, stratify=y)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('Mean accuracy: {:.4f}'.format(model.score(X_test, y_test)))
print(classification_report(y_test, y_pred, target_names=['died', 'survived']))
plot_confusion_matrix(confusion_matrix(y_test, y_pred), ['died', 'survived'])
# NOTE(review): refitting on the test split here looks unintentional - it
# leaks the held-out data into the feature-importance analysis below; confirm.
model.fit(X_test, y_test);
# In[ ]:
# Print feature importances aligned to the longest column name
max_len = X.columns.str.len().max()
for imp, f in sorted(zip(model.feature_importances_, X.columns)): print('{:{len}}: {:.3f}'.format(f, imp, len=max_len))
importances = model.feature_importances_
indices = np.argsort(importances)[::-1]
# Error bars: spread of the importance across the individual trees
std = np.std([tree.feature_importances_ for tree in model.estimators_], axis=0)
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices], color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), X.columns[indices], rotation=90)
plt.xlim([-1, X.shape[1]]);
|
from django.core.validators import ValidationError
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from rest_framework import serializers
from formshare.libs.models.share_xform import ShareXForm
from formshare.libs.permissions import ROLES
from formshare.libs.serializers.fields.xform_field import XFormField
class ShareXFormSerializer(serializers.Serializer):
    """Serializer for sharing an XForm with a user under a given role."""

    xform = XFormField()
    username = serializers.CharField(max_length=255)
    role = serializers.CharField(max_length=50)

    def restore_object(self, attrs, instance=None):
        """Update *instance* in place when given, else build a new ShareXForm."""
        if instance is None:
            return ShareXForm(**attrs)
        instance.xform = attrs.get('xform', instance.xform)
        instance.username = attrs.get('username', instance.username)
        instance.role = attrs.get('role', instance.role)
        return instance

    def validate_username(self, attrs, source):
        """Check that the username exists"""
        username = attrs[source]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            raise ValidationError(_(u"User '%(value)s' does not exist."
                                    % {"value": username}))
        return attrs

    def validate_role(self, attrs, source):
        """check that the role exists"""
        role = attrs[source]
        if role in ROLES:
            return attrs
        raise ValidationError(_(u"Unknown role '%(role)s'."
                                % {"role": role}))
|
from turtle import Turtle
class Ball(Turtle):
    """
    A class used to represent a Ball.
    ...
    Attributes
    ----------
    x_offset: int
        the amount to move the ball in
        the x direction.
    y_offset: int
        the amount to move the ball in
        the y direction
    Methods
    -------
    move()
        moves the ball and
        changes direction of the ball
        if it collides with top or bottom
        walls.
    bounce()
        changes the x direction of the ball
    reset_ball()
        moves the ball back to the starting
        position
    """

    def __init__(self):
        super().__init__()
        # Visual setup: white circle, no pen trail, instant animation
        self.shape('circle')
        self.color('white')
        self.penup()
        self.speed('fastest')
        self.shapesize(stretch_wid=1, stretch_len=1)
        # Per-tick displacement along each axis
        self.x_offset = 10
        self.y_offset = 10

    def move(self):
        """Advance the ball one step, reflecting vertically whenever it
        touches the top or bottom wall (|y| >= 280)."""
        if abs(self.ycor()) >= 280:
            self.y_offset = -self.y_offset
        self.goto(x=self.xcor() + self.x_offset, y=self.ycor() + self.y_offset)

    def bounce(self):
        """Reverse the horizontal direction of the ball."""
        self.x_offset = -self.x_offset

    def reset_ball(self):
        """Recenter the ball and send it back toward the last scorer."""
        self.goto(x=0, y=0)
        self.x_offset = -self.x_offset
|
from http import HTTPStatus
from fastapi import HTTPException, Request
from slack_sdk.signature import SignatureVerifier
from app.settings import settings
async def verify_signature(request: Request) -> bool:
    """Validate the request's Slack signature.

    ref: https://api.slack.com/authentication/verifying-requests-from-slack

    Returns True for a valid signature; raises a 403 HTTPException otherwise.
    """
    verifier = SignatureVerifier(settings.SLACK_SIGNING_SECRET)
    body = await request.body()
    if not verifier.is_valid_request(body, dict(request.headers)):
        raise HTTPException(HTTPStatus.FORBIDDEN)
    return True
|
# Dictionary basics: creation, lookup, nesting, fromkeys and iteration.
my_dict = {}
# tuple = ()
# list = []
print(my_dict)
# Keys may mix types (strings and ints here)
user = {'name': 'xyz', 'age': 20, 2: 'xy'}
print(user)
my_dict = dict({1: 'apple', 2: 'ball'})
print(my_dict)
# .get() returns None instead of raising when the key is absent
print(user.get('name'))
print(len(user))
person = {
    'first_name': 'Asabeneh',
    'last_name': 'Yetayeh',
    'age': 250,
    'country': 'Finland',
    'is_marred': True,
    'skills': ['JavaScript', 'React', 'Node', 'MongoDB', 'Python'],
    'address': {
        'street': 'Space street',
        'zipcode': '02210'
    }
}
# len() counts only top-level keys (nested dicts count as one)
print(len(person))
print(person['first_name'])
print(person['skills'][2])
# Chained subscripts reach into the nested dict
print(person['address']['street'])
person = {
    'first_name': 'Asabeneh',
    'last_name': 'Yetayeh',
    'age': 250,
    'country': 'Finland',
    'is_marred': True,
    'skills': ['JavaScript', 'React', 'Node', 'MongoDB', 'Python'],
    'address': {
        'street': 'Space street',
        'zipcode': '02210'
    }
}
# Adding a new key and mutating a list value in place
person['job_title'] = 'Instructor'
person['skills'].append('HTML')
print(person)
fruit = {"orange": "a sweet, orange, citrus fruit",
         "apple": "good for making cider",
         "lemon": "a sour, yellow, citrus fruit",
         "grape": "a small, sweet fruit growing in bunches",
         "lime": "a sour, green citrus fruit"
         }
# Assigning to an existing key replaces its value
fruit['lemon'] = "great with tequila"
# while True:
#     dict_key = input("Please enter a fruit: ")
#     if(dict_key == 'quit'):
#         break
#     description = fruit.get(dict_key)
#     print(description)
print(fruit.values())
print(fruit.items())
keys = {'a', 'e', 'i', 'o', 'u'} # set
# fromkeys with no value maps every key to None
vowels = dict.fromkeys(keys)
print(vowels)
keys = {'a', 'e', 'i', 'o', 'u'}
vowels = dict.fromkeys(keys, 'xyz')
print(vowels)
keys = {'a', 'e', 'i', 'o', 'u'}
# Caution: every key shares the SAME list object here
vowels = dict.fromkeys(keys, ['English', 'Hindi'])
print(vowels)
vowels = dict.fromkeys(['English', 'Hindi'], 'xyz')
print(vowels)
fruit = {"orange": "a sweet, orange, citrus fruit",
         "apple": "good for making cider",
         "lemon": "a sour, yellow, citrus fruit",
         "grape": "a small, sweet fruit growing in bunches",
         "lime": "a sour, green citrus fruit"
         }
print('Loop'.center(50, '*'))
print(fruit.items())
# items() yields (key, value) tuples which are unpacked inside the loop
for snack in fruit.items():
    item, description = snack
    print(item + ' is ' + description)
|
# coding=utf-8
class DictionaryBuildError(Exception):
    """Error raised while building the dictionary."""
    pass


class WordBuildError(Exception):
    """Error raised while building a word entry."""
    pass


class MetadataError(Exception):
    """Error raised for missing or malformed metadata."""
    pass


class WordComponentsError(Exception):
    """Error raised for missing or malformed word components."""
    pass


class NotOTMJson(Exception):
    """Error raised when the input is not OTM-JSON."""
    # CONSISTENCY FIX: the original used a bare '...' body here while its
    # siblings used 'pass'; make all five uniform and documented.
    pass
|
# encoding: utf-8
"""
Created on 2014.05.26
@author: Allen
"""
import os
import sys
import shutil
from collections import OrderedDict
from importlib import import_module
from startpro.core.topcmd import TopCommand
from startpro.core.settings import MAIN_PATH, TEMPLATE_PACKAGE
from startpro.core import settings
from startpro.core.utils.opts import load_script_temp
# CLI options shown by Command.help(); maps flag -> description string.
options = OrderedDict()
# BUG FIX: the original line ended with a trailing comma, which made this
# value the 1-tuple ("main package name",) instead of a plain string, so
# help() printed it with parentheses and quotes.
options['-name'] = "main package name"
options['-i'] = "package functions include(if more than one, split by comma)"
options['-e'] = "package functions exclude(if more than one, split by comma)"
# fix to hooks collect
options['-add-data'] = "py-installer add-data string [such:'SRC:DEST,SRC:DEST']"

# PyInstaller .spec template; the #PLACEHOLDER# tokens are substituted by
# Command.run() before the file is written out.
SPEC_CONTENT = '''# -*- mode: python -*-
block_cipher = None
a = Analysis(['#PY_NAME#'],
             pathex=['#PATHEX#'],
             binaries=[],
             datas=#DATA_FILE#,
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
          cipher=block_cipher
          )
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          name='#PKG_NAME#',
          debug=False,
          strip=None,
          upx=True,
          console=True )
'''
class Command(TopCommand):
    """Package the current project into a single executable via PyInstaller:
    copies the template main script, injects imports for the selected
    modules, writes a .spec file and invokes `pyinstaller -F`."""
    def __init__(self):
        """
        Constructor
        """
    def run(self, **kwargs):
        """Execute the packaging flow; all inputs arrive via **kwargs
        ('name', 'i', 'e', 'add-data', 'paths', 'load_paths')."""
        try:
            # Locate the installed template package shipped with startpro
            mod = import_module(MAIN_PATH)
            src = mod.__path__[0]
            path = os.path.join(src, TEMPLATE_PACKAGE)
            # Package name: CLI flag first, then the config file fallback
            name = kwargs.get('name', None)
            if not name:
                if settings.CONFIG:
                    name = settings.CONFIG.get_config('package', 'name')  # @UndefinedVariable
            if not name:
                return
            print('[INFO]:package name:{0}'.format(name))
            py_name = "%s.py" % name
            path_ex = os.getcwd()
            # main PY: copy the template entry script into the working dir
            dst = os.path.join(path_ex, py_name)
            shutil.copyfile(path, dst)
            # Build (pattern, include?) pairs from the -i / -e CSV options
            patterns = []
            include_regex = [(r, True) for r in filter(lambda x: x, kwargs.get('i', '').split(','))]
            exclude_regex = [(r, False) for r in filter(lambda x: x, kwargs.get('e', '').split(','))]
            patterns.extend(include_regex)
            patterns.extend(exclude_regex)
            if patterns:
                load_paths = []
                scripts = load_script_temp()
                for k, v in scripts.items():
                    for r, b in patterns:
                        p = v.get('path')
                        n = v.get('name')
                        if r in p or r in n:
                            if b:
                                load_paths.append(p)
                            break
                        # NOTE(review): both branches break on the first
                        # pattern, so only patterns[0] is ever tested per
                        # script - looks unintended; confirm.
                        else:
                            break
            else:
                # No filters given: caller supplies the module list directly
                load_paths = kwargs['load_paths']
            for r in load_paths:
                print('[INFO]:include:[%s]' % r)
            # Inject "import X" lines into the copied main script
            self.update(dst, ["import %s" % r for r in load_paths])
            # configure: persist the load list into the project config
            cfg = os.path.join(path_ex, settings.MAIN_CONFIG)
            settings.CONFIG.set_config("package", "load", str(kwargs.get('paths', '')))
            print('[INFO]:package set load:{0}'.format(kwargs.get('paths')))
            # py installer: data files bundled into the executable
            pkg_name = name
            data_file = [
                (os.path.join(src, 'VERSION'), 'startpro'),
                (cfg, 'startpro'),
                (path, 'startpro/template/package.py')
            ]
            # update extend hooks ('SRC:DEST,SRC:DEST' CSV of extra files)
            more_file = kwargs.get('add-data', '').strip()
            # parse add-file
            if more_file:
                for r in more_file.split(','):
                    r = r.split(':')
                    data_file.append((r[0], r[1]))
            # Substitute the placeholders in the module-level spec template.
            # NOTE(review): this permanently mutates the global
            # SPEC_CONTENT, so run() is effectively single-shot - confirm.
            global SPEC_CONTENT
            SPEC_CONTENT = SPEC_CONTENT.replace("#PY_NAME#", py_name)
            SPEC_CONTENT = SPEC_CONTENT.replace("#PATHEX#", path_ex)
            SPEC_CONTENT = SPEC_CONTENT.replace("#DATA_FILE#", str(data_file))
            SPEC_CONTENT = SPEC_CONTENT.replace("#PKG_NAME#", pkg_name)
            spec = dst.replace(".py", ".spec")
            with open(spec, 'w') as f:
                f.write(SPEC_CONTENT)
                f.flush()
            # Hand the generated spec to PyInstaller (one-file build)
            os.system("pyinstaller -F {}".format(spec))
            # Clean the transient 'load' key out of the config again
            settings.CONFIG.remove_option("package", "load")
            print('[INFO]:package clean load')
            # os.remove(spec)
            print("[INFO]:package:[%s]" % os.path.join(os.path.join(path_ex, "dist"), pkg_name))
        except Exception:
            s = sys.exc_info()
            print('[ERROR]:pkg %s on line %d.' % (s[1], s[2].tb_lineno))
    @staticmethod
    def update(main_py, res):
        """Rewrite *main_py*, replacing everything between the
        '# [LOAD MODULE START]' and '# [LOAD MODULE END]' markers with the
        lines in *res*."""
        lines = []
        start = False
        end = False
        with open(main_py, 'r') as f:
            for line in f:
                if line.startswith("# [LOAD MODULE END]"):
                    end = True
                # Copy lines before the START marker and after the END marker;
                # skip the stale block in between.
                if not start:
                    lines.append(line)
                else:
                    if end:
                        lines.append(line)
                if line.startswith("# [LOAD MODULE START]"):
                    start = True
                    lines.append("%s\n" % "\n".join(res))
        with open(main_py, 'w') as f:
            f.writelines("".join(lines))
            f.flush()
    def help(self, **kwargvs):
        """Print the available options table (see module-level `options`)."""
        print('')
        print("Available options:")
        for name, desc in sorted(options.items()):
            print("  %-13s %s" % (name, desc))
|
"""Массив размером 2m + 1, где m – натуральное число, заполнен случайным образом. Найдите в массиве медиану. Медианой
называется элемент ряда, делящий его на две равные части: в одной находятся элементы, которые не меньше медианы,
в другой – не больше медианы. Задачу можно решить без сортировки исходного массива. Но если это слишком сложно,
то используйте метод сортировки, который не рассматривался на уроках """
from random import randint
m = int(input('(вариант 1) Введите число для задания размера массива: '))
massive = [randint(0, 100) for j in range(2 * m + 1)]
print(f'исходный массив: \n {massive} ')
for i in range(len(massive)):
maxi = 0
mini = 0
check = massive[0]
massive.pop(0)
for el in massive:
if maxi > m or mini > m: # Если число больше или меньше половины чисел + 1
continue # Это не наше число, прерываем дальнейшее сравнение
if check >= el: # Число будет медианой, если оно больше и меньше
maxi += 1 # Одинакового кол-ва чисел в массиве
elif check <= el: # Если на данной итерации число равно числу в массиве
mini += 1 # Мы прибавим по единице и в maxi и в mini
massive.append(check) # Если достигли такого условия - выходим из программы, медиана найдена
if maxi == mini:
print(f'{check} является медианой')
break
"""---------Вариант 2---------"""
m = int(input('(вариант 2) Введите число для задания размера массива: '))
massive = [randint(0, 100) for j in range(2 * m + 1)]
print(f'исходный массив: \n {massive} ')
for i in range(m): # Медиана должна быть меньше или равна половине чисел массива
massive.remove(max(massive)) # Поэтому отбросим ровно столько максимумов из массива.
print(f'{max(massive)} является медианой') # Следующим максимумом будет медиана.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 10:45:12 2021
@author: sanjeevahuja
making change
"""
import sqlite3
import collections
keys = []
newlist= []
def iterate_over(data):
    """Collect every key appearing anywhere in *data* into the module-level
    ``keys`` list, preserving first-seen order without duplicates.

    *data* is a list of dicts whose values may themselves be lists of dicts
    (nested records are walked recursively).
    """
    for row in data:
        for key, value in row.items():
            if key not in keys:
                keys.append(key)
            # Recurse into nested record lists.
            # BUG FIX: the original peeked at value[0] without checking the
            # list was non-empty, raising IndexError on an empty child list.
            if isinstance(value, list) and value and isinstance(value[0], dict):
                iterate_over(value)
def transform_data(data):
    """Flatten rows that contain nested record lists.

    Scalar fields accumulate in a shared dict that carries over from row to
    row; every nested dict found inside a list-valued field produces one
    output row merging the accumulated scalars with that nested record.  If
    no nested lists occur at all, a single row of the accumulated scalars
    is returned.
    """
    scalars = {}
    flattened = []
    for row in data:
        # Pass 1: fold this row's scalar fields into the running dict.
        for key, value in row.items():
            if type(value) is not list:
                scalars[key] = value
        # Pass 2: expand each nested record list into output rows.
        for key, value in row.items():
            if type(value) is list:
                for child in value:
                    flattened.append({**scalars, **child})
    if not flattened:
        flattened.append({**scalars})
    return flattened
class fetch_db(object):
    """
    This class is to define common functions for db processing:
    connecting to a sqlite database and creating/dropping/filling/printing
    a table whose columns come from the module-level ``keys`` list.
    """
    def __init__(self, table_name, data):
        """
        The constructor for db class.
        NOTE(review): despite the name, *table_name* is used as the sqlite
        database FILE name here - confirm intent.
        Side effect: populates the module-level ``keys`` via iterate_over().
        """
        self.db = sqlite3.connect(table_name)
        self.cursor = self.db.cursor()
        iterate_over(data)
        print(keys)
    def drop_table (self, table_name):
        """
        Drop *table_name* from the database; errors are printed, not raised.
        NOTE(review): the SQL is built via str.format - do not pass
        untrusted table names (SQL injection risk).
        """
        try:
            # Find all keys
            # Print table definition
            drop_table = """Drop TABLE {0}""".format(table_name)
            self.cursor.execute(drop_table)
        except Exception as E:
            print('Error :', E)
        else:
            print('{0} table dropped'.format(table_name))
    def create_table (self, data, table_name):
        """
        Create *table_name* with one VARCHAR column per entry in the
        module-level ``keys`` list; errors are printed, not raised.
        """
        try:
            # Print table definition
            create_table = """CREATE TABLE {0}(
            {1}
            );""".format(table_name,",\n            ".join(map(lambda key: "{0} VARCHAR".format(key), keys)))
            self.cursor.execute(create_table)
            print(create_table)
        except Exception as E:
            print('Error :', E)
        else:
            print('table created')
    def save_data(self, data, table_name):
        """
        Flatten *data* via transform_data() and insert one row per record;
        missing keys become NULL.  Commits only if every insert succeeded.
        NOTE(review): values are interpolated into the SQL string - prefer
        parameterized queries ("?" placeholders) for untrusted data.
        """
        try:
            data = transform_data(data)
            for row in data:
                insert_query = """INSERT INTO {0} VALUES({1});""".format(table_name,
                ",".join(map(lambda key: '"{0}"'.format(row[key]) if key in row else "NULL", keys)))
                print(insert_query)
                self.cursor.execute(insert_query)
        except Exception as E:
            print('Error :', E)
        else:
            self.db.commit()
            print('data inserted')
    def print_data(self, table_name):
        """
        Print every row of *table_name*, then close the connection.
        NOTE(review): closing self.db here makes this a one-shot call.
        """
        try:
            cursor = self.db.cursor()
            print("Fetching all the records from {0} table".format(table_name))
            cursor.execute("""SELECT * FROM {0}""".format(table_name))
        except Exception as E:
            print ('Error: ', E)
        else:
            for row in cursor.fetchall():
                print (row)
            self.db.close()
|
#!/usr/bin/env python
import os
import sys
# Make both this directory and its parent importable so the local pyspecs
# package is found regardless of the invocation directory.
CURRENT_PATH = os.path.dirname(__file__)
PARENT_PATH = os.path.dirname(CURRENT_PATH)
sys.path.append(CURRENT_PATH)
sys.path.append(PARENT_PATH)
import argparse
from pyspecs import _idle
import logging
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Pyspecs test runner')
    parser.add_argument('path', nargs='?', default=os.getcwd(),
                        help='Directory to be processed')
    parser.add_argument('-w', '--watch', action='store_true', default=False,
                        help='watch files and run tests under any change')
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='Switch verbose mode on')
    args = parser.parse_args()
    if args.verbose:
        # Verbose mode: debug level plus a timestamped log format
        logging.getLogger().setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        logging.getLogger().handlers[0].formatter = formatter
    if args.watch:
        # Keep watching the directory and rerun tests on every change
        _idle.watch(args.path)
    else:
        _idle.run(args.path)
|
import torch
import torch.nn as nn
import torch.optim as optim
import os
import numpy as np
from sklearn.metrics import accuracy_score
import sampler
class Solver:
    def __init__(self, args, test_dataloader):
        """Store run configuration, loss functions and the adversary sampler.

        Inputs:
            args - parsed CLI namespace; must provide at least ``budget``
                   (later methods also read ``cuda``, ``train_iterations``,
                   ``num_vae_steps``, ``beta``, ``adversary_param``)
            test_dataloader - dataloader used for evaluation
        """
        self.args = args
        self.test_dataloader = test_dataloader
        # print(len(self.test_dataloader))
        # exit(len(self.test_dataloader))
        self.bce_loss = nn.BCELoss()
        self.mse_loss = nn.MSELoss()
        self.ce_loss = nn.CrossEntropyLoss()
        # Active-learning sampler that picks which points to query next
        self.sampler = sampler.AdversarySampler(self.args.budget)
def read_data(self, dataloader, labels=True):
if labels:
while True:
for img, label, _ in dataloader:
yield img, label
else:
while True:
for img, _, _ in dataloader: #this INFINITELY yields images, interestingly! (oh it just looks around, when it sees everyting!)
yield img
'''
Now, we will simply run a new task model at the very end, that takes the EMBEDDINGS as inputs.
And check the performance.
We can also: train this jointly. But that can be investigated later
'''
def train(self, querry_dataloader, task_model, vae, discriminator, unlabeled_dataloader):
from tqdm import tqdm
labeled_data = self.read_data(querry_dataloader)
unlabeled_data = self.read_data(unlabeled_dataloader, labels=False)
optim_vae = optim.Adam(vae.parameters(), lr=5e-4)
optim_task_model = optim.Adam(task_model.parameters(), lr=5e-4)
optim_discriminator = optim.Adam(discriminator.parameters(), lr=5e-4)
vae.train()
discriminator.train()
task_model.train()
if self.args.cuda:
vae = vae.cuda()
discriminator = discriminator.cuda()
task_model = task_model.cuda()
change_lr_iter = self.args.train_iterations // 25
for iter_count in tqdm(range(self.args.train_iterations)):
if iter_count is not 0 and iter_count % change_lr_iter == 0:
for param in optim_vae.param_groups:
param['lr'] = param['lr'] * 0.9
for param in optim_task_model.param_groups:
param['lr'] = param['lr'] * 0.9
for param in optim_discriminator.param_groups:
param['lr'] = param['lr'] * 0.9
labeled_imgs, labels = next(labeled_data)
unlabeled_imgs = next(unlabeled_data)
if self.args.cuda:
labeled_imgs = labeled_imgs.cuda()
unlabeled_imgs = unlabeled_imgs.cuda()
labels = labels.cuda()
# task_model step
# preds = task_model(labeled_imgs)
# task_loss = self.ce_loss(preds, labels)
# optim_task_model.zero_grad()
# task_loss.backward()
# optim_task_model.step()
# VAE step
for count in (range(self.args.num_vae_steps)):
recon, z, mu, logvar = vae(labeled_imgs)
unsup_loss = self.vae_loss(labeled_imgs, recon, mu, logvar, self.args.beta)
unlab_recon, unlab_z, unlab_mu, unlab_logvar = vae(unlabeled_imgs)
transductive_loss = self.vae_loss(unlabeled_imgs,
unlab_recon, unlab_mu, unlab_logvar, self.args.beta)
labeled_preds = discriminator(mu)
unlabeled_preds = discriminator(unlab_mu)
lab_real_preds = torch.ones(labeled_imgs.size(0))
unlab_real_preds = torch.ones(unlabeled_imgs.size(0))
if self.args.cuda:
lab_real_preds = lab_real_preds.cuda()
unlab_real_preds = unlab_real_preds.cuda()
dsc_loss = self.bce_loss(labeled_preds, lab_real_preds) + \
self.bce_loss(unlabeled_preds, unlab_real_preds)
total_vae_loss = unsup_loss + transductive_loss + self.args.adversary_param * dsc_loss
optim_vae.zero_grad()
total_vae_loss.backward()
optim_vae.step()
# sample new batch if needed to train the adversarial network
if count < (self.args.num_vae_steps - 1):
labeled_imgs, _ = next(labeled_data)
unlabeled_imgs = next(unlabeled_data)
if self.args.cuda:
labeled_imgs = labeled_imgs.cuda()
unlabeled_imgs = unlabeled_imgs.cuda()
labels = labels.cuda()
# Discriminator step
for count in (range(self.args.num_adv_steps)):
with torch.no_grad():
_, _, mu, _ = vae(labeled_imgs)
_, _, unlab_mu, _ = vae(unlabeled_imgs)
labeled_preds = discriminator(mu)
unlabeled_preds = discriminator(unlab_mu)
lab_real_preds = torch.ones(labeled_imgs.size(0))
unlab_fake_preds = torch.zeros(unlabeled_imgs.size(0))
if self.args.cuda:
lab_real_preds = lab_real_preds.cuda()
unlab_fake_preds = unlab_fake_preds.cuda()
dsc_loss = self.bce_loss(labeled_preds, lab_real_preds) + \
self.bce_loss(unlabeled_preds, unlab_fake_preds)
optim_discriminator.zero_grad()
dsc_loss.backward()
optim_discriminator.step()
# sample new batch if needed to train the adversarial network
if count < (self.args.num_adv_steps - 1):
labeled_imgs, _ = next(labeled_data)
unlabeled_imgs = next(unlabeled_data)
if self.args.cuda:
labeled_imgs = labeled_imgs.cuda()
unlabeled_imgs = unlabeled_imgs.cuda()
labels = labels.cuda()
if iter_count % 1000 == 0:
print('Current training iteration: {}'.format(iter_count))
# print('Current task model loss: {:.4f}'.format(task_loss.item()))
print('Current vae model loss: {:.4f}'.format(total_vae_loss.item()))
print('Current discriminator model loss: {:.4f}'.format(dsc_loss.item()))
# We need to generate the embeddings, and the dataset to do iteration over
labeled_data = self.read_data(querry_dataloader)
# for i, labeled_data_batch in enumerate(labeled_data ): # just need to encode these guys
# labeled_imgs, labels = labeled_data_batch
# recon, z, mu, logvar = vae(labeled_imgs)
# we can easily just do the inference here, and then keep doing this in turn
# train the task model on the embeddings (of the labelled data)
# also need to run for several epochs.
# print(len(querry_dataloader))
# print(len(labeled_data))
NUM_EPOCHS = 25
from tqdm import tqdm
total_task_loss = 0
total_examples = 0
# for iter_count in tqdm(range(self.args.train_iterations)):
for epoch in range(NUM_EPOCHS):
for labeled_data in tqdm(querry_dataloader):
# labeled_imgs, labels = next(labeled_data)
labeled_imgs, labels,sample_idx = labeled_data
# print(sample_idx)
# print(sample_idx.shape)
if self.args.cuda:
labeled_imgs = labeled_imgs.cuda()
labels = labels.cuda()
recon, z, mu, logvar = vae(labeled_imgs)
# now, we just need to train a classifier on these datapoints; also need to associate the labels then
# compute loss
X = torch.cat((mu, logvar),1) #assuming batch size first, ambient space dimension second
y = labels
total_examples += len(X)
preds = task_model(X)
task_loss = self.ce_loss(preds, labels)
total_task_loss += task_loss.item()
optim_task_model.zero_grad()
task_loss.backward()
optim_task_model.step()
# if iter_count %100:
# print("Loss on iter_count {} is {}".format(100*iter_count, total_task_loss/len(total_examples )))
# if iter_count %100:
# print("Loss on epoch {} is {}".format(epoch, total_task_loss/total_examples ))
final_accuracy = self.test_via_embedding(task_model, vae)
return final_accuracy, vae, discriminator
def sample_for_labeling(self, vae, discriminator, unlabeled_dataloader):
querry_indices = self.sampler.sample(vae,
discriminator,
unlabeled_dataloader,
self.args.cuda)
return querry_indices
def test_via_embedding(self, task_model, vae):
task_model.eval()
vae.eval()
total, correct = 0, 0
for imgs, labels in self.test_dataloader:
if self.args.cuda:
imgs = imgs.cuda()
with torch.no_grad():
# print("calling the test func")
# print(imgs.shape)
recon, z, mu, logvar = vae(imgs)
X = torch.cat((mu, logvar), 1) # assuming batch size first, ambient space dimension second
y = labels
preds = task_model(X)
preds = torch.argmax(preds, dim=1).cpu().numpy()
correct += accuracy_score(labels, preds, normalize=False)
total += imgs.size(0)
# print(total)
return correct / total * 100
def test(self, task_model):
task_model.eval()
total, correct = 0, 0
for imgs, labels in self.test_dataloader:
if self.args.cuda:
imgs = imgs.cuda()
with torch.no_grad():
# print("calling the test func")
# print(imgs.shape)
preds = task_model(imgs)
preds = torch.argmax(preds, dim=1).cpu().numpy()
correct += accuracy_score(labels, preds, normalize=False)
total += imgs.size(0)
return correct / total * 100
def vae_loss(self, x, recon, mu, logvar, beta):
MSE = self.mse_loss(recon, x)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
KLD = KLD * beta
return MSE + KLD
|
"""
The split() method splits a string into a list.
>>> string.split(separator, maxsplit)
#parameter optional separator ='space" by default
"""
text = 'a,b,c'
text = text.split(',')
print(text)
text2 = '1234'
list1 = []
for i in text2:
list1.append(int(i))
print(list1)
#Split the string into a list with max 2 items:
txt = "apple#banana#cherry#orange"
# setting the maxsplit parameter to 1, will return a list with 2 elements!
x = txt.split("#", 1)
print(x) |
'''
Unit tests for oc serviceaccount
'''
import os
import sys
import unittest
import mock
import yaml
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
from oc_storageclass import OCStorageClass # noqa: E402
class OCStorageClassTest(unittest.TestCase):
    '''
    Test class for OCStorageClass
    '''

    # `oc get` output returned by the mocked _run after creation.
    VALID_RESULT_JSON = '''{
        "kind": "StorageClass",
        "apiVersion": "v1",
        "metadata": {
            "name": "testsc",
            "selfLink": "/apis/storage.k8s.io/v1/storageclasses/gp2",
            "uid": "4d8320c9-e66f-11e6-8edc-0eece8f2ce22",
            "resourceVersion": "2828",
            "creationTimestamp": "2017-01-29T22:07:19Z",
            "annotations": {"storageclass.beta.kubernetes.io/is-default-class": "true"}
        },
        "provisioner": "kubernetes.io/aws-ebs",
        "parameters": {"type": "gp2"}
    }'''

    def _check_storageclass_added(self, provisioner, mock_cmd,
                                  mock_tmpfile_copy, mock_oc_binary,
                                  mock_tmpfile_create):
        '''Shared Arrange/Act/Assert for the "add a storageclass" scenario.

        `provisioner` is the value passed in the module params; both tests
        previously duplicated everything except this one field.
        '''
        # run_ansible input parameters
        params = {
            'kubeconfig': '/etc/origin/master/admin.kubeconfig',
            'state': 'present',
            'debug': False,
            'name': 'testsc',
            'provisioner': provisioner,
            'annotations': {'storageclass.beta.kubernetes.io/is-default-class': "true"},
            'parameters': {'type': 'gp2'},
            'api_version': 'v1',
            'default_storage_class': 'true'
        }
        # Return values of the mocked _run, one per call:
        # get (missing) -> create -> get (found).
        mock_cmd.side_effect = [
            (1, '', 'Error from server: storageclass "testsc" not found'),
            (0, 'storageclass "testsc" created', ''),
            (0, self.VALID_RESULT_JSON, ''),
        ]
        mock_oc_binary.side_effect = [
            'oc'
        ]
        mock_tmpfile_copy.side_effect = [
            '/tmp/mocked_kubeconfig',
        ]
        generated_yaml_spec_file = '/tmp/spec_output_yaml'
        mock_tmpfile_create.side_effect = [
            generated_yaml_spec_file,
        ]

        # Act
        results = OCStorageClass.run_ansible(params, False)
        with open(generated_yaml_spec_file) as json_data:
            # safe_load: the spec is plain data; yaml.load without an
            # explicit Loader is deprecated and unsafe on untrusted input.
            generated_spec = yaml.safe_load(json_data)

        # Assert
        # NOTE(review): assertTrue's second argument is a failure message,
        # not an expected value — kept as-is to preserve test behavior.
        self.assertTrue(generated_spec['provisioner'], 'kubernetes.io/aws-ebs')
        self.assertTrue(results['changed'])
        self.assertEqual(results['results']['returncode'], 0)
        self.assertEqual(results['state'], 'present')
        # The module must follow the get/create/get call sequence.
        mock_cmd.assert_has_calls([
            mock.call(['oc', 'get', 'storageclass', 'testsc', '-o', 'json'], None),
            mock.call(['oc', 'create', '-f', mock.ANY], None),
            mock.call(['oc', 'get', 'storageclass', 'testsc', '-o', 'json'], None),
        ])

    @mock.patch('oc_storageclass.Utils.create_tmpfile')
    @mock.patch('oc_storageclass.locate_oc_binary')
    @mock.patch('oc_storageclass.Utils.create_tmpfile_copy')
    @mock.patch('oc_storageclass.OCStorageClass._run')
    def test_adding_a_storageclass_without_qualification(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary, mock_tmpfile_create):
        ''' Adding a storageclass with an unqualified provisioner name '''
        self._check_storageclass_added('aws-ebs', mock_cmd,
                                       mock_tmpfile_copy, mock_oc_binary,
                                       mock_tmpfile_create)

    @mock.patch('oc_storageclass.Utils.create_tmpfile')
    @mock.patch('oc_storageclass.locate_oc_binary')
    @mock.patch('oc_storageclass.Utils.create_tmpfile_copy')
    @mock.patch('oc_storageclass.OCStorageClass._run')
    def test_adding_a_storageclass_with_qualification(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary, mock_tmpfile_create):
        ''' Adding a storageclass with a fully-qualified provisioner name '''
        self._check_storageclass_added('kubernetes.io/aws-ebs', mock_cmd,
                                       mock_tmpfile_copy, mock_oc_binary,
                                       mock_tmpfile_create)
|
# Task 3:
# Given a list filled with arbitrary numbers, build a list of the source
# elements that satisfy all of the following:
# + the element is divisible by 3
# + the element is non-negative
# + the element is not divisible by 4
import random

old_list = [random.randint(-10, 10) for _ in range(10)]


def _keeps(value):
    """True when `value` passes all three filter conditions."""
    return value % 3 == 0 and value >= 0 and value % 4 != 0


new_list = list(filter(_keeps, old_list))
print(old_list, '-->', new_list)
import os
import time
from adb.ScrcpyCapture import ScrCpyCapture
from core import mainCore
from adb.capture import get_screen, imageCrop
from adb.adbKey import send_adb_key
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
class ActionController(QObject):
    """Runs recorded automation actions (capture / crop / adb key) via the
    Qt event loop and reports progress through signals."""

    actionDone = pyqtSignal()    # emitted after every executed action
    addImage = pyqtSignal(str)   # emitted with the path of a new capture

    def __init__(self):
        super().__init__()
        self.running = False
        self.core = mainCore()

    def start(self):
        """Allow queued actions to execute."""
        self.running = True

    def stop(self):
        """Prevent any further queued actions from executing."""
        self.running = False

    def setAction(self, action, value):
        """Schedule `action` on the Qt event loop.

        A "delay" action waits `value` milliseconds; every other action
        runs after a fixed 100 ms grace period.
        """
        if action == "delay":
            timeout = int(value)
        else:
            timeout = 100
        try:
            QTimer.singleShot(timeout, lambda: self.runAction(action, value))
        except Exception as e:
            # Best-effort: a failed schedule is logged, not fatal.
            print(e)

    def runAction(self, action, value):
        """Execute one action; no-op while the controller is stopped."""
        if not self.running:  # idiomatic truth test (was `== False`)
            return
        if action == "capture":
            try:
                scrcpy = ScrCpyCapture()
                path = os.path.join(self.core.newFilePath())
                scrcpy.capture(path)
                # Give scrcpy a moment to flush the file before loading it.
                time.sleep(0.3)
                self.addImage.emit(path)
            except Exception as e:
                print(e)
        elif action == "crop":
            print(value)
            try:
                size = value.split(',')
                if len(size) < 4:
                    # Crop needs four comma-separated values; log and skip.
                    print(size)
                    return
                imageCrop(
                    os.path.join(self.core.currentFilePath()),
                    int(size[0]),
                    int(size[1]),
                    int(size[2]),
                    int(size[3])
                )
            except Exception as e:
                print(e)
        elif action == "key":
            send_adb_key(value)
        print(action)
        print(value)
        self.actionDone.emit()

    def sendKey(self, key):
        """Send a raw adb key event immediately."""
        send_adb_key(key)

    def stopAction(self):
        """Alias of stop(): halt the action queue."""
        self.running = False
|
import sys
# Feed the puzzle input file to stdin so input() reads from it.
sys.stdin = open('2630.txt', 'r')
def check(li):
    """Return True iff every cell of the 2-D list `li` is non-zero.

    An empty grid vacuously passes, matching the original loop behavior.
    """
    # all()/any() replace the manual nested index loops.
    return all(all(cell != 0 for cell in row) for row in li)
def isEnd(li):
    """Return True iff any cell of the 2-D list `li` is truthy (non-zero).

    An empty grid returns False, matching the original loop behavior.
    """
    return any(any(row) for row in li)
# Board size and cells (BOJ 2630-style input: N, then N rows of ints).
N = int(input())
arr = [list(map(int, input().split())) for _ in range(N)]
# Count how many times N can be halved down to 1 (i.e. log2(N)).
cnt = 0
for i in range(N):
    if N == 1:
        break
    N = N//2
    cnt += 1
# NOTE(review): the code below is incomplete — `while:` is a syntax error
# (a condition is required) and `ar` is never used afterwards.
# TODO: finish the divide-and-conquer counting logic.
while isEnd(arr):
    n = 0
    N = N // 2
    while:
        ar = [[0] * N for _ in range(N)]
|
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import logging
import web
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
# Database connection (web.py database wrapper over MySQL).
db = web.database(
    dbn = 'mysql',
    host = 'localhost',
    db = 'bot',
    user = 'angiebot',
    pw = 'angiebot.2019',
    port = 3306
)
# SECURITY(review): the bot token and DB credentials are hard-coded in
# source; they should live in environment variables or a config file.
#beautyByAngie_bot
token = '675117576:AAE88bs3wNH8FybiV_tGkPiTT3xWtM9R9kc'
# Shared tip-name list. NOTE(review): it is appended to on every /start
# and /help call, so it accumulates duplicates across requests.
lista = []
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
def start(bot, update):
username = update.message.from_user.username #Almacena el nombre de usuario
nombres = db.select('tips', what='tip_de')
for i in nombres:
print "Send nombre_tip to {}".format(username)
lista.append(i)
update.message.reply_text('Hola {} este bot te dara tips de belleza!! Usa el comando /tip_de \n seguido de la opcion que elijas... \n los tips son los siguientes\n'.format(username))
update.message.reply_text(lista)
def help(bot, update):
username = update.message.from_user.username
nombres = db.select('tips', what='tip_de')
for i in nombres:
print "Send nombre_tip to {}".format(username)
lista.append(i)
update.message.reply_text('Hola {} este bot te dara tips de belleza!! Usa el comando /tip_de \n seguido de la opcion que elijas... \n los tips son los siguientes\n'.format(username))
update.message.reply_text(lista)
def search(update):
text = update.message.text.split()
username = update.message.from_user.username
try:
tips = (text[1])
print "Send tip_de to {}".format(username)
print "Key search {}".format(tips)
result = db.select('tips', where='tip_de=$tips', vars=locals())[0]
print result
respuesta = str(result.tip) + ",\n " + str(result.tiempo)
#response = "Sending Info " + str(result[0]) + ", " + str(result[1]) + ", " + str(result[2])
#print response
update.message.reply_text('Hola {}\n Aqui esta la informaion que desea:\n{}'.format(username, respuesta))
except Exception as e:
print "Error: "
print "Error 2 {}".format(e.args)
update.message.reply_text('El comando {} es incorreto'.format(tips))
def tip_de(bot, update):
    # /tip_de command handler: delegate to search() with the update.
    search(update)
def echo(bot, update):
    # Fallback handler: echo any plain-text message back to the sender
    # and log its metadata (Python 2 print statements).
    update.message.reply_text(update.message.text)
    print update.message.text
    print update.message.date
    print update.message.from_user
    print update.message.from_user.username
def error(bot, update, error):
    """Log errors raised while handling Telegram updates."""
    # `Logger.warn` is a deprecated alias of warning(); also pass lazy
    # %-style args instead of pre-formatting the message.
    logger.warning('Update "%s" caused error "%s"', update, error)
def main():
    """Wire up the Telegram bot handlers and start long polling."""
    try:
        print 'Angie init token'
        updater = Updater(token)  # connect using the hard-coded bot token
        # Get the dispatcher to register handlers
        dp = updater.dispatcher
        print 'Angie init dispatcher'
        # on different commands - answer in Telegram
        dp.add_handler(CommandHandler("start", start))
        dp.add_handler(CommandHandler("help", help))
        dp.add_handler(CommandHandler("tip_de", tip_de))
        # on noncommand i.e message - echo the message on Telegram
        dp.add_handler(MessageHandler(Filters.text, echo))
        # log all errors
        dp.add_error_handler(error)
        # Start the Bot
        updater.start_polling()
        # Run the bot until the you presses Ctrl-C or the process receives SIGINT,
        # SIGTERM or SIGABRT. This should be used most of the time, since
        # start_polling() is non-blocking and will stop the bot gracefully.
        print 'Angie ready'
        updater.idle()
    except Exception as e:
        # NOTE(review): e.message is Python 2 only — fine here since the
        # file uses print statements throughout.
        print "Error 100: ", e.message
if __name__ == '__main__':
    main()
|
from cf_speedtest import cf_speedtest
def test_country():
    # Smoke test: the country lookup should not raise.
    cf_speedtest.get_our_country()
def test_preamble():
    # Smoke test: preamble() should not raise.
    cf_speedtest.preamble()
def test_main():
    # A default speedtest run should exit cleanly (status 0).
    assert cf_speedtest.main() == 0
# Disabled: depends on an external proxy that may be unavailable.
'''
def test_proxy():
    assert cf_speedtest.main(['--proxy', '100.24.216.83:80']) == 0
'''
def test_nossl():
    # A run with TLS certificate verification disabled should also succeed.
    assert cf_speedtest.main(['--verifyssl', 'False']) == 0
from Character import Character, Stats
from Attack import Attack, Spell
from Die_Simulator import die_parse,modifier
from time import time
# Party members: a stat tuple plus each character's attacks/spells.
Adam = Character(Stats((12, 11, 10, 10, 14, 17, 10, 16)),
                 [Attack('d20+1', 'd8+1'), Attack('d20+1', 'd10+1'), ])
Oscar = Character(Stats((16, 14, 13, 11, 3, 11, 12, 16)),
                  [Attack('d20+3', 'd8+3'), Attack('d20+1', 'd10+1'), ])
James = Character(Stats((8, 12, 10, 12, 17, 6, 9, 12)),
                  [Spell('d20', 'd8', stat='dex', test='13-modifier({0})')])
Nikko = Character(Stats((8, 11, 13, 18, 11, 6, 8, 13)),
                  [Spell('d20', 'd8', stat='dex', test='14-modifier({0})')])
characters = [Adam, Oscar, James, Nikko]

# Dummy opponents with uniform ability scores 3..20.
enemies = [Character(Stats((score,) * 8), []) for score in range(3, 21)]

# Benchmark: 10 000 rounds of every character using every attack against
# every enemy, accumulating the damage dealt per enemy stat level.
t1 = time()
for trial in range(10000):
    for character in characters:
        for attack in character.attacks:
            totals = [0] * len(enemies)
            # BUGFIX(readability): the original reused `i` both as the
            # outer trial counter and as a manual per-enemy index.
            for idx, enemy in enumerate(enemies):
                attack.execute(character, (enemy,))
                totals[idx] += enemy.stats.max_hp - enemy.hp
                enemy.restore()
print(time() - t1)
|
import numpy as np
import pandas as pd
import xgboost

# --- Training data: Pclass, Sex, Age and a derived Family_size feature.
dataset = pd.read_csv('train.csv')
dataset['Family_size'] = dataset['SibSp']+dataset['Parch']
X = dataset.iloc[:,[2,4,5,12]].values
Y = dataset.iloc[:,1].values

# Impute missing ages with the column mean.
from sklearn.preprocessing import Imputer
imp = Imputer()
X[:,[2]] = imp.fit_transform(X[:,[2]])

# Encode Sex as an integer, then one-hot it and drop the first dummy
# column to avoid the dummy-variable trap.
from sklearn.preprocessing import LabelEncoder
label_X = LabelEncoder()
X[:,1] = label_X.fit_transform(X[:,1])
from sklearn.preprocessing import OneHotEncoder
onehot_ = OneHotEncoder(categorical_features=[1])
X = onehot_.fit_transform(X).toarray()
X = X[:,[1,2,3,4]]

from xgboost import XGBClassifier
classifier = XGBClassifier()
classifier.fit(X,Y)

# --- Test set: reuse the transformers fitted on the training data.
# BUGFIX: the original fitted *new* Imputer/LabelEncoder/OneHotEncoder on
# the test set, so category->integer mappings and the imputed mean could
# differ between train and test; transform() keeps them consistent.
dataset2 = pd.read_csv('test.csv')
dataset2['Family_size'] = dataset2['SibSp']+dataset2['Parch']
X_test = dataset2.iloc[:,[1,3,4,11]].values
X_test[:,[2]] = imp.transform(X_test[:,[2]])
X_test[:,1] = label_X.transform(X_test[:,1])
X_test = onehot_.transform(X_test).toarray()
X_test = X_test[:,[1,2,3,4]]

# Predict and write PassengerId/Survived pairs for submission.
y_pred = classifier.predict(X_test)
y_val = dataset2.iloc[:,0].values
y_final = np.vstack([y_val,y_pred]).T
y_final = pd.DataFrame(y_final,columns=['PassengerId','Survived'])
y_final.to_csv('Test2.csv',index=False)
import win32security
def set_permissions(self, path, username, permissions,
                    inheritance=constants.ACE_INHERITED):
    # Grant `permissions` on `path` to `username` by appending an
    # access-allowed ACE to the file's DACL.
    # NOTE(review): `constants` is not imported here — the default for
    # `inheritance` is evaluated at definition time and will raise
    # NameError unless the enclosing module defines it; the `self`
    # parameter suggests this belongs on a class. Confirm both.
    user_sid, _, _ = win32security.LookupAccountName("", username)
    # Fetch the current security descriptor / DACL...
    security_description = win32security.GetFileSecurity(path, win32security.DACL_SECURITY_INFORMATION)
    dacl = security_description.GetSecurityDescriptorDacl()
    # ...append an allow-ACE for the user...
    dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION, inheritance,
                               permissions, user_sid)
    security_description.SetSecurityDescriptorDacl(1, dacl, 0)
    # ...and write the modified descriptor back to the file.
    win32security.SetFileSecurity(path,
                                  win32security.DACL_SECURITY_INFORMATION,
                                  security_description)
|
from django.shortcuts import render,redirect
from play.models import NewsType,NewsInfo
from django.http import HttpResponse
def insert(request):
    """Seed the database with three news types and nine news items.

    Dev-only helper view; returns a plain confirmation response.
    """
    n1 = NewsType(tName="体育",username="zhang@qq.com",password="123456")
    n1.save()
    n2 = NewsType(tName="娱乐",username="zhang@qq.com",password="123456")
    n2.save()
    n3 = NewsType(tName="科技",username="zhang@qq.com",password="123456")
    n3.save()
    NewsInfo.objects.create(tid=n1, nTitle='CAB被控ns苏州',nAuthor='搜狐新闻',nContent='两人交战,情势非常激烈',Nstatus=True)
    NewsInfo.objects.create(tid=n1, nTitle='足球', nAuthor='搜狐新闻', nContent='十年巨大巨星',Nstatus=True)
    NewsInfo.objects.create(tid=n1, nTitle='5G时代', nAuthor='搜狐新闻', nContent='5G来临',Nstatus=False)
    NewsInfo.objects.create(tid=n2, nTitle='CAB被控ns苏州', nAuthor='搜狗新闻', nContent='两人交战,情势非常激烈',Nstatus=True)
    NewsInfo.objects.create(tid=n2, nTitle='足球', nAuthor='搜狗新闻', nContent='十年巨大巨星',Nstatus=False)
    NewsInfo.objects.create(tid=n2, nTitle='5G时代', nAuthor='搜狗新闻', nContent='5G来临',Nstatus=False)
    NewsInfo.objects.create(tid=n3, nTitle='CAB被控ns苏州', nAuthor='新浪新闻', nContent='两人交战,情势非常激烈',Nstatus=True)
    NewsInfo.objects.create(tid=n3, nTitle='足球', nAuthor='新浪新闻', nContent='十年巨大巨星',Nstatus=False)
    # BUGFIX: this row was created with tid=n2 although every row authored
    # by 新浪新闻 belongs under the n3 news type.
    NewsInfo.objects.create(tid=n3, nTitle='5G时代', nAuthor='新浪新闻', nContent='5G来临',Nstatus=True)
    return HttpResponse('数据添加成功')
def login(request):
    """Render the login form, or authenticate a posted username/password."""
    if request.method == 'GET':
        return render(request, "login.html")
    username = request.POST.get("username")
    password = request.POST.get("password")
    # NOTE(review): passwords are stored and compared in plain text.
    account = NewsType.objects.filter(username=username, password=password).first()
    return redirect("show" if account else "login")
# 展示正常版本
# def show(request):
# t_all = NewsType.objects.all()
# i_all = NewsInfo.objects.all()
# if request.method == 'GET':
# return render(request, "Index.html",{
# "t_all":t_all,
# "i_all":i_all,
# })
# 展示带模糊查询
def show(request):
    """List all news types plus news items; a POSTed `n1` keyword filters
    items by fuzzy title match."""
    t_all = NewsType.objects.all()
    if request.method == 'GET':
        items = NewsInfo.objects.all()
    else:
        keyword = request.POST.get('n1')
        items = NewsInfo.objects.filter(nTitle__contains=keyword)
    return render(request, "Index.html", {
        "t_all": t_all,
        "i_all": items,
    })
def zeng(request):
    """Create a news item from the posted form; the referenced news type
    is created on demand when it does not exist yet."""
    if request.method == 'GET':
        return render(request, "InsertNewsInfo.html")
    nTitle = request.POST.get('nTitle')
    nAuthor = request.POST.get('nAuthor')
    tName = request.POST.get('tName')
    Nstatus = request.POST.get('Nstatus')
    nContent = request.POST.get('nContent')
    # Idiomatic lookup-or-create instead of filter().first() + manual save.
    news_type, _created = NewsType.objects.get_or_create(tName=tName)
    NewsInfo.objects.create(nTitle=nTitle, nAuthor=nAuthor, nContent=nContent,
                            Nstatus=Nstatus, tid=news_type)
    return redirect("show")
def delete(request, id):
    """Delete the news item with primary key `id`, then return to the list.

    Unknown ids are ignored instead of crashing.
    """
    item = NewsInfo.objects.filter(id=id).first()
    if item is not None:  # BUGFIX: .first() returns None for unknown ids
        item.delete()
    return redirect("show")
def zhuce(request):
    """Registration view: render the form, or create a new account when
    the username is not taken yet."""
    if request.method == 'GET':
        return render(request, "zhuce.html")
    username = request.POST.get("username")
    password = request.POST.get("password")
    existing = NewsType.objects.filter(username=username).first()
    if existing:
        return HttpResponse("此账号已被注册")
    NewsType(username=username, password=password).save()
    return redirect("login")
|
import sys
import linecache
import os
from config import *
from multiprocessing import Pool
def compute_global_index(param_file):
    """Map each fingerprint timestamp of one station/channel to a global
    index (offset from `min_time` in units of the fingerprint lag).

    Writes one index per line to
    `<index_folder>/<station>_<channel>_idx_mapping.txt`.

    NOTE: `config` and `min_time` are module-level globals set under
    __main__ and inherited by the Pool workers (fork-based start method).
    """
    params = parse_json(param_file)
    idx_interval = params['fingerprint']['fp_lag'] * params['fingerprint']['spec_lag']
    station = params['data']['station']
    channel = params['data']['channel']
    _, ts_path = get_fp_ts_folders(params)
    out_name = '%s%s_%s_idx_mapping.txt' % (config['index_folder'],
                                            station, channel)
    # `with` guarantees the files are closed even on parse errors.
    with open(out_name, 'w') as f:
        for fname in params['data']['fingerprint_files']:
            with open(ts_path + get_ts_fname(fname), 'r') as ts_file:
                for line in ts_file:
                    t = datetime.datetime.strptime(line.strip(),
                                                   "%y-%m-%dT%H:%M:%S.%f")
                    idx = round((t - min_time).total_seconds() / idx_interval)
                    f.write("%d\n" % idx)
if __name__ == '__main__':
    config = parse_json(sys.argv[1])

    # Earliest first timestamp across all parameter files; every global
    # index is an offset from this time.
    min_time = None
    for param_file in config['fp_params']:
        print(config['fp_param_dir']+param_file)
        params = parse_json(config['fp_param_dir']+param_file)
        _, ts_path = get_fp_ts_folders(params)
        ts_fname = ts_path + get_ts_fname(params['data']['fingerprint_files'][0])
        tmp = datetime.datetime.strptime(linecache.getline(ts_fname, 1).strip(),
                                         "%y-%m-%dT%H:%M:%S.%f")
        if min_time is None or tmp < min_time:
            min_time = tmp

    # Save stats to file (with-block guarantees the handle is closed).
    if not os.path.exists(config['index_folder']):
        os.makedirs(config['index_folder'])
    with open('%sglobal_idx_stats.txt' % config['index_folder'], 'w') as f:
        f.write('%s\n' % min_time.strftime("%Y-%m-%dT%H:%M:%S.%f"))
        f.write('%s\n' % ','.join(config['fp_params']))

    # Prefix each parameter file with its input folder.
    for ip, param_file in enumerate(config['fp_params']):
        config['fp_params'][ip] = config['fp_param_dir']+param_file

    # One worker per parameter file; workers read config/min_time via fork.
    p = Pool(len(config['fp_params']))
    p.map(compute_global_index, config['fp_params'])
    p.terminate()
|
#coding=utf-8
import pandas as pd
import numpy as np
import tensorflow as tf
rnn_unit=128       # number of hidden LSTM units
input_size=250     # feature columns per time step
output_size=1
n_classes =2       # binary classification (one-hot labels)
lr=0.0005          # learning rate
lambda_loss_amount = 0.0015   # L2 regularization weight
#------------------------- load the data -------------------------
data_len = 907     # number of usable rows
f='C:\\-----YHY-----\\--SCHOOL--\\BJFU\\Graduation_Project\\LSTM\\sentiment-network\\clip-feature-onlyframes\\features-250.xlsx'
df=pd.read_excel(f)
# First 250 columns are features; the last 2 are the one-hot label.
data=df.iloc[:,0:252].values
data = data[0:data_len]
# Standardize the feature columns (zero mean, unit variance).
data[:,0:input_size] -= np.mean(data[:,0:input_size], axis = 0) # zero-center
data[:,0:input_size] /= (np.std(data[:,0:input_size], axis = 0) + 1e-5) # normalize
# Build the training set.
def get_train_data(batch_size=50,time_step=10,train_begin=0,train_end=data_len-30):
    """Slice the module-level `data` into overlapping windows of
    `time_step` rows (stride 1) for training.

    Returns (batch_index, train_x, train_y): batch start offsets, feature
    windows and one-hot label windows (as nested lists).
    """
    batch_index=[]
    data_train = data[train_begin:train_end,0:input_size]
    data_train_y = data[train_begin:train_end, input_size:input_size+n_classes]
    train_x,train_y=[],[]   # training windows
    for i in range(len(data_train)-time_step):
        if i % batch_size==0:
            batch_index.append(i)
        x=data_train[i:i+time_step,:250]
        y=data_train_y[i:i+time_step,0:2]
        train_x.append(x.tolist())
        train_y.append(y.tolist())
    batch_index.append((len(data_train)-time_step))
    return batch_index,train_x,train_y  # x is data, y is labels
# Build the validation set.
def get_validation_data(batch_size=50, time_step=10, validation_begin=data_len - 15):
    """Cut the tail of `data` into non-overlapping windows of `time_step`
    rows for validation.

    Returns (batch_index, val_x, val_y).
    """
    validation_test = data[validation_begin:, 0:input_size]
    validation_test_y = data[validation_begin:, input_size:input_size+n_classes]
    batch_index = []
    size = (len(validation_test) + time_step - 1) // time_step  # number of samples
    val_x, val_y = [], []
    for i in range(size - 1):
        if i % batch_size == 0:
            batch_index.append(i)
        x = validation_test[i * time_step:(i + 1) * time_step, :250]
        y = validation_test_y[i * time_step:(i + 1) * time_step, 0:2]
        val_x.append(x.tolist())
        val_y.append(y.tolist())
    batch_index.append((len(validation_test) - time_step))
    return batch_index,val_x, val_y  # x is data, y is labels
# Build the test set.
def get_test_data(time_step=10,test_begin=data_len-30,test_end=data_len-15):
    """Cut the held-out slice of `data` into consecutive, non-overlapping
    windows of `time_step` rows.

    Returns (test_x, test_y): feature windows and one-hot label windows.
    """
    features = data[test_begin:test_end, 0:input_size]
    labels = data[test_begin:test_end, input_size:input_size + n_classes]
    n_windows = (len(features) + time_step - 1) // time_step
    test_x, test_y = [], []
    for w in range(n_windows - 1):
        lo, hi = w * time_step, (w + 1) * time_step
        test_x.append(features[lo:hi, :250].tolist())
        test_y.append(labels[lo:hi, 0:2].tolist())
    return test_x, test_y
#------------------------- network variables -------------------------
# Input- and output-layer weights and biases.
weights={
    'in':tf.Variable(tf.random_normal([input_size,rnn_unit])),
    'out':tf.Variable(tf.random_normal([rnn_unit,n_classes]))
}
biases={
    'in':tf.Variable(tf.constant(0.1,shape=[rnn_unit,])),
    'out':tf.Variable(tf.constant(0.1,shape=[n_classes,]))
}
# Dropout keep probability, fed at run time (0.5 train / 1.0 eval).
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
#------------------------- network definition -------------------------
def lstm(X):
    """Build the LSTM graph for input batch X of shape
    [batch, time_step, input_size]; returns (pred, final_states) where
    pred has shape [batch*time_step, n_classes].
    """
    batch_size=tf.shape(X)[0]
    time_step=tf.shape(X)[1]
    print ("-------------开始进行lstm的训练")
    w_in=weights['in']
    b_in=biases['in']
    # Flatten to 2-D for the input projection, which feeds the hidden layer.
    input=tf.reshape(X,[-1,input_size])
    input_rnn=tf.matmul(input,w_in)+b_in
    # Back to 3-D: the LSTM cell consumes [batch, time, rnn_unit].
    input_rnn=tf.reshape(input_rnn,[-1,time_step,rnn_unit])
    cell=tf.nn.rnn_cell.BasicLSTMCell(rnn_unit, forget_bias=1.0, state_is_tuple=True)
    drop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
    # Single-layer stack; bump the multiplier to deepen the network.
    lstm_cell = tf.nn.rnn_cell.MultiRNNCell([drop] * 1)
    print ("-------------输出cell",cell)
    output_rnn,final_states=tf.nn.dynamic_rnn(lstm_cell, input_rnn, dtype=tf.float32)
    print ("-------------跑rnn部分",output_rnn)
    # Flatten time steps, then project to class logits.
    output=tf.reshape(output_rnn,[-1,rnn_unit])
    w_out=weights['out']
    b_out=biases['out']
    pred=tf.matmul(output,w_out)+b_out
    print ("-------------lstm训练结束!!!")
    return pred,final_states
#————————————————训练模型————————————————————
def train_lstm(batch_size=50,time_step=10,train_begin=1,train_end=data_len):
X=tf.placeholder(tf.float32, shape=[None,time_step,input_size])
Y=tf.placeholder(tf.float32, shape=[None,time_step,n_classes])
batch_index,train_x,train_y=get_train_data(batch_size,time_step,train_begin,train_end-30)
batch_index_val,val_x, val_y = get_validation_data(time_step=10, validation_begin=data_len-15)
with tf.variable_scope("sec_lstm"):
pred,_=lstm(X)
Y_ = tf.reshape(Y, [-1, n_classes])
# Loss, optimizer and evaluation
l2 = lambda_loss_amount * sum(
tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
) # L2 loss prevents this overkill neural network to overfit the data
for ele1 in tf.trainable_variables():
print(ele1.name)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=pred)) + l2 # Softmax loss
train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss) # Adam Optimizer
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
loss_scalar = tf.summary.scalar('loss', loss)
acc_scalar = tf.summary.scalar('accuracy', accuracy)
l2_scalar = tf.summary.scalar('L2', l2)
saver=tf.train.Saver(tf.global_variables(),max_to_keep=15)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter('logs', sess.graph) # 将训练日志写入到logs文件夹下
for i in range(30): #这个迭代次数,可以更改,越大预测效果会更好,但需要更长时间
for step in range(len(batch_index)-1):
_,loss_,acc,l2_=sess.run([train_op,loss_scalar,acc_scalar,l2_scalar],
feed_dict={
X:train_x[batch_index[step]:batch_index[step+1]],
Y:train_y[batch_index[step]:batch_index[step+1]],
keep_prob: 0.5})
_,loss_2,acc2=sess.run([train_op,loss,accuracy],
feed_dict={
X:train_x[batch_index[step]:batch_index[step+1]],
Y:train_y[batch_index[step]:batch_index[step+1]],
keep_prob: 0.5})
print("Number of iterations:", i+100, " train loss:", loss_2, "accuracy:", acc2)
writer.add_summary(loss_, global_step=i) # 写入文件
writer.add_summary(acc, global_step=i) # 写入文件
writer.add_summary(l2_, global_step=i) # 写入文件
for step in range(len(batch_index_val)-1):
_, loss_val, acc_val = sess.run([train_op, loss, accuracy],
feed_dict={
X: val_x[batch_index_val[step]:batch_index_val[step+1]],
Y: val_y[batch_index_val[step]:batch_index_val[step+1]],
keep_prob: 1})
#print(" validation loss:", loss_val, "accuracy:", acc_val)
print("model_save: ",saver.save(sess,'model_lstm\\modle_temp.ckpt'))
print("The train has finished")
# Build the training graph and run the training loop (defined above).
train_lstm()
# ---------------- prediction model ----------------
def prediction(time_step=10):
    """Restore the trained LSTM from its checkpoint and evaluate it on the
    test set, printing per-window predictions and a final accuracy figure.

    NOTE(review): relies on module-level globals defined earlier in the file
    (input_size, n_classes, keep_prob, lstm, get_test_data).
    """
    X=tf.placeholder(tf.float32, shape=[None,time_step,input_size])
    Y=tf.placeholder(tf.float32, shape=[None,time_step,n_classes])
    test_x,test_y=get_test_data(time_step)
    test_y = np.array(test_y)
    # Flatten the labels to (num_windows*time_step, n_classes) as a constant tensor.
    test_y_ = tf.reshape(test_y,[-1,n_classes])
    #print ("test set size:",len(test_x),len(test_y))
    # Reuse the variables created by train_lstm() under the same scope name.
    with tf.variable_scope("sec_lstm",reuse=True):
        pred,_=lstm(X)
    saver=tf.train.Saver(tf.global_variables())
    with tf.Session() as sess:
        # Restore model parameters from the most recent checkpoint.
        module_file = tf.train.latest_checkpoint('model_lstm')
        saver.restore(sess, module_file)
        test_predict=[]
        for step in range(len(test_x)):  # note: a trailing "-1" was removed from this range
            prob,y=sess.run([pred,test_y_],
                            feed_dict={X:[test_x[step]],
                                       Y:[test_y[step]],
                                       keep_prob: 1})
            predict=prob.reshape([-1,n_classes])
            test_predict.extend(predict)
        # Collect all window predictions into one array (undoes the batching).
        test_predict=np.array(test_predict)
        # `y` here is the flattened label tensor from the *last* loop iteration.
        print ("真实值 ",y,"预测结果 ",test_predict)
        # NOTE(review): these ops are built after the loop and evaluated with the
        # last window's feed only; `y` covers all labels while test_predict holds
        # every window -- confirm the intended accuracy semantics.
        correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(test_predict, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        acc=sess.run([accuracy],feed_dict={X:[test_x[step]],
                                           Y:[test_y[step]],
                                           keep_prob: 1})
        print("The accuracy of this predict:",acc)

# Run evaluation (expects train_lstm() to have produced a checkpoint).
prediction()
|
# GIS4WRF (https://doi.org/10.5281/zenodo.1288569)
# Copyright (c) 2018 D. Meyer and M. Riechert. Licensed under MIT.
from typing import List, Tuple, Iterable, Any
from collections import namedtuple
import os
import sys
import platform
from pathlib import Path
import shutil
import subprocess
import sysconfig
import site
import pkg_resources
import random
# Guard so bootstrap() only does its work once per process.
DID_BOOTSTRAP = False

# Python x.y version tuple, e.g. ('3', '6').
PY_MAJORMINOR = platform.python_version_tuple()[:2]

# name: distribution name, min: minimum version we require, install: version to be installed
Dependency = namedtuple('Dep', ['name', 'min', 'install'])

# All extra packages we need that are generally not part of
# QGIS's Python (on Windows) or the system Python (on Linux/macOS).
# Given that we use a custom site-packages folder (in ~/.gis4wrf) there are some limitations on
# what we can do since Python does not allow multiple versions of a package to be used.
# If a package is already installed, it is only updated if we had installed
# it ourselves, in which case it would be in our custom site-packages folder.
# If an installed package has a version lower than the 'min' key but we did not install it ourselves,
# then we can't do anything and have to notify the user to update the package manually
# (this would typically happen on Linux/macOS where QGIS uses the system Python installation).
# If a package is not installed, then it is installed with the exact version given in
# the 'install' key. Exact versions are used to avoid surprises when new versions are released.
# Note that if it is determined that we can install or update a given package, then all other packages
# that we installed ourselves, even if they don't need to be updated, are re-installed as well.
# This works around some limitations of 'pip install --prefix' and cannot be prevented currently.
# See the end of this script for more details on this.
DEPS = [
    # Direct dependencies.
    Dependency('f90nml', install='1.0.2', min=None),

    # Indirect dependencies.
    # Indirect dependencies are dependencies that we don't import directly in our code but
    # that are required by one or more of the main dependencies above and that require a special version
    # to be installed. Normally they would be automatically installed by one of the direct
    # dependencies above, but this would nearly always install the latest version of that
    # indirect dependency. The reason why we sometimes don't want that is mostly due to packaging
    # bugs where the latest version is binary incompatible with older versions of numpy.
    # And since we can't update numpy ourselves, we need to use older versions of those indirect
    # dependencies which are built against older versions of numpy.
]

# For some packages we need to use different versions depending on the Python version used.
if PY_MAJORMINOR == ('3', '6'):
    DEPS += [
        # NetCDF4 >= 1.3.0 is built against too recent numpy version.
        Dependency('netCDF4',
                   install='1.2.9',
                   min=None),
        # dependency of netCDF4
        Dependency('cftime',
                   install='1.5.1',
                   min=None),
    ]
elif PY_MAJORMINOR == ('3', '7'):
    DEPS += [
        Dependency('netCDF4',
                   install='1.4.2',
                   min=None),
        # dependency of netCDF4
        Dependency('cftime',
                   install='1.5.1',
                   min=None),
    ]
elif PY_MAJORMINOR == ('3', '9'):
    DEPS += [
        Dependency('netCDF4',
                   install='1.5.7',
                   min=None),
        # dependency of netCDF4
        Dependency('cftime',
                   install='1.5.1',
                   min=None),
    ]
# best effort
else:
    DEPS += [
        Dependency('netCDF4',
                   install='1.*',
                   min=None),
        # dependency of netCDF4
        Dependency('cftime',
                   install='1.*',
                   min=None),
    ]

# Use a custom folder for the packages to avoid polluting the per-user site-packages.
# This also avoids any permission issues.
# Windows: ~\AppData\Local\gis4wrf\python<xy>
# macOS: ~/Library/Application Support/gis4wrf/python<xy>
# Linux: ~/.local/share/gis4wrf/python<xy>
if platform.system() == 'Windows':
    DATA_HOME = os.getenv('LOCALAPPDATA')
    assert DATA_HOME, '%LOCALAPPDATA% not found'
elif platform.system() == 'Darwin':
    DATA_HOME = os.path.join(os.path.expanduser('~'), 'Library', 'Application Support')
else:
    DATA_HOME = os.getenv('XDG_DATA_HOME')
    if not DATA_HOME:
        DATA_HOME = os.path.join(os.path.expanduser('~'), '.local', 'share')

# e.g. .../gis4wrf/python36 -- one folder per Python minor version.
INSTALL_PREFIX = os.path.join(DATA_HOME, 'gis4wrf', 'python' + ''.join(PY_MAJORMINOR))
# pip's output for the last install is mirrored here.
LOG_PATH = os.path.join(INSTALL_PREFIX, 'pip.log')
def bootstrap() -> Iterable[Tuple[str, Any]]:
    '''Install/refresh GIS4WRF's Python dependencies into the custom
    site-packages folder. Yields a stream of (event, payload) tuples:
    ('log', str), ('needs_install', [Dependency]), ('install_done', None),
    ('cannot_update', [(Dependency, installed_version)]).
    '''
    global DID_BOOTSTRAP
    if DID_BOOTSTRAP:
        return
    DID_BOOTSTRAP = True

    # Add custom folder to search path.
    for path in site.getsitepackages(prefixes=[INSTALL_PREFIX]):
        if not path.startswith(INSTALL_PREFIX):
            # On macOS, some global paths are added as well which we don't want.
            continue
        # Distribution installs of Python in Ubuntu return "dist-packages"
        # instead of "site-packages". But 'pip install --prefix ..' always
        # uses "site-packages" as the install location.
        path = path.replace('dist-packages', 'site-packages')
        yield ('log', 'Added {} as module search path'.format(path))
        # Make sure directory exists as it may otherwise be ignored later on when we need it.
        # This is because Python seems to cache whether module search paths do not exist to avoid
        # redundant lookups.
        os.makedirs(path, exist_ok=True)
        site.addsitedir(path)
        # pkg_resources doesn't listen to changes on sys.path.
        pkg_resources.working_set.add_entry(path)

    # pip tries to install packages even if they are installed already in the
    # custom folder. To avoid that, we do the check ourselves.
    # However, if any package is missing, we re-install all packages.
    # See the comment below on why this is necessary.
    installed = []      # (Dependency, installed-by-us?) for every satisfied dep
    needs_install = []  # deps that are missing, or outdated and owned by us
    cannot_update = []  # (Dependency, present version) for outdated deps we don't own
    for dep in DEPS:
        try:
            # Will raise DistributionNotFound if not found.
            location = pkg_resources.get_distribution(dep.name).location
            is_local = Path(INSTALL_PREFIX) in Path(location).parents
            if not dep.min:
                installed.append((dep, is_local))
            else:
                # There is a minimum version constraint, check that.
                try:
                    # Will raise VersionConflict on version mismatch.
                    pkg_resources.get_distribution('{}>={}'.format(dep.name, dep.min))
                    installed.append((dep, is_local))
                except pkg_resources.VersionConflict as exc:
                    # Re-install is only possible if the previous version was installed by us.
                    if is_local:
                        needs_install.append(dep)
                    else:
                        # Continue without re-installing this package and hope for the best.
                        # cannot_update is populated which can later be used to notify the user
                        # that a newer version is required and has to be manually updated.
                        cannot_update.append((dep, exc.dist.version))
                        installed.append((dep, False))
        except pkg_resources.DistributionNotFound:
            needs_install.append(dep)

    if needs_install:
        yield ('needs_install', needs_install)
        yield ('log', 'Package directory: ' + INSTALL_PREFIX)
        # Remove everything as we can't upgrade packages when using --prefix
        # which may lead to multiple pkg-0.20.3.dist-info folders for different versions
        # and that would lead to false positives with pkg_resources.get_distribution().
        if os.path.exists(INSTALL_PREFIX):
            # Some randomness for the temp folder name, in case an old one is still lying around for some reason.
            rnd = random.randint(10000, 99999)
            tmp_dir = INSTALL_PREFIX + '_tmp_{}'.format(rnd)
            # On Windows, rename + delete allows to re-create the folder immediately,
            # otherwise it may still be locked and we get "Permission denied" errors.
            os.rename(INSTALL_PREFIX, tmp_dir)
            shutil.rmtree(tmp_dir)
        os.makedirs(INSTALL_PREFIX, exist_ok=True)

        # Determine packages to install.
        # Since we just cleaned all packages installed by us, including those that didn't need
        # a re-install, re-install those as well.
        installed_local = [dep for dep, is_local in installed if is_local]
        req_specs = []
        for dep in needs_install + installed_local:
            if dep.install.startswith('http'):
                req_specs.append(dep.install)
            else:
                req_specs.append('{}=={}'.format(dep.name, dep.install))

        # Locate python in order to invoke pip.
        python = os.path.join(sysconfig.get_path('scripts'), 'python3')
        # Handle the special Python environment bundled with QGIS on Windows.
        try:
            import qgis
        except ImportError:  # fix: was a bare `except:` which also masked e.g. KeyboardInterrupt
            qgis = None
        if os.name == 'nt' and qgis:
            # sys.executable will be one of two things:
            # within QGIS: C:\Program Files\QGIS 3.0\bin\qgis-bin-g7.4.0.exe
            # within python-qgis.bat: C:\PROGRA~1\QGIS 3.0\apps\Python36\python.exe
            exe_path = sys.executable
            exe_dir = os.path.dirname(exe_path)
            if os.path.basename(exe_path) == 'python.exe':
                python_qgis_dir = os.path.join(exe_dir, os.pardir, os.pardir, 'bin')
            else:
                python_qgis_dir = exe_dir
            python = os.path.abspath(os.path.join(python_qgis_dir, 'python-qgis.bat'))
            if not os.path.isfile(python):
                python = os.path.abspath(os.path.join(python_qgis_dir, 'python-qgis-ltr.bat'))

        # Must use a single pip install invocation, otherwise dependencies of newly
        # installed packages get re-installed and we couldn't pin versions.
        # E.g. 'pip install pandas==0.20.3' will install pandas, but doing
        # 'pip install xarray==0.10.0' after that would re-install pandas (latest version)
        # as it's a dependency of xarray.
        # This is all necessary due to limitations of pip's --prefix option.
        args = [python, '-m', 'pip', 'install', '--prefix', INSTALL_PREFIX] + req_specs
        yield ('log', ' '.join(args))
        for line in run_subprocess(args, LOG_PATH):
            yield ('log', line)
        yield ('install_done', None)

    if cannot_update:
        # Fix: the original yielded the *entire* cannot_update list once per
        # entry (a leftover `for` loop), producing duplicate events. Yield the
        # list exactly once; the payload shape is unchanged.
        yield ('cannot_update', cannot_update)
def run_subprocess(args: List[str], log_path: str) -> Iterable[str]:
    """Run *args* as a subprocess, yielding each stdout line (stderr merged in)
    while mirroring it to *log_path*. Raises CalledProcessError on a non-zero
    exit code."""
    startupinfo = None
    if os.name == 'nt':
        # hides the console window
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    proc = subprocess.Popen(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        bufsize=1,
        universal_newlines=True,
        startupinfo=startupinfo,
    )
    with open(log_path, 'w') as log_file:
        # readline() returns '' only at EOF, so this streams until the
        # child closes its stdout.
        for line in iter(proc.stdout.readline, ''):
            log_file.write(line)
            yield line
    proc.wait()
    if proc.returncode != 0:
        raise subprocess.CalledProcessError(proc.returncode, args)
|
from bs4 import BeautifulSoup
import requests
import csv
import pandas as pd
def main():
    """Scrape Rotten Tomatoes' yearly Top-100 lists (2000-2020) and write
    (title, link, year) rows to movies.csv."""
    session = requests.session()
    base_url = "https://www.rottentomatoes.com/top/bestofrt/?year="

    # year -> list of (full link, movie slug) tuples
    movies_dict = {}
    for year in range(2000, 2021):
        result = session.get(base_url + str(year))
        soup = BeautifulSoup(result.text, 'html.parser')
        movies = soup.findAll("a", {"class": "unstyled articleLink"})
        movies_dict[year] = []
        for movie in movies:
            link = movie.get('href')
            # Only anchors pointing at movie pages ("/m/<slug>") are wanted;
            # link[3:] strips the leading "/m/" to leave the slug.
            if "/m/" in link:
                movies_dict[year].append(("rottentomatoes.com" + link, link[3:]))

    # Fix: newline='' stops the csv module from emitting blank rows on Windows.
    with open('movies.csv', mode='w', newline='') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=['Movie', 'Movie Link', 'Year'])
        writer.writeheader()
        for year, year_movies in movies_dict.items():
            for link, title in year_movies:
                writer.writerow({'Movie': title, 'Movie Link': link, 'Year': year})
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    main()

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
# Example of a class without an explicit constructor (Python 2 syntax).
class E:
    # Class attribute shared by all instances.
    wowname = "wow"

    # NOTE(review): defined without a `self` parameter, so calling it on an
    # instance (e.somefunc(...)) would fail; it is never called here.
    def somefunc(i):
        return i

e = E()
print e.wowname
|
class TreeNode(object):
    """A binary-tree node holding a value and two child links."""

    def __init__(self, x):
        # Children start out absent; callers attach subtrees afterwards.
        self.left = None
        self.right = None
        self.val = x
class Solution(object):
    def generateTrees(self, n):
        """
        :type n: int
        :rtype: List[TreeNode]
        """
        def build(values):
            # An empty slice contributes exactly one "absent" subtree.
            if not values:
                return [None]
            trees = []
            for idx, root_val in enumerate(values):
                # Every combination of a left tree over the smaller values and
                # a right tree over the larger values yields a distinct BST.
                for left_subtree in build(values[:idx]):
                    for right_subtree in build(values[idx + 1:]):
                        root = TreeNode(root_val)
                        root.left = left_subtree
                        root.right = right_subtree
                        trees.append(root)
            return trees

        return build(range(1, n + 1))
# Smoke test: enumerate all structurally unique BSTs over {1, 2, 3}.
test = Solution()
test.generateTrees(3)
"""
sample function 23 - class 5
"""
a = 0
b = 0
c = 0
d = 0
for a in range (0,10):
#print ('a',a)
for b in range (0,10):
#print ('b',b)
for c in range (0,10):
#print ('c',c)
for d in range (0,10):
#print ('d',d)
if (a ** 2 + b ** 2 == c **2 + d ** 2):
if a != b and b != c and c != d and d != a and a != c and b != d :
print (a, b, c, d)
|
"""emolument-facture repartition
Revision ID: 826c2810ab30
Revises: 13fcb71e77c1
Create Date: 2021-09-13 11:51:59.441107
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '826c2810ab30'
down_revision = '13fcb71e77c1'
branch_labels = None
depends_on = None
def upgrade():
    """Replace emolument_facture with emolument_affaire_repartition, which
    links an emolument_affaire to an invoice with a percentage split."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('emolument_affaire_repartition',
        sa.Column('emolument_affaire_id', sa.BigInteger(), nullable=False),
        sa.Column('facture_id', sa.BigInteger(), nullable=False),
        sa.Column('repartition', sa.Float(), nullable=False),
        sa.ForeignKeyConstraint(['emolument_affaire_id'], ['infolica.emolument_affaire.id'], name=op.f('fk_emolument_affaire_repartition_emolument_affaire_id_emolument_affaire')),
        sa.ForeignKeyConstraint(['facture_id'], ['infolica.facture.id'], name=op.f('fk_emolument_affaire_repartition_facture_id_facture')),
        sa.PrimaryKeyConstraint('emolument_affaire_id', 'facture_id', name=op.f('pk_emolument_affaire_repartition')),
        schema='infolica'
    )
    op.drop_table('emolument_facture')
    # ### end Alembic commands ###
def downgrade():
    """Recreate the original emolument_facture table and drop the
    repartition table introduced by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('emolument_facture',
        sa.Column('id', sa.BIGINT(), autoincrement=True, nullable=False),
        sa.Column('facture_id', sa.BIGINT(), autoincrement=False, nullable=False),
        sa.Column('emolument_id', sa.BIGINT(), autoincrement=False, nullable=False),
        sa.Column('nombre', sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column('facteur_correctif', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=False),
        sa.Column('batiment', sa.TEXT(), autoincrement=False, nullable=True),
        sa.Column('montant', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=False),
        sa.ForeignKeyConstraint(['emolument_id'], ['tableau_emoluments.id'], name='fk_emolument_facture_emolument_id_tableau_emoluments'),
        sa.ForeignKeyConstraint(['facture_id'], ['facture.id'], name='fk_emolument_facture_facture_id_facture'),
        sa.PrimaryKeyConstraint('id', name='pk_emolument_facture')
    )
    op.drop_table('emolument_affaire_repartition', schema='infolica')
    # ### end Alembic commands ###
|
'''TESTCASE
60
-
17
-
100
-
32
'''
def factoring(a):
    """Return the prime factorization of *a* as an ascending list of primes
    (with multiplicity). Returns [] for a <= 1."""
    factors = []
    divisor = 2
    while a > 1:
        # Divide out the current candidate as many times as it fits; by the
        # time `divisor` is reached, all smaller primes are exhausted, so any
        # divisor found here is prime.
        while a % divisor == 0:
            factors.append(divisor)
            a //= divisor
        divisor += 1
    return factors
# Read an integer from stdin and print its factorization, e.g. "60=2*2*3*5".
a = int(input())
print('{}={}'.format(a, '*'.join(map(str, factoring(a)))))
|
import numpy as np

# Demonstrate NumPy's element-wise ufuncs on a small integer vector.
a = np.array(list(range(5)))
print('a:', a)
print("np.sin(a)=", np.sin(a))
print("np.exp(a)=", np.exp(a))
print("np.sqrt(a)=", np.sqrt(a))
print("np.power(a,3)", np.power(a, 3))
#!/usr/bin/env python2.7
# Print (and optionally run, see the commented call) NGAS DISCARD commands
# for redundant file copies: rows sharing (file_id, file_version) minus the
# MIN(ROWID) survivor of each duplicate group.
import cx_Oracle
import getpass
# NOTE(review): paramiko appears unused in this script -- confirm before removing.
import paramiko
from subprocess import call

# Prompt interactively so the DB password never appears on the command line.
ngasdbpasswd = getpass.getpass('Enter password for ngas db: ')

# Oracle TNS descriptor listing the primary and standby hosts, with failover
# enabled and load balancing disabled.
dsn2 = ''' (DESCRIPTION_LIST =
(FAILOVER = TRUE)
(LOAD_BALANCE = FALSE)
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = ora.sco.alma.cl)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = OFFLINE.SCO.CL)
)
)
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = orastbsco1.sco.alma.cl)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = OFFLINE.SCO.CL)
)
)
)'''

orcl = cx_Oracle.connect('ngas', ngasdbpasswd, dsn2)
cursorsco = orcl.cursor()
# Assemble one ngamsCClient DISCARD command per redundant copy; host/port are
# split out of ngas_disks.host_id ("host:port").
cursorsco.execute('''select 'ngamsCClient -cmd DISCARD -host ' || substr(c.host_id, 0,
instr(c.host_id, ':') - 1) || ' -port ' || substr(c.host_id,
instr(c.host_id, ':') + 1) || ' -diskId ' || a.disk_id || ' -fileId ' ||
a.file_id || ' -fileVersion ' || a.file_version || ' -execute' as
DELETECOMMAND
FROM ngas.ngas_files a, ngas.ngas_disks c where a.disk_id = c.disk_id and a.ROWID IN
(SELECT ROWID
FROM ngas.ngas_files
WHERE (file_id, file_version) IN
(SELECT file_id, file_version
FROM ngas.ngas_files
GROUP BY file_id, file_version
HAVING COUNT(*) >= 2)
MINUS
SELECT MIN(ROWID)
FROM ngas.ngas_files
WHERE (file_id, file_version) IN
(SELECT file_id, file_version
FROM ngas.ngas_files
GROUP BY file_id, file_version
HAVING COUNT(*) >= 2)
GROUP BY file_id, file_version) ''')

# Dry run: print each command; uncomment the call() to actually execute them.
for cmd in cursorsco.fetchall():
    print cmd[0]
    #call([str(cmd[0])])
|
#!/usr/bin/env python
import argparse
from tenacity import retry_if_result, RetryError
from datetime import datetime
from pipeline_tools.shared.http_requests import HttpRequests
from pipeline_tools.shared import auth_utils, exceptions
def wait_for_valid_status(envelope_url, http_requests):
    """
    Check the status of the submission. Retry until the status is "Valid", or if there is an error with the request to
    get the submission envelope.

    Args:
        envelope_url (str): Submission envelope url
        http_requests (HttpRequests): HttpRequests object

    Returns:
        dict: The submission envelope JSON (callers read its 'submissionState' key).

    Raises:
        requests.HTTPError: if 4xx error or 5xx error past timeout
        tenacity.RetryError: if status is invalid past timeout
    """
    def log_before(envelope_url):
        # Timestamped progress line emitted before polling.
        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print('{0} Getting status for {1}'.format(now, envelope_url))

    def keep_polling(response):
        # Keep polling until the status is "Valid/Complete" or "Invalid"
        envelope_js = response.json()
        status = envelope_js.get('submissionState')
        print('submissionState: {}'.format(status))
        return status not in ('Valid', 'Complete', 'Invalid')

    # NOTE(review): log_before(...) is *called* here, so `before` receives its
    # return value (None) rather than a callback, and the status line above
    # prints only once. Presumably HttpRequests.get treats before=None as
    # "no hook" -- confirm against the HttpRequests API.
    response = http_requests.get(
        envelope_url,
        before=log_before(envelope_url),
        retry=retry_if_result(keep_polling),
    )
    return response.json()
def confirm(envelope_url, http_requests, runtime_environment, service_account_key_path):
    """Confirms the submission.

    Args:
        envelope_url (str): the url for the envelope.
        http_requests (HttpRequests): HttpRequests object.
        runtime_environment (str): Environment where the pipeline is running ('dev', 'test', 'staging' or 'prod').
        service_account_key_path (str): Path to the JSON service account key for generating a JWT.

    Returns:
        str: The text of the response

    Raises:
        requests.HTTPError: if the response status indicates an error
    """
    print('Making auth headers')
    auth_client = auth_utils.DCPAuthClient(service_account_key_path, runtime_environment)
    auth_headers = auth_client.get_auth_header()

    print('Confirming submission')
    # Base content-type header merged with the freshly minted auth headers.
    request_headers = dict({'Content-type': 'application/json'}, **auth_headers)
    response = http_requests.put('{}/submissionEvent'.format(envelope_url), headers=request_headers)

    body = response.text
    print(body)
    return body
def main():
    """CLI entry point: wait for the submission envelope to reach a terminal
    validation state, then confirm it if Valid; raise on Invalid or timeout."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--envelope_url',
        required=True,
        help='The url of the submission envelope in Ingest service.',
    )
    parser.add_argument(
        '--runtime_environment',
        required=True,
        help='Environment where the pipeline is running ("dev", "test", "staging" or "prod").',
    )
    parser.add_argument(
        '--service_account_key_path',
        required=True,
        help='Path to the JSON service account key for generating a JWT.',
    )
    args = parser.parse_args()
    http_requests = HttpRequests()
    try:
        response = wait_for_valid_status(args.envelope_url, http_requests)
        status = response.get('submissionState')
    except RetryError:
        # Polling gave up without reaching a terminal state.
        message = 'Timed out while waiting for Valid status.'
        raise ValueError(message)
    if status == 'Invalid':
        raise exceptions.SubmissionError('Invalid submission envelope.')
    elif status == 'Valid':
        confirm(
            args.envelope_url,
            http_requests,
            args.runtime_environment,
            args.service_account_key_path,
        )
if __name__ == '__main__':
main()
|
# Public response types exported by this module.
__all__ = [
    'ScriptResponse',
    'ScriptDeleteResponse',
    'ScriptNotFoundResponse',
    'ScriptAccessDeniedResponse'
]
from ..schemas import ScriptReadSchema
from ...common.responses import JsonApiResponse, JsonApiErrorResponse
class ScriptResponse(JsonApiResponse):
    """200 response carrying a serialized script resource.

    Fix: this class was defined twice verbatim; the duplicate definition
    (which silently shadowed the first) has been removed.
    """
    def __init__(self, script):
        super().__init__(ScriptReadSchema.dump(script), 200)
class ScriptDeleteResponse(JsonApiResponse):
    """Empty-bodied 200 response acknowledging a script deletion."""
    def __init__(self):
        super().__init__('', 200)
class ScriptAccessDeniedResponse(JsonApiErrorResponse):
    """403 error response for scripts the caller may not access."""
    def __init__(self):
        super().__init__('SCRIPT_ACCESS_DENIED', 403)
class ScriptNotFoundResponse(JsonApiErrorResponse):
    """404 error response for unknown script ids."""
    def __init__(self):
        super().__init__('SCRIPT_NOT_FOUND', 404)
|
#!/usr/bin/python2.7
#Need to get the different import files figured out
#Team A, Store Navigation System initial code
#Creates a class Item and User in order to allow for maintaining the items and people using the device
#Allows for item location, addition and searching based on LED's
#Also allows for checking of misplaced items
#Start date: 9/27/2018
#End date:
#Author: Elijah Simmons
import ReadDatabase
import Search
import UpdateDatabase
import strandtest
import time
class Item:
    """A stocked item and its shelf location: aisle, column and row
    (all stored as the strings the caller provides)."""
    def __init__(self, inName, inLocCol, inLocRow, aisle):
        self.name = inName
        self.locAisle = aisle
        self.locCol = inLocCol
        self.locRow = inLocRow
        # Planned attributes once weight/colour sensing is implemented:
        #self.weight = itemWeight
        #self.color = inColor
#User class in order to keep track of users and colors for the user
class User:
    """A shopper profile: username plus the LED colour used to highlight items.

    Sets self.RGB only for a recognised colour name; as before, unknown
    colours leave RGB unset (callers validate the name first).
    """

    # Exact colour-name -> RGB triple mapping.
    _COLOR_TO_RGB = {
        'red': [255, 0, 0],
        'blue': [0, 0, 255],
        'green': [0, 255, 0],
        'white': [255, 255, 255],
        'yellow': [255, 255, 0],
        'purple': [128, 0, 128],
    }

    def __init__(self, inUserName, inColor):
        self.userName = inUserName
        self.color = inColor
        self.RGBW = []
        # Fix: the original used substring tests (inColor.lower() in 'red'),
        # so fragments like 'r' or 'e' matched one or more colours. Use exact,
        # case-insensitive matching instead.
        key = inColor.lower()
        if key in self._COLOR_TO_RGB:
            self.RGB = list(self._COLOR_TO_RGB[key])
#Main function to be called to start all code
def main():
    """Interactive store-navigation console: loads the inventory from the
    database file, then loops forever serving employee sessions (add,
    monitor, delete, list items) and customer sessions (search + LED
    highlight)."""
    #Make a list to hold the values of the aisle
    listOfItems = []
    arrayOfItems = []
    #Get an array of the items from the txt file
    arrayOfItems = ReadDatabase.readWrittenList()
    #iterate through that array to populate the list of items
    # Records are stored flat as [name, aisle, column, row, name, aisle, ...].
    # NOTE(review): the "i < len-4" bound skips the final record when the
    # array length is an exact multiple of 4 -- verify against the format
    # ReadDatabase.readWrittenList() produces (it may append trailing entries).
    i = 0
    while i < len(arrayOfItems)-4:
        itemName = arrayOfItems[i]
        i+=1
        locAisle = arrayOfItems[i]
        i+=1
        locCol = arrayOfItems[i]
        i+=1
        locRow = arrayOfItems[i]
        i+=1
        listOfItems.append(Item(itemName,locCol,locRow,locAisle))
    printList(listOfItems)
    #While loop to make sure the code doesn't stop unless it is killed
    while(1):
        #Need to get input of what kind of user they are with authentification
        userType = raw_input('What kind of user are you? Customer or Employee? \n')
        if(userType.lower() == 'customer' or userType.lower() == 'employee'):
            loggedIn = 'yes'
            #While loop while the user is logged into a specific area
            while(loggedIn == 'yes'):
                #Find what they want to do
                if(userType.lower() == 'employee'):
                    # NOTE(review): hard-coded password; a wrong entry simply re-prompts.
                    password = raw_input('Password: ')
                    if(password == '123'):
                        while(loggedIn == 'yes'):
                            empAct = raw_input('Would you like to add new objects(1) or monitor misplaced items(2) or delete an item(3) or print the list(4) or log out(5)? ')
                            print '\n'
                            #if they are employee and in the placement mode
                            if(empAct == '1'):
                                #Find out all of the information from the employee and make an item and set it in the array
                                itemName = raw_input('What is the name of the item?(Full name of item) ')
                                print '\n'
                                aisle = raw_input('What aisle is it in?(Number location 1) ')
                                print '\n'
                                aisleCheck = 0
                                #Double check that the aisle exists
                                while (aisleCheck == 0):
                                    if((aisle == '1')):
                                        aisleCheck = 1
                                    else:
                                        aisle = raw_input("The desired aisle doesn't exist, try again ")
                                        print '\n'
                                #Check that the column input exists
                                locCol = raw_input('What column is it in?(Number location 1-3) ')
                                print '\n'
                                colCheck = 0
                                while (colCheck == 0):
                                    if(locCol == '1' or locCol == '2' or locCol == '3'):
                                        colCheck = 1
                                    else:
                                        locCol = raw_input("The desired column doesn't exist, try again ")
                                        print '\n'
                                #Check that the row actually exists
                                locRow = raw_input('What row is it in?(Number location 1-2) ')
                                print '\n'
                                rowCheck = 0
                                while (rowCheck == 0):
                                    if(locRow == '1' or locRow == '2'):
                                        rowCheck = 1
                                    else:
                                        locRow = raw_input("The desired row doesn't exist, try again ")
                                        print '\n'
                                #Check to make sure no other item is in the slot
                                locationUsed = Search.searchLocationList(listOfItems,aisle,locRow,locCol)
                                if(locationUsed == -1):
                                    #Add in the new item to the array
                                    #WNTBD find the weight of the item from the FSR code
                                    #WNTBD find the color differences and save what is needed
                                    listOfItems.append(Item(itemName,locCol,locRow,aisle))
                                    #Double check the item that is being added
                                    print 'Here is the item you just registered \nName %s\nAisle %s\nColumn %s\nRow %s\n'%(listOfItems[(len(listOfItems)-1)].name,listOfItems[(len(listOfItems)-1)].locAisle,listOfItems[(len(listOfItems)-1)].locCol,listOfItems[(len(listOfItems)-1)].locRow)
                                    # Persist the updated inventory immediately.
                                    UpdateDatabase.updateWrittenList(listOfItems)
                                else:
                                    print 'Location is already being used, nothing was done.\n'
                            if(empAct == '2'):
                                #This is where we will continually look for another input while checking the other sets for anything misplaced(WNTBD)
                                #WNTBD How can we take a kill input while it runs other code?
                                iteration = 0
                                #for item in listOfItems:
                                #    misplaced = 0
                                #if(listOfItems[iteration].weight != FSR check at the location)
                                #    misplaced = 1
                                #if(listOfItems[iteration].color != Check color sensor at location)
                                #    misplaced += 1
                                #
                                #if(misplaced == 2):
                                #    print 'There is an item misplaced in Aisle %s, Row %s, Column %s'%(listOfItems[iteration].locAisle,listOfItmes[iteration].locRow,listOfItems[iteration].locCol)
                                #iteration+=1
                                check = 0
                            if(empAct == '3'):
                                delete = raw_input('What is name of the item you would like to delete from the inventory? ')
                                print '\n'
                                location = Search.searchListDel(listOfItems,delete)
                                #Check to make sure that the item exists
                                if(location != -1):
                                    #Takes out the item and allows us to see the deleted item
                                    item = listOfItems.pop(location)
                                    print '%s item has been removed.\n'%(item.name)
                                    UpdateDatabase.updateWrittenList(listOfItems)
                                else:
                                    print "That item didn't exist in our inventory.\n"
                            if(empAct == '4'):
                                printList(listOfItems)
                            if(empAct == '5'):
                                loggedIn = 'no'
                                print 'Logged off\n\n'
                #Find if the user is a customer
                if(userType.lower() == 'customer'):
                    #Now we need to make a profile for them
                    username = raw_input('What username would you like to use? ')
                    print '\n'
                    userColor = '0'
                    colorCheck = 0
                    #Go through the checking process for what color they want to use
                    while (colorCheck == 0):
                        userColor = raw_input('What color would you like to light up with?(red, green, blue, white, yellow, purple): ')
                        print '\n'
                        if(userColor.lower() == 'red' or userColor.lower() == 'green' or userColor.lower() == 'blue' or userColor.lower() == 'white' or userColor.lower() == 'yellow' or userColor.lower() == 'purple'):
                            colorCheck = 1
                        else:
                            # NOTE(review): the retry input is stored in locRow, not
                            # userColor, so it is discarded and the loop re-prompts
                            # at the top -- likely a copy/paste bug.
                            locRow = raw_input("The desired color doesn't exist, try again ")
                            print '\n'
                    #Set the username and the color
                    user = User(username,userColor)
                    while(loggedIn == 'yes'):
                        userAction = raw_input('Would you like to Search(1) or log out(2)? ')
                        print '\n'
                        if(userAction == '1'):
                            location = Search.searchNameList(listOfItems)
                            if(location != -1):
                                print '%s, we found %s in the inventory.\n'%(user.userName,listOfItems[location].name)
                                print '%s can be found in Aisle %s, Row %s, and Column %s\n'%(listOfItems[location].name,listOfItems[location].locAisle,listOfItems[location].locRow,listOfItems[location].locCol)
                                search = raw_input('Would you like the item location to light up? (y or n): ')
                                print '\n'
                                while(search == 'y'):
                                    #Find the strip that needs to be lit up
                                    # NOTE(review): stripPin is only assigned for
                                    # aisle 1/2 with row 1/2; any other stored
                                    # location would raise NameError below.
                                    if(listOfItems[location].locAisle == '1'):
                                        if(listOfItems[location].locRow == '1'):
                                            stripPin = 21 #This sets pin 21 to the top row on the first aisle
                                        elif(listOfItems[location].locRow == '2'):
                                            stripPin = 20 #This sets pin 20 to the bottom row of the first aisle
                                    elif(listOfItems[location].locAisle == '2'):
                                        if(listOfItems[location].locRow == '1'):
                                            stripPin = 16 #This sets pin 16 to the top row on the second aisle
                                        elif(listOfItems[location].locRow == '2'):
                                            stripPin = 12 #This sets pin 12 to the bottom row of the second aisle
                                    runLEDS(user,listOfItems,location,stripPin)
                                    search = raw_input('Would you like to light the item location up again?(y or n): ')
                            else:
                                print'%s, unfortunately that item is not in our stock.\n'%(user.userName)
                        if(userAction == '2'):
                            loggedIn = 'no'
                            print 'Logged off\n\n'
        if(userType.lower() != 'customer' and userType.lower() != 'employee'):
            print 'Your input was invalid\n'
def printList(list):
    # Print every item's name and location, one record per call to print.
    # NOTE(review): the parameter shadows the built-in `list`; renaming it
    # (e.g. to `items`) would be safer if callers are checked first.
    i = 0
    while i < len(list):
        print 'Here is the item you just read \nName %s\nAisle %s\nColumn %s\nRow %s'%(list[i].name,list[i].locAisle,list[i].locCol,list[i].locRow)
        i+=1
def runLEDS(user,listOfItems,location,stripPin):
    #Runs the LEDs to light up for a period of time
    # Light the item's column segment (column * 10 as start index, 20 LEDs)
    # in the user's colour, hold 5 seconds, then blank the strip.
    strandtest.setColors(user.RGB,int(float((listOfItems[location].locCol))*10),20,stripPin)
    time.sleep(5)
    # [0,0,0] = off; indices 0..60 cover the whole strip.
    strandtest.setColors([0,0,0],0,60,stripPin)
#Run the LED code for dark across all of them
# Script entry point.
main()
|
def imprimir_lista(v1, v2, v3):
    """Print the three values separated by single spaces; returns None."""
    print('{} {} {}'.format(v1, v2, v3))
# Demo: print three numbered values.
vv0 = 0
vv1 = 1
vv2 = 2
imprimir_lista(vv0,vv1,vv2)
def lista_de_compras(pessoa, *args):
    """Print a shopping list for *pessoa*: a header line, one line per item,
    then a blank separator line. Returns None."""
    lines = [f'Lista de compras de {pessoa}:']
    lines.extend(str(item) for item in args)
    lines.append('')
    print('\n'.join(lines))
# Demo calls with varying numbers of items.
lista_de_compras('João', 'miojo', 'pão')
lista_de_compras('Obede', 'Pão de queijo', 'Bife', 'Batata')
lista_de_compras('Maria','pizza')
def lista_de_compras_dic(**kwargs):
    """Print a note when the list contains a fruit (a 'fruta' keyword),
    then echo the whole keyword dict. Returns None."""
    fruta = kwargs.get('fruta')  # optional 'fruta' entry, None when absent
    if fruta is not None:
        print(f'Na lista de compras há uma fruta: {fruta}')
    print(f'{kwargs}')
# Demo calls: one list with a fruit, one without.
lista_de_compras_dic(fruta='banana', massas='nhoque', verdura='alface')
lista_de_compras_dic(bebida='vinho', sorvete='flocos')
|
from tests.base_case import ChatBotTestCase
from chatterbot.logic import SpecificResponseAdapter
from chatterbot.conversation import Statement
class SpecificResponseAdapterTestCase(ChatBotTestCase):
    """
    Test cases for the SpecificResponseAdapter
    """

    def setUp(self):
        super().setUp()
        # Adapter that should return the canned output only for the exact input text.
        self.adapter = SpecificResponseAdapter(
            self.chatbot,
            input_text='Open sesame!',
            output_text='Your sesame seed hamburger roll is now open.'
        )

    def test_exact_match(self):
        """
        Test the case that an exact match is given.
        """
        statement = Statement(text='Open sesame!')
        match = self.adapter.process(statement)

        # Exact input text yields full confidence and the canned response.
        self.assertEqual(match.confidence, 1)
        self.assertEqual(match, self.adapter.response_statement)

    def test_not_exact_match(self):
        """
        Test the case that an exact match is not given.
        """
        statement = Statement(text='Open says me!')
        match = self.adapter.process(statement)

        # Non-matching input yields zero confidence but still returns the
        # adapter's response statement object.
        self.assertEqual(match.confidence, 0)
        self.assertEqual(match, self.adapter.response_statement)
|
class Box(object):
    """Wrapper for an axis-aligned object-detection box.

    Coordinates follow image convention (y grows downward), so
    ``top < bottom`` and ``left < right``.
    """
    def __init__(self, info: dict):
        self.left = info["left"]
        self.right = info["right"]
        self.top = info["top"]
        self.bottom = info["bottom"]

    def horizontal_intersection(self, other):
        """Length of the horizontal overlap with *other* (0 if disjoint)."""
        return max(min(self.right, other.right) - max(self.left, other.left), 0)

    def vertical_intersection(self, other):
        """Length of the vertical overlap with *other* (0 if disjoint)."""
        return max(min(self.bottom, other.bottom) - max(self.top, other.top), 0)

    def intersection(self, other):
        """Overlap area with *other*."""
        h_intersect = self.horizontal_intersection(other)
        v_intersect = self.vertical_intersection(other)
        return h_intersect * v_intersect

    def iou(self, other):
        """Intersection-over-union with *other* (in [0, 1])."""
        intersect = self.intersection(other)
        return intersect / (self.area + other.area - intersect)

    @property
    def height(self):
        return self.bottom - self.top

    @property
    def width(self):
        return self.right - self.left

    @property
    def area(self):
        return self.height * self.width

    @property
    def v_center(self):
        # Fix: previously returned (top - bottom) / 2, i.e. minus half the
        # height, not a coordinate. The vertical center is the midpoint.
        return (self.top + self.bottom) / 2

    @property
    def h_center(self):
        # Fix: previously returned (right - left) / 2, i.e. half the width.
        # The horizontal center is the midpoint.
        return (self.left + self.right) / 2
|
from pyramid.view import view_config
from .MainController import MainController
from .ErrorCodeController import ErrorCodeController
from .WebParameters import WebParameters
from pyquery import PyQuery as pq
from .Scrapper import Scrapper, urlencode
import base64
import logging
log = logging.getLogger(__name__)
class MainViews(object):
    """Pyramid view handlers for the public flight-search endpoints."""
    def __init__(self, request):
        self.request = request
        # per-request controller; also used for request-completeness checks
        self.reqon = MainController( request )
    @view_config(route_name='home', renderer='templates/mytemplate.jinja2')
    def my_view(self):
        """Landing page."""
        return {'project': 'rianoil-public'}
    @view_config(route_name='lion-search', renderer='jsonp')
    def lion_search(self):
        """Scrape Lion Air's booking site for the caller's search and return the
        flight matrix HTML, base64-encoded, as JSONP.

        On any failure, maps the exception through ErrorCodeController and
        returns a status/message payload instead.
        """
        try:
            # validate that all required search parameters are present
            self.reqon.checkComplete(self.reqon.REQ_IGNITE_SEARCH)
            scrapper = Scrapper()
            url = 'https://secure2.lionair.co.id/lionairibe2/OnlineBooking.aspx'
            # warm-up request -- presumably establishes session cookies; its body is unused (TODO confirm)
            response_data = scrapper.requestData(url=url, file_log_name="lion1.html")
            # build the booking query string from caller-supplied parameters
            web_parameters = WebParameters()
            web_parameters.setDepartureCityCode(self.request.params['departure_city_code'])
            web_parameters.setDestinationCityCode(self.request.params['destination_city_code'])
            web_parameters.setDepartureDate(self.request.params['departure_date'])
            web_parameters.setReturnDate(self.request.params['return_date'])
            web_parameters.setTripType(self.request.params['round_trip'])
            web_parameters.setAdultNumber(self.request.params['adult_count'])
            web_parameters.setChildNumber(self.request.params['child_count'])
            web_parameters.setInfantNumber(self.request.params['infant_count'])
            values = web_parameters.paramsForLion()
            urlref = url + '?{}'.format(urlencode(values))
            # two-step fetch: submit the search, then reload the results page
            # with the search URL as referer (site expects this sequence)
            response_data = scrapper.requestData(referer = urlref, url = urlref, file_log_name="lion2.html")
            response_data = scrapper.requestData(referer = urlref, url = url, file_log_name="lion3.html")
            if response_data is None or response_data == b'':
                raise Exception('Result is None')
            # concatenate every flight-matrix fragment from the results page
            result_html = ''
            for per_content in pq(response_data)('.content')('.flight-matrix-container'):
                result_html += pq(per_content).html()
            # base64 so the HTML survives the JSONP transport untouched
            result = base64.b64encode(result_html.encode()).decode()
            return {'code' : 'OK', 'message' : 'result is base64 encoded', 'content' : result}
        except Exception as e:
            log.exception('Error ignite_search')
            # translate the exception into a user-facing error code/message
            err_conf = ErrorCodeController( exception = e )
            err_code_no = err_conf.getErrorCodeNo()
            err_code_status = err_conf.getErrorStatus()
            message_to_end_user = err_conf.getMessage2EndUser()
            if message_to_end_user is None:
                message_to_end_user = 'Gagal ignite_search. Kode {}'.format(err_code_no)
            return {'status' : err_code_status, 'message' : message_to_end_user, 'content' : None}
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
class ProjectForm(FlaskForm):
    """Form for creating a project: required project and team names."""
    projectName = StringField("projectName", validators=[DataRequired()])  # must be non-empty
    teamName = StringField("teamName", validators=[DataRequired()])  # must be non-empty
    submit = SubmitField("Submit")
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# dataGrabFl.py
#
# grab data from FL state websites
#
#
# ==============================================================================
# -- imports -------------------------------------------------------------------
# ==============================================================================
from __future__ import print_function
import os
from os.path import isfile, join
import pandas as pd
import csv
import datetime
import urllib
import re
import requests
from lxml import html
import numpy as np
import os
from selenium import webdriver
from time import sleep
import time
from datetime import date
from datetime import timedelta
#from gi.repository import Poppler, Gtk
# ==============================================================================
# -- codes -------------------------------------------------------------------
# ==============================================================================
# class for dataGrab
class dataGrabWV(object):
    """Grab per-county COVID-19 data from a state health-department website.

    NOTE(review): the class is named dataGrabWV (and matches the WV 'Daily
    Update' page layout) while the file header says FL -- the configured URL
    decides which site is actually scraped; confirm against the caller.
    """

    ## the start entry of this class
    def __init__(self, l_config, n_state):
        """Store the state name/config and derive the working directory."""
        print("welcome to dataGrab")
        self.state_name = n_state
        self.state_dir = './'+n_state.lower()+'/'
        self.l_state_config = l_config
        self.name_file = ''   # set later by parseData
        self.now_date = ''    # set later by parseData

    ## save to csv
    def save2File(self, l_data, csv_name):
        """Write rows of (County, Cases, Deaths) to csv_name, prepending a
        header row when the data does not already start with one."""
        # 'with' guarantees the handle is closed even if a write fails
        # (the original left the file open on any csv error)
        with open(csv_name, 'w') as csv_data_f:
            csvwriter = csv.writer(csv_data_f)
            # make sure the 1st row is column names
            if 'County' not in str(l_data[0][0]):
                csvwriter.writerow(['County', 'Cases', 'Deaths'])
            for a_row in l_data:
                csvwriter.writerow(a_row)

    ## open a website
    def open4Website(self, fRaw):
        """Return the href of yesterday's 'COVID-19 Daily Update' link, or ''.

        fRaw is kept for interface compatibility but is unused (the
        urllib.urlretrieve path it supported was removed).
        """
        csv_url = self.l_state_config[5][1]
        print(' search website', csv_url)
        # fetch the index page and collect every anchor on it
        c_page = requests.get(csv_url)
        c_tree = html.fromstring(c_page.content)
        l_dates = c_tree.xpath('//a')
        today = date.today()
        print("Today is: ", today)
        # the site publishes data for the previous day
        yesterday = today - timedelta(days=1)
        print("Yesterday was: ", yesterday)
        dt_obj = str(yesterday)
        print("++++++++++++++ ", dt_obj)
        print("++++++++++++++ ", type(dt_obj))
        dt_obj = datetime.datetime.strptime(dt_obj, '%Y-%m-%d')
        datall = dt_obj.strftime('%m-%d-%Y')
        print('ddddddddddddddd', datall)
        a_address = ''
        for l_date in l_dates:
            # match only the month part (MM) of the date, as the site titles do
            # NOTE(review): datall[:2] matches month only -- presumably intended; verify
            if ('COVID-19 Daily Update ' + datall[:2]) in l_date.text_content():
                print(' sss', l_date)
                a_address = l_date.get('href')
                break
        return a_address

    ## locate the download address
    def dataDownload(self, name_target):
        """Ensure the raw-data directory exists, then return the page address
        found by open4Website ('' when no link was found)."""
        print(' A.dataDownload', name_target)
        f_namea = self.state_dir + 'data_raw/'+self.state_name.lower()+'_covid19_'+name_target+'.aspx'
        if not os.path.isdir(self.state_dir + 'data_raw/'):
            os.mkdir(self.state_dir + 'data_raw/')
        a_address = self.open4Website(f_namea)
        if a_address == '':
            print (' No address of downloading PDF is found')
            return ('')
        return a_address

    ## read confirmed cases
    def dataReadConfirmed(self, f_name):
        """Render the dashboard at f_name with Selenium, scrape per-county case
        counts, write them as CSV, and return rows of [County, Cases, Deaths]
        with a trailing 'Total' row (deaths are not on this page, so 0)."""
        siteOpen = webdriver.Chrome()
        try:
            siteOpen.get(f_name)
            time.sleep(7)  # let the dashboard's JavaScript render
            caseNumbers = siteOpen.find_elements_by_xpath('//font[@size="3"]')
            case_num_list = []
            for case_num in caseNumbers:
                dStringList = case_num.text.split()
                # the county table is the element that mentions 'Barbour'
                if 'Barbour' in dStringList:
                    print(' ------------case_num', dStringList)
                    case_num_list = dStringList
        finally:
            # BUG FIX: the browser was never closed, leaking a Chrome process per call
            siteOpen.quit()
        # tokens alternate County, Count, County, Count, ... after a heading token
        # BUG FIX: use // so the shape stays an int under Python 3 division too
        l_cases2 = np.reshape(case_num_list[1:], (len(case_num_list[1:]) // 2, 2)).T
        print('ccccccccccccccc', l_cases2)
        cases = []
        for c_c in l_cases2[1]:
            # strip parentheses / thousands separators / stray dots from counts
            c_d = c_c.replace('(', '').replace(')', '').replace(',', '').replace('.', '')
            cases.append(c_d)
        zeros = [0] * len(l_cases2[0])
        l_data = np.vstack((l_cases2[0], cases, zeros)).T
        print('ffffffffff', l_data)
        # append a totals row
        case = 0
        death = 0
        for a_da in l_data:
            case += int(a_da[1])
            death += int(a_da[2])
        l_cases3 = np.append(l_data, [['Total', case, death]], axis=0)
        self.save2File(l_cases3, self.state_dir + 'data/'+self.state_name.lower()+'_covid19_'+self.name_file+'.csv')
        print('dddddddddddddddddd', l_cases3)
        return (l_cases3)

    ## top-level entry point
    def parseData(self, name_target, date_target, type_download):
        """Download the data link and scrape it; return (rows, name, date).

        Returns ([], name_target, '') when no download address is found.
        type_download is kept for interface compatibility (unused).
        """
        self.name_file = name_target
        self.now_date = date_target
        # Step A: locate the data page
        f_targeta = self.dataDownload(name_target)
        if f_targeta == '':
            return ([], name_target, '')
        # Step B: scrape confirmed cases
        l_d_sort = self.dataReadConfirmed(f_targeta)
        return (l_d_sort, self.name_file, self.now_date)
## end of file
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import softmax
from tqdm import tqdm
import pickle
import sys
# Analogy-benchmark section names (Google word-analogy dataset categories)
labels = ["capital-common-countries",
          "capital-world",
          "currency",
          "city-in-state",
          "family",
          "gram1-adjective-to-adverb",
          "gram2-opposite",
          "gram3-comparative",
          "gram4-superlative",
          "gram5-present-participle",
          "gram6-nationality-adjective",
          "gram7-past-tense",
          "gram8-plural",
          "gram9-plural-verbs"]
# Hard-coded benchmark results for the three embeddings (one score per section)
our_scores = [0.0011277068060088402, 0.0011216120248049599, 0.0011436999813109361, 0.0011132382449654716, 0.0011526468554336814, 0.0010981989725253024, 0.0010550457019624965, 0.0011676809744903811, 0.0011533618632391918, 0.001125787833161391, 0.0011275413023786103, 0.0011075178897068, 0.001157652988995624, 0.001150372426892663]
glove_scores = [0.7368733300118644, 0.6115903193793514, 0.09013083528494338, 0.11751395841244312, 0.39422096995222206, 0.08111536404535909, 0.08267367024341611, 0.32381551899065314, 0.2814888079361966, 0.14964578002432458, 0.877407380352127, 0.12696211268288082, 0.1551990816200274, 0.27880771204267313]
random_scores = [3.358321351958763e-06, 4.673111574007968e-06, 3.268578763674968e-06, 1.541741611840181e-06, 1.9087878094016046e-06, 3.081952496547868e-06, 1.4544415895213011e-06, 3.829251558967892e-06, 1.9843045661471394e-06, 2.698178046367704e-06, 2.176430960583065e-06, 4.612700033715085e-06, 2.2493188336098928e-06, 7.109939203962873e-06]
# Set up bar plot
# Matplot code from https://matplotlib.org/stable/gallery/lines_bars_and_markers/barchart.html
x = np.arange(len(labels))  # the label locations
width = 0.15  # the width of the bars
fig, ax = plt.subplots()
exp = lambda x: np.e**(x)
log = lambda x: np.log(x)
# log scale so the tiny 'ours'/'random' scores remain visible next to GloVe
ax.set_yscale('log')#'function', functions=(exp, log))
# three bar groups, offset by multiples of `width`
# NOTE(review): ticks are set at x while bars start at x + width with
# align='edge', so tick labels sit slightly left of the groups -- confirm intended
rects1 = ax.bar(x + width, our_scores, width, label='Ours', align='edge')
rects2 = ax.bar(x + 2*width, glove_scores, width, label='Glove', align='edge')
rects3 = ax.bar(x + 3*width, random_scores, width, label='Random', align='edge')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Scores')
ax.set_title('Scores by Embeddings in Each Section')
ax.set_xticks(x)
ax.set_xticklabels(labels, rotation=45, fontsize=8)
ax.legend()
ax.bar_label(rects1, padding=3)
ax.bar_label(rects2, padding=3)
ax.bar_label(rects3, padding=3)
# NOTE(review): bottom=0 is not representable on a log axis; matplotlib will
# clip or ignore it -- confirm the intended lower limit
plt.gca().set_ylim(bottom=0, top=1)
# fig.tight_layout()
plt.show()
# -*- coding: utf-8 -*-
from flask import Flask
# BUG FIX: the flask.ext.* import namespace was removed in Flask 1.0;
# extensions must be imported from their own package.
from flask_socketio import SocketIO
from flask_bootstrap import Bootstrap
app = Flask(__name__)
Bootstrap(app)
app.debug = True  # development only -- disable in production
app.config['SECRET_KEY'] = 'secret!'  # NOTE(review): move the secret out of source for production
socketio = SocketIO(app)
# imported last so `views` can import `app`/`socketio` from this module
# (deliberate circular-import pattern common in small Flask apps)
import views
# python assignment | day 2
# Tutorial script demonstrating the default methods of Python's built-in
# collections (list, dict, set, tuple) and of str.  Each snippet rebuilds its
# sample data, applies one method, and prints the result.
#que 1[list and its default methods ]
#Append list2 into list1:
list1 = ["a", "b" , "c"]
list2 = [1, 2, 3]
for x in list2:
    list1.append(x)
print(list1)
#output :['a', 'b', 'c', 1, 2, 3]
#Make a copy of a list with the copy() method:
thislist = ["apple", "banana", "cherry"]
mylist = thislist.copy()
print(mylist)
#output:['apple', 'banana', 'cherry']
#The clear() method empties the list:
thislist = ["apple", "banana", "cherry"]
thislist.clear()
print(thislist)
#output:[]
#Insert an item as the second position:
thislist = ["apple", "banana", "cherry"]
thislist.insert(1, "orange")
print(thislist)
#output:['apple', 'orange', 'banana', 'cherry']
#The remove() method removes the specified item:
thislist = ["apple", "banana", "cherry"]
thislist.remove("banana")
print(thislist)
#output:['apple', 'cherry']
# que 2 [dictionary and its default functions]
#Create and print a dictionary:
thisdict = {
  "brand": "Ford",
  "model": "Mustang",
  "year": 1964
}
print(thisdict)
#Accessing items
#Change the "year" to 2018:
thisdict = {
  "brand": "Ford",
  "model": "Mustang",
  "year": 1964
}
thisdict["year"] = 2018
#Removing Items
thisdict = {
  "brand": "Ford",
  "model": "Mustang",
  "year": 1964
}
thisdict.pop("model")
print(thisdict)
#The popitem() method removes the last inserted item:
thisdict = {
  "brand": "Ford",
  "model": "Mustang",
  "year": 1964
}
thisdict.popitem()
print(thisdict)
#The del keyword removes the item with the specified key name:
thisdict = {
  "brand": "Ford",
  "model": "Mustang",
  "year": 1964
}
del thisdict["model"]
print(thisdict)
#The del keyword can also delete the dictionary completely:
thisdict = {
  "brand": "Ford",
  "model": "Mustang",
  "year": 1964
}
del thisdict
#The clear() method empties the dictionary:
thisdict = {
  "brand": "Ford",
  "model": "Mustang",
  "year": 1964
}
thisdict.clear()
print(thisdict)
#Copy a Dictionary
#Make a copy of a dictionary with the copy() method:
thisdict = {
  "brand": "Ford",
  "model": "Mustang",
  "year": 1964
}
mydict = thisdict.copy()
print(mydict)
#que 3[sets and its default functions]
#Create a Set:
thisset = {"apple", "banana", "cherry"}
print(thisset)
#Access Items
thisset = {"apple", "banana", "cherry"}
for x in thisset:
  print(x)
#Check if "banana" is present in the set:
thisset = {"apple", "banana", "cherry"}
print("banana" in thisset)
#Add Items
thisset = {"apple", "banana", "cherry"}
thisset.add("orange")
print(thisset)
#Add multiple items to a set, using the update() method:
thisset = {"apple", "banana", "cherry"}
thisset.update(["orange", "mango", "grapes"])
print(thisset)
#Get the number of items in a set:
thisset = {"apple", "banana", "cherry"}
print(len(thisset))
#Remove Item
thisset = {"apple", "banana", "cherry"}
thisset.remove("banana")
print(thisset)
#Remove "banana" by using the discard() method:
thisset = {"apple", "banana", "cherry"}
thisset.discard("banana")
print(thisset)
#Remove the last item by using the pop() method (sets are unordered, so the
#removed element is arbitrary):
thisset = {"apple", "banana", "cherry"}
x = thisset.pop()
print(x)
print(thisset)
#The clear() method empties the set:
thisset = {"apple", "banana", "cherry"}
thisset.clear()
print(thisset)
#The del keyword will delete the set completely:
thisset = {"apple", "banana", "cherry"}
del thisset
#Join Two Sets
#The union() method returns a new set with all items from both sets:
set1 = {"a", "b" , "c"}
set2 = {1, 2, 3}
set3 = set1.union(set2)
print(set3)
#The update() method inserts the items in set2 into set1:
set1 = {"a", "b" , "c"}
set2 = {1, 2, 3}
set1.update(set2)
print(set1)
#que 4[tuple and explore default method]
#Create a Tuple:
thistuple = ("apple", "banana", "cherry")
print(thistuple)
#Print the second item in the tuple:
thistuple = ("apple", "banana", "cherry")
print(thistuple[1])
#Print the last item of the tuple:
thistuple = ("apple", "banana", "cherry")
print(thistuple[-1])
#The del keyword can delete the tuple completely:
thistuple = ("apple", "banana", "cherry")
del thistuple
#this will raise an error because the tuple no longer exists
#Join two tuples:
tuple1 = ("a", "b" , "c")
tuple2 = (1, 2, 3)
tuple3 = tuple1 + tuple2
print(tuple3)
#que 5[string and explore default methods ]
#Get the characters from position 2 to position 5 (not included):
b = "Hello, World!"
print(b[2:5])
#The len() function returns the length of a string:
a = "Hello, World!"
print(len(a))
#The strip() method removes any whitespace from the beginning or the end:
a = " Hello, World! "
print(a.strip()) # returns "Hello, World!"
#The lower() method returns the string in lower case:
a = "Hello, World!"
print(a.lower())
#The upper() method returns the string in upper case:
a = "Hello, World!"
print(a.upper())
#The replace() method replaces a string with another string:
a = "Hello, World!"
print(a.replace("H", "J"))
#The split() method splits the string into substrings if it finds instances of the separator:
a = "Hello, World!"
print(a.split(",")) # returns ['Hello', ' World!']
# output of entire program (transcript, commented out)
# BUG FIX: this interpreter output had been pasted as bare source; lines such
# as `Hello, World!` are SyntaxErrors and prevented the whole file from running.
# ['a', 'b', 'c', 1, 2, 3]
# ['apple', 'banana', 'cherry']
# []
# ['apple', 'orange', 'banana', 'cherry']
# ['apple', 'cherry']
# {'brand': 'Ford', 'model': 'Mustang', 'year': 1964}
# {'brand': 'Ford', 'year': 1964}
# {'brand': 'Ford', 'model': 'Mustang'}
# {'brand': 'Ford', 'year': 1964}
# {}
# {'brand': 'Ford', 'model': 'Mustang', 'year': 1964}
# {'cherry', 'banana', 'apple'}
# cherry
# banana
# apple
# True
# {'orange', 'cherry', 'banana', 'apple'}
# {'banana', 'grapes', 'orange', 'mango', 'apple', 'cherry'}
# 3
# {'cherry', 'apple'}
# {'cherry', 'apple'}
# cherry
# {'banana', 'apple'}
# set()
# {'c', 1, 2, 3, 'a', 'b'}
# {'c', 1, 2, 3, 'a', 'b'}
# ('apple', 'banana', 'cherry')
# banana
# cherry
# ('a', 'b', 'c', 1, 2, 3)
# llo
# 13
# Hello, World!
# hello, world!
# HELLO, WORLD!
# Jello, World!
# ['Hello', ' World!']
|
import npyscreen
import textwrap
from ui.box_title_color import BoxTitleColor
class SenderBox(BoxTitleColor):
    """Box widget that accumulates outgoing text and re-wraps it to fit."""

    _contained_widget = npyscreen.MultiLineEdit

    def clear_text(self):
        """Drop the widget's current value and redraw it."""
        self.entry_widget.value = ""
        self.entry_widget.display()

    def add_text(self, text):
        """Append *text* to the buffered lines, re-wrap, scroll to the end, redraw."""
        existing = "".join(self.entry_widget.values)
        combined = existing + text
        wrapper = textwrap.TextWrapper(
            width=self.entry_widget.width - 1,
            replace_whitespace=False,
            drop_whitespace=False,
            break_long_words=False,
        )
        self.entry_widget.values = wrapper.wrap(text=combined)
        self.entry_widget.buffer([], scroll_end=True, scroll_if_editing=False)
        self.entry_widget.display()

    def get_text(self):
        """Return the widget's raw (unwrapped) value string."""
        return self.entry_widget.value
|
class Car():
    """A simple car with a make, model, year and an odometer reading."""
    def __init__(self,make,model,year): #
        self.make = make
        self.model = model
        self.year = year
        # a new car starts with a zeroed odometer (used by update_odoment)
        self.odometer = 0
    def get_descriptive_name(self):
        """Return a title-cased '<year> <make> <model>' description."""
        long_name = str(self.year) + " " + self.make + " " + self.model
        return long_name.title()
    def update_odoment(self,mileage):
        """
        Set the odometer reading to the given value; rolling the odometer
        back is forbidden, so values below the current reading are ignored.

        BUG FIX: the method previously had only this contract as a docstring
        and no body at all -- it silently did nothing.
        :param mileage: new odometer reading
        :return: None
        """
        if mileage >= self.odometer:
            self.odometer = mileage
class ElectricCar(Car):
|
import os
import logging
import time
from datetime import date, datetime
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api.taskqueue import Task
from twilio import twiml
from twilio import TwilioRestException
from twilio.rest import TwilioRestClient
import configuration
import timezone
#
# Valid request formats:
#     xx <message>        - minutes until reminder
#     xxd <message>       - days until reminder
#     x:xx[ap] <message>  - time of current day (am or pm)
#
# Single-letter shortcut codes mapped to a number of minutes.
shortcuts = { 'a':5, 'd':10, 'g':15, 'j':30, 'm':60 }
class RequestLog(db.Model):
    """Datastore record of every inbound SMS request."""
    phone = db.StringProperty(indexed=True)  # sender's phone number
    date = db.DateTimeProperty(auto_now_add=True)  # set automatically on insert
    request = db.StringProperty()  # raw message body
## end
class ReminderTaskHandler(webapp.RequestHandler):
    """Task-queue target: sends the reminder SMS when the countdown fires."""
    def post(self):
        # task parameters set by createTask()
        phone = self.request.get('phone')
        msg = self.request.get('msg')
        try:
            client = TwilioRestClient(configuration.TWILIO_ACCOUNT_SID,
                                      configuration.TWILIO_AUTH_TOKEN)
            logging.debug('sending SMS - %s - to %s' % (msg,phone))
            # NOTE: return value kept for symmetry with the API; it is unused
            message = client.sms.messages.create(to=phone,
                                                 from_=configuration.TWILIO_CALLER_ID,
                                                 body=msg)
        except TwilioRestException,te:
            # log and swallow: the task should not retry forever on API errors
            logging.error('Unable to send SMS message! %s' % te)
## end
class MainHandler(webapp.RequestHandler):
    """Parses an inbound SMS, schedules the reminder task, and replies only
    when the request could not be understood."""
    def post(self):
        # who called? and what did they say?
        phone = self.request.get("From")
        body = self.request.get("Body")
        logging.debug('New request from %s : %s' % (phone, body))
        createLog(phone,body)
        cmd = body.split()
        # ROBUSTNESS FIX: an empty body used to raise IndexError on cmd[0]
        if not cmd:
            self.response.out.write(smsResponse('<minutes>, <hours>h, <days>d or hh:mm <reminder-message>'))
            return
        # everything to the left of the first space is the command, and
        # everything to the right of the first space is the reminder message
        command = cmd[0]
        logging.debug('new request command is %s' % command)
        # BUG FIX: the old loop skipped EVERY word equal to the command token,
        # so a message repeating it (e.g. "30 take 30 pills") lost words.
        msg = ''
        for m in cmd[1:]:
            msg += m + ' '
        # parse the command
        # a shortcut code request
        if not command.isdigit() and len(command) == 1:
            # single letters are default minute values
            # a = 5   d = 10   g = 15   j = 30   m = 60
            command = command.lower()
            if command not in shortcuts:
                response = 'illegal shortcut code - a, d, g, j, m are the only valid shortcuts'
            else:
                mins = shortcuts[command]
                createTask(phone, msg, mins * 60)
                response = "got it. we'll remind you in %s minutes" % mins
        # a minute request
        elif command.isdigit():
            # create a task in <command> minutes
            createTask(phone, msg, int(command)*60)
            response = "got it. we'll remind you in %s minutes" % command
        # an hour request
        elif command.lower().find('h') > 0:
            # create a task in a certain number of hours
            hours = command.lower().split('h')[0]
            sec = int(hours) * 60 * 60
            createTask(phone, msg, sec)
            # BUG FIX: confirmation previously said "day" for hour requests
            response = "got it. we'll remind you in %s hours" % hours
        # a day request
        elif command.lower().find('d') > 0:
            # create a task in a certain number of days
            days = command.lower().split('d')[0]
            sec = int(days) * 24 * 60 * 60
            createTask(phone, msg, sec)
            response = "got it. we'll remind you in %s day" % days
        # a specific time request
        elif command.find(':') > 0:
            # create a task at a specified time (today, local timezone)
            local = timezone.LocalTimezone()
            tod = datetime.strptime(command, "%H:%M")
            eta = datetime.combine(date.today(), tod.time()).replace(tzinfo=local)
            now = datetime.now(local)
            delta = eta - now
            # NOTE(review): if eta is already past, delta is negative and
            # .seconds wraps within the day -- confirm this is intended
            createTask(phone, msg, delta.seconds)
            response = "got it. we'll remind you at %s" % eta
            logging.debug('ETA : %s' % eta)
            logging.debug('... now : %s' % now)
            logging.debug('... delta : %s' % delta.seconds)
        else:
            response = '<minutes>, <hours>h, <days>d or hh:mm <reminder-message>'
        # only reply when the request failed; positive feedback is suppressed
        if response.lower().find('got it') == -1:
            self.response.out.write(smsResponse(response))
        else:
            logging.debug('NOT sending response to caller : %s' % response)
        return
## end MainHandler
class IndexHandler(webapp.RequestHandler):
    """Catch-all GET handler: there is no real web UI, just a pointer to the author."""
    def get(self):
        self.response.out.write("You've wondered to a strange place my friend. <a href=http://twitter.com/gregtracy>@gregtracy</a>")
## end IndexHandler
def smsResponse(msg):
    """Wrap msg in a TwiML response containing a single Sms verb."""
    reply = twiml.Response()
    reply.append(twiml.Sms(msg))
    return reply
def createTask(phone,msg,sec):
    """Queue a task that POSTs to /reminder after `sec` seconds."""
    # NOTE: int(sec)/60 is integer division on Python 2, so this logs whole minutes
    logging.debug("Creating new task to fire in %s minutes" % str(int(sec)/60))
    task = Task(url='/reminder',
                params={'phone':phone,'msg':msg},
                countdown=sec)
    # push onto the dedicated 'reminders' queue
    task.add('reminders')
# end
def createLog(phone,request):
    """Persist an incoming SMS request to the datastore for auditing."""
    entry = RequestLog()
    entry.phone = phone
    entry.request = request
    entry.put()
def main():
    """Configure logging and run the WSGI app with its route table."""
    logging.getLogger().setLevel(logging.DEBUG)
    # /sms and /test share the same handler; /.* is the catch-all
    application = webapp.WSGIApplication([('/sms', MainHandler),
                                          ('/test', MainHandler),
                                          ('/reminder', ReminderTaskHandler),
                                          ('/.*', IndexHandler)
                                          ],
                                         debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()
|
import numpy as np
from PIL import Image
from load_mnist import *
from functions_final import *
from config import *
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
import time
def visualize (w1, hyper_para, epoch, show_flag ) :
    """Save a grid image of the first-layer weight filters (28x28 each).

    Only handles w1 of shape (784, n) with n in {50, 100, 200, 500}.
    NOTE(review): for other widths nrow/ncol stay undefined (NameError), and
    for n == 500 only 20x10 = 200 of the 500 filters are shown -- confirm.
    show_flag is currently unused (the plt.show() path is commented out).
    """
    date = time.strftime("%Y-%m-%d_%H_%M")
    if w1.shape[0] == 784:
        if w1.shape[1] == 50:
            nrow = 10
            ncol = 5
        if w1.shape[1] == 100:
            nrow = 10
            ncol = 10
        if w1.shape[1] == 200:
            nrow = 20
            ncol = 10
        if w1.shape[1] == 500:
            nrow = 20
            ncol = 10
    fig = plt.figure(figsize=(ncol + 1, nrow + 1))
    # zero spacing between the filter tiles
    gs = gridspec.GridSpec(nrow, ncol,
                           wspace=0.0, hspace=0.0,
                           top=1. - 0.5 / (nrow + 1), bottom=0.5 / (nrow + 1),
                           left=0.5 / (ncol + 1), right=1 - 0.5 / (ncol + 1))
    cnt = 0
    for i in range(nrow):
        for j in range(ncol):
            # each column of w1 is one filter, reshaped back to the image grid
            im = np.reshape(w1[:, cnt], (28, 28))
            ax = plt.subplot(gs[i, j])
            ax.imshow(im)
            ax.set_xticklabels([])
            ax.set_yticklabels([])
            cnt = cnt + 1
    #plt.title('Weights at layer 1')
    kk = '_k_' + str(hyper_para['k'])
    #hh = 'hl_size_' + str(hyper_para['hidden_layer_1_size'])
    plt.suptitle( kk + '\tBatch_size=' + str(hyper_para['batch_size']) + '\tlearning_rate=' + str(
        hyper_para['learning_rate']) + '\tk=' + str(hyper_para['k']) + 'epoch_' + str(epoch))
    # if show_flag == 1:
    #     plt.show()
    # plt.savefig('./figures/5b/Q5_b_weights' + hh+'_'+ date + '.png')
    plt.savefig('./figures/5b/Q5_b_weights' + kk+'_'+ date + '.png')
    plt.close()
def plot_ce_train_valid(train_ce, valid_ce, hyper_para):
    """Plot train/validation cross-entropy curves and save to a timestamped PNG."""
    date = time.strftime("%Y-%m-%d_%H_%M")
    plt.plot(train_ce)
    plt.plot(valid_ce)
    plt.title('Error vs Epochs')
    #hh = 'hl_size_' + str(hyper_para['hidden_layer_1_size'])
    kk = '_k_' + str(hyper_para['k'])
    # hyper-parameter summary shown as the figure suptitle
    a = '\tBatch_size=' + str(hyper_para['batch_size'])
    b = '\tlearning_rate=' + str(hyper_para['learning_rate'])
    c = '\tk=' + str(hyper_para['k'])
    plt.suptitle(kk + a + b + c)
    plt.xlabel('Epochs')
    plt.ylabel('Cross Entropy Error')
    plt.legend(['Train Cross Entropy', 'Valid Cross Entropy'], loc='upper right')
    plt.savefig('./figures/5b/Q5_b_rbm_' + kk + '_'+ date + '.png')
    #plt.show()
    plt.close()
# Load Training Data
# NOTE: Python 2 script (print statements; range() is shuffled as a list below).
xtrain, ytrain, xvalidate, yvalidate, xtest, ytest = load_mnist()
no_of_train_samples = len(ytrain)
# Random seed for the run
random_seed = hyper_para['random_seed']
mu = hyper_para['w_init_mu']
sigma = hyper_para['w_init_sig']
np.random.seed(random_seed)
no_of_train_samples = len(ytrain)
h = np.zeros((1, hyper_para['hidden_layer_1_size']))
h.astype(float)
# Variables Storing Results
J_train = 0.0
J_valid = 0.0
# learning iterations -- Python 2 only: range() returns a list here
indices = range(no_of_train_samples)
random.shuffle(indices)
batch_size = hyper_para['batch_size']
epochs = hyper_para['epochs']
max_iter = no_of_train_samples / batch_size #Iterations within epoch for training
#hlayer_size_grid = np.array([50,100,200,500])
hlayer_size_grid = np.array([100])
k_grid = np.array([1,5,10,20])
# Grid search over hidden-layer size and CD-k steps
for hh in range(0,hlayer_size_grid.shape[0]):
    for k in range(0, k_grid.shape[0]):
        hyper_para['hidden_layer_1_size'] = hlayer_size_grid[hh]
        hyper_para['k'] = k_grid[k]
        print 'Starting K = ', str(hyper_para['k']) + ' HL size ' + str(hyper_para['hidden_layer_1_size'])
        train_ce = [] # Cross Entropy = CE
        valid_ce = []
        hidden_layer_1_size = hlayer_size_grid[hh] # 100 hidden units
        # Gaussian-initialized weights and biases for the RBM
        w = np.random.normal(mu, sigma, (input_layer_size * hidden_layer_1_size))
        w = w.reshape(input_layer_size, hidden_layer_1_size)
        b = np.random.normal(mu, sigma, (1, hidden_layer_1_size))
        c = np.random.normal(mu, sigma, (1, input_layer_size))
        param = {'w': w, 'b': b, 'c': c}
        for epoch in range(epochs):
            for step in range(max_iter):
                # get mini-batch and setup the cnn with the mini-batch
                start_idx = step * batch_size % no_of_train_samples
                end_idx = (step + 1) * batch_size % no_of_train_samples
                if start_idx > end_idx:
                    # wrapped past the end of the data: reshuffle, skip batch
                    random.shuffle(indices)
                    continue
                idx = indices[start_idx: end_idx]
                # one CD-k update on the mini-batch
                x_p, h_p = gibbs_step(param, xtrain[idx, :], hyper_para)
                param = update_param(param, x_p, xtrain[idx, :], hyper_para)
            J_train = loss_calc(param, xtrain, ytrain, hyper_para)
            J_valid = loss_calc(param, xvalidate, yvalidate, hyper_para)
            print 'epoch', epoch, '\tTrain CE', J_train, '\t\tValid CE', J_valid
            param_string = 'param_rbm_k_' + str(hyper_para['k'])
            # checkpoint parameters and filters every 50 epochs past epoch 49
            if (epoch > 49) & (epoch % 50 == 0):
                save_obj(param, param_string, str(epoch))
                visualize(param['w'], hyper_para, epoch, 0)
            train_ce.append(J_train)
            valid_ce.append(J_valid)
        plot_ce_train_valid(train_ce, valid_ce, hyper_para)
        visualize(param['w'], hyper_para, epoch, 1)
|
import subprocess
import time
from datetime import datetime
pingCount = 10  # echo requests per ping run
count = 0  # completed measurement rounds
ipList = ['www.google.com']  # hosts to measure
def WritePingStats():
    """Ping each host in ipList and append avg/max/stddev latency to '<host>.csv'."""
    t = datetime.utcnow()
    for ip in ipList:
        # BUG FIX: the count was passed as one token "-c 10" (flag and value
        # fused with a leading space), which is shell/ping-implementation
        # dependent; pass the flag and its value as separate argv elements.
        p = subprocess.Popen(["ping", "-c", str(pingCount), ip], stdout = subprocess.PIPE)
        out = str(p.communicate()[0])
        fields = out.split()  # split once instead of re-splitting per stat
        timeUnit = fields[-1][:2]
        # summary line ends with slash-separated min/avg/max/stddev values
        stats = str(fields[-2]).split('/')
        averageLatency = stats[1]
        maxLatency = stats[2]
        stdDev = stats[3]
        print(str(maxLatency) + str(timeUnit))
        print(str(averageLatency) + str(timeUnit))
        print(stdDev)
        with open(str(ip) + ".csv", "a") as fd:
            fd.write(str(t) + "," + averageLatency + "," + maxLatency + "," + stdDev + "\n")
# Take two measurement rounds, ten seconds apart.
while(True):
    if count == 2:
        break
    WritePingStats()
    count += 1
    time.sleep(10)
|
# You might know some pretty large perfect squares. But what about the NEXT one?
# Complete the findNextSquare method that finds the next integral perfect square after the one passed as a parameter.
# Recall that an integral perfect square is an integer n such that sqrt(n) is also an integer.
# If the parameter is itself not a perfect square then -1 should be returned. You may assume the parameter is positive.
from math import sqrt, isqrt
def find_next_square(sq):
    """Return the next perfect square after sq, or -1 if sq is not a perfect square.

    BUG FIX: comparing sqrt(sq) with int(sqrt(sq)) relies on floating-point
    precision and misclassifies very large perfect squares; math.isqrt is
    exact for arbitrary-size integers.
    """
    root = isqrt(sq)
    if root * root == sq:
        return (root + 1) ** 2
    return -1
import subprocess
# Generate and submit one Slurm batch job per (core count, problem size) pair.
filename = "tmp_run_batch.sh"
for cores_n in range(1, 9):
    for N in [3, 6, 8]:
        # rewrite the script file for this combination, then sbatch it
        with open(filename, "w") as f:
            f.write(
"""#!/bin/bash
#SBATCH --ntasks={tasks}
#SBATCH --cpus-per-task=4
#SBATCH --partition=RT
#SBATCH --job-name=task-1-mumladze
#SBATCH --comment="Task 1. Mumladze Maximelian."
#SBATCH --output=out-{n}-{cores}.txt
#SBATCH --error=error.txt
mpiexec -np {cores} ./program {parts}""".format(tasks=(8 if cores_n < 4 else 16), cores=cores_n, parts=10 ** N, n=N))
        program = subprocess.run(['sbatch', filename])
|
'''
Created on 2020-03-24 14:33:48
Last modified on 2020-10-27 18:30:27
@author: L. F. Pereira (lfpereira@fe.up.pt)
Main goal
---------
Create an RVE class from which more useful classes can inherit.
Notes
-----
-Based on code developed by M. A. Bessa.
References
----------
1. van der Sluis, O., et al. (2000). Mechanics of Materials 32(8): 449-462.
2. Melro, A. R. (2011). PhD thesis. University of Porto.
'''
# imports
# abaqus
from abaqusConstants import (TWO_D_PLANAR, DEFORMABLE_BODY, ON, FIXED,
THREE_D, DELETE, GEOMETRY, TET, FREE,
YZPLANE, XZPLANE, XYPLANE)
from part import (EdgeArray, FaceArray)
from mesh import MeshNodeArray
import regionToolset
# third-party
import numpy as np
# local library
from .utils import transform_point
from .utils import get_orientations_360
from ...utils.linalg import symmetricize_vector
from ...utils.linalg import sqrtm
# 2d RVE
class RVE2D:
    def __init__(self, length, width, center, name='RVE'):
        # TODO: generalize length and width to dims
        # TODO: is center really required?
        # TODO: inherit from a simply Python RVE?
        '''
        Store the RVE geometry and default mesh settings.

        Notes
        -----
        -1st reference point represents the difference between right bottom
        and left bottom vertices.
        -2nd reference point represents the difference between left top
        and left bottom vertices.
        '''
        self.length = length
        self.width = width
        self.center = center
        self.name = name
        # variable initialization (filled in by create_part)
        self.sketch = None
        self.part = None
        # mesh definitions (overridable via change_mesh_definitions)
        self.mesh_size = .02           # global seed size
        self.mesh_tol = 1e-5           # max allowed node mismatch across opposite edges
        self.mesh_trial_iter = 1       # meshing attempts before giving up
        self.mesh_refine_factor = 1.25 # seed divisor applied after each failed attempt
        self.mesh_deviation_factor = .4
        self.mesh_min_size_factor = .4
        # additional variables: canonical names for edges/vertices/ref points
        self.edge_positions = ('RIGHT', 'TOP', 'LEFT', 'BOTTOM')
        self.vertex_positions = ('RT', 'LT', 'LB', 'RB')
        self.ref_points_positions = ('LR', 'TB')
def change_mesh_definitions(self, **kwargs):
'''
See mesh definition at __init__ to find out the variables that can be
changed.
'''
for key, value in kwargs.items():
setattr(self, key, value)
    def create_part(self, model):
        """Build the RVE sketch and a 2D deformable part from it in *model*."""
        # create RVE outer geometry (sets self.sketch)
        self._create_RVE_geometry(model)
        # create particular geometry
        # TODO: call it add inner sketches
        self._create_inner_geometry(model)
        # create part
        self.part = model.Part(name=self.name, dimensionality=TWO_D_PLANAR,
                               type=DEFORMABLE_BODY)
        self.part.BaseShell(sketch=self.sketch)
        # create PBCs sets (here because sets are required for meshing purposes)
        self._create_bounds_sets()
    def create_instance(self, model):
        """Add a dependent instance of the part to the model's root assembly."""
        # create instance
        model.rootAssembly.Instance(name=self.name,
                                    part=self.part, dependent=ON)
    def generate_mesh(self):
        """Seed the part globally and along its boundary edges, then mesh it."""
        # seed part
        self.part.seedPart(size=self.mesh_size,
                           deviationFactor=self.mesh_deviation_factor,
                           minSizeFactor=self.mesh_min_size_factor)
        # seed edges with a FIXED constraint so opposite edges get matching nodes
        edges = [self.part.sets[self._get_edge_name(position)].edges[0] for position in self.edge_positions]
        self.part.seedEdgeBySize(edges=edges, size=self.mesh_size,
                                 deviationFactor=self.mesh_deviation_factor,
                                 constraint=FIXED)
        # generate mesh
        self.part.generateMesh()
def generate_mesh_for_pbcs(self, fast=False):
it = 1
success = False
while it <= self.mesh_trial_iter and not success:
# generate mesh
self.generate_mesh()
# verify generated mesh
if fast:
success = self.verify_mesh_for_pbcs_quick()
else:
success = self.verify_mesh_for_pbcs()
# prepare next iteration
it += 1
if not success:
self.mesh_size /= self.mesh_refine_factor
return success
def verify_mesh_for_pbcs(self):
'''
Verify correctness of generated mesh based on allowed tolerance. It
immediately stops when a node pair does not respect the tolerance.
'''
# initialization
success = True
# get nodes
right_nodes, top_nodes, left_nodes, bottom_nodes = self._get_all_sorted_edge_nodes()
# verify if tolerance is respected (top and bottom)
for top_node, bottom_node in zip(top_nodes, bottom_nodes):
if abs(top_node.coordinates[0] - bottom_node.coordinates[0]) > self.mesh_tol:
# TODO: return insteads
success = False
break
# verify if tolerance is respected (right and left)
if success:
for right_node, left_node in zip(right_nodes, left_nodes):
if abs(right_node.coordinates[1] - left_node.coordinates[1]) > self.mesh_tol:
success = False
break
return success
def verify_mesh_for_pbcs_quick(self):
# get nodes
right_nodes, top_nodes, left_nodes, bottom_nodes = self._get_all_sorted_edge_nodes()
return len(top_nodes) == len(bottom_nodes) and len(right_nodes) == len(left_nodes)
    def apply_pbcs_constraints(self, model):
        '''
        Create the periodic-boundary-condition equation constraints: vertex
        constraints tie opposite corners through the two reference points,
        and each interior boundary node is tied to its opposite twin.
        '''
        # create reference points
        self._create_pbcs_ref_points(model)
        # get nodes (sorted so opposite lists pair up index by index)
        right_nodes, top_nodes, left_nodes, bottom_nodes = self._get_all_sorted_edge_nodes()
        # get vertices
        rt_vertex, lt_vertex, lb_vertex, rb_vertex = self._get_all_vertices(only_names=True)
        # get reference points
        lr_ref_point, tb_ref_point = self._get_all_ref_points(only_names=True)
        # apply vertex constraints (ndof 1 and 2: the two in-plane dofs)
        for ndof in range(1, 3):
            # right-top and left-bottom nodes
            model.Equation(name='Constraint-RT-LB-V-%i' % ndof,
                           terms=((1.0, rt_vertex, ndof),
                                  (-1.0, lb_vertex, ndof),
                                  (-1.0, lr_ref_point, ndof),
                                  (-1.0, tb_ref_point, ndof)))
            # left-top and right-bottom nodes
            model.Equation(name='Constraint-LT-RB-V-%i' % ndof,
                           terms=((1.0, rb_vertex, ndof),
                                  (-1.0, lt_vertex, ndof),
                                  (-1.0, lr_ref_point, ndof),
                                  (1.0, tb_ref_point, ndof)))
        # left-right edges constraints (corners excluded: handled above)
        for i, (left_node, right_node) in enumerate(zip(left_nodes[1:-1], right_nodes[1:-1])):
            # create set with individual nodes (Equation terms need sets)
            left_node_set_name = 'NODE-L-%i' % i
            left_set_name = '%s.%s' % (self.name, left_node_set_name)
            self.part.Set(name=left_node_set_name, nodes=MeshNodeArray((left_node,)))
            right_node_set_name = 'NODE-R-%i' % i
            self.part.Set(name=right_node_set_name, nodes=MeshNodeArray((right_node,)))
            right_set_name = '%s.%s' % (self.name, right_node_set_name)
            # create constraint
            for ndof in range(1, 3):
                model.Equation(name='Constraint-L-R-%i-%i' % (i, ndof),
                               terms=((1.0, right_set_name, ndof),
                                      (-1.0, left_set_name, ndof),
                                      (-1.0, lr_ref_point, ndof)))
        # top-bottom edges constraints (corners excluded: handled above)
        for i, (top_node, bottom_node) in enumerate(zip(top_nodes[1:-1], bottom_nodes[1:-1])):
            # create set with individual nodes
            top_node_set_name = 'NODE-T-%i' % i
            top_set_name = '%s.%s' % (self.name, top_node_set_name)
            self.part.Set(name=top_node_set_name, nodes=MeshNodeArray((top_node,)))
            bottom_node_set_name = 'NODE-B-%i' % i
            self.part.Set(name=bottom_node_set_name, nodes=MeshNodeArray((bottom_node,)))
            bottom_set_name = '%s.%s' % (self.name, bottom_node_set_name)
            # create constraint
            for ndof in range(1, 3):
                model.Equation(name='Constraint-T-B-%i-%i' % (i, ndof),
                               terms=((1.0, top_set_name, ndof),
                                      (-1.0, bottom_set_name, ndof),
                                      (-1.0, tb_ref_point, ndof)))
        # ???: need to create fixed nodes? why to not apply bcs e.g. LB and RB?
    def apply_bcs_displacement(self, model, epsilon_11, epsilon_22, epsilon_12,
                               green_lagrange_strain=True):
        '''Apply a macroscopic strain as boundary displacements (incomplete).'''
        # initialization
        # NOTE(review): symmetricize_vector is an external helper -- presumably
        # builds the symmetric 2x2 strain matrix from [e11, e12, e22]; confirm.
        epsilon = symmetricize_vector([epsilon_11, epsilon_12, epsilon_22])
        # TODO: receive only small deformations
        # create strain matrix (convert Green-Lagrange to small strain)
        if green_lagrange_strain:
            epsilon = self._compute_small_strain(epsilon)
        # apply displacement
        # TODO: continue here
        # TODO: fix left bottom node
@staticmethod
def _compute_small_strain(epsilon_lagrange):
identity = np.identity(2)
def_grad = sqrtm(2 * epsilon_lagrange + identity)
return 1 / 2 * (def_grad + np.transpose(def_grad)) - identity
    def _create_RVE_geometry(self, model):
        '''Sketch the rectangular RVE outline centred on self.center.'''
        # rectangle points (opposite corners, translated to the RVE centre)
        # NOTE(review): transform_point is an external helper -- presumably a
        # rotation + translation of a 2d/3d point; confirm.
        pt1 = transform_point((-self.length / 2., -self.width / 2.),
                              origin_translation=self.center)
        pt2 = transform_point((self.length / 2., self.width / 2.),
                              origin_translation=self.center)
        # sketch
        self.sketch = model.ConstrainedSketch(name=self.name + '_PROFILE',
                                              sheetSize=2 * self.length)
        self.sketch.rectangle(point1=pt1, point2=pt2)
    def _create_bounds_sets(self):
        '''Create one set per boundary edge and per corner vertex.'''
        # TODO: update edge finding to be more robust (with bounding box)
        # create sets
        # r: distance from the centre to any corner of the rectangle
        r = np.sqrt((self.length / 2.) ** 2 + (self.width / 2.)**2)
        # NOTE(review): get_orientations_360 is an external helper -- presumably
        # yields one angle per edge (0, 90, 180, 270 degrees); confirm.
        for i, (edge_position, vertex_position, theta) in enumerate(
                zip(self.edge_positions, self.vertex_positions, get_orientations_360(0))):
            # find edge (midpoint of the edge at angle theta)
            pt = transform_point((self.length / 2 * np.cos(theta), self.width / 2 * np.sin(theta), 0.),
                                 origin_translation=self.center)
            edge = self.part.edges.findAt((pt,))
            # create edge set
            edge_set_name = self._get_edge_name(edge_position)
            self.part.Set(name=edge_set_name, edges=edge)
            # find vertex: corner at distance r, angle alpha within the local
            # quadrant (aspect ratio alternates with the edge orientation)
            ratio = self.length / self.width if i % 2 else self.width / self.length
            alpha = np.arctan(ratio)
            pt = transform_point((r * np.cos(alpha), r * np.sin(alpha), 0.),
                                 orientation=theta,
                                 origin_translation=self.center)
            vertex = self.part.vertices.findAt((pt,))
            # create vertex set
            vertex_set_name = self._get_vertex_name(vertex_position)
            self.part.Set(name=vertex_set_name, vertices=vertex)
def _create_pbcs_ref_points(self, model):
'''
Notes
-----
-any coordinate for reference points position works.
'''
# initialization
modelAssembly = model.rootAssembly
# create reference points
coord = list(self.center) + [0.]
for position in self.ref_points_positions:
ref_point = modelAssembly.ReferencePoint(point=coord)
modelAssembly.Set(name=self._get_ref_point_name(position),
referencePoints=((modelAssembly.referencePoints[ref_point.id],)))
def _get_all_sorted_edge_nodes(self):
'''
Notes
-----
-output is given in the order of position definition.
'''
nodes = []
for i, position in enumerate(self.edge_positions):
nodes.append(self._get_edge_nodes(position, sort_direction=(i + 1) % 2))
return nodes
def _get_all_vertices(self, only_names=False):
'''
Notes
-----
-output is given in the order of position definition.
'''
if only_names:
vertices = ['%s.%s' % (self.name, self._get_vertex_name(position)) for position in self.vertex_positions]
else:
vertices = [self.part.sets[self._get_vertex_name(position)] for position in self.vertex_positions]
return vertices
def _get_all_ref_points(self, model=None, only_names=False):
'''
Notes
-----
-output is given in the order of position definition.
-model is required if only_names is False.
'''
if only_names:
ref_points = [self._get_ref_point_name(position) for position in self.ref_points_positions]
else:
ref_points = [model.rootAssembly.sets[self._get_ref_point_name(position)] for position in self.ref_points_positions]
return ref_points
def _get_edge_nodes(self, position, sort_direction=None):
edge_name = self._get_edge_name(position)
nodes = self.part.sets[edge_name].nodes
if sort_direction is not None:
nodes = sorted(nodes, key=lambda node: self._get_node_coordinate(node, i=sort_direction))
return nodes
@staticmethod
def _get_edge_name(position):
return '%s_EDGE' % position
@staticmethod
def _get_vertex_name(position):
return 'VERTEX_%s' % position
@staticmethod
def _get_ref_point_name(position):
return '%s_REF_POINT' % position
@staticmethod
def _get_node_coordinate(node, i):
return node.coordinates[i]
class RVE3D(object):
    '''
    3d RVE part builder with periodic-BC-compatible meshing and verification.

    Fixes relative to the previous revision:
    - `_verify_set_name` no longer compounds suffixes ('NAME_2_3...') when
      several candidate names are taken; it regenerates from the base name.
    - `_create_bound_edges_sets` now uses the correct dimension index for the
      second face of each edge pair (the enumerate index of the sliced
      position tuple was used unshifted, which produced wrong bounding boxes
      for non-cubic RVEs).
    '''

    def __init__(self, dims, name='RVE'):
        # dims: (x, y, z) side lengths of the RVE box
        self.dims = dims
        self.name = name
        # mesh definitions
        self.mesh_size = .02
        self.mesh_tol = 1e-5
        self.mesh_trial_iter = 1
        self.mesh_refine_factor = 1.25
        self.mesh_deviation_factor = .4
        self.mesh_min_size_factor = .4
        # variable initialization
        self.particles = []
        self.part = None
        # additional variables: face naming, negative face before positive
        self.face_positions = ('X-', 'X+', 'Y-', 'Y+', 'Z-', 'Z+')

    def change_mesh_definitions(self, **kwargs):
        '''
        See mesh definition at __init__ to find out the variables that can be
        changed.
        '''
        for key, value in kwargs.items():
            setattr(self, key, value)

    def add_particle(self, particle):
        '''Register a particle object to be embedded in the RVE.'''
        self.particles.append(particle)

    def create_part(self, model):
        '''Create the extruded RVE part and the parts of all particles.'''
        # create RVE
        sketch = self._create_RVE_geometry(model)
        # create particular geometry
        # self._create_inner_geometry(model)
        # create part
        self.part = model.Part(name=self.name, dimensionality=THREE_D,
                               type=DEFORMABLE_BODY)
        self.part.BaseSolidExtrude(sketch=sketch, depth=self.dims[2])
        # create particles parts
        for particle in self.particles:
            particle.create_part(model, self)

    def _create_RVE_geometry(self, model):
        '''Sketch the rectangular base profile of the RVE.'''
        sketch = model.ConstrainedSketch(name=self.name + '_PROFILE',
                                         sheetSize=2 * self.dims[0])
        sketch.rectangle(point1=(0., 0.), point2=(self.dims[0], self.dims[1]))
        return sketch

    def create_instance(self, model):
        '''Instance RVE and particles, merge them into a single part.'''
        # initialization
        modelAssembly = model.rootAssembly
        # create rve instance
        modelAssembly.Instance(name=self.name,
                               part=self.part, dependent=ON)
        # create particle instances
        for particle in self.particles:
            particle.create_instance(model)
        # create merged rve (keepIntersections preserves internal boundaries)
        new_part_name = '{}_WITH_PARTICLES'.format(self.name)
        modelAssembly.InstanceFromBooleanMerge(name=new_part_name,
                                               instances=modelAssembly.instances.values(),
                                               keepIntersections=ON,
                                               originalInstances=DELETE,
                                               domain=GEOMETRY)
        self.part = model.parts[new_part_name]
        # create PBCs sets (here because sets are required for meshing purposes)
        self._create_bounds_sets()

    def generate_mesh(self, face_by_closest=True, simple_trial=False):
        '''
        Mesh the part with tets; optionally try a simple strategy first and
        fall back to the partition-based periodic strategy on failure.
        '''
        # set mesh control
        self.part.setMeshControls(regions=self.part.cells, elemShape=TET,
                                  technique=FREE,)
        # generate mesh by simple strategy
        if simple_trial:
            # generate mesh
            self._seed_part()
            self.part.generateMesh()
            # verify mesh
            success = self.verify_mesh_for_pbcs(face_by_closest=face_by_closest)
        # retry meshing if unsuccessful
        if not simple_trial or not success:
            if simple_trial:
                print("Warning: Unsucessful mesh generation. Another strategy will be tried out")
            # retry meshing
            self._retry_meshing()
            # verify mesh
            success = self.verify_mesh_for_pbcs(face_by_closest=face_by_closest)
            if not success:
                print("Warning: Unsucessful mesh generation")

    def _seed_part(self):
        '''Seed the part globally and fix the seeds on all exterior edges.'''
        # seed part
        self.part.seedPart(size=self.mesh_size,
                           deviationFactor=self.mesh_deviation_factor,
                           minSizeFactor=self.mesh_min_size_factor)
        # seed exterior edges with FIXED constraint so opposite faces can
        # receive matching node layouts
        exterior_edges = self._get_exterior_edges()
        self.part.seedEdgeBySize(edges=exterior_edges, size=self.mesh_size,
                                 deviationFactor=self.mesh_deviation_factor,
                                 constraint=FIXED)

    def _retry_meshing(self):
        '''Partition the RVE into 8 sub-cells and mesh them periodically.'''
        # delete older mesh
        self.part.deleteMesh()
        # 8-cube partitions (not necessarily midplane)
        planes = [YZPLANE, XZPLANE, XYPLANE]
        for i, plane in enumerate(planes):
            feature = self.part.DatumPlaneByPrincipalPlane(principalPlane=plane,
                                                           offset=self.dims[i] / 2)
            datum = self.part.datums[feature.id]
            self.part.PartitionCellByDatumPlane(datumPlane=datum, cells=self.part.cells)
        # reseed (due to partitions)
        self._seed_part()
        # z+: mesh one half, copying face patterns to opposite faces
        k = self._mesh_half_periodically()
        # transition: copy the z+ boundary pattern to the z- half
        axis = 2
        faces = self.part.faces.getByBoundingBox(zMin=self.dims[2])
        for face in faces:
            self._copy_face_mesh_pattern(face, axis, k)
            k += 1
        # z-
        self._mesh_half_periodically(k, zp=False)

    def _mesh_half_periodically(self, k=0, zp=True):
        '''
        Mesh the four sub-cells of one z-half in sequence, copying each
        freshly meshed exterior face pattern to its opposite face before the
        neighbouring sub-cell is meshed. Returns the next face-set counter.
        '''
        # initialization: restrict the bounding boxes to the chosen z-half
        if zp:
            kwargs = {'zMin': self.dims[2] / 2}
        else:
            kwargs = {'zMax': self.dims[2] / 2}
        pickedCells = self.part.cells.getByBoundingBox(xMin=self.dims[0] / 2,
                                                       yMin=self.dims[1] / 2,
                                                       **kwargs)
        self.part.generateMesh(regions=pickedCells)
        # copy pattern from the x+ face to the x- face
        axis = 0
        faces = self.part.faces.getByBoundingBox(xMin=self.dims[0],
                                                 yMin=self.dims[1] / 2,
                                                 **kwargs)
        for face in faces:
            self._copy_face_mesh_pattern(face, axis, k)
            k += 1
        # generate local mesh
        pickedCells = self.part.cells.getByBoundingBox(xMax=self.dims[0] / 2,
                                                       yMin=self.dims[1] / 2,
                                                       **kwargs)
        self.part.generateMesh(regions=pickedCells)
        # copy pattern from the y+ face to the y- face
        axis = 1
        faces = self.part.faces.getByBoundingBox(yMin=self.dims[1],
                                                 **kwargs)
        for face in faces:
            self._copy_face_mesh_pattern(face, axis, k)
            k += 1
        # generate local mesh
        pickedCells = self.part.cells.getByBoundingBox(xMax=self.dims[0] / 2,
                                                       yMax=self.dims[1] / 2,
                                                       **kwargs)
        self.part.generateMesh(regions=pickedCells)
        # copy pattern from the x- face to the x+ face (s=1: positive side)
        axis = 0
        faces = self.part.faces.getByBoundingBox(xMax=0.,
                                                 yMax=self.dims[1] / 2,
                                                 **kwargs)
        for face in faces:
            self._copy_face_mesh_pattern(face, axis, k, s=1)
            k += 1
        # generate local mesh
        pickedCells = self.part.cells.getByBoundingBox(xMin=self.dims[0] / 2,
                                                       yMax=self.dims[1] / 2,
                                                       **kwargs)
        self.part.generateMesh(regions=pickedCells)
        return k

    def _copy_face_mesh_pattern(self, face, axis, k, s=0):
        '''
        Copy the mesh pattern of *face* to the opposite face along *axis*.
        s selects the target side (0: axis minimum, 1: axis maximum); k makes
        the helper set names unique.
        '''
        pt = list(face.pointOn[0])
        pt[axis] = self.dims[axis] * s
        target_face = self.part.faces.findAt(pt)
        face_set = self.part.Set(name='_FACE_{}'.format(k), faces=FaceArray((face,)))
        self.part.Set(name='_TARGET_FACE_{}'.format(k), faces=FaceArray((target_face,)))
        # project the source-face corner vertices onto the target plane
        vertex_indices = face.getVertices()
        vertices = [self.part.vertices[index].pointOn[0] for index in vertex_indices]
        coords = [list(vertex) for vertex in vertices]
        for coord in coords:
            coord[axis] = self.dims[axis] * s
        # anchor nodes: the mesh nodes closest to the source corner vertices
        nodes = [face_set.nodes.getClosest(vertex) for vertex in vertices]
        self.part.copyMeshPattern(faces=face_set, targetFace=target_face,
                                  nodes=nodes,
                                  coordinates=coords)

    def _get_edge_nodes(self, pos_i, pos_j, sort_direction=None):
        '''Return the nodes of the edge between faces pos_i/pos_j, optionally sorted.'''
        edge_name = self._get_edge_name(pos_i, pos_j)
        nodes = self.part.sets[edge_name].nodes
        if sort_direction is not None:
            nodes = sorted(nodes, key=lambda node: self._get_node_coordinate(node, i=sort_direction))
        return nodes

    def _get_face_nodes(self, pos, sort_direction_i=None, sort_direction_j=None):
        '''Return the nodes of one face, optionally sorted by two rounded coordinates.'''
        face_name = self._get_face_name(pos)
        nodes = self.part.sets[face_name].nodes
        if sort_direction_i is not None and sort_direction_j is not None:
            # round to mesh_tol-equivalent decimals so the sort is stable
            d = self._get_decimal_places()
            nodes = sorted(nodes, key=lambda node: (
                self._get_node_coordinate_with_tol(node, i=sort_direction_i, decimal_places=d),
                self._get_node_coordinate_with_tol(node, i=sort_direction_j, decimal_places=d),))
        return nodes

    def verify_mesh_for_pbcs(self, face_by_closest=True):
        '''
        Verify correctness of generated mesh based on allowed tolerance. It
        immediately stops when a node pair does not respect the tolerance.
        '''
        # verify edges
        if not self._verify_edges():
            return False
        # verify faces
        if face_by_closest:
            return self._verify_faces_by_closest()
        else:
            return self._verify_faces_by_sorting()

    def _verify_edges(self):
        '''Check that the 4 parallel edges of each direction pair up node by node.'''
        for i, pos_i in enumerate(zip(self.face_positions[:-2:2], self.face_positions[1:-2:2])):
            for j, pos_j in enumerate(zip(self.face_positions[2 * (i + 1)::2], self.face_positions[(2 * (i + 1) + 1)::2])):
                # get possible combinations (the 4 edges parallel to axis k)
                pos_comb = self._get_edge_combinations(pos_i, pos_j)
                n_comb = len(pos_comb)
                # get sorted nodes for each edge
                nodes = []
                k = self._get_edge_sort_direction(i, i + j + 1)
                for (pos_i_, pos_j_) in pos_comb:
                    nodes.append(self._get_edge_nodes(pos_i_, pos_j_,
                                                      sort_direction=k))
                # verify sizes
                sizes = [len(node_list) for node_list in nodes]
                if len(set(sizes)) > 1:
                    return False
                # verify if tolerance is respected
                for n, node in enumerate(nodes[0]):
                    for m in range(1, n_comb):
                        if abs(node.coordinates[k] - nodes[m][n].coordinates[k]) > self.mesh_tol:
                            # create set with error nodes (for inspection in the GUI)
                            nodes_ = [node_list[n] for node_list in nodes]
                            set_name = self._verify_set_name('_ERROR_EDGE_NODES')
                            self.part.Set(set_name,
                                          nodes=MeshNodeArray(nodes_))
                            return False
        return True

    def _verify_set_name(self, name):
        '''Return *name*, or *name* with a numeric suffix if already taken.'''
        new_name = name
        i = 1
        while new_name in self.part.sets.keys():
            i += 1
            # fix: derive each candidate from the base name (previously the
            # suffixes compounded: 'NAME_2', 'NAME_2_3', ...)
            new_name = '{}_{}'.format(name, i)
        return new_name

    def _verify_faces_by_sorting(self):
        '''
        Notes
        -----
        1. the sort method is less robust to due rounding errors. `mesh_tol`
        is used to increase its robustness, but find by closest should be
        preferred.
        '''
        for i, (pos_i, pos_j) in enumerate(zip(self.face_positions[::2], self.face_positions[1::2])):
            # get nodes sorted by the two in-face directions
            j, k = self._get_face_sort_directions(i)
            nodes_i = self._get_face_nodes(pos_i, j, k)
            nodes_j = self._get_face_nodes(pos_j, j, k)
            # verify size
            if len(nodes_i) != len(nodes_j):
                return False
            # verify if tolerance is respected
            for node, node_cmp in zip(nodes_i, nodes_j):
                # verify tolerance
                if not self._verify_tol_face_nodes(node, node_cmp, j, k):
                    return False
        return True

    def _verify_faces_by_closest(self):
        '''
        Notes
        -----
        1. at first sight, it appears to be more robust than using sort.
        '''
        for i, (pos_i, pos_j) in enumerate(zip(self.face_positions[::2], self.face_positions[1::2])):
            # get nodes
            j, k = self._get_face_sort_directions(i)
            nodes_i = self._get_face_nodes(pos_i)
            nodes_j = self._get_face_nodes(pos_j)
            # verify size
            if len(nodes_i) != len(nodes_j):
                return False
            # verify if tolerance is respected
            for node in nodes_i:
                node_cmp = nodes_j.getClosest(node.coordinates)
                # verify tolerance
                if not self._verify_tol_face_nodes(node, node_cmp, j, k):
                    return False
        return True

    def _verify_tol_face_nodes(self, node, node_cmp, j, k):
        '''Return False (and record an error set) if the node pair mismatches in-plane.'''
        if abs(node.coordinates[j] - node_cmp.coordinates[j]) > self.mesh_tol or abs(node.coordinates[k] - node_cmp.coordinates[k]) > self.mesh_tol:
            # create set with error nodes
            set_name = self._verify_set_name('_ERROR_FACE_NODES')
            self.part.Set(set_name,
                          nodes=MeshNodeArray((node, node_cmp)))
            return False
        return True

    def _get_edge_combinations(self, pos_i, pos_j):
        '''Return the 4 (face, face) pairs defining the edges parallel to one axis.'''
        comb = []
        for pos_i_ in pos_i:
            for pos_j_ in pos_j:
                comb.append([pos_i_, pos_j_])
        return comb

    def _get_edge_sort_direction(self, i, j):
        '''Return the axis index not among the two face axes i and j.'''
        if 0 not in [i, j]:
            return 0
        elif 1 not in [i, j]:
            return 1
        else:
            return 2

    def _get_face_sort_directions(self, i):
        '''Return the two in-plane axis indices for the face normal to axis i.'''
        if i == 0:
            return 1, 2
        elif i == 1:
            return 0, 2
        else:
            return 0, 1

    def _get_exterior_edges(self):
        '''Return an EdgeArray with every exterior edge of the RVE box (no duplicates).'''
        exterior_edges = []
        for i, position in enumerate(self.face_positions):
            k = int(i // 2)
            # reuse _get_face_info instead of duplicating its name/axis logic
            _, var_name, dim = self._get_face_info(k, position)
            kwargs = {var_name: dim}
            edges = self.part.edges.getByBoundingBox(**kwargs)
            exterior_edges.extend(edges)
        # unique edges (each edge belongs to two faces)
        edge_indices, unique_exterior_edges = [], []
        for edge in exterior_edges:
            if edge.index not in edge_indices:
                unique_exterior_edges.append(edge)
                edge_indices.append(edge.index)
        return EdgeArray(unique_exterior_edges)

    def _create_bounds_sets(self):
        '''Create the face and edge sets needed by meshing and the PBCs.'''
        # faces
        self._create_bound_faces_sets()
        # edges
        self._create_bound_edges_sets()

    def _create_bound_faces_sets(self):
        '''Create one set per boundary face of the RVE box.'''
        for i, position in enumerate(self.face_positions):
            k = i // 2
            face_name, var_name, dim = self._get_face_info(k, position)
            kwargs = {var_name: dim}
            faces = self.part.faces.getByBoundingBox(**kwargs)
            self.part.Set(name=face_name, faces=faces)
            # self.part.Surface(name='SUR{}'.format(face_name), side1Faces=faces)

    def _create_bound_edges_sets(self):
        '''Create one set per boundary edge (intersection of two faces).'''
        for i, pos_i in enumerate(self.face_positions[:-2]):
            k_i = i // 2
            _, var_name_i, dim_i = self._get_face_info(k_i, pos_i)
            for j, pos_j in enumerate(self.face_positions[2 * (k_i + 1):]):
                # fix: j enumerates the SLICED tuple, so the axis index of
                # pos_j must be shifted by k_i + 1 (previously `j // 2` alone
                # used the wrong RVE dimension for Y/Z faces)
                k_j = j // 2 + k_i + 1
                _, var_name_j, dim_j = self._get_face_info(k_j, pos_j)
                edge_name = self._get_edge_name(pos_i, pos_j)
                kwargs = {var_name_i: dim_i, var_name_j: dim_j}
                edges = self.part.edges.getByBoundingBox(**kwargs)
                self.part.Set(name=edge_name, edges=edges)

    def _get_face_info(self, i, position):
        '''
        Return (set name, bounding-box keyword, coordinate) for the face at
        *position*, whose normal is axis *i*. Negative faces sit at 0 and are
        selected with an axis maximum; positive faces sit at dims[i].
        '''
        face_name = self._get_face_name(position)
        sign = 1 if '+' in face_name else 0  # 0 to represent negative face
        face_axis = face_name.split('_')[1][0].lower()
        var_name = '{}Min'.format(face_axis) if sign else '{}Max'.format(face_axis)
        dim = self.dims[i] * sign
        return face_name, var_name, dim

    @staticmethod
    def _get_edge_name(pos_i, pos_j):
        '''Return the set name for the edge between faces pos_i and pos_j.'''
        return 'EDGE_{}{}'.format(pos_i, pos_j)

    @staticmethod
    def _get_face_name(position):
        '''Return the set name for the face at *position*.'''
        return 'FACE_{}'.format(position)

    @staticmethod
    def _get_node_coordinate(node, i):
        '''Return the i-th coordinate of a mesh node.'''
        return node.coordinates[i]

    @staticmethod
    def _get_node_coordinate_with_tol(node, i, decimal_places):
        '''Return the i-th coordinate rounded for tolerance-aware sorting.'''
        return round(node.coordinates[i], decimal_places)

    def _get_decimal_places(self):
        '''Return the number of decimals needed to resolve mesh_tol.'''
        d = 0
        aux = 1
        while aux > self.mesh_tol:
            d += 1
            aux = 10**(-d)
        return d
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from openstackclient.common import configuration
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit import utils
class TestConfiguration(utils.TestCommand):
    """Tests for `openstack configuration show` (masked and unmasked)."""

    # expected columns for the default (auth_config) configuration type
    columns = (
        'auth.password',
        'auth.token',
        'auth.username',
        'identity_api_version',
        'region',
    )
    # expected row with secrets masked (default behaviour)
    datalist = (
        configuration.REDACTED,
        configuration.REDACTED,
        fakes.USERNAME,
        fakes.VERSION,
        fakes.REGION_NAME,
    )
    # fake keystoneauth plugin options marking password/token as secret
    opts = [
        mock.Mock(secret=True, dest="password"),
        mock.Mock(secret=True, dest="token"),
    ]
    @mock.patch(
        "keystoneauth1.loading.base.get_plugin_options", return_value=opts
    )
    def test_show(self, m_get_plugin_opts):
        # default invocation masks secrets
        arglist = []
        verifylist = [('mask', True)]
        cmd = configuration.ShowConfiguration(self.app, None)
        parsed_args = self.check_parser(cmd, arglist, verifylist)
        columns, data = cmd.take_action(parsed_args)
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.datalist, data)
    @mock.patch(
        "keystoneauth1.loading.base.get_plugin_options", return_value=opts
    )
    def test_show_unmask(self, m_get_plugin_opts):
        # --unmask reveals the real password and token
        arglist = ['--unmask']
        verifylist = [('mask', False)]
        cmd = configuration.ShowConfiguration(self.app, None)
        parsed_args = self.check_parser(cmd, arglist, verifylist)
        columns, data = cmd.take_action(parsed_args)
        self.assertEqual(self.columns, columns)
        datalist = (
            fakes.PASSWORD,
            fakes.AUTH_TOKEN,
            fakes.USERNAME,
            fakes.VERSION,
            fakes.REGION_NAME,
        )
        self.assertEqual(datalist, data)
    @mock.patch(
        "keystoneauth1.loading.base.get_plugin_options", return_value=opts
    )
    def test_show_mask_with_cloud_config(self, m_get_plugin_opts):
        # cloud_config configuration type produces the same masked output
        arglist = ['--mask']
        verifylist = [('mask', True)]
        self.app.client_manager.configuration_type = "cloud_config"
        cmd = configuration.ShowConfiguration(self.app, None)
        parsed_args = self.check_parser(cmd, arglist, verifylist)
        columns, data = cmd.take_action(parsed_args)
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.datalist, data)
    @mock.patch(
        "keystoneauth1.loading.base.get_plugin_options", return_value=opts
    )
    def test_show_mask_with_global_env(self, m_get_plugin_opts):
        # global_env configuration type flattens the column names (no 'auth.'
        # prefix) and sorts them differently
        arglist = ['--mask']
        verifylist = [('mask', True)]
        self.app.client_manager.configuration_type = "global_env"
        column_list = (
            'identity_api_version',
            'password',
            'region',
            'token',
            'username',
        )
        datalist = (
            fakes.VERSION,
            configuration.REDACTED,
            fakes.REGION_NAME,
            configuration.REDACTED,
            fakes.USERNAME,
        )
        cmd = configuration.ShowConfiguration(self.app, None)
        parsed_args = self.check_parser(cmd, arglist, verifylist)
        columns, data = cmd.take_action(parsed_args)
        self.assertEqual(column_list, columns)
        self.assertEqual(datalist, data)
|
import subprocess
import time
# helper modules
import analyzer
import sendsms
# activate microphone, record for 10 seconds
# NOTE(review): `-r 160000` looks like a typo for a 16000 Hz sample rate --
# confirm against the analyzer's expectations.
activeMic = "arecord -r 160000 -d 10 ./a.wav -D sysdefault:CARD=1"
# Endless monitor loop: record, analyse, notify.
while True:
    # 1: Collect data (arecord self-terminates after -d 10 seconds; terminate()
    # afterwards is a safety net)
    p = subprocess.Popen(activeMic, shell=True)
    time.sleep(10)
    p.terminate()
    # 2: Analyse data (threshold value passed to the helper module)
    overThreshold = analyzer.analyze(10000)
    if overThreshold > 0:
        # 3: Send out notifications
        sendsms.sendText()
|
# Read an integer, print each of its divisors, then print how many there are
# (using the singular or plural word as appropriate).
n = int(input("numero:"))
t = 0  # number of divisors found
for x in range(1, n + 1):
    if n % x == 0:
        t += 1
        print(x)
i = "divisor" if t == 1 else "divisores"
print(t, i)
# Compute a restaurant bill: sales tax is applied to the base amount, then
# the tip is computed on the taxed total.
Bill_Amount = float(input("Please enter a number: "))
Tax_Rate = 0.08  # 8% sales tax
Tip_Rate = 0.18  # 18% tip, applied after tax
Total_Tax_Amount = Bill_Amount * (1 + Tax_Rate)
Total_Bill_Amount = Total_Tax_Amount * (1 + Tip_Rate)
print("Your Bill Will Be %s" % Total_Bill_Amount)
#!/usr/bin/env python3
import fileinput
import numpy as np
def kernel(a):
    """Return the next state of the centre cell of a 3x3 neighbourhood.

    An empty seat ('L') with no occupied seats in the window becomes '#';
    an occupied seat ('#') with five or more '#' in the window (the centre
    itself included) becomes 'L'; anything else is unchanged.
    """
    seat = a[1, 1]
    occupied = np.count_nonzero(a[0:3, 0:3] == '#')
    if seat == 'L' and occupied == 0:
        return '#'
    if seat == '#' and occupied >= 5:
        return 'L'
    return seat
def test_task1():
    """Placeholder self-test for task 1 (no real assertions yet)."""
    assert True
    print('tests for task 1: ok')
def solve_task1():
    """Iterate the adjacent-seat rules to a fixed point; print the '#' count.

    Reads the seating grid from fileinput (stdin or argv files).
    """
    seats = [list(line.rstrip()) for line in fileinput.input()]
    # pad with floor so every 3x3 window around a real cell is in-bounds
    seats = np.pad(np.array(seats), pad_width=1, mode='constant', constant_values='.')
    current = seats
    while True:
        previous = np.copy(current)
        for row in range(1, len(previous) - 1):
            for col in range(1, len(previous[row]) - 1):
                current[row, col] = kernel(previous[row-1:row+2, col-1:col+2])
        if (previous == current).all():
            break
    solution = np.count_nonzero(current == '#')
    print(f'answer to task 1: {solution}')
def test_task2():
    """Placeholder self-test for task 2 (no real assertions yet)."""
    assert True
    print('tests for task 2: ok')
def solve_task2():
    """Task 2: seats see the first seat along each sight line; iterate to a
    fixed point and print the number of occupied seats."""
    area = [list(line.rstrip()) for line in fileinput.input()]
    # pad with floor so the sight-line walks can start in-bounds
    area = np.pad(np.array(area), pad_width=1, mode='constant', constant_values='.')
    new = area
    while True:
        old = np.copy(new)
        for i in range(1, len(old) - 1):
            for j in range(1, len(old[i]) - 1):
                if old[i, j] == '.':
                    continue  # floor never changes
                occupied = 0
                # (k, l) is a direction, x walks outwards along it.
                # Note: the k == l == 0 "direction" stays on the cell itself,
                # so an occupied seat counts itself once -- which is why the
                # threshold below is 6 rather than the puzzle's 5 neighbours.
                for k in range(-1, 2):
                    for l in range(-1, 2):
                        for x in range(1, max(len(old), len(old[i]))):
                            if i + k * x < 0 or i + k * x >= len(old) or j + l * x < 0 or j + l * x >= len(old[i]):
                                break
                            if old[i + k * x, j + l * x] == 'L':
                                break
                            if old[i + k * x, j + l * x] == '#':
                                occupied += 1
                                break
                if old[i, j] == 'L' and occupied == 0:
                    new[i, j] = '#'
                if old[i, j] == '#' and occupied >= 6:
                    new[i, j] = 'L'
        if (old == new).all():
            break
    solution = np.count_nonzero(new == '#')
    print(f'answer to task 2: {solution}')
def main():
    """Run the self-tests, then solve both tasks from the same input."""
    test_task1()
    solve_task1()
    test_task2()
    solve_task2()
if __name__ == '__main__':
main()
|
from selenium import webdriver
import time
# Open YouTube in a local Chrome and click the second guide menu item.
driver = webdriver.Chrome('./chromedriver')
driver.get("https://www.youtube.com")
# crude wait for the page to render; an explicit WebDriverWait would be sturdier
time.sleep(5)
# NOTE(review): relies on YouTube's internal 'paper-item' markup and on at
# least two matches being present -- fragile; confirm selector still works.
popular = driver.find_elements_by_css_selector("paper-item.style-scope")
popular[1].click()
|
# -*- coding: utf-8 -*-
"""
Organizational functions for pgrid.
"""
# **** USER EDIT ********
#gridname = 'aestus3'
gridname = 'cas6'
# **** END USER EDIT ****
import os; import sys
sys.path.append(os.path.abspath('../../LiveOcean/alpha'))
import Lfun
Ldir = Lfun.Lstart()
sys.path.append(os.path.abspath('../../LiveOcean/plotting'))
dir0 = Ldir['parent']  # parent directory holding the LiveOcean/ptools trees
pgdir = dir0 + 'ptools_output/pgrid/'  # root directory for pgrid output
def default_choices(Gr, wet_dry=False):
    """
    Build the default dict of grid-generation choices (can be overridden per
    case by the caller).

    Parameters
    ----------
    Gr : dict
        Grid info dict; only Gr['dir0'] (base directory string) is read here.
    wet_dry : bool
        Whether the grid will allow wetting and drying.

    Returns
    -------
    dict
    """
    dch = dict()
    # Decide if the grid will allow wetting and drying.
    # We do this first because it affects several subsequent choices
    dch['wet_dry'] = wet_dry
    # GRID CREATION
    # Set analytical to true when we define the bathymetry analytically.
    dch['analytical'] = False
    # z_offset is an adjustment to zero of the bathymetry to account for
    # the fact that mean sea level is somewhat higher than NAVD88.
    dch['use_z_offset'] = True
    dch['z_offset'] = -1.06
    # specify topography files to use
    dch['t_dir'] = Gr['dir0'] + 'ptools_data/topo/'
    # list of topo files: coarsest to finest
    dch['t_list'] = ['srtm15/topo15.nc',
                     'cascadia/cascadia_gridded.nc',
                     'psdem/PS_183m.nc',
                     'ttp_patch/TTP_Regional_27m_patch.nc']
    # MASKING
    # list of existing masks to work from
    dch['maskfiles'] = []
    # set z position of INITIAL dividing line (positive up)
    dch['z_land'] = 0
    # Unmask all cells crossed by the coastline when wetting and drying is
    # enabled (idiomatic truth test instead of `== True`).
    dch['unmask_coast'] = bool(dch['wet_dry'])
    # Set remove_islands to True to automatically remove isolated patches of
    # land or ocean.
    dch['remove_islands'] = True
    # SMOOTHING
    dch['use_min_depth'] = True # now I think this is always a good idea
    dch['min_depth'] = 4 # meters (positive down)
    # NUDGING
    # Use nudging edges to decide which edges to have nudging to climatology
    # on. And the nudging_days are the the (short, long) timescales to use.
    dch['nudging_edges'] = ['north', 'south', 'east', 'west']
    dch['nudging_days'] = (3.0, 60.0)
    return dch
def gstart(gridname=gridname):
    """Assemble the Gr dict of directories for the given grid name."""
    # analytical grids use the analytical river output; all others use PNW
    if gridname in ['aestus1', 'aestus2']:
        river_dir = dir0 + 'ptools_output/river/analytical/'
    else:
        river_dir = dir0 + 'ptools_output/river/pnw_all_2016_07/'
    gdir = pgdir + gridname + '/'
    return {'gridname': gridname, 'dir0': dir0, 'pgdir': pgdir,
            'gdir': gdir, 'ri_dir': river_dir}
def select_file(Gr, using_old_grid=False):
    """Interactively pick a grid .nc file; returns the chosen file name."""
    # interactive selection
    if using_old_grid==True:
        fn_list = []
        # NOTE: this local dir0 shadows the module-level dir0 on purpose
        dir0 = Ldir['parent'] + 'LiveOcean_data/grids/'
        gn_list = ['cascadia1', 'cascadia2']
        for gn in gn_list:
            fn_list.append(dir0 + gn + '/grid.nc')
    elif using_old_grid==False:
        print('\n** %s in <<%s>> **\n' % ('Choose file to edit', Gr['gridname']))
        fn_list_raw = os.listdir(Gr['gdir'])
        # keep only netCDF files
        fn_list = []
        for item in fn_list_raw:
            if item[-3:] == '.nc':
                fn_list.append(item)
        fn_list.sort()
    # present a numbered menu and read the user's choice
    Nfn = len(fn_list)
    fn_dict = dict(zip(range(Nfn), fn_list))
    for nfn in range(Nfn):
        print(str(nfn) + ': ' + fn_list[nfn])
    my_nfn = int(input('-- Input number -- '))
    fn = fn_dict[my_nfn]
    return fn
def increment_filename(fn, tag='_m'):
    """
    Return *fn* with the two-digit counter that follows *tag* incremented,
    e.g. 'grid_m01.nc' -> 'grid_m02.nc'. The counter wraps from 99 to 00.

    Parameters
    ----------
    fn : str
        File name containing *tag* immediately followed by two digits.
    tag : str
        Marker preceding the counter. (Generalized: any tag length now
        works; previously the digit offset was hard-coded for 2-character
        tags.)
    """
    gni = fn.find(tag)
    num_start = gni + len(tag)
    # zero-padded increment, wrapping at 100
    new_num = ('00' + str(int(fn[num_start:num_start + 2]) + 1))[-2:]
    # as before, str.replace swaps every occurrence of the matched substring
    fn_new = fn.replace(fn[gni:num_start + 2], tag + new_num)
    return fn_new
|
# ch14_9.py
# Write a short sentence to a text file.
fn = 'out14_9.txt'
string = 'I love Python.'
with open(fn, 'w') as output_file:
    output_file.write(string)
|
# Given an integer n, generate a square matrix filled with elements from 1 to n2 in spiral order.
# For example,
# Given n = 3,
# You should return the following matrix:
# [
# [ 1, 2, 3 ],
# [ 8, 9, 4 ],
# [ 7, 6, 5 ]
# ]
class Solution:
    """Generate an n x n matrix filled with 1..n*n in spiral order.

    NOTE(review): the class defines generateMatrix twice; the second
    definition shadows the first, so callers get the rotate-based version.
    The first is kept for reference.
    """
    def generateMatrix(self, n):
        """
        Layer-by-layer fill (shadowed by the method of the same name below).

        :type n: int
        :rtype: List[List[int]]
        """
        def generateLayer(matrix, nums, i):
            # fill the ring whose top-left corner is (i, i), then recurse inwards
            if len(nums) == 0:
                return
            x = y = i
            if len(nums) == 1:
                matrix[x][y] = nums[0]
                return
            # top row, left -> right
            while y < len(matrix) and matrix[x][y] == 0:
                matrix[x][y] = nums.pop()
                y += 1
            y -= 1
            x += 1
            # right column, top -> bottom
            while x < len(matrix) and matrix[x][y] == 0:
                matrix[x][y] = nums.pop()
                x += 1
            x -= 1
            y -= 1
            # bottom row, right -> left
            while y >= 0 and matrix[x][y] == 0:
                matrix[x][y] = nums.pop()
                y -= 1
            y += 1
            x -= 1
            # left column, bottom -> top
            while x >= 0 and matrix[x][y] == 0:
                matrix[x][y] = nums.pop()
                x -= 1
            generateLayer(matrix, nums, i + 1)
        nums = [i for i in reversed(range(1, n ** 2 + 1))]
        matrix = [[0 for _ in range(n)] for _ in range(n)]
        generateLayer(matrix, nums, 0)
        return matrix

    # inside-out rotate: prepend the next row, then rotate the partial
    # spiral clockwise; repeating this builds the full spiral.
    def generateMatrix(self, n):
        """
        :type n: int
        :rtype: List[List[int]]

        Fixed for Python 3: the original concatenated a bare `range` and a
        lazy `zip` object (`[range(lo, hi)] + zip(*A[::-1])`), which was
        Python-2-only and raised TypeError on Python 3.
        """
        A, lo = [], n * n + 1
        while lo > 1:
            lo, hi = lo - len(A), lo
            A = [list(range(lo, hi))] + [list(row) for row in zip(*A[::-1])]
        return A
|
from functools import wraps
def make_bold(fn):
    """Decorator: wrap fn's string result in <b>...</b> tags."""
    return getwrapped(fn, "b")
def make_italic(fn):
    """Decorator: wrap fn's string result in <i>...</i> tags."""
    return getwrapped(fn, "i")
def getwrapped(function, tag):
    """Return *function* wrapped so its string result is enclosed in
    <tag>...</tag>."""
    # @wraps makes the decorator transparent in terms of name and docstring
    @wraps(function)
    def wrapped():
        inner_result = function()
        return "<%s>%s</%s>" % (tag, inner_result, tag)
    return wrapped
# hello() returns "<b><i>hello world</i></b>": make_italic wraps first (it is
# closest to the def), then make_bold wraps that result.
@make_bold
@make_italic
def hello():
    """a decorated hello world"""
    return "hello world"
if __name__ == '__main__':
print('result:{} name:{} doc:{}'.format(hello(), hello.__name__, hello.__doc__))
# source: https://github.com/faif/python-patterns/blob/master/decorator.py
|
# Copyright 2016-2022. Couchbase, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from couchbase.encryption import Key
class Keyring(ABC):
    """Abstract interface for key storage used by field-level encryption."""
    @abstractmethod
    def get_key(self,
                key_id,  # type: str
                ) -> Key:
        """Returns requested key
        Args:
            key_id (str): Key ID to retrieve
        Returns:
            :class:`~couchbase.encryption.Key`: The corresponding :class:`~couchbase.encryption.Key`
            of the provided key_id.
        Raises:
            :raises :class:`~couchbase.exceptions.CryptoKeyNotFoundException`
        """
|
#!/usr/bin/env python
from itertools import *
import re
from collections import Counter
# ASCII boundaries used by the quoting scheme below: any code point below
# the space (0x20) is a control character; NUL (0x00) is the escape marker.
SPACE_CHAR = ' '
SPACE_ASCII = ord(SPACE_CHAR)
NULL_ASCII = 0
NULL_CHAR = chr(0)
def is_control(character):
    """Return True when *character* is an ASCII control character (< 0x20)."""
    code_point = ord(character)
    return code_point < SPACE_ASCII
def escape_int(inint):
    """Encode the small integer *inint* as a one- or two-char string.

    The value is shifted up by one and stored as a raw character; if that
    character is printable it is prefixed with NUL so the decoder can tell
    it apart from literal text.
    """
    encoded = chr(inint + 1)
    if is_control(encoded):
        return encoded
    return NULL_CHAR + encoded
def quote_ascii(instring):
    """Escape literal control characters in *instring* with a NUL prefix."""
    pieces = []
    for one_char in instring:
        if ord(one_char) < SPACE_ASCII:
            pieces.append(chr(0) + one_char)
        else:
            pieces.append(one_char)
    return "".join(pieces)
def unquote_ascii(instring):
    """Decode a string produced by quote_split()/quote_ascii().

    Generator yielding alternating str chunks (literal text) and int format
    codes. A NUL prefix marks either an escaped literal control character
    (NUL + control char) or an encoded integer that would otherwise collide
    with printable text (NUL + printable char).
    """
    chars = list(instring)
    text = ""
    while chars:
        current = chars.pop(0)
        if not is_control(current):
            text += current
            continue
        # current is a control character
        if ord(current) == NULL_ASCII:
            escaped = chars.pop(0)
            if is_control(escaped):
                # NUL + control char: an escaped literal control character
                text += escaped
                continue
            value = ord(escaped)
        else:
            # any other control character encodes the integer directly
            value = ord(current)
        assert value != 0, "Should not happen"
        # escape_int() stored value + 1; undo the shift
        value -= 1
        yield text
        text = ""
        yield value
    # Flush trailing literal text. Fixed: the original used `retstring is not ""`,
    # an identity comparison with a literal that only works via CPython string
    # interning (and is a SyntaxWarning on modern Python).
    if text:
        yield text
#simple_float = re.compile("[-]?\d+\.\d+")
# Matches signed decimal literals with an optional exponent, e.g. "-1.5e-3", "42.", ".5".
simple_float = re.compile("[-]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?")
def find_floats(string):
    """Split *string* around float literals.

    Returns (non_floats, floats): the text pieces between the floats and
    the float literals themselves, both as lists of strings.
    """
    return simple_float.split(string), simple_float.findall(string)
# Captures the integer digits, the optional fractional part (including the dot),
# and the optional exponent marker of a float literal.
format_finder = re.compile(r"[-+]?(\d+)(\.\d*)?([eE])?")
def determine_format(float_ascii):
    """Return (digits_before_point, digits_after_point, exponent_char_or_None).

    Fixed: a literal with no fractional part (e.g. "123", which simple_float
    can produce) previously crashed with TypeError on len(None) - 1; it now
    reports 0 digits after the point.
    """
    match = format_finder.match(float_ascii)
    assert match is not None, "No match!"
    before = len(match.group(1))
    fraction = match.group(2)
    after = len(fraction) - 1 if fraction else 0
    exponent = match.group(3)
    return before, after, exponent
def find_and_format(string):
    """Split *string* into literal text, per-float format codes, and values.

    Returns (sub_strs, formats, floats, format_table):
      sub_strs     -- text pieces between the floats
      formats      -- one small int code per float (most common format = 0)
      floats       -- the float values themselves
      format_table -- code -> (before, after, exponent) format spec

    Uses .items()/sorted()/list(map(...)) instead of the Python-2-only
    .iteritems() and list.sort() on dict items, so it runs on 2 and 3.
    """
    sub_strs, float_strs = find_floats(string)
    formats = [(determine_format(f), f) for f in float_strs]
    # Most frequent format specs get the smallest codes.
    format_counts = sorted(Counter(i for i, j in formats).items(),
                           key=lambda x: x[1], reverse=True)
    format_set = [i for i, j in format_counts]
    format_hash = dict(zip(format_set, count(0)))
    format_table = dict((code, spec) for spec, code in format_hash.items())
    formats = [format_hash.get(i) for i, j in formats]
    floats = list(map(float, float_strs))
    # now we have: sub_strs, formats, floats, and format_table
    return sub_strs, formats, floats, format_table
def quote_split(some_text):
    """Compress *some_text*: float literals become escaped format codes.

    Returns (text, floats, format_table), the inputs quote_combine() needs
    to reconstruct the original text.

    Uses the builtin zip instead of the Python-2-only itertools.izip, and
    materializes the map() result so indexing/slicing works on Python 3.
    """
    sub_strs, formats, floats, format_table = find_and_format(some_text)
    sub_strs = [quote_ascii(s) for s in sub_strs]
    # Interleave each literal chunk with the code of the float that followed it.
    text = ""
    for plain, fmt_code in zip(sub_strs[:-1], formats):
        text += plain + escape_int(fmt_code)
    text += sub_strs[-1]
    return text, floats, format_table
class formatter(object):
    """Callable rendering a float with a fixed printf-style format.

    a -- total field width
    b -- digits after the decimal point
    e -- None for fixed-point, or 'e'/'E' for scientific notation
    """
    def __init__(self, a, b, e):
        if e is None:
            self.fmtstr = "%" + str(a) + "." + str(b) + "f"
        elif e in ['e', 'E']:
            self.fmtstr = "%" + str(a) + "." + str(b) + e
        else:
            raise Exception("Should not get here")
    def __call__(self, value):
        # Parameter renamed from `float`, which shadowed the builtin.
        return self.fmtstr % value
def format_table_to_format_functions(format_table):
    """Map each format code to a formatter callable built from its spec.

    Uses .items() (not the Python-2-only .iteritems()) so this works on
    both Python 2 and 3.
    """
    return dict((code, formatter(*spec)) for code, spec in format_table.items())
def quote_combine(text, floats, format_table):
    """Inverse of quote_split(): re-insert formatted floats into *text*.

    Consumes *floats* in order, rendering each with the formatter selected
    by the embedded format code.
    """
    format_funcs = format_table_to_format_functions(format_table)
    out_text = ""
    for token in unquote_ascii(text):
        # isinstance instead of `type(t) in [str]`: idiomatic and subclass-safe.
        if isinstance(token, str):
            out_text += token
            continue
        # must be an int format code (or other control code if we extend this)
        out_text += format_funcs[token](floats.pop(0))
    return out_text
if __name__ == "__main__":
mystr = "this is a test " + chr(0) + " foobarbaz"
quoted = quote_ascii(mystr)
print repr(quoted)
rebuilt = "".join([i for i in unquote_ascii(quoted)])
print "mystr == rebuilt : " , mystr == rebuilt
all_control = ' '.join(map(chr,range(128)))
all_quoted = quote_ascii(all_control)
print repr(all_quoted)
all_rebuilt = ''.join([i for i in unquote_ascii(all_quoted)])
print "all_control == all_rebuilt : ", all_control == all_rebuilt
test_mixed = """
uni:0.296203966948793):0.0198614304913658):0.00313131156568666,(Ochrogaster_lunifer:0.0335397657256079,Leucoptera_malifoliella:0.182398130282726):0.204654940030161):0.0285165061041942):0.09061
25666360033,Anopheles_janconnae:0.0296054179988425):0.23297075510678,Psacothea_hilaris:0.116613155785488):0.14300025246744,Libelloides_macaronius:0.126125873732874):0.113157561360003,((((((Cul
ex_quinquefasciatus:0.128071805904412,Hydaropsis_longirostris:0.232767699087299):0.0756506729636065,Papilio_maraho:0.132131329339393):0.0693022898276685,((Drosophila_littoralis:0.0675965217657
067,(Fergusonina_sp:0.0118042970178113,Fergusonina_taylori:0.0273468886324472):0.13320494600693):0.183729959583521,(Radoszkowskius_oculata:0.483929844509303,Chauliognathus_opacus:0.13049475006
1746):0.0144023962254054):0.169225394395885):1.60198243883956e-05,Calosoma_sp:0.225135731050968):0.0318179868523602,Saturnia_boisduvalii:0.171394047556305):0.019931413637447,Pieris_rapae:0.177565426475903):0.035014799384982):0.00768938639746985,Ctenoptilum_vasava:0.224094258552591):0.0366767838042755):0.224978718946498,Pristomyrmex_punctatus:0.179458026630544,Cephus_cinctus:0.473643725204688);
1
8.32447056025
0.612247538276
5000
9.75683378985
0.785629412398 0.451207130083 0.578293117491 1.15091849484
0.802012262232 0.825449905706 0.709524362196 1.60867293101 1.57365779191 0.470584293816
0.50410073019 0.0112524472997 0.0210404324894 0.463606390021
0.048633128877 0.0768247243058 0.00958532684383 0.864956819973
0.162594297468 0.070392991515 0.0245891591225 0.742423551894
0.441329986715 0.00980131982253 0.102325662397 0.446543031065
0.0292357753269 0.179832097862 0.0210820261087 0.769850100702
0.0267404315607 0.908333737631 0.0180542065187 0.0468716242896
0.0861348350274 0.00622694813271 0.289835799751 0.617802417089
0.602019418358 0.0104031257314 0.225365910904 0.162211545006
0.717769999178 0.00520863106162 0.249239274839 0.0277820949206
0.017409301224 0.175557368003 0.780909985125 0.0261233456481
0.743394195269 0.0273672613383 0.0205452644774 0.208693278916
0.0584151337798 0.097362698597 0.270780012832 0.573442154791
0.707523442908 0.00769127647777 0.0730814093904 0.211703871224
0.157782880864 0.322778872355 0.285144353627 0.234293893154
0.0370108519042 0.0954211619303 0.660541958011 0.207026028155
0.652377049752 0.0770410996937 0.0751890240406 0.195392826513
0.214985030293 0.00614055801601 0.615024755845 0.163849655846
0.225389618052 0.0148086743634 0.0394970620291 0.720304645556
0.79514168245 0.0714877318432 0.0399749404705 0.0933956452367
0.53034370836 0.0365042684262 0.348832010177 0.0843200130362
0.797499469215 0.0120892949631 0.104685381278 0.0857258545441
0.536911204749 0.0474624759152 0.0331488201886 0.382477499147
0.330074593462 0.0455863791227 0.0231825223289 0.601156505086
0.0634716703549 0.0576030497373 0.131509675441 0.747415604466
0.340976382049 0.175331986371 0.0417037900684 0.441987841511
0.349323615727 0.00336967888599 0.606040463369 0.0412662420189
0.325893300113 0.0692610791681 0.119787923369 0.48505769735
0.0128323060265 0.0990355031096 0.00458172762244 0.883550463241
0.237401923487 0.0216131905593 0.295548225404 0.44543666055
0.0996363577612 0.0340124121964 0.392446779402 0.47390445064
"""
print quote_split(test_mixed)
|
from TwitterAPI import TwitterAPI
import json
import os
# Twitter credentials come from the environment; a missing variable yields
# None and authentication will fail when the API is first used.
consumer_key = os.getenv("CONSUMER_KEY")
consumer_secret = os.getenv("CONSUMER_SECRET")
access_token = os.getenv("ACCESS_TOKEN")
access_token_secret = os.getenv("ACCESS_TOKEN_SECRET")
# Module-level client shared by sendDM().
api = TwitterAPI(consumer_key, consumer_secret,access_token,access_token_secret)
def sendDM(user_id, message_text):
    """Send a Twitter direct message containing *message_text* to *user_id*.

    Prints 'SUCCESS' on HTTP 200, otherwise prints the error body.
    """
    payload = {
        "event": {
            "type": "message_create",
            "message_create": {
                "target": {"recipient_id": user_id},
                "message_data": {"text": message_text},
            },
        }
    }
    response = api.request('direct_messages/events/new', json.dumps(payload))
    if response.status_code == 200:
        print('SUCCESS')
    else:
        print('PROBLEM: ' + response.text)
|
"""Script to create and trigger Firefox testruns in Jenkins."""
import ConfigParser
import copy
import logging
import os
import re
import sys
import time
import jenkins
import requests
import taskcluster
from mozdownload import FactoryScraper
from mozdownload import errors as download_errors
from thclient import TreeherderClient
# Script-wide logging: verbose for this module, quiet for dependencies.
logging.basicConfig(format='%(levelname)s | %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Limit logging of dependent packages to warnings only
logging.getLogger("hawk").setLevel(logging.WARN)
logging.getLogger('redo').setLevel(logging.WARN)
logging.getLogger('requests').setLevel(logging.WARN)
logging.getLogger("taskcluster").setLevel(logging.WARN)
logging.getLogger('thclient').setLevel(logging.WARN)
def query_file_url(properties, property_overrides=None):
    """Query for the specified build by using mozdownload.

    This method uses the properties as received via Mozilla Pulse to query
    the build via mozdownload. Use the property overrides to customize the
    query, e.g. different build or test package files.
    """
    property_overrides = property_overrides or {}
    kwargs = {
        # General arguments for all types of builds
        'build_type': properties.get('build_type'),
        'locale': properties.get('locale'),
        'platform': properties.get('platform'),
        'retry_attempts': 5,
        'retry_delay': 30,
        # Arguments for daily builds
        'branch': properties.get('branch'),
        'build_id': properties.get('buildid'),
        # Arguments for candidate builds
        'build_number': properties.get('build_number'),
        'version': properties.get('version'),
        'logger': logger,
    }
    # Update arguments with given overrides
    kwargs.update(property_overrides)
    # Lazy %-args: the dict is only rendered when DEBUG logging is enabled.
    logger.debug('Query file details for: %s', kwargs)
    return FactoryScraper(kwargs['build_type'], **kwargs).url
def get_installer_url(properties):
    """Get the installer URL via mozdownload."""
    # Thin wrapper: no property overrides means "the installer file itself".
    return query_file_url(properties)
def query_taskcluster_for_test_packages_url(properties):
    """Return the URL of the test packages JSON file.

    Returns None when no test_packages.json artifact exists for the task.
    May raise taskcluster.exceptions.TaskclusterFailure from the index
    lookup (callers catch it and fall back to Treeherder).
    """
    queue = taskcluster.Queue()
    route = "gecko.v2.{branch}.nightly.revision.{revision}.firefox.{platform}-opt"
    task_id = taskcluster.Index().findTask(route.format(**properties))['taskId']
    artifacts = queue.listLatestArtifacts(task_id)["artifacts"]
    for artifact in artifacts:
        if artifact['name'].endswith("test_packages.json"):
            # Removed an unreachable `break` that followed this return.
            return queue.buildUrl('getLatestArtifact', task_id, artifact["name"])
    return None
def query_treeherder_for_test_packages_url(properties):
    """Return the URL of the test packages JSON file.

    In case of localized daily builds we can query the en-US build to get
    the URL, but for candidate builds we need the tinderbox build
    of the first parent changeset which was not checked-in by the release
    automation process (necessary until bug 1242035 is not fixed).
    """
    overrides = {
        'locale': 'en-US',
        'extension': 'test_packages.json',
        'build_type': 'tinderbox',
    }
    platform_map = {
        'linux': {'build_platform': 'linux32'},
        'linux64': {'build_platform': 'linux64'},
        'mac': {'build_os': 'mac', 'build_architecture': 'x86_64'},
        'win32': {'build_os': 'win', 'build_architecture': 'x86'},
        'win64': {'build_os': 'win', 'build_architecture': 'x86_64'},
    }
    revision = properties['revision']
    client = TreeherderClient()
    resultsets = client.get_resultsets(properties['branch'],
                                       tochange=revision,
                                       count=50)
    # Retrieve the option hashes to filter for opt builds.
    # .items() replaces the Python-2-only .iteritems() so this also runs on 3.
    option_hash = None
    for key, values in client.get_option_collection_hash().items():
        for value in values:
            if value['name'] == 'opt':
                option_hash = key
                break
        if option_hash:
            break
    # Set filters to speed-up querying jobs
    kwargs = {
        'job_type_name': 'Build',
        'exclusion_profile': False,
        'option_collection_hash': option_hash,
        'result': 'success',
    }
    kwargs.update(platform_map[properties['platform']])
    # Walk back through the resultsets until one has a successful build job.
    for resultset in resultsets:
        kwargs.update({'result_set_id': resultset['id']})
        jobs = client.get_jobs(properties['branch'], **kwargs)
        if len(jobs):
            revision = resultset['revision']
            break
    overrides['revision'] = revision
    # For update tests we need the test package of the target build. That allows
    # us to add fallback code in case major parts of the ui are changing in Firefox.
    if properties.get('target_buildid'):
        overrides['build_id'] = properties['target_buildid']
    # The test package json file has a prefix with bug 1239808 fixed. Older builds need
    # a fallback to a prefix-less filename.
    try:
        url = query_file_url(properties, property_overrides=overrides)
    except download_errors.NotFoundError:
        extension = overrides.pop('extension')
        build_url = query_file_url(properties, property_overrides=overrides)
        url = '{}/{}'.format(build_url[:build_url.rfind('/')], extension)
        r = requests.head(url)
        if r.status_code != 200:
            url = None
    return url
def get_build_details(version_string):
    """Extract the type, version, and build_number of a version as given in the config file."""
    # Parses versions like '5.0', '5.0#3', '5.0b1', '5.0b2#1',
    # '10.0esr#1', '10.0.4esr#1' — an optional '#N' names a build number.
    matched = re.match(r'(?P<version>\d+[^#\s]+)(#(?P<build>\d+))?', version_string)
    version = matched.group('version')
    build_number = matched.group('build')
    # ESR takes precedence over the beta marker when classifying the branch.
    if 'esr' in version:
        branch = 'mozilla-esr{}'.format(version.split('.')[0])
    elif 'b' in version:
        branch = 'mozilla-beta'
    else:
        branch = 'mozilla-release'
    build_type = 'candidate' if build_number else 'release'
    return {
        'branch': branch,
        'build_number': build_number,
        'build_type': build_type,
        'version': version,
    }
def get_target_build_details(properties, platform):
    """Retrieve build details (build_id, revision, test_packages_url) for the target version."""
    props = copy.deepcopy(properties)
    props.update({'platform': platform})
    # Retrieve platform specific info.txt
    overrides = {
        'locale': 'en-US',
        'extension': 'json',
    }
    logger.info('Retrieving target build details for Firefox {} build {} on {}...'.format(
        props['version'], props['build_number'], props['platform']))
    url = query_file_url(props, property_overrides=overrides)
    r = requests.get(url)
    # Update revision to retrieve the test package URL
    props.update({'revision': r.json()['moz_source_stamp']})
    details = {
        'build_id': r.json()['buildid'],
        'revision': props['revision'],
    }
    # First try to retrieve the build details from Taskcluster. If it cannot be found
    # fallback to querying Treeherder.
    try:
        details.update({'test_packages_url': query_taskcluster_for_test_packages_url(props)})
    except taskcluster.exceptions.TaskclusterFailure as exc:
        msg = "Could not find builds's 'test_packages.json' via TaskCluster: {}"
        # Pass the exception itself: exc.message is Python-2-only and is
        # gone on Python 3; str() conversion happens inside format().
        logger.warning(msg.format(exc))
        details.update({'test_packages_url': query_treeherder_for_test_packages_url(props)})
    logger.info('Target build details: {}'.format(details))
    return details
def load_authentication_config():
    """Load '.authentication.ini' from four directory levels above this file.

    Returns:
        dict: {section_name: {option: value}} for every section in the file.

    Raises:
        IOError: if the config file does not exist at the expected location.
    """
    # Walk up five times: once off the file itself, then four parent dirs.
    root_dir = os.path.abspath(__file__)
    for _ in range(5):
        root_dir = os.path.dirname(root_dir)
    authfile = os.path.join(root_dir, '.authentication.ini')
    if not os.path.exists(authfile):
        raise IOError('Config file for authentications not found: {}'.
                      format(os.path.abspath(authfile)))
    config = ConfigParser.ConfigParser()
    config.read(authfile)
    # config.items(section) yields (option, value) pairs for that section.
    return {section: dict(config.items(section)) for section in config.sections()}
def main():
    """Create and trigger ondemand Firefox test jobs in Jenkins.

    Reads the config file named on the command line, resolves the target
    candidate build, and triggers one Jenkins job per (node, build, locale).
    """
    auth = load_authentication_config()
    logger.info('Connecting to Jenkins at "%s"...' % auth['jenkins']['url'])
    j = jenkins.Jenkins(auth['jenkins']['url'],
                        username=auth['jenkins']['user'],
                        password=auth['jenkins']['password'])
    logger.info('Connected to Jenkins.')
    if len(sys.argv) != 2:
        logger.error('Configuration file not specified.')
        logger.error('Usage: %s config' % sys.argv[0])
        sys.exit(1)
    # Read-in configuration options
    config = ConfigParser.SafeConfigParser()
    config.read(sys.argv[1])
    # Read all testrun entries
    testrun = {}
    for entry in config.options('testrun'):
        testrun.update({entry: config.get('testrun', entry, raw=True)})
    # Retrieve version details of the target build
    testrun.update(get_build_details(testrun.pop('target-version')))
    if testrun['build_type'] != 'candidate':
        raise Exception('Target build has to be a candidate build.')
    # Cache for platform specific target build details
    target_build_details = {}
    # Iterate through all target nodes
    job_count = 0
    for section in config.sections():
        # Retrieve the platform, i.e. win32 or linux64
        if not config.has_option(section, 'platform'):
            continue
        node_labels = section.split()
        platform = config.get(section, 'platform')
        if platform not in target_build_details:
            target_build_details[platform] = get_target_build_details(testrun, platform)
        # Iterate through all builds per platform
        for entry in config.options(section):
            try:
                # Skip all non version lines
                build_details = get_build_details(entry)
                build_details.update({'platform': platform})
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed while skipping entries.
                continue
            for locale in config.get(section, entry).split():
                build_details.update({'locale': locale})
                parameters = {
                    'BRANCH': testrun['branch'],
                    'INSTALLER_URL': get_installer_url(build_details),
                    'LOCALE': locale,
                    'NODES': ' && '.join(node_labels),
                    'REVISION': target_build_details[platform]['revision'],
                    'TEST_PACKAGES_URL': target_build_details[platform]['test_packages_url'],
                }
                if testrun['script'] == 'update':
                    parameters['TARGET_BUILD_ID'] = target_build_details[platform]['build_id']
                    parameters['CHANNEL'] = testrun['channel']
                    parameters['ALLOW_MAR_CHANNEL'] = \
                        testrun.get('allow-mar-channel', None)
                    parameters['UPDATE_NUMBER'] = build_details['version']
                logger.info('Triggering job: ondemand_%s with %s' % (testrun['script'],
                                                                     parameters))
                j.build_job('ondemand_%s' % testrun['script'], parameters)
                job_count += 1
                # Give Jenkins a bit of breath to process other threads
                time.sleep(2.5)
    logger.info('%d jobs have been triggered.' % job_count)
|
#!/usr/bin/env python3
from grader import inizializza, pensoCheCodiceSia, checkCode
def check_code(attempt, result, history):
    """Score *attempt* against the 4-digit code *history*, writing into *result*.

    result[0] (black) counts exact position matches; result[1] (white)
    counts digits present in both codes but in the wrong position. Digits
    must be in 0..5.
    """
    black = 0
    attempt_freq = [0] * 6
    history_freq = [0] * 6
    for guessed, actual in zip(attempt, history):
        if guessed == actual:
            black += 1
        else:
            assert 0 <= guessed <= 5
            assert 0 <= actual <= 5
            attempt_freq[guessed] += 1
            history_freq[actual] += 1
    white = sum(min(a, h) for a, h in zip(attempt_freq, history_freq))
    result[0] = black
    result[1] = white
def rank(black, white):
    """Collapse a (black, white) score into a single comparable integer (0..16)."""
    return white + 4 * black
def increase(num):
    """Increment the 4-digit base-6 code *num* in place (wraps to all zeros)."""
    for pos in (3, 2, 1, 0):
        num[pos] += 1
        if num[pos] < 6:
            return
        # Digit overflowed: reset it and carry into the next position.
        num[pos] = 0
# 6x6x6x6 lookup table marking codes that contradict an earlier answer.
is_invalid = [[[[False for _ in range(6)]
                for _ in range(6)] for _ in range(6)] for _ in range(6)]
def is_valid(code):
    """Return True while *code* has not been ruled out yet."""
    return not is_invalid[code[0]][code[1]][code[2]][code[3]]
def set_invalid(code):
    """Rule *code* out as a possible solution."""
    # True (not 1) for consistency with the table's False initialization.
    is_invalid[code[0]][code[1]][code[2]][code[3]] = True
def get_score(code):
    """Return the size of the largest answer bucket if *code* were the secret.

    Enumerates every still-valid probe, scores it against *code*, and
    buckets probes by the (black, white) rank of that score; the maximum
    bucket size is returned (0 if no valid probe exists).
    """
    probe = [0, 0, 0, 0]
    buckets = [0] * 17
    while True:
        if is_valid(probe):
            outcome = [0, 0]
            check_code(probe, outcome, code)
            buckets[rank(outcome[0], outcome[1])] += 1
        increase(probe)
        if sum(probe) == 0:
            break
    return max(buckets)
inizializza()
g_code = [0, 0, 1, 1]  # Candidate code to submit next
guesses = 0
result = [0, 0]
# Ring of past guesses and the rank of the answer each one received.
previous_answers = [[0] * 4 for _ in range(10)]
previous_rankings = [0] * 10
while True:
    guesses += 1
    # Ask the grader to score the current guess into `result`.
    checkCode(g_code, result)
    ranking = rank(result[0], result[1])
    previous_answers[guesses - 1][0] = g_code[0]
    previous_answers[guesses - 1][1] = g_code[1]
    previous_answers[guesses - 1][2] = g_code[2]
    previous_answers[guesses - 1][3] = g_code[3]
    previous_rankings[guesses - 1] = ranking
    # Four blacks: the code was found.
    if result[0] == 4:
        break
    a_code = [0] * 4  # Temporary candidate code
    # Rule out every candidate inconsistent with some earlier answer.
    while True:
        if is_valid(a_code):
            for i in range(guesses):
                check_code(a_code, result, previous_answers[i])
                test_ranking = rank(result[0], result[1])
                if test_ranking != previous_rankings[i]:
                    set_invalid(a_code)
                    break
        increase(a_code)
        if sum(a_code) == 0:
            break
    # Minimax: pick the valid candidate whose worst-case bucket is smallest.
    best_score = 0x7fffffff
    b_code = [0] * 4
    while True:
        if is_valid(a_code):
            score = get_score(a_code)
            if best_score > score:
                best_score = score
                b_code[0] = a_code[0]
                b_code[1] = a_code[1]
                b_code[2] = a_code[2]
                b_code[3] = a_code[3]
        increase(a_code)
        if sum(a_code) == 0:
            break
    g_code[0] = b_code[0]
    g_code[1] = b_code[1]
    g_code[2] = b_code[2]
    g_code[3] = b_code[3]
pensoCheCodiceSia(g_code)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'frmMainMenuGUI.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_frmMainMenuGUI(object):
    """Generated PyQt5 UI for the easy-fMRI main menu.

    NOTE: this class is produced by pyuic5 from frmMainMenuGUI.ui — do not
    hand-edit logic here; regenerate from the .ui file instead.
    """
    def setupUi(self, frmMainMenuGUI):
        """Build the widget tree and geometry on *frmMainMenuGUI*."""
        frmMainMenuGUI.setObjectName("frmMainMenuGUI")
        frmMainMenuGUI.resize(723, 494)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/icons/Icons/icons8-brain-128.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        frmMainMenuGUI.setWindowIcon(icon)
        # Title label
        self.label = QtWidgets.QLabel(frmMainMenuGUI)
        self.label.setGeometry(QtCore.QRect(100, 14, 611, 80))
        font = QtGui.QFont()
        font.setPointSize(48)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.tabWidget = QtWidgets.QTabWidget(frmMainMenuGUI)
        self.tabWidget.setGeometry(QtCore.QRect(10, 120, 701, 201))
        self.tabWidget.setObjectName("tabWidget")
        # "Main" tab: the four primary analysis launchers
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        self.btnPreprocess = QtWidgets.QToolButton(self.tab)
        self.btnPreprocess.setGeometry(QtCore.QRect(10, 10, 160, 150))
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(":/icons/Icons/402612-200.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnPreprocess.setIcon(icon1)
        self.btnPreprocess.setIconSize(QtCore.QSize(100, 100))
        self.btnPreprocess.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.btnPreprocess.setObjectName("btnPreprocess")
        self.btnFeatureAnalysis = QtWidgets.QToolButton(self.tab)
        self.btnFeatureAnalysis.setGeometry(QtCore.QRect(180, 10, 160, 150))
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(":/icons/Icons/icons8-scatter-plot-filled-64.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnFeatureAnalysis.setIcon(icon2)
        self.btnFeatureAnalysis.setIconSize(QtCore.QSize(100, 100))
        self.btnFeatureAnalysis.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.btnFeatureAnalysis.setObjectName("btnFeatureAnalysis")
        self.btnModelAnalysis = QtWidgets.QToolButton(self.tab)
        self.btnModelAnalysis.setGeometry(QtCore.QRect(350, 10, 160, 150))
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(":/icons/Icons/noun_mind_1705433.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnModelAnalysis.setIcon(icon3)
        self.btnModelAnalysis.setIconSize(QtCore.QSize(100, 100))
        self.btnModelAnalysis.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.btnModelAnalysis.setObjectName("btnModelAnalysis")
        self.btnVisualization = QtWidgets.QToolButton(self.tab)
        self.btnVisualization.setGeometry(QtCore.QRect(520, 10, 160, 150))
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(":/icons/Icons/noun_Brain_7784.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnVisualization.setIcon(icon4)
        self.btnVisualization.setIconSize(QtCore.QSize(100, 100))
        self.btnVisualization.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.btnVisualization.setObjectName("btnVisualization")
        self.tabWidget.addTab(self.tab, "")
        # "Tools" tab: tool selector plus a run button
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.cbTools = QtWidgets.QComboBox(self.tab_2)
        self.cbTools.setGeometry(QtCore.QRect(10, 40, 491, 121))
        self.cbTools.setObjectName("cbTools")
        self.label_2 = QtWidgets.QLabel(self.tab_2)
        self.label_2.setGeometry(QtCore.QRect(10, 20, 471, 20))
        self.label_2.setObjectName("label_2")
        self.btnTools = QtWidgets.QToolButton(self.tab_2)
        self.btnTools.setGeometry(QtCore.QRect(520, 10, 160, 150))
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap(":/icons/Icons/icons8-play-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnTools.setIcon(icon5)
        self.btnTools.setIconSize(QtCore.QSize(100, 100))
        self.btnTools.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
        self.btnTools.setObjectName("btnTools")
        self.tabWidget.addTab(self.tab_2, "")
        # "Mode" tab: install dir display plus stable/developing switches
        self.tab_3 = QtWidgets.QWidget()
        self.tab_3.setObjectName("tab_3")
        self.label_3 = QtWidgets.QLabel(self.tab_3)
        self.label_3.setGeometry(QtCore.QRect(10, 20, 471, 20))
        self.label_3.setObjectName("label_3")
        self.txtEZDIR = QtWidgets.QLineEdit(self.tab_3)
        self.txtEZDIR.setGeometry(QtCore.QRect(10, 47, 321, 111))
        self.txtEZDIR.setReadOnly(True)
        self.txtEZDIR.setObjectName("txtEZDIR")
        self.btnStable = QtWidgets.QToolButton(self.tab_3)
        self.btnStable.setGeometry(QtCore.QRect(350, 10, 160, 150))
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap(":/icons/Icons/icons8-administrative-tools-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnStable.setIcon(icon6)
        self.btnStable.setIconSize(QtCore.QSize(100, 100))
        self.btnStable.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.btnStable.setObjectName("btnStable")
        self.btnDev = QtWidgets.QToolButton(self.tab_3)
        self.btnDev.setGeometry(QtCore.QRect(520, 10, 160, 150))
        icon7 = QtGui.QIcon()
        icon7.addPixmap(QtGui.QPixmap(":/icons/Icons/icons8-settings-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnDev.setIcon(icon7)
        self.btnDev.setIconSize(QtCore.QSize(100, 100))
        self.btnDev.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.btnDev.setObjectName("btnDev")
        self.tabWidget.addTab(self.tab_3, "")
        # "Update" tab: update source selector plus an update button
        self.tab_4 = QtWidgets.QWidget()
        self.tab_4.setObjectName("tab_4")
        self.cbSource = QtWidgets.QComboBox(self.tab_4)
        self.cbSource.setGeometry(QtCore.QRect(10, 40, 491, 121))
        self.cbSource.setObjectName("cbSource")
        self.label_4 = QtWidgets.QLabel(self.tab_4)
        self.label_4.setGeometry(QtCore.QRect(10, 20, 471, 20))
        self.label_4.setObjectName("label_4")
        self.btnUpdate = QtWidgets.QToolButton(self.tab_4)
        self.btnUpdate.setGeometry(QtCore.QRect(520, 10, 160, 150))
        icon8 = QtGui.QIcon()
        icon8.addPixmap(QtGui.QPixmap(":/icons/Icons/icons8-installing-updates-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnUpdate.setIcon(icon8)
        self.btnUpdate.setIconSize(QtCore.QSize(100, 100))
        self.btnUpdate.setPopupMode(QtWidgets.QToolButton.DelayedPopup)
        self.btnUpdate.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
        self.btnUpdate.setObjectName("btnUpdate")
        self.tabWidget.addTab(self.tab_4, "")
        # Window-level widgets: logo, exit button, contact button
        self.label_5 = QtWidgets.QLabel(frmMainMenuGUI)
        self.label_5.setGeometry(QtCore.QRect(10, 10, 91, 91))
        self.label_5.setText("")
        self.label_5.setPixmap(QtGui.QPixmap(":/icons/Icons/icons8-brain-128.png"))
        self.label_5.setScaledContents(True)
        self.label_5.setObjectName("label_5")
        self.btnExit = QtWidgets.QToolButton(frmMainMenuGUI)
        self.btnExit.setGeometry(QtCore.QRect(550, 330, 160, 150))
        self.btnExit.setText("")
        icon9 = QtGui.QIcon()
        icon9.addPixmap(QtGui.QPixmap(":/icons/Icons/icons8-shutdown-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnExit.setIcon(icon9)
        self.btnExit.setIconSize(QtCore.QSize(100, 100))
        self.btnExit.setPopupMode(QtWidgets.QToolButton.DelayedPopup)
        self.btnExit.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
        self.btnExit.setObjectName("btnExit")
        self.btnAbout = QtWidgets.QToolButton(frmMainMenuGUI)
        self.btnAbout.setGeometry(QtCore.QRect(10, 330, 160, 150))
        icon10 = QtGui.QIcon()
        icon10.addPixmap(QtGui.QPixmap(":/icons/Icons/icons8-whatsapp-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnAbout.setIcon(icon10)
        self.btnAbout.setIconSize(QtCore.QSize(100, 100))
        self.btnAbout.setObjectName("btnAbout")
        self.retranslateUi(frmMainMenuGUI)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(frmMainMenuGUI)
    def retranslateUi(self, frmMainMenuGUI):
        """Set all user-visible strings (called once from setupUi)."""
        _translate = QtCore.QCoreApplication.translate
        frmMainMenuGUI.setWindowTitle(_translate("frmMainMenuGUI", "easy fMRI (2.0)"))
        self.label.setText(_translate("frmMainMenuGUI", "easy fMRI"))
        self.btnPreprocess.setText(_translate("frmMainMenuGUI", "Preprocessing (FSL)"))
        self.btnFeatureAnalysis.setText(_translate("frmMainMenuGUI", "Feature Analysis"))
        self.btnModelAnalysis.setText(_translate("frmMainMenuGUI", "Model Analysis"))
        self.btnVisualization.setText(_translate("frmMainMenuGUI", "Visualization (AFNI)"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("frmMainMenuGUI", "Main"))
        self.label_2.setText(_translate("frmMainMenuGUI", "Select a tool:"))
        self.btnTools.setText(_translate("frmMainMenuGUI", "Run"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("frmMainMenuGUI", "Tools"))
        self.label_3.setText(_translate("frmMainMenuGUI", "Easy fMRI DIR (set from $EASYFMRI):"))
        self.btnStable.setText(_translate("frmMainMenuGUI", "Stable"))
        self.btnDev.setText(_translate("frmMainMenuGUI", "Developing"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("frmMainMenuGUI", "Mode"))
        self.label_4.setText(_translate("frmMainMenuGUI", "Source:"))
        self.btnUpdate.setText(_translate("frmMainMenuGUI", "Update"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("frmMainMenuGUI", "Update"))
        self.btnAbout.setText(_translate("frmMainMenuGUI", "Contact Us"))
import icon_rc
|
import os
import pandas as pd
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from flask import Flask, jsonify, render_template
from flask_sqlalchemy import SQLAlchemy
# Serve ./static at the site root; re-render templates on change (dev convenience).
app = Flask(__name__, static_url_path='', static_folder='static')
app.config["TEMPLATES_AUTO_RELOAD"] = True
@app.route("/")
def index():
"""Return the homepage."""
# return render_template("index.html")
return render_template("AGCdashboard.html")
@app.route("/dashboard")
def viewdashboard():
return render_template("AGCdashboard.html")
@app.route("/localsnapshot")
def snapshots():
return render_template("AGCdashboard2.html")
@app.route("/hoiviews")
def hoiviews():
return render_template("hoiviews.html")
@app.route("/tech_education_model")
def tech_education_model():
return render_template("tech_education_model.html")
@app.route("/historic_trend_model")
def historic_trend_model():
return render_template("historic_trend_model.html")
if __name__ == "__main__":
app.run()
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division # Standardmäßig float division - Ganzzahldivision kann man explizit mit '//' durchführen
from __future__ import absolute_import
import pygame
from pyecs import *
# from pyecs.components import *
# from components import *
class Process(Component):
    """Component recording how many input and output ports an entity has."""
    def __init__(self, num_inputs, num_outputs, *args,**kwargs):
        '''
        @summary: Store the port counts and forward everything else to Component.
        @param num_inputs: number of input ports
        @param num_outputs: number of output ports
        @param *args: forwarded to Component.__init__
        @param **kwargs: forwarded to Component.__init__
        @result: None
        '''
        super(Process, self).__init__(*args,**kwargs)
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
|
import sys
from words2number import *
import numpy as np
import json
import cPickle as pkl
import os
import gensim
import collections
class ReadBatchData():
    """Loads CSQA-style QA data plus wikidata KB embeddings and prepares batches.

    Python 2 code (print statements, xrange, cPickle). On construction it
    reads entity/relation/type embeddings and vocabularies, the word vocab
    with word2vec initialisation, and the program/argument-type definitions
    from 'argument_types.txt', 'program_definition.txt' and
    'program_algorithm.txt' in the working directory.
    """
    def __init__(self, param):
        """Build all vocabularies, embedding matrices and program tables from *param*."""
        np.random.seed(1)
        self.pad = '<pad>'
        self.unk = '<unk>'
        self.start = '</s>'
        self.end = '</e>'
        self.start_index = 0
        self.end_index = 1
        self.unk_index = 2
        self.pad_index = 3
        self.none_argtype_index = 0
        self.none_argtype = 'none'
        self.none_progtype_index = 0
        self.none_progtype = 'none'
        self.terminate_progtype = 'terminate'
        # self.noop_progtype = 'no-op'
        # self.noop_progtype_index = 1#bla
        # self.noop_argtype = 'none'
        # self.noop_argtype_index = 0
        self.batch_size = param['batch_size']
        self.wikidata_embed_dim = param['wikidata_embed_dim']
        self.pad = '<pad>'
        self.pad_kb_symbol_index = 0
        # Maps the verbose question-type names in the data to short state labels.
        all_questypes = {'Simple Question (Direct)':'simple',
                         'Verification (Boolean) (All)':'verify',
                         'Quantitative Reasoning (Count) (All)':'quantitative count',
                         'Quantitative Reasoning (All)':'quantitative',
                         'Comparative Reasoning (Count) (All)':'comparative count',
                         'Comparative Reasoning (All)':'comparative',
                         'Logical Reasoning (All)':'logical'}
        self.all_questypes_inv = {v:k for k,v in all_questypes.items()}
        if param['question_type']=='all':
            self.state_questype_map = all_questypes
        else:
            if not param['questype_wise_batching']:
                raise Exception(' if question_type is not "all", questype_wise_batching should be set to True')
            if ',' not in param['question_type']:
                self.state_questype_map = {self.all_questypes_inv[param['question_type']]:param['question_type']}
            else:
                self.state_questype_map = {self.all_questypes_inv[q]:q for q in param['question_type'].split(',')}
        # KB embeddings; index 0 is reserved for <pad>, so stored ids are shifted by +1.
        self.wikidata_ent_embed = np.load(param['wikidata_dir']+'/ent_embed.pkl.npy').astype(np.float32)
        self.wikidata_ent_vocab = {self.pad:self.pad_kb_symbol_index}
        self.wikidata_ent_vocab.update({k:(v+1) for k,v in pkl.load(open(param['wikidata_dir']+'/ent_id_map.pickle')).items()})
        self.wikidata_ent_vocab_inv = {v:k for k,v in self.wikidata_ent_vocab.items()}
        self.wikidata_rel_embed = np.load(param['wikidata_dir']+'/rel_embed.pkl.npy').astype(np.float32)
        self.wikidata_rel_vocab = {self.pad:self.pad_kb_symbol_index}
        self.wikidata_rel_vocab.update({k:(v+1) for k,v in pkl.load(open(param['wikidata_dir']+'/rel_id_map.pickle')).items()})
        self.wikidata_rel_vocab_inv = {v:k for k,v in self.wikidata_rel_vocab.items()}
        self.wikidata_type_embed = np.load(param['wikidata_dir']+'/type_embed.pkl.npy').astype(np.float32)
        self.wikidata_type_vocab = {self.pad:self.pad_kb_symbol_index}
        self.wikidata_type_vocab.update({k:(v+1) for k,v in pkl.load(open(param['wikidata_dir']+'/type_id_map.pickle')).items()})
        self.wikidata_type_vocab_inv = {v:k for k,v in self.wikidata_type_vocab.items()}
        # Prepend an all-zero row as the embedding of the <pad> KB symbol.
        new_row = np.zeros((1,param['wikidata_embed_dim']), dtype=np.float32)
        self.wikidata_ent_embed = np.vstack([new_row, self.wikidata_ent_embed]) # corr. to <pad_kb>
        self.wikidata_rel_embed = np.vstack([new_row, self.wikidata_rel_embed]) # corr. to <pad_kb>
        self.wikidata_type_embed = np.vstack([new_row, self.wikidata_type_embed])
        # self.program_type_vocab = {self.none_progtype:self.none_progtype_index, self.noop_progtype:self.noop_progtype_index}
        self.program_type_vocab = {self.none_progtype:self.none_progtype_index}
        self.argument_type_vocab = {self.none_argtype:self.none_argtype_index}
        self.vocab = pkl.load(open(param['vocab_file'],'rb'))
        self.vocab_size = len(self.vocab)
        # Word embeddings: word2vec where available, zeros for index 4,
        # random otherwise. NOTE(review): index 4 presumably corresponds to
        # a special token that should start at zero -- TODO confirm.
        self.vocab_init_embed = np.empty([len(self.vocab.keys()), param['text_embed_dim']], dtype=np.float32)
        self.word2vec_pretrain_embed = gensim.models.KeyedVectors.load_word2vec_format(param['glove_dir']+'/GoogleNews-vectors-negative300.bin', binary=True)
        for i in xrange(self.vocab_init_embed.shape[0]):
            if self.vocab[i] in self.word2vec_pretrain_embed:
                self.vocab_init_embed[i,:] = self.word2vec_pretrain_embed[self.vocab[i]]
            elif i==4:
                self.vocab_init_embed[i,:] = np.zeros((1, self.vocab_init_embed.shape[1]), dtype=np.float32)
            else:
                self.vocab_init_embed[i,:] = np.random.rand(1, self.vocab_init_embed.shape[1]).astype(np.float32)
        # Argument types: one id per line of argument_types.txt (0 = 'none').
        count = 1
        self.rel_index = None
        self.type_index = None
        for line in open('argument_types.txt').readlines():
            line = line.strip()
            if line not in self.argument_type_vocab:
                self.argument_type_vocab[line] = count
                if line=="relation":
                    self.rel_index = count
                if line=="type":
                    self.type_index = count
                count+=1
        self.num_argtypes = len(self.argument_type_vocab)
        self.max_num_var = param['max_num_var']
        self.required_argtypes_for_responsetype = {}
        self.targettype_prog_map = {}
        self.prog_to_argtypes_map = {}
        program_to_argtype_temp = {}
        program_to_targettype_temp = {}
        count = 1
        # program_definition.txt rows: <prog>\t<argtype,...>\t<targettype>
        for line in open('program_definition.txt').readlines():
            parts = line.strip().split('\t')
            prog = parts[0]
            argtypes = parts[1].split(',')
            targettype = parts[2]
            if targettype not in self.targettype_prog_map:
                self.targettype_prog_map[targettype] = []
            self.targettype_prog_map[targettype].append(prog)
            self.prog_to_argtypes_map[prog] = argtypes
            if parts[0] not in self.program_type_vocab:
                self.program_type_vocab[parts[0]] = count
                # self.program_type_vocab[parts[0]] = len(self.program_type_vocab)
                count +=1
                program_to_argtype_temp[self.program_type_vocab[parts[0]]] = [self.argument_type_vocab[a] for a in parts[1].split(',')]
                program_to_targettype_temp[self.program_type_vocab[parts[0]]] = self.argument_type_vocab[parts[2]]
        if param['terminate_prog']:
            self.program_type_vocab[self.terminate_progtype] = len(self.program_type_vocab)
            self.terminate_progtype_index = self.program_type_vocab[self.terminate_progtype]
        self.program_type_vocab_inv = {v:k for k,v in self.program_type_vocab.items()}
        self.argument_type_vocab_inv = {v:k for k,v in self.argument_type_vocab.items()}
        self.num_progs = len(self.program_type_vocab)
        # Pad/truncate every program's argtype list to max_arguments entries.
        self.max_arguments = max([len(v) for v in program_to_argtype_temp.values()])
        for k,v in program_to_argtype_temp.items():
            v = v[:min(self.max_arguments, len(v))]+[self.none_argtype_index]*max(0, self.max_arguments-len(v))
            program_to_argtype_temp[k] = v
        self.program_to_argtype = {k:[self.none_argtype_index]*self.max_arguments for k in self.program_type_vocab.values()}
        self.program_to_targettype = {k:self.none_argtype_index for k in self.program_type_vocab.values()}
        self.program_to_argtype.update(program_to_argtype_temp)
        self.program_to_targettype.update(program_to_targettype_temp)
        self.program_to_argtype = np.asarray(collections.OrderedDict(sorted(self.program_to_argtype.items())).values())
        self.program_to_targettype = np.asarray(collections.OrderedDict(sorted(self.program_to_targettype.items())).values())
        # Programs allowed in the "algorithm" phase vs. the variable-declaration phase.
        self.program_algorithm_phase = [self.program_type_vocab[x.strip()] for x in open('program_algorithm.txt').readlines()]
        self.program_algorithm_phase.append(self.none_progtype_index)
        if param['terminate_prog']:
            self.program_algorithm_phase.append(self.terminate_progtype_index)
        self.program_variable_declaration_phase = [x for x in self.program_type_vocab.values() if x not in self.program_algorithm_phase]
        self.program_variable_declaration_phase.append(self.none_progtype_index)
        print 'finished init: read data'
    def get_response_type(self, question):
        """Heuristically map a question string to an answer argtype id (int/entity/set/bool)."""
        question = question.lower()
        if question.startswith('how many'):
            return self.argument_type_vocab['int']
        elif any([question.startswith(x) for x in ['who','what','which','where','whose','whom']]):
            if any([' '+x.strip()+' ' in ' '+question+' ' for x in ['max','min', 'maximum','minimum']]):
                return self.argument_type_vocab['entity']
            else:
                return self.argument_type_vocab['set']
        elif any([question.startswith(x) for x in ['is','was','has','does','did','do','will','would','shall','should','must','have','has']]):
            return self.argument_type_vocab['bool']
        else:
            return self.argument_type_vocab['set']
    def get_data_per_questype(self, data_dict):
        """Group data instances by question type (instance field 11 holds the state)."""
        data_dict_questype = {}
        for d in data_dict:
            state = d[11]
            if state in self.state_questype_map:
                if self.state_questype_map[state] not in data_dict_questype:
                    data_dict_questype[self.state_questype_map[state]] = []
                data_dict_questype[self.state_questype_map[state]].append(d)
        return data_dict_questype
    def get_all_required_argtypes(self, type, reqd_argtypes):
        """Recursively accumulate how many variables of each argtype can produce *type*.

        Returns the updated {argtype: max count} dict; recursion only descends
        into argtypes not seen before, which bounds the depth.
        """
        if type in self.targettype_prog_map:
            progs = self.targettype_prog_map[type]
            for prog in progs:
                argtypes = self.prog_to_argtypes_map[prog]
                for k,v in collections.Counter(argtypes).items():
                    if k in reqd_argtypes:
                        reqd_argtypes[k] = max(reqd_argtypes[k], v)
                    else:
                        reqd_argtypes[k] = v
                        reqd_argtypes = self.get_all_required_argtypes(k, reqd_argtypes)
        return reqd_argtypes
    def get_all_required_argtypes_matrix(self, reqd_argtypes):
        """Convert a {argtype: count} dict to a dense float vector of length num_argtypes."""
        reqd_argtypes_mat = np.zeros((self.num_argtypes), dtype=np.float32)
        for k in reqd_argtypes:
            reqd_argtypes_mat[self.argument_type_vocab[k]] = float(reqd_argtypes[k])
        return reqd_argtypes_mat
    def get_batch_data(self, data_dict):
        """Assemble one training batch from a list of preprocessed instances.

        Each instance is a tuple indexed 0..13 (context text, context kb ids,
        entities/types/relations/ints, gold responses, question type, and the
        two KB subgraph attention tensors). Returns all tensors the model
        consumes, including the variable tables and KB attention per program.
        """
        num_data = len(data_dict)
        batch_orig_context = [data_dict[i][0] for i in range(num_data)]
        batch_context_nonkb_words = [data_dict[i][1] for i in range(num_data)]
        batch_context_kb_words = [data_dict[i][2] for i in range(num_data)]
        batch_context_kb_words = np.asarray(batch_context_kb_words)
        batch_context_kb_words[batch_context_kb_words==1]=0
        batch_context_entities = [data_dict[i][3] for i in range(num_data)]
        # Human-readable value tables mirror the id tensors (None at pad slots).
        entity_variable_value_table = [[self.wikidata_ent_vocab_inv[e] if e!=self.pad_kb_symbol_index else \
                                        None for e in batch_context_entities[i]] for i in range(num_data)]
        batch_context_types = [data_dict[i][4] for i in range(num_data)]
        type_variable_value_table = [[self.wikidata_type_vocab_inv[t] if t!=self.pad_kb_symbol_index else None \
                                      for t in batch_context_types[i]] for i in range(num_data)]
        batch_context_rel = [data_dict[i][5] for i in range(num_data)]
        rel_variable_value_table = [[self.wikidata_rel_vocab_inv[r] if r!=self.pad_kb_symbol_index else None \
                                     for r in batch_context_rel[i]] for i in range(num_data)]
        batch_context_ints = [data_dict[i][6] for i in range(num_data)]
        batch_orig_response = [data_dict[i][7] for i in range(num_data)]
        batch_response_entities = [data_dict[i][8].split('|') if len(data_dict[i][8].strip())>0 else [] for i in range(num_data)]
        batch_response_ints = [data_dict[i][9].split('|') if len(data_dict[i][9].strip())>0 else [] for i in range(num_data)]
        batch_response_bools = [data_dict[i][10].split('|') if len(data_dict[i][10].strip())>0 else [] for i in range(num_data)]
        batch_questype = [data_dict[i][11] for i in range(num_data)]
        # Keep only the response modality consistent with each question type.
        for i in range(num_data):
            if any([batch_questype[i]==self.all_questypes_inv[x] for x in ['simple','logical','quantitative','comparative']]):
                if len(batch_response_ints[i])>0:
                    batch_response_ints[i] = []
                if len(batch_response_bools[i])>0:
                    batch_response_bools[i] = []
            if any([batch_questype[i]==self.all_questypes_inv[x] for x in ['quantitative count','comparative count']]):
                if len(batch_response_bools[i])>0:
                    batch_response_bools[i] = []
                if len(batch_response_entities[i])>0:
                    batch_response_entities[i] = []
            if batch_questype[i]=='verify':
                if len(batch_response_ints[i])>0:
                    batch_response_ints[i] = []
                if len(batch_response_entities[i])>0:
                    batch_response_entities[i] = []
        batch_rel_attention = None
        batch_type_attention = None
        batch_ent_rel_type_kb_subgraph = [data_dict[i][12] for i in range(num_data)]
        batch_type_rel_type_kb_subgraph = [data_dict[i][13] for i in range(num_data)]
        batch_response_type = []
        batch_required_argtypes = []
        # Cache required-argtype expansions per response type across batches.
        for i in range(num_data):
            response_type = self.get_response_type(batch_orig_context[i])
            if self.argument_type_vocab_inv[response_type] not in self.required_argtypes_for_responsetype:
                required_argtypes = self.get_all_required_argtypes(self.argument_type_vocab_inv[response_type], {})
                self.required_argtypes_for_responsetype[self.argument_type_vocab_inv[response_type]] = required_argtypes
            else:
                required_argtypes = self.required_argtypes_for_responsetype[self.argument_type_vocab_inv[response_type]]
            required_argtypes_mat = self.get_all_required_argtypes_matrix(required_argtypes)
            batch_required_argtypes.append(required_argtypes_mat)
            batch_response_type.append(response_type)
        batch_response_type = np.asarray(batch_response_type)
        batch_context_nonkb_words = np.asarray([[xij for xij in context_words] for context_words in batch_context_nonkb_words])
        batch_context_kb_words = np.asarray([[self.wikidata_ent_embed[xij] for xij in context_words] for context_words in batch_context_kb_words])
        #print ' words ', [[xij for xij in context_words] for context_words in batch_context_kb_words]
        #print 'batch_context_kb_words ', batch_context_kb_words.shape, batch_context_kb_words.mean(axis=-1)
        batch_context_entities = np.asarray([[xij for xij in context_entities] for context_entities in batch_context_entities])
        batch_context_types = np.asarray([[xij for xij in context_types] for context_types in batch_context_types])
        batch_context_rel = np.asarray([[xij for xij in context_rel] for context_rel in batch_context_rel])
        # Sanity check: every instance must carry exactly max_num_var slots.
        if not all([len(x)==self.max_num_var for x in batch_context_entities]):
            raise Exception(str([len(x) for x in batch_context_entities]))
        if not all([len(x)==self.max_num_var for x in batch_context_rel]):
            raise Exception(str([len(x) for x in batch_context_rel]))
        if not all([len(x)==self.max_num_var for x in batch_context_types]):
            raise Exception(str([len(x) for x in batch_context_types]))
        batch_context_ints = np.asarray([[i if i==self.pad_kb_symbol_index else text2int(i) for i in context_int] for context_int in batch_context_ints])
        int_variable_value_table = [[i if i!=self.pad_kb_symbol_index else None for i in ints] for ints in batch_context_ints]
        batch_response_entities = [[str(xij) for xij in response_entities] for response_entities in batch_response_entities]
        batch_response_ints = [[i if i==self.pad_kb_symbol_index else text2int(i) for i in response_int] for response_int in batch_response_ints]
        # 'yes'/'no' strings become Python booleans; anything else is dropped.
        output_batch_response_bools = []
        for response_bool in batch_response_bools:
            batch_response_bool_i = []
            for i in response_bool:
                if i=='yes':
                    batch_response_bool_i.append(True)
                elif i=='no':
                    batch_response_bool_i.append(False)
            output_batch_response_bools.append(batch_response_bool_i)
        batch_response_bools = output_batch_response_bools
        if batch_rel_attention is not None:
            batch_rel_attention = [np.asarray(rel_attention) for rel_attention in batch_rel_attention]
        if batch_type_attention is not None:
            batch_type_attention = [np.asarray(type_attention) for type_attention in batch_type_attention]
        batch_context_nonkb_words = np.transpose(batch_context_nonkb_words, (1,0))
        variable_mask, variable_embed, variable_atten = self.get_variable_table_data(batch_context_entities, batch_context_rel, batch_context_types, batch_context_ints, batch_rel_attention, batch_type_attention)
        #self.debug_kb_attention(batch_ent_rel_type_kb_subgraph, batch_context_entities, batch_context_rel, batch_context_types, self.wikidata_ent_vocab_inv, self.wikidata_rel_vocab_inv, self.wikidata_type_vocab_inv, '(e,r,t)')
        #self.debug_kb_attention(batch_type_rel_type_kb_subgraph, batch_context_types, batch_context_rel, batch_context_types, self.wikidata_type_vocab_inv, self.wikidata_rel_vocab_inv, self.wikidata_type_vocab_inv, '(t,r,t)')
        kb_attention_for_progs = self.get_kb_attention(batch_ent_rel_type_kb_subgraph, batch_type_rel_type_kb_subgraph)
        variable_value_table = self.get_variable_value_table(entity_variable_value_table, rel_variable_value_table, type_variable_value_table, int_variable_value_table)
        variable_value_table = np.transpose(np.asarray(variable_value_table), (1,0,2))
        #variable_value_table is of dimension batch_size x num_argtypes x max_num_var
        return batch_orig_context, batch_context_nonkb_words, batch_context_kb_words, \
               batch_context_entities, batch_context_types, batch_context_rel, batch_context_ints, \
               batch_orig_response, batch_response_entities, batch_response_ints, batch_response_bools, batch_response_type, batch_required_argtypes, \
               variable_mask, variable_embed, variable_atten, kb_attention_for_progs, variable_value_table
    def debug_kb_attention(self, kb_attention, arg1, arg2, arg3, vocab1, vocab2, vocab3, type):
        """Print every (arg1, arg2, arg3) triple whose KB attention entry is 1.0 (debug aid)."""
        for i in range(len(kb_attention)):
            kb_attention_i = np.reshape(kb_attention[i], (self.max_num_var, self.max_num_var, self.max_num_var))
            for i1 in range(self.max_num_var):
                for i2 in range(self.max_num_var):
                    for i3 in range(self.max_num_var):
                        if kb_attention_i[i1][i2][i3]==1.0:
                            print 'batch id ', i, ':: kb attention 1.0 for ',type,' = (',vocab1[arg1[i][i1]],',',vocab2[arg2[i][i2]],',',vocab3[arg3[i][i3]],')'
            print ''
    def get_variable_value_table(self, entity_variable_value_table, rel_variable_value_table, type_variable_value_table, int_variable_value_table):
        """Stack the per-argtype value tables into one num_argtypes-indexed list.

        NOTE(review): the initializer below uses list multiplication, so the
        placeholder sublists are aliased; this is harmless here because each
        used argtype slot is replaced wholesale, never mutated in place.
        """
        # variable_value_table = [[['']*self.max_num_var]*self.batch_size]*self.num_argtypes
        variable_value_table = [[[None]*self.max_num_var]*self.batch_size]*self.num_argtypes
        for v_type, v_type_index in self.argument_type_vocab.items():
            if v_type=="entity":
                variable_value_table[v_type_index] = entity_variable_value_table
            elif v_type=="relation":
                variable_value_table[v_type_index] = rel_variable_value_table
            elif v_type=="type":
                variable_value_table[v_type_index] = type_variable_value_table
            elif v_type=="int":
                variable_value_table[v_type_index] = int_variable_value_table
        return variable_value_table
    def get_kb_attention(self, batch_ent_rel_type_kb_subgraph, batch_type_rel_type_kb_subgraph):
        """Build per-program KB attention masks over argument-variable triples.

        gen_set/gen_map1 use the precomputed subgraph masks; 'verify' and the
        set/map operators zero out positions where two arguments would be the
        same variable. Returns float32 of shape batch x num_progs x max_num_var^3.
        """
        kb_attention_for_progs = [None]*self.num_progs
        for prog,prog_id in self.program_type_vocab.items():
            kb_attention = np.ones((self.batch_size, self.max_num_var, self.max_num_var, self.max_num_var), dtype=np.int32)
            if prog=="gen_set":
                kb_attention = np.asarray(batch_ent_rel_type_kb_subgraph)
            elif prog=="gen_map1":
                kb_attention = np.asarray(batch_type_rel_type_kb_subgraph)
            elif prog=="verify":
                for i in range(self.max_num_var):
                    kb_attention[:,i,:,i] =0.
                kb_attention = np.reshape(kb_attention, (self.batch_size, -1))
            elif prog in ["set_oper_union", "set_oper_ints", "set_oper_diff", "map_oper_union", "map_oper_ints", "map_oper_diff"]:
                for i in range(self.max_num_var):
                    kb_attention[:,i,i,:] = 0.
                kb_attention = np.reshape(kb_attention, (self.batch_size, -1))
            else:
                kb_attention = np.reshape(kb_attention, (self.batch_size, -1))
            kb_attention_for_progs[prog_id]=kb_attention
        kb_attention_for_progs = np.asarray(kb_attention_for_progs)
        kb_attention_for_progs = np.transpose(kb_attention_for_progs, [1,0,2])
        kb_attention_for_progs = kb_attention_for_progs.astype(np.float32)
        return kb_attention_for_progs
    def get_variable_table_data(self, batch_context_entities, batch_context_relations, batch_context_types, \
                                batch_context_ints, batch_rel_attention, batch_type_attention):
        """Build mask, embedding and (uniform) attention tensors for the variable tables.

        For each argtype, pad slots get mask 0 and the attention over the
        non-pad slots is uniform (1/count, with a 1e-5 floor to avoid divide
        by zero when a row has no variables).
        """
        variable_mask = np.zeros((self.num_argtypes, self.batch_size, self.max_num_var), dtype=np.float32)
        variable_embed = np.zeros((self.num_argtypes, self.batch_size, self.max_num_var, self.wikidata_embed_dim), \
                                  dtype=np.float32)
        variable_atten = np.zeros((self.num_argtypes, self.batch_size, self.max_num_var), dtype=np.float32)
        #attention will have some issue if no variables are there to begin with ??????? ####CHECK
        ones = np.ones((self.batch_size), dtype=np.float32)
        for v_type, v_type_index in self.argument_type_vocab.items():
            if v_type=="entity":
                mask = np.ones_like(batch_context_entities)
                mask[batch_context_entities==self.pad_kb_symbol_index]=0.0
                #print 'for entity, mask : ', mask
                num_entities = np.sum(mask, axis=1)
                num_entities[num_entities==0] = 1e-5
                #num_entities is of dimension batch_size
                num_entities=np.tile(np.reshape(num_entities,(-1,1)), (1,self.max_num_var))
                embed = self.wikidata_ent_embed[batch_context_entities]
                #embed is of dimension batch_size x max_num_var x wikidata_embed_dim
                atten = np.copy(mask)
                atten = np.divide(atten, num_entities)
            elif v_type=="relation":
                mask = np.ones_like(batch_context_relations)
                mask[batch_context_relations==self.pad_kb_symbol_index]=0.0
                #print 'for relation, mask : ', mask
                num_relations = np.sum(mask, axis=1)
                num_relations[num_relations==0] = 1e-5
                #num_relations is of dimension batch_size
                num_relations = np.tile(np.reshape(num_relations,(-1,1)), (1,self.max_num_var))
                embed = self.wikidata_rel_embed[batch_context_relations]
                #embed is of dimension batch_size x max_num_var x wikidata_embed_dim
                atten = np.copy(mask)
                atten = np.divide(atten, num_relations)
            elif v_type=="type":
                mask = np.ones_like(batch_context_types)
                mask[batch_context_types==self.pad_kb_symbol_index]=0.0
                #print 'for types, mask : ', mask
                num_types = np.sum(mask, axis=1)
                num_types[num_types==0]=1e-5
                #num_types is of dimension batch_size
                num_types = np.tile(np.reshape(num_types,(-1,1)), (1,self.max_num_var))
                embed = self.wikidata_type_embed[batch_context_types]
                #embed is of dimension batch_size x max_num_var x wikidata_embed_dim
                atten = np.copy(mask)
                atten = np.divide(atten, num_types)
            elif v_type=="int":
                mask = np.ones_like(batch_context_ints)
                #print 'batch_context_ints==0', batch_context_ints==self.pad_kb_symbol_index
                mask[batch_context_ints==self.pad_kb_symbol_index]=0.0
                embed = np.zeros((self.batch_size, self.max_num_var, self.wikidata_embed_dim), dtype=np.float32)
                num_ints = np.sum(mask, axis=1)
                num_ints[num_ints==0]=1e-5
                #num_ints is of dimension batch_size
                num_ints = np.tile(np.reshape(num_ints,(-1,1)), (1, self.max_num_var))
                #embed is of dimension batch_size x max_num_prepro_var x wikidata_embed_dim
                atten = np.copy(mask)
                atten = np.divide(atten, num_ints)
            else:
                mask = np.zeros_like((self.batch_size), dtype=np.float32)
                embed = np.zeros((self.batch_size, self.max_num_var, self.wikidata_embed_dim), dtype=np.float32)
                atten = np.copy(mask)
            variable_mask[v_type_index]=mask
            variable_embed[v_type_index]= embed
            variable_atten[v_type_index]=atten
        variable_mask[0,:,0] = 1.
        variable_mask = np.transpose(variable_mask, [0,2,1])
        #print 'variable mask for entities', variable_mask[self.argument_type_vocab['entity']]
        #print 'variable mask for relations', variable_mask[self.argument_type_vocab['relation']]
        #print 'variable mask for types', variable_mask[self.argument_type_vocab['type']]
        variable_embed = np.transpose(variable_embed, [0,2,1,3])
        variable_atten = np.transpose(variable_atten, [0,2,1])
        variable_mask = [[variable_mask[i][j] for j in range(self.max_num_var)] for i in range(self.num_argtypes)]
        variable_embed = [[variable_embed[i][j] for j in range(self.max_num_var)] for i in range(self.num_argtypes)]
        #print 'variable mask in readdata ', variable_mask
        return variable_mask, variable_embed, variable_atten
if __name__=="__main__":
    # Usage: python <script> <param.json>
    param = json.load(open(sys.argv[1]))
    # train_data_file may be a directory of .pkl files, a single path, or a list of paths.
    if os.path.isdir(param['train_data_file']):
        training_files = [param['train_data_file']+'/'+x for x in os.listdir(param['train_data_file']) if x.endswith('.pkl')]
    elif not isinstance(param['train_data_file'], list):
        training_files = [param['train_data_file']]
    else:
        training_files = param['train_data_file']
    data = []
    for f in training_files:
        data.extend(pkl.load(open(f)))
    # Smoke test: batch the entire dataset in one go.
    param['batch_size'] = len(data)
    read_data = ReadBatchData(param)
    read_data.get_batch_data(data)
|
#
# 따라하며 배우는 파이썬과 데이터과학(생능출판사 2020)
# LAB 5-11 무한 반복문으로 숫자 맞추기 게임을 만들자, 134쪽
#
import random
# Number-guessing game: the player homes in on a random number in [1, 100],
# getting "low"/"high" feedback after every attempt.
tries = 0
answer = random.randint(1, 100)
print("1부터 100 사이의 숫자를 맞추시오")
while True:
    guess = int(input("숫자를 입력하시오: "))
    tries += 1
    if guess < answer:
        print("낮음!")
    elif guess > answer:
        print("높음!")
    else:
        break
print("축하합니다. 총 시도횟수=", tries)
import os
from PIL import Image
# Image extensions eligible for resizing; candidates come from the current directory.
ext = ['jpg', 'jpeg', 'png']
files = os.listdir('.')
def redResolution(file, count):
    """Downscale *file* to fit within 1136x640 and save it as '<count>.jpeg'.

    Image.thumbnail preserves aspect ratio in place; the image is converted
    to RGB so it can always be written as JPEG (e.g. RGBA PNG sources).
    """
    # BUG FIX: the original built names like '1jpeg' (missing the dot),
    # producing extensionless output files.
    name = str(count) + '.jpeg'
    image = Image.open(file)
    image.thumbnail((1136, 640))
    print(image.format, image.size, image.mode)
    image = image.convert("RGB")
    image.save(name, 'JPEG')
if __name__ == '__main__':
    # imag = Image.open('0.png')
    # print(imag.size)
    count = 0
    # Resize every matching image in the current directory, numbering outputs 1..N.
    for file in files:
        if file.split('.')[-1] in ext:
            # print(file)
            count += 1
            redResolution(file, count)
#
# encoding: utf-8
# To create a Spider, you must subclass scrapy.spider.BaseSpider,
# and define the three main, mandatory, attributes:
## name: identifies the Spider. It must be unique, that is,
#
# you can’t set the same name for different Spiders.
## start_urls: is a list of URLs where the Spider will begin to crawl from.
# So, the first pages downloaded will be those listed here.
# The subsequent URLs will be generated successively from data contained in the start URLs.
## parse() is a method of the spider, which will be called with the downloaded
# Response object of each start URL. The response is passed to the
# method as the first and only argument.
# This method is responsible for parsing the response data and
# extracting scraped data (as scraped items) and more URLs to follow.
# The parse() method is in charge of processing the response and
# returning scraped data (as Item objects) and more URLs to follow (as Request objects).
# This is the code for our first Spider; save it in a file named dmoz_spider.py under the dmoz/spiders directory:
from scrapy.spider import BaseSpider
class DmozSpider(BaseSpider):
    """Minimal spider that saves each downloaded page's body to a local file.

    The filename is the second-to-last path segment of the response URL
    (e.g. 'Books' for .../Python/Books/).
    """
    name = "dmoz"
    allowed_domains = ["dmoz.org"]
    start_urls = [
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
    ]
    def parse(self, response):
        """Write the raw response body to a file named after the URL."""
        filename = response.url.split("/")[-2]
        # BUG FIX: the file handle was never closed; use a context manager
        # so the file is flushed and released deterministically.
        with open(filename, 'wb') as f:
            f.write(response.body)
|
import renderapi
import os
import json
import numpy
import pathlib
from bigfeta import jsongz
from argschema import ArgSchemaParser
from .schemas import GenerateEMTileSpecsParameters
# this is a modification of https://github.com/AllenInstitute/
# render-modules/blob/master/rendermodules/dataimport/
# generate_EM_tilespecs_from_metafile.py
# that does not depend on render-modules nor
# on a running render server
class RenderModuleException(Exception):
    """Error type kept for API compatibility with render-modules."""
    pass
class GenerateEMTileSpecsModule(ArgSchemaParser):
    """Build render TileSpecs from a TEMCA acquisition metafile.

    Standalone port of render-modules' generate_EM_tilespecs_from_metafile
    that needs neither render-modules nor a running render server.
    """
    default_schema = GenerateEMTileSpecsParameters
    @staticmethod
    def image_coords_from_stage(stage_coords, resX, resY, rotation):
        """Convert stage coordinates to integer image coordinates.

        Divides by the per-axis resolution and applies a 2D rotation by
        *rotation* (radians), truncating to int.
        """
        cr = numpy.cos(rotation)
        sr = numpy.sin(rotation)
        x = stage_coords[0] / resX
        y = stage_coords[1] / resY
        return (int(x * cr + y * sr),
                int(-x * sr + y * cr))
    def tileId_from_basename(self, fname):
        """Tile id is the image filename without directory or extension."""
        return os.path.splitext(os.path.basename(fname))[0]
    def ts_from_imgdata(self, imgdata, imgdir, x, y,
                        minint=0, maxint=255, maskUrl=None,
                        width=3840, height=3840, z=None, sectionId=None,
                        scopeId=None, cameraId=None, pixelsize=None):
        """Build one renderapi TileSpec for a single image record.

        (x, y) is the tile's translation (applied as an AffineModel);
        image/mask paths are converted to file:// URIs for level 0 of the
        image pyramid.
        """
        tileId = self.tileId_from_basename(imgdata['img_path'])
        # NOTE(review): sectionId_from_z is not defined in this class or
        # visible imports (it exists in the render-modules ancestor) --
        # presumably callers always pass sectionId explicitly; confirm.
        sectionId = (self.sectionId_from_z(z) if sectionId is None
                     else sectionId)
        raw_tforms = [renderapi.transform.AffineModel(B0=x, B1=y)]
        imageUrl = pathlib.Path(
            os.path.abspath(os.path.join(
                imgdir, imgdata['img_path']))).as_uri()
        if maskUrl is not None:
            maskUrl = pathlib.Path(maskUrl).as_uri()
        ip = renderapi.image_pyramid.ImagePyramid()
        ip[0] = renderapi.image_pyramid.MipMap(imageUrl=imageUrl,
                                               maskUrl=maskUrl)
        return renderapi.tilespec.TileSpec(
            tileId=tileId, z=z,
            width=width, height=height,
            minint=minint, maxint=maxint,
            tforms=raw_tforms,
            imagePyramid=ip,
            sectionId=sectionId, scopeId=scopeId, cameraId=cameraId,
            imageCol=imgdata['img_meta']['raster_pos'][0],
            imageRow=imgdata['img_meta']['raster_pos'][1],
            stageX=imgdata['img_meta']['stage_pos'][0],
            stageY=imgdata['img_meta']['stage_pos'][1],
            rotation=imgdata['img_meta']['angle'], pixelsize=pixelsize)
    def run(self):
        """Read the metafile, build tilespecs, and optionally dump them to disk."""
        with open(self.args['metafile'], 'r') as f:
            meta = json.load(f)
        roidata = meta[0]['metadata']
        imgdata = meta[1]['data']
        # Rotated pixel coordinate of every image's stage position.
        img_coords = {img['img_path']: self.image_coords_from_stage(
            img['img_meta']['stage_pos'],
            img['img_meta']['pixel_size_x_move'],
            img['img_meta']['pixel_size_y_move'],
            numpy.radians(img['img_meta']['angle'])) for img in imgdata}
        # if not imgdata:
        #     raise RenderModuleException(
        #         "No relevant image metadata found for metafile {}".format(
        #             self.args['metafile']))
        # Shift all tiles so the minimum coordinate becomes the origin.
        minX, minY = numpy.min(numpy.array(list(img_coords.values())), axis=0)
        # assume isotropic pixels
        pixelsize = roidata['calibration']['highmag']['x_nm_per_pix']
        imgdir = self.args.get(
            'image_directory', os.path.dirname(self.args['metafile']))
        self.render_tspecs = [
            self.ts_from_imgdata(
                img, imgdir,
                img_coords[img['img_path']][0] - minX,
                img_coords[img['img_path']][1] - minY,
                minint=self.args['minimum_intensity'],
                maxint=self.args['maximum_intensity'],
                width=roidata['camera_info']['width'],
                height=roidata['camera_info']['height'],
                z=self.args['z'],
                sectionId=self.args.get('sectionId'),
                scopeId=roidata['temca_id'],
                cameraId=roidata['camera_info']['camera_id'],
                pixelsize=pixelsize,
                maskUrl=self.args['maskUrl']) for img in imgdata]
        # Optionally write the dict form of the tilespecs (gzipped json).
        if 'output_path' in self.args:
            self.args['output_path'] = jsongz.dump(
                self.tilespecs,
                self.args['output_path'],
                self.args['compress_output'])
    @property
    def tilespecs(self):
        """The built tilespecs as plain dicts (requires run() to have been called)."""
        tjs = [t.to_dict() for t in self.render_tspecs]
        return tjs
if __name__ == '__main__':
    # Arguments are parsed from sys.argv by ArgSchemaParser.
    gmod = GenerateEMTileSpecsModule()
    gmod.run()
|
# Demo of dict.setdefault: existing key -> returns its value, dict unchanged.
person = {'name': 'Phill', 'age': 22}
age = person.setdefault('age')
print('person = ',person)
print('Age = ',age)
#############
person = {'name': 'Phill'}
# key is not in the dictionary
# setdefault inserts the key with value None and returns None
salary = person.setdefault('salary')
print('person = ',person)
print('salary = ',salary)
# key is not in the dictionary
# default_value is provided
# setdefault inserts 'age': 22 and returns 22
age = person.setdefault('age', 22)
print('person = ',person)
print('age = ',age)
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
from datetime import datetime
import scrapy
import re
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose, TakeFirst,Join
# from ArticleSpider.utils.common import extract_num# 去common调用extract_num这个函数
# from ArticleSpider.settings import SQL_DATETIME_FORMAT,SQL_DATE_FORMAT
class ArticlespiderItem(scrapy.Item):
    """Placeholder item left from the project template (unused)."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
def add_cnblogs(value):
    """Append the suffix "男" to *value* (title tagging processor)."""
    suffix = "男"
    return value + suffix
def date_convert(value):
    """Parse a 'YYYY/MM/DD' string into a datetime.date.

    Malformed or non-string input falls back to today's date (best-effort
    behavior kept from the original), but the except clause is narrowed
    from bare Exception to the two errors strptime can actually raise.
    """
    try:
        create_date = datetime.strptime(value, "%Y/%m/%d").date()
    except (ValueError, TypeError):
        # bad/missing date -> default to the crawl date
        create_date = datetime.now().date()
    return create_date
def get_nums(value):
    """Extract the first run of digits in *value* as an int; 0 when absent.

    Used to turn like/comment/favorite counts into integers for storage.
    """
    found = re.match(r".*?(\d+).*", value)
    return int(found.group(1)) if found else 0
def remove_comment_tags(value):
    """Keep only the text before the first comma.

    Tag strings arrive like "tag,edit,favorite"; only the leading tag is wanted.
    """
    return value.partition(",")[0]
def return_value(value):
    """Identity output processor: pass *value* through unchanged (overrides TakeFirst)."""
    return value
# 知乎和其他网站都可以用这个方法,这是分词用的生成一个suggest
# def gen_suggests(index, info_tuple):
# # 根据字符串生成搜索建议数组
# used_words = set() # 为什么要设置一个set,是因为在后面要去重
# suggests = [] # 这就是我们要返回的一个数组
# for text, weight in info_tuple:
# if text:
# # 调用es的anlyzer接口分析字符串, 分词和大小的转换
# words = es.indices.analyze(index=index, analyzer="ik_max_word", params={'filter':["lowercase"]}, body=text)
# anylyzed_words = set([r["token"] for r in words["tokens"] if len(r["token"])>1])# 大于一是过滤单个字符的,那是没有意义的
# # 已经存在过的单词过滤掉
# new_works = anylyzed_words - used_words
# else:
# new_works = set()
#
# if new_works:
# suggests.append({"input":list(new_works), "weight":weight})
# return suggests
# 我们都用这个自定义的itemLoader来做解析,这个是给文章cnblogs的item
class ArticleItemLoader(ItemLoader):
    """Custom ItemLoader for cnblogs article items.

    An Item is the container for scraped data; the ItemLoader is the
    mechanism that fills it. Each field's input_processor pre-processes
    values as they arrive; its output_processor runs once before the value
    is stored. Setting TakeFirst as the default output processor collapses
    the loader's single-element lists into plain strings.
    """
    # 自定义itemLoader
    default_output_processor = TakeFirst()# list转换成str
class CnblogsArticleItem(scrapy.Item):
    """Item holding one cnblogs article.

    Field input_processors pre-process extracted values; the loader's
    default output processor (TakeFirst) collapses single-element lists
    to scalars unless a field overrides it.
    """
    title = scrapy.Field(
        # MapCompose chains processors: each extracted value goes through
        # the lambda, then add_cnblogs.
        input_processor = MapCompose(lambda x:x+ "-风骚", add_cnblogs),
    )  # article title
    create_date = scrapy.Field(
        input_processor=MapCompose(date_convert),
        # BUG FIX: was misspelled 'output_processsor', so scrapy silently
        # stored it as an ordinary field kwarg and the loader ignored it.
        output_processor = TakeFirst()
    )  # publish date
    url = scrapy.Field()  # article URL
    url_object_id = scrapy.Field()
    front_image_url = scrapy.Field(
        # keep the full list (identity) so the image pipeline can download it
        output_processor = MapCompose(return_value)
    )  # cover image URL
    front_image_path = scrapy.Field()
    praise_nums = scrapy.Field(
        input_processor = MapCompose(get_nums)
    )  # like count
    comment_nums = scrapy.Field(
        input_processor = MapCompose(get_nums)
    )  # comment count
    fav_nums = scrapy.Field(
        input_processor=MapCompose(get_nums)
    )  # favorite count
    tags = scrapy.Field(
        input_processor = MapCompose(remove_comment_tags),
        # output_processor = Join(",")  # would join the tag list into a str
    )  # tags
    content = scrapy.Field()  # article body
    def get_insert_sql(self):
        """Return (sql, params) for upserting this article.

        Consumed by the pipeline's do_insert; missing fields fall back to
        empty/zero defaults via Item.get.
        """
        insert_sql = """
            insert into cnblogs_article(title, url, url_object_id, front_image_url, front_image_path, praise_nums, comment_nums, tags, content, create_date, fav_nums)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)ON DUPLICATE KEY UPDATE create_date = VALUES(create_date)
        """
        params = (
            self.get("title", ""),
            self.get("url", ""),
            self.get("url_object_id", ""),
            self.get("front_image_url", ""),
            self.get("front_image_path", ""),
            self.get("praise_nums", 0),
            self.get("comment_nums", 0),
            self.get("tags", ""),
            self.get("content", ""),
            self.get("create_date", "1970-07-01"),
            self.get("fav_nums", 0),
        )
        return insert_sql, params
|
# coding: utf-8
# In[1]:
from __future__ import division
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
import numpy as np
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from sklearn.cluster import SpectralClustering
from sklearn.cluster import AgglomerativeClustering
import math
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import time
from scipy import spatial
# In[2]:
# Load the ratings matrix; '?' marks a missing rating and becomes NaN.
#df = pd.read_csv('formatted_data_updated.csv')
df = pd.read_csv('formatted_data_ing.csv')
df = df.replace('?', np.nan)
#print df
# Drop the identifier column so only recipe-rating columns remain.
newdf = df.drop('UserID', axis = 1)
newdf
# In[3]:
# Hold out the last six users as a test split (notebook-style row slicing).
training_set = newdf[0:31]
test_set = newdf[31:37]
# Adding all of the ratings corresponding to each recipe in a list of lists to make computation easier.
# In[4]:
def makeArrays(dataframe):
    """Return one plain list of ratings per dataframe column (list of lists)."""
    recipeRatings = [list(dataframe[column]) for column in dataframe]
    return recipeRatings
# def averages(array):
# i = 0
# for ratings in array:
# sums = 0
# count = 1
# for rating in ratings:
#
# if(not math.isnan(rating)):
# sums = sums + rating
# count = count +1
# average = float(sums/count)
# #print average
# array[i] = [average if math.isnan(x) else float(x) for x in array[i]]
# i = i+1
#
# #returns a numpy array
# return np.array(array)
# In[5]:
#compute the averages of the row
def averages(row):
    """
    Mean of the non-NaN entries of ``row``.

    NOTE(review): the divisor starts at 1, so the result is biased slightly
    low — presumably a guard against empty rows; confirm intent.
    """
    total = float(0)
    denom = float(1)
    for entry in row:
        entry = float(entry)
        if not np.isnan(entry):
            total += entry
            denom += 1
    return total / denom
# In[6]:
#initlize list of lists for spectral
# Build a dense matrix from newdf: NaN ratings are imputed with the row mean
# from averages() (which divides by count+1, so the fill value is biased
# slightly low — TODO confirm this is intentional).
training_data = []
for index, row in newdf.iterrows():
    #preprocessing for every row
    avg = averages(row)
    #initlize rows
    rows = []
    #for each col in row
    for col in row:
        #cast to float for nan behavior
        col = float(col)
        #if missing data
        if np.isnan(col):
            rows.append(avg)
        #not missing data
        else:
            rows.append(col)
    training_data.append(rows)
#cast to np array for fun times
training_data = np.array(training_data)
# # K-means
# #run PCA to reduce the data dimensionality so that it is easier to visualize
# from sklearn.decomposition import PCA
# reduced_data = PCA(n_components=2).fit_transform(training_data)
# In[7]:
#training_data = averages(makeArrays(training_set.T))
#print training_set.shape
# Cluster the imputed ratings into 6 groups (fixed seed for reproducibility).
kmeans = KMeans(n_clusters=6, random_state=0).fit(training_data)
# In[8]:
labels_kmeans = kmeans.labels_
set_lk = set(labels_kmeans)
print labels_kmeans
# Scatter the centroids projected onto the first two rating dimensions.
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
            marker='x', s=169, linewidths=3,
            color='w', zorder=10)
plt.show()
# test_data = averages(makeArrays(test_set.T))
# kmeans.predict(test_data)
# kmeans.score(test_data)
#
# # Spectral clustering
# In[9]:
# Spectral clustering with library defaults — NOTE(review): default
# n_clusters depends on the sklearn version; confirm it matches k-means (6).
spectral = SpectralClustering()
spectral.fit(training_data)
print "lables from clustering"
spectral_labels = spectral.labels_
set_ls = set(spectral_labels)
# # Hierarchial/Agglomerative Clustering
# In[10]:
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(training_data, n_neighbors=10, include_self=False)
# In[11]:
# Ward-linkage agglomerative clustering constrained by the k-NN graph.
ward = AgglomerativeClustering(n_clusters=8, connectivity=connectivity,
                               linkage='ward').fit(training_data)
# In[12]:
h_labels = ward.labels_
print h_labels
set_lh = set(h_labels)
# In[13]:
# Plot result
# 3-D scatter of the first three rating dimensions, coloured by cluster.
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(h_labels):
    ax.scatter(training_data[h_labels == l, 0], training_data[h_labels == l, 1], training_data[h_labels == l, 2],
               color=plt.cm.jet(float(l) / np.max(h_labels + 1)),
               s=20, edgecolor='k')
plt.show()
def getLabelsDict(set_labels, label_vals):
    """
    Map each cluster label to the list of sample indices assigned to it.

    Keys are 0 .. len(set_labels)-1; every index in label_vals is appended
    to the bucket of its label, preserving input order.
    """
    labels_dict = {bucket: [] for bucket in range(len(set_labels))}
    for sample_index, cluster_label in enumerate(label_vals):
        labels_dict[cluster_label].append(sample_index)
    return labels_dict
# In[28]:
# Recipe names are the dataframe's column headers.
colNames = list(newdf.columns.values)
#print colNames
def findEle(index):
    """
    Split user ``index``'s rated recipes into liked and disliked lists.

    Returns (likes, dislikes), each containing running counter values.
    NOTE(review): ``counter`` is incremented only inside the non-NaN branch,
    so the returned positions index the user's *rated* recipes rather than
    the global column positions — confirm callers expect that.
    """
    #get all the colums corresponding to the particular row
    #colVals = df.loc[[index]]
    toRet = []
    likes = []
    dislikes = []
    counter = int(0)
    for col in colNames:
        if not np.isnan(float(newdf.loc[[index]][col])):
            #print np.array(newdf.loc[[index]][col].astype(list))[1]
            for i in np.array(newdf.loc[[index]][col].astype(list)):
                #print type(i)
                # Ratings are compared as strings: "1" means liked.
                if i == "1":
                    likes.append(counter)
                else:
                    dislikes.append(counter)
                counter += 1
    #toRet.append(col)
    #print "likes: ", likes
    #print "dislikes: ", dislikes
    return (likes, dislikes)
# TF-IDF
# In[29]:
#compute tf-idf for the given array
#in Scikit-Learn
def tfIDF(colNames):
    """
    Build one idf-weighted bag-of-words vector per recipe name.

    :param colNames: list of recipe-name strings
    :return: list (one per recipe) of lists (one slot per vocabulary term)
             where matched terms carry their idf weight and the rest are 0.
    """
    names_to_vec = {}
    sklearn_tfidf = TfidfVectorizer(stop_words='english')
    sklearn_tfidf.fit(colNames)
    feature_names = sklearn_tfidf.get_feature_names()
    # term -> (idf weight, column index in the vector)
    for idx, name in enumerate(feature_names):
        names_to_vec[name] = (sklearn_tfidf.idf_[idx], idx)
    # FIX: the original used [[0]*n] * m, which aliases ONE row list m times,
    # so every write hit every row and all vectors came out identical.
    # Build independent rows instead.
    vectors = [[0] * len(feature_names) for _ in range(len(colNames))]
    for i, rec in enumerate(colNames):
        for word in rec.strip().split(' '):
            if word in names_to_vec:
                idf_weight, column = names_to_vec[word]
                vectors[i][column] = idf_weight
    return vectors
# Smoke check: one tf-idf vector per recipe name (Python 2 print).
print len(tfIDF(colNames))
# recipeDict = {}
#
# for rec in colNames:
# #rec = set(rec)
# for word in rec.strip().split(' '):
#
# if (recipeDict.has_key(word)):
# recipeDict[word] += 1
# else:
# recipeDict[word] = 1
# def tfIDF():
# total = len(colNames)
# tf = [{}] * len(colNames)
# #zerosOnEveryRecipe = [[]] * total
# count = 0
# for rec in colNames:
# #print rec
# tf[count] = {}
#
# #zerosOnEveryRecipe[count].append(0)
# # get term frequency
# for word in rec.strip().split(' '):
#
# if (tf[count].has_key(word)):
# tf[count][word] += 1
# else:
# tf[count][word] = 1
#
# # scale term frequency by idf
# for word in tf[count]:
# tf[count][word] = float(tf[count][word]*total/recipeDict[word])
# count += 1
# print tf
# return tf
#
# tfIDF()
# Cosine similarity
# In[32]:
def cosine_computations(getLabelsDict, set_lk, label):
    """
    For every cluster, compute pairwise cosine distances between the tf-idf
    vectors of the recipes each pair of users liked (and disliked).

    Returns userdata[cluster][user1][user2] = (like_distances, dislike_distances).

    NOTE(review): the first parameter shadows the getLabelsDict() function and
    is never used — the loops below read the *global* labels_dict, so callers
    must rebuild that global before every call (the call sites do).
    """
    userdata = {}
    tfidf_vecs = tfIDF(colNames)
    # Pre-build the nested cluster -> user1 -> user2 -> ([], []) structure.
    for i in range(0, len(set_lk)):
        userdata[i] = {}
        for j in range(0, len(label)):
            userdata[i][j] = {}
            for k in range(0, len(label)):
                userdata[i][j][k] = ([],[])
    # Users skipped because they have no likes or no dislikes.
    user_ignored = int(0)
    #1-------
    # for like in likes:
    #     eachuserlike = []
    #     if not len(likes) == 0:
    #         eachuserlike = like
    #         userdata.append(eachuserlike)
    #for each cluster
        #for each user in the cluster
            #get the liked recipes
            #get the disliked recipes
            #for each user not /= original
                #get the liked recipes
                #get the disliked recipes
                #compare the recipes
    #for each cluster
    for key in labels_dict:
        print "key: " , key
        #print "Key: ", key
        #for each user in the cluster
        for user1 in labels_dict[key]:
            likes1, dislikes1 = findEle(user1)
            #print "User1: ", user1
            #-----------1
            #go through every other user
            if not len(likes1) == 0 and not len(dislikes1) == 0:
                for user2 in labels_dict[key]:
                    #print "User2", user2
                    #get the likes and dislikes
                    likes2, dislikes2 = findEle(user2)
                    if not len(likes2) == 0 and not len(dislikes2) == 0:
                        #comparison of recipes
                        #for each recipe in first user's
                        for like1 in likes1:
                            # #for each recipe in second user's
                            for like2 in likes2:
                                #get the liked similarity and place in array
                                #cluster -> user1 -> user2 -> tuple of likes/dislikes -> liked comparison
                                #print "like1: ", np.array(like1), " like2: ", np.array(like2)
                                #userdata[key][user1][user2][0].append(cosine_similarity(like1, like2)) #likes
                                userdata[key][user1][user2][0].append(spatial.distance.cosine(tfidf_vecs[like1], tfidf_vecs[like2]))
                        for dislike1 in dislikes1:
                            # #for each recipe in second user's
                            for dislike2 in dislikes2:
                                # #get the liked similarity and place in array
                                # #cluster -> user1 -> user2 -> tuple of likes/dislikes -> disliked comparison
                                # userdata[key][user1][user2][1].append(cosine_similarity(dislike1, dislike2)) #likes
                                userdata[key][user1][user2][1].append(spatial.distance.cosine(tfidf_vecs[dislike1], tfidf_vecs[dislike2])) #likes
                    else:
                        user_ignored += 1
                #print "likes: ", userdata[key][user1][user2][0]
                #print "dislikes: ", userdata[key][user1][user2][0]
            else:
                user_ignored += 1
    #print user_ignored
    return userdata
#tfIDF(likes)
#print likes
#print likes
#print likes
#print dislikes
# In[33]:
def average_list_nan(data):
    """
    Mean of the non-NaN entries of ``data``.

    NOTE(review): the divisor starts at 1 (same convention as averages()),
    so the result is biased slightly low but never divides by zero.
    """
    total = float(0)
    denom = float(1)
    for item in data:
        item = float(item)
        if not np.isnan(item):
            total += item
            denom += 1
    return total / denom
# In[34]:
def average_sim_cluster(userdata):
    """
    Per-cluster (mean like distance, mean dislike distance) over all user pairs.

    ``userdata`` is the nested dict from cosine_computations(); the divisor
    again starts at 1, matching the rest of the script.
    """
    averages = []#[() for _ in range(len(userdata))]
    for cluster in userdata:
        like_total = float(0)
        dislike_total = float(0)
        pair_count = float(1)
        cluster_pairs = userdata[cluster]
        for user1 in cluster_pairs:
            for user2 in cluster_pairs:
                like_list, dislike_list = cluster_pairs[user1][user2]
                like_total += average_list_nan(like_list)
                dislike_total += average_list_nan(dislike_list)
                pair_count += 1
        averages.append((like_total / pair_count, dislike_total / pair_count))
    return averages
#return (averagelikes/counter, averagedislikes/counter)
# For k-means
# In[ ]:
# Evaluate each clustering: rebuild the global labels_dict (which
# cosine_computations reads), compute within-cluster pairwise distances,
# then report the per-cluster averages (Python 2 prints).
#get the labels dictionary
labels_dict = getLabelsDict(set_lk, labels_kmeans)
#pass into cosine similarity computations
userdata = cosine_computations(labels_dict, set_lk, labels_kmeans)
# print userdata
print average_sim_cluster(userdata)
# In[ ]:
#get the labels dictionary
labels_dict = getLabelsDict(set_ls, spectral_labels)
#pass into cosine similarity computations
userdata = cosine_computations(labels_dict, set_ls,spectral_labels )
# print userdata
print average_sim_cluster(userdata)
# In[ ]:
#get the labels dictionary
labels_dict = getLabelsDict(set_lh, h_labels)
#pass into cosine similarity computations
userdata = cosine_computations(labels_dict, set_lh, h_labels)
# print userdata
print average_sim_cluster(userdata)
# In[ ]:
# In[ ]:
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re, sys, os, shutil
# Parse a saved LinguaLeo HTML page and extract the vocabulary data.
open_html=open('LinguaLeo.html')
read_html=open_html.read()
word_value=re.findall('(?<="word_value":").*?(?=",")', read_html) # list of words
translate_value=re.findall('(?<=translate_value":").*?(?=",")', read_html) # list of translations
id_word=re.findall('[0-9]+-[0-9]+.mp3', read_html) # word ids inside the html code
sound_url=re.findall('(?<=sound_url":").*?(?=",")', read_html) # pronunciation audio urls
title_text=re.search('(?<=<title>).*?(?= текст перевод)', read_html).group() # title of the text
open_html.close()
# read the "silence" padding file
file_hush=open('hush', 'r+'); file_hush_2=file_hush.readlines()
# create the working directory and switch into it
os.mkdir(title_text)
os.chdir(title_text)
# create the required number of mp3 and wav folders (10 words per folder;
# Python 2 integer division)
len_word=len(word_value)/10
for i in range(1,len_word+1):
    i=str(i)
    os.mkdir(i+'-mp3')
    os.mkdir(i+'-wav')
# download the audio files
for i in sound_url:
    os.system('wget %s' %i)
# append the "silence" padding to the end of each audio file
for i in id_word:
    file_id_word=open(i, 'a+')
    file_id_word.writelines(file_hush_2)
file_hush.close()
file_id_word.close()
# rename the audio files to the pattern 'rays-лучи.mp3'
# (this empties word_value / translate_value / id_word via pop)
while len(word_value) != 0:
    a=word_value.pop(0)
    b=translate_value.pop(0)
    c=id_word.pop(0)
    os.rename(c, a+' - '+b+'.mp3')
# создаем файл слово - перевод
def word_text():
    """
    Append word/translation lists to ang-rus.txt, rus.txt and ang.txt.

    NOTE(review): by the time this could run, the rename loop above has
    already emptied word_value via pop(0), so the files would stay empty;
    the function is also never called in this script — confirm intent.
    """
    z=0
    file_word=open('ang-rus.txt', 'a+')
    file_word_translate=open('rus.txt', 'a+')
    file_word_value=open('ang.txt', 'a+')
    while len(word_value) > z:
        file_word.writelines(word_value[z]+' - '+translate_value[z]+'\n')
        file_word_value.writelines(word_value[z]+'\n')
        file_word_translate.writelines(translate_value[z]+'\n')
        z+=1
    file_word_translate.close()
    file_word_value.close()
    file_word.close()
# strip spaces from filenames so ecasound can handle them
def space(s, e):
    """Rename every entry in the current directory, replacing substring s with e."""
    for entry in os.listdir("."):
        if s not in entry:
            continue
        os.rename(entry, entry.replace(s, e))
space(" ", "_")
# convert mp3 to wav (amplified to 20%) via ecasound
for i in os.listdir("."):
    if i[-4:]=='.mp3':
        os.system('ecasound -i ' +i +' -ea:20% -o ' +i[:-4]+'.wav')
#~
# restore the spaces in the filenames
space("_", " ")
# sort wav and mp3 files into lists + lists of target directories
list_dir_mp3=[]
list_dir_wav=[]
list_file_mp3=[]
list_file_wav=[]
for i in os.listdir("."):
    if i[-4:]=='.mp3':
        list_file_mp3.append(i)
    elif i[-4:]=='.wav':
        list_file_wav.append(i)
    # directory names end in '-wav' / '-mp3' (created above); the extra
    # != tests are redundant after the elif chain but kept as written
    elif (i[-4:]!='.mp3') and (i[-4:]!='.txt') and (i[-4:]!='.wav') and (i[-4:]=='-wav'):
        list_dir_wav.append(i)
    elif (i[-4:]!='.mp3') and (i[-4:]!='.txt') and (i[-4:]!='.wav') and (i[-4:]=='-mp3'):
        list_dir_mp3.append(i)
def list_dir(list_d, list_f):
def rec(d, z):
while z != 0:
shutil.move(list_f.pop(0), d)
z-=1
while len(list_f) >0:
for i in list_d:
rec(i, 1)
# Distribute the files; the final round may run short, which surfaces as an
# IndexError from pop(0) and is treated as normal completion (Python 2 prints).
try:
    list_dir(list_dir_mp3, list_file_mp3)
except IndexError:
    print "pop from empty list MP3"
try:
    list_dir(list_dir_wav, list_file_wav)
except IndexError:
    print "pop from empty list WAV"
|
# Count occurrences of the letter 'e' in a user-supplied message.
message = input('Enter a message with many es in it')
num_of_e = message.count('e')
print(f'e occurs {num_of_e} times') |
# from dust i have come, dust i will be
# For each of three input lines "a b c", keep the middle value
# (sum minus min minus max), then print the median of the three middles.
x=[]
for i in range(3):
    a,b,c=map(int,input().split())
    s=a+b+c-(min(a,b,c)+max(a,b,c))
    x.append(s)
x.sort()
print(x[1])
|
# coding: utf-8
from scipy import stats
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import FloatVector
stats_r = importr('stats')
import sys
# Select the gene-set (.gmt) background file named by sys.argv[3].
# NOTE(review): if argv[3] matches none of these, `fin` is undefined and the
# read below raises NameError.
if sys.argv[3]=='cp':
    fin = open('MsigDB_c2.cp.v5.1.symbols.gmt','r')
if sys.argv[3]=='halmark':
    fin = open('h.all.v5.1.symbols.gmt','r')
if sys.argv[3]=='KEGG':
    fin = open('KEGG_pathway_genes.gmt','r')
if sys.argv[3]=='KEGG_signal':
    fin = open('KEGG_Gene_new_signal.txt.strip.txt')
if sys.argv[3]=='MSigDB_KEGG':
    fin = open('MSigDB_KEGG_updata.txt')
if sys.argv[3]=='CHR':
    fin = open('c1.all.v5.1.symbols.gmt.txt', 'r')
if sys.argv[3]=='mouse_KEGG':
    fin = open('mouse_kegg_geneset_full.txt','r')
# Build pathway -> gene-set mapping; gmt format: name<TAB>desc<TAB>gene...
dic_reac_uni = {}
all_unip = set()
for line in fin.readlines():
    single_unip = set()
    words = line.strip().split('\t')
    key = words[0].strip()
    length = len(words)
    for i in range(2,length):
        all_unip.add(words[i])
        single_unip.add(words[i])
    dic_reac_uni[key] = single_unip
fin.close()
# Size of the background gene universe.
all_u = len(all_unip)
print("all genes in background datasets:")
print(all_u)
import re
# Read the query gene list from column argv[2] of the input file.
input_file = open(sys.argv[1],'r')
input_list = set()
for line in input_file.readlines():
    input_list.add((line.strip().split('\t')[int(sys.argv[2])]).strip('"'))
# Count how many query genes occur in the background universe.
all_match = 0
match_list_all=[]
for uni in input_list:
    if uni in all_unip:
        match_list_all.append(uni)
        all_match = all_match + 1
print("all matched uniprot")
print(all_match)
# Per pathway: [matched count, pathway size, comma-joined matched genes].
dic_match = {}
for pathway in dic_reac_uni:
    count = 0
    match_item=set()
    for word in input_list:
        if word in dic_reac_uni[pathway]:
            count = count + 1
            match_item.add(word)
    dic_match[pathway]=[count, len(dic_reac_uni[pathway]),','.join(match_item)]
# Background match rate, used later to label pathways above/below average.
ave_match = float(all_match)/all_u
# Output-file prefix mirrors the chosen background set.
if sys.argv[3]=='cp':
    PRE='CP_'
if sys.argv[3]=='halmark':
    PRE='Halmark_'
if sys.argv[3]=='KEGG':
    PRE='KEGG_'
if sys.argv[3]=='KEGG_signal':
    PRE='KEGG_signal_'
if sys.argv[3]=='MSigDB_KEGG':
    PRE='MSigDB_KEGG_'
if sys.argv[3]=='CHR':
    PRE='CHR'
if sys.argv[3]=='mouse_KEGG':
    PRE='MOUSE_KEGG_'
fou1 = open(PRE+sys.argv[1]+'.fdr.txt','w')
fou1.write('Pathway\tMatched_gene\tAll_genes\tRatio\tP-value\tFDR\tLable\tGenes\n')
result = {}
Name_list = []
un_Name_list = []
FDR_gene_list = []
# One-sided Fisher's exact test per pathway with at least one matched gene.
# NOTE(review): the 2x2 table is [[matched, pathway size], [unmatched,
# universe - pathway size]] — confirm this is the intended contingency
# layout; it is not the usual in/out-of-pathway cross-tabulation.
for i in dic_match:
    if dic_match[i][0] != 0:
        Name_list.append(i)
        table = [(dic_match[i][0]),(dic_match[i][1]),(all_match)-(dic_match[i][0]),(all_u-dic_match[i][1])]
        p = stats.fisher_exact([[table[0],table[1]],[table[2],table[3]]],alternative='greater')
        FDR_gene_list.append(p[1])
        result[i]=[dic_match[i][0],dic_match[i][1],float(dic_match[i][0])/float(dic_match[i][1]),p[1]]
    else:
        un_Name_list.append(i)
        result[i]=[dic_match[i][0],dic_match[i][1],float(dic_match[i][0])/float(dic_match[i][1]),'NA']
# Benjamini-Hochberg multiple-testing adjustment via R's p.adjust.
FDR_gene_list_adjust = stats_r.p_adjust(FloatVector(FDR_gene_list), method='BH')
# Write one tab-separated row per tested pathway.
n = 0
for i in Name_list:
    fou1.write(i+'\t')
    for m in result[i]:
        fou1.write(str(m)+'\t')
    fou1.write(str(FDR_gene_list_adjust[n])+'\t')
    if result[i][2] > ave_match:
        fou1.write('Enriched\t')
    else:
        fou1.write('BelowAverage\t')
    fou1.write(dic_match[i][2]+'\t')
    fou1.write("\n")
    n = n+1
fou1.close()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 6 16:07:17 2018
@author: stevechen
"""
import pandas as pd
import urllib
from urllib.request import urlopen
import json
def UseUrllib(BaseURL,URLPost):
    """
    GET BaseURL with URLPost as the query string and return the parsed JSON.

    :param BaseURL: endpoint URL without a query string
    :param URLPost: dict of query parameters, urlencoded onto the URL
    :return: deserialized JSON response (dict/list)
    """
    # Combine information into a URL
    URL = BaseURL + "?" + urllib.parse.urlencode(URLPost)
    # FIX: use a context manager so the HTTP response is always closed;
    # the original never closed it (resource leak).
    with urlopen(URL) as WebURL:
        data = WebURL.read()
        # Respect the server-declared charset, defaulting to UTF-8.
        encoding = WebURL.info().get_content_charset('utf-8')
    return json.loads(data.decode(encoding))
def main():
    """Query Google Places for subway stations near a fixed point and save the JSON."""
    # Fixed coordinates (Upper Manhattan, NYC).
    lat = 40.7992048
    lon = -73.95367575
    BaseURL = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
    URLPost = {'location': str(lat)+','+str(lon),
               'radius': 500,
               'type': 'subway_station',
               'key': '###YOUR_API_KEY'}
    jsontxt = UseUrllib(BaseURL, URLPost)
    # FIX: the `with` block already closes the file; the explicit
    # f.close() inside it was redundant and has been removed.
    with open('google_data.json', 'w') as f:
        json.dump(jsontxt, f)
# Script entry point.
if __name__ == "__main__":
    main()
|
##In England the currency is made up of pound, £, and pence, p, and there are
##eight coins in general circulation:
##
##1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
##It is possible to make £2 in the following way:
##
##1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
##How many different ways can £2 be made using any number of coins?
from time import time
t1=time()
# FIX: replaced the six nested brute-force loops (~14 million iterations)
# with the classic coin-change DP: ways[v] counts the combinations summing
# to v pence. Start from the 1p-only solution (exactly one way for every
# value), then fold in each larger coin. ways[200] includes the single
# 2-pound coin, which the old code accounted for by starting its counter
# at 1 — the printed count (73682) is unchanged, just computed far faster.
ways = [1] * 201
for coin in (2, 5, 10, 20, 50, 100, 200):
    for v in range(coin, 201):
        ways[v] += ways[v - coin]
n = ways[200]
print(n,time()-t1)
|
class Solution:
    def reverse(self, x):
        """
        Reverse the decimal digits of x, keeping the sign.

        :type x: int
        :rtype: int -- 0 when the reversed value falls outside the signed
                32-bit range, as the problem statement requires.
        """
        sign = -1 if x < 0 else 1
        # int() drops any leading zeros produced by the reversal.
        reversed_magnitude = int(str(abs(x))[::-1])
        answer = sign * reversed_magnitude
        if -2147483648 <= answer <= 2147483647:
            return answer
        return 0
|
class Meeting(object):
    """A scheduled meeting on a single date with a list of participants."""

    def __init__(self, name, date, start, end, participants):
        self.name = name
        self.date = date
        self.start = start
        self.end = end
        self.participants = participants

    def include(self, user):
        """True when user is one of the participants."""
        return user in self.participants

    def conflict(self, other_meeting):
        """True when other_meeting is on the same date and the time windows overlap."""
        if other_meeting.date != self.date:
            return False
        # Either meeting's boundary falling strictly inside (or flush with)
        # the other's window counts as a conflict.
        return (self.start < other_meeting.end <= self.end
                or self.start <= other_meeting.start < self.end
                or other_meeting.start < self.end <= other_meeting.end
                or other_meeting.start <= self.start < other_meeting.end)

    def __str__(self):
        """Render as '<name> <MM/DD/YYYY> <HH:MM> <HH:MM> <p1,p2,...>'."""
        joined = self.participants[0]
        for member in self.participants[1:]:
            joined += ',' + member
        when = self.date.strftime("%m/%d/%Y")
        window = (self.start.isoformat(timespec='minutes') + ' '
                  + self.end.isoformat(timespec='minutes'))
        return self.name + ' ' + when + ' ' + window + ' ' + joined
class Log(object):
    """A single operation record: op name, timestamp, node id and value."""

    def __init__(self, op, time, node, value):
        self.op = op
        self.time = time
        self.node = node  # a number representation of node
        self.value = value

    def __str__(self):
        """Render as '<op> <value>'; time and node are not included."""
        return self.op + ' ' + str(self.value)
|
from core.keyboard import Keyboard
from typing import Optional
import logging
from core.telegram_api import TelegramApi
from trivia.telegram_models import UpdatesResponse
import json
import aiohttp
from contextlib import asynccontextmanager
from pathlib import Path
class LiveTelegramApi(TelegramApi):
    """TelegramApi implementation backed by a live aiohttp client session."""

    def __init__(self, token: str):
        # Bot token is embedded into every request URL.
        self.token = token
        self.session = aiohttp.ClientSession()

    async def close(self):
        """Close the underlying aiohttp session."""
        await self.session.close()

    async def get_updates(self, offset: int) -> UpdatesResponse:
        """
        Long-poll Telegram for incoming updates.

        :param offset: numeric id of the first update to fetch
        :return: parsed UpdatesResponse
        """
        url = f"https://api.telegram.org/bot{self.token}/getUpdates"
        logging.info(f"Listen to telegram. offset: {offset}")
        body = {
            "offset": offset,
            "timeout": 10
        }
        response = await self.session.get(url, json=body)
        logging.info(f"Status code get_update {response.status}")
        response_body = await response.text()
        response_json = json.loads(response_body)
        update_data = UpdatesResponse.parse_obj(response_json)
        return update_data

    async def send_message(self,
                           chat_id: int,
                           text: str,
                           parse_mode: Optional[str] = None,
                           keyboard: Optional[Keyboard] = None,
                           ) -> None:
        """Send a text message, optionally with a parse mode and inline keyboard."""
        url = f"https://api.telegram.org/bot{self.token}/sendMessage"
        body = {
            "text": text,
            "chat_id": chat_id
        }
        if parse_mode is not None:
            body["parse_mode"] = parse_mode
        if keyboard is not None:
            body["reply_markup"] = {
                "inline_keyboard": keyboard.as_json()
            }
        response = await self.session.post(url, json=body)
        logging.info(f"Send message status code: {response.status} ")
        # Non-200 responses are logged but not raised — best-effort send.
        if response.status != 200:
            response_text = await response.text()
            logging.info(f"TelegramAPI: Unexpected status code: {response.status}. Response body: {response_text}")

    async def answer_callback_query(self, callback_query_id: str) -> None:
        """Acknowledge a callback query so the client stops showing a progress state."""
        url = f"https://api.telegram.org/bot{self.token}/answerCallbackQuery"
        body = {
            "callback_query_id": callback_query_id
        }
        response = await self.session.post(url, json=body)
        logging.info(f"TelegramAPI answer_callback_query status code: {response.status}")

    async def edit_message(self, chat_id: int, message_id: int, text: str, parse_mode: Optional[str] = None) -> None:
        """Replace the text of an already-sent message."""
        url = f"https://api.telegram.org/bot{self.token}/editMessageText"
        body = {
            "chat_id": chat_id,
            "message_id": message_id,
            "text": text
        }
        if parse_mode is not None:
            body["parse_mode"] = parse_mode
        response = await self.session.post(url, json=body)
        logging.info(f"TelegramAPI message_edit status code: {response.status}")

    async def set_webhook(self, https_url: str, cert_filepath: Optional[Path] = None) -> None:
        """
        Register https_url as the bot's webhook, optionally uploading a
        self-signed certificate.
        """
        url = f"https://api.telegram.org/bot{self.token}/setWebhook"
        if not cert_filepath:
            logging.info("Setting hook without certificate")
            body = {
                "url": https_url,
            }
            response = await self.session.post(url, json=body)
        else:
            logging.info(f"Setting hook with certificate from {cert_filepath}")
            # NOTE(review): the certificate is opened in text mode and posted
            # as form data — confirm Telegram accepts this encoding.
            with open(cert_filepath, 'r') as cert:
                files = {'certificate': cert, 'url': https_url}
                response = await self.session.post(url, data=files)
        logging.info(f"TelegramAPI set_webhook status code: {response.status}")

    async def delete_webhook(self, drop_pending_updates: bool) -> None:
        """Remove the webhook, optionally discarding queued updates."""
        url = f"https://api.telegram.org/bot{self.token}/deleteWebhook"
        body = {
            "drop_pending_updates": drop_pending_updates
        }
        response = await self.session.post(url, json=body)
        logging.info(f"TelegramAPI delete_webhook status code: {response.status}")
@asynccontextmanager
async def make_live_telegram_api(token: str):
    """Async context manager yielding a LiveTelegramApi whose HTTP session is closed on exit."""
    telegram = LiveTelegramApi(token)
    try:
        yield telegram
    finally:
        await telegram.close()
|
from relation_extraction import triple_scoring
from relation_extraction import id2entity, id2relation
import pickle
import re
import requests
# 采用固定的规则将关系三元组转化为自然语言
def fixed_rules(sent):
    """
    Verbalise lower-cased ConceptNet relation tokens into English phrases.

    The substitution order matters: negated/longer tokens (e.g.
    'notcapableof') must be replaced before their substrings ('capableof').
    """
    replacements = (
        (r"atlocation", "is at location of"),
        (r"relatedto", "is related to"),
        (r"notcapableof", "is not capable of"),
        (r"capableof", "is capable of"),
        (r"madeof", "is made of"),
        (r"antonym", "is antonym of"),
        (r"hasproperty", "has property of"),
        (r"partof", "is part of"),
        (r"isa", "is a"),
        (r"hascontext", "has"),
        (r"createdby", "is created by"),
        (r"usedfor", "is used for"),
        (r"hassubevent", "causes"),
        (r"receivesaction", "is caused by"),
        (r"notdesires", "does not desire"),
    )
    for token, phrase in replacements:
        sent = re.sub(token, phrase, sent)
    return sent
def rel(sent):
    """
    Convert lower-cased relation tokens to ConceptNet's canonical CamelCase
    relation names, as used in the API's /r/<Relation> paths.

    FIX: 'AtLocationf' -> 'AtLocation' and 'ReceiveSaction' ->
    'ReceivesAction'; the misspelled names are not valid ConceptNet
    relations, so queries built from them could never match.
    """
    sent = re.sub(r"atlocation", "AtLocation", sent)
    sent = re.sub(r"relatedto", "RelatedTo", sent)
    sent = re.sub(r"notcapableof", "NotCapableOf", sent)
    sent = re.sub(r"capableof", "CapableOf", sent)
    sent = re.sub(r"madeof", "MadeOf", sent)
    sent = re.sub(r"antonym", "Antonym", sent)
    sent = re.sub(r"hasproperty", "HasProperty", sent)
    sent = re.sub(r"partof", "PartOf", sent)
    sent = re.sub(r"isa", "IsA", sent)
    sent = re.sub(r"hascontext", "HasContext", sent)
    sent = re.sub(r"createdby", "CreatedBy", sent)
    sent = re.sub(r"usedfor", "UsedFor", sent)
    sent = re.sub(r"hassubevent", "HasSubevent", sent)
    sent = re.sub(r"receivesaction", "ReceivesAction", sent)
    sent = re.sub(r"notdesires", "NotDesires", sent)
    return sent
def get_text(start,end,relation):
    """
    Fetch the ConceptNet surface text for the edge start -relation-> end.

    FIX: the original guard was `while obj is not None` with obj initialised
    to None, so the request never ran and the function crashed indexing
    None. Retry until a JSON response is obtained instead.
    NOTE(review): still raises IndexError when the query returns no edges.
    """
    obj = None
    while obj is None:
        obj = requests.get("http://api.conceptnet.io/query?start=/c/en/%s&end=/c/en/%s&rel=/r/%s"%(start,end,rel(relation))).json()
        print(obj)
    return obj['edges'][0]['surfaceText']
# Retrieve external knowledge for each statement
def create_new_exp(exp):
    """
    Turn scored ConceptNet relation paths into at most three English
    explanation strings, ranked by the product of per-triple scores.

    Returns [] as soon as any entry is missing or has no "pf_res" paths.
    NOTE(review): the loop variable `rel` shadows the module-level rel()
    function (locally only), and `exp` is reused for both the argument and
    the sentence being built — confusing but self-contained.
    """
    new_exp = []
    score_dict = {}
    for rel in exp:
        if rel is None or len(rel) == 0:
            return []
        else:
            relations = rel["pf_res"]
            if len(relations) == 0:
                return []
            for relation in relations:
                score = 1
                exp = ""
                for i in range(len(relation["rel"])):
                    rel = relation["rel"][i][0]
                    head, tail = relation["path"][i], relation["path"][i+1]
                    # Path quality is the product of the triple scores.
                    score = score * triple_scoring(head, rel, tail)
                    # Skip low-information context/slang triples.
                    if id2relation[rel] == "hascontext" or id2relation[rel] == "hascontext*":
                        continue
                    if id2entity[tail] == "slang" or id2entity[head] == 'slang':
                        continue
                    # if rel >= 17:
                    #     sub_exp = id2entity[tail] + ' ' + id2relation[rel - 17] + ' ' + id2entity[head]
                    # else:
                    #     sub_exp = id2entity[head] + ' ' + id2relation[rel] + ' ' + id2entity[tail]
                    # Relation ids >= 17 encode the inverse direction, so
                    # head/tail are swapped for the ConceptNet lookup.
                    if rel >= 17:
                        sub_exp = get_text(id2entity[tail],id2entity[head],id2relation[rel - 17])
                    else:
                        sub_exp = get_text(id2entity[head],id2entity[tail],id2relation[rel])
                    exp = exp + ' ' + sub_exp
                score_dict.update({exp: score})
    # Keep the three best-scoring explanations of at least three words.
    scorelst = sorted(score_dict.items(), key=lambda x: x[1], reverse=True)
    scorelst = [x for x in scorelst if len(x[0].split()) >= 3]
    # print(scorelst)
    for element in scorelst[:3]:
        assert "*" not in element[0]
        new_exp.append(fixed_rules(element[0]))
    return new_exp
# Augment the original dataset with new external knowledge
def adding_explanations(filename):
    """
    Read pickled relation paths and the matching *_bert.txt statements, attach
    up to three explanation sentences to each statement pair, and write the
    result to ../dataset/bert_new_<filename>.txt.

    SECURITY NOTE(review): each data line is parsed with eval(), which
    executes arbitrary Python from the file — use ast.literal_eval if the
    input is not fully trusted.
    """
    # Paths come in pairs: even index -> statement1, odd index -> statement2.
    paths = pickle.load(open("../dataset/" + filename + "_path.2.pf.pruned.pickle", 'rb'))
    index = 1
    explanations = []
    for i in range(0, len(paths)-1, 2):
        path_form = {"data_id": index, "path1": paths[i], "path2": paths[i+1]}
        explanations.append(path_form)
        index += 1
    datafile = open("../dataset/" + filename + "_bert.txt", "r", encoding="utf_8")
    data = datafile.readlines()
    print(len(data), len(explanations))
    newdata = []
    # """
    for i in range(len(data)):
        line = eval(data[i])
        statement1, statement2 = line["statement1"], line["statement2"]
        exp1, exp2 = explanations[i]["path1"], explanations[i]["path2"]
        new_exp1, new_exp2 = create_new_exp(exp1), create_new_exp(exp2)
        print(new_exp1, new_exp2)
        assert len(new_exp1) <= 3
        assert len(new_exp2) <= 3
        # Explanations are prepended to statement1 but appended (with ':')
        # to statement2 — asymmetry kept as written.
        for new_exp in new_exp1:
            statement1 = new_exp + statement1
        for new_exp in new_exp2:
            statement2 = statement2 + ':' + new_exp
        # print(statement1, '|||', statement2)
        newdata.append({"data_id": line["data_id"], "correct": line["correct"],
                        "statement1": statement1, "statement2": statement2})
    with open("../dataset/bert_new_" + filename + ".txt", "w", encoding="utf_8") as file:
        for line in newdata:
            file.writelines(str(line) + '\n')
    # """
# """
# Script entry point: regenerate the augmented splits.
if __name__ == "__main__":
    adding_explanations("test")
    # adding_explanations("dev")
adding_explanations("train") |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.