seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
38451818612 | # aca desarrollaremos la parte de contar renglones y palabras
import logging
import logging.config
# abriendo archivo de configuración
logging.config.fileConfig('log_config_file.conf')
# creando el logger
logger = logging.getLogger('functions')
def count_words(file, nombre_path: str) -> None:
    '''
    Count the lines in a file's contents and the number of words per
    line, logging the results through the module-level logger.

    :param file: contents of an already-read file (a string with newlines)
    :type file: str
    :param nombre_path: file name, used only in the log messages
    :type nombre_path: str
    '''
    # Fix: the return annotation was "-> any", which is the builtin
    # any() function, not a type; this routine returns nothing.
    # The redundant "new_file = file" alias was also dropped.
    renglones = file.split("\n")
    logger.info(f'{nombre_path} - Cantidad de renglones: {len(renglones)}')
    # Words per line: splitting on single spaces means an empty line
    # still counts as one "word" — this matches the original behaviour.
    for n_renglon, renglon in enumerate(renglones):
        cword = renglon.split(" ")
        logger.info(f'Renglón {n_renglon+1}: {len(cword)} palabras')
| Carlos-Montana/Act-tiimiit-alkemy | Act_4_logII/editorial/function.py | function.py | py | 868 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "logging.config.fileConfig",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.config",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
}
] |
75200594022 | # This file is a part of GrumpyWidgets.
# The source code contained in this file is licensed under the MIT license.
# See LICENSE.txt in the main project directory, for more information.
import re
from xml.etree import ElementTree
from htmlcompare import assert_same_html as assert_same_html_
__all__ = [
'as_normalized_html',
'assert_same_html',
'flatten_stream',
'reconfigure_widget',
'template_widget',
]
def assert_same_html(expected, actual, message=None):
    """Assert that *actual* (possibly a Genshi stream) renders the same
    HTML as *expected*, delegating the comparison to htmlcompare."""
    flattened_actual = flatten_stream(actual)
    assert_same_html_(expected, flattened_actual, message=message)
def flatten_stream(value):
    """Return *value* serialized to a plain string if it is a Genshi
    stream, otherwise return it unchanged."""
    if not hasattr(value, '__unicode__'):
        return value
    # Genshi streams expose __unicode__() for serialization.
    return value.__unicode__()
def as_normalized_html(input_):
    """Parse *input_* (a markup string or Genshi stream) as XML and return
    it re-serialized with surrounding whitespace stripped from every
    element's text and tail."""
    # ----------------------------------------------------------------------
    # initial idea from Fredrik Lundh
    # http://mail.python.org/pipermail/xml-sig/2003-November/009997.html
    markup = input_
    if hasattr(markup, '__unicode__'):
        # serialize a Genshi stream to a plain string first
        markup = markup.__unicode__()
    root = ElementTree.fromstring(markup)
    for element in root.iter():
        if element.text:
            element.text = element.text.strip()
        if element.tail is not None:
            element.tail = element.tail.strip() or None
    # ----------------------------------------------------------------------
    # To my surprise ElementTree.tostring() returns bytes and not a string
    # in Python 3, see https://bugs.python.org/issue10942
    # We could work around the issue by using "encoding='unicode'" but
    # unfortunately Python 2 doesn't know about the 'unicode' encoding.
    # So the simplest way to tackle this is to decode the bytes here.
    serialized = ElementTree.tostring(root)
    if isinstance(serialized, bytes):
        return serialized.decode('utf8')
    return serialized
def extract_classes(html):
    """Return the set of CSS class names from the first class="..."
    attribute found in *html*.

    :raises IndexError: if no class attribute is present.
    """
    # Fix: raw string avoids the invalid "\s" escape, which is a
    # SyntaxWarning (and eventually an error) on modern Python.
    class_str = re.findall(r'class="\s*([^"]+)\s*"', html)[0]
    # Fix: str.split() with no argument collapses runs of whitespace and
    # drops empty fields, so trailing spaces inside the attribute can no
    # longer produce a spurious empty-string class name.
    return set(class_str.split())
def reconfigure_widget(widget, template_engine):
    """Point *widget* at *template_engine*, swapping its default
    '.jinja2' template for the engine-specific one when necessary.
    Returns the (mutated) widget."""
    wanted_suffix = '.' + template_engine
    current_template = widget.template
    if not current_template.endswith(wanted_suffix):
        widget.template = current_template.replace('.jinja2', wanted_suffix)
    widget.template_engine = template_engine
    return widget
def template_widget(widget, template_engine, kwargs=None):
    """
    Return a widget instance using the specified template engine.

    An explicit 'template_engine' already present in *kwargs* takes
    precedence over the *template_engine* argument.
    """
    if kwargs is None:
        kwargs = {}
    # Bug fix: the original returned the kwargs dict itself (not a widget
    # instance) whenever 'template_engine' was already present, which
    # contradicts the documented contract.
    kwargs.setdefault('template_engine', template_engine)
    return widget(**kwargs)
| FelixSchwarz/grumpywidgets | grumpywidgets/testhelpers.py | testhelpers.py | py | 2,672 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "htmlcompare.assert_same_html",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 33,
"usage_type": "name"
},
{
... |
6655999686 | """Postgres Dataconn Connection Implementation"""
import glob
import sys
import os
from os import environ
from cml.data_v1.customconnection import CustomConnection
class PostgresCustomImp(CustomConnection):
    """CML custom data connection for Postgres databases, built on
    https://pypi.python.org/pypi/postgres (psycopg2 underneath).

    Connection parameters PG_HOST, PG_PORT, PG_DB and PG_USER are taken
    from the CML Dataconnection definition with environment variables as
    a fallback; PG_PASS is always read from the environment.

    Note: the original file placed bare string literals *between* the
    methods; those were dead statements (never attached as docstrings)
    and have been converted into real docstrings here.
    """

    def print_usage(self):
        """Print requirements and usage examples for this connection."""
        print(
            """
Generic Postgres CML Python Dataconnection
Prerequisite packages:
pip install postgres
pip install pandas
Connection Parameters:
(These can be set in the Dataconnection defintion created by Admins or set as User Envs for the CML Session as fallback)
PG_HOST
PG_PORT
PG_USER
PG_DB
(This should be set by the user in their session)
PG_PASS
Functions:
get_cursor() -- Get a basic psycopg2 cursor
get_pandas_dataframe(query) -- Get a pandas dataframe for a specified sql query
--Sample Usage--
import cml.data_v1 as cmldata
CONNECTION_NAME = "%s"
conn = cmldata.get_connection(CONNECTION_NAME)
cursor = conn.get_cursor()
"""
            % (self.app_name)
        )

    def get_base_connection(self):
        """Open and return a raw psycopg2 connection using the PG_*
        parameters resolved by override_parameters().

        :raises ImportError: if psycopg2 is not installed.
        """
        try:
            import psycopg2
        except ImportError:
            raise ImportError(
                'psycopg2 module not found, install it with "pip install postgres"'
            )
        conn_string = (
            "host=" + self.pg_host
            + " port=" + self.pg_port
            + " dbname=" + self.pg_db
            + " user=" + self.pg_user
            + " password=" + self.pg_pass
        )
        conn = psycopg2.connect(conn_string)
        return conn

    def get_pandas_dataframe(self, query):
        """Run *query* against the database and return the result as a
        pandas DataFrame.

        :raises ImportError: if pandas is not installed.
        """
        try:
            import pandas as pds
        except ImportError:
            raise ImportError(
                'pandas module not found, install it with "pip install pandas"'
            )
        conn = self.get_base_connection()
        dataFrame = pds.read_sql(query, conn)
        pds.set_option("display.expand_frame_repr", False)
        return dataFrame

    def get_cursor(self):
        """Return a psycopg2 DictCursor on a fresh connection.

        :raises ImportError: if psycopg2 is not installed.
        """
        conn = self.get_base_connection()
        try:
            import psycopg2.extras
        except ImportError:
            raise ImportError(
                'psycopg2 module not found, install it with "pip install postgres"'
            )
        cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        return cursor

    def check_params_or_env(self, name):
        """Return parameter *name* from the loaded Dataconnection
        parameters, falling back to the environment; terminate the
        process if it is set in neither place."""
        # Flattened from the original else-nesting; behaviour unchanged.
        if self.parameters.get(name) is not None:
            return self.parameters.get(name)
        if environ.get(name) is not None:
            return environ.get(name)
        sys.exit(
            "No %s specified in CML Dataconn params or ENV fallback" % name
        )

    def override_parameters(self):
        """Resolve and store the PG_* connection parameters needed by the
        other methods."""
        print(
            "Checking Connection parameters from CML DataConnections service, fall back to ENV vars if needed"
        )
        print(self.parameters)
        self.pg_host = self.check_params_or_env("PG_HOST")
        self.pg_port = self.check_params_or_env("PG_PORT")
        self.pg_db = self.check_params_or_env("PG_DB")
        self.pg_user = self.check_params_or_env("PG_USER")
        # PG_PASS is intentionally read only from the environment, never
        # from the Dataconnection parameters.
        self.pg_pass = os.getenv("PG_PASS")
| pdefusco/Using_CustomConn_CML | postgresconn/pg-conn.py | pg-conn.py | py | 3,675 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cml.data_v1.customconnection.CustomConnection",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "psycopg2.connect",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 68,
"usage_type": "call"
},
{
"api... |
3289690742 | __all__ = ['display']
import logging
import sys
import types
from six import itervalues
logger = logging.getLogger('pyomo.core')
def display(obj, ostream=None):
    """Display data in a Pyomo object.

    :param obj: object exposing a ``display(ostream=...)`` method
    :param ostream: output stream; defaults to ``sys.stdout``
    :raises TypeError: if *obj* has no ``display`` method
    """
    if ostream is None:
        ostream = sys.stdout
    try:
        display_fcn = obj.display
    except AttributeError:
        raise TypeError(
            "Error trying to display values for object of type %s:\n"
            "\tObject does not support the 'display()' method"
            % (type(obj), ) )
    try:
        display_fcn(ostream=ostream)
    except Exception as err:
        # Modernized from "sys.exc_info()[1]"; log before re-raising so
        # the failure is recorded even if the caller swallows it.
        logger.error(
            "Error trying to display values for object of type %s:\n\t%s"
            % (type(obj), err) )
        raise
def create_name(name, ndx):
    """Create a canonical name for a component using the given index."""
    if ndx is None:
        return name
    # NB: exact-type check (not isinstance) so tuple subclasses such as
    # namedtuples keep their own string form, as in the original.
    if type(ndx) is tuple:
        # strip the parentheses and the spaces after commas: (1, 2) -> 1,2
        inner = str(ndx).replace(', ', ',')[1:-1]
        return "%s[%s]" % (name, inner)
    return "%s[%s]" % (name, ndx)
def apply_indexed_rule(obj, rule, model, index, options=None):
    """Call *rule* with the argument signature it expects.

    Tuple indexes are unpacked into positional arguments; a scalar
    component (index is None and obj is not indexed) is called with the
    model alone.  If the call raises TypeError, retry assuming the rule
    takes no index arguments at all; if that also fails, re-run the
    original call so the original TypeError propagates to the caller.
    """
    try:
        if options is None:
            if index.__class__ is tuple:
                # multi-dimensional index: unpack into positional args
                return rule(model, *index)
            elif index is None and not obj.is_indexed():
                # scalar component: the rule only takes the model
                return rule(model)
            else:
                return rule(model, index)
        else:
            if index.__class__ is tuple:
                return rule(model, *index, **options)
            elif index is None and not obj.is_indexed():
                return rule(model, **options)
            else:
                return rule(model, index, **options)
    except TypeError:
        # NOTE(review): this also catches TypeErrors raised *inside* the
        # rule body, not only signature mismatches — a known subtlety.
        try:
            if options is None:
                return rule(model)
            else:
                return rule(model, **options)
        except:
            # Nothing appears to have matched... re-trigger the original
            # TypeError
            if options is None:
                if index.__class__ is tuple:
                    return rule(model, *index)
                elif index is None and not obj.is_indexed():
                    return rule(model)
                else:
                    return rule(model, index)
            else:
                if index.__class__ is tuple:
                    return rule(model, *index, **options)
                elif index is None and not obj.is_indexed():
                    return rule(model, **options)
                else:
                    return rule(model, index, **options)
def apply_parameterized_indexed_rule(obj, rule, model, param, index):
    """Call *rule* with (model, param) plus the unpacked index, if any."""
    if index is None:
        return rule(model, param)
    if index.__class__ is tuple:
        # multi-dimensional index: unpack into positional arguments
        return rule(model, param, *index)
    return rule(model, param, index)
class _robust_sort_keyfcn(object):
"""Class for robustly generating sortable keys for arbitrary data.
Generates keys (for use with Python `sorted()` that are
(str(type_name), val), where val is the actual value (if the type
is comparable), otherwise is the string representation of the value.
If str() also fails, we fall back on id().
This allows sorting lists with mixed types in Python3
We implement this as a callable object so that we can store the
_typemap without resorting to global variables.
"""
def __init__(self):
self._typemap = {}
def __call__(self, val):
"""Generate a tuple ( str(type_name), val ) for sorting the value.
`key=` expects a function. We are generating a functor so we
have a convenient place to store the _typemap, which converts
the type-specific functions for converting a value to the second
argument of the sort key.
"""
_type = type(val)
if _type not in self._typemap:
# If this is not a type we have seen before, determine what
# to use for the second value in the tuple.
try:
# 1: Check if the type is comparable
val < val
self._typemap[_type] = lambda x:x
except:
try:
# 2: try converting the value to string
str(val)
self._typemap[_type] = lambda x:str(x)
except:
# 3: fallback on id(). Not deterministic
# (run-to-run), but at least is consistent within
# this run.
self._typemap[_type] = lambda x:id(x)
return _type.__name__, self._typemap[_type](val)
def sorted_robust(arg):
    """Utility to sort an arbitrary iterable.

    This returns sorted(arg) in a consistent order by first trying
    the standard sorted() function, and if that fails (for example with
    mixed-type Sets in Python3), using the _robust_sort_keyfcn utility
    (above) to generate sortable keys.
    """
    try:
        return sorted(arg)
    except Exception:
        # Fix: was a bare "except:"; typically this is a TypeError from
        # comparing unorderable mixed types.
        return sorted(arg, key=_robust_sort_keyfcn())
def _safe_to_str(obj):
try:
return str(obj)
except:
return "None"
def tabular_writer(ostream, prefix, data, header, row_generator):
    """Output data in tabular form

    Parameters:
    - ostream: the stream to write to
    - prefix: prefix each line with this string
    - data: a generator returning (key, value) pairs (e.g., from iteritems())
    - header: list of column header strings
    - row_generator: a generator that returns tuples of values for each
      line of the table

    A row_generator that raises ValueError marks its key's rows as None,
    which prints as a row of "None" placeholders at the end.
    """
    _rows = {}
    #_header = ("Key","Initial Value","Lower Bound","Upper Bound",
    #           "Current Value","Fixed","Stale")
    # NB: _width is a list because we will change these values
    if header:
        # a 'Key' column is prepended when headers are requested
        header = ('Key',) + tuple(header)
        _width = [len(x) for x in header]
    else:
        _width = None
    for _key, _val in data:
        try:
            _rowSet = row_generator(_key, _val)
        except ValueError:
            # remember the failure; emitted later as a None-filled row
            _rows[_key] = None
            continue
        if isinstance(_rowSet, types.GeneratorType):
            # one key may yield several rows; only the first row repeats
            # the key, subsequent rows leave the Key column blank
            _rows[_key] = [
                ((_safe_to_str("" if i else _key),) if header else ()) +
                tuple( _safe_to_str(x) for x in _r )
                for i,_r in enumerate(_rowSet) ]
        else:
            _rows[_key] = [
                ((_safe_to_str(_key),) if header else ()) +
                tuple( _safe_to_str(x) for x in _rowSet) ]
        # grow the per-column widths to fit this key's rows
        for _row in _rows[_key]:
            if not _width:
                _width = [0]*len(_row)
            for _id, x in enumerate(_row):
                _width[_id] = max(_width[_id], len(x))
    # NB: left-justify header
    if header:
        # Note: do not right-pad the last header with unnecessary spaces
        tmp = _width[-1]
        _width[-1] = 0
        ostream.write(prefix
                      + " : ".join( "%%-%ds" % _width[i] % x
                                    for i,x in enumerate(header) )
                      + "\n")
        _width[-1] = tmp
    # If there is no data, we are done...
    if not _rows:
        return
    # right-justify data, except for the last column if there are spaces
    # in the data (probably an expression or vector)
    _width = ["%"+str(i)+"s" for i in _width]
    if any( ' ' in r[-1]
            for x in itervalues(_rows) if x is not None
            for r in x ):
        _width[-1] = '%s'
    # emit rows in a deterministic order even for mixed-type keys
    for _key in sorted_robust(_rows):
        _rowSet = _rows[_key]
        if not _rowSet:
            # key whose row_generator raised ValueError above
            _rowSet = [ [_key] + [None]*(len(_width)-1) ]
        for _data in _rowSet:
            ostream.write(
                prefix
                + " : ".join( _width[i] % x for i,x in enumerate(_data) )
                + "\n")
| igorsowa9/vpp | venv/lib/python3.6/site-packages/pyomo/core/base/misc.py | misc.py | py | 7,746 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sys.exc_info",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "types.GeneratorType",
... |
6752697116 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QTreeWidgetItem, QDialog, QTreeWidgetItemIterator
from PyQt5.QtCore import pyqtSlot
from product.controllers.productcontroller import ProductController
from workshop.views.selectoddmentdraw import Ui_Dialog
import user
class SelectoddmentdrawModule(QDialog, Ui_Dialog):
    """Dialog for selecting leftover ("oddment") draw records of the same
    product so they can be claimed by the current producing plan (ppid)."""

    def __init__(self, ppid, parent=None):
        super(SelectoddmentdrawModule, self).__init__(parent)
        self.setupUi(self)
        self.ppid = ppid
        self.prodid = ''          # product id resolved from ppid
        self.id_list = tuple()    # ids of other plans sharing the same prodid
        self.PC = ProductController()
        # resolve the prodid that belongs to this ppid
        self.get_prodid()
        # find plans with the same prodid and keep their id list
        self.get_id_list()
        # list oddment records that are not expired and not yet drawn (dppid=0)
        self.get_oddments_list()

    def get_prodid(self):
        """Look up the product id (prodid) of this producing plan."""
        values_list = ('prodid',)
        key_dict = {
            'autoid': self.ppid
        }
        res = self.PC.get_producingplan(True, *values_list, **key_dict)
        # exactly one row expected; otherwise prodid stays ''
        if len(res) == 1:
            self.prodid = res[0]

    def get_id_list(self):
        """Collect ids of all producing plans with the same prodid,
        excluding this batch's own ppid."""
        if not self.prodid:
            return
        values_list = ('autoid', )
        key_dict = {
            'prodid': self.prodid
        }
        res = self.PC.get_producingplan(True, *values_list, **key_dict)
        # remove this batch's own ppid from the candidates
        self.id_list = list(res)
        if self.ppid in self.id_list:
            self.id_list.remove(self.ppid)

    def get_oddments_list(self):
        """Populate the tree widget with claimable oddment records."""
        self.treeWidget_oddmentdrawlist.clear()
        self.treeWidget_oddmentdrawlist.hideColumn(0)
        values_list = (
            'autoid', 'batchno', 'amount', 'unit', 'registerid', 'registername',
            'regdate', 'invaliddate'
        )
        # not issued (dppid=0), not deposited (status=0), not expired
        key_dict = {
            'ppid__in': self.id_list,
            'dppid': 0,
            'status': 0,
            'invaliddate__gte': user.now_date,
        }
        res = self.PC.get_oddmentdrawnotes(False, *values_list, **key_dict)
        if not len(res):
            return
        for item in res:
            # column 0 (hidden) carries the record id; columns 1-6 display
            qtreeitem = QTreeWidgetItem(self.treeWidget_oddmentdrawlist)
            qtreeitem.setText(0, str(item['autoid']))
            qtreeitem.setText(1, item['batchno'])
            qtreeitem.setText(2, str(item['amount']))
            qtreeitem.setText(3, item['unit'])
            qtreeitem.setText(
                4, item['registerid'] + ' ' + item['registername']
            )
            qtreeitem.setText(5, str(item['regdate']))
            qtreeitem.setText(6, str(item['invaliddate']))
            qtreeitem.setCheckState(1, 0)
        for i in range(1, 7):
            self.treeWidget_oddmentdrawlist.resizeColumnToContents(i)

    @pyqtSlot(QTreeWidgetItem, int)
    def on_treeWidget_oddmentdrawlist_itemDoubleClicked(self, qtreeitem, p_int):
        """Toggle the check state of a record on double click."""
        state = qtreeitem.checkState(1)
        if state == 0:
            qtreeitem.setCheckState(1, 2)
        else:
            qtreeitem.setCheckState(1, 0)

    @pyqtSlot()
    def on_pushButton_accept_clicked(self):
        """Mark all checked records as drawn by this plan and close."""
        it = QTreeWidgetItemIterator(self.treeWidget_oddmentdrawlist)
        select_id_list = []
        while it.value():
            qtreeitem = it.value()
            if qtreeitem.checkState(1) == 2:
                select_id_list.append(int(qtreeitem.text(0)))
            it += 1
        if not len(select_id_list):
            return
        # stamp the records with this plan id and the current user/date
        detail = {
            'dppid': self.ppid,
            'drawerid': user.user_id,
            'drawername': user.user_name,
            'drawdate': user.now_date
        }
        res = self.PC.update_oddmentdrawnotes(select_id_list, **detail)
        if res:
            self.accept()

    @pyqtSlot()
    def on_pushButton_cancel_clicked(self):
        """Close the dialog without claiming anything."""
        self.close()
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "workshop.views.selectoddmentdraw.Ui_Dialog",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "product.controllers.productcontroller.ProductController",
"line_number": 2... |
1372282786 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author aliex-hrg
import sys,os,socketserver,json,hashlib
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
from conf import settings
import admin,re
class MyTCPHandler(socketserver.BaseRequestHandler):
    """Per-connection FTP-like request handler.

    Receives JSON commands of the form {'cmd': ..., 'par1': ..., 'par2': ...}
    and dispatches them by reflection to the method named in 'cmd'.
    The extra recv/send handshakes scattered through get()/put() exist to
    keep messages separated on the TCP stream ("sticky packet" avoidance).
    """
    ftp_dir = settings.HOME_DIR  # FTP server root directory
    curr_dir = None              # current working directory of the logged-in user
    size_limit = 0               # user quota in MB, loaded from the user's json at login

    def ls(self, info):
        """List the current (or a given sub-) directory via the shell 'dir'
        command (Windows-only)."""
        print('bug:', self.curr_dir)
        if not info.get('par1'):
            cmd = 'dir %s' % self.curr_dir
            user_data = os.popen(cmd).read()
            return user_data
        else:
            path_dir = info["par1"]
            cmd = 'dir %s\%s' % (self.curr_dir, path_dir)
            user_data = os.popen(cmd).read()
            return user_data

    def pwd(self, info):
        """Return the current directory relative to the user's home
        (falls back to '\\' at the home directory itself)."""
        pwd_dir = [i for i in self.curr_dir.split("D:\\ftp\%s" % info['user']) if i]
        if len(pwd_dir) == 0:
            pwd_dir.append('\\')
        return pwd_dir[0]

    def dir_strip(self, path):
        """Normalize a Windows path: resolve '\\..' components and drop
        '\\.' components."""
        path = path.rstrip('\\')
        while '\\..' in path:
            path = re.sub('\\\\?[^\\\\]*\\\\\.\.', '', path, count=1)
        if '\\.' in path:
            path = path.replace('\\.', '')
        return path

    def cd(self, info):
        """Change the current directory; with no argument, go to the
        user's home directory."""
        username = info['user']
        if not info.get('par1'):
            self.curr_dir = '%s\%s' % (self.ftp_dir, username)
            print('curr_dir', self.curr_dir)
            return self.curr_dir
        else:
            path_dir = info["par1"]
            curr_dir = '%s\%s' % (self.curr_dir, path_dir)
            curr_dir = self.dir_strip(curr_dir)
            # if the directory exists, make it the current directory
            if os.path.isdir(os.path.join(curr_dir)):
                self.curr_dir = curr_dir
                return self.curr_dir
            else:
                return "this directory is not exist"

    def login(self, info):
        """Authenticate the user and load their home directory and quota.

        NOTE(review): returns str(login_status) after a login attempt but
        the int 3 when parameters are missing — confirm clients handle
        the mixed return types.
        """
        if info.get('par1') and info.get('par2'):
            username = info["par1"]
            password = info["par2"]
            login_status = admin.login(username, password)
            if login_status == 0:
                self.curr_dir = '%s\%s' % (self.ftp_dir, username)
                with open(os.path.join(BASE_DIR, 'conf', username + '.json'), 'r', encoding='utf-8') as f:
                    self.size_limit = json.load(f)['limit']
                print('used space:', self.limit_check(self.curr_dir))
            return str(login_status)
        else:
            return 3

    def limit_check(self, path):
        """Recursively sum the size in bytes of all files under *path*."""
        pathlist = os.listdir(path)
        sum_size = 0
        for f in pathlist:
            f_path = os.path.join(path, f)
            if os.path.isfile(f_path):
                sum_size += os.stat(f_path).st_size
            elif os.path.isdir(f_path):
                sum_size += self.limit_check(f_path)
        return sum_size

    def get(self, info):
        """Send a file to the client (download) with resume support and
        an md5 integrity check."""
        if info.get('par1'):
            file_path = "%s\%s" % (self.curr_dir, info['par1'])
            if os.path.isfile(file_path):
                filesize = os.stat(file_path).st_size
                m = hashlib.md5()
                self.request.sendall(str(filesize).encode())  # send the file size
                # receive how many bytes the client already has (also keeps
                # this exchange separate from the data stream)
                has_size = self.request.recv(1024).decode()
                print('has_size:', has_size)
                with open(file_path, 'rb') as f:
                    if has_size != '0':
                        # resume: skip the part already transferred
                        f.seek(int(has_size))
                    for line in f:
                        self.request.send(line)
                        m.update(line)
                self.request.recv(1024)  # sync to keep messages separated
                file_md5 = m.hexdigest()
                self.request.sendall(file_md5.encode())
                self.request.recv(1024)  # sync to keep messages separated
                return "download success"
            else:
                self.request.send(b'0')
                print("file does not exist")
                return "file does not exist"
        else:
            self.request.send(b'0')
            print("miss a request parameters")
            return "miss a request parameters"

    def put(self, info):
        """Receive a file from the client (upload): enforce the quota,
        resume via a '.tmp' file, and verify the md5 checksum."""
        if not info.get('par1'): return "miss request parameters"
        file_path = os.path.join(self.curr_dir, info['par1'])
        rest = int(self.size_limit * 1024 * 1024 - self.limit_check(self.curr_dir))
        self.request.sendall(str(rest).encode())  # send remaining quota in bytes
        filesize = int(self.request.recv(8096).decode())
        print("filesize:", filesize)
        # guard against the client submitting a nonexistent file
        if filesize == 0: return "file does not exist,because filesize is zero"
        if os.path.isfile(file_path + '.tmp'):  # resume a partial upload
            has_size = os.stat(file_path + '.tmp').st_size
            self.request.sendall(str(has_size).encode())
            filesize -= has_size
        else:
            self.request.sendall(b"0")
        filenum = 0
        m = hashlib.md5()
        with open(file_path + '.tmp', 'ab') as f:
            while filenum != filesize:
                data = self.request.recv(8096)
                f.write(data)
                filenum += len(data)
                m.update(data)
        self.request.send(b"ack")  # sync to keep messages separated
        recv_md5 = self.request.recv(8096).decode()
        if recv_md5 == m.hexdigest():
            # replace any existing file atomically-ish via rename
            if os.path.isfile(file_path):
                os.remove(file_path)
            os.rename(file_path + '.tmp', file_path)
        else:
            print('file transfer error')
            return "md5 error"
        return "file upload success"

    def handle(self):
        """Main request loop: decode JSON commands and dispatch by name."""
        try:
            while True:
                data = self.request.recv(1024)  # read the client's command and parameters
                if not data: break
                print("Client IP:{}".format(self.client_address[0]))
                recv_data = json.loads(data.decode())
                print('cmd:', recv_data)
                if hasattr(MyTCPHandler, recv_data["cmd"]):
                    # reflect the handler method named by the command
                    func = getattr(MyTCPHandler, recv_data["cmd"])
                    msg = func(self, recv_data)
                    print('cmd_result:', msg)
                    if not len(msg): msg = "Command Error,Pls Retype"
                    self.request.sendall(msg.encode())  # return the result to the client
                else:
                    print("this command is not support")
                    # report the unsupported command back to the client
                    self.request.sendall(b'this command is not support')
        except ConnectionResetError as e:
            print(e)
if __name__ == "__main__":
    # Bind to the host/port from conf.settings and serve each client
    # connection in its own thread.
    HOST, PORT = settings.servername, settings.port
    server = socketserver.ThreadingTCPServer((HOST, PORT), MyTCPHandler)
    server.serve_forever()
| hrghrghg/test | 作业/FTP文件服务器/server/core/core.py | core.py | py | 6,863 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line... |
6742370298 | # -*- coding: utf-8 -*-
# file lyxpreview2bitmap.py
# This file is part of LyX, the document processor.
# Licence details can be found in the file COPYING.
# author Angus Leeming
# with much advice from members of the preview-latex project:
# David Kastrup, dak@gnu.org and
# Jan-Åke Larsson, jalar@mai.liu.se.
# Full author contact details are available in file CREDITS
# This script takes a LaTeX file and generates a collection of
# png or ppm image files, one per previewed snippet.
# Pre-requisites:
# * python 2.4 or later (subprocess module);
# * A latex executable;
# * preview.sty;
# * dvipng;
# * dv2dt;
# * pngtoppm (if outputting ppm format images).
# preview.sty and dvipng are part of the preview-latex project
# http://preview-latex.sourceforge.net/
# preview.sty can alternatively be obtained from
# CTAN/support/preview-latex/
# Example usage:
# lyxpreview2bitmap.py --bg=faf0e6 0lyxpreview.tex
# This script takes one obligatory argument:
#
# <input file>: The name of the .tex file to be converted.
#
# and these optional arguments:
#
# --png, --ppm: The desired output format. Either 'png' or 'ppm'.
# --dpi=<res>: A scale factor, used to ascertain the resolution of the
# generated image which is then passed to gs.
# --fg=<color>: The foreground color as a hexadecimal string, eg '000000'.
# --bg=<color>: The background color as a hexadecimal string, eg 'faf0e6'.
# --latex=<exe>: The converter for latex files. Default is latex.
# --bibtex=<exe>: The converter for bibtex files. Default is bibtex.
# --lilypond: Preprocess through lilypond-book. Default is false.
# --lilypond-book=<exe>:
# The converter for lytex files. Default is lilypond-book.
#
# -d, --debug Show the output from external commands.
# -h, --help      Show a help screen and exit.
# -v, --verbose Show progress messages.
# Decomposing TEXFILE's name as DIR/BASE.tex, this script will,
# if executed successfully, leave in DIR:
# * a (possibly large) number of image files with names
# like BASE[0-9]+.png
# * a file BASE.metrics, containing info needed by LyX to position
# the images correctly on the screen.
# What does this script do?
# 1) Call latex/pdflatex/xelatex/whatever (CONVERTER parameter)
# 2) If the output is a PDF fallback to legacy
# 3) Otherwise check each page of the DVI (with dv2dt) looking for
# PostScript literals, not well supported by dvipng. Pages
# containing them are passed to the legacy method in a new LaTeX file.
# 4) Call dvipng on the pages without PS literals
# 5) Join metrics info coming from both methods (legacy and dvipng)
# and write them to file
# dvipng is fast but gives problem in several cases, like with
# PSTricks, TikZ and other packages using PostScript literals
# for all these cases the legacy route is taken (step 3).
# Moreover dvipng can't work with PDF files, so, if the CONVERTER
# parameter is pdflatex we have to fall back to the legacy route (step 2).
from __future__ import print_function
import getopt, glob, os, re, shutil, sys, tempfile
import lyxpreview_tools
from legacy_lyxpreview2ppm import extract_resolution, legacy_conversion_step1
from lyxpreview_tools import bibtex_commands, check_latex_log, copyfileobj, \
error, filter_pages, find_exe, find_exe_or_terminate, \
join_metrics_and_rename, latex_commands, latex_file_re, make_texcolor, \
pdflatex_commands, progress, run_command, run_latex, run_tex, \
warning, write_metrics_info
PY2 = sys.version_info[0] == 2
def usage(prog_name):
    """Return the command-line help text with *prog_name* filled in."""
    template = """
Usage: %s <options> <input file>
Options:
--dpi=<res>: Resolution per inch (default: 128)
--png, --ppm: Select the output format (default: png)
--fg=<color>: Foreground color (default: black, ie '000000')
--bg=<color>: Background color (default: white, ie 'ffffff')
--latex=<exe>: Specify the executable for latex (default: latex)
--bibtex=<exe>: Specify the executable for bibtex (default: bibtex)
--lilypond: Preprocess through lilypond-book (default: false)
--lilypond-book=<exe>:
The executable for lilypond-book (default: lilypond-book)
-d, --debug: Show the output from external commands
-h, --help: Show this help screen and exit
-v, --verbose: Show progress messages
The colors are hexadecimal strings, eg 'faf0e6'."""
    return template % (prog_name,)
# Returns a list of tuples containing page number and ascent fraction
# extracted from dvipng output.
# Use write_metrics_info to create the .metrics file with this info
def extract_metrics_info(dvipng_stdout):
    """Parse dvipng's stdout and return a list of (page_number,
    ascent_fraction) tuples; use write_metrics_info to create the
    .metrics file from this.

    Calls error() (which aborts) if no metrics could be extracted at all.
    """
    # "\[[0-9]+" can match two kinds of numbers: page numbers from dvipng
    # and glyph numbers from mktexpk.  The glyph numbers always match
    # "\[[0-9]+\]" while a page number is never followed by "\]", hence
    # the trailing [^]] below.
    page_re = re.compile(r"\[([0-9]+)[^]]")
    metrics_re = re.compile("depth=(-?[0-9]+) height=(-?[0-9]+)")
    results = []
    found_any = False
    pos = 0
    while True:
        page_match = page_re.search(dvipng_stdout, pos)
        if page_match is None:
            break
        page = page_match.group(1)
        pos = page_match.end()
        metrics_match = metrics_re.search(dvipng_stdout, pos)
        if metrics_match is None:
            break
        found_any = True
        # Calculate the 'ascent fraction'.
        descent = float(metrics_match.group(1))
        ascent = float(metrics_match.group(2))
        frac = 0.5
        if ascent < 0:
            # This is an empty image, forbid its display
            frac = -1.0
        elif ascent >= 0 or descent >= 0:
            if abs(ascent + descent) > 0.1:
                frac = ascent / (ascent + descent)
            # Sanity check
            if frac < 0:
                frac = 0.5
        results.append((int(page), frac))
        pos = metrics_match.end() + 2
    if not found_any:
        error("Failed to extract metrics info from dvipng")
    return results
def fix_latex_file(latex_file, pdf_output):
    """Preprocess *latex_file* in place for preview generation.

    - When not producing PDF output, pass the 'draft' option to microtype
      right after \\documentclass.
    - Turn duplicate \\newcommandx definitions into \\renewcommandx.

    Returns True if the file was modified, False otherwise.
    """
    # python 2 does not allow to declare a string as raw byte so we double
    # the backslashes and remove the r preffix
    def_re = re.compile(b"(\\\\newcommandx|\\\\global\\\\long\\\\def)"
                        b"(\\\\[a-zA-Z]+)")
    tmp = tempfile.TemporaryFile()
    changed = False
    macros = []
    # Fix: the input handle was previously opened inline and never closed.
    with open(latex_file, 'rb') as infile:
        for line in infile:
            if not pdf_output and line.startswith(b"\\documentclass"):
                changed = True
                line += b"\\PassOptionsToPackage{draft}{microtype}\n"
            else:
                match = def_re.match(line)
                if match is not None:
                    macroname = match.group(2)
                    if macroname in macros:
                        definecmd = match.group(1)
                        if definecmd == b"\\newcommandx":
                            changed = True
                            line = line.replace(definecmd, b"\\renewcommandx")
                    else:
                        macros.append(macroname)
            tmp.write(line)
    if changed:
        # Fix: close the rewritten output file deterministically.
        with open(latex_file, "wb") as outfile:
            copyfileobj(tmp, outfile, 1)
    return changed
def convert_to_ppm_format(pngtopnm, basename):
    """Convert every file named basename*.png to PPM format using the
    *pngtopnm* executable, deleting each PNG afterwards.

    Calls error() (which aborts) if a conversion fails.
    """
    png_file_re = re.compile(r"\.png$")
    for png_file in glob.glob("%s*.png" % basename):
        ppm_file = png_file_re.sub(".ppm", png_file)
        p2p_cmd = '%s "%s"' % (pngtopnm, png_file)
        p2p_status, p2p_stdout = run_command(p2p_cmd)
        if p2p_status:
            error("Unable to convert %s to ppm format" % png_file)
        # Fix: the output handle was previously never closed.
        with open(ppm_file, 'w') as ppm:
            ppm.write(p2p_stdout)
        os.remove(png_file)
# Returns a tuple of:
# ps_pages: list of page indexes of pages containing PS literals
# pdf_pages: list of page indexes of pages requiring running pdflatex
# page_count: total number of pages
# pages_parameter: parameter for dvipng to exclude pages with PostScript/PDF
def find_ps_pages(dvi_file):
    """Scan *dvi_file* (via dv2dt) for pages dvipng cannot render.

    Returns a tuple of:
      ps_pages: list of page indexes of pages containing PS literals
      pdf_pages: list of page indexes of pages requiring running pdflatex
      page_count: total number of pages
      pages_parameter: parameter for dvipng to exclude pages with
        PostScript/PDF content (e.g. " -pp 4-12,14,64"), or "" if all or
        no pages need excluding
    """
    # latex failed
    # FIXME: try with pdflatex
    if not os.path.isfile(dvi_file):
        error("No DVI output.")
    # Check for PostScript specials in the dvi, badly supported by dvipng,
    # and inclusion of PDF/PNG/JPG files.
    # This is required for correct rendering of PSTricks and TikZ
    dv2dt = find_exe_or_terminate(["dv2dt"])
    dv2dt_call = '%s "%s"' % (dv2dt, dvi_file)
    # The output from dv2dt goes to stdout
    dv2dt_status, dv2dt_output = run_command(dv2dt_call)
    psliteral_re = re.compile("^special[1-4] [0-9]+ '(\"|ps:)")
    hyperref_re = re.compile("^special[1-4] [0-9]+ 'ps:.*/DEST pdfmark")
    pdffile_re = re.compile("^special[1-4] [0-9]+ 'PSfile=.*\\.(pdf|png|jpg|jpeg|PDF|PNG|JPG|JPEG)")
    # Parse the dtl file looking for PostScript specials and pdflatex files.
    # Pages using PostScript specials or pdflatex files are recorded in
    # ps_pages or pdf_pages, respectively, and then used to create a
    # different LaTeX file for processing in legacy mode.
    # If hyperref is detected, the corresponding page is recorded in pdf_pages.
    page_has_ps = False
    page_has_pdf = False
    page_index = 0
    ps_pages = []
    pdf_pages = []
    ps_or_pdf_pages = []
    for line in dv2dt_output.split("\n"):
        # New page
        if line.startswith("bop"):
            page_has_ps = False
            page_has_pdf = False
            page_index += 1
        # End of page
        if line.startswith("eop") and (page_has_ps or page_has_pdf):
            # We save in a list all the PostScript/PDF pages
            if page_has_ps:
                ps_pages.append(page_index)
            else:
                pdf_pages.append(page_index)
            ps_or_pdf_pages.append(page_index)
        if psliteral_re.match(line) != None:
            # Literal PostScript special detected!
            # If hyperref is detected, put this page on the pdf pages list
            if hyperref_re.match(line) != None:
                page_has_ps = False
                page_has_pdf = True
            else:
                page_has_ps = True
        elif pdffile_re.match(line) != None:
            # Inclusion of pdflatex image file detected!
            page_has_pdf = True
    # Create the -pp parameter for dvipng
    pages_parameter = ""
    if len(ps_or_pdf_pages) > 0 and len(ps_or_pdf_pages) < page_index:
        # Don't process Postscript/PDF pages with dvipng by selecting the
        # wanted pages through the -pp parameter. E.g., dvipng -pp 4-12,14,64
        pages_parameter = " -pp "
        skip = True
        last = -1
        # Use page ranges, as a list of pages could exceed command line
        # maximum length (especially under Win32)
        for index in range(1, page_index + 1):
            if (not index in ps_or_pdf_pages) and skip:
                # We were skipping pages but current page shouldn't be skipped.
                # Add this page to -pp, it could stay alone or become the
                # start of a range.
                pages_parameter += str(index)
                # Save the starting index to avoid things such as "11-11"
                last = index
                # We're not skipping anymore
                skip = False
            elif (index in ps_or_pdf_pages) and (not skip):
                # We weren't skipping but current page should be skipped
                if last != index - 1:
                    # If the start index of the range is the previous page
                    # then it's not a range
                    pages_parameter += "-" + str(index - 1)
                # Add a separator
                pages_parameter += ","
                # Now we're skipping
                skip = True
        # Remove the trailing separator
        pages_parameter = pages_parameter.rstrip(",")
        # We've to manage the case in which the last page is closing a range
        if (not index in ps_or_pdf_pages) and (not skip) and (last != index):
            pages_parameter += "-" + str(index)
    return (ps_pages, pdf_pages, page_index, pages_parameter)
def main(argv):
    """Convert the LaTeX snippet file named on the command line into PNG/PPM
    preview bitmaps plus a ``.metrics`` file.

    Prefers the fast dvipng route; pages that need PostScript or pdflatex
    support are routed through the legacy conversion instead, and the two
    sets of metrics are merged afterwards.

    Returns a ``(status, metrics)`` tuple; status 0 means success.
    """
    # Set defaults.
    dpi = 128
    fg_color = "000000"
    bg_color = "ffffff"
    bibtex = None
    latex = None
    lilypond = False
    lilypond_book = None
    output_format = "png"
    script_name = argv[0]

    # Parse and manipulate the command line arguments.
    try:
        (opts, args) = getopt.gnu_getopt(argv[1:], "dhv", ["bibtex=", "bg=",
            "debug", "dpi=", "fg=", "help", "latex=", "lilypond",
            "lilypond-book=", "png", "ppm", "verbose"])
    except getopt.GetoptError as err:
        error("%s\n%s" % (err, usage(script_name)))

    opts.reverse()
    for opt, val in opts:
        if opt in ("-h", "--help"):
            print(usage(script_name))
            sys.exit(0)
        elif opt == "--bibtex":
            bibtex = [val]
        elif opt == "--bg":
            bg_color = val
        elif opt in ("-d", "--debug"):
            lyxpreview_tools.debug = True
        elif opt == "--dpi":
            try:
                dpi = int(val)
            except ValueError:
                # was a bare except; int() on a getopt string can only
                # raise ValueError
                error("Cannot convert %s to an integer value" % val)
        elif opt == "--fg":
            fg_color = val
        elif opt == "--latex":
            latex = [val]
        elif opt == "--lilypond":
            lilypond = True
        elif opt == "--lilypond-book":
            lilypond_book = [val]
        elif opt in ("--png", "--ppm"):
            output_format = opt[2:]
        elif opt in ("-v", "--verbose"):
            lyxpreview_tools.verbose = True

    # Determine input file
    if len(args) != 1:
        err = "A single input file is required, %s given" % (len(args) or "none")
        error("%s\n%s" % (err, usage(script_name)))

    input_path = args[0]
    # 'input_dir' (was 'dir', which shadows the builtin)
    input_dir, latex_file = os.path.split(input_path)

    # Check for the input file
    if not os.path.exists(input_path):
        error('File "%s" not found.' % input_path)
    if len(input_dir) != 0:
        os.chdir(input_dir)

    if lyxpreview_tools.verbose:
        f_out = open('verbose.txt', 'a')
        sys.stdout = f_out
        sys.stderr = f_out

    # Echo the settings
    progress("Running Python %s" % str(sys.version_info[:3]))
    progress("Starting %s..." % script_name)
    if os.name == "nt":
        progress("Use win32_modules: %d" % lyxpreview_tools.use_win32_modules)
    progress("Output format: %s" % output_format)
    progress("Foreground color: %s" % fg_color)
    progress("Background color: %s" % bg_color)
    progress("Resolution (dpi): %s" % dpi)
    progress("File to process: %s" % input_path)

    # For python > 2 convert strings to bytes
    if not PY2:
        fg_color = bytes(fg_color, 'ascii')
        bg_color = bytes(bg_color, 'ascii')

    fg_color_dvipng = make_texcolor(fg_color, False)
    bg_color_dvipng = make_texcolor(bg_color, False)

    # For python > 2 convert bytes to string
    if not PY2:
        fg_color_dvipng = fg_color_dvipng.decode('ascii')
        bg_color_dvipng = bg_color_dvipng.decode('ascii')

    # External programs used by the script.
    latex = find_exe_or_terminate(latex or latex_commands)
    bibtex = find_exe(bibtex or bibtex_commands)
    if lilypond:
        lilypond_book = find_exe_or_terminate(lilypond_book or
            ["lilypond-book --safe"])

    # These flavors of latex are known to produce pdf output
    pdf_output = latex in pdflatex_commands

    progress("Latex command: %s" % latex)
    progress("Latex produces pdf output: %s" % pdf_output)
    progress("Bibtex command: %s" % bibtex)
    progress("Lilypond-book command: %s" % lilypond_book)
    progress("Preprocess through lilypond-book: %s" % lilypond)
    progress("Altering the latex file for font size and colors")

    # Make sure that multiple defined macros and the microtype package
    # don't cause issues in the latex file.
    fix_latex_file(latex_file, pdf_output)

    if lilypond:
        progress("Preprocess the latex file through %s" % lilypond_book)
        if pdf_output:
            lilypond_book += " --pdf"
        lilypond_book += " --latex-program=%s" % latex.split()[0]

        # Make a copy of the latex file
        lytex_file = latex_file_re.sub(".lytex", latex_file)
        shutil.copyfile(latex_file, lytex_file)

        # Preprocess the latex file through lilypond-book.
        lytex_status, lytex_stdout = run_tex(lilypond_book, lytex_file)

    if pdf_output:
        progress("Using the legacy conversion method (PDF support)")
        return legacy_conversion_step1(latex_file, dpi, output_format, fg_color,
            bg_color, latex, pdf_output)

    # This can go once dvipng becomes widespread.
    dvipng = find_exe(["dvipng"])
    if dvipng is None:
        progress("Using the legacy conversion method (dvipng not found)")
        return legacy_conversion_step1(latex_file, dpi, output_format, fg_color,
            bg_color, latex, pdf_output)

    dv2dt = find_exe(["dv2dt"])
    if dv2dt is None:
        progress("Using the legacy conversion method (dv2dt not found)")
        return legacy_conversion_step1(latex_file, dpi, output_format, fg_color,
            bg_color, latex, pdf_output)

    pngtopnm = ""
    if output_format == "ppm":
        pngtopnm = find_exe(["pngtopnm"])
        if pngtopnm is None:
            progress("Using the legacy conversion method (pngtopnm not found)")
            return legacy_conversion_step1(latex_file, dpi, output_format,
                fg_color, bg_color, latex, pdf_output)

    # Compile the latex file.
    error_pages = []
    latex_status, latex_stdout = run_latex(latex, latex_file, bibtex)
    latex_log = latex_file_re.sub(".log", latex_file)
    if latex_status:
        progress("Will try to recover from %s failure" % latex)
        error_pages = check_latex_log(latex_log)

    # The dvi output file name
    dvi_file = latex_file_re.sub(".dvi", latex_file)

    # If there's no DVI output, look for PDF and go to legacy or fail
    if not os.path.isfile(dvi_file):
        # No DVI, is there a PDF?
        pdf_file = latex_file_re.sub(".pdf", latex_file)
        if os.path.isfile(pdf_file):
            progress("%s produced a PDF output, fallback to legacy." \
                % (os.path.basename(latex)))
            progress("Using the legacy conversion method (PDF support)")
            return legacy_conversion_step1(latex_file, dpi, output_format,
                fg_color, bg_color, latex, True)
        else:
            error("No DVI or PDF output. %s failed." \
                % (os.path.basename(latex)))

    # Look for PS literals or inclusion of pdflatex files in DVI pages
    # ps_pages: list of indexes of pages containing PS literals
    # pdf_pages: list of indexes of pages requiring running pdflatex
    # page_count: total number of pages
    # pages_parameter: parameter for dvipng to exclude pages with PostScript
    (ps_pages, pdf_pages, page_count, pages_parameter) = find_ps_pages(dvi_file)

    # If all pages need PostScript or pdflatex, directly use the legacy method.
    if len(ps_pages) == page_count:
        progress("Using the legacy conversion method (PostScript support)")
        return legacy_conversion_step1(latex_file, dpi, output_format, fg_color,
            bg_color, latex, pdf_output)
    elif len(pdf_pages) == page_count:
        progress("Using the legacy conversion method (PDF support)")
        return legacy_conversion_step1(latex_file, dpi, output_format, fg_color,
            bg_color, "pdflatex", True)

    # Retrieve resolution
    resolution = extract_resolution(latex_log, dpi)

    # Run the dvi file through dvipng.
    dvipng_call = '%s -Ttight -depth -height -D %d -fg "%s" -bg "%s" %s "%s"' \
        % (dvipng, resolution, fg_color_dvipng, bg_color_dvipng, pages_parameter, dvi_file)
    dvipng_status, dvipng_stdout = run_command(dvipng_call)

    if dvipng_status:
        warning("%s failed to generate images from %s... fallback to legacy method" \
            % (os.path.basename(dvipng), dvi_file))
        progress("Using the legacy conversion method (dvipng failed)")
        return legacy_conversion_step1(latex_file, dpi, output_format, fg_color,
            bg_color, latex, pdf_output)

    # Extract metrics info from dvipng_stdout.
    metrics_file = latex_file_re.sub(".metrics", latex_file)
    dvipng_metrics = extract_metrics_info(dvipng_stdout)

    # If some pages require PostScript pass them to legacy method
    if len(ps_pages) > 0:
        # Create a new LaTeX file just for the snippets needing
        # the legacy method
        legacy_latex_file = latex_file_re.sub("_legacy.tex", latex_file)
        filter_pages(latex_file, legacy_latex_file, ps_pages)

        # Pass the new LaTeX file to the legacy method
        progress("Pages %s include postscript specials" % ps_pages)
        progress("Using the legacy conversion method (PostScript support)")
        legacy_status, legacy_metrics = legacy_conversion_step1(legacy_latex_file,
            dpi, output_format, fg_color, bg_color, latex, pdf_output, True)

        # Now we need to mix metrics data from dvipng and the legacy method
        original_bitmap = latex_file_re.sub("%d." + output_format, legacy_latex_file)
        destination_bitmap = latex_file_re.sub("%d." + output_format, latex_file)

        # Join metrics from dvipng and legacy, and rename legacy bitmaps
        join_metrics_and_rename(dvipng_metrics, legacy_metrics, ps_pages,
            original_bitmap, destination_bitmap)

    # If some pages require running pdflatex pass them to legacy method
    if len(pdf_pages) > 0:
        # Create a new LaTeX file just for the snippets needing
        # the legacy method
        legacy_latex_file = latex_file_re.sub("_legacy.tex", latex_file)
        filter_pages(latex_file, legacy_latex_file, pdf_pages)

        # Pass the new LaTeX file to the legacy method
        progress("Pages %s require processing with pdflatex" % pdf_pages)
        progress("Using the legacy conversion method (PDF support)")
        legacy_status, legacy_metrics = legacy_conversion_step1(legacy_latex_file,
            dpi, output_format, fg_color, bg_color, "pdflatex", True, True)

        # Now we need to mix metrics data from dvipng and the legacy method
        original_bitmap = latex_file_re.sub("%d." + output_format, legacy_latex_file)
        destination_bitmap = latex_file_re.sub("%d." + output_format, latex_file)

        # Join metrics from dvipng and legacy, and rename legacy bitmaps
        join_metrics_and_rename(dvipng_metrics, legacy_metrics, pdf_pages,
            original_bitmap, destination_bitmap)

    # Invalidate metrics for pages that produced errors
    if len(error_pages) > 0:
        error_count = 0
        for index in error_pages:
            if index not in ps_pages and index not in pdf_pages:
                dvipng_metrics.pop(index - 1)
                dvipng_metrics.insert(index - 1, (index, -1.0))
                error_count += 1
        if error_count:
            warning("Failed to produce %d preview snippet(s)" % error_count)

    # Convert images to ppm format if necessary.
    if output_format == "ppm":
        convert_to_ppm_format(pngtopnm, latex_file_re.sub("", latex_file))

    # Actually create the .metrics file
    write_metrics_info(dvipng_metrics, metrics_file)

    return (0, dvipng_metrics)
if __name__ == "__main__":
    # main() returns a (status, metrics) tuple; exit with the status code.
    sys.exit(main(sys.argv)[0])
| cburschka/lyx | lib/scripts/lyxpreview2bitmap.py | lyxpreview2bitmap.py | py | 23,257 | python | en | code | 33 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "lyxpreview_tools.error"... |
14437408352 | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 26 2022
@author: MotoWiZ
"""
#%%
import cv2
import time
import numpy as np
import math
from picamera2 import Picamera2
# HSV bounds handed to cv2.inRange() when masking for the white ball.
# NOTE(review): (0,0,0)-(0,0,255) only accepts pixels with H == 0 AND S == 0
# (pure greys/whites) — confirm this is the intended white filter.
lower_white = (0, 0, 0)
upper_white = (0, 0, 255)
t = time.time()  # timestamp of the last FPS printout
# Ball centre (pixels) for the current and the previous processed frame.
prevBallPosX, prevBallPosY, ballPosX, ballPosY = 0, 0, 0, 0
# -> Configuration of Raspberry Pi camera (Legacy disabled in raspi-config)
def camera_config():
    """Initialise the Raspberry Pi camera, start streaming and return it."""
    cam = Picamera2()
    preview_cfg = cam.create_preview_configuration(
        main={"format": 'XRGB8888', "size": (800, 600)})
    cam.configure(preview_cfg)
    cam.start()
    return cam
# -> Configure ROI with double mouse clicks
def mark_roi(event, x, y, flags, param):
    """Mouse callback: a double left-click marks one ROI corner.

    Records the click position in the module globals mouseX/mouseY, bumps
    nr_click, and paints a red dot on the module-level image ``img``.
    """
    global mouseX, mouseY, nr_click
    if event != cv2.EVENT_LBUTTONDBLCLK:
        return
    mouseX, mouseY = x, y
    cv2.circle(img, (x, y), 10, (0, 0, 255), -1)
    nr_click += 1
# -> Crop and convert image from camera
def get_image(picam2):
    """Grab one frame from the camera and return it converted RGBA -> RGB."""
    frame = picam2.capture_array()
    return cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB)
# -> Find white ball on table
def find_ball(img, x_cent, y_cent):
    """Locate the white ball inside the cropped frame.

    Returns (annotated_img, masked_view, ball_found, ball_x, ball_y);
    the coordinates are (0, 0) when no ball is detected.
    """
    found = False
    ball_x, ball_y = 0, 0
    # Mask the frame for "white" pixels in HSV space (module-level bounds).
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    mask = cv2.inRange(hsv, lower_white, upper_white)
    result = cv2.bitwise_and(img, img, mask=mask)
    # Binarise (Otsu) to estimate how much white is present.
    gray = cv2.cvtColor(result, cv2.COLOR_RGB2GRAY)
    _, bw = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    if np.sum(bw) > 50000:
        # Enough white pixels: take the mask centroid as the ball centre.
        moments = cv2.moments(mask)
        if moments["m00"] != 0:
            ball_x = int(moments["m10"] / moments["m00"])
            ball_y = int(moments["m01"] / moments["m00"])
        # Mark the centre with a red dot and a label on the masked view.
        cv2.circle(result, (ball_x, ball_y), 10, (0, 0, 255), -1)
        cv2.putText(result, "Object center", (ball_x - 110, ball_y - 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
        found = True
    else:
        for frame in (result, img):
            cv2.putText(frame, "No ball detected", (x_cent - 130, 100),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    # Green dot marks the ROI centre on both views.
    cv2.circle(result, (x_cent, y_cent), 10, (0, 255, 0), -1)
    cv2.circle(img, (x_cent, y_cent), 10, (0, 255, 0), -1)
    return img, result, found, ball_x, ball_y
# -> Windows for original, reduced and mask images
def windows(original, img, result):
    """Display the original, reduced (cropped) and masked frames.

    All three windows are 400x300, placed side by side along y=100.
    The previous implementation repeated the same four cv2 calls three
    times; the table below keeps title/image/x-offset together instead.
    """
    layout = (
        ("Original image", original, 10),
        ("Reduced image", img, 440),
        ("Masked image", result, 870),
    )
    for title, frame, x_offset in layout:
        cv2.namedWindow(title, cv2.WINDOW_NORMAL)
        cv2.imshow(title, frame)
        cv2.resizeWindow(title, 400, 300)
        cv2.moveWindow(title, x_offset, 100)
#----------------------------------------#
#------------PID CONTROL-----------------#
# Initial note (translated): the controller works with two angles, alpha and
# beta. In this rig alpha should drive the ball along the X axis and beta
# along the Y axis.  Required inputs: the tray-centre coordinates and the
# gains Kp, Ki, Kd.
sumErrorX = 1
sumErrorY = 1
#timeInterval = 1  # <- probably unnecessary (translated; unused)
alpha, beta, prevAlpha, prevBeta = 0,0,0,0
omega = 0.2  # low-pass weight given to the previous angles
#centerX, centerY = 240,240  # <- tray centre; exact values unknown (translated).
# Should be determined elsewhere and stored in the global centerX/centerY.
def PIDcontrol(ballPosX, ballPosY, prevBallPosX, prevBallPosY, centerX, centerY):
    """PID position controller for the ball-on-plate rig.

    Maps the ball's current/previous pixel position and the tray centre to a
    tilt magnitude ``alpha`` (degrees, clamped to 35) and a tilt direction
    ``beta`` (degrees, 0-360), low-pass filters both against the previous
    angles, and accumulates the integral error terms.

    Mutates module globals: alpha, beta, prevAlpha, prevBeta, sumErrorX,
    sumErrorY.  (Comments translated from Portuguese.)
    """
    global omega
    global sumErrorX, sumErrorY
    global alpha, beta, prevAlpha, prevBeta
    # Controller gains:
    Kp = 1  # proportional
    Ki = 1  # integral
    Kd = 1  # derivative
    # PID terms along X and Y.  The 0.0333 divisor is presumably the frame
    # period (~1/30 s) used by the derivative term — TODO confirm.
    Ix = Kp*(centerX-ballPosX) + Ki*sumErrorX + Kd*((prevBallPosX-ballPosX)/0.0333)
    Iy = Kp*(centerY-ballPosY) + Ki*sumErrorY + Kd*((prevBallPosY-ballPosY)/0.0333)
    # Scale down and round the control terms to 4 decimal places.
    Ix = round(Ix/10000, 4)
    Iy = round(Iy/10000, 4)
    # Ball exactly at the tray centre: level the tray.
    if Ix == 0 and Iy == 0:
        alpha = 0
        beta = 0
    # The following branches map the error vector (Ix, Iy) to (alpha, beta),
    # quadrant by quadrant; the magnitude saturates at alpha = 35 degrees.
    elif Ix != 0 and math.sqrt(Ix**2 + Iy**2) < 1:
        beta = math.atan(Iy/Ix)
        alpha = math.asin(math.sqrt(Ix**2 + Iy**2))
        beta = math.degrees(beta)
        alpha = math.degrees(alpha)
        if Ix < 0 and Iy >= 0:
            beta = abs(beta)
        elif Ix > 0 and Iy >= 0:
            beta = 180-abs(beta)
        elif Ix > 0 and Iy <= 0:
            beta = 180+abs(beta)
        elif Ix < 0 and Iy <= 0:
            beta = 360-abs(beta)
    elif Ix == 0 and math.sqrt(Ix**2 + Iy**2) < 1:
        # Error purely along Y.
        if Iy > 0:
            beta = 90
            alpha = math.asin(math.sqrt(Ix**2 + Iy**2))
        elif Iy < 0:
            beta = 270
            alpha = math.asin(math.sqrt(Ix**2 + Iy**2))
        alpha = math.degrees(alpha)
    elif Ix != 0 and math.sqrt(Ix**2 + Iy**2) > 1:
        # Large error: saturate the tilt at the mechanical limit.
        beta = math.degrees(math.atan(Iy/Ix))
        alpha = 35
        if Ix < 0 and Iy >= 0:
            beta = abs(beta)
        elif Ix > 0 and Iy >= 0:
            beta = 180-abs(beta)
        elif Ix > 0 and Iy <= 0:
            beta = 180+abs(beta)
        elif Ix < 0 and Iy <= 0:
            beta = 360-abs(beta)
    elif Ix == 0 and math.sqrt(Ix**2 + Iy**2) > 1:
        alpha = 35
        if Iy > 0:
            beta = 90
        elif Iy < 0:
            beta = 270
    # Clamp alpha to the mechanical limit.
    if alpha > 35:
        alpha = 35
    # Low-pass filter: blend with the previous angles using weight omega
    # (new = 20% previous + 80% current for omega = 0.2).
    alpha = prevAlpha * omega + (1-omega) * alpha
    beta = prevBeta * omega + (1-omega) * beta
    # Snap to 0.2-degree resolution (presumably the servo step — TODO confirm).
    alpha = round(round(alpha / 0.2) * 0.2, -int(math.floor(math.log10(0.2))))
    beta = round(round(beta / 0.2) * 0.2, -int(math.floor(math.log10(0.2))))
    # With alpha and beta determined, forward them to the servos (within the
    # servos' physical limits).
    if alpha <= 35 and beta <= 360:
        # TODO: send the computed angles to the servos here.
        pass
    # Debug output.
    #print(alpha, beta)
    print(Ix,Iy,alpha,beta,ballPosX,ballPosY,prevBallPosX,prevBallPosY,sumErrorX,sumErrorY)
    # Accumulate the integral error terms for the next call.
    sumErrorX += (centerX-ballPosX)
    sumErrorY += (centerY-ballPosY)
    # BUG FIX: remember the filtered angles so the next call blends against
    # them.  Previously prevAlpha/prevBeta were declared global but never
    # assigned anywhere, so the low-pass filter always blended with zero.
    prevAlpha, prevBeta = alpha, beta
#----------END OF PID CONTROL------------#
#----------------------------------------#
# -> Main
if __name__ == "__main__":
    # One-time setup: start the camera and let the user double-click two
    # opposite ROI corners on a still frame.
    picam2 = camera_config()
    img = get_image(picam2)
    cv2.namedWindow("Pick points")
    cv2.moveWindow("Pick points", 280,100)
    cv2.setMouseCallback("Pick points",mark_roi)
    nr_click = 0
    prev_nr_click = nr_click
    x_pos, y_pos = [], []
    # -> Start picker of corners
    # mark_roi updates mouseX/mouseY/nr_click; exit on the 3rd click or 'q'.
    while True:
        cv2.imshow("Pick points",img)
        k = cv2.waitKey(1) & 0xFF
        if nr_click == 3 or k == ord('q'):
            break
        elif nr_click != prev_nr_click:
            prev_nr_click = nr_click
            x_pos.append(mouseX)
            y_pos.append(mouseY)
            #print (mouseX,mouseY,nr_click)
    cv2.destroyAllWindows()
    #print (x_pos[0], y_pos[0])
    #print (x_pos[1], y_pos[1])
    # -> Find ball during movement
    while True:
        # Start timer to count FPS
        start_time = time.time()
        original = get_image(picam2)
        img = original.copy()
        # Calculate center of croped image and reduce original image
        # NOTE(review): if the two picked corners share a row or a column,
        # neither branch runs and x_cent/y_cent stay undefined (NameError
        # below) — confirm the picker guarantees distinct corners.
        if y_pos[0] < y_pos[1]:
            y_cent = int((y_pos[1] - y_pos[0])/2)
            if x_pos[0] < x_pos[1]:
                img = img[y_pos[0]:y_pos[1], x_pos[0]:x_pos[1]]
                x_cent = int((x_pos[1] - x_pos[0])/2)
            elif x_pos[0] > x_pos[1]:
                img = img[y_pos[0]:y_pos[1], x_pos[1]:x_pos[0]]
                x_cent = int((x_pos[0] - x_pos[1])/2)
        elif y_pos[0] > y_pos[1]:
            y_cent = int((y_pos[0] - y_pos[1])/2)
            if x_pos[0] < x_pos[1]:
                img = img[y_pos[1]:y_pos[0], x_pos[0]:x_pos[1]]
                x_cent = int((x_pos[1] - x_pos[0])/2)
            elif x_pos[0] > x_pos[1]:
                img = img[y_pos[1]:y_pos[0], x_pos[1]:x_pos[0]]
                x_cent = int((x_pos[0] - x_pos[1])/2)
        #print(x_cent, y_cent)
        # Remember the last known ball position before detecting the new one.
        if ballPosX != 0:
            prevBallPosX = ballPosX
            prevBallPosY = ballPosY
        img, result, ball_exists, ballPosX, ballPosY = find_ball(img, x_cent, y_cent)
        # NOTE(review): this overwrites the "previous" position with the
        # brand-new one, so PIDcontrol's derivative term sees zero velocity
        # whenever prevBallPosX != 0 — looks unintended; confirm.
        if not prevBallPosX == 0:
            prevBallPosX = ballPosX
            prevBallPosY = ballPosY
        # Call PID function
        if ball_exists:
            PIDcontrol(ballPosX, ballPosY, prevBallPosX, prevBallPosY, x_cent, y_cent)
        # Place green circle in center of original image
        cv2.circle(original, (400, 300), 10, (0, 255, 0), -1)
        windows(original, img, result)
        # Press 'q' to quit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # Print Frames per Second (at most once per second)
        if time.time() - t>1:
            print("FPS: ", "%.2f" % (1.0 / (time.time() - start_time)))
            t = time.time()
        # Exit closing cv2 windows
    cv2.destroyAllWindows()
| MotoWiZ/Balance-ball-on-a-table | Testers/ball - 360.py | ball - 360.py | py | 10,389 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "picamera2.Picamera2",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.EVENT_LBUTTONDBLCLK",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "cv2.circle... |
2412653803 | import pytest
import elasticsearch_dsl as es_dsl
from karp5.domain.services import search_service
from karp5.config import conf_mgr
@pytest.mark.parametrize("from_,size", [
    (0, 25),
    (1000, 25),
    (10000, 25),
    (0, None),
    (15000, 15875)
])
def test_large_lex(app_w_large_lex, from_, size):
    """Paginated search must return exactly the requested slice of the lexicon."""
    mode = "large_lex"
    total_num_entries = 20000
    query = es_dsl.Search(using=conf_mgr.elastic(mode), index=mode)
    response = search_service.execute_query(query, from_=from_, size=size)

    # size=None means "everything from from_ to the end"; otherwise the page
    # is truncated at the end of the index.
    remaining = total_num_entries - from_
    expected_len_hits = remaining if size is None else min(size, remaining)

    hits = response["hits"]["hits"]
    assert len(hits) == expected_len_hits
    assert response["hits"]["total"] == total_num_entries
    assert all("_source" in hit for hit in hits)
| spraakbanken/karp-backend-v5 | karp5/tests/integration_tests/domain/services/test_search_service_it.py | test_search_service_it.py | py | 887 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "elasticsearch_dsl.Search",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "karp5.config.conf_mgr.elastic",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "karp5.config.conf_mgr",
"line_number": 19,
"usage_type": "name"
},
{
"api_... |
74277064423 | from PyQt5 import Qt, QtCore, QtGui, QtWidgets
from ui.mainw import Ui_MainWindow
import os, json
import subprocess
class VirtualCD_Window(QtWidgets.QMainWindow, Ui_MainWindow):
    """Main window of a small "virtual CD drive" tool.

    Attaches an ISO image read-only to /dev/loop0 via ``losetup`` (retrying
    through ``gksudo`` when privileges are missing) and detaches it again.
    User-facing strings are Chinese and must stay as-is.
    """
    def __init__(self):
        super(VirtualCD_Window, self).__init__()
        self.setupUi(self)
        self.setWindowOpacity(0.5)
        self.setWindowFlags(Qt.Qt.WindowCloseButtonHint | Qt.Qt.WindowMinimizeButtonHint | Qt.Qt.WindowStaysOnTopHint)
        self.wHeight = self.height()
        # Query existing loop devices as JSON; non-empty output means an
        # image is already attached, which enables the Eject button.
        _result = subprocess.getstatusoutput('losetup -J')
        # print(_result)
        _loaded = False
        self._vcd_list = {}
        if _result[0] == 0:
            if _result[1] != '':
                self._vcd_list = json.loads(_result[1])
                # print(self._vcd_list)
                _loaded = True
            # print(_result[1].split('\n')[1])
        # print(_result)
        # print(type(_result))
        # print(_loaded)
        self.btnEject.setEnabled(_loaded)
        self.btnOpen.clicked.connect(self.loadISO)
        self.btnEject.clicked.connect(self.unloadISO)

    def loadISO(self):
        # Let the user pick an .iso file and attach it read-only to /dev/loop0.
        iso_file, file_type = QtWidgets.QFileDialog.getOpenFileName(self, caption='选择ISO文件', filter='光盘镜像文件(*.iso)')
        # print(iso_file, file_type)
        if iso_file != '':
            # print(f'选择文件为{iso_file}')
            result = subprocess.getstatusoutput(f'losetup -r /dev/loop0 {iso_file}')
            if result[0] != 0:
                # First attempt failed — retry with elevated privileges.
                result = subprocess.getstatusoutput(f'gksudo -m 加载虚拟光驱需要输入当前用户的密码才能继续 "losetup -r /dev/loop0 {iso_file}"')
            # print(result[0])
            if result[0] == 0:
                self.btnEject.setEnabled(True)
                # Refresh the cached loop-device list for unloadISO().
                _result = subprocess.getstatusoutput('losetup -J')
                self._vcd_list = json.loads(_result[1])
                # print(self._vcd_list)
            else:
                QtWidgets.QMessageBox.warning(self, '警告', f'文件"{iso_file}"加载失败!')
        # else:
        # print('没有选择任何文件')

    def unloadISO(self):
        # Unmount the auto-mounted ISO (if present under /media/<user>/...)
        # and then detach /dev/loop0, retrying via gksudo if needed.
        iso_filename = os.path.basename(self._vcd_list['loopdevices'][0]['back-file'])
        iso_mnt_path = '/media/' + os.environ['USER'] + '/' +iso_filename[:iso_filename.rfind('.')]
        # print('iso filename', iso_filename, iso_mnt_path)
        if os.path.exists(iso_mnt_path):
            result = subprocess.getstatusoutput(f'umount {iso_mnt_path}')
            # print(result)
        result = subprocess.getstatusoutput('losetup -d /dev/loop0')
        # print(result)
        if result[0] != 0:
            result = subprocess.getstatusoutput('gksudo -m 弹出虚拟光驱需要输入当前用户的密码才能继续 "losetup -d /dev/loop0"')
            # print(result)
        if result[0]==0:
            self.btnEject.setEnabled(False)
        else:
            QtWidgets.QMessageBox.warning(self, '警告', '虚拟光驱弹出失败!')
if __name__=="__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    mainw = VirtualCD_Window()
    # Centre the window.  BUG FIX: use integer division — "/" yields a float
    # under Python 3 and PyQt5's QWidget.move() only accepts ints (TypeError).
    x = (app.desktop().width() - mainw.width()) // 2
    y = (app.desktop().height() - mainw.height()) // 2
    mainw.move(x, y)
    mainw.show()
    sys.exit(app.exec_())
| zslukevin/Virtual-CD | VirtualCD.py | VirtualCD.py | py | 3,258 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "ui.mainw.Ui_MainWindow",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "... |
23604207590 | import argparse
import logging
import subprocess
import sys
import stanza
from iso639 import languages
sys.path.append('./resources')
import unimorph_inflect
def download_unimorph(lg: str):
    """Fetch the UniMorph dictionary for *lg* into unimorph_dicts/."""
    logging.info("downloading UniMorph dictionary for %s" % lg)
    subprocess.run("mkdir -p unimorph_dicts", shell=True)
    fetch_cmd = (
        "wget -q https://raw.githubusercontent.com/unimorph/%s/master/%s"
        " -O unimorph_dicts/%s" % (lg, lg, lg)
    )
    subprocess.run(fetch_cmd, shell=True)
def download_unimorph_inflect_models(lg: str):
    """Download the pretrained unimorph_inflect model for *lg*."""
    logging.info(f"downloading unimorph_inflect models for {lg}")
    unimorph_inflect.download(lg)
def prepare_label_vocab(lg: str):
    """Build the label vocabulary for *lg* from its UniMorph dictionary."""
    logging.info("preparing label vocabulary for %s" % lg)
    subprocess.run("mkdir -p label_vocab", shell=True)
    build_cmd = (
        "python morpheus_multilingual/utils/build_label_vocab.py -d unimorph_dicts/%s "
        "-l morpheus_multilingual/utils/dimension2label.txt -v label_vocab/%s" % (lg, lg)
    )
    subprocess.run(build_cmd, shell=True)
def download_stanza_model(two_letter_lg: str):
    """Fetch the Stanza pipeline for the given two-letter language code."""
    logging.info(f"downloading stanza model for {two_letter_lg}")
    stanza.download(two_letter_lg)
if __name__ == "__main__":
    logging.basicConfig(
        format="%(asctime)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        level=logging.INFO,
        handlers=[logging.StreamHandler()],
    )
    parser = argparse.ArgumentParser(description="setup language specific resources")
    parser.add_argument("langs", type=str, nargs="+", help="list of language codes (e.g. rus, deu)")
    args = parser.parse_args()

    for lg in args.langs:
        download_unimorph(lg)
        download_unimorph_inflect_models(lg)
        prepare_label_vocab(lg)
        # BUG FIX: was hard-coded part3="deu", so every requested language
        # downloaded the German Stanza model.  Resolve the current language
        # code instead.
        two_letter_lg = languages.get(part3=lg).alpha2
        download_stanza_model(two_letter_lg)
| murali1996/morpheus_multilingual | run_setup.py | run_setup.py | py | 1,835 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_n... |
13890870450 | """
Background
Algorithm Inorder(tree)
1. Traverse the left subtree, i.e., call Inorder(left-subtree)
2. Visit the root.
3. Traverse the right subtree, i.e., call Inorder(right-subtree)
**TODO: Can use a stack**
Algorithm Preorder(tree)
1. Visit the root.
2. Traverse the left subtree, i.e., call Preorder(left-subtree)
3. Traverse the right subtree, i.e., call Preorder(right-subtree)
Algorithm Postorder(tree)
1. Traverse the left subtree, i.e., call Postorder(left-subtree)
2. Traverse the right subtree, i.e., call Postorder(right-subtree)
3. Visit the root.
"""
"""
Notes:
It's known that inorder traversal of BST is an array sorted in the ascending order.
"""
from typing import List
class Node:
    """A binary-tree node holding a payload and two child links."""

    def __init__(self, val):
        self.data = val      # payload stored at this node
        self.l_child = None  # left child (initially absent)
        self.r_child = None  # right child (initially absent)
def binary_insert(root, node):
    """Insert *node* into the BST rooted at *root* and return the root.

    Values smaller than a node's data go to its left subtree, others to the
    right.  BUG FIX: the original ``root = node`` on an empty tree only
    rebound the local name, so inserting into ``None`` silently did nothing.
    Returning the (possibly new) root lets callers write
    ``root = binary_insert(root, node)``; call sites that ignore the return
    value behave exactly as before.
    """
    if root is None:
        return node
    if root.data > node.data:
        if root.l_child is None:
            root.l_child = node
        else:
            binary_insert(root.l_child, node)
    else:
        if root.r_child is None:
            root.r_child = node
        else:
            binary_insert(root.r_child, node)
    return root
"""
Trivial Solution:
1. First, in order traverse the tree and get the (almost) sorted list
2. Identify two swapped values in the list
3. Traverse the tree again to swap back
"""
# Definition for a binary tree node.
class TreeNode:
    """Minimal binary-tree node: a value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.left = left
        self.right = right
        self.val = val
class Solution:
    """Recover a BST in which exactly two nodes' values were swapped.

    Strategy: the inorder walk of a valid BST is sorted ascending, so the
    out-of-order positions in the inorder sequence identify the two swapped
    values; a second traversal then swaps them back in place.

    Changes from the original: debug ``print`` calls removed from the
    solution path, the recursive O(n^2) list-concatenation inorder walk
    replaced with an iterative O(n) one, and annotations quoted so the class
    has no import-time dependency on TreeNode.
    """

    def recoverTree(self, root: "TreeNode") -> None:
        """Fix the tree in place; returns nothing."""
        if root is None:
            return
        nodes = self.in_order_traverse(root)
        self.first_val, self.second_val = self.find_swap(nodes)
        self.dfs(root)

    def dfs(self, root: "TreeNode"):
        """Walk the tree, exchanging first_val and second_val wherever seen."""
        if root is None:
            return
        if root.val == self.first_val:
            root.val = self.second_val
        elif root.val == self.second_val:
            root.val = self.first_val
        self.dfs(root.left)
        self.dfs(root.right)

    def in_order_traverse(self, root: "TreeNode") -> List[int]:
        """Return the inorder value sequence (iterative, O(n) time/space)."""
        nodes: List[int] = []
        stack = []
        cur = root
        while stack or cur is not None:
            while cur is not None:
                stack.append(cur)
                cur = cur.left
            cur = stack.pop()
            nodes.append(cur.val)
            cur = cur.right
        return nodes

    def find_swap(self, l: List[int]):
        """Given an almost-sorted list, return the two swapped values.

        Two descents (l[i] > l[i+1]) mean a non-adjacent swap: the pair is
        (start of first descent, end of second).  A single descent means the
        swapped elements are adjacent.
        """
        first_pos = -1
        second_pos = -1
        for i in range(len(l) - 1):
            if l[i] > l[i + 1]:
                if first_pos == -1:
                    first_pos = i
                else:
                    second_pos = i + 1
        if second_pos == -1:
            # Adjacent swap: only one descent was found.
            second_pos = first_pos + 1
        return l[first_pos], l[second_pos]

    def in_order_find_swap(self, root: "TreeNode"):
        """Alternative single-pass iterative recovery (O(h) extra space).

        Tracks the inorder predecessor; the first descent fixes x, the last
        descent fixes y, and the two values are swapped at the end.
        """
        x, y = None, None
        pred = None
        stack = []
        while len(stack) > 0 or root:
            while root:
                stack.append(root)
                root = root.left
            root = stack.pop()
            if pred and root.val < pred.val:
                y = root
                if x is None:
                    x = pred
            pred = root
            root = root.right
        x.val, y.val = y.val, x.val
x.val, y.val = y.val, x.val
"""
The idea of Morris algorithm is to set the temporary link between the node and its predecessor: predecessor.right = root. So one starts from the node, computes its predecessor and verifies if the link is present.
There is no link? Set it and go to the left subtree.
There is a link? Break it and go to the right subtree.
There is one small issue to deal with : what if there is no left child, i.e. there is no left subtree? Then go straightforward to the right subtree.
"""
| orangered233/LeetCode | recover_binary_search_tree.py | recover_binary_search_tree.py | py | 4,358 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 97,
"usage_type": "name"
}
] |
27904683592 | import argparse
def parse_args():
    """
    parsing and configuration
    :return: argparse.Namespace with all pix2pix settings
    """
    def _str2bool(value):
        # BUG FIX: the original used type=bool, but bool("False") is True
        # (any non-empty string is truthy), so --is_training False silently
        # enabled training.  Convert the text explicitly instead.
        return str(value).strip().lower() in ('true', '1', 'yes', 'y')

    desc = "Tensorflow implementation of pix2pix"
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--module', type=str, default='test_dataset',
                        help='Module to select: train, test ...')
    parser.add_argument('--is_training', type=_str2bool, default=True,
                        help='When the module is train, True, else False')
    parser.add_argument('--tfrecord_dir', type=str, default='data/VOC-tfrecord',
                        help='Path of the dataset transferred to tfrecord')
    parser.add_argument('--split', type=str, default='train',
                        help='Type of data to transfer: train, test ...')
    # Weight of the L1 regularization term in the generator loss.
    parser.add_argument("--l1_weight", type=float, default=100.0,
                        help="weight on L1 term for generator gradient")
    # Weight of the GAN term in the generator gradient.
    parser.add_argument("--gan_weight", type=float, default=1.0,
                        help="weight on GAN term for generator gradient")
    parser.add_argument("--dataset1", type=str, default='C:\\project\\PY\\GAN\\pix2pix-tensorflow-master\\pix2pix-tensorflow-master\\photos\\combined\\tfrecords',
                        help="Path of the dataset")
    parser.add_argument("--dataset", type=str, default='C:\\project\\PY\\GAN\\pix2pix-tensorflow-master\\pix2pix-tensorflow-master\\rain\\facades\\train\\tfrecords',
                        help="Path of the dataset")
    parser.add_argument('--batch_size', type=int, default=1,
                        help='The size of each training batch')
    parser.add_argument('--num_epochs', type=int, default=1,
                        help='The number of the total training epoch')
    parser.add_argument('--img_width', type=int, default=256,
                        help='The width of img')
    parser.add_argument('--img_height', type=int, default=256,
                        help='The height of img')
    parser.add_argument('--scale_size', type=int, default=530,
                        help='The scale of img for crop')
    parser.add_argument('--crop_size', type=int, default=512,
                        help='The crop_size of img')
    return parser.parse_args()
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
}
] |
2959315606 | #%%
import pandas as pd
import numpy as np
import os
import re
import time
from functools import wraps, reduce
from copy import deepcopy
from sklearn.model_selection import train_test_split
#%%
project_dict_second = {
'01':'有源手术器械',
'02':'无源手术器械',
'03':'神经和心血管手术器械',
'04':'骨科手术器械',
'05':'耳鼻喉手术器械',
'06':'医用成像器械',
'07':'医用诊察监护器械',
'08':'呼吸麻醉急救器械',
'09':'物理治疗器械',
'10':'矫形外科(骨科)手术器械',
'11':'医疗器械消毒灭菌器械',
'12':'妇产科用手术器械',
'13':'计划生育手术器械',
'14':'注射护理防护器械',
'16':'眼科器械',
'17':'口腔科器械',
'18':'妇产科辅助生殖和避孕器械',
'19':'物理治疗及康复设备',
'20':'中医器械',
'21':'医用诊察和监护器械',
'22':'医用光学器具仪器及内窥镜设备',
'23':'医用超声仪器及有关设备',
'24':'医用激光仪器设备',
'25':'医用高频仪器设备',
'26':'物理治疗及康复设备',
'27':'中医器械',
'28':'医用成像器械(磁共振)',
'30':'医用成像器械(X射线)',
'31':'医用成像器械(成像辅助)',
'32':'放射治疗器械',
'33':'医用核素设备',
'40':'临床检验器械',
'41':'临床检验器械',
'45':'输血透析和体外循环器械',
'46':'认知言语试听障碍康复设备',
'50':'??????',
'54':'手术室急救室诊疗室设备及器具',
'55':'口腔科器械',
'56':'病房护理设备及器具',
'57':'医疗器械消毒灭菌器械',
'58':'医用冷疗低温冷藏设备及器具',
'63':'口腔科器械',
'64':'注射护理防护器械',
'65':'医用缝合材料及粘合剂',
'66':'医用高分子材料及制品',
'70':'医用软件',
'77':'无源手术器械'
}
project_dict_third = {
'01':'有源手术器械',
'02':'无源手术器械',
'03':'神经和心血管手术器械',
'04':'骨科手术器械',
'05':'放射治疗器械',
'06':'医用成像器械',
'07':'医用侦察和监护器械',
'08':'呼吸麻醉急救器械',
'09':'物理治疗器械',
'10':'输血透析体外循环器械',
'11':'医疗器械消毒灭菌器械',
'12':'有源植入器械',
'13':'无源植入器械',
'14':'注射护理防护器械',
'15':'注射护理防护器械',
'16':'眼科器械',
'17':'口腔科器械',
'18':'妇产科辅助生殖和避孕器械',
'19':'物理治疗及康复设备',
'20':'中医器械',
'21':'医用诊察和监护器械',
'22':'医用光学器具仪器及内窥镜设备',
'23':'医用超声仪器及有关设备',
'24':'医用激光仪器设备',
'25':'医用高频仪器设备',
'26':'物理治疗及康复设备',
'27':'中医器械',
'28':'医用成像器械(磁共振)',
'30':'医用成像器械(X射线)',
'31':'医用成像器械(成像辅助)',
'32':'放射治疗器械',
'33':'医用核素设备',
'40':'临床检验器械',
'41':'临床检验器械',
'45':'输血透析和体外循环器械',
'46':'无源植入器械',
'54':'手术室急救室诊疗室设备及器具',
'58':'物理治疗器械',
'63':'口腔科器械',
'64':'注射护理防护器械',
'65':'医用缝合材料及粘合剂',
'66':'注输护理和防护器械',
'70':'医用软件',
'77':'神经和心血管手术器械'
}
#%%
def benchmark():
def middle(f):
@wraps(f)
def wrapper(*args, **kwargs):
t_start = time.time()
result = f(*args, **kwargs)
t_end = time.time()
print(f'{f.__name__} takes {t_end-t_start}')
return result
return wrapper
return middle
@benchmark()
def rm_element(content: list, condition: str) -> list:
result = deepcopy(content)
for i in result[::-1]:
if re.match(re.compile(condition), i) is not None:
pass
else:
result.remove(i)
return result
@benchmark()
def data_preprocess(folder_name: str,
project_dict_second: dict,
project_dict_third: dict) -> pd.DataFrame:
# get the list of data files
file_list = os.listdir(folder_name)
# rm the unrelated file
df_list = rm_element(file_list, '[0-9a-zA-Z\_]+\.csv')
# loop the list, transfer it into pd.DataFrame and concat
for i, j in enumerate(df_list):
df_list[i] = pd.read_csv(folder_name+'/'+j)
df = pd.concat(df_list)
# select the data from 2010-2019
df = df[df['注册证编号'].str.match(r'[\u4e00-\u9fa5]{1,5}201[1-9]{1}[23]{1}[0-7]{1}[0-9]{1}[0-9]{4}')]
# select the target columns
df = df.loc[:, ['注册证编号','产品名称','结构及组成/主要组成成分', '适用范围/预期用途']]
# replace the class row
df['注册证编号'] = df['注册证编号'].str.extract(r'(201[1-9]{1}[23]{1}[0-7]{1}[0-9]{1}[0-9]{4})')
df['注册证编号'] = df['注册证编号'].str[4:7]
# fill NaN
df = df.fillna(value='')
# concat text columns
def column_add(c1: pd.Series, c2: pd.Series) -> pd.Series:
return c1.str.cat(c2, sep=' ')
df['合并数据'] = reduce(column_add, [df[column] for column in df.columns][1:])
df = df[['注册证编号','合并数据']]
# reset Index
df.reset_index(drop=True)
# rename dataframe
df = df.rename(columns={'注册证编号':'label', '合并数据':'text'})
# drop the \r\n\t in the str in the text col
df['text'] = df['text'].str.replace('(\\t|\\n|\\r)', '', regex=True)
# drop 215 cause manual check for conflict
df = df[(df['label'] != '215') & (df['label'] != '234')]
# drop duplicates
df = df.drop_duplicates()
# drop the label which just got 6 or less examples
df = df.groupby('label').filter(lambda x: len(x) > 13)
# transfer label into str
def project(x: int, project_dict_second: dict, project_dict_third:dict) -> str:
# type transform
try:
data = str(x)
except:
pass
data = str(x)
# administration category
if data[0] == '2':
result = '第二类'
result = result + project_dict_second[data[1:]]
else:
result = '第三类'
result = result + project_dict_third[data[1:]]
return result
df['label'] = df['label'].apply(project, args=(project_dict_second, project_dict_third))
# split them in to train, val, test
modelling, val = train_test_split(df, test_size=0.1, stratify=df['label'], random_state=42)
train, test = train_test_split(modelling, test_size=0.11, stratify=modelling['label'], random_state=42)
return train, val, test
#%%
if __name__ == '__main__':
train, val, test = data_preprocess('data_folder', project_dict_second, project_dict_third)
print(len(train))
print(len(val))
print(len(test))
train.to_csv('cleaned_data/training_data.txt', index=False, header=False, sep='\t', encoding='utf-8')
val.to_csv('cleaned_data/val_data.txt', index=False, header=False, sep='\t', encoding='utf-8')
test.to_csv('cleaned_data/test_data.txt',index=False, header=False, sep='\t', encoding='utf-8')
print(train.groupby(['label']).count())
print(val.groupby(['label']).count())
print(test.groupby(['label']).count())
print(train.head())
# when filter is > 11, label = 63
# when filter is > 6, label = 67 | BigMasonFang/cfda | data_anly.py | data_anly.py | py | 7,677 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_numb... |
35781259149 | import os
import pandas as pd
import random
import numpy as np
import tensorflow as tf
from datetime import datetime
import models
from sklearn.metrics import r2_score
import math
IDS_TRAIN = []
IDS_TRAIN.append([12, 0, 1, 11])
IDS_TRAIN.append([7, 0, 17, 11])
IDS_TRAIN.append([12, 5, 15, 9])
IDS_TRAIN.append([8, 22, 3, 11])
IDS_TRAIN.append([7, 0, 14, 9])
if not len(IDS_TRAIN):
n_combs = 5
else:
n_combs = len(IDS_TRAIN)
os.environ["CUDA_VISIBLE_DEVICES"] = ""
np.random.seed(0)
random.seed(0)
params = {}
params["path_data"] = []
params["path_data"].append("../Dataset_Collection/Datasets/Exp_2021-08-01 15-18-24/")
params["path_data"].append("../Dataset_Collection/Datasets/Exp_2021-08-04 16-58-16/")
params["BATCH_SIZE"] = 32
params["LOSS"] = 'mse'
params["N_ITERATIONS_VAL"] = 0.0625
params["CATEGORY_VAL"] = []
params["TERRAIN_IDS"] = []
params["LENGTH_SHOTS"] = 3
params["LENGTH_META"] = 3
params["N_SHOTS"] = 3
params["N_META"] = 1
params["LENGTH_SEQUENCE"] = 3
params["LENGTH_PAST"] = 0
params["MERGED_SHOTS_GEOM"] = True
params["MERGED_META_GEOM"] = True
params["MERGED_SHOTS_OUTPUT"] = True
params["MERGED_META_OUTPUT"] = True
params["MERGED_MODEL"] = True
params["SHOTS_EXTRA_INFO"] = []
params["WHICH_MODELS"] = []
params["ENERGY_NORM_TYPE"] = "standardize"
params["REMOVE_ROUGH"] = False
params["MAX_VAR_ROUGH"] = 2.25
params["MODEL"] = "model_ST-Conv1D"
params["REF_MODEL_DIR"] = "./Exp00/log_ST_conv1d/"
params["MODEL_LOG_NAME"] = "conv_sum"
params["BATCH_TYPE"] = "mixed"
params["INPUT_FEATURES"] = ["wheel_trace"]
params["WHEEL_TRACE_SHAPE"] = (78,40)
params["REMOVE_CENTRAL_BAND"] = 0
params["path_sum_indices"] = []
params["path_sum_indices"].append("../Dataset_Collection/Datasets/Exp_2021-08-01 15-18-24/Merged_Data/sum_indices.csv")
params["path_sum_indices"].append("../Dataset_Collection/Datasets/EExp_2021-08-04 16-58-16/Merged_Data/sum_indices.csv")
CATEGORIES = []
CATEGORIES.append([7, 8, 10, 12]) # Clay high moisture content
CATEGORIES.append([5, 0, 22]) # Loose frictional
CATEGORIES.append([1, 3, 4, 13, 14, 15, 16, 17]) # Compact frictional
CATEGORIES.append([9, 11]) # Dry clay
class Batch_Generator():
"""
Keras Sequence object to train a model on larger-than-memory data.
modified from: https://stackoverflow.com/questions/51843149/loading-batches-of-images-in-keras-from-pandas-dataframe
"""
def __init__(self, df, df_idx, batch_size, mode='train'):
self.len_df = len(df)
self.batch_size = batch_size
self.mode = mode # shuffle when in train mode
self.df = {}
self.df_idx = {}
self.terrain_ids = list(df["terrain_id"].drop_duplicates().values)
self.datasets = list(df["dataset"].drop_duplicates().values)
for terrain_id in self.terrain_ids:
self.df["{}".format(terrain_id)] = {}
dfi = df[df.terrain_id==terrain_id]
self.df_idx["{}".format(terrain_id)] = df_idx[df_idx.terrain_id==terrain_id]
for dataset in self.datasets:
self.df["{}".format(terrain_id)]["{}".format(dataset)] = dfi[dfi.dataset==dataset]
if self.mode == 'train':
self.total_steps = int(math.ceil(self.len_df / float(self.batch_size))*params["N_ITERATIONS_TRAIN"])
elif self.mode == 'validation':
self.total_steps = int(math.ceil(self.len_df / float(self.batch_size))*params["N_ITERATIONS_VAL"])
self.step = 0
self.epoch = 0
self.on_epoch_end()
def on_epoch_end(self):
# Shuffles indexes after each epoch if in training mode
self.indexes = range(self.len_df)
if self.mode == 'train':
self.indexes = random.sample(self.indexes, k=len(self.indexes))
def get_batch(self):
for i in range(self.batch_size):
terrain_id = random.sample(self.terrain_ids,1)[0]
dfx = self.df_idx["{}".format(terrain_id)]
if params["BATCH_TYPE"] == "mixed" and len(self.datasets)>1:
if random.random() > 1/3:
dataset_id = random.sample(self.datasets, 1)[0]
dfx = dfx[dfx["dataset"]==dataset_id]
row = dfx.sample(n=params["N_SHOTS"]+params["N_META"])
row_shots = row.sample(n=params["N_SHOTS"])
row_meta = row.drop(row_shots.index)
if len(self.datasets)>1:
shot_tot = pd.DataFrame()
for (k,d) in zip(row_shots.k.values,row_shots.dataset.values):
shot = self.df["{}".format(terrain_id)]["{}".format(d)].iloc[[k+z for z in range(params["LENGTH_SHOTS"])]]
shot_tot = pd.concat([shot_tot,shot])
meta_tot = pd.DataFrame()
for (k,d) in zip(row_meta.k.values,row_meta.dataset.values):
meta = self.df["{}".format(terrain_id)]["{}".format(d)].iloc[[k+z for z in range(params["LENGTH_META"])]]
meta_tot = pd.concat([meta_tot,meta])
else:
idx = []
for k in row_shots.k.values:
idx.extend(k+z for z in range(params["LENGTH_SHOTS"]))
shot_tot = self.df["{}".format(terrain_id)]["0"].iloc[idx]
idx = []
for k in row_meta.k.values:
idx.extend(k+z for z in range(params["LENGTH_META"]))
meta_tot = self.df["{}".format(terrain_id)]["0"].iloc[idx]
xe_shots = shot_tot.loc[:, ["energy"]].values
ye_meta = meta_tot.loc[:, ["energy"]].values
if params["SHOTS_EXTRA_INFO"]:
xei_shots = shot_tot.loc[:, params["SHOTS_EXTRA_INFO"]].values
xg_shots_string = shot_tot.loc[:, ["wheel_trace"]].values
for p in range(len(xg_shots_string)):
xg = np.array([float(v) for v in xg_shots_string[p][0].split(' ')]).reshape(params["WHEEL_TRACE_SHAPE"])
if not p:
xg_shots = np.expand_dims(xg, axis=0)
else:
xg_shots = np.concatenate(
[xg_shots, np.expand_dims(xg, axis=0)], axis=0)
xg_meta_string = meta_tot.loc[:, ["wheel_trace"]].values
for p in range(len(xg_meta_string)):
xg = np.array([float(v) for v in xg_meta_string[p][0].split(' ')]).reshape(params["WHEEL_TRACE_SHAPE"])
if not p:
xg_meta = np.expand_dims(xg, axis=0)
else:
xg_meta = np.concatenate(
[xg_meta, np.expand_dims(xg, axis=0)], axis=0)
if params["MERGED_SHOTS_GEOM"]:
xg_shots_tot = np.empty((params["N_SHOTS"],
xg_shots.shape[-2]+16 *
(params["LENGTH_SHOTS"]-1),
xg_shots.shape[-1]))
zrel = shot_tot.loc[:, ["zrel"]].values
for z in range(params["N_SHOTS"]):
# Merge the shots in a single terrain trace
xg_shots_tot[z, :xg_shots.shape[-2]
] = xg_shots[z*params["LENGTH_SHOTS"]]
for p in range(1, params["LENGTH_SHOTS"]):
xg_shots_tot[z, xg_shots.shape[-2]+16*(p-1):xg_shots.shape[-2]+16*p] = xg_shots[p+z *
params["LENGTH_SHOTS"], -16:]+zrel[p+z*params["LENGTH_SHOTS"]]-zrel[z*params["LENGTH_SHOTS"]]
else:
xg_shots_tot = xg_shots
if params["MERGED_META_GEOM"]:
xg_meta_tot = np.empty((params["N_META"],
xg_meta.shape[-2]+16 *
(params["LENGTH_META"]-1),
xg_meta.shape[-1]))
zrel = meta_tot.loc[:, ["zrel"]].values
for z in range(params["N_META"]):
# Merge the shots in a single terrain trace
xg_meta_tot[z, :xg_meta.shape[-2]
] = xg_meta[z*params["LENGTH_META"]]
for p in range(1, params["LENGTH_META"]):
xg_meta_tot[z, xg_meta.shape[-2]+16*(p-1):xg_meta.shape[-2]+16*p] = xg_meta[p+z *
params["LENGTH_META"], -16:]+zrel[p+z*params["LENGTH_META"]]-zrel[z*params["LENGTH_META"]]
else:
xg_meta_tot = xg_meta
if not i:
XG_SHOTS = np.expand_dims(xg_shots_tot, axis=0)
XE_SHOTS = np.expand_dims(xe_shots, axis=0)
XG_META = np.expand_dims(xg_meta_tot, axis=0)
YE_META = np.expand_dims(ye_meta, axis=0)
if params["SHOTS_EXTRA_INFO"]:
XEI_SHOTS = np.expand_dims(xei_shots, axis=0)
else:
XG_SHOTS = np.concatenate(
[XG_SHOTS, np.expand_dims(xg_shots_tot, axis=0)], axis=0)
XE_SHOTS = np.concatenate(
[XE_SHOTS, np.expand_dims(xe_shots, axis=0)], axis=0)
XG_META = np.concatenate(
[XG_META, np.expand_dims(xg_meta_tot, axis=0)], axis=0)
YE_META = np.concatenate(
[YE_META, np.expand_dims(ye_meta, axis=0)], axis=0)
if params["SHOTS_EXTRA_INFO"]:
XEI_SHOTS = np.concatenate(
[XEI_SHOTS, np.expand_dims(xei_shots, axis=0)], axis=0)
if params["REMOVE_CENTRAL_BAND"]:
XG_l = XG_SHOTS[:, :, :, :(
params["WHEEL_TRACE_SHAPE"][1]-params["REMOVE_CENTRAL_BAND"])//2]
XG_r = XG_SHOTS[:, :, :, -(params["WHEEL_TRACE_SHAPE"]
[1]-params["REMOVE_CENTRAL_BAND"])//2:]
XG_SHOTS = np.concatenate([XG_l, XG_r], axis=-1)
XG_l = XG_META[:, :, :, :(
params["WHEEL_TRACE_SHAPE"][1]-params["REMOVE_CENTRAL_BAND"])//2]
XG_r = XG_META[:, :, :, -(params["WHEEL_TRACE_SHAPE"]
[1]-params["REMOVE_CENTRAL_BAND"])//2:]
XG_META = np.concatenate([XG_l, XG_r], axis=-1)
if params["SHOTS_EXTRA_INFO"]:
XEI_SHOTS_TOT = np.concatenate([XE_SHOTS, XEI_SHOTS], axis = -1)
else:
XEI_SHOTS_TOT = XE_SHOTS
X = [XG_SHOTS, XEI_SHOTS_TOT, XG_META]
Y = []
for sh in range(params["N_SHOTS"]):
if params["MERGED_META_OUTPUT"]:
for z in range(params["N_META"]):
Y.append(
np.sum(YE_META[:, z*params["LENGTH_META"]:(z+1)*params["LENGTH_META"]], axis=1))
else:
for z in range(params["N_META"]*params["LENGTH_META"]):
Y.append(YE_META[:, z])
Y = np.stack([y for y in Y],axis=1)
if self.step == self.total_steps-1:
self.step = 0
self.epoch += 1
self.on_epoch_end()
else:
self.step += 1
if self.mode == "prediction":
return X
else:
return X, Y
def reference_pred(ref_models, X):
XG_SHOTS, XE_SHOTS, XG_META = X
XE_SHOTS = np.sum(XE_SHOTS.reshape(params["BATCH_SIZE"],params["N_SHOTS"],params["LENGTH_SHOTS"]),axis=-1)
for n_shots in range(1, params["N_SHOTS"]+1):
# Identify most similar model based on shots
error = np.empty((len(ref_models),params["BATCH_SIZE"]))
for i, ref_model in enumerate(ref_models):
for b in range(params["BATCH_SIZE"]):
ye = ref_model["model"](XG_SHOTS[b,:n_shots])
ye = ye.numpy().reshape((ye.shape[0],))
if not b:
YE_SHOTS = np.expand_dims(ye,axis=0)
else:
YE_SHOTS = np.concatenate([YE_SHOTS,np.expand_dims(ye,axis=0)],axis=0)
YE_SHOTS = YE_SHOTS*ref_model["energy_val2"] + params["N_SHOTS"]*ref_model["energy_val1"]
if params["LOSS"] == 'mse':
error_ref_i = np.sum(np.square(YE_SHOTS-XE_SHOTS[:,:n_shots]),axis=1)
elif params["LOSS"] == 'mae':
error_ref_i = np.sum(np.abs(YE_SHOTS-XE_SHOTS[:,:n_shots]),axis=1)
error[i] = error_ref_i.squeeze()
best_m = np.argmin(error,axis=0)
# Use it to make new prediction
for i, b in enumerate(best_m):
ye = ref_models[b]["model"](XG_META[i])
ye = ye.numpy()*ref_models[b]["energy_val2"]+params["N_SHOTS"]*ref_models[b]["energy_val1"]
if not i:
YE_P_META = np.expand_dims(ye,axis=0)
else:
YE_P_META = np.concatenate([YE_P_META,np.expand_dims(ye,axis=0)],axis=0)
if n_shots ==1:
YE_P_META_TOT = YE_P_META
else:
YE_P_META_TOT = np.concatenate([YE_P_META_TOT,YE_P_META],axis=1)
return YE_P_META_TOT
def isint(x):
try:
int(x)
return True
except ValueError:
return False
def isfloat(x):
if '[' in x:
x = x[1:-1]
try:
float(x)
return True
except ValueError:
return False
def main():
if not params["TERRAIN_IDS"]:
id_files = [".csv"]
else:
id_files = ["data_{}.csv".format(idt) for idt in params["TERRAIN_IDS"]]
df = pd.DataFrame()
for i, path_data in enumerate(params["path_data"]):
files = os.listdir(path_data)
for file in files:
if any([p in file for p in id_files]):
dfi = pd.read_csv(path_data+file)
## Removing some of data:
# Samples without failures
try:
dfi = dfi[dfi.goal==1]
except:
pass
# Samples without initial acceleration
dfi = dfi[dfi.segment!=0]
try:
# Samples without low mean speed
dfi = dfi[dfi.mean_speed>0.87]
# Samples without low initial speed
dfi = dfi[dfi.initial_speed>0.88]
except:
# Samples without low mean speed
dfi = dfi[dfi.mean_speed_long>0.87]
# Samples without low initial speed
dfi = dfi[dfi.initial_speed_long>0.88]
if params["REMOVE_ROUGH"]:
# Samples without rough pitch/roll variations
dfi = dfi.loc[(dfi.var_pitch_est <=params["MAX_VAR_ROUGH"]) | (dfi.var_roll_est <=params["MAX_VAR_ROUGH"])]
# dfi["std_pitch_est"] = dfi["var_pitch_est"].pow(0.5)
# dfi["std_roll_est"] = dfi["var_roll_est"].pow(0.5)
# dfi["curvature"] = dfi["curvature"]*100
# dfi["curvature_tm1"] = dfi["curvature_tm1"]*100
try:
dfi = dfi.drop(columns=['wheel_types'])
except:
pass
dfi["energy"] = dfi["energy"].clip(lower = 0.0)
dfi["dataset"] = [i]*len(dfi)
df = pd.concat([df,dfi])
df_sum_indices = pd.DataFrame()
for i, path_idx in enumerate(params["path_sum_indices"]):
dfi = pd.read_csv(path_idx).drop_duplicates()
dfi["dataset"] = [i]*len(dfi)
df_sum_indices = pd.concat([df_sum_indices,dfi], ignore_index=True)
results = []
for comb in range(n_combs):
if not len(IDS_TRAIN):
# Selection of terrains for training and validation by category
id_val = []
id_train = []
for cat in range(len(CATEGORIES)):
t_ids = CATEGORIES[cat]
id_train.extend(random.sample(t_ids, params["TRAINING_DATASETS_PER_CATEGORY"][cat]))
id_val.extend([t for t in t_ids if t not in id_train])
else:
id_train = IDS_TRAIN[comb]
id_val = []
for cat in range(len(CATEGORIES)):
t_ids = CATEGORIES[cat]
id_val.extend([t for t in t_ids if t not in id_train])
df_train = df[df["terrain_id"].isin(id_train)]
df_val = df[df["terrain_id"].isin(id_val)]
print("Validation Samples: {}".format(len(df_val)))
print("Training Terrains {}".format(id_train))
print("Validation Terrains {}".format(id_val))
# Ref models
ref_models = []
summary_flag = True
for id_t in id_train:
ref_models.append({"model": models.get_model(params, summary=summary_flag),
"id_t": str(id_t),
"energy_val1": 0, "energy_val2": 1})
model_weights = "{}{}_{}/model_best.hdf5".format(params["REF_MODEL_DIR"],id_t,params["MODEL_LOG_NAME"])
ref_models[-1]["model"].load_weights(model_weights)
summary_flag = False
if params["ENERGY_NORM_TYPE"]:
log_dir = "{}{}_{}/log_params.txt".format(params["REF_MODEL_DIR"],id_t,params["MODEL_LOG_NAME"])
l = open(log_dir, "r")
cont = l.readlines()
for line, c in enumerate(cont):
if params["ENERGY_NORM_TYPE"]:
if params["ENERGY_NORM_TYPE"] == 'standardize':
val1 = 'mean'
val2 = 'std'
elif params["ENERGY_NORM_TYPE"] == 'normalize':
val1 = 'min'
val2 = 'int'
if "energy_{}".format(val1) in c:
eval1 = [x for x in c.split() if isfloat(x)][0]
if '[' in eval1:
eval1 = eval1[1:-1]
eval1 = float(eval1)
ref_models[-1]["energy_val1"] = eval1
elif "energy_{}".format(val2) in c:
eval2 = [x for x in c.split() if isfloat(x)][0]
if '[' in eval2:
eval2 = eval2[1:-1]
eval2 = float(eval2)
ref_models[-1]["energy_val2"] = eval2
bg_val = Batch_Generator(df_val, df_sum_indices,params["BATCH_SIZE"], mode='validation')
# Losses
ref_loss = []
ref_r2_score = []
for i in range(params["N_SHOTS"]):
if params["LOSS"] == 'mse':
ref_loss.append(tf.keras.metrics.MeanSquaredError(name='ref_loss_{}'.format(i)))
else:
ref_loss.append(tf.keras.metrics.MeanAbsoluteError(name='ref_loss_{}'.format(i)))
ref_r2_score.append(tf.keras.metrics.Mean(name='ref_r2_score_{}'.format(i)))
for i in range(len(ref_loss)):
ref_loss[i].reset_states()
ref_r2_score[i].reset_states()
while bg_val.epoch < 1:
if not bg_val.step%10:
print("File: {}. Perc: {}%".format(comb, round(bg_val.step/bg_val.total_steps*100,2)))
X, Y = bg_val.get_batch()
# Reference Model
Y_P = reference_pred(ref_models,X)
for i in range(len(ref_loss)):
ref_loss[i](Y[:,i],Y_P[:,i])
ref_r2_score[i](r2_score(tf.reshape(Y[:,i],[-1]),tf.reshape(Y_P[:,i],[-1])))
r = {"comb": comb}
for i in range(params["N_SHOTS"]):
r["mse_{}".format(i)] = ref_loss[i].result().numpy()
r["r2_{}".format(i)] = ref_r2_score[i].result().numpy()
results.append(r)
print(results[-1])
print()
print(results)
if __name__ == "__main__":
main()
| picchius94/META-CONV1D | Training/evaluate_ST-Conv1D.py | evaluate_ST-Conv1D.py | py | 20,247 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "random.seed",
... |
2152538973 | import os
import sqlite3
from sqlite3.dbapi2 import Connection, Cursor
from typing import Any, AnyStr, Final, Optional
import codenotes.db.utilities.notes as notes
import codenotes.db.utilities.notes_categories as notes_categories
import codenotes.db.utilities.tasks as tasks
import codenotes.db.utilities.tasks_categories as tasks_categories
class SQLiteConnection:
"""Connection with SQLite3 class
Class has the purpouse to manage the connection with the database created with
sqlite3. Everytime the constructor is executed, it connects to the database, then
execute the SQL statements that creates the tables if they not exist. Also, this class
allows you to execute sql, commit the transactions and close the connection with
the database.
Attributes
---------
BASE_DIR: Final[AnyStr]
Root path where the __main__ is executed
DATABASE_NAME:Final[str]
Name of the database
DATABASE_PATH: Final[str]
Complete path where is the database (its getted after joinning BASE_DIR & DATABASE_NAME)
connection: Connection
Connection with the database specified in DATABASE_PATH
cursor: Cursor
Cursor created to interact with the database
"""
BASE_DIR: Final[AnyStr] = os.path.dirname(
os.path.dirname(os.path.abspath(__file__))
)
DATABASE_NAME: Final[str] = "codenotes.db"
DATABASE_PATH: Final[str] = os.path.join(BASE_DIR, DATABASE_NAME)
connection: Connection
cursor: Cursor
def __init__(self) -> None:
"""SQLiteConnection Constructor"""
self.connection = sqlite3.connect(self.DATABASE_PATH)
self.cursor = self.connection.cursor()
self.exec_sql(notes_categories.CREATE_TABLE) # Notes Category Table
self.cursor.execute(
notes_categories.INSERT_DEFAULT_CATEGORY
) # Insert Default Category
self.exec_sql(notes.CREATE_TABLE) # Notes Table
self.exec_sql(tasks_categories.CREATE_TABLE) # Task Category Table
self.cursor.execute(
tasks_categories.INSERT_DEFAULT_CATEGORY
) # Insert Default Category
self.exec_sql(tasks.CREATE_TABLE) # Tasks Table
self.connection.commit()
def exec_sql(self, sql: str, values: Optional[tuple[Any]] = None) -> Cursor:
"""Method that executes sql command
Parameters
----------
sql : str
SQL statement to be executed
values: tuple[Any]
Optional argument typo of tuple, which contains the values the sql statement requires
Returns
-------
cursor : Cursor
Method will return the cursor that the method execute returns
"""
if values is not None:
return self.cursor.execute(sql, values)
return self.cursor.execute(sql)
def commit(self) -> None:
"""Commits the current transaction"""
self.connection.commit()
def close(self) -> None:
"""Close database and cursor connection"""
self.cursor.close()
self.connection.close()
| EGAMAGZ/codenotes | codenotes/db/connection.py | connection.py | py | 3,101 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Final",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "typing.AnyStr",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
3023673855 | import os
from PIL import Image
import PIL.ImageOps
import argparse
import torchvision as V
import matplotlib.pyplot as plt
import ujson as json
import glob
import re
from . import configs
import rich
from rich.progress import track
console = rich.get_console()
def mnist_burst(dest: str, split: str, *,
mnist='mnist'):
'''
burst mnist dataset into png images
svg background is white by default in vtracer,
and hence the polygons draw the background.
We need to invert the color, so that the paths are drawing
the foreground object.
'''
assert(split in ('train', 'test'))
assert(mnist in ('mnist', 'fashion'))
if not os.path.exists(dest):
os.mkdir(dest)
if mnist == 'mnist':
data = V.datasets.MNIST(root=configs.datasets.mnist.root,
train=True if split=='train' else False,
download=True)
elif mnist == 'fashion':
data = V.datasets.FashionMNIST(root=configs.datasets.fashion.root,
train=True if split=='train' else False,
download=True)
for i, (x, y) in enumerate(data):
fpath = os.path.join(dest, '%05d-%1d.png'%(i, y))
xinv = PIL.ImageOps.invert(x)
xinv.save(fpath)
console.print(fpath)
print(x, y)
print(len(data))
def mnist_collect(src: str):
'''
collect processed data from
'''
dataset = []
files = glob.glob(os.path.join(src, '*.json'))
for file in track(files):
with open(file, 'rt') as f:
j = json.load(f)
label = re.match(r'.*/\d+-(\d).json', file).groups()[0]
j[0]['label'] = int(label)
dataset.append(j)
with open(src+'.json', 'wt') as f:
json.dump(dataset, f)
console.print('>_< done')
if __name__ == '__main__':
ag = argparse.ArgumentParser('python3 -m veccls.mnist')
ag.add_argument('action', choices=('burst', 'collect'))
ag.add_argument('-d', '--destination', default='.', help='dest directory')
ag.add_argument('-s', '--split', default='train', choices=('train', 'test'))
ag = ag.parse_args()
console.print(ag)
if ag.action == 'burst':
mnist_burst(ag.destination, ag.split)
elif ag.action == 'collect':
mnist_collect(ag.destination)
| cdluminate/MyNotes | rs/2022-veccls/veccls/mnist.py | mnist.py | py | 2,329 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rich.get_console",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_num... |
41774659941 | from scapy.all import *
from scapy.layers.dot11 import Dot11, Dot11Beacon, Dot11Elt, RadioTap, Dot11Deauth
from colorama import Fore
from string import Template
import run
class fakeAP:
fake_ap_interface = "none"
fake_ssid = "fake"
sniffer = "none"
def __init__(self, fake_ap_interface, fake_ssid, sniffer) -> None:
self.fake_ap_interface = fake_ap_interface
self.fake_ssid = fake_ssid
self.sniffer = sniffer
# Remove the build directory
os.system('rm -rf build/')
# Copy the Templates directory to a new build directory
os.system('cp -r Templates build')
# Clear port 53
os.system('systemctl disable systemd-resolved.service >/dev/null 2>&1')
os.system('systemctl stop systemd-resolved>/dev/null 2>&1')
# Modify the hostapd.conf file with the access point interface and network name
with open('build/hostapd.conf', 'r+') as f:
template = Template(f.read())
f.seek(0)
f.write(template.substitute(INTERFACE=self.fake_ap_interface, NETWORK=self.fake_ssid))
f.truncate()
# Modify the dnsmasq.conf file with the access point interface
with open('build/dnsmasq.conf', 'r+') as f:
template = Template(f.read())
f.seek(0)
f.write(template.substitute(INTERFACE=self.fake_ap_interface))
f.truncate()
# Modify the prepareAP.sh file with the access point interface
with open('build/prepareAP.sh', 'r+') as f:
template = Template(f.read())
f.seek(0)
f.write(template.substitute(INTERFACE=self.fake_ap_interface))
f.truncate()
# Modify the cleanup.sh file with the sniffer and access point interfaces
with open('build/cleanup.sh', 'r+') as f:
template = Template(f.read())
f.seek(0)
f.write(template.substitute(SNIFFER=self.sniffer, AP=self.fake_ap_interface))
f.truncate()
# AP with address 10.0.0.1 on the given interface
os.system(f"ifconfig {fake_ap_interface} up 10.0.0.1 netmask 255.255.255.0")
print(Fore.YELLOW + "[-->] setting {} with ip 10.0.0.1 netmask 255.255.255.0".format(self.fake_ap_interface))
# Clear all IP Rules
os.system('iptables --flush')
os.system('iptables --table nat --flush')
os.system('iptables --delete-chain')
os.system('iptables --table nat --delete-chain')
print(Fore.YELLOW + "[-->] Clearing all IP Rules")
# Redirect any request to the captive portal
os.system(f'iptables -t nat -A PREROUTING -i usb0 -p tcp --dport 80 -j DNAT --to-destination 10.0.0.1:80')
os.system(f'iptables -t nat -A PREROUTING -i usb0 -p tcp --dport 443 -j DNAT --to-destination 10.0.0.1:80')
print(Fore.YELLOW + "[-->] Redirecting any request to the captive portal")
# Enable internet access use the usb0 interface
os.system(f'iptables -A FORWARD --in-interface {fake_ap_interface} -j ACCEPT')
os.system(f'iptables -t nat -A POSTROUTING --out-interface usb0 -j MASQUERADE')
print(Fore.YELLOW + "[-->] Enableing internet access")
# Initial wifi interface configuration (seems to fix problems)
os.system(f'ip link set {fake_ap_interface} down')
os.system(f'ip addr flush dev {fake_ap_interface}')
os.system(f'ip link set {fake_ap_interface} up')
os.system(f'ip addr add 10.0.0.1/10 dev {fake_ap_interface}')
# Enable IP forwarding from one interface to another
os.system('echo 1 > /proc/sys/net/ipv4/ip_forward')
os.system(f'sleep 3')
print(Fore.YELLOW + "[-->] Enable IP forwarding from one interface to another")
cmd = "sudo dnsmasq -C build/dnsmasq.conf"
p = subprocess.Popen(cmd, shell=True, preexec_fn=os.setsid)
os.system(f'route add default gw 10.0.0.1')
self.start_apache()
os.system("hostapd build/hostapd.conf -B >/dev/null 2>&1")
print(Fore.GREEN + '[+] The Fake Access Point is now available using Name : {} '.format(fake_ssid))
# listen_thread = Thread(target=start_listen, daemon=True)
# listen_thread.start()
while True:
user_input = input(Fore.YELLOW + '[*] to turn off the Access Point Please press \"done\"\n\n')
if user_input == 'done':
run.exit_and_cleanup(0, 'Done! , thanks for using')
else:
print(Fore.RED + 'invalid option...')
def start_apache(self):
os.system('sudo rm -r /var/www/html/* 2>/dev/null') # delete all folders and files in this directory
os.system('sudo cp -r fake-facebook-website/* /var/www/html')
os.system('sudo chmod 777 /var/www/html/*')
os.system('sudo chmod 777 /var/www/html')
# update rules inside 000-default.conf of apache2
os.system('sudo cp -f 000-default.conf /etc/apache2/sites-enabled')
os.system('a2enmod rewrite >/dev/null 2>&1') # enable the mod_rewrite in apache
os.system('service apache2 restart >/dev/null 2>&1') # reload and restart apache2
print(Fore.YELLOW + '\n[*] appache server start successfully')
time.sleep(1) | yehonatanBar61/EvilTwin_T | fakeAP.py | fakeAP.py | py | 5,330 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "string.Template",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "string.Template",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "string.Template",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "string.Template",
... |
21687254250 | import math
import random
import keyboard
from random import choice
import pygame
import pygame.freetype
from pygame.draw import *
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.font.init()
pygame.init()
# Gravitational acceleration (pixels per frame^2)
g = 3
# Music volume and track selection (do not change)
track = 0
vol = 0.5
# Projectile spread (+- in x and +- in y)
razbros = 0
# Hit points of the targets, the bot and the player
livetarget = 3
livebot = 10
livegun = 5
# Window width and height
WIDTH = 1500
HEIGHT = 700
# Create the drawing surfaces (main screen + transparent overlay for explosions)
screen = pygame.display.set_mode((WIDTH, HEIGHT))
boomscr = pygame.Surface((WIDTH, HEIGHT), pygame.SRCALPHA)
# Fonts for text output
text2 = pygame.font.Font(None, 36)
text1 = pygame.font.Font(None, 150)
# Assorted counters
schet = 0 # score counter
schetcounter = 0 # counts thousands of points; used to summon a new bot
FPS = 60
clock = pygame.time.Clock()
tickcounter = 0 # tick counter until reload
tcglobal = 0 # global tick counter
tcshoot = 0 # remembers the time of the player's previous shot
# Lists
balls = [] # projectiles
targets = [] # passive targets
kapli = [] # droplets of the kill effect
booms = [] # explosion effect
# Flags
Ps = True # music-pause flag
Flag = True # control-mode switch; currently mouse control
finished = False # flag for leaving the main loop
kt = 5 # number of passive targets
auto = 0 # enables the automatic triple-shot firing mode
# Colors
RED = 0xFF0000
BLUE = 0x0000FF
YELLOW = 0xFFC91F
GREEN = 0x00FF00
MAGENTA = 0xFF03B8
CYAN = 0x00FFCC
BLACK = (0, 0, 0)
WHITE = 0xFFFFFF
GREY = 0x7D7D7D
GAME_COLORS = [RED, BLUE, YELLOW, GREEN, MAGENTA, CYAN]
# Music and sound effects (asset paths are Russian filenames; must stay as-is)
pulemet = pygame.mixer.Sound("Звуки/pulemet.ogg")
pulemet.set_volume(0.1)
fon1 = "Звуки/фон1.ogg"
fon2 = "Звуки/фон2.ogg"
fon3 = "Звуки/фон3.ogg"
fon4 = "Звуки/фон4.ogg"
MUSIC = [fon1, fon2, fon3, fon4]
class Ball:
    """Projectile fired by the player's gun; obeys gravity and bounces off the right and bottom walls."""
    def __init__(self, screen: pygame.Surface):
        """Ball constructor.

        Args:
            screen: surface the ball is drawn onto.

        Initial state: fixed spawn point (40, 450), zero velocity, radius 20,
        a random colour from GAME_COLORS, and a lifetime counter `live`.
        `g` is the module-level gravity constant.
        """
        self.screen = screen
        self.g = g
        self.x = 40
        self.y = 450
        self.r = random.randint(20, 20)  # randint(20, 20) always yields 20
        self.vx = 0
        self.vy = 0
        self.color = choice(GAME_COLORS)
        self.live = 30
    def move(self):
        """Advance the ball by one frame.

        Updates self.x and self.y from self.vx/self.vy, applies gravity to
        self.vy, and reflects the velocity off the right and bottom walls.
        There are no walls on the left or on top.
        """
        if self.x + self.r <= WIDTH:
            self.x = self.x + self.vx
        else:
            if self.x + self.r >= WIDTH:
                self.x = WIDTH - self.r
            self.vx = self.vx * -1
            self.x = self.x + self.vx
        if self.y + self.r <= HEIGHT:
            self.vy = self.vy + self.g
            self.y = self.y + self.vy - 1
        else:
            if self.y + self.r >= HEIGHT:
                self.y = HEIGHT - self.r
            self.vy = self.vy * -1
            self.vy = self.vy + self.g
            self.y = self.y + self.vy + 1
    def draw(self):
        # Render the ball as a filled circle on its own surface.
        pygame.draw.circle(self.screen, self.color, (self.x, self.y), self.r)
    def hittest(self, obj):
        """Return True when this ball overlaps `obj`, unless `obj` is the player's Gun or another Ball."""
        if ((obj.x - self.x) ** 2 + (obj.y - self.y) ** 2 <= (obj.r + self.r) ** 2) and type(obj) != Gun and type(
                obj) != Ball:
            return True
        else:
            return False
class BallBot(Ball):
    """Projectile fired by the enemy BotKiller; same physics as Ball, inverted friendly-fire rules."""
    def hittest(self, obj):
        # A bot's ball ignores the BotKiller itself and other bot balls,
        # so it can only hit the player's Gun, player balls and targets.
        if ((obj.x - self.x) ** 2 + (obj.y - self.y) ** 2 <= (obj.r + self.r) ** 2) and type(obj) != BotKiller and type(
                obj) != BallBot:
            return True
        else:
            return False
class Gun:
def __init__(self, screen):
self.live = livegun
self.liveconst = self.live
self.r = 10
self.screen = screen
self.f2_power = 10
self.f2_on = 0
self.an = 1
self.color = GREY
self.x = 20
self.y = 450
# vx и vy нужны для отправки в BotKiller для расчета упреждения
self.vx = 0
self.vy = 0
self.mx = 0
self.my = 0
def fire2_start(self, k):
self.f2_on = k
def fire2_end(self, px, py):
"""Выстрел мячом.
Происходит при отпускании кнопки мыши.
Начальные значения компонент скорости мяча vx и vy зависят от положения мыши.
"""
posx = px
posy = py
global balls
new_ball = Ball(self.screen)
if posx > self.x:
self.an = math.atan((-posy + self.y) / (posx - self.x))
if posx < self.x:
self.an = math.pi + math.atan((-posy + self.y) / (posx - self.x))
if posx == self.x:
self.an = math.pi / 2
new_ball.vx = self.f2_power * math.cos(self.an)
new_ball.vy = -self.f2_power * math.sin(self.an)
new_ball.x = math.cos(self.an) * (self.f2_power + 10) + self.x
new_ball.y = -(math.sin(self.an) * (self.f2_power + 10)) + self.y
balls.append(new_ball)
def fire2_end2(self, px, py):
"""Выстрел тройным мячом.
Происходит при отпускании кнопки мыши.
Начальные значения компонент скорости мяча vx и vy зависят от положения мыши.
"""
posx = px
posy = py
global balls
new_ball1 = Ball(self.screen)
new_ball2 = Ball(self.screen)
new_ball3 = Ball(self.screen)
if posx > self.x:
self.an = math.atan((-posy + self.y) / (posx - self.x))
if posx < self.x:
self.an = math.pi + math.atan((-posy + self.y) / (posx - self.x))
if posx == self.x:
self.an = math.pi / 2
new_ball1.vx = self.f2_power * math.cos(self.an)
new_ball1.vy = -self.f2_power * math.sin(self.an)
new_ball2.vx = self.f2_power * math.cos(self.an)
new_ball2.vy = -self.f2_power * math.sin(self.an)
new_ball3.vx = self.f2_power * math.cos(self.an)
new_ball3.vy = -self.f2_power * math.sin(self.an)
new_ball1.x = math.cos(self.an) * (self.f2_power + 10) + self.x
new_ball1.y = -(math.sin(self.an) * (self.f2_power + 10)) + self.y
new_ball2.x = math.cos(self.an) * (self.f2_power + 10) + self.x - (math.cos(self.an + math.pi / 2)) * 40
new_ball2.y = -(math.sin(self.an) * (self.f2_power + 10)) + self.y + math.sin(self.an + math.pi / 2) * 40
new_ball3.x = math.cos(self.an) * (self.f2_power + 10) + self.x + (math.cos(self.an + math.pi / 2)) * 40
new_ball3.y = -(math.sin(self.an) * (self.f2_power + 10)) + self.y - math.sin(self.an + math.pi / 2) * 40
balls.append(new_ball1)
balls.append(new_ball2)
balls.append(new_ball3)
self.f2_power = 10
def targetting(self, x, y):
"""Прицеливание. Зависит от положения мыши."""
xm = x
ym = y
self.mx = x
self.my = y
if xm > self.x:
self.an = math.atan((-ym + self.y) / (xm - self.x))
if xm < self.x:
self.an = math.pi + math.atan((-ym + self.y) / (xm - self.x))
if xm == self.x:
self.an = math.pi / 2
def draw(self, tickcounter):
if tickcounter >= FPS:
tick = FPS
else:
tick = tickcounter
'''Расчет полосочек хп и перезарядки, аналогично и у бота'''
line(screen, self.color, (self.x, self.y),
(math.cos(self.an) * (self.f2_power + 10) + self.x, -(math.sin(self.an) * (self.f2_power + 10)) + self.y),
width=20)
line(screen, BLUE, (self.x - 20, self.y - 30),
(self.x - 20 + int(40 * (self.live / self.liveconst)), self.y - 30),
width=4)
rect(screen, BLACK, [(self.x - 20, self.y - 32), (41, 6)], width=1)
line(screen, GREEN, (self.x - 20, self.y - 38), (self.x - 20 + int(40 * (tick / FPS)), self.y - 38), width=4)
rect(screen, BLACK, [(self.x - 20, self.y - 40), (41, 6)], width=1)
circle(screen, RED, (self.x, self.y), self.r)
def power_up(self):
'''Расчет силы выстрела и рисование прицела. '''
if self.f2_on == 0:
if self.f2_power < 100:
self.f2_power += 1
self.color = (int((100 - self.f2_power) * 2.5), 0, 0)
for i in range(1, 10):
circle(screen, RED, (
self.x + math.cos(self.an) * (self.f2_power + 10) + math.cos(self.an) * self.f2_power * i,
self.y - (math.sin(self.an) * (self.f2_power + 10)) - (
math.sin(self.an) * self.f2_power * i - (g * i ** 2) / 2)), 5)
elif self.f2_on == 1:
if self.f2_power < 100:
self.f2_power += 1
self.color = (int((100 - self.f2_power) * 2.5), 0, 0)
else:
self.color = GREY
def move(self):
'''Два типа управления.
Переключаются флагом Flag в начале программы
'''
x0 = self.x
y0 = self.y
if Flag == False:
if self.x < WIDTH:
if keyboard.is_pressed('w'):
self.y -= 5
if keyboard.is_pressed('a'):
self.x -= 5
if keyboard.is_pressed('s'):
self.y += 5
if keyboard.is_pressed('d'):
self.x += 5
else:
self.x = 40
else:
if self.x < WIDTH:
if (self.mx - self.x) ** 2 + (self.my - self.y) ** 2 >= 900:
if keyboard.is_pressed('w'):
self.x += math.cos(self.an) * 10
self.y += -(math.sin(self.an) * 10)
if keyboard.is_pressed('a'):
self.x += -(math.cos(self.an - math.pi / 2) * 10)
self.y += (math.sin(self.an - math.pi / 2) * 10)
if keyboard.is_pressed('s'):
self.x += -(math.cos(self.an) * 10)
self.y += math.sin(self.an) * 10
if keyboard.is_pressed('d'):
self.x += math.cos(self.an - math.pi / 2) * 10
self.y += -(math.sin(self.an - math.pi / 2) * 10)
else:
self.x += 0
self.y += 0
else:
self.x = 40
self.vx = self.x - x0
self.vy = self.y - y0
class BotKiller:
def __init__(self, screen):
self.live = livebot
self.liveconst = self.live
self.screen = screen
self.an = 1
self.an2 = 0
self.color = GREY
self.x = WIDTH - 20
self.y = 400
self.t = 30 # время полета снаряда в тиках, от него зависит скорость полета снаряда
self.r = 30
self.flag = False
self.xlnach = 0
self.vx = 0
self.vy = 0
self.gamerx = 0
self.gamery = 0
def fire(self ):
"""Выстрел мячом.
Происходит каждые 2 секунды
Начальные значения компонент скорости мяча vx и vy зависят от положения игрока.
"""
global balls
new_ball = BallBot(self.screen)
new_ball.vx = self.vx + random.randint(-razbros, razbros) # Разброс
new_ball.vy = self.vy + random.randint(-razbros, razbros)
new_ball.x = math.cos(self.an2) * 60 + self.x
new_ball.y = -(math.sin(self.an2) * 60) + self.y
balls.append(new_ball)
def targetting(self, obj):
"""Прицеливание. Зависит от положения игрока.
Тут также расчитываются скорости снарядов по осям и угол, на который отклоняется сама пушка
"""
self.gamerx = obj.x
self.gamery = obj.y
if obj.x > self.x:
self.an = math.atan((-obj.y + self.y) / (obj.x - self.x))
if obj.x < self.x:
self.an = math.pi + math.atan((-obj.y + self.y) / (obj.x - self.x))
if obj.x == self.x:
self.an = math.pi / 2
if self.flag == False:
self.xlnach = abs(self.x - obj.x)
self.flag = True
t = self.t * (abs(self.x - obj.x) / self.xlnach) * 1.1
if t == 0:
t = 1
self.vx = (obj.x + obj.vx * t - (math.cos(self.an2) * 60 + self.x)) / t
self.vy = (obj.y + obj.vy * t - (-(math.sin(self.an2) * 60) + self.y)) / t - (g * t) / 2
circle(screen, RED, (obj.x + obj.vx * t, obj.y + obj.vy * t), 10)
xobj = self.x + self.vx * 5
yobj = self.y + self.vy * 5
if xobj > self.x:
self.an2 = math.atan((-yobj + self.y) / (xobj - self.x))
if xobj < self.x:
self.an2 = math.pi + math.atan((-yobj + self.y) / (xobj - self.x))
if xobj == self.x:
self.an2 = math.pi / 2
def draw(self, tickcounter):
# Рисование пушки
tick = tickcounter
line(screen, self.color, (self.x, self.y),
(math.cos(self.an2) * 60 + self.x, -(math.sin(self.an2) * 60) + self.y), width=20)
line(screen, RED, (self.x - 20, self.y - 30),
(self.x - 20 + int(40 * (self.live / self.liveconst)), self.y - 30), width=4)
rect(screen, BLACK, [(self.x - 20, self.y - 32), (41, 6)], width=1)
line(screen, GREEN, (self.x - 20, self.y - 38), (self.x - 20 + int(40 * (tick / (FPS * 2))), self.y - 38),
width=4)
rect(screen, BLACK, [(self.x - 20, self.y - 40), (41, 6)], width=1)
circle(screen, BLUE, (self.x, self.y), 20)
def move(self):
if (self.gamerx - self.x) ** 2 + (self.gamery - self.y) ** 2 >= 90000:
self.x += math.cos(self.an) * 1
self.y -= math.sin(self.an) * 1
else:
self.x += 0
self.y -= 0
class Target:
    """Passive drifting target; bounces off all four walls and fades as it loses lives."""
    def __init__(self):
        self.live = livetarget  # remaining hits before the target dies
        self.rmin = 10
        self.rmax = 50
        self.r = random.randint(self.rmin, self.rmax)
        # Spawn at least 100 px away from every wall.
        self.x = random.randint(self.r + 100, WIDTH - self.r - 100)
        self.y = random.randint(self.r + 100, HEIGHT - self.r - 100)
        self.xspeed = random.randint(1, 10)
        self.yspeed = random.randint(1, 10)
        self.color = RED
        self.pr = 0  # current alpha (opacity)
        self.zt = int(255 / self.live)  # alpha decrement per lost life
    def move(self):
        """Advance one frame, reflecting the velocity off every wall."""
        if (self.x + self.r <= WIDTH) and (self.x - self.r >= 0):
            self.x = self.x + self.xspeed
        else:
            if self.x + self.r >= WIDTH:
                self.x = WIDTH - self.r
            if self.x - self.r <= 0:
                self.x = self.r
            self.xspeed = self.xspeed * -1
            self.x = self.x + self.xspeed
        if (self.y + self.r <= HEIGHT) and (self.y - self.r >= 0):
            self.y = self.y + self.yspeed
        else:
            if self.y + self.r >= HEIGHT:
                self.y = HEIGHT - self.r
            if self.y - self.r <= 0:
                self.y = self.r
            self.yspeed = self.yspeed * -1
            self.y = self.y + self.yspeed
    def draw(self):
        # Opacity is proportional to remaining lives; near-transparent
        # values are clamped to fully transparent.
        self.pr = self.zt * self.live
        if self.pr < 20:
            self.pr = 0
        circle(boomscr, (255, 0, 0, abs(self.pr)), (self.x, self.y), self.r)
        screen.blit(boomscr, (0, 0))
class KaplyaOfTarget:
    """A single "droplet" particle emitted when a target dies; falls under its own gravity."""
    def __init__(self, xx, yy, speed, color, r):
        self.g = 1  # per-particle gravity acceleration
        self.x = xx
        self.y = yy
        self.r = int(r / 5)  # droplet is a fifth of the parent target's radius
        self.color = color
        # Inherit part of the parent's horizontal speed plus random scatter.
        self.xspeed = (random.randint(-3, 3) + speed) * 1.5
        self.yspeed = random.randint(-5, 5)
    def move(self):
        """Apply gravity and advance the droplet; stop falling once it is below the screen."""
        self.yspeed = self.yspeed + self.g
        self.x = int(self.x + self.xspeed)
        self.y = int(self.y + self.yspeed)
        if self.y > HEIGHT + 10:
            self.yspeed = 0
    def draw(self):
        circle(screen, self.color, (self.x, self.y), self.r)
class Boom:
    """Expanding, fading explosion ring drawn for a fixed number of ticks."""
    def __init__(self, xx, yy, r):
        self.x = xx
        self.y = yy
        self.r = int(r / 2)  # start at half the radius of the destroyed object
        self.dr = 7  # radius growth per tick
        self.liveticks = 8  # remaining ticks before the effect is removed
        self.death = 1  # ticks consumed per draw call
        self.pr = 255  # current alpha
        self.zt = int(self.pr / (self.liveticks / self.death))  # alpha decrement per draw
    def draw(self):
        """Grow and fade the ring by one tick, then blit it onto the main screen."""
        self.r += self.dr
        self.pr -= self.zt
        if self.pr < 20:
            self.pr = 0
        self.liveticks -= self.death
        circle(boomscr, (255, 0, 0, self.pr), (self.x, self.y), self.r)
        screen.blit(boomscr, (0, 0))
gun = Gun(screen)
targets.append(gun)
for t in range(kt):
t = Target()
targets.append(t)
while not finished:
clock.tick(FPS)
tickcounter += 1
tcglobal += 1
# Создание нового бота
if schet // 1000 > schetcounter:
schetcounter += 1
bot = BotKiller(screen)
targets.append(bot)
for event in pygame.event.get():
pos = pygame.mouse.get_pos()
if event.type == pygame.QUIT:
finished = True
# При нажатии на esс игра заканчивается, при нажатии на пробел меняется флаг автоматической стрельбы
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
finished = True
elif event.key == pygame.K_SPACE:
auto = 1
elif event.key == pygame.K_UP:
if vol < 1:
vol = vol + 0.1
else:
vol = 1
pygame.mixer.music.set_volume(vol)
elif event.key == pygame.K_DOWN:
if vol > 0:
vol = vol - 0.1
else:
vol = 0
pygame.mixer.music.set_volume(vol)
elif event.key == pygame.K_RIGHT:
if track < 3:
track = track + 1
else:
track = 3
pygame.mixer.music.load(MUSIC[track])
pygame.mixer.music.play(-1)
elif event.key == pygame.K_LEFT:
if track > 0:
track = track - 1
else:
track = 0
pygame.mixer.music.load(MUSIC[track])
pygame.mixer.music.play(-1)
if event.key == pygame.K_p:
Ps = not Ps
if Ps:
pygame.mixer.music.pause()
else:
pygame.mixer.music.unpause()
elif event.type == pygame.KEYUP:
if event.key == pygame.K_SPACE:
auto = 0
# Сама автоматическая стрельба
elif auto == 1:
gun.fire2_start(1)
gun.f2_power = 100
gun.targetting(pos[0], pos[1])
if tcglobal - tcshoot >= FPS / 10:
gun.fire2_end2(pos[0], pos[1])
tcshoot = tcglobal
pulemet.play()
elif auto == 0:
gun.fire2_start(2)
gun.f2_power = 10
auto = 2
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
gun.fire2_start(0)
elif event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
if tcglobal - tcshoot >= FPS:
gun.fire2_end(pos[0], pos[1])
tcshoot = tcglobal
gun.fire2_start(2)
gun.f2_power = 10
pulemet.play()
else:
gun.fire2_start(2)
gun.f2_power = 10
else:
gun.targetting(pos[0], pos[1])
# Проверка убитых юнитов, проверка попаданий, запись новых эффектов в массивы, добавление игровых очков
for b in balls:
if b.x < -10:
balls.remove(b)
else:
b.draw()
b.move()
for t in targets:
if b.hittest(t) == True:
t.live -= 1
boom = Boom(b.x, b.y, b.r)
booms.append(boom)
if b in balls:
balls.remove(b)
k1 = random.randint(5, 10)
if t.live == 0:
if type(t) != BotKiller and type(t) != Gun:
for i in range(k1):
kaplya = KaplyaOfTarget(int(t.x), int(t.y), t.xspeed, t.color, t.r)
kapli.append(kaplya)
schet += int(100 - ((t.r - t.rmin) / (t.rmax - t.rmin)) * 100)
elif type(t) == BotKiller:
schet += 500
else:
finished = not finished
boom = Boom(t.x, t.y, t.r)
booms.append(boom)
targets.remove(t)
t = Target()
targets.append(t)
for b1 in balls:
for b2 in balls:
if b1.hittest(b2) == True:
boom = Boom((b1.x + b2.x) / 2, (b1.y + b2.y) / 2, b1.r / 2)
booms.append(boom)
balls.remove(b1)
balls.remove(b2)
# Отрисовка юнитов, стрельба ботов
for t in targets:
t.move()
if type(t) == BotKiller:
if tickcounter >= FPS * 2:
t.fire()
tickcounter = 0
pulemet.play()
else:
t.targetting(gun)
t.draw(tickcounter)
elif type(t) == Gun:
t.draw(tcglobal - tcshoot)
else:
t.draw()
for k in kapli:
if k.y > HEIGHT:
kapli.remove(k)
else:
k.draw()
k.move()
for boom in booms:
if boom.liveticks == 0:
booms.remove(boom)
else:
boom.draw()
gun.power_up()
schetv = text2.render("score:" + " " + str(schet), True, (180, 0, 0))
screen.blit(schetv, (10, 50))
pygame.display.update()
screen.fill(WHITE)
boomscr.fill(WHITE)
# Конечная заставка
finished = not finished
while not finished:
clock.tick(FPS)
for event in pygame.event.get():
pos = pygame.mouse.get_pos()
if event.type == pygame.QUIT:
finished = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
finished = True
schetv = text1.render("GAME OVER", True, (180, 0, 0))
screen.blit(schetv, (WIDTH / 2 - 350, HEIGHT / 2 - 50))
pygame.display.update()
screen.fill(WHITE)
pygame.quit()
| MrKotMatroskin/Practika_programm | Игры/Пушка/gun.py | gun.py | py | 24,603 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "pygame.mixer.pre_init",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.init",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.font",
... |
24435584276 | from PyMca5.PyMcaPhysics.xrf import FastXRFLinearFit
from XRDXRFutils.data import DataXRF, SyntheticDataXRF
import h5py
from os.path import basename, dirname, join, exists
from os import remove
class FastFit:
    """Thin wrapper around PyMca's FastXRFLinearFit.

    Runs a fast linear XRF fit on `data` using the configuration in
    `cfgfile`, writes the result images to `outputdir`/IMAGES.h5, and
    exposes the fitted element maps and the energy calibration.
    """

    def __init__(self, data=None, cfgfile=None, outputdir=None):
        # data: XRF spectra stack accepted by FastXRFLinearFit.fitMultipleSpectra
        # cfgfile: path to a PyMca fit-configuration file
        # outputdir: directory where the IMAGES.h5 output is written
        self.data = data
        self.cfgfile = cfgfile
        self.outputdir = outputdir
        self.outputRoot = "IMAGES"
        if self.outputdir:
            self.filename = join(self.outputdir, f'{self.outputRoot}.h5')

    def fit(self):
        """Run the fit and load its results.

        No-op unless data, cfgfile and outputdir are all set.
        Returns self so calls can be chained.
        """
        # `is not None` instead of the original `type(self.data) != type(None)`:
        # same semantics, idiomatic, and avoids false negatives on subclassing.
        if self.data is not None and self.cfgfile and self.outputdir:
            self.fastfit = FastXRFLinearFit.FastXRFLinearFit()
            self.fastfit.setFitConfigurationFile(self.cfgfile)
            outbuffer = self.fastfit.fitMultipleSpectra(
                y=self.data,
                weight=1,
                refit=1,
                concentrations=0,
                outputDir=self.outputdir,
                outputRoot=self.outputRoot,
                h5=True)
            # Detector energy calibration: energy = zero + gain * channel.
            zero = outbuffer['configuration']['detector']['zero']
            gain = outbuffer['configuration']['detector']['gain']
            self.parameters = (0, gain, zero)
            self._load_fit_results()
        return self

    def _load_fit_results(self):
        """Read the fitted element maps back from the IMAGES.h5 file into self.labels."""
        self.labels = {}
        with h5py.File(self.filename, 'r') as f:
            keys = f['images']['xrf_fit']['results']['parameters'].keys()
            for k in keys:
                # Skip the uncertainty datasets; keep only the parameter maps.
                if 'errors' not in k:
                    label = f['images']['xrf_fit']['results']['parameters'][k][()]
                    self.labels[k.replace('_', '-')] = label

    def get_labels(self):
        """Return the {element-line: map} dict, or None if fit() has not run."""
        if hasattr(self, "labels"):
            return self.labels

    def get_calibration_pars(self):
        """Return the (0, gain, zero) calibration tuple, or None if fit() has not run."""
        if hasattr(self, "parameters"):
            return self.parameters
| RosarioAndolina/Map2H5 | Map2H5/FastFit.py | FastFit.py | py | 1,893 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PyMca5.PyMcaPhysics.xrf.FastXRFLinearFit.FastXRFLinearFit",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PyMca5.PyMcaPhysics.xrf.FastXRFLinearFit",
"line_number": 18,
"usa... |
71331018983 | from flask import render_template, flash, redirect, url_for, request, jsonify
from datetime import datetime
from application.Utils.CLP_Algorithm.volume_maximization import volume_maximization
from application.Utils.utils import params
from flask_login import current_user, login_user, logout_user, login_required
from json import loads, dumps
from werkzeug.urls import url_parse
from application.models import User, Dispatch, Post
from application import application, db
from urllib.parse import quote
from config import base_url
@application.route('/')
@application.route('/index', methods=['GET', 'POST'])
@login_required
def index():
dispatches = Dispatch.query.filter_by(user_id=current_user.id).all()
if request.method == 'POST' and request.form.get('post') is not None:
dispatch = Dispatch.query.filter_by(name=request.form.get('savedSchemas')).first()
flash(request.form.get('savedSchemas'))
if dispatch is not None:
dispatch_id = dispatch.id
else:
dispatch_id = None
post = Post(body=request.form.get('post'), user_id=current_user.id,
username=current_user.username, dispatch_id=dispatch_id)
db.session.add(post)
db.session.commit()
params.update({'user': current_user})
posts = current_user.followed_posts().all()
print(posts)
post_params = []
for user, post, dispatch in posts:
if dispatch is not None:
post_params.append({"user_id": user.id,
"user_name": user.username,
"post_body": post.body,
"post_timestamp": post.timestamp,
"dispatch_name": dispatch.name,
"dispatch_body": url_for('results') + '/' + quote(dispatch.body.replace('$$$', '').replace('[', '/['))
})
print(post_params[-1]['dispatch_body'])
else:
post_params.append({"user_id": user.id,
"user_name": user.username,
"post_body": post.body,
"post_timestamp": post.timestamp})
params.update({'posts': dumps(post_params, default=str)})
# print(dispatches)
return render_template('index.html', params=params, dispatches=dispatches)
@application.before_request
def before_request():
if current_user.is_authenticated:
current_user.last_seen = datetime.utcnow()
db.session.commit()
@application.route('/user/<username>', methods=['GET', 'POST'])
@login_required
def user(username):
user = User.query.filter_by(username=username).first_or_404()
posts = Post.query.filter_by(user_id=user.id).all()
params = [{'author': user, 'body': post.body} for post in posts]
if request.method == 'POST':
if request.form.get('follow') is not None:
current_user.follow(user)
db.session.commit()
flash(f'User: {user.username} followed successfully')
elif request.form.get('unfollow') is not None:
current_user.unfollow(user)
db.session.commit()
flash(f'User: {user.username} unfollowed successfully')
return render_template('user.html', user=user, params=params)
@application.route('/follow/<username>')
@login_required  # fix: anonymous users have no .follow(); every other mutating route requires login
def follow(username):
    """Make the current user follow `username`, then redirect to that user's page."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash(f'user: {username} does not exist')
        return redirect(url_for('index'))
    if user == current_user:
        flash('you cannot follow yourself!')
        return redirect(url_for('user', username=username))
    current_user.follow(user)
    db.session.commit()
    flash(f'You are now following {username}!')
    return redirect(url_for('user', username=username))
@application.route('/unfollow/<username>')  # fix: route typo, was '/unfolow/...' (url_for uses the endpoint name, so links still work)
@login_required  # fix: anonymous users have no .unfollow(); matches the other mutating routes
def unfollow(username):
    """Make the current user unfollow `username`, then redirect appropriately."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash(f'User: {username} does not exist!')
        return redirect(url_for('index'))
    if username == current_user.username:
        flash('You cannot unfollow yourself!')
        return redirect(url_for('user', username=username))
    else:
        current_user.unfollow(user)
        db.session.commit()
        flash(f'You are no longer following {username}')
        return redirect(url_for('user', username=username))
@application.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
if request.method == "POST":
if User.query.filter_by(username=request.form.get('username')).first() is not None:
flash(f"Username: {request.form.get('username')} is already taken!")
elif request.form.get('username') == current_user.username:
flash("Your username did not change!")
elif len(request.form.get('username')) == 0:
flash("Empty username not allowed")
else:
current_user.username = request.form.get('username')
current_user.about_me = request.form.get('about_me')
db.session.commit()
flash("Changes executed successfully!")
# TODO: Mejorar lógica para poder cambiar solo un atributo
return render_template('edit_profile.html', title='Edit Prodile')
@application.route('/container', methods=['POST', 'GET'])
@login_required
def container():
dispatches = Dispatch.query.filter_by(user_id=current_user.id)
if request.method == "POST" and request.form.get('submitnew') is not None:
flash('new')
return redirect(url_for('boxes', containerX=request.form.get('containerX'),
containerY=request.form.get('containerY'), containerZ=request.form.get('containerZ')))
elif request.method == "POST" and request.form.get('submitload') is not None:
containerParams, boxesParams = request.form.get('savedSchemas').split('$$$')
return redirect(url_for('results', container_params=containerParams, boxes_params=boxesParams))
return render_template('container.html', dispatches=dispatches)
# TODO: Save info to pass to algorithm
@application.route('/boxes/<containerX>/<containerY>/<containerZ>', methods=['GET', 'POST'])
@login_required
def boxes(containerX, containerY, containerZ):
params = {"containerX": containerX,
"containerY": containerY,
"containerZ": containerZ}
if request.method == 'POST' and request.form.get('submit') is not None:
containerParams = {"x1": 0,
"y1": 0,
"z1": 0,
"x2": float(containerX),
"y2": float(containerY),
"z2": float(containerZ)}
results = request.form.to_dict()
boxesParams = [{} for i in range(int(results.get('num_items')))]
for key, itm in results.items():
if key != "num_items" and key != 'submit':
key_, idx = key.split("-")
if key[:9] == "num_items":
boxesParams[int(idx)].update({key_: int(itm)})
else:
boxesParams[int(idx)].update({key_: float(itm)})
containerParams = dumps(containerParams)
boxesParams = dumps(boxesParams)
# flash(containerParams)
# flash(boxesParams)
return redirect(url_for('results', container_params=containerParams, boxes_params=boxesParams))
elif request.method == 'POST' and request.form.get('updateContainer') is not None:
return redirect(url_for('boxes', containerX=request.form.get('contXupdate'),
containerY=request.form.get('contYupdate'), containerZ=request.form.get('contZupdate')))
return render_template('boxes.html', params=params)
# TODO: Change orientations?
@application.route('/results/<container_params>/<boxes_params>', methods=['GET', 'POST'])
@login_required
def results(container_params, boxes_params):
if request.method == "POST":
favorite = Dispatch(name=request.form.get('fav-name'), description=request.form.get('fav-description'),
body=container_params + '$$$' + boxes_params, user_id=current_user.id)
db.session.add(favorite)
db.session.commit()
params = {'container_params': container_params,
'boxes_params': boxes_params}
allocated_list, utilization, container, allocated_json = volume_maximization(problem_params=loads(boxes_params),
container_params=loads(
container_params))
# flash(allocated_json)
total_boxes = 400
# max_iter = max(allocated_list, key=lambda x: x.iteration).iteration
max_iter = 3
return render_template('results.html', allocated_list=allocated_list, utilization=utilization, container=container,
total_boxes=total_boxes, boxes_params=boxes_params, params=params,
allocated_list_json=allocated_json,
max_iter=max_iter)
# Login Logic
@application.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
if request.method == 'POST':
user = User.query.filter_by(username=request.form.get('username')).first()
if user is None or not user.check_password(request.form.get('password')):
flash('Invalid username or password')
return redirect(url_for('login'))
login_user(user)
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('index')
return redirect(next_page)
return render_template('login.html', title='Sign In')
@application.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('index'))
@application.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('index'))
if request.method == 'POST':
user = User(username=request.form.get('username'), email=request.form.get('email'))
user.set_password(request.form.get('password1'))
db.session.add(user)
db.session.commit()
return redirect(url_for('login'))
return render_template('registration.html', title='Register')
@application.route('/search', methods=['GET', 'POST'])
@login_required
def search():
if request.method == 'POST' and request.form.get('search-value'):
users = User.query.filter(User.username.like(f"%{request.form.get('search-value')}%"))
return render_template('search.html', users=users.all(), search_value=request.form.get('search-value'))
return redirect(url_for('index'))
| carlosdonado10/CLP_flask_app | application/routes.py | routes.py | py | 10,983 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "application.models.Dispatch.query.filter_by",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "application.models.Dispatch.query",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "application.models.Dispatch",
"line_number": 18,
"usag... |
72515318503 |
# Predict water positions
import time
import sys # To use sys.argv
import mdtraj
import numpy as np
import math
def BWHB(x1, x2, print_dist=False):  # Bridge Water Hydrogen Bond
    """Score a bridge-water hydrogen bond between two coordinate arrays (nm).

    Positions are converted to Angstroms; the distance is passed through a
    rational switching function centred at 2.6 A and normalised by 0.58 so
    that an ideal contact scores ~1.
    """
    delta = (x1 * 10) - (x2 * 10)  # nm -> Angstrom
    dist = np.sqrt((delta ** 2).sum())
    if print_dist:
        print(f'{dist:.3f}', end=' \t')
    return 1 / (1 + (dist / 2.6) ** 6) / 0.58
def rotation3D(v, axis, degrees):
    """Rotate vector `v` counterclockwise about `axis` by `degrees`.

    Implements the Euler-Rodrigues formula:
    https://stackoverflow.com/questions/6802577/rotation-of-3d-vector
    """
    u = np.asarray(axis)
    u = u / math.sqrt(np.dot(u, u))  # unit rotation axis
    half = (degrees * math.pi / 180) / 2.0
    a = math.cos(half)
    b, c, d = -u * math.sin(half)
    rot = np.array([
        [a * a + b * b - c * c - d * d, 2 * (b * c + a * d), 2 * (b * d - a * c)],
        [2 * (b * c - a * d), a * a + c * c - b * b - d * d, 2 * (c * d + a * b)],
        [2 * (b * d + a * c), 2 * (c * d - a * b), a * a + d * d - b * b - c * c],
    ])
    return np.dot(rot, v)
def calcAngle(A, B, C):
    """Return (angle A-B-C in degrees, vector B->A, vector B->C)."""
    ba = np.array(A) - np.array(B)
    bc = np.array(C) - np.array(B)
    cos_angle = np.dot(ba, bc) / np.linalg.norm(ba) / np.linalg.norm(bc)
    angle_deg = np.arccos(cos_angle) * 180 / np.pi
    return angle_deg, ba, bc
def prepVec(v, length):
    """Return `v` normalised to unit length, flattened to 1-D, and scaled to `length`.

    Bug fix: the original called v.flatten() without assigning the result.
    ndarray.flatten returns a copy, so the call was a no-op and column
    vectors came back un-flattened; the result is now assigned back.
    """
    v = v / np.linalg.norm(v)
    v = v.flatten()
    return v * length
def outputMockPDB(fname, p, e='X'):
    """Write point positions `p` (in nm) to `fname` as a minimal mock PDB file.

    Each point becomes one fixed-width ATOM record with element/name `e`;
    coordinates are converted nm -> Angstrom (x10). The file ends with END.
    """
    with open(fname,'w') as f:
        # f.write(f'{len(DOL)}\n\n')
        for ii in range(len(p)):
            # Reference record layout:
            # ATOM      1  N   PRO A   1       8.316  21.206  21.530  1.00 17.44           N
            f.write(f'ATOM    {ii+1:3d}  {e}   {e*3} A   1    {p[ii][0]*10:8.3f}{p[ii][1]*10:8.3f}{p[ii][2]*10:8.3f}  0.00  0.00 \n')
        f.write('END\n')
def outputMolecularMockPDB(fname, p, e=None, resname='HOH'):
    """Write molecular positions `p` (N_mol x N_atom x 3, in nm) as a mock PDB file.

    `e` is the per-atom element list (length N_atom); each molecule gets its
    own residue number. Coordinates are converted nm -> Angstrom (x10).
    """
    assert len(e) == len(p[0]), "Input: p as a N_mol * N_atom * 3 matrix, e as a list of elements with length N_atom"
    resname += ' '*3  # Pad so resname[:3] below is always exactly 3 chars
    e2 = [x + ' '*3 for x in e]  # pad element names the same way
    with open(fname,'w') as f:
        # f.write(f'{len(DOL)}\n\n')
        for jj in range(len(p)):
            for ii in range(len(p[jj])):
                # Fixed-width ATOM record; the serial number runs over all
                # atoms, the residue number jj identifies the molecule.
                f.write(f'ATOM    {jj*len(p[jj])+ii+1:3d}  {e2[ii][:3]} {resname[:3]} A{jj:4d}    {p[jj][ii][0]*10:8.3f}{p[jj][ii][1]*10:8.3f}{p[jj][ii][2]*10:8.3f}  0.00  0.00 \n')
        f.write('END\n')
def calcWater(inItem, outFile=None, outResname='HOH', printTiming=False, printResult=False, processFrame=0, returnNumWater=False, returnHBscore=False):
    """Predict and place bridging water molecules for a structure.

    Pipeline per frame: classify every atom's H-bond role from its
    bonded neighbours, project candidate donor/acceptor sites out from
    those atoms, pair nearby sites into candidate water-oxygen
    positions, reject sterically clashing or geometrically poor
    candidates, de-duplicate, and optionally write the survivors to a
    PDB file and/or score ligand-bridging H-bonds.

    :param inItem: path to a PDB file, or an mdtraj.Trajectory
    :param outFile: if given, write placed waters to this PDB path
    :param outResname: residue name for the output water records
    :param printTiming: print wall-clock timing of each stage
    :param printResult: print the number of waters placed per frame
    :param processFrame: an int frame index, a list of indices, or -1 for all frames
    :param returnNumWater: if True, return a per-frame list of water counts
    :param returnHBscore: if True, return a per-frame list of ligand H-bond
        scores (ignored if returnNumWater is also True — that returns first)
    :return: numWater list, HB_score_list, or None
    """
    # processFrame takes a number, a list of numbers, or -1 for all frames
    if printTiming:
        t0 = time.time()
    if type(inItem) == str:
        comp = mdtraj.load(inItem)
    if type(inItem) == mdtraj.Trajectory:
        comp = inItem
    if printTiming:
        t1 = time.time()
        print(f'Load pdb: {t1-t0:.3f} s')
        t0 = time.time()
    protein_residue_list = [
        'ALA', 'ARG', 'ASN', 'ASP', 'CYS',
        'GLN', 'GLU', 'GLY', 'HIS', 'ILE',
        'LEU', 'LYS', 'MET', 'PHE', 'PRO',
        'SER', 'THR', 'TYR', 'VAL', 'TRP',
        'ACE', 'NME', 'HID', 'HIE', 'HIP', 'WAT', 'HOH', 'TIP3'] # For additional caps and protonation states of HIS
    atom_vdW_radii = { # in nanometers
        'H': 0.107,
        'C': 0.170,
        'N': 0.155,
        'O': 0.152,
        'F': 0.147,
        'S': 0.180,
        'P': 0.180,
        'Cl': 0.175,
        'Br': 0.185,
        'I': 0.198
    }
    # Per-atom bookkeeping, all keyed by mdtraj atom index.
    atom_list = {}         # atom index -> element symbol
    bond_to_symbol = {}    # atom index -> sorted element symbols of bonded neighbours
    bond_to_index = {}     # atom index -> neighbour indices, same order as symbols
    atom_type = {}         # atom index -> short H-bond role code (e.g. 'OH', 'cCO')
    atom_in_residue = {}   # atom index -> residue name
    NOS_index = []         # indices of N/O/S atoms (potential H-bond partners)
    H_index = []           # indices of hydrogens
    ALL_vdW_radii = []     # vdW radius per atom, in atom-index order
    for atom in comp.top.atoms:
        # print(atom.index, atom)
        atom_list[atom.index] = atom.element.symbol
        atom_in_residue[atom.index] = atom.residue.name
        bond_to_symbol[atom.index] = []
        bond_to_index[atom.index] = []
        atom_type[atom.index] = []
        ALL_vdW_radii.append(atom_vdW_radii[atom.element.symbol])
        if atom.element.symbol in ['N','O','S']:
            NOS_index.append(atom.index)
        elif atom.element.symbol == 'H':
            H_index.append(atom.index)
    NOS_index = np.array(NOS_index)
    H_index = np.array(H_index)
    ALL_vdW_radii = np.array(ALL_vdW_radii)
    for bond in comp.top.bonds:
        bond_to_symbol[bond.atom1.index].append(bond.atom2.element.symbol)
        bond_to_symbol[bond.atom2.index].append(bond.atom1.element.symbol)
        bond_to_index[bond.atom1.index].append(bond.atom2.index)
        bond_to_index[bond.atom2.index].append(bond.atom1.index)
    # Sort each neighbour list alphabetically by element so the bonded
    # signature can be compared against canonical patterns like ['C','H'].
    for ii in range(len(atom_list)):
        bond_to_symbol[ii] = np.array(bond_to_symbol[ii])
        bond_to_index[ii] = np.array(bond_to_index[ii])
        sort_order = np.argsort(bond_to_symbol[ii])
        if len(sort_order) > 1:
            # print(sort_order)
            bond_to_symbol[ii] = bond_to_symbol[ii][sort_order]
            bond_to_index[ii] = bond_to_index[ii][sort_order]
        # print(ii, atom_list[ii], bond_to_symbol[ii], bond_to_index[ii])
        bond_to_symbol[ii] = list(bond_to_symbol[ii])
        bond_to_index[ii] = list(bond_to_index[ii])
    # Determine its atom type from bonds
    # NOTE: each branch first assigns a human-readable label and then
    # immediately overwrites it with the short code used downstream;
    # the first assignment only serves as in-line documentation.
    for ii in range(len(atom_list)):
        if atom_in_residue[ii] in protein_residue_list: # this is protein atom, treat with protein rules
            if atom_list[ii] == 'H':
                if bond_to_symbol[ii] == ['O']: # This is a hydroxyl H, generate donor
                    atom_type[ii] = 'Hydroxyl H'
                    atom_type[ii] = 'OH'
                elif bond_to_symbol[ii] == ['N']: # This is amino H, generate donor
                    atom_type[ii] = 'Amino H'
                    atom_type[ii] = 'NH'
                elif bond_to_symbol[ii] == ['C']: # This is aliphatic H
                    atom_type[ii] = 'Aliphatic H'
                    atom_type[ii] = 'CH'
                elif bond_to_symbol[ii] == ['S']: # This is thiol H, generate donor
                    atom_type[ii] = 'Thiol H'
                    atom_type[ii] = 'SH'
            elif atom_list[ii] == 'O':
                if bond_to_symbol[ii] == ['C']: # This is carbonyl, generate two acceptor
                    atom_type[ii] = 'Carbonyl O'
                    atom_type[ii] = 'CO'
                    if bond_to_symbol[bond_to_index[ii][0]] == ['C','O','O']: # Carboxylate
                        atom_type[ii] = 'Carboxylate O'
                        atom_type[ii] = 'cCO'
                elif bond_to_symbol[ii] == ['C','H']: # This is hydroxyl O, generate two acceptor, one donor
                    atom_type[ii] = 'Hydroxyl O'
                    atom_type[ii] = 'CHO'
                elif bond_to_symbol[ii] == ['H','H']: # This is water O, generate two acceptor, two donor
                    atom_type[ii] = 'Water O'
                    atom_type[ii] = 'HHO'
            elif atom_list[ii] == 'N':
                if bond_to_symbol[ii] == ['C','C','H']: # This is either the amide N (one donor), NME N, HIS N
                    atom_type[ii] = 'Amino N 1 donor'
                    atom_type[ii] = 'CCHN'
                elif bond_to_symbol[ii] == ['C','H','H']: # This is ASN N, GLN N, or ARG N. Generate 2 donors each
                    atom_type[ii] = 'Amino N 2'
                    atom_type[ii] = 'CCHN'
                elif bond_to_symbol[ii] == ['C','C']: # This is imine N (in Histidine and heterocycle cpds), one acceptor
                    atom_type[ii] = 'Imine N'
                    atom_type[ii] = 'CCN'
                elif bond_to_symbol[ii] == ['C','C','C']: # This is proline N. It has no H-bond
                    atom_type[ii] = 'Proline N'
                    atom_type[ii] = 'CCCN'
            elif atom_list[ii] == 'S':
                if bond_to_symbol[ii] == ['C','H']: # This is thiol S, maybe generate acceptors??
                    atom_type[ii] = 'Thiol S'
                    atom_type[ii] = 'CHS'
                elif bond_to_symbol[ii] == ['C','C']: # This is thioester S, maybe generate acceptors??
                    atom_type[ii] = 'Thioester S'
                    atom_type[ii] = 'CCS'
            # if atom_list[ii] != 'C' and atom_type[ii] == []:
            #     print(ii, atom_list[ii], atom_type[ii], bond_to_symbol[ii])
        else: # Ligand residues
            if atom_list[ii] == 'H':
                if bond_to_symbol[ii] == ['O']: # This is a hydroxyl H, generate donor
                    atom_type[ii] = 'Hydroxyl H'
                    atom_type[ii] = 'OH'
                elif bond_to_symbol[ii] == ['N']: # This is amino H, generate donor
                    atom_type[ii] = 'Amino H'
                    atom_type[ii] = 'NH'
                elif bond_to_symbol[ii] == ['C']: # This is aliphatic H
                    atom_type[ii] = 'Aliphatic H'
                    atom_type[ii] = 'CH'
                elif bond_to_symbol[ii] == ['S']: # This is thiol H, generate donor
                    atom_type[ii] = 'Thiol H'
                    atom_type[ii] = 'SH'
                elif bond_to_symbol[ii] == ['P']: # This is phosphine H, generate donor
                    atom_type[ii] = 'Phosphine H'
                    atom_type[ii] = 'PH'
            elif atom_list[ii] == 'O':
                if bond_to_symbol[ii] == ['C']: # This is carbonyl, generate two acceptor
                    atom_type[ii] = 'Carbonyl O'
                    atom_type[ii] = 'CO'
                    if bond_to_symbol[bond_to_index[ii][0]] == ['C','O','O']: # Carboxylate group
                        atom_type[ii] = 'Carboxylate O'
                        atom_type[ii] = 'cCO'
                if bond_to_symbol[ii] == ['N']: # This is nitro, generate one acceptor
                    atom_type[ii] = 'Nitro O'
                    atom_type[ii] = 'NO'
                elif bond_to_symbol[ii] == ['S']: # This is carbonyl, generate two acceptor
                    atom_type[ii] = 'Sulfonyl / Sulfoxide O'
                    atom_type[ii] = 'SO'
                elif bond_to_symbol[ii] == ['P']: # This is carbonyl, generate two acceptor
                    atom_type[ii] = 'Phosphine oxide / Phosphone O'
                    atom_type[ii] = 'PO'
                elif bond_to_symbol[ii] == ['C','H']: # This is hydroxyl O, generate two acceptor, one donor
                    atom_type[ii] = 'Hydroxyl O'
                    atom_type[ii] = 'CHO'
                elif bond_to_symbol[ii] == ['C','C']: # This is ether, generate one acceptor
                    atom_type[ii] = 'Ether O'
                    atom_type[ii] = 'CCO'
                elif bond_to_symbol[ii] == ['H','H']: # This is water O, generate two acceptor, two donor
                    atom_type[ii] = 'Water O'
                    atom_type[ii] = 'HHO'
                elif bond_to_symbol[ii] == ['H','O']: # This is water O, generate two acceptor, two donor
                    atom_type[ii] = 'Peroxide O'
                    atom_type[ii] = 'HOO'
                elif bond_to_symbol[ii] == ['C','O']: # This is water O, generate two acceptor, two donor
                    atom_type[ii] = 'Peroxide O'
                    atom_type[ii] = 'COO'
            elif atom_list[ii] == 'N':
                if bond_to_symbol[ii] == ['C']: # Cyanate / nitrile. Nitrile is an acceptor but cyanate is not
                    atom_type[ii] = 'Nitrile N' # Default is nitrile unless the connecting C bonds to an O
                    atom_type[ii] = 'nCN'
                    for temp in bond_to_index[ii]:
                        if bond_to_symbol[temp] == 'O':
                            atom_type[ii] = 'Cyanate N' # in which case this is an cyanate
                            atom_type[ii] = 'cCN'
                if bond_to_symbol[ii] == ['N']: # Azide
                    atom_type[ii] = 'Azide N, acceptor'
                    atom_type[ii] = 'NN'
                if bond_to_symbol[ii] == ['C','C']: # Isonitrile / sp2 nitrogen
                    atom_type[ii] = 'sp2 N acceptor' # Default is sp2 nitroten
                    atom_type[ii] = 'aCCN'
                    for temp in bond_to_index[ii]:
                        if len(bond_to_symbol[temp]) == 1:
                            atom_type[ii] = 'Isonitrile N' # If there's an sp carbon, than this is an isonitrile N
                            atom_type[ii] = 'iCCN'
                if bond_to_symbol[ii] == ['C','H']: # Primary ketimine / aldimine
                    atom_type[ii] = 'Ketimine / aldimine N'
                    atom_type[ii] = 'CHN'
                if bond_to_symbol[ii] == ['C','N']: # Azo / Azide
                    atom_type[ii] = 'Azo / Azide starting N'
                    atom_type[ii] = 'CNN'
                if bond_to_symbol[ii] == ['C','O']: # Nitroso
                    atom_type[ii] = 'Nitroso / Oxime N'
                    atom_type[ii] = 'CON'
                if bond_to_symbol[ii] == ['N','N']: # Azide
                    atom_type[ii] = 'Azide N, base of acceptor'
                    atom_type[ii] = 'NNN'
                if bond_to_symbol[ii] == ['O','O']: # Nitrite
                    atom_type[ii] = 'Nitrite N'
                    atom_type[ii] = 'OON'
                if bond_to_symbol[ii] == ['C','C','C']: #
                    atom_type[ii] = 'sp3 N'
                    atom_type[ii] = 'aCCCN'
                    for temp in bond_to_index[ii]:
                        if len(bond_to_symbol[temp]) == 3:
                            atom_type[ii] = 'sp2 N'
                            atom_type[ii] = 'iCCCN'
                if bond_to_symbol[ii] == ['C','C','H']: #
                    atom_type[ii] = 'sp3 N'
                    atom_type[ii] = 'adCCHN'
                    for temp in bond_to_index[ii]:
                        if len(bond_to_symbol[temp]) == 3:
                            atom_type[ii] = 'sp2 N donor'
                            atom_type[ii] = 'dCCHN'
                if bond_to_symbol[ii] == ['C','H','H']: #
                    atom_type[ii] = 'sp3 N'
                    atom_type[ii] = 'adCHHN'
                    for temp in bond_to_index[ii]:
                        if len(bond_to_symbol[temp]) == 3:
                            atom_type[ii] = 'sp2 N donors'
                            atom_type[ii] = 'dCHHN'
                if bond_to_symbol[ii] == ['H','H','H']: #
                    atom_type[ii] = 'Ammonia N, donors + acceptors'
                    atom_type[ii] = 'HHHN'
                if bond_to_symbol[ii] == ['C','O','O']: #
                    atom_type[ii] = 'Nitro N'
                    atom_type[ii] = 'COON'
                if bond_to_symbol[ii] == ['O','O','O']: #
                    atom_type[ii] = 'Nitrate / Nitric acid'
                    atom_type[ii] = 'OOON'
                if bond_to_symbol[ii] == ['C','C','C','C']: #
                    atom_type[ii] = '4 deg ammonium'
                    atom_type[ii] = 'CCCCN'
                if bond_to_symbol[ii] == ['H','H','H','H']: #
                    atom_type[ii] = 'Ammonium'
                    atom_type[ii] = 'HHHHN'
            elif atom_list[ii] == 'S':
                if bond_to_symbol[ii] == ['C']: # This is thioketone, generate two acceptor
                    atom_type[ii] = 'Thioketone S / Isothiocyanite S / Thial S '
                    atom_type[ii] = 'CS'
                elif bond_to_symbol[ii] == ['C','H']: # This is thiol S, maybe generate acceptors??
                    atom_type[ii] = 'Thiol S'
                    atom_type[ii] = 'CHS'
                elif bond_to_symbol[ii] == ['C','C']: # This is thioester S, maybe generate acceptors??
                    atom_type[ii] = 'Thioether S / Thiocyanate S'
                    atom_type[ii] = 'CCS'
                elif bond_to_symbol[ii] == ['C','S']: # Disulfide
                    atom_type[ii] = 'Disulfide S'
                    atom_type[ii] = 'CSS'
                elif bond_to_symbol[ii] == ['C','C','O']: # Sulfoxide
                    atom_type[ii] = 'Sulfoxide S'
                    atom_type[ii] = 'CCOS'
                elif bond_to_symbol[ii] == ['C','C','O','O']: # Sulfone
                    atom_type[ii] = 'Sulfone S'
                    atom_type[ii] = 'CCOOS'
                elif bond_to_symbol[ii] == ['C','O','O']: # Sulfinic acid
                    atom_type[ii] = 'Sulfinic acid S'
                    atom_type[ii] = 'COOS'
                elif bond_to_symbol[ii] == ['C','O','O','O']: # Sulfonic acid / Ester
                    atom_type[ii] = 'Sulfonic acid / ester S'
                    atom_type[ii] = 'COOOS'
            elif atom_list[ii] == 'P':
                if bond_to_symbol[ii] == ['C','C','C']:
                    atom_type[ii] = 'CCCP'
                if bond_to_symbol[ii] == ['C','C','H']:
                    atom_type[ii] = 'CCCP'
                if bond_to_symbol[ii] == ['C','H','H']:
                    atom_type[ii] = 'CCCP'
                if bond_to_symbol[ii] == ['H','O','O','O']:
                    atom_type[ii] = 'HOOOP'
                if bond_to_symbol[ii] == ['O','O','O','O']:
                    atom_type[ii] = 'OOOOP'
    if printTiming:
        t1 = time.time()
        print(f'Parse atom types: {t1-t0:.3f} s')
        t0 = time.time()
    if processFrame == -1:
        frames = range(len(comp))
    elif type(processFrame) == int:
        frames = [processFrame]
    elif type(processFrame) == list:
        frames = processFrame
    # ---- per-frame water placement ----
    for frame in frames:
        NOS_coordinate = []
        H_coordinate = []
        ALL_coordinate = comp.xyz[frame]
        NOS_coordinate = comp.xyz[frame][NOS_index]
        # print(NOS_coordinate.shape)
        H_coordinate = comp.xyz[frame][H_index]
        # NOS_coordinate = np.array(NOS_coordinate)
        # H_coordinate = np.array(H_coordinate)
        # Donor generation: project a virtual water-oxygen site out along
        # each donor X-H bond (distances in nm, per donor element).
        DOL = [] # Donor Oxygen Location
        DOL_index = []
        for ii in range(len(atom_list)):
            if atom_type[ii] in ['OH', 'NH', 'SH']:
                vec = comp.xyz[frame][ii] - comp.xyz[frame][bond_to_index[ii][0]]
                vec /= np.linalg.norm(vec)
                vec = vec.flatten()
                if atom_type[ii] == 'OH':
                    vec *= 0.18
                elif atom_type[ii] == 'NH':
                    vec *= 0.2
                elif atom_type[ii] == 'SH':
                    vec *= 0.245 # Temporary
                DOL.append(comp.xyz[frame][ii] + vec)
                DOL_index.append(ii)
        # print(np.array(DOL))
        DOL = np.array(DOL)
        DOL_index = np.array(DOL_index)
        # # Write out a test xyz
        # with open('test_donor.pdb','w') as f:
        #     # f.write(f'{len(DOL)}\n\n')
        #     for ii in range(len(DOL)):
        #         # ATOM 1 N PRO A 1 8.316 21.206 21.530 1.00 17.44 N
        #         f.write(f'ATOM {ii+1:3d} X XXX A{ii:4d} {DOL[ii][0]*10:8.3f}{DOL[ii][1]*10:8.3f}{DOL[ii][2]*10:8.3f} 0.00 0.00 \n')
        #     f.write('END\n')
        #
        # Acceptor generation: project virtual water-hydrogen sites out
        # from each acceptor along lone-pair directions.
        counter = 0
        AHL = [] # Acceptor hydrogen Location
        AHL_index = []
        for ii in range(len(atom_list)):
            if atom_type[ii] in ['CO', 'cCO']: # Carbonyl
                # counter += 1
                vec1 = comp.xyz[frame][ii] - comp.xyz[frame][bond_to_index[ii][0]]
                # print(ii)
                # print(bond_to_index[bond_to_index[ii][0]])
                for temp in bond_to_index[bond_to_index[ii][0]]:
                    if temp != ii:
                        vec2 = comp.xyz[frame][temp] - comp.xyz[frame][bond_to_index[ii][0]]
                        rot_axis = np.cross(vec1.flatten(), vec2.flatten())
                        # print(rot_axis)
                        break
                vec = rotation3D(vec1, rot_axis, 60)
                AHL.append(comp.xyz[frame][ii] + prepVec(vec, 0.194))
                AHL_index.append(ii)
                vec = rotation3D(vec1, rot_axis, -60)
                AHL.append(comp.xyz[frame][ii] + prepVec(vec, 0.194))
                AHL_index.append(ii)
            if atom_type[ii] in ['NO']: # Nitro, very similar to CO
                # counter += 1
                vec1 = comp.xyz[frame][ii] - comp.xyz[frame][bond_to_index[ii][0]]
                # print(ii)
                # print(bond_to_index[bond_to_index[ii][0]])
                for temp in bond_to_index[bond_to_index[ii][0]]:
                    if temp != ii:
                        vec2 = comp.xyz[frame][temp] - comp.xyz[frame][bond_to_index[ii][0]]
                        rot_axis = np.cross(vec1.flatten(), vec2.flatten())
                        # print(rot_axis)
                        break
                vec = rotation3D(vec1, rot_axis, 60)
                AHL.append(comp.xyz[frame][ii] + prepVec(vec, 0.233))
                AHL_index.append(ii)
                vec = rotation3D(vec1, rot_axis, -60)
                AHL.append(comp.xyz[frame][ii] + prepVec(vec, 0.233))
                AHL_index.append(ii)
            # if atom_type[ii] in ['cCO']: # Carboxylate
            #     vec = comp.xyz[frame][bond_to_index[ii][0]] - comp.xyz[frame][bond_to_index[bond_to_index[ii][0]][0]]
            #     AHL.append(comp.xyz[frame][ii] + prepVec(vec, 0.192))
            #     AHL_index.append(ii)
            if atom_type[ii] in ['CHO']: # Hydroxyl
                vec1 = comp.xyz[frame][bond_to_index[ii][1]] - comp.xyz[frame][ii] # OH
                vec2 = comp.xyz[frame][ii] - comp.xyz[frame][bond_to_index[ii][0]] # CH
                vec = rotation3D(vec1, vec2, 180)
                AHL.append(comp.xyz[frame][ii] + prepVec(vec, 0.203))
                AHL_index.append(ii)
            if (atom_type[ii] in ['aCCN','CCN']): # mostly histidine and nitrogens in heterocycles
                counter += 1
                # vec = comp.xyz[frame][ii] - comp.xyz[frame][bond_to_index[ii]]
                # print(ii)
                # print(bond_to_index[bond_to_index[ii][0]])
                avgCC = np.mean(comp.xyz[frame][bond_to_index[ii]],axis=0)
                vec = comp.xyz[frame][ii] - avgCC
                AHL.append(comp.xyz[frame][ii] + prepVec(vec, 0.21))
                AHL_index.append(ii)
            if atom_type[ii] in ['CCO']: # Ether
                counter += 1
                # vec = comp.xyz[frame][ii] - comp.xyz[frame][bond_to_index[ii]]
                # print(ii)
                # print(bond_to_index[bond_to_index[ii][0]])
                avgCC = np.mean(comp.xyz[frame][bond_to_index[ii]],axis=0)
                vec = comp.xyz[frame][ii] - avgCC
                AHL.append(comp.xyz[frame][ii] + prepVec(vec, 0.21))
                AHL_index.append(ii)
            if atom_type[ii] in ['SO']: # Sulfoxide
                vec = comp.xyz[frame][ii] - comp.xyz[frame][bond_to_index[ii][0]]
                AHL.append(comp.xyz[frame][ii] + prepVec(vec, 0.196))
                AHL_index.append(ii)
            if atom_type[ii] in ['PO']: # Phosphine
                vec = comp.xyz[frame][ii] - comp.xyz[frame][bond_to_index[ii][0]]
                AHL.append(comp.xyz[frame][ii] + prepVec(vec, 0.188))
                AHL_index.append(ii)
            if atom_type[ii] in ['nCN']: # Nitrile
                vec = comp.xyz[frame][ii] - comp.xyz[frame][bond_to_index[ii][0]]
                AHL.append(comp.xyz[frame][ii] + prepVec(vec, 0.21))
                AHL_index.append(ii)
            if atom_type[ii] in ['aCCCN', 'adCCHN', 'adCHHN', 'HHHN']: # Various acceptors for amines
                avgCC = np.mean(comp.xyz[frame][bond_to_index[ii]],axis=0)
                vec = comp.xyz[frame][ii] - avgCC
                AHL.append(comp.xyz[frame][ii] + prepVec(vec, 0.213))
                AHL_index.append(ii)
        AHL = np.array(AHL)
        AHL_index = np.array(AHL_index)
        # # Write out a test xyz
        # with open('test_acceptor.pdb','w') as f:
        #     # f.write(f'{len(DOL)}\n\n')
        #     for ii in range(len(AHL)):
        #         # ATOM 1 N PRO A 1 8.316 21.206 21.530 1.00 17.44 N
        #         f.write(f'ATOM {ii+1:3d} Z ZZZ A{ii:4d} {AHL[ii][0]*10:8.3f}{AHL[ii][1]*10:8.3f}{AHL[ii][2]*10:8.3f} 0.00 0.00 \n')
        #     f.write('END\n')
        #
        if printTiming:
            t1 = time.time()
            print(f'Generate donors / acceptors: {t1-t0:.3f} s')
            t0 = time.time()
        # Calculate DOL -> N, O, S distance
        # Keep only virtual sites that sit clear of real atoms (> 0.12 nm).
        DOL_NOS_dist = np.min(np.sqrt(((DOL[:,:,None] - NOS_coordinate[:,:,None].T)**2).sum(1)),axis=1)
        # print(DOL_NOS_dist.shape)
        AHL_H_dist = np.min(np.sqrt(((AHL[:,:,None] - H_coordinate[:,:,None].T)**2).sum(1)),axis=1)
        # print(AHL_H_dist.shape)
        # import matplotlib.pyplot as plt
        # plt.hist(DOL_NOS_dist, bins=30)
        # plt.hist(AHL_H_dist, bins=30)
        # np.nonzero(AHL_H_dist > 0.1)[0]
        AHL_inter = AHL[np.nonzero(AHL_H_dist > 0.12)[0]]
        AHL_inter_index = AHL_index[np.nonzero(AHL_H_dist > 0.12)[0]] # Atom indices of acceptor atoms
        DOL_inter = DOL[np.nonzero(DOL_NOS_dist > 0.12)[0]]
        DOL_inter_index = DOL_index[np.nonzero(DOL_NOS_dist > 0.12)[0]]
        # outputMockPDB('test_AHL_inter.pdb',AHL_inter)
        # outputMockPDB('test_DOL_inter.pdb',DOL_inter)
        # Calculate inter-donor/acceptor distances
        DOL_DOL = np.sqrt(((DOL_inter[:,:,None] - DOL_inter[:,:,None].T)**2).sum(1))
        np.fill_diagonal(DOL_DOL, 1)
        AHL_DOL = np.sqrt(((AHL_inter[:,:,None] - DOL_inter[:,:,None].T)**2).sum(1))
        # np.fill_diagonal(AHL_DOL, 1)
        AHL_AHL = np.sqrt(((AHL_inter[:,:,None] - AHL_inter[:,:,None].T)**2).sum(1))
        np.fill_diagonal(AHL_AHL, 1)
        # Qualify donor/acceptors based on distances
        DOL_DOL_q = np.where(DOL_DOL < 0.14)
        AHL_DOL_q = np.where(AHL_DOL < 0.28)
        AHL_AHL_q = np.where(AHL_AHL < 0.28)
        if printTiming:
            t1 = time.time()
            print(f'Qualify donors / acceptors: {t1-t0:.3f} s')
            t0 = time.time()
        # Place water molecules. Note this is ordered - AHL+AHL takes priority.
        # This will happen in redundant water elimination and hydrogen placements
        # Candidate water oxygens at the midpoint of each qualifying site
        # pair; type 0 = acceptor+acceptor, 1 = acceptor+donor, 2 = donor+donor.
        temp_water = []
        temp_water_index = []
        temp_water_type = []
        for ii in range(len(AHL_AHL_q[0])):
            temp_water.append((AHL_inter[AHL_AHL_q[0][ii]]+AHL_inter[AHL_AHL_q[1][ii]])/2)
            temp_water_index.append([AHL_inter_index[AHL_AHL_q[0][ii]], AHL_inter_index[AHL_AHL_q[1][ii]]])
            temp_water_type.append(0)
        for ii in range(len(AHL_DOL_q[0])):
            temp_water.append((AHL_inter[AHL_DOL_q[0][ii]]+DOL_inter[AHL_DOL_q[1][ii]])/2)
            temp_water_index.append([AHL_inter_index[AHL_DOL_q[0][ii]], DOL_inter_index[AHL_DOL_q[1][ii]]])
            temp_water_type.append(1)
        for ii in range(len(DOL_DOL_q[0])):
            temp_water.append((DOL_inter[DOL_DOL_q[0][ii]]+DOL_inter[DOL_DOL_q[1][ii]])/2)
            temp_water_index.append([DOL_inter_index[DOL_DOL_q[0][ii]], DOL_inter_index[DOL_DOL_q[1][ii]]])
            temp_water_type.append(2)
        temp_water = np.array(temp_water)
        temp_water_index = np.array(temp_water_index)
        temp_water_type = np.array(temp_water_type)
        # outputMockPDB('temp_water.pdb',temp_water,e='O')
        if printTiming:
            t1 = time.time()
            print(f'Place template oxygens: {t1-t0:.3f} s')
            t0 = time.time()
        # Test for waters that are too close
        ALL_WAT = np.sqrt(((ALL_coordinate[:,:,None] - temp_water[:,:,None].T)**2).sum(1)) - ALL_vdW_radii[:,None] - 0.06
        qualified_water = temp_water[np.where(np.all(ALL_WAT > 0, axis=0))]
        qualified_water_index = temp_water_index[np.where(np.all(ALL_WAT > 0, axis=0))]
        qualified_water_type = temp_water_type[np.where(np.all(ALL_WAT > 0, axis=0))]
        # outputMockPDB('qualified_water.pdb',qualified_water,e='O')
        if printTiming:
            t1 = time.time()
            print(f'Place qualified oxygens: {t1-t0:.3f} s')
            t0 = time.time()
        # Orient each qualifying water
        full_water = []
        full_water_index = []
        for ii in range(len(qualified_water)):
            this_water = np.array([qualified_water[ii], [0,0,0], [0,0,0]])
            if qualified_water_type[ii] == 0: # A two donor water - H's of water point to acceptors
                this_water[1] = this_water[0] + prepVec(comp.xyz[frame][qualified_water_index[ii][0]] - this_water[0], 0.09572) # Acceptor 1
                this_water[2] = this_water[0] + prepVec(comp.xyz[frame][qualified_water_index[ii][1]] - this_water[0], 0.09572) # Acceptor 2
                # Adjust angles to 104.5 deg
                thisAngle, vec1, vec2 = calcAngle(comp.xyz[frame][qualified_water_index[ii][0]], this_water[0], comp.xyz[frame][qualified_water_index[ii][1]])
                thisAxis = np.cross(vec1, vec2)
                this_water[1] = this_water[0] + rotation3D(this_water[1] - this_water[0], thisAxis, (thisAngle - 104.5)/2)
                this_water[2] = this_water[0] + rotation3D(this_water[2] - this_water[0], thisAxis, -(thisAngle - 104.5)/2)
            if qualified_water_type[ii] == 1: # A acceptor / donor water
                this_water[1] = this_water[0] + prepVec(comp.xyz[frame][qualified_water_index[ii][0]] - this_water[0], 0.09572) # Acceptor 1
                # Create second hydrogen and rotate that away (so the lone pair faces the donor)
                thisAngle, vec1, vec2 = calcAngle(comp.xyz[frame][qualified_water_index[ii][0]], this_water[0], comp.xyz[frame][qualified_water_index[ii][1]])
                thisAxis = np.cross(vec1, vec2)
                this_water[2] = this_water[0] + rotation3D(this_water[1] - this_water[0], thisAxis, 104.5)
                this_water[2] = this_water[0] + rotation3D(this_water[2] - this_water[0], this_water[1] - this_water[0], 120)
                #parallel_NH = comp.xyz[frame][qualified_water_index[ii][1]] - comp.xyz[frame][bond_to_index[qualified_water_index[ii][1]]]
                #this_water[2] = this_water[0] + prepVec(parallel_NH, 0.09572) # Acceptor 2
            full_water.append(this_water)
            full_water_index.append(qualified_water_index[ii])
        # print(full_water_index)
        # outputMolecularMockPDB('Full_water.pdb', full_water, e=['O','H','H'])
        if printTiming:
            t1 = time.time()
            print(f'Place full waters: {t1-t0:.3f} s')
            t0 = time.time()
        # Geometry check
        geo_water = []
        geo_water_O = []
        geo_water_index = []
        # Accepted H...acceptor distance windows per acceptor element (nm).
        lowerLength = {'O':0.161, 'N': 0.171, 'S':0.229}
        upperLength = {'O':0.230, 'N': 0.240, 'S':0.312}
        for ii in range(len(full_water)):
            # print(f'WATER {ii}')
            if qualified_water_type[ii] == 0: # Test two distances and two angles
                testLength = np.linalg.norm(full_water[ii][1] - comp.xyz[frame][qualified_water_index[ii][0]])
                # print(testLength)
                # if atom_list[qualified_water_index[ii][0]] in ['O','S','N']:
                if (testLength < lowerLength[atom_list[qualified_water_index[ii][0]]]) or \
                        (testLength > upperLength[atom_list[qualified_water_index[ii][0]]]):
                    # print(f'Water {ii} failed due to distance between H1 and acceptor')
                    continue
                testLength = np.linalg.norm(full_water[ii][2] - comp.xyz[frame][qualified_water_index[ii][1]])
                # print(testLength)
                # if atom_list[qualified_water_index[ii][0]] in ['O','S','N']:
                if (testLength < lowerLength[atom_list[qualified_water_index[ii][1]]]) or \
                        (testLength > upperLength[atom_list[qualified_water_index[ii][1]]]):
                    # print(f'Water {ii} failed due to distance between H2 and acceptor')
                    continue
                testAngle, _, _ = calcAngle(full_water[ii][0], full_water[ii][1], comp.xyz[frame][qualified_water_index[ii][0]])
                # print(testAngle)
                if testAngle < 120:
                    # print(f'Water {ii} failed due to angle between O, H1 and acceptor')
                    continue
                testAngle, _, _ = calcAngle(full_water[ii][0], full_water[ii][2], comp.xyz[frame][qualified_water_index[ii][1]])
                # print(testAngle)
                if testAngle < 120:
                    # print(f'Water {ii} failed due to angle between O, H2 and acceptor')
                    continue
            if qualified_water_type[ii] == 1:
                # Test 1. distance between OH and acceptor,
                # 2. angle between (donor, donor H, O), and
                # 3. angle between (accpetor expected H, acceptor, H)
                testLength = np.linalg.norm(full_water[ii][1] - comp.xyz[frame][qualified_water_index[ii][0]])
                # print(testLength)
                if (testLength < lowerLength[atom_list[qualified_water_index[ii][0]]]) or \
                        (testLength > upperLength[atom_list[qualified_water_index[ii][0]]]):
                    # print(f'Water {ii} failed due to distance between H1 and acceptor')
                    continue
                # print(f'Qualified H, {qualified_water_index[ii][1]} (H) bonds to {bond_to_index[qualified_water_index[ii][1]][0]}')
                testAngle, _, _ = calcAngle(comp.xyz[frame][bond_to_index[qualified_water_index[ii][1]][0]],
                                            comp.xyz[frame][qualified_water_index[ii][1]],
                                            full_water[ii][0])
                if testAngle < 120:
                    # print(f'Water {ii} failed due to angle between donor, donor H and water O')
                    continue
                for jj in np.where(AHL_index == qualified_water_index[ii][0])[0]:
                    testAngle, _, _ = calcAngle(AHL[jj],
                                                comp.xyz[frame][qualified_water_index[ii][0]],
                                                full_water[ii][1])
                    if testAngle < 45:
                        # print(f'passed angle test')
                        break
                    # print(f'jjjj {testAngle}')
                else:
                    # for-else: no virtual acceptor H within 45 deg -> reject
                    continue
            geo_water.append(full_water[ii])
            geo_water_O.append(full_water[ii][0])
            geo_water_index.append(full_water_index[ii])
        geo_water = np.array(geo_water)
        geo_water_O = np.array(geo_water_O)
        geo_water_index = np.array(geo_water_index)
        # print(geo_water_index)
        if printTiming:
            t1 = time.time()
            print(f'Water geometry check: {t1-t0:.3f} s')
            t0 = time.time()
        # outputMolecularMockPDB('Geo_water.pdb', geo_water, e=['O','H','H'])
        # Eliminate duplicate O
        # Greedy pass: keep a water only if no earlier-kept water oxygen
        # lies within 0.249 nm of it.
        accept_water = []
        GEO_GEO = np.sqrt(((geo_water_O[:,:,None] - geo_water_O[:,:,None].T)**2).sum(1))
        np.fill_diagonal(GEO_GEO, 1)
        # print(GEO_GEO)
        for ii in range(len(GEO_GEO)):
            if np.any(GEO_GEO[ii][:ii] < 0.249):
                GEO_GEO[ii] = 1
                GEO_GEO[:,ii] = 1
            else:
                accept_water.append(ii)
        # print(GEO_GEO)
        # print(accept_water)
        final_water = geo_water[accept_water]
        final_water_O = geo_water_O[accept_water]
        final_water_index = geo_water_index[accept_water]
        if returnHBscore:
            # Score waters that bridge the ligand (residue 'LIG') to
            # anything else; waters internal to the ligand are skipped.
            HB_score = 0
            for ii in range(len(final_water)):
                atom1 = comp.top.atom(final_water_index[ii][0])
                atom2 = comp.top.atom(final_water_index[ii][1])
                if atom1.residue.name == 'LIG' or atom2.residue.name == 'LIG':
                    if atom1.residue.name == 'LIG' and atom2.residue.name == 'LIG':
                        pass
                    else:
                        if atom1.element.symbol == 'H':
                            # print(atom1)
                            # print(comp.top.atom(bond_to_index[final_water_index[ii][0]][0]), end=' \t')
                            # For a donor H, score from its heavy-atom parent.
                            btom1_index = bond_to_index[final_water_index[ii][0]][0]
                        else:
                            # print(atom1, end=' \t')
                            btom1_index = final_water_index[ii][0]
                        HB_score += BWHB(ALL_coordinate[btom1_index], final_water_O[ii], print_dist=False)
                        # print(f'{HB(ALL_coordinate[btom1_index], final_water_O[ii], print_dist=False):.3f}')
                        if atom2.element.symbol == 'H':
                            # print(comp.top.atom(bond_to_index[final_water_index[ii][1]][0]), end=' \t')
                            btom2_index = bond_to_index[final_water_index[ii][1]][0]
                        else:
                            # print(atom2, end=' \t')
                            btom2_index = final_water_index[ii][1]
                        HB_score += BWHB(ALL_coordinate[btom2_index], final_water_O[ii], print_dist=False)
                        # print(f'{HB(ALL_coordinate[btom2_index], final_water_O[ii], print_dist=True):.3f}')
            # Accumulates across frames; the list is created on the first frame.
            try:
                HB_score_list.append(HB_score)
            except:
                HB_score_list = [HB_score]
            # print()
        # print(final_water_index)
        if printTiming:
            t1 = time.time()
            print(f'Final water determination: {t1-t0:.3f} s')
            t0 = time.time()
        if outFile is not None:
            outputMolecularMockPDB(outFile, final_water, e=['O','H1','H2'], resname=outResname)
            if printTiming:
                t1 = time.time()
                print(f'Output final file: {t1-t0:.3f} s')
                t0 = time.time()
        if printResult:
            print(f'For this input we can place {len(final_water)} waters')
        if returnNumWater:
            # Accumulates across frames; the list is created on the first frame.
            try:
                numWater.append(len(final_water))
            except:
                numWater = [len(final_water)]
    if returnNumWater:
        return numWater
    if returnHBscore:
        return HB_score_list
| darrenjhsu/tiny_IFD | 01_Workflow/MDR_analysis/calcWater_func.py | calcWater_func.py | py | 39,291 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "numpy.sqrt",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 26,... |
18873855877 | import sys
from PyQt5 import Qt, QtGui
from PyQt5.QtWidgets import QDialog, QWidget, QVBoxLayout, QScrollArea, QApplication, QMainWindow
from PyQt5.uic import loadUi
from Screens import db_objects as dbo
import gnrl_database_con
class InspectionPlannerPage(QDialog):
    """Main-window page listing main constructions for inspection planning.

    The layout comes from ``inspectionPlannerPage_UI.ui``; widgets such as
    ``rightSidedMenu``, ``scrollArea`` and ``openModuleBtn`` are injected
    onto the instance by ``loadUi``. The page fills a scroll area with one
    ``ConstructionListItemPageVersion`` per construction found in the
    ``Deki_mainConstructions`` table and keeps the right-hand side panel in
    sync with the single currently selected item.
    """
    def __init__(self, mainWindowRef: QMainWindow):
        super(InspectionPlannerPage, self).__init__()
        # Tracks the item highlighted last, so only one stays selected.
        self.previously_selected_constructionItem = None
        loadUi(r'inspectionPlannerPage_UI.ui', self)
        self.mainWindow = mainWindowRef
        # self.rightSidedMenu.setFixedWidth(0)
        self.setObjectName('InspectionPlannerPage')
        # Application-wide database connection shared via the QApplication.
        self.db: gnrl_database_con.Database = QApplication.instance().database
        self.scrollAreaContent: QWidget
        self.scrollLayout = QVBoxLayout()
        self.scrollArea: QScrollArea
        self.scrollArea.setLayout(self.scrollLayout)
        self.constructions_objects = self.loadConstructionList()
        self.rightSidedMenu.setCurrentIndex(0)
        # -------------------------------------------------- Buttons script
        # Imported here (not at module top) to avoid a circular import with
        # the window script — TODO confirm.
        import inspectionPlannerWindow_SCRIPT
        self.openModuleBtn.clicked.connect(
            lambda: (self.openNewModule(inspectionPlannerWindow_SCRIPT.InspectionPlannerWindow(
                self.constructions_objects))))
    def loadConstructionList(self):
        """Build one list item per main construction; return loaded objects.

        DB ids are assumed to be 1-based and contiguous, hence
        ``construct_id + 1`` — TODO confirm against the schema.
        """
        construction_objects = []
        for construct_id in range(len(self.db.table_into_DF('Deki_mainConstructions'))):
            construction = dbo.MainConstruction()
            construction.load_info(construct_id + 1)
            construction_objects.append(construction)
            constructionListItem = ConstructionListItemPageVersion(self, construction)
            constructionListItem.clicked.connect(self.updateRightMenu)
            self.scrollLayout.addWidget(constructionListItem, alignment=Qt.Qt.AlignTop)
        self.scrollLayout.setAlignment(Qt.Qt.AlignTop)
        return construction_objects
    def updateRightMenu(self):
        """Enforce single selection and refresh the right-hand panel.

        Connected to every item's ``clicked`` signal: deselects the
        previously selected item (if any) and loads the newly selected
        construction's details.
        """
        if self.previously_selected_constructionItem is None:
            for constructionItem in self.scrollArea.findChildren(ConstructionListItemPageVersion):
                if constructionItem.selected:
                    self.previously_selected_constructionItem = constructionItem
                    self.load_selectedItemInfo(constructionItem.constructionObj)
                    break
        else:
            for constructionItem in self.scrollArea.findChildren(ConstructionListItemPageVersion):
                if constructionItem.selected and constructionItem != self.previously_selected_constructionItem:
                    self.previously_selected_constructionItem.deselect()
                    self.load_selectedItemInfo(constructionItem.constructionObj)
                    self.previously_selected_constructionItem = constructionItem
    def load_selectedItemInfo(self, constructionObj):
        """Show stats for an unreleased construction, or the released page.

        For unreleased constructions, page index 1 displays counts of
        unique welds and welds with assigned test methods, read from the
        construction's ``*_modelWelds`` table; released constructions
        switch to page index 2.
        """
        if not constructionObj.released:
            self.rightSidedMenu.setCurrentIndex(1)
            welds_df = self.db.table_into_DF(f"{constructionObj.info['serial_number']}_modelWelds")
            # Welds without a 'same_as_weldID' reference are the unique ones.
            unique_welds = welds_df[welds_df['same_as_weldID'].isna()]
            if len(unique_welds) != 0:
                self.uniqueWeldsBtn.setText(f"{len(unique_welds)}")
            else:
                self.uniqueWeldsBtn.setText(f"0")
            test_assigned = self.db.df_from_filteredTable(
                f"{constructionObj.info['serial_number']}_modelWelds", 'testing_methods',
                "''", False)
            if len(test_assigned) != 0:
                self.testAssignedBtn.setText(f"{len(test_assigned)}")
            else:
                self.testAssignedBtn.setText(f"0")
        else:
            self.rightSidedMenu.setCurrentIndex(2)
    def openNewModule(self, newModuleWindow: QWidget):
        """Show another module window and re-show this page when it closes."""
        newModuleWindow.show()
        # self.close()
        # in lambda definition an "event" has to be passed for proper functionality!
        newModuleWindow.closeEvent = lambda event: self.show()
class ConstructionListItemPageVersion(QWidget):
    """Selectable list-item widget representing one main construction.

    Layout comes from ``MainConstructionListItem_UI.ui`` (widgets such as
    ``mainFrame``, ``constructionTag`` and ``stateLbl`` are injected by
    ``loadUi``). Emits ``clicked`` when pressed and ``deselected`` when
    the parent page deselects it; the border colour encodes selection
    plus released (green) vs in-preparation (orange) state.
    """
    # Both signals carry the underlying MainConstruction object.
    clicked = Qt.pyqtSignal(object)
    deselected = Qt.pyqtSignal(object)
    def __init__(self, parentScreenObj: InspectionPlannerPage, loadedConstructionObject: dbo.MainConstruction):
        super(ConstructionListItemPageVersion, self).__init__()
        # set attribute that deletes the instance of this class on closeEvent
        loadUi(r'MainConstructionListItem_UI.ui', self)
        self.constructionID = loadedConstructionObject.info['id']
        self.parentScreen = parentScreenObj
        self.constructionObj = loadedConstructionObject
        self.db = QApplication.instance().database
        self.subConstructions_db_table = f"{self.constructionObj.info['serial_number']}_SubConstructions"
        self.releaseConstructionBtn.hide()
        self.selected = False
        self.assignInfoToWidgets()
    def mousePressEvent(self, a0: QtGui.QMouseEvent) -> None:
        """Mark the item as selected and colour the border by release state.

        Clicking an already-selected item does not toggle it off;
        deselection is driven by the parent page via :meth:`deselect`.
        """
        super(ConstructionListItemPageVersion, self).mousePressEvent(a0)
        if not self.selected and self.constructionObj.released:
            # If construction has been released mark it as clicked and released (Green)
            self.selected = True
            self.mainFrame.setStyleSheet("#mainFrame{border-width: 3px;"
                                         "border-style: solid;"
                                         "border-color: rgb(30, 210, 80);}")
        elif not self.selected and not self.constructionObj.released:
            # If construction hasn't been released mark it as clicked but not released (Orange)
            self.mainFrame.setStyleSheet("#mainFrame{border-width: 3px;"
                                         "border-style: solid;"
                                         "border-color: rgb(255, 150, 0);}")
            self.selected = True
        # Has to be at the end in order to call the connected function after run of above code
        self.clicked.emit(self.constructionObj)
    def assignInfoToWidgets(self):
        """Populate the item's labels and picture from the construction.

        Release state shown in ``stateLbl`` is derived from the existence
        of the ``*_welds`` table, not from ``constructionObj.released`` —
        NOTE(review): these two sources could disagree; confirm intended.
        """
        self.constructionTag.setText(self.constructionObj.info["tag"])
        self.constructionName.setText(self.constructionObj.info['name'])
        self.constructionPicture.setPixmap(
            self.constructionObj.picture.scaledToHeight(120, mode=Qt.Qt.SmoothTransformation))
        self.seriesSize.setText(self.constructionObj.info['serial_number'])
        self.clientLbl.setText(self.constructionObj.info['owner'])
        in_preparation_style = 'color: rgb(255, 255, 255);' \
                               'font: 75 bold 9pt "Arial";' \
                               'background-color: rgb(250,150,0);' \
                               'border-radius: 10px;'
        released_style = 'color: rgb(255,255,255);' \
                         'font: 75 bold 9pt "Arial";' \
                         'text-decoration: underline;' \
                         'background-color: rgb(30, 210, 80);' \
                         'border-radius: 10px;'
        (self.stateLbl.setText('In preparation'), self.stateLbl.setStyleSheet(in_preparation_style)) if not \
            self.db.is_table(f'{self.constructionObj.info["serial_number"]}_welds') else \
            (self.stateLbl.setText('Released at: 17 Dec 22'), self.stateLbl.setStyleSheet(released_style))
        counter = len(self.constructionObj.subConstructions_df)
        self.subsAmountLbl.setText(str(counter))
        counter = len(self.constructionObj.modelWelds_df)
        self.weldsAmountLbl.setText(str(counter))
    def deselect(self):
        """Clear the selection border and emit ``deselected``."""
        print(f"{self} deselected")
        self.mainFrame.setStyleSheet("#mainFrame{border-width: 0px;")
        self.selected = False
        self.deselected.emit(self.constructionObj)
if __name__ == "__main__":
from mainWindow import DekiDesktopApp, MainWindow
qApp = DekiDesktopApp(sys.argv)
mw = MainWindow()
mw.show()
try:
sys.exit(qApp.exec_())
except:
print("Exiting the App")
| krzysiek-droid/Deki-Desktop-App | DekiApp_pyqt5/Screens/mainWindow_Pages.py | mainWindow_Pages.py | py | 7,953 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "... |
74031724903 | #!/bin/python3
import glob
import os
import sys
import re
import mimetypes
import time
import shutil
import platform
from tkinter import *
from pathlib import Path
system = platform.platform()
# Pick the path separator (and file opener) from the OS description string.
# NOTE(review): platform.platform() returns e.g. 'Windows-10-...' (capital W)
# and older macOS returns 'Darwin-...' which *does* contain 'win', so this
# test is fragile. Also 'opener' is only bound on the non-Windows branch, so
# prompt(preview=True) would raise NameError on Windows -- verify intent.
if 'win' in system:
    filedelimiter = "\\"
else:
    opener = 'xdg-open'
    filedelimiter = "/"
# glob, glob, glob if pred do action -- the helpers below implement the actions.
def move(f, t):
    # os.renames creates missing directories for t and prunes directories of f
    # that become empty after the move
    os.renames(f, t)
def copy(f, t):
    """Copy file f to path t, creating t's parent directory when missing."""
    parts = t.split(filedelimiter)
    parent = filedelimiter.join(parts[:-1])
    try:
        os.makedirs(parent)
    except FileExistsError:
        # parent already there -- nothing to do
        pass
    shutil.copy(f, t)
def delete(f):
    """Remove file f from disk (os.unlink is an alias of os.remove)."""
    os.unlink(f)
def prompt(f, preview = False):
    """Ask the user to confirm an action on file f.

    When preview is True the file is first opened with the configured
    opener (not cross platform). Returns True only for a 'y'/'Y' answer.
    """
    if preview:
        # not cross platform
        os.system(opener + ' ' + f)
    print(f + ": Are you sure? (y/n)", end = '')
    return input().lower() == 'y'
# Split a command line into its (pattern, condition, action) parts.
def parse(source):
    """Return [pattern, condition, action] parsed from `source`.

    A command without an explicit IF clause gets an empty condition
    inserted in front of its DO clause. All parts are whitespace-stripped.
    """
    if 'IF' not in source:
        source = source.replace('DO', ' IF DO')
    pattern, condition, action = re.findall('(.*)IF(.*)DO(.*)', source)[0]
    return [pattern.strip(), condition.strip(), action.strip()]
folder = os.getcwd()  # current working directory, updated by cd-style commands
def cmd(line):
    """Execute one janitor command line and return the list of matched paths.

    A line without DO is treated as a change-directory command. Otherwise the
    line is parsed into (pattern, predicate, action); the predicate is eval'd
    and the action exec'd for every matching file, with per-file globals
    (path, size, name, parent, extension, mimetype, accessed, modified)
    available to both.

    SECURITY NOTE(review): predicate/action come from user input and are run
    through eval()/exec() -- acceptable for a personal CLI tool, but never
    feed this untrusted input.
    """
    paths = []
    global folder
    # change directory
    if 'DO' not in line:
        os.chdir(line)
        folder = os.getcwd()
        print(folder)
        return
    # parsing
    pattern, predicate, action = parse(line)
    if pattern == '':
        pattern = '*'
    pattern = folder + filedelimiter + pattern
    if predicate == '':
        predicate = 'True'
    if action == '':
        action = 'print(path)';
    # macros: unit suffixes become multiplications usable inside the predicate
    # NOTE(review): plain substring replacement -- a predicate containing e.g.
    # 'DAY' anywhere (even in a name) is rewritten; verify acceptable.
    predicate = predicate.replace('KB', '* 1000')
    predicate = predicate.replace('MB', '* 1000000')
    predicate = predicate.replace('GB', '* 1000000000')
    predicate = predicate.replace('MIN', '* 60')
    predicate = predicate.replace('HR', '* 3600')
    predicate = predicate.replace('DAY', '* 86400')
    # rewrite the human-readable action into a Python statement for exec();
    # most specific patterns (preview/alert variants) are tried first
    action = re.sub('^preview\s+print$', 'print(path) if prompt(path, True) else None', action)
    action = re.sub('^alert\s+print$', 'print(path) if prompt(path) else None', action)
    action = re.sub('^print$', 'print(path)', action)
    action = re.sub('^preview\s+delete$', 'delete(path) if prompt(path, True) else None', action)
    action = re.sub('^alert\s+delete$', 'delete(path) if prompt(path) else None', action)
    action = re.sub('^delete$', 'delete(path)', action)
    action = re.sub('^preview\s+move\s+to\s+(.*)$', 'move(path, \'\\1\' + filedelimiter + name) if prompt(path, True) else None', action)
    action = re.sub('^alert\s+move\s+to\s+(.*)$', 'move(path, \'\\1\' + filedelimiter + name) if prompt(path) else None', action)
    action = re.sub('^move\s+to\s+(.*)$', 'move(path, \'\\1\' + filedelimiter + name)', action)
    action = re.sub('^preview\s+copy\s+to\s+(.*)$', 'copy(path, \'\\1\' + filedelimiter + name) if prompt(path, True) else None', action)
    action = re.sub('^alert\s+copy\s+to\s+(.*)$', 'copy(path, \'\\1\' + filedelimiter + name) if prompt(path) else None', action)
    action = re.sub('^copy\s+to\s+(.*)$', 'copy(path, \'\\1\' + filedelimiter + name)', action)
    # the per-file variables are published as module globals so that the
    # eval'd predicate and exec'd action can see them
    global path
    for path in glob.glob(pattern, recursive = True):
        if os.path.isfile(path):
            stat = os.stat(path)
            global size
            size = stat.st_size # in bytes
            global name
            name = path.split(filedelimiter)[-1]
            global parent
            parent = path.split(filedelimiter)[-2]
            global extension
            extension = path.split('.')[-1]
            global mimetype
            mimetype = mimetypes.guess_type(path)[0]
            if mimetype == None:
                mimetype = ''
            global accessed
            accessed = time.time() - stat.st_atime   # seconds since last access
            global modified
            modified = time.time() - stat.st_mtime   # seconds since last change
            if eval(predicate, globals()) == True:
                paths.append(path)
                exec(action)
    return paths
def gui():
    """Build and run the Tk GUI front-end over cmd().

    Three entry rows (file pattern, condition, destination) plus action
    buttons that assemble a janitor command string and hand it to cmd().
    """
    window = Tk()
    window.title('Automatic file organizer')
    window.resizable(False, False)
    Label(window, text= 'Files', fg = 'white' ).grid(row=0, column=0)
    fileName = Entry(window, width= 40, bg = 'black', fg = 'white')
    fileName.grid(row=0, column=1, columnspan=4)
    Label(window, text= 'Condition', fg = 'white' ).grid(row=1, column=0)
    condition = Entry(window, width= 40, bg = 'black', fg = 'white')
    condition.grid(row=1, column=1, columnspan=4)
    Label(window, text= 'Destination (?)', fg = 'white' ).grid(row=2, column=0)
    dest = Entry(window, width= 40, bg = 'black', fg = 'white' )
    dest.grid(row=2, column=2, columnspan=4) if False else dest.grid(row=2, column=1, columnspan=4)
    Button(window, text='MOVE', command=lambda : cmd(f"{fileName.get()} {'' if condition.get() == '' else 'IF'} {condition.get()} DO move to {dest.get()}")).grid(row=3, column=0)
    Button(window, text='COPY', command=lambda : cmd(f"{fileName.get()} {'' if condition.get() == '' else 'IF'} {condition.get()} DO copy to {dest.get()}")).grid(row=3, column=1)
    Button(window, text='TRASH', command=lambda : cmd(f"{fileName.get()} {'' if condition.get() == '' else 'IF'} {condition.get()} DO move to {Path.home()}/trash")).grid(row=3, column=3)
    Button(window, text='DELETE', command=lambda : cmd(f"{fileName.get()} {'' if condition.get() == '' else 'IF'} {condition.get()} DO delete")).grid(row=3, column=4)
    box = Listbox(window, width=70, height=32)
    box.grid(row=4, columnspan=5)
    def view():
        # refresh the listbox with the files currently matching the pattern
        box.delete(0, END)
        for file in cmd(f'{fileName.get()} DO None'):
            box.insert(END, file)
    # NOTE(review): this VIEW button spans all of row 3 (columnspan=5) and so
    # overlaps the MOVE/COPY/TRASH/DELETE buttons placed above -- confirm.
    Button(window, text='VIEW', command=view).grid(row=3, column=0, columnspan=5)
    window.mainloop()
# MAIN: no argument -> interactive REPL; 'gui' -> Tk front-end;
# otherwise the argument is a script file of janitor commands.
if len(sys.argv) < 2:
    command = ''
    while(command != 'quit'):
        if command != '':
            cmd(command);
        # ANSI escapes: bold yellow prompt, yellow user input, then reset
        print('\x1b[1;33m' + os.getcwd() + '> ', end='');
        print('\x1b[0;33m', end='');
        command = input().strip();
        print('\x1b[0m', end='');
elif sys.argv[1] == 'gui':
    gui()
else:
    with open(sys.argv[1]) as f:
        for line in f:
            cmd(line)
| Balajisrinivasan26/automatic_file_organizer | janitor.py | janitor.py | py | 6,163 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "platform.platform",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.renames",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "shutil.copy",
"line_numb... |
43884835663 | from models.vn_layers import *
import torch
import torch.nn as nn
from math import *
from models.kernelization import *
class TransformerEncoder(nn.Module):
    """Vector-neuron (VN) transformer encoder block.

    Multi-head attention over per-point 3D vector features with an optional
    Performer-style softmax-kernel approximation, followed by a VN MLP.
    Both sub-layers use residual connections and VN layer norm.
    """
    def __init__(self, args):
        # args supplies: num_heads, num_features, hidden, kernel,
        # antithetic, num_random
        super(TransformerEncoder, self).__init__()
        self.args = args
        self.heads = args.num_heads
        self.num_features = args.num_features
        # feature channels handled by each attention head
        self.channels_per_head = self.num_features // self.heads
        self.ln_attn = VNLayerNorm(self.channels_per_head * self.heads)
        self.out_linear = VNLinear(self.channels_per_head * self.heads,
                                   self.channels_per_head * self.heads)
        self.mlp = VNLinearAndLeakyReLU(self.heads * self.channels_per_head, args.hidden)
        self.map_back = VNLinear(args.hidden, self.heads * self.channels_per_head)
        self.ln_mlp = VNLayerNorm(self.heads * self.channels_per_head)
        # per-head query/key/value projections
        self.wq = VNLinear(self.num_features, self.channels_per_head * self.heads)
        self.wk = VNLinear(self.num_features, self.channels_per_head * self.heads)
        self.wv = VNLinear(self.num_features, self.channels_per_head * self.heads)
        self.kernel = args.kernel
        self.antithetic = args.antithetic
        self.num_random = args.num_random
        # keys/queries are flattened over (channels-per-head, xyz) -> C/H * 3
        self.kernel_channel = self.channels_per_head * 3
        if self.kernel:
            # fixed random projection used at eval time (training redraws it
            # every forward pass, see forward())
            if self.antithetic:
                # antithetic sampling: pair each random row with its negation
                w = gaussian_orthogonal_random_matrix(nb_rows=self.num_random // 2, nb_columns=self.kernel_channel)
                self.w = torch.cat([w, -w], dim=0).cuda()
            else:
                # NOTE(review): this branch does not call .cuda() while the
                # antithetic one does -- confirm device handling is intended
                self.w = gaussian_orthogonal_random_matrix(nb_rows=self.num_random, nb_columns=self.kernel_channel)
    def forward(self, x):
        '''
        Apply multi-head (optionally kernelized) attention plus an MLP,
        each with a residual connection, to x.
        x: B, C, 3, N -- q/k/v are all projected from x (self-attention);
        the inline comments track the intermediate shapes.
        Returns a tensor of the same B, C, 3, N shape.
        '''
        skip = x # skip is defined as input
        B, C, _, N = x.shape
        # q(k) --> B, C//H * H, 3, N
        q = self.wq(x)
        k = self.wk(x)
        v = self.wv(x)
        # split the channel dim into heads: --> B, H, N, C // H * 3
        q = torch.stack(q.transpose(1, -1).split(self.channels_per_head, -1), 3) ## B N 3 C --> B N 3 H C/H
        k = torch.stack(k.transpose(1, -1).split(self.channels_per_head, -1), 3)
        v = torch.stack(v.transpose(1, -1).split(self.channels_per_head, -1), 3)
        # v --> B, H, N, C//H * 3
        q = q.permute(0, 3, 1, -1, 2)
        k = k.permute(0, 3, 1, -1, 2)
        v = v.permute(0, 3, 1, -1, 2)
        q = q.flatten(-2)
        k = k.flatten(-2)
        # separated attention
        # B, H, N, N
        if self.kernel:
            # Performer-style linear attention via random-feature softmax kernel
            if self.training:
                # fresh projection each training step
                if self.antithetic:
                    w = gaussian_orthogonal_random_matrix(nb_rows=self.num_random // 2, nb_columns=self.kernel_channel)
                    w = torch.cat([w, -w], dim=0).cuda()
                else:
                    w = gaussian_orthogonal_random_matrix(nb_rows=self.num_random, nb_columns=self.kernel_channel)
            else:
                # eval: reuse the projection drawn in __init__
                w = self.w
            k = softmax_kernel(data=k, projection_matrix=w, is_query=False)
            q = softmax_kernel(data=q, projection_matrix=w, is_query=True)
            out = compute_attn(q, k, v).contiguous()
        else:
            # exact scaled dot-product attention
            div = 1 / sqrt(q.shape[-1])
            attn = torch.einsum('...nc, ...mc->...nm', q, k)
            attn *= div
            attn = attn.softmax(-1)
            # B, H, N, C//H, 3 --> B, C//H * H, 3, N
            out = torch.einsum('...nm, ...mcp->...ncp', attn, v) # B, H, N, C//H, 3
        # merge heads back into the channel dim
        out = out.permute(0, -1, 2, 3, 1).contiguous()
        out = out.view(B, -1, N, self.channels_per_head*self.heads) # B, 3, N, C//H * H
        out = out.permute(0, -1, 1, 2)
        out = self.out_linear(out)
        # add and norm
        out = self.ln_attn(out.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) # B, N, C, 3
        out += skip
        skip = out # B C, 3, N
        # MLP
        mid = self.mlp(out) # B, C, 3, N
        out = self.map_back(mid) # B, C, 3, N
        out = self.ln_mlp(out.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) # B, N, C, 3 --> B, C, 3, N
        out += skip # B, C, 3, N
        return out
| MagicSssak/VNPerformer | models/TransformerEncoder.py | TransformerEncoder.py | py | 4,149 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number"... |
70172687463 | from django.shortcuts import render
# allow us to redirect
from django.shortcuts import redirect
from django.shortcuts import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.template import RequestContext, loader
# import the User class in models.py
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
# import the auth.models User
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate, logout
from WebApp.models import *
@login_required
def index(request):
    """Render the landing page for the authenticated user."""
    print("in the index function")
    return render(request, 'WebApp/index.html', {'user': request.user})
# registration is a normal route; login is handled by "django.contrib.views.login"
def registration(request):
    """Handle user sign-up.

    GET renders the empty registration form. POST validates that both
    password fields match and the username is free; on success the user is
    created, authenticated, logged in and redirected to the message page,
    otherwise the form is re-rendered with the collected error messages.
    """
    errors = []
    context = {}
    if request.method == "GET":
        return render(request, 'WebApp/register.html', context)
    # add 'errors' attribute to the context
    context['errors'] = errors
    password1 = request.POST['password']
    password2 = request.POST['password_confirmation']
    if password1 != password2:
        print("Passwords did not match.")
        errors.append("Passwords did not match.")
    # .exists() asks the database for existence instead of fetching every
    # matching row just to count it
    if User.objects.filter(username=request.POST['user_name']).exists():
        print("Username is already taken.")
        errors.append("Username is already taken.")
    if errors:
        return render(request, 'WebApp/register.html', context)
    # create the user, then authenticate and log them in
    new_user = User.objects.create_user(username=request.POST['user_name'], password=password1,
                                        first_name=request.POST['first_name'], last_name=request.POST['last_name'])
    new_user.save()
    new_user = authenticate(username=request.POST['user_name'], password=password1)
    login(request, new_user)
    return redirect(reverse('message'))
@login_required
def message(request):
    """Render the message page."""
    print("in the message function.")
    return render(request, 'WebApp/message.html', {'user': request.user})


@login_required
def upload(request):
    """Render the upload page."""
    print("in the upload function.")
    return render(request, 'WebApp/upload.html', {'user': request.user})


@login_required
def preprocess(request):
    """Render the preprocessing page."""
    print("in the preprocess function.")
    return render(request, 'WebApp/preprocessing.html', {'user': request.user})


@login_required
def visualization(request):
    """Render the KNN-result visualization page."""
    print("in the visualization function.")
    return render(request, 'WebApp/knnresult.html', {'user': request.user})


# logout view: end the session and return to the index page
def my_logout(request):
    logout(request)
    return redirect(reverse('index'))


@login_required
def honeycell(request):
    """Render the honeycell page."""
    print("in the honeycell function")
    return render(request, 'WebApp/honeycell.html', {'user': request.user})


@login_required
def honeycomb(request):
    """Render the honeycomb page."""
    print("in the honeycomb function")
    return render(request, 'WebApp/honeycomb.html', {'user': request.user})


@login_required
def analytics(request):
    """Render the analytics page."""
    print("in the analytics function")
    return render(request, 'WebApp/analytics.html', {'user': request.user})
@login_required
def add_reporter(request):
    """Render the add-reporter form (GET) or create a new Reporter (POST).

    A POST is rejected -- and the submitted values echoed back into the
    form -- when any of first_name, last_name or email already exists in
    the Reporter table. The three previously duplicated validation
    branches are folded into one loop.
    """
    print("in the add_reporter function.")
    errors = []
    context = {'user': request.user, 'errors': errors}
    if request.method == "GET":
        return render(request, 'WebApp/add_reporter.html', context)
    first_name = request.POST['first_name']
    last_name = request.POST['last_name']
    email = request.POST['email']
    # Check the fields in order and stop at the first duplicate found,
    # matching the original first_name -> last_name -> email precedence.
    for field, value in (('first_name', first_name),
                         ('last_name', last_name),
                         ('email', email)):
        # .exists() queries for existence instead of fetching matching rows
        if Reporter.objects.filter(**{field: value}).exists():
            message = "The %s already exist." % field
            print(message)
            errors.append(message)
            # echo the submitted values back so the form stays filled
            context['first_name'] = first_name
            context['last_name'] = last_name
            context['email'] = email
            return render(request, 'WebApp/add_reporter.html', context)
    new_reporter_instance = Reporter(first_name=first_name,
                                     last_name=last_name,
                                     email=email)
    new_reporter_instance.save()
    print("new_reporter_instance already save.")
    return render(request, 'WebApp/add_reporter.html', context)
from WebApp.forms import *
@login_required
def add_article(request):
    """Render the add-article form (GET) or validate and save a new Article.

    NOTE(review): the duplicate checks call form.clean_reporter() /
    clean_headline() / clean_content() / clean_pub_date() directly; the
    conventional Django pattern reads form.cleaned_data after is_valid().
    Confirm these clean_* methods exist on ArticleForm and return the
    cleaned values.
    """
    print("in the add_article function.")
    context = {}
    context['user'] = request.user
    errors = []
    context['errors'] = errors
    if request.method == "GET":
        form = ArticleForm()
        context['form'] = form
        return render(request, 'WebApp/add_article.html', context)
    else:
        # request.FILES is required so uploaded files reach the form
        form = ArticleForm(request.POST, request.FILES)
        context['form'] = form
        if not form.is_valid():
            print("The form is not valid.")
            context['form'] = form
            return render(request, 'WebApp/add_article.html', context)
        # reject a duplicate headline/content/pub_date for the same reporter
        if len(Article.objects.filter(reporter=form.clean_reporter(), headline=form.clean_headline())):
            print("The headline for this reporter already exist.")
            errors.append("The headline for this reporter already exist.")
            return render(request, 'WebApp/add_article.html', context)
        if len(Article.objects.filter(reporter=form.clean_reporter(), content=form.clean_content())):
            print("The content for this reporter already exist.")
            errors.append("The content for this reporter already exist.")
            return render(request, 'WebApp/add_article.html', context)
        if len(Article.objects.filter(reporter=form.clean_reporter(), pub_date=form.clean_pub_date())):
            print("The pub_date for this reporter already exist.")
            errors.append("The pub_date for this reporter already exist.")
            return render(request, 'WebApp/add_article.html', context)
        print("The form is valid.")
        form.save()
        print("Already save the form.")
        # re-render with a fresh, empty form after a successful save
        return render(request, 'WebApp/add_article.html', {'user': request.user, 'form': ArticleForm()})
@login_required
def show_reporters(request):
    """Render the list of all reporters."""
    print("in the function show_reporters.")
    return render(request, 'WebApp/show_reporters.html',
                  {'user': request.user, 'reporters': Reporter.objects.all()})


@login_required
def show_articles(request):
    """Render the list of all articles."""
    print("in the function show_articles.")
    return render(request, 'WebApp/show_articles.html',
                  {'user': request.user, 'articles': Article.objects.all()})
@login_required
def show_reporters_articles(request):
    """Render a table of every reporter together with their articles.

    For each reporter the template receives the reporter object, their
    articles, and a row_span (article count + 1) used for table layout.
    """
    print("in the function show_reporters_articles")
    context = {'user': request.user}
    data = []
    context['data'] = data
    for each_reporter in Reporter.objects.all():
        # query each reporter's articles once instead of three times;
        # len() evaluates (and caches) the queryset for the template too
        articles = Article.objects.filter(reporter=each_reporter)
        data.append({
            'reporter': each_reporter,
            'articles': articles,
            'row_span': len(articles) + 1,
        })
    print("%" * 30)
    print(data)
    print("%" * 30)
    return render(request, 'WebApp/show_reporters_articles.html', context)
{
"api_name": "django.shortcuts.render",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 36,
"usage_type": "call"
... |
37290486339 | from django.conf.urls import url
from . import views
# Route table for the code_test app: each regex maps to a test view.
# NOTE(review): django.conf.urls.url() is deprecated and removed in Django 4;
# newer projects use django.urls.re_path/path -- confirm the Django version
# before migrating.
urlpatterns = [
    url(r'^test_view/$', views.test_fun_1),
    url(r'^contact_us/$', views.contact_me_view),
    url(r'^register_test/$', views.register_test),
    url(r'^test_progress_bar/$', views.test_progress_bar),
    url(r'^test_choicefield/',views.test_choicefield),
    url(r'^test_tasks/',views.test_tasks),
]
| bitapardaz/bitasync | code_test/urls.py | urls.py | py | 375 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.co... |
4363721495 | from django.shortcuts import render
from matplotlib import pylab
from pylob import *
def graph():
    """Plot a fixed sample line chart and display it.

    Fix: the original called bare plot()/xlabel()/ylabel()/title()/grid()
    names that were meant to come from the misspelled `from pylob import *`
    (which fails to import); all calls now go through the `pylab` module
    that this file correctly imports from matplotlib.
    """
    x = [1, 2, 3, 4, 5, 6]
    y = [5, 2, 6, 7, 2, 7]
    pylab.plot(x, y, linewidth=2)
    pylab.xlabel('x axis')
    pylab.ylabel('y axis')
    pylab.title('sample graph')
    pylab.grid(True)
    pylab.show()
| sharmajyo/blogsite | blog/views.py | views.py | py | 242 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pylab.show",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 14,
"usage_type": "name"
}
] |
26834249137 | """
.. module:: Wairakei/Tauhara MT inversion and Temp. extrapolation
   :synopsis: Forward and inversion of MT using MCMC 1D constrained inversion.
Extrapolation from temperature profiles in wells.
Estimate Temperature distribution.
.. moduleauthor:: Alberto Ardid
University of Auckland
.. conventions::
:order in impedanze matrix [xx,xy,yx,yy]
: number of layer 3 (2 layers + half-space)
: z1 and z2 in MT object refer to thickness of two first layers
: z1 and z2 in results of MeB mcmc inversion refer to depth of the top and bottom boundaries of CC (second layer)
: cc clay cap
: distances in meters
: MeB methylene blue
: temperature in celcius
"""
# ==============================================================================
# Imports
# ==============================================================================
import numpy as np
import glob
from matplotlib import pyplot as plt
import traceback, os, sys, shutil
from multiprocessing import Pool
#from scipy.optimize import curve_fit
#import corner, emcee
import time
from lib_MT_station import *
from lib_Well import *
from lib_mcmc_MT_inv import *
from lib_mcmc_meb import *
from lib_sample_data import*
from Maping_functions import *
from misc_functios import *
from matplotlib.backends.backend_pdf import PdfPages
textsize = 15
matplotlib.rcParams.update({'font.size': textsize})
pale_orange_col = u'#ff7f0e'
pale_blue_col = u'#1f77b4'
pale_red_col = u'#EE6666'
# ==============================================================================
# ==============================================================================
if __name__ == "__main__":
## PC that the code will be be run ('ofiice', 'personalSuse', 'personalWin')
#pc = 'office'
#pc = 'personalMac'
pc = 'office_uc'
# ==============================================================================
## Set of data to work with
full_dataset = True # True always
# Profiles
prof_WRKNW6 = False
prof_WRKNW5 = False
array_WRKNW5_WRKNW6 = False
prof_WRK_EW_7 = False # PW_TM_AR
prof_WRK_SENW_8 = False # KS_OT_AR
prof_WT_NS_1 = False # KS_OT_AR
#
prof_TH_SENW_2 = False # KS_OT_AR
prof_NEMT2 = False
prof_THNW03 = False
prof_THNW04 = False
prof_THNW05 = False
#
# Filter has quality MT stations
filter_lowQ_data_MT = True
# Filter MeB wells with useless info (for prior)
filter_useless_MeB_well = False
## run with quality filter per well
filter_lowQ_data_well = True # need to be checked, not working: changing the orther of well obj list
## re model temp profiles from wells with lateral inflows ()
temp_prof_remodel_wells = False # re model base on '.'+os.sep+'corr_temp_bc'+os.sep+'RM_temp_prof.txt'
# Stations not modeled
sta_2_re_invert = False
# ==============================================================================
## Sections of the code tu run
set_up = True
mcmc_meb_inv = False
prior_MT_meb_read = False
mcmc_MT_inv = False
plot_2D_MT = False
plot_3D_MT = False
wells_temp_fit = False
sta_temp_est = False
files_paraview = False
# (0) Import data and create objects: MT from edi files and wells from spreadsheet files
if set_up:
#### Import data: MT from edi files and wells from spreadsheet files
######### MT data
if pc == 'office':
######### MT data
path_files = "D:\workflow_data\kk_full\*.edi" # Whole array
####### Temperature in wells data
path_wells_loc = "D:\Wairakei_Tauhara_data\Temp_wells\well_location_latlon.txt"
path_wells_temp = "D:\Wairakei_Tauhara_data\Temp_wells\well_depth_redDepth_temp.txt"
#path_wells_temp_date = os.sep+'Users'+os.sep+'macadmin'+os.sep+'Documents'+os.sep+'WT_MT_inv'+os.sep+'data'+os.sep+'Wairakei_Tauhara_data'+os.sep+'Temp_wells'+os.sep+'well_depth_redDepth_temp_date.txt'
path_wells_temp_date = "D:\Wairakei_Tauhara_data\Temp_wells\well_depth_redDepth_temp_date.txt"
# Column order: Well Depth [m] Interpreted Temperature [deg C] Reduced Level [m]
####### MeB data in wells
path_wells_meb = "D:\Wairakei_Tauhara_data\MeB_wells\MeB_data.txt"
#path_wells_meb = "D:\Wairakei_Tauhara_data\MeB_wells\MeB_data_sample.txt"
######### MT data
if pc == 'office_uc':
######### MT data
path_files = 'C:\\Users\\aar135\\data\\Wairakei_Tauhara_data\\MT_Survey\\EDI_Files\\*.edi' # Whole array
####### Temperature in wells data
path_wells_loc = "C:\\Users\\aar135\\data\\Wairakei_Tauhara_data\\Temp_wells\\well_location_latlon.txt"
path_wells_temp = "C:\\Users\\aar135\\data\\Wairakei_Tauhara_data\\Temp_wells\\well_depth_redDepth_temp.txt"
#path_wells_temp_date = os.sep+'Users'+os.sep+'macadmin'+os.sep+'Documents'+os.sep+'WT_MT_inv'+os.sep+'data'+os.sep+'Wairakei_Tauhara_data'+os.sep+'Temp_wells'+os.sep+'well_depth_redDepth_temp_date.txt'
path_wells_temp_date = "C:\\Users\\aar135\\data\\Wairakei_Tauhara_data\\Temp_wells\\well_depth_redDepth_temp_date.txt"
# Column order: Well Depth [m] Interpreted Temperature [deg C] Reduced Level [m]
####### MeB data in wells
path_wells_meb = "C:\\Users\\aar135\\data\\Wairakei_Tauhara_data\\MeB_wells\\MeB_data.txt"
#path_wells_meb = "D:\Wairakei_Tauhara_data\MeB_wells\MeB_data_sample.txt"
## Data paths for personal's pc SUSE (uncommend the one to use)
if pc == 'personalMac':
######### MT data
path_files = os.sep+'Users'+os.sep+'macadmin'+os.sep+'Documents'+os.sep+'WT_MT_inv'+os.sep+'data'+os.sep+'Wairakei_Tauhara_data'+os.sep+'MT_survey'+os.sep+'EDI_Files'+os.sep+'*.edi'
# Whole array
####### Temperature in wells data
path_wells_loc = os.sep+'Users'+os.sep+'macadmin'+os.sep+'Documents'+os.sep+'WT_MT_inv'+os.sep+'data'+os.sep+'Wairakei_Tauhara_data'+os.sep+'Temp_wells'+os.sep+'well_location_latlon.txt'
path_wells_temp = os.sep+'Users'+os.sep+'macadmin'+os.sep+'Documents'+os.sep+'WT_MT_inv'+os.sep+'data'+os.sep+'Wairakei_Tauhara_data'+os.sep+'Temp_wells'+os.sep+'well_depth_redDepth_temp_fixTH12_rmTHM24_fixWK404.txt'
#path_wells_temp_date = os.sep+'Users'+os.sep+'macadmin'+os.sep+'Documents'+os.sep+'WT_MT_inv'+os.sep+'data'+os.sep+'Wairakei_Tauhara_data'+os.sep+'Temp_wells'+os.sep+'well_depth_redDepth_temp_date.txt'
path_wells_temp_date = os.sep+'Users'+os.sep+'macadmin'+os.sep+'Documents'+os.sep+'WT_MT_inv'+os.sep+'data'+os.sep+'Wairakei_Tauhara_data'+os.sep+'Temp_wells'+os.sep+'well_depth_redDepth_temp_date_3_new_wells.txt'
####### MeB data in wells
path_wells_meb = os.sep+'Users'+os.sep+'macadmin'+os.sep+'Documents'+os.sep+'WT_MT_inv'+os.sep+'data'+os.sep+'Wairakei_Tauhara_data'+os.sep+'MeB_wells'+os.sep+'MeB_data.txt'
## Create a directory of the name of the files of the stations
pos_ast = path_files.find('*')
file_dir = glob.glob(path_files)
#########################################################################################
## Create station objects
# Defined lists of MT station
if full_dataset:
sta2work = [file_dir[i][pos_ast:-4] for i in range(len(file_dir))]
if prof_WRKNW5:
sta2work = ['WT039a','WT024a','WT030a','WT501a','WT502a','WT060a','WT071a', \
'WT068a','WT070b','WT223a','WT107a','WT111a']
#sta2work = ['WT111a']
if prof_WRK_EW_7:
sta2work = ['WT169a','WT008a','WT006a','WT015a','WT023a','WT333a','WT060a', \
'WT507a','WT103a','WT114a','WT140a','WT153b','WT172a','WT179a'] # 'WT505a','WT079a','WT148a'
if prof_WRK_SENW_8:
sta2work = ['WT225a','WT066a','WT329a','WT078a','WT091a','WT107a','WT117b',
'WT122a','WT130a','WT140a','WT152a','WT153b'] # ,'WT150b'
if prof_WT_NS_1:
sta2work = ['WT061a','WT063a','WT069b','WT513a','WT082a','WT098a','WT096a', \
'WT099a','WT119a','WT125a','WT128a','WT036a','WT308a','WT156a','WT028a',\
'WT086a','WT055a','WT300a'] # 'WT117b'
if prof_TH_SENW_2:
sta2work = ['WT192a','WT306a','WT149a','WT328a','WT323a','WT199a',
'WT156a','WT166a','WT168a','WT185a','WT040a','WT313a','WT202a',\
'WT197a'] # ,'WT150b', 'WT340a', 'WT307a',
#########################################################################################
## Loop over the file directory to collect the data, create station objects and fill them
station_objects = [] # list to be fill with station objects
count = 0
# remove bad quality stations from list 'sta2work' (based on inv_pars.txt)
if filter_lowQ_data_MT:
name_file = '.'+os.sep+'mcmc_inversions'+os.sep+'00_global_inversion'+os.sep+'inv_pars.txt'
BQ_sta = [x.split()[0][:-4] for x in open(name_file).readlines() if x[0]!='#' and x[-2] is '0']
sta2work = [x for x in sta2work if not x in BQ_sta]
#
for file_aux in file_dir:
if (file_aux[pos_ast:-4] in sta2work and file_aux[pos_ast:-4] != 'WT067a'):# incomplete station WT067a, no tipper
file = file_aux[pos_ast:] # name on the file
sta_obj = Station(file, count, path_files)
sta_obj.read_edi_file()
## correction in elevation for discrepancy between elev in wells and MT stations (temporal solition, to check)
sta_obj.elev = float(sta_obj.elev) - 42.
##
sta_obj.rotate_Z()
sta_obj.app_res_phase()
# import PT derotated data
sta_obj.read_PT_Z(pc = pc)
sta_obj.app_res_phase() # [self.rho_app, self.phase_deg, self.rho_app_er, self.phase_deg_er]
## Create station objects and fill them
station_objects.append(sta_obj)
count += 1
#########################################################################################
#########################################################################################
## Import wells data:
#wl_name, wl_prof_depth, wl_prof_depth_red, wl_prof_temp, dir_no_depth_red = \
# read_well_temperature(path_wells_temp_date)
wl_name, wl_prof_depth, wl_prof_depth_red, wl_prof_temp, dir_no_depth_red, wl_prof_date = \
read_well_temperature_date(path_wells_temp_date)
# # Note: dir_no_depth_red contain a list of wells with no information of reduced depth
# ## Recover location for wells from path_wells_loc
wells_location = read_well_location(path_wells_loc)
# # Note: wells_location = [[wl_name1,lat1,lon1,elev1],...] list of arrays
# ## Recover MeB data for wells from path_wells_meb
wl_name_meb, wl_prof_depth_meb, wl_prof_meb = read_well_meb(path_wells_meb)
# name exepctions:
wl_name_meb = wl_name_meb[32:]
#for i, wl_meb in enumerate(wl_name_meb):
# if wl_meb in ['WK409','WK123','WK124']: # wells with an A in temp profiles but not A in MeB profiles
# wl_name_meb[i] = wl_meb+'A'
#########################################################################################
## Create wells objects
# Defined lists of wells
if full_dataset:
wl2work = wl_name
# add to wl2work names of well with meb data and no temp
wls_meb_notemp = []
for wl_meb in wl_name_meb:
if wl_meb not in wl_name:
#wl2work.append(wl_meb)
wls_meb_notemp.append(wl_meb)
# list of wells with bad quality temperature, wells with bas quality temp data
wls_BQ_temp = []
try:
name_file = '.'+os.sep+'corr_temp_bc'+os.sep+'Q_temp_prof.txt'
except:
try:
name_file = '.'+os.sep+'corr_temp_bc'
except:
os.mkdir( '.'+os.sep+'corr_temp_bc')
BQ_wls = [x.split()[0] for x in open(name_file).readlines() if x[0]!='#' and x[-2] is '0']
[wls_BQ_temp.append(wl_bq) for wl_bq in BQ_wls]
#########################################################################################
# ## Loop over the wells to create objects and assing data attributes
wells_objects = [] # list to be fill with station objects
count = 0
count2 = 0
    # Build one Wells object per well in wl2work and attach its location and the
    # temperature profiles of the most recent interpretation.
    for wl in wl_name:
        if wl in wl2work: # and wl != 'THM24':
            # create well object
            wl_obj = Wells(wl, count)
            # Search for location of the well and add to attributes
            for i in range(len(wells_location)):
                # NOTE(review): this rebinds the outer list 'wl_name' to a scratch
                # string; the enclosing loop is unaffected (its iterator is already
                # captured) but 'wl_name' no longer holds the list afterwards
                wl_name = wells_location[i][0]
                if wl_obj.name == wl_name:
                    wl_obj.lat_dec = wells_location[i][2]
                    wl_obj.lon_dec = wells_location[i][1]
                    wl_obj.elev = wells_location[i][3]
            # check if well have meb data and no temp data
            #if wl in wls_meb_notemp:
            #    wl_obj.no_temp = True
            # if not wl_obj.no_temp:
            ## load data attributes
            ## filter the data to the most recent one (well has overlap data cooresponding to reintepretations)
            filter_by_date = True
            filter_by_temp = False
            if filter_by_date:
                #year = max(wl_prof_date[count2]) # last year of interpretation
                wl_prof_date[count2].sort() # last year of interpretation
                # keep only profiles from the most recent interpretation year
                idx_year = [i for i, x in enumerate(wl_prof_date[count2]) if x == wl_prof_date[count2][-1]] # great
                if len(idx_year) < 2:
                    # too few profiles: also accept the second most recent year
                    idx_year = [i for i, x in enumerate(wl_prof_date[count2]) if (x == wl_prof_date[count2][-1] or x == wl_prof_date[count2][-2])] # great
                # condition for data in part. wells (manual per-well overrides)
                if wl == 'WK047':
                    idx_year = [0,2,-1]
                if wl == 'WK028':
                    idx_year = [-1]
                if wl == 'WK401':
                    idx_year = [i for i, x in enumerate(wl_prof_date[count2])]
                if wl == 'WK045':
                    idx_year = [i for i, x in enumerate(wl_prof_date[count2])]
                if wl == 'WK005':
                    wdata = [1,2]
                    idx_year = [i for i, x in enumerate(wl_prof_date[count2]) if i not in wdata]
if wl == 'TH12':
idx_year = [i for i, x in enumerate(wl_prof_date[count2]) if x is not '2016']
                if wl == 'WK219':
                    idx_year = [i for i, x in enumerate(wl_prof_date[count2]) if i != 5]
                # if wl == 'WK684':
                #     idx_year = [i for i in idx_year if i != 3]
                # select the chosen interpretations
                wl_obj.depth = [wl_prof_depth[count2][i] for i in idx_year]
                wl_obj.red_depth = [wl_prof_depth_red[count2][i] for i in idx_year]
                wl_obj.temp_prof_true = [wl_prof_temp[count2][i] for i in idx_year]
            elif filter_by_temp:
                pass
            else:
                # no filtering: keep every interpretation
                wl_obj.depth = wl_prof_depth[count2]
                wl_obj.red_depth = wl_prof_depth_red[count2]
                wl_obj.temp_prof_true = wl_prof_temp[count2]
            # keep unfiltered copies of the raw profiles
            wl_obj.depth_raw = wl_prof_depth[count2]
            wl_obj.red_depth_raw = wl_prof_depth_red[count2]
            wl_obj.temp_prof_true_raw = wl_prof_temp[count2]
            # check if measure points are too close
            # find indexes of repeat values in red_depth and create vectors with no repetitions (ex. well WK401)
            wl_obj.red_depth, rep_idx= np.unique(wl_obj.red_depth, return_index = True)
            temp_aux = [wl_obj.temp_prof_true[i] for i in rep_idx]
            wl_obj.temp_prof_true = temp_aux
            ## add an initial point to the temp profile at 0 depth (elevation of the well)
            if abs(wl_obj.red_depth[-1] - wl_obj.elev) > 10.: #[m]
                wl_obj.red_depth = np.append(wl_obj.red_depth, wl_obj.elev)
                if wl_obj.temp_prof_true[-1] < 10.:
                    wl_obj.temp_prof_true = np.append(wl_obj.temp_prof_true, wl_obj.temp_prof_true[-1] - 5.)
                else:
                    wl_obj.temp_prof_true = np.append(wl_obj.temp_prof_true, 10.0)
            ## sort depth and temp based on depth (from max to min)
            wl_obj.red_depth, wl_obj.temp_prof_true = zip(*sorted(zip(wl_obj.red_depth, wl_obj.temp_prof_true), reverse = True))
            ## resample .temp_prof_true and add to attribute prof_NEMT2 .temp_prof_rs
            ## method of interpolation : Cubic spline interpolation
            ## inverse order: wl_obj.red_depth starts at the higher value (elev)
            xi = np.asarray(wl_obj.red_depth)
            yi = np.asarray(wl_obj.temp_prof_true)
            N_rs = 500 # number of resample points data
            xj = np.linspace(xi[0],xi[-1],N_rs)
            yj = cubic_spline_interpolation(xi,yi,xj, rev = True)
            # add attributes
            wl_obj.red_depth_rs = xj
            wl_obj.temp_prof_rs = yj
            # check if a remodel is needed for the temperature profile in the well:
            # it removes some points before the cubic spline interpolation (-> resample profile)
            if temp_prof_remodel_wells:
                # read list_rm = RM_temp_prof.txt and make a copy of red_depth and temp_prof_true
                with open('.'+os.sep+'corr_temp_bc'+os.sep+'RM_temp_prof.txt') as p:
                    next(p) # skip header
                    for line in p:
                        line = line.strip('\n')
                        currentline = line.split('\t')
                        # check if the wl matches any of the wells in list_rm[0]
                        if currentline[0] == wl:
                            # if it matches -> remove data points from red_depth2 and temp_prof_true2 between list_rm[1] and list_rm[2]
                            # depth_from
                            val, idx_from = find_nearest(wl_obj.elev - np.asarray(wl_obj.red_depth), float(currentline[1]))
                            # depth_to
                            val, idx_to = find_nearest(wl_obj.elev - np.asarray(wl_obj.red_depth), float(currentline[2]))
                            # aux list red_depth2
                            red_depth2 = wl_obj.red_depth[:idx_from] + wl_obj.red_depth[idx_to:]
                            temp_prof_true2 = wl_obj.temp_prof_true[:idx_from] + wl_obj.temp_prof_true[idx_to:]
                            # perform the SCI on red_depth2 and temp_prof_true2
                            xi = np.asarray(red_depth2)
                            yi = np.asarray(temp_prof_true2)
                            N_rs = 500 # number of resample points data
                            xj = np.linspace(xi[0],xi[-1],N_rs)
                            yj = cubic_spline_interpolation(xi,yi,xj, rev = True)
                            # add attributes (overwrites the resample computed above)
                            wl_obj.red_depth_rs = xj
                            wl_obj.temp_prof_rs = yj
                            # save wl_obj.red_depth_rs and wl_obj.temp_prof_rs
                        else:
                            pass
            # if wl == 'TH20':
            #     f1 = plt.figure(figsize=[9.5,7.5])
            #     ax1 = plt.axes([0.15,0.15,0.75,0.75])
            #     ax1.plot(wl_obj.temp_prof_rs,-1*(wl_obj.elev - wl_obj.red_depth_rs), 'r-')
            #     ax1.plot(wl_obj.temp_prof_true,-1*(wl_obj.elev - np.asarray(wl_obj.red_depth)),'b*')
            #     plt.grid()
            #     plt.show()
            if filter_lowQ_data_well:
                # mark wells previously flagged as bad quality so they are skipped later
                if wl_obj.name in wls_BQ_temp:
                    wl_obj.no_temp = True
            ## add well object to directory of well objects
            wells_objects.append(wl_obj)
            count += 1
            #if not wl_obj.no_temp:
            count2 +=1
# create well objects of wells with meb data and no temp data
for wl in wls_meb_notemp:
# create well object
wl_obj = Wells(wl, count)
# Search for location of the well and add to attributes
for i in range(len(wells_location)):
wl_name = wells_location[i][0]
if wl_obj.name == wl_name:
wl_obj.lat_dec = wells_location[i][2]
wl_obj.lon_dec = wells_location[i][1]
wl_obj.elev = wells_location[i][3]
wells_objects.append(wl_obj)
count += 1
# #if filter_lowQ_data_well:
# if True:
# name_file = '.'+os.sep+'corr_temp_bc'+os.sep+'Q_temp_prof.txt'
# BQ_wls = [x.split()[0] for x in open(name_file).readlines() if x[0]!='#' and x[-2] is '0']
# wl2work = [x for x in wl2work if not x in BQ_wls]
## Loop wells_objects (list) to assing data attributes from MeB files
# list of wells with MeB (names)
if filter_useless_MeB_well:
# extract meb mcmc results from file
useless_MeB_well = []
with open('.'+os.sep+'mcmc_meb'+os.sep+'00_global_inversion'+os.sep+'wls_meb_Q.txt') as p:
next(p) # skip header
for line in p:
line = line.strip('\n')
currentline = line.split(",")
if int(currentline[3]) == 0:
useless_MeB_well.append(str(currentline[0]))
wells_meb = []
count_meb_wl = 0
for wl in wells_objects:
if wl.name in wl_name_meb:
idx = wl_name_meb.index(wl.name)
wl.meb = True
wl.meb_prof = wl_prof_meb[idx]
wl.meb_depth = wl_prof_depth_meb[idx]
count_meb_wl+=1
#wells_meb.append(wl.name)
try:
if wl.name in useless_MeB_well:
wl.meb = False
except:
pass
## create folder structure
if True:
try:
os.mkdir('.'+os.sep+'corr_temp_bc'+os.sep+'00_global')
except:
pass
for wl in wells_objects:
try:
os.mkdir('.'+os.sep+'corr_temp_bc'+os.sep+wl.name)
except:
pass
    # (1) Run MCMC for MeB priors: one inversion per well carrying MeB data,
    #     collecting the fit figures into a single PDF.
    if mcmc_meb_inv:
        pp = PdfPages('fit.pdf')
        start_time = time.time()
        # NOTE(review): 'count' (used above as the well-object index) is reused
        # here as a 1-based progress counter
        count = 1
        temp_full_list_z1 = []
        temp_full_list_z2 = []
        print("(1) Run MCMC for MeB priors")
        for wl in wells_objects:
            if wl.meb:
                if count >= 0:
                #if wl.name == 'WKM15':
                    mes_err = 2.
                    ## filter meb logs: when values are higher than 5 % => set it to 5 % (in order to avoid control of this points)
                    fiter_meb = True # fix this per station criteria
                    if fiter_meb:
                        # cap MeB values at filt_val so outliers do not dominate the fit
                        filt_val= 7.
                        wl.meb_prof = [filt_val if ele > filt_val else ele for ele in wl.meb_prof]
                    print(wl.name + ': {:}/{:}'.format(count, count_meb_wl))
                    mcmc_wl = mcmc_meb(wl, norm = 2., scale = 'lin', mes_err = mes_err, walk_jump=3000)
                    mcmc_wl.run_mcmc()
                    mcmc_wl.plot_results_mcmc()
                    #
                    if wl.no_temp:
                        f = mcmc_wl.sample_post(exp_fig = True, plot_hist_bounds = True, plot_fit_temp = False) # Figure with fit to be add in pdf pp
                    else:
                        f = mcmc_wl.sample_post(exp_fig = True, plot_hist_bounds = True, plot_fit_temp = False, wl_obj = wl, \
                            temp_full_list_z1 = temp_full_list_z1, temp_full_list_z2 = temp_full_list_z2) # Figure with fit to be add in pdf pp
                    #f = mcmc_wl.sample_post_temp(exp_fig = True) # Figure with fit to be add in pdf
                    pp.savefig(f)
                    ## calculate estimate parameters (percentiles)
                    mcmc_wl.model_pars_est()
                    count += 1
        ## save lists: temp_full_list_z1, temp_full_list_z2
        #with open('corr_z1_z1_temp_glob.txt', 'w') as f:
        #    for f1, f2 in zip(temp_full_list_z1, temp_full_list_z2):
        #        print(f1, f2, file=f)
        #shutil.move('corr_z1_z1_temp_glob.txt','.'+os.sep+'mcmc_meb'+os.sep+'00_global_inversion'+os.sep+'corr_cc_temp'+os.sep+'corr_z1_z1_temp_glob.txt')
        ## elapsed time for the inversion (every station in station_objects)
        enlap_time = time.time() - start_time # elapsed time
        ## print time consumed
        print("Time consumed:\t{:.1f} min".format(enlap_time/60))
        pp.close()
        # move figure fit to global results folder
        shutil.move('fit.pdf','.'+os.sep+'mcmc_meb'+os.sep+'00_global_inversion'+os.sep+'00_fit.pdf')
        # print histogram of temperatures of z1 and z2 for the whole net
        #g = plot_fit_temp_full(temp_full_list_z1,temp_full_list_z2)
        #g = hist_z1_z2_temp_full()
        #g.savefig('.'+os.sep+'mcmc_meb'+os.sep+'00_global_inversion'+os.sep+'01_temp_z1_z2_full_net.png') # save the figure to file
        #plt.close(g) # close the figure
    # (2) Construct priors for MT stations from the MeB MCMC results and
    #     (optionally) plot the prior surfaces.
    if prior_MT_meb_read:
        # attribute in meb wells for path to mcmc results
        for wl in wells_objects:
            if wl.meb:
                wl.path_mcmc_meb = '.'+os.sep+str('mcmc_meb')+os.sep+wl.name
        # Calculate prior values for boundaries of the cc in station
        # (prior consist of mean and std for parameter, calculate as weighted(distance) average from nearest wells)
        # Function assign results as attributes for MT stations in station_objects (list)
        calc_prior_meb(station_objects, wells_objects, slp = 4*10., quadrant = False) # calc prior at MT stations position
        # plot surface of prior (disabled)
        if False:
            if False: # by Delanuay triangulation
                path_base_image = '.'+os.sep+'base_map_img'+os.sep+'WT_area_gearth_hd.jpg'
                ext_file = [175.934859, 176.226398, -38.722805, -38.567571]
                x_lim = None #[176.0,176.1]
                y_lim = None #[-38.68,-38.58]
                path_q_wells = '.'+os.sep+'mcmc_meb'+os.sep+'00_global_inversion'+os.sep+'corr_cc_temp'+os.sep+'Q_meb_inv_results.txt'
                # Figure: general
                file_name = 'trig_meb_prior_wells_WT'
                triangulation_meb_results(station_objects, wells_objects, path_base_image = path_base_image, xlim = x_lim, ylim = y_lim, ext_img = ext_file,\
                    file_name = file_name, format = 'png', filter_wells_Q = path_q_wells)
                # Figure: z1_mean
                file_name = 'trig_meb_prior_wells_WT_z1_mean'
                triangulation_meb_results(station_objects, wells_objects, path_base_image = path_base_image, xlim = x_lim, ylim = y_lim, ext_img = ext_file,\
                    file_name = file_name, format = 'png', value = 'z1_mean', vmin=50, vmax=900, filter_wells_Q = path_q_wells)
                # Figure: z2_mean
                file_name = 'trig_meb_prior_wells_WT_z2_mean'
                triangulation_meb_results(station_objects, wells_objects, path_base_image = path_base_image, xlim = x_lim, ylim = y_lim, ext_img = ext_file,\
                    file_name = file_name, format = 'png', value = 'z2_mean', vmin=50, vmax=900, filter_wells_Q = path_q_wells)
                # Figure: z1_std
                file_name = 'trig_meb_prior_wells_WT_z1_std'
                triangulation_meb_results(station_objects, wells_objects, path_base_image = path_base_image, xlim = x_lim, ylim = y_lim, ext_img = ext_file,\
                    file_name = file_name, format = 'png', value = 'z1_std', filter_wells_Q = path_q_wells, vmin=50, vmax=900)
                # Figure: z2_std
                file_name = 'trig_meb_prior_wells_WT_z2_std'
                triangulation_meb_results(station_objects, wells_objects, path_base_image = path_base_image, xlim = x_lim, ylim = y_lim, ext_img = ext_file,\
                    file_name = file_name, format = 'png', value = 'z2_std', filter_wells_Q = path_q_wells, vmin=50, vmax=900)
            if True: # by gridding surface
                # define region to grid (second assignment overrides the first)
                coords = [175.97,176.178,-38.69,-38.59] # [min lon, max lon, min lat, max lat]
                coords = [175.99,176.178,-38.69,-38.59] # [min lon, max lon, min lat, max lat]
                # fn. for griding and calculate prior => print .txt with [lon, lat, mean_z1, std_z1, mean_z2, std_z2]
                file_name = 'grid_meb_prior'
                path_output = '.'+os.sep+'plain_view_plots'+os.sep+'meb_prior'
                try:
                    os.mkdir('.'+os.sep+'plain_view_plots'+os.sep+'meb_prior')
                except:
                    pass
                ##
                # image background
                # image 1
                path_base_image = '.'+os.sep+'base_map_img'+os.sep+'WT_area_gearth_hd.jpg'
                ext_file = [175.934859, 176.226398, -38.722805, -38.567571]
                # image 2
                #path_base_image = '.'+os.sep+'base_map_img'+os.sep+'WT_area_gearth_hd_3.jpg'
                #ext_file = [175.781956, 176.408620, -38.802528, -38.528097]
                #
                x_lim = [175.95,176.21]
                y_lim = None #[-38.68,-38.58]
                #x_lim = [175.99,176.21]
                #y_lim = [-38.75,-38.58]
                # call function
                grid_meb_prior(wells_objects, coords = coords, n_points = 20, slp = 4*10., \
                    file_name = file_name, path_output = path_output,plot = True, \
                    path_base_image = path_base_image, ext_img = ext_file, \
                    xlim = x_lim, ylim = y_lim, cont_plot = True, scat_plot = True)
    # (3) Run MCMC inversion for each station, obtaining a 1D 3-layer resistivity model.
    # Sample posterior, construct uncertain resistivity distribution and create result plots
    if mcmc_MT_inv:
        ## create pdf file to save the fit results for the whole inversion
        pdf_fit = False
        if pdf_fit:
            pp = PdfPages('fit.pdf')
        start_time_f = time.time()
        prior_meb = False # if False -> None; extra condition inside the loop: if every app res value is > 10 ohm m, prior_meb False
        prior_meb_weigth = 1.0
        # process stations in ascending .ref order
        station_objects.sort(key=lambda x: x.ref, reverse=False)
        # run inversion
        if True:
            for sta_obj in station_objects:
                # skip stations below this .ref threshold (manual restart point)
                if sta_obj.ref < 216: # start at 0
                    #if sta_obj.name[:-4] != 'WT016a': #sta2work = ['WT122','WT130a','WT115a']
                    pass
                else:
                    print('({:}/{:}) Running MCMC inversion:\t'.format(sta_obj.ref+1,len(station_objects))+sta_obj.name[:-4])
                    verbose = True
                    ## range for the parameters
                    par_range = [[.01*1e2,2.*1e3],[.5*1e1,1.*1e3],[1.*1e1,1.*1e5],[1.*1e0,.5*1e1],[.5*1e1,1.*1e3]]
                    #par_range = [[.01*1e2,.5*1e3],[.5*1e1,.5*1e3],[1.*1e1,1.*1e3],[1.*1e0,1.*1e1],[1.*1e1,1.*1e3]]
                    #par_range = [[.01*1e2,2.*1e3],[.5*1e1,1.*1e3],[1.*1e1,1.*1e5],[1.*1e0,20.*1e1],[.5*1e1,1.*1e3]]
                    # error floor
                    error_max_per = [5.,2.5] # [10.,5.] [20.,10.]
                    ## inv_dat: weighted data to invert and range of periods
                    ## inv_dat = [1,1,1,1] # [appres zxy, phase zxy, appres zyx, phase zyx]
                    ## range_p = [0.001,10.] # range of periods
if True: # import inversion parameters from file
name_file = '.'+os.sep+'mcmc_inversions'+os.sep+'00_global_inversion'+os.sep+'inv_pars.txt'
inv_pars = [x.split() for x in open(name_file).readlines() if x[0]!='#']
inv_pars_names = [x[0] for x in inv_pars]
idx = inv_pars_names.index(sta_obj.name)
# load pars
range_p = [float(inv_pars[idx][1]), float(inv_pars[idx][2])] # range of periods
if inv_pars[idx][3] is '2':
inv_dat = [1,1,1,1] # [appres zxy, phase zxy, appres zyx, phase zyx]
elif inv_pars[idx][3] is '1':
inv_dat = [0,0,1,1] # [appres zxy, phase zxy, appres zyx, phase zyx]
elif inv_pars[idx][3] is '0':
inv_dat = [1,1,0,0] # [appres zxy, phase zxy, appres zyx, phase zyx]
else:
# Default values (inv pars)
range_p = [0.001,10] # range of periods, default values
inv_dat = [1,1,1,1] # [appres zxy, phase zxy, appres zyx, phase zyx]
                # fitting mode xy or yx:
                fit_max_mode = False
                # plot the station noise (best effort; failures are ignored)
                try:
                    path_img = 'mcmc_inversions'+os.sep+sta_obj.name[:-4]
                    sta_obj.plot_noise(path_img = path_img)
                except:
                    pass
                #print('mean noise in app res XY: {:2.2f}'.format(np.mean(sta_obj.rho_app_er[1])))
                #print('mean noise in phase XY: {:2.2f}'.format(np.mean(sta_obj.phase_deg_er[1])))
                ###
                error_mean = False
                if error_mean:
                    error_max_per = [1.,1.]
                # set number of walkers and walker jumps
                nwalkers = 40
                walk_jump = 3000
                ####### condition for MeB prior
                # if prior_meb:
                #     if all([sta_obj.rho_app[1][i] > 10. for i in range(len(sta_obj.rho_app[1])-10)]):
                #         prior_meb = False
                #     if all([sta_obj.rho_app[2][i] > 10. for i in range(len(sta_obj.rho_app[2])-10)]):
                #         prior_meb = False
                # inversion pars. per station (manual per-station overrides)
                if True:
                    if sta_obj.name[:-4] == 'WT024a': # station with static shift
                        error_max_per = [5.,2.5]
                        range_p = [0.005,100.] # range of periods
                        prior_meb = True
                        # for two layers:
                        #par_range = [[.01*1e2,2.*1e3],[0.*1e1,1.*1e0],[1.*1e1,1.*1e5],[.50*1e1,.51*1e1],[.5*1e1,1.*1e3]]
                    if sta_obj.name[:-4] == 'WT039a': # station with static shift
                        range_p = [0.001,100.] # range of periods
                        error_max_per = [5.,2.5]
                        inv_dat = [0,0,1,1]
                    if sta_obj.name[:-4] == 'WT030a': # station with static shift
                        inv_dat = [1,1,0,0]
                        range_p = [0.001,10.] # range of periods
                    if sta_obj.name[:-4] == 'WT060a': # station with static shift
                        range_p = [0.005,1.] # range of periods
                        inv_dat = [1,1,0,0]
                        par_range = [[.01*1e2,.5*1e3],[.5*1e1,1.*1e3],[1.*1e1,1.*1e5],[1.*1e0,.5*1e1],[.5*1e1,1.*1e3]]
                        #error_max_per = [20.,10.]
                    if sta_obj.name[:-4] == 'WT068a': # station with static shift
                        range_p = [0,5.] # range of periods
                        error_max_per = [20.,10.]
                        #inv_dat = [1,1,0,1]
                    if sta_obj.name[:-4] == 'WT070b': # station with static shift
                        range_p = [0,5.] # range of periods
                    if sta_obj.name[:-4] == 'WT071a': # station with static shift
                        range_p = [0,5.] # range of periods
                        range_p = [0.005,3.] # range of periods
                        error_max_per = [5.,2.5]
                    if sta_obj.name[:-4] == 'WT107a': # station with static shift
                        par_range = [[.01*1e2,.5*1e3],[.5*1e1,1.*1e3],[1.*1e1,1.*1e5],[1.*1e0,.5*1e1],[.5*1e1,1.*1e3]]
                        range_p = [0.001,5.] # range of periods
                        error_max_per = [5.,2.5]
                    if sta_obj.name[:-4] == 'WT111a': # station with static shift
                        range_p = [0.001, 5.] # range of periods
                    if sta_obj.name[:-4] == 'WT223a': # station with static shift
                        range_p = [0,10.] # range of periods
                        error_max_per = [20.,5.]
                    if sta_obj.name[:-4] == 'WT501a': # station with static shift
                        range_p = [0.005,5.] # range of periods
                    if sta_obj.name[:-4] == 'WT502a': # station with static shift
                        #range_p = [0,5.] # range of periods
                        par_range = [[.01*1e2,.5*1e3],[.5*1e1,1.*1e3],[1.*1e1,1.*1e5],[1.*1e0,.5*1e1],[.5*1e1,1.*1e3]]
                        range_p = [0.005,5.] # range of periods
                    if sta_obj.name[:-4] == 'WT003a': # station with static shift
                        error_max_per = [20.,10.]
                        #inv_dat = [1,0,1,0]
                ###### run inversion
                ## print relevant information
                if verbose:
                    print('range of periods: [{:2.3f}, {:2.2f}] [s]'.format(range_p[0],range_p[1]))
                    print('inverted data: '+str(inv_dat))
                ## plot noise
                # NOTE(review): if both construction attempts below fail, 'mcmc_sta'
                # is left undefined (or stale from the previous station) and the
                # later mcmc_sta.inv() call will misbehave — the bare excepts hide this
                try:
                    mcmc_sta = mcmc_inv(sta_obj, prior='uniform', inv_dat = inv_dat, prior_input = par_range, \
                        walk_jump = walk_jump, nwalkers = nwalkers, prior_meb = prior_meb, prior_meb_weigth = prior_meb_weigth,\
                            range_p = range_p, autocor_accpfrac = True, data_error = True, \
                                error_mean = error_mean, error_max_per=error_max_per)
                except: # in case that inversion breaks due to low number of independet samples
                    try:
                        # retry with more walker jumps and walkers
                        mcmc_sta = mcmc_inv(sta_obj, prior='uniform', inv_dat = inv_dat, prior_input = par_range, \
                            walk_jump = walk_jump+1000, nwalkers = nwalkers+10, prior_meb = prior_meb, prior_meb_weigth = prior_meb_weigth,\
                                range_p = range_p, autocor_accpfrac = True, data_error = True, \
                                    error_mean = error_mean, error_max_per=error_max_per)
                    except:
                        pass
                if error_max_per:
                    ## plot noise
                    try:
                        name_file='noise_appres_phase_error_floor'
                        path_img = 'mcmc_inversions'+os.sep+sta_obj.name[:-4]
                        sta_obj.plot_noise(path_img = path_img, name_file = name_file)
                    except:
                        pass
                if prior_meb:
                    if verbose:
                        #print(" wells for MeB prior: {} ".format(sta_obj.prior_meb_wl_names))
                        print("Near wells for MeB prior: ")
                        for sta in sta_obj.prior_meb_wl_names:
                            print(str(sta.name))
                        #print(" [[z1_mean,z1_std],[z2_mean,z2_std]] = {} \n".format(sta_obj.prior_meb))
                        print(" distances = {}".format(sta_obj.prior_meb_wl_dist))
                        print(" prior [z1_mean, std][z2_mean, std] = {} \n".format(sta_obj.prior_meb))
                ## run inversion
                mcmc_sta.inv()
                ## plot results (save in .png)
                if True: # plot results for full chain
                    mcmc_sta.plot_results_mcmc(chain_file = 'chain.dat', corner_plt = False, walker_plt = True)
                    #shutil.move(mcmc_sta.path_results+os.sep+'corner_plot.png', mcmc_sta.path_results+os.sep+'corner_plot_full.png')
                    shutil.move(mcmc_sta.path_results+os.sep+'walkers.png', mcmc_sta.path_results+os.sep+'walkers_full.png')
                ## sample posterior
                #mcmc_sta.sample_post()
                if pdf_fit:
                    f, g = mcmc_sta.sample_post(idt_sam = True, plot_fit = True, exp_fig = True, plot_model = True) # Figure with fit to be add in pdf (whole station)
                else:
                    mcmc_sta.sample_post(idt_sam = True, plot_fit = True, rms = True, exp_fig = False, plot_model = True) # Figure with fit to be add in pdf (whole station)
                    #mcmc_sta.sample_post(idt_sam = True, plot_fit = True, exp_fig = False, plot_model = True) # Figure with fit to be add in pdf (whole station)
                ## plot results without burn-in section
                mcmc_sta.plot_results_mcmc(chain_file = 'chain_sample_order.dat', corner_plt = True, walker_plt = False)
                shutil.move(mcmc_sta.path_results+os.sep+'corner_plot.png', mcmc_sta.path_results+os.sep+'corner_plot_burn.png')
                # save figures
                if pdf_fit:
                    pp.savefig(g)
                    pp.savefig(f)
                    plt.close('all')
                #plt.clf()
                ## calculate estimate parameters
                mcmc_sta.model_pars_est()
                ## delete chain.dat
                #os.remove('.'+os.sep+'mcmc_inversions'+os.sep+sta.name[:-4]+os.sep+'chain.dat')
        # save rms stations: gather per-station misfits into one summary file
        rms_appres_list = []
        rms_phase_list = []
        rms_file = open('.'+os.sep+'mcmc_inversions'+os.sep+'00_global_inversion'+os.sep+'rms_misfit.txt','w')
        rms_file.write('Station RMS misfit for apparent resistivity and phase, based on chi-square misfit (Pearson, 1900)'+'\n')
        for sta_obj in station_objects:
            rms_sta = np.genfromtxt('.'+os.sep+'mcmc_inversions'+os.sep+sta_obj.name[:-4]+os.sep+'rms_misfit.txt',skip_header=1).T
            rms_file.write(sta_obj.name[:-4]+'\t'+str(np.round(rms_sta[0],2))+'\t'+str(np.round(rms_sta[1],2))+'\n')
            rms_appres_list.append(rms_sta[0])
            rms_phase_list.append(rms_sta[1])
        rms_file.write('\n')
        # NOTE(review): the 'mean' row uses rms_sta (the last station only), not the
        # accumulated rms_appres_list/rms_phase_list — confirm whether the network
        # mean was intended here
        rms_file.write('mean'+'\t'+str(np.mean(rms_sta[0]))+'\t'+str(np.mean(rms_sta[1]))+'\n')
        rms_file.close()
        ## elapsed time for the inversion (every station in station_objects)
        enlap_time_f = time.time() - start_time_f # elapsed time
        ## print time consumed
        print("Time consumed:\t{:.1f} min".format(enlap_time_f/60))
        if pdf_fit:
            pp.close()
            # move figure fit to global results folder
            shutil.move('fit.pdf','.'+os.sep+'mcmc_inversions'+os.sep+'00_global_inversion'+os.sep+'00_fit.pdf')
        # save fitting plot in folder '01_sta_model'
        try:
            os.mkdir('.'+os.sep+'mcmc_inversions'+os.sep+'01_sta_model')
        except:
            pass
        for sta_obj in station_objects:
            shutil.copy('.'+os.sep+'mcmc_inversions'+os.sep+sta_obj.name[:-4]+os.sep+'app_res_fit.png', '.'+os.sep+'mcmc_inversions'+os.sep+'01_sta_model'+os.sep+'app_res_fit_'+sta_obj.name[:-4]+'.png')
    # (4) Plot 2D profile of uncertain boundaries z1 and z2 (results of mcmc MT inversion)
    if plot_2D_MT:
        print('(4) Plot 2D profile of uncertain boundaries z1 and z2 (results of mcmc MT inversion)')
        # quality inversion pars. plot (acceptance ratio and autocorrelation time)
        autocor_accpfrac = False
        # load mcmc results and assign to attributes of pars to station attributes
        load_sta_est_par(station_objects, autocor_accpfrac = autocor_accpfrac)
        # Create figure of uncertain boundaries of the clay cap and move to mcmc_inversions folder
        file_name = 'z1_z2_uncert'
        #plot_2D_uncert_bound_cc(station_objects, pref_orient = 'EW', file_name = file_name) # width_ref = '30%' '60%' '90%',
        prior_meb = False
        # select which wells to annotate on the profile, per profile flag
        if prior_meb:
            if prof_WRK_EW_7:
                plot_some_wells = ['WK681','WK122','WK315B','WKM15','WK321'] # 'WK314','WK682','WK123'
            elif prof_WRK_SENW_8:
                plot_some_wells = ['WK402','WK404','WK321','WK315B','WK318'] # 'WK308' ,'WK403'
                #'WK308','WK304','WK317','WK318'
            elif prof_TH_SENW_2:
                plot_some_wells = ['THM15','THM16','THM14','THM19','TH13','TH18','TH12']
            elif prof_WT_NS_1:
                plot_some_wells = ['THM15','THM21','THM16','THM14','THM17','THM13','WK123','WKM14','WK124','WK122']
            else:
                plot_some_wells = False
        else:
            plot_some_wells = False
        # for plotting lithology
        litho_plot = False
        if litho_plot:
            if prof_WRK_EW_7:
                plot_litho_wells = ['WK315B','WKM14','WKM15','WK681'] # 'WK315b','WKM14','WKM15','WK681'
                plot_litho_wells = ['WK315B','WKM14','WKM15','WK681','WK263','WK124A','WK317'] # 'WK315b','WKM14','WKM15','WK681','WK318'
            elif prof_WRK_SENW_8:
                plot_litho_wells = ['WK402','WK404','WK317','WK315B'] # 'WK403'
                plot_litho_wells = ['WK402','WK404','WK317','WK315B','WK318'] # 'WK403'
            else:
                plot_litho_wells = False
        else:
            plot_litho_wells = False
        if False: # plot resisitvity boundary reference
            if prof_WRK_EW_7:
                # risk, 1984
                path_rest_bound_inter = '.'+os.sep+'base_map_img'+os.sep+'extras'+os.sep+'mt_prof'+os.sep+'rb_1980_coord_for_WK7.txt'
                # update, 2015, IN
                #path_rest_bound_inter = '.'+os.sep+'base_map_img'+os.sep+'extras'+os.sep+'mt_prof'+os.sep+'rb_2015_IN_coord_for_WK7.txt'
                # update, 2015, OUT
                #path_rest_bound_inter = '.'+os.sep+'base_map_img'+os.sep+'extras'+os.sep+'mt_prof'+os.sep+'rb_2015_OUT_coord_for_WK7.txt'
            elif prof_WRK_SENW_8:
                path_rest_bound_inter = '.'+os.sep+'base_map_img'+os.sep+'extras'+os.sep+'mt_prof'+os.sep+'rb_1980_coord_for_WK8.txt'
                # update, 2015, IN
                #path_rest_bound_inter = '.'+os.sep+'base_map_img'+os.sep+'extras'+os.sep+'mt_prof'+os.sep+'rb_2015_IN_coord_for_WK8.txt'
                # update, 2015, OUT
                #path_rest_bound_inter = '.'+os.sep+'base_map_img'+os.sep+'extras'+os.sep+'mt_prof'+os.sep+'rb_2015_OUT_coord_for_WK8.txt'
            else:
                path_rest_bound_inter = False
        # NOTE(review): with the 'if False:' above disabled, path_rest_bound_inter is
        # never assigned in this section yet is passed below as rest_bound_ref —
        # NameError unless it is defined earlier in the script; confirm.
        # plot temperature contours
        temp_count = True
        if temp_count:
            if prof_WRK_EW_7:
                temp_iso = [100.,160.,180.,200.]
                temp_count_wells = ['WK650','WK210','WK217','WK052','WK019','WK060','WK048','WK045','WK059','WK301'] # 'WK681','WKM14','WKM15','WK313', 'WK045'
                position_label = None
            elif prof_WRK_SENW_8:
                temp_iso = [100.,140.,180.,200.]
                #temp_count_wells = ['WK402','WK407','WK226','WK310','WK301'] # WK321,WK036 WK133
                temp_count_wells = ['WK402','WK407','WK226','WK310','WK301']
                position_label = 'mid'
            else:
                temp_count_wells = False
                temp_iso = False
                position_label = None
        ## thickness of second layer to be considered 'no conductor'
        mask_no_cc = 80.#112.
        ##
        pref_orient = 'EW'
        if prof_WT_NS_1:
            pref_orient = 'NS'
        if prof_WRK_EW_7:
            title = 'Uncertain conductor boundaries: profile WK7'
        else:
            title = False
        plot_2D_uncert_bound_cc_mult_env(station_objects, pref_orient = pref_orient, file_name = file_name, \
            width_ref = '90%', prior_meb = prior_meb, wells_objects = wells_objects , plot_some_wells = plot_some_wells,\
                mask_no_cc = mask_no_cc, rest_bound_ref = path_rest_bound_inter, plot_litho_wells = plot_litho_wells,\
                    temp_count_wells = temp_count_wells, temp_iso = temp_iso, position_label = position_label, title = title)
        shutil.move(file_name+'.png','.'+os.sep+'mcmc_inversions'+os.sep+'00_global_inversion'+os.sep+file_name+'.png')
        # plot autocorrelation time and acceptance factor
        if autocor_accpfrac:
            file_name = 'autocor_accpfrac'
            plot_profile_autocor_accpfrac(station_objects, pref_orient = 'EW', file_name = file_name)
            shutil.move(file_name+'.png','.'+os.sep+'mcmc_inversions'+os.sep+'00_global_inversion'+os.sep+file_name+'.png')
        # plot profile of KL divergence
        if False:
            file_name = 'KL_div_prof'
            plot_profile_KL_divergence(station_objects, wells_objects, pref_orient = 'EW', file_name = file_name)
            shutil.move(file_name+'.png','.'+os.sep+'mcmc_inversions'+os.sep+'00_global_inversion'+os.sep+file_name+'.png')
        ## create text file for google earth, containing names of MT stations considered
        for_google_earth(station_objects, name_file = '00_stations_4_google_earth.txt', type_obj = 'Station')
        shutil.move('00_stations_4_google_earth.txt','.'+os.sep+'mcmc_inversions'+os.sep+'00_global_inversion' \
            +os.sep+'00_stations_4_google_earth.txt')
        # plot uncertainty in boundary depths
        if True:
            file_name = 'bound_uncert'
            plot_bound_uncert(station_objects, file_name = file_name) #
            shutil.move(file_name+'.png','.'+os.sep+'mcmc_inversions'+os.sep+'00_global_inversion'+os.sep+file_name+'.png')
    # (4.1) Plot surface of uncertain boundaries z1 and z2 (results of mcmc MT inversion).
    # Three sub-modes below: plain view with circles (off), gridded contours (off),
    # and scatter plot colored by depth (on).
    if plot_3D_MT:
        print('(4.1) Plot surface of uncertain boundaries z1 and z2 (results of mcmc MT inversion)')
        if False: # plot plain view with circles
            ##
            ext_file = [175.948466, 176.260520, -38.743590, -38.574484]
            x_lim = [175.948466, 176.260520]
            y_lim = [-38.743590,-38.574484]
            type_plot = 'scatter'
            path_plots = '.'+os.sep+'plain_view_plots'# path to place the outputs
            ##
            # for plot with rest bound background
            path_base_image = '.'+os.sep+'base_map_img'+os.sep+'WT_res_map_gearth_2.jpg'
            bound2plot = 'top' # top bound
            file_name = 'interface_LRA_'+bound2plot+'_rest_bound'
            plot_surface_cc_count(station_objects, wells_objects, file_name = file_name, bound2plot = bound2plot, type_plot = type_plot,format = 'png', \
                path_base_image = path_base_image, alpha_img = 0.6, ext_img = ext_file, xlim = x_lim, ylim = y_lim, hist_pars = False, path_plots = path_plots)
            bound2plot = 'bottom'
            file_name = 'interface_LRA_'+bound2plot+'_rest_bound'
            plot_surface_cc_count(station_objects, wells_objects, file_name = file_name, bound2plot = bound2plot, type_plot = type_plot,format = 'png', \
                path_base_image = path_base_image, alpha_img = 0.6, ext_img = ext_file, xlim = x_lim, ylim = y_lim, hist_pars = False, path_plots = path_plots)
            # for plot with topo background
            path_base_image = '.'+os.sep+'base_map_img'+os.sep+'WT_area_gearth_hd_2.jpg'
            bound2plot = 'top' # top bound
            file_name = 'interface_LRA_'+bound2plot+'_topo'
            plot_surface_cc_count(station_objects, wells_objects, file_name = file_name, bound2plot = bound2plot, type_plot = type_plot,format = 'png', \
                path_base_image = path_base_image, alpha_img = 0.6, ext_img = ext_file, xlim = x_lim, ylim = y_lim, hist_pars = False, path_plots = path_plots)
            bound2plot = 'bottom'
            file_name = 'interface_LRA_'+bound2plot+'_topo'
            plot_surface_cc_count(station_objects, wells_objects, file_name = file_name, bound2plot = bound2plot, type_plot = type_plot,format = 'png', \
                path_base_image = path_base_image, alpha_img = 0.6, ext_img = ext_file, xlim = x_lim, ylim = y_lim, hist_pars = False, path_plots = path_plots)
        if False: # plot plain view with countours
            ##
            # define region to grid
            grid = [175.935, 176.255,-38.77,-38.545] # [min lon, max lon, min lat, max lat]
            # fn. for griding and calculate prior => print .txt with [lon, lat, mean_z1, std_z1, mean_z2, std_z2]
            file_name = 'grid_MT_inv'
            path_output = '.'+os.sep+'plain_view_plots'+os.sep+'MT_inv'
            try:
                os.mkdir('.'+os.sep+'plain_view_plots'+os.sep+'MT_inv')
            except:
                pass
            ##
            # image background
            #path_base_image = '.'+os.sep+'base_map_img'+os.sep+'WT_area_gearth_hd.jpg'
            #ext_file = [175.934859, 176.226398, -38.722805, -38.567571]
            #path_base_image = '.'+os.sep+'base_map_img'+os.sep+'WT_area_gearth_hd_2.jpg'
            #ext_file = [175.948466, 176.260520, -38.743590, -38.574484]
            path_base_image = '.'+os.sep+'base_map_img'+os.sep+'WT_area_gearth_hd_3.jpg'
            ext_file = [175.781956, 176.408620, -38.802528, -38.528097]
            x_lim = [175.9,176.3]
            y_lim = None #[-38.68,-38.57]
            #path topo
            path_topo = '.'+os.sep+'base_map_img'+os.sep+'coords_elev'+os.sep+'Topography_zoom_WT_re_sample_vertices_LatLonDec.csv'
            # call function to grid and plot
            if True:
                grid_MT_inv_rest(station_objects, coords = grid, n_points = 20, slp = 4*10., file_name = file_name, path_output = path_output,\
                    plot = True, path_base_image = path_base_image, ext_img = ext_file, xlim = x_lim, masl = False)
            # call function to plot with topo
            if True:
                file_name = 'topo_MT_inv'
                try:
                    os.mkdir('.'+os.sep+'plain_view_plots'+os.sep+'MT_inv'+os.sep+'Topo')
                except:
                    pass
                path_output = '.'+os.sep+'plain_view_plots'+os.sep+'MT_inv'+os.sep+'Topo'
                path_topo = '.'+os.sep+'base_map_img'+os.sep+'coords_elev'+os.sep+'Topography_zoom_WT_re_sample_vertices_LatLonDec.csv'
                topo_MT_inv_rest(station_objects, path_topo, slp = 4*10., file_name = file_name, path_output = path_output, \
                    plot = True, path_base_image = path_base_image, ext_img = ext_file, xlim = x_lim, masl = False)
        if True: # plot plain vien as scatter plot (colorbar as depths)
            ## load z1 and z2 pars
            # load mcmc results and assign to attributes of pars to station attributes
            load_sta_est_par(station_objects)
            file_name = 'MT_inv'
            try:
                os.mkdir('.'+os.sep+'plain_view_plots'+os.sep+'MT_inv'+os.sep+'Scatter')
            except:
                pass
            path_output = '.'+os.sep+'plain_view_plots'+os.sep+'MT_inv'+os.sep+'Scatter'
            # define region to grid
            coords = [175.97,176.200,-38.74,-38.58] # [min lon, max lon, min lat, max lat]
            # fn. for griding and calculate prior => print .txt with [lon, lat, mean_z1, std_z1, mean_z2, std_z2]
            ##
            img_back_topo_ge = True
            img_back_rest_bound = False
            # image background: topo
            if img_back_topo_ge:
                path_base_image = '.'+os.sep+'base_map_img'+os.sep+'WT_area_gearth_hd_3.jpg'
                ext_file = [175.781956, 176.408620, -38.802528, -38.528097]
            # image background: rest_bound
            if img_back_rest_bound:
                path_base_image = '.'+os.sep+'base_map_img'+os.sep+'WT_res_map_gearth_2.jpg'
                ext_file = [175.948466, 176.260520, -38.743590, -38.574484]
            x_lim = [175.9,176.3]
            y_lim = None #[-38.68,-38.57]
            # call function
            #if False: # contourf plot
            #    print('gridding temp at cond. bound')
            #    grid_temp_conductor_bound(wells_objects, coords = coords, n_points = 100, slp = 5., file_name = file_name, path_output = path_output,\
            #        plot = True, path_base_image = path_base_image, ext_img = ext_file, xlim = x_lim, masl = False)
            # scatter plot of temps at conductor boundaries
            if img_back_topo_ge: # scatter plot
                x_lim = [175.95,176.23]#[175.98,176.22]
                y_lim = [-38.78,-38.57]
                WK_resbound_line = '.'+os.sep+'base_map_img'+os.sep+'shorelines_reservoirlines'+os.sep+'rest_bound_WK_50ohmm.dat'
                taupo_lake_shoreline= '.'+os.sep+'base_map_img'+os.sep+'shorelines_reservoirlines'+os.sep+'shoreline_TaupoLake.dat'
                scatter_MT_conductor_bound(station_objects, path_output = path_output, alpha_img = 0.6,\
                    path_base_image = path_base_image, ext_img = ext_file, xlim = x_lim, ylim = y_lim, \
                        WK_resbound_line = WK_resbound_line, taupo_lake_shoreline = taupo_lake_shoreline)
    # (5) Estimated distribution of temperature profile in wells. Calculate 3-layer model in wells and alpha parameter for each well
    if wells_temp_fit:
        print('(5) Calculating beta in wells and fitting temperature profile')
        ## Calculate 3-layer model in wells. Fit temperature profiles and calculate beta for each layer.
        # Calculate normal dist. pars. [mean, std] for layer boundaries (z1 and z2) in well position.
        # Function assigns results as attributes to the wells in wells_objects (list of objects).
        ## Note: to run this section prior_MT_meb_read == True
        calc_layer_mod_quadrant(station_objects, wells_objects)
        ## loop over wells to fit temp. profiles and calc. betas
        ## pdf file collecting the temperature-profile sample plot of every well
        pp = PdfPages('Test_samples.pdf') # pdf to plot the meb profiles
        for wl in wells_objects:
            print('Well: {}'.format(wl.name))
            # calculate Test and beta values; returns the figure to append to the pdf
            f = wl.temp_prof_est(plot_samples = True, ret_fig = True) # method of well object
            pp.savefig(f)
            #Test, beta, Tmin, Tmax, slopes = T_beta_est(well_obj.temp_profile[1], well_obj.temp_profile[0], Zmin, Zmax) #
        pp.close()
        # move the assembled pdf into the wells sample folder
        shutil.move('Test_samples.pdf','.'+os.sep+'temp_prof_samples'+os.sep+'wells'+os.sep+'Test_samples.pdf')
    # (6) Estimated temperature profile in station positions
    if sta_temp_est:
        print('(6) Estimate Temerature profile in MT stations')
        for wl in wells_objects:
            # read samples of betas and others from wells; loads them as attributes
            wl.read_temp_prof_est_wells(beta_hist_corr = True)
        # Calculate betas and others at the MT station positions
        calc_beta_sta_quadrant(station_objects, wells_objects)
        ## Construct temperature profiles in MT stations
        ## pdf file collecting the temperature-profile sample plot of every station
        pp = PdfPages('Test_samples.pdf') # pdf to plot the meb profiles
        for sta_obj in station_objects:
            print(sta_obj.name[:-4])
            # estimate the temperature profile; returns the figure to append to the pdf
            f = sta_obj.temp_prof_est(plot_samples = True, ret_fig = True, Ns = 1000)
            perc = np.arange(15.,90.,5.)#np.arange(5.,100.,5.) # percentiels to calculate: [5% , 10%, ..., 95%]
            isoth = [50,100,150,200,250]
            sta_obj.uncert_isotherms_percentils(isotherms = isoth, percentiels = perc)
            pp.savefig(f)
            # calc
        pp.close()
        shutil.move('Test_samples.pdf','.'+os.sep+'temp_prof_samples'+os.sep+'MTstation'+os.sep+'Test_samples.pdf')
        # plot 2D profile
        if prof_WRKNW6 or prof_WRKNW5:
            print('(6.1) Printing uncertain isotherms plot')
            # note: isotherms = [] are strings coherent with value given in uncert_isotherms_percentils()
            # NOTE(review): `perc` is defined inside the station loop above, so this
            # section raises NameError if station_objects is empty -- confirm intent.
            #isoth = ['50','100','150','200']#,'250']
            isoth = ['50','100','200']
            plot_2D_uncert_isotherms(station_objects, wells_objects, pref_orient = 'EW', file_name = 'isotherm_uncert',\
                percentiels = perc, isotherms = isoth)
# (7) Files for Paraview
if files_paraview:
filter_no_cc = True # filter stations with z2 too thin (no inferred claycap)
# (0) Create folder
if not os.path.exists('.'+os.sep+str('paraview_files')):
os.mkdir('.'+os.sep+str('paraview_files'))
# (1) print topography file
if True:
f = open('.'+os.sep+str('paraview_files')+os.sep+'topo.csv','w')
f.write('station, lon_dec, lat_dec, elev\n')
for sta in station_objects:
z, l, x, y = project([sta.lon_dec, sta.lat_dec])
f.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str(sta.elev)+'\n')
f.close()
# (2) files with percentils surfaces
if True:
# crear archivos de percentiles
load_sta_est_par(station_objects)
# .csv for mean
f = open('.'+os.sep+str('paraview_files')+os.sep+'z1_z2_mean.csv','w')
f.write('station, lon_dec, lat_dec, z1, z2\n')
# .csv for mean
f0 = open('.'+os.sep+str('paraview_files')+os.sep+'z1_z2_mean_byrow.csv','w')
f0.write('station, lon_dec, lat_dec, z\n') # contains z1 and z2 mean as one collumn
# .csv for percentils
f10 = open('.'+os.sep+str('paraview_files')+os.sep+'z1_z2_10.csv','w')
f10.write('station, lon_dec, lat_dec, z1, z2\n')
f20 = open('.'+os.sep+str('paraview_files')+os.sep+'z1_z2_20.csv','w')
f20.write('station, lon_dec, lat_dec, z1, z2\n')
f40 = open('.'+os.sep+str('paraview_files')+os.sep+'z1_z2_40.csv','w')
f40.write('station, lon_dec, lat_dec, z1, z2\n')
f60 = open('.'+os.sep+str('paraview_files')+os.sep+'z1_z2_60.csv','w')
f60.write('station, lon_dec, lat_dec, z1, z2\n')
f80 = open('.'+os.sep+str('paraview_files')+os.sep+'z1_z2_80.csv','w')
f80.write('station, lon_dec, lat_dec, z1, z2\n')
f90 = open('.'+os.sep+str('paraview_files')+os.sep+'z1_z2_90.csv','w')
f90.write('station, lon_dec, lat_dec, z1, z2\n')
for sta in station_objects:
# filter stations with z2 too thin (no inferred claycap)
if filter_no_cc:
#print(sta.name)
#print(sta.z2_pars[0])
if sta.z2_pars[0] > 50.:
# mean
z, l, x, y = project([sta.lon_dec, sta.lat_dec])
f.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[0]))+', '+
str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[0])))+'\n')
f0.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[0]))+'\n')
f0.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[0])))+'\n')
# percentils
# [mean, std, med, [5%, 10%, 15%, ..., 85%, 90%, 95%]]
f10.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[3][1]))+', '+
str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[3][1])))+'\n')
f20.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[3][3]))+', '+
str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[3][3])))+'\n')
f40.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[3][7]))+', '+
str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[3][7])))+'\n')
f60.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[3][-7]))+', '+
str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[3][-7])))+'\n')
f80.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[3][-3]))+', '+
str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[3][-3])))+'\n')
f90.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[3][-1]))+', '+
str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[3][-1])))+'\n')
else:
# mean
z, l, x, y = project([sta.lon_dec, sta.lat_dec])
f.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[0]))+', '+
str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[0])))+'\n')
# percentils
# [mean, std, med, [5%, 10%, 15%, ..., 85%, 90%, 95%]]
f10.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[3][1]))+', '+
str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[3][1])))+'\n')
f20.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[3][3]))+', '+
str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[3][3])))+'\n')
f40.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[3][7]))+', '+
str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[3][7])))+'\n')
f60.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[3][-7]))+', '+
str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[3][-7])))+'\n')
f80.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[3][-3]))+', '+
str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[3][-3])))+'\n')
f90.write(str(sta.name[:-4])+', '+str(x)+', '+str(y)+', '+str((sta.elev - sta.z1_pars[3][-1]))+', '+
str((sta.elev - (sta.z1_pars[0]+sta.z2_pars[3][-1])))+'\n')
f.close()
f10.close()
f20.close()
f40.close()
f60.close()
f80.close()
f90.close()
#####################################################################################################################################################################
## EXTRAS that use list of objects
if True:
# PDF file with figure of inversion misfit (observe data vs. estatimated data)
if False:
if False: # option 1: print appres fit to pdf
from PIL import Image
imagelist = []
for sta_obj in station_objects:
pngfile = Image.open('.'+os.sep+'mcmc_inversions'+os.sep+sta_obj.name[:-4]+os.sep+'app_res_fit.png')
pngfile = pngfile.convert('RGB')
#pngfile = pngfile.resize(size = (500, 500))
imagelist.append(pngfile)
#print(imagelist)
pngfile.save('.'+os.sep+'mcmc_inversions'+os.sep+'fit.pdf', save_all=True, append_images=[imagelist[1],imagelist[3]])
# move
try:
shutil.move('.'+os.sep+'mcmc_inversions'+os.sep+'fit.pdf', '.'+os.sep+'mcmc_inversions'+os.sep+'01_bad_model'+os.sep+'fit.pdf')
except:
os.mkdir( '.'+os.sep+'mcmc_inversions'+os.sep+'01_bad_model')
shutil.move('.'+os.sep+'mcmc_inversions'+os.sep+'fit.pdf', '.'+os.sep+'mcmc_inversions'+os.sep+'01_bad_model'+os.sep+'fit.pdf')
# in evaluation
if True: # option 2: move appres fit to a folder
try:
os.mkdir('.'+os.sep+'mcmc_inversions'+os.sep+'01_sta_model')
except:
pass
for sta_obj in station_objects:
shutil.copy('.'+os.sep+'mcmc_inversions'+os.sep+sta_obj.name[:-4]+os.sep+'app_res_fit.png', '.'+os.sep+'mcmc_inversions'+os.sep+'01_sta_model'+os.sep+'app_res_fit_'+sta_obj.name[:-4]+'.png')
# delete chain.dat (text file with the whole markov chains) from station folders
if False:
for sta in station_objects:
try:
os.remove('.'+os.sep+'mcmc_inversions'+os.sep+sta.name[:-4]+os.sep+'chain.dat')
except:
pass
for wl in wells_objects:
if wl.meb:
try:
os.remove('.'+os.sep+'mcmc_meb'+os.sep+wl.name+os.sep+'chain.dat')
except:
pass
## create text file for google earth, containing names of MT stations considered
if False:
for_google_earth(station_objects, name_file = '00_stations_4_google_earth.txt', type_obj = 'Station')
#shutil.move('00_stations_4_google_earth.txt','.'+os.sep+'base_map_img'+os.sep+'00_stations_4_google_earth.txt')
shutil.move('00_stations_4_google_earth.txt','.'+os.sep+'mcmc_inversions'+os.sep+'02_bad_sta_map'+os.sep+'00_stations_4_google_earth.txt')
## create file with range of periods to invert for every station
if False:
name_file = '.'+os.sep+'mcmc_inversions'+os.sep+'00_global_inversion'+os.sep+'range_periods_inv.txt'
range_p_set = open(name_file,'w')
range_p_set.write('#'+' '+'station_name'+'\t'+'initial_period'+'\t'+'final_period'+'\t'+'mode to model (0:TE, 1:TM, 2:both)'+'\t'+'Quality (0:bad, 1:mid, 2:good)'+'\n')
range_p_def = [0.001,10.,2,2] # default range of periods for inversion
for sta in station_objects:
range_p_set.write(sta.name+'\t'+str(range_p_def[0])+'\t'+str(range_p_def[1])+'\n')
range_p_set.close()
## create list of stations to invert base on changes in file range_periods_inv.txt
if False:
name_file_in = '.'+os.sep+'mcmc_inversions'+os.sep+'00_global_inversion'+os.sep+'inv_pars.txt'
par_inv_def = [str(0.001),str(10),str(2),str(2)] # default
sta_re_inv = [x for x in open(name_file_in).readlines() if x[0]!='#']
sta_re_inv = [x.split() for x in sta_re_inv]
sta_re_inv = [x[0][:-4] for x in sta_re_inv if \
x[1] != par_inv_def[0] or \
x[:][2] != par_inv_def[1] or \
x[:][3] != par_inv_def[2] or \
x[:][4] != par_inv_def[3]]
print(sta_re_inv)
if False: # list of stations to re invert (bad quality or wrong modelling)
name_file_in = '.'+os.sep+'mcmc_inversions'+os.sep+'00_global_inversion'+os.sep+'inv_pars.txt'
sta_re_inv = [x.split() for x in open(name_file_in).readlines()[1:]]
sta_re_inv = [x[0][:-4] for x in sta_re_inv if x[4] is '0']
print(sta_re_inv)
if True: # histogram of MT inversion parameters for stations inverted
# Resistivity Boundary, Risk
path_rest_bound_WT = '.'+os.sep+'base_map_img'+os.sep+'shorelines_reservoirlines'+os.sep+'rest_bound_WK_50ohmm.dat'
# Resistivity Boundary, Mielke, OUT
path_rest_bound_WT = '.'+os.sep+'base_map_img'+os.sep+'shorelines_reservoirlines'+os.sep+'rest_bound_OUT_Mielke.txt'
#histogram_mcmc_MT_inv_results(station_objects, filt_in_count=path_rest_bound_WT, filt_out_count=path_rest_bound_WT, type_hist = 'overlap')
#histogram_mcmc_MT_inv_results(station_objects, filt_in_count=path_rest_bound_WT, filt_out_count=path_rest_bound_WT, type_hist = 'sidebyside')
histogram_mcmc_MT_inv_results_multisamples(station_objects, filt_in_count=path_rest_bound_WT)
if False: # histogram of MeB inversion parameters for wells
path_rest_bound_WT = '.'+os.sep+'base_map_img'+os.sep+'shorelines_reservoirlines'+os.sep+'rest_bound_WK_50ohmm.dat'
#histogram_mcmc_meb_inv_results(wells_objects, filt_in_count=path_rest_bound_WT, filt_out_count=path_rest_bound_WT, type_hist = 'overlap')
histogram_mcmc_meb_inv_results(wells_objects, filt_in_count=path_rest_bound_WT, filt_out_count=path_rest_bound_WT, type_hist = 'sidebyside')
if False: # .dat of latlon for of wells and MT stations
# mt
mt_loc = open('.'+os.sep+'base_map_img'+os.sep+'location_mt_wells'+os.sep+'location_MT_stations.dat','w')
for sta in station_objects:
mt_loc.write(str(sta.lon_dec)+','+str(sta.lat_dec)+'\n')
mt_loc.close()
# wells
wl_loc = open('.'+os.sep+'base_map_img'+os.sep+'location_mt_wells'+os.sep+'location_wls.dat','w')
wlmeb_loc = open('.'+os.sep+'base_map_img'+os.sep+'location_mt_wells'+os.sep+'location_wls_meb.dat','w')
for wl in wells_objects:
wl_loc.write(str(wl.lon_dec)+','+str(wl.lat_dec)+'\n')
if wl.meb:
wlmeb_loc.write(str(wl.lon_dec)+','+str(wl.lat_dec)+'\n')
wl_loc.close()
wlmeb_loc.close()
if False: # .dat with results meb inversion, mt inversion, and temp estimation at boundaries of conductor
# mcmc MeB results
if False:
wl_meb_results = open('.'+os.sep+'mcmc_meb'+os.sep+'00_global_inversion'+os.sep+'wl_meb_results.dat','w')
wl_meb_results.write('well_name'+','+'lon_dec'+','+'lat_dec'+','+'z1_mean'+','+'z1_std'+','+'z2_mean'+','+'z2_std'+'\n')
# name lat lon file
wls_loc = open('.'+os.sep+'mcmc_meb'+os.sep+'00_global_inversion'+os.sep+'wls_meb_loc.txt','w')
wls_loc.write('well_name'+','+'lon_dec'+','+'lat_dec'+'\n')
for wl in wells_objects:
if wl.meb:
# extract meb mcmc results from file
meb_mcmc_results = np.genfromtxt('.'+os.sep+'mcmc_meb'+os.sep+str(wl.name)+os.sep+"est_par.dat")
# values for mean a std for normal distribution representing the prior
wl.meb_z1_pars = [meb_mcmc_results[0,1], meb_mcmc_results[0,2]] # mean [1] z1 # median [3] z1
wl.meb_z2_pars = [meb_mcmc_results[1,1], meb_mcmc_results[1,2]] # mean [1] z1 # median [3] z1
# write results
wl_meb_results.write(str(wl.name)+','+str(wl.lon_dec)+','+str(wl.lat_dec)+','+str(wl.meb_z1_pars[0])\
+','+str(wl.meb_z1_pars[1])+','+str(wl.meb_z2_pars[0])+','+str(wl.meb_z2_pars[1])+'\n')
wls_loc.write(str(wl.name)+','+str(wl.lon_dec)+','+str(wl.lat_dec)+'\n')
wl_meb_results.close()
wls_loc.close()
# mcmc MT results
if True:
sta_mcmc_results = open('.'+os.sep+'mcmc_inversions'+os.sep+'00_global_inversion'+os.sep+'mt_sta_results.dat','w')
sta_mcmc_results.write('sta_name'+','+'lon_dec'+','+'lat_dec'+','+'z1_mean'+','+'z1_std'+','+'z2_mean'+','+'z2_std'+'\n')
for sta in station_objects:
if True: # filter stations inside resistivity boundary
lats_nzgm, lons_nzgm = np.genfromtxt('.'+os.sep+'base_map_img'+os.sep+'shorelines_reservoirlines'+os.sep+'rest_bound_WK_50ohmm.dat', skip_header=1, delimiter=',').T
# define poligon
poli_in = [[lons_nzgm[i],lats_nzgm[i]] for i in range(len(lons_nzgm))]
# check if centre is station is inside the polygon
val = ray_tracing_method(sta.lon_dec, sta.lat_dec, poli_in)
if val:
# extract meb mcmc results from file
mt_mcmc_results = np.genfromtxt('.'+os.sep+'mcmc_inversions'+os.sep+str(sta.name[:-4])+os.sep+"est_par.dat")
# values for mean a std for normal distribution representing the prior
sta.z1_pars = [mt_mcmc_results[0,1], mt_mcmc_results[0,2]] # mean [1] z1 # median [3] z1
sta.z2_pars = [mt_mcmc_results[1,1], mt_mcmc_results[1,2]] # mean [1] z1 # median [3] z1
# write results
sta_mcmc_results.write(str(sta.name[:-4])+','+str(sta.lon_dec)+','+str(sta.lat_dec)+','
+str(sta.z1_pars[0])+','+str(sta.z1_pars[1])+','+str(sta.z2_pars[0])+','+str(sta.z2_pars[1])+'\n')
else:
pass
# extract meb mcmc results from file
mt_mcmc_results = np.genfromtxt('.'+os.sep+'mcmc_inversions'+os.sep+str(sta.name[:-4])+os.sep+"est_par.dat")
# values for mean a std for normal distribution representing the prior
sta.z1_pars = [mt_mcmc_results[0,1], mt_mcmc_results[0,2]] # mean [1] z1 # median [3] z1
sta.z2_pars = [mt_mcmc_results[1,1], mt_mcmc_results[1,2]] # mean [1] z1 # median [3] z1
# write results
sta_mcmc_results.write(str(sta.name[:-4])+','+str(sta.lon_dec)+','+str(sta.lat_dec)+','
+str(sta.z1_pars[0])+','+str(sta.z1_pars[1])+','+str(sta.z2_pars[0])+','+str(sta.z2_pars[1])+'\n')
sta_mcmc_results.close()
# temp at z1 an z2 in wells and z1 and z2 at well positions
if False:
wl_temp_z1_z2 = open('.'+os.sep+'corr_temp_bc'+os.sep+'00_global'+os.sep+'wls_conductor_T1_T2.dat','w')
wl_temp_z1_z2.write('wl_name'+','+'lon_dec'+','+'lat_dec'+','+'T1_mean'+','+'T1_std'+','+'T2_mean'+','+'T2_std'+'\n')
for wl in wells_objects:
# extract meb mcmc results from file
try:
if not wl.no_temp:
wl_temp_results = np.genfromtxt('.'+os.sep+'corr_temp_bc'+os.sep+str(wl.name)+os.sep+"conductor_T1_T2.txt")
# values for mean a std for normal distribution representing the prior
wl.T1_pars = [wl_temp_results[0], wl_temp_results[1]] # mean [1] z1 # median [3] z1
wl.T2_pars = [wl_temp_results[2], wl_temp_results[3]] # mean [1] z1 # median [3] z1
# write results
wl_temp_z1_z2.write(str(wl.name)+','+str(wl.lon_dec)+','+str(wl.lat_dec)+','
+str(wl.T1_pars[0])+','+str(wl.T1_pars[1])+','+str(wl.T2_pars[0])+','+str(wl.T2_pars[1])+'\n')
except:
pass
wl_temp_z1_z2.close()
##### D1 and D2 at well location
wl_d1_d2 = open('.'+os.sep+'corr_temp_bc'+os.sep+'00_global'+os.sep+'wls_conductor_D1_D2.dat','w')
wl_d1_d2.write('wl_name'+','+'lon_dec'+','+'lat_dec'+','+'D1_mean(depth to top of cond.)'+','+'D1_std'+','+'D2_mean(depth to bottom of cond.)'+','+'D2_std'+'\n')
for wl in wells_objects:
# extract z1 and z2 results from file
try:
wl_z1_z2_results = np.genfromtxt('.'+os.sep+'corr_temp_bc'+os.sep+str(wl.name)+os.sep+"conductor_z1_z2.txt", skip_header=1)
# values for mean a std for normal distribution representing the prior
wl.z1_pars = [wl_z1_z2_results[0], wl_z1_z2_results[1]] # mean [1] z1 # median [3] z1
wl.z2_pars = [wl_z1_z2_results[2], wl_z1_z2_results[3]] # mean [1] z1 # median [3] z1
# write results
wl_d1_d2.write(str(wl.name)+','+str(wl.lon_dec)+','+str(wl.lat_dec)+','
+str(wl.z1_pars[0])+','+str(wl.z1_pars[1])+','+str(wl.z1_pars[0] + wl.z2_pars[0])+','+str(wl.z2_pars[1])+'\n')
except:
pass
wl_d1_d2.close()
##### D1, T1 and D2,T2 at well location
wl_d1_d2_T1_T2 = open('.'+os.sep+'corr_temp_bc'+os.sep+'00_global'+os.sep+'wls_conductor_D1_D2_T1_T2.dat','w')
wl_d1_d2_T1_T2.write('wl_name'+','+'lon_dec'+','+'lat_dec'+','+'D1_mean(depth to top of cond.)'+','+'D2_mean(depth to bottom of cond.)'+','+'T1_mean'+','+'T2_mean'+'\n')
for wl in wells_objects:
# extract z1 and z2 results from file
try:
if not wl.no_temp:
wl_z1_z2_results = np.genfromtxt('.'+os.sep+'corr_temp_bc'+os.sep+str(wl.name)+os.sep+"conductor_z1_z2.txt", skip_header=1)
wl_temp_results = np.genfromtxt('.'+os.sep+'corr_temp_bc'+os.sep+str(wl.name)+os.sep+"conductor_T1_T2.txt")
# values for mean a std for normal distribution representing the prior
wl.z1_pars = [wl_z1_z2_results[0], wl_z1_z2_results[1]] # mean [1] z1 # median [3] z1
wl.z2_pars = [wl_z1_z2_results[2], wl_z1_z2_results[3]] # mean [1] z1 # median [3] z1
wl.T1_pars = [wl_temp_results[0], wl_temp_results[1]] # mean [1] z1 # median [3] z1
wl.T2_pars = [wl_temp_results[2], wl_temp_results[3]] # mean [1] z1 # median [3] z1
# write results
wl_d1_d2_T1_T2.write(str(wl.name)+','+str(wl.lon_dec)+','+str(wl.lat_dec)+','+
str(wl.z1_pars[0])+','+str(wl.z1_pars[0] + wl.z2_pars[0])+','+
str(wl.T1_pars[0])+','+str(wl.T2_pars[0])+'\n')
except:
pass
wl_d1_d2_T1_T2.close()
##### Thermal Gradient, Thermal Conductivity and Heat Flux at well location
wl_TG_TC_HF = open('.'+os.sep+'corr_temp_bc'+os.sep+'00_global'+os.sep+'wls_conductor_TG_TC_HF.dat','w')
wl_TG_TC_HF.write('wl_name'+','+'lon_dec'+','+'lat_dec'+','+'TG(Thermal Gradient)[C/m]'+','+'TC(Thermal Conductivity)[W/mC]'+','+'HF(Heat Flux)[W/m2]'+'\n')
for wl in wells_objects:
# extract z1 and z2 results from file
try:
if not wl.no_temp:
wl_TG_TC_HF_results = np.genfromtxt('.'+os.sep+'corr_temp_bc'+os.sep+str(wl.name)+os.sep+"conductor_TG_TC_HF.txt", skip_header=1)
# values for mean a std for normal distribution representing the prior
wl.thermal_grad = wl_TG_TC_HF_results[0]
wl.thermal_cond = wl_TG_TC_HF_results[1]
wl.heat_flux = wl_TG_TC_HF_results[2]
# write results
wl_TG_TC_HF.write(str(wl.name)+','+str(wl.lon_dec)+','+str(wl.lat_dec)+','+
str(round(wl.thermal_grad,2))+','+str(round(wl.thermal_cond,1))+','+str(round(wl.heat_flux,2))+'\n')
except:
pass
wl_TG_TC_HF.close()
if False: # .txt with names, lon, lat of wells with lithology
# lito wells
path = '.'+os.sep+'wells_info'+os.sep+'well_names_with_litho.txt' # this is filled manually
wls_lito = []
with open(path) as p:
next(p)
for line in p:
line = line.strip('\n')
wls_lito.append(line)
# file
wls_with_litho = open('.'+os.sep+'base_map_img'+os.sep+'wells_lithology'+os.sep+'wls_with_litho.txt','w')
wls_with_litho.write('well_name'+','+'lon_dec'+','+'lat_dec'+'\n')
for wl in wells_objects:
if wl.name in wls_lito:
wl.litho = True
# write results
wls_with_litho.write(str(wl.name)+','+str(wl.lon_dec)+','+str(wl.lat_dec)+'\n')
wls_with_litho.close()
if False: # .dat of temp at fix depth in location of every well
#depth = 500. # [m] from surface
masl = 0
temp_at_masl = open('.'+os.sep+'base_map_img'+os.sep+'extras'+os.sep+'temps_at_'+str(masl)+'m_masl.dat','w')
temp_at_masl.write('well_name'+','+'lon_dec'+','+'lat_dec'+','+'tempC'+'\n')
i = 0
for wl in wells_objects:
if not wl.litho:
try:
depth, idx = find_nearest(wl.red_depth_rs, masl)
temp = wl.temp_prof_rs[idx]
temp_at_masl.write(str(wl.name)+','+str(wl.lon_dec)+','+str(wl.lat_dec)+','+str(temp)+'\n')
except:
i+=1
pass
print('wells not considered: '+str(i)+'/'+str(len(wells_objects)))
temp_at_masl.close()
if False: # plot well temperature profile
if False:
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(10,7)
fig.suptitle(' ')
#(1) well 1
wl_ref = 'WK033'#'WK272'
for wl in wells_objects:
if wl.name == wl_ref:
#ax1.set_xscale("linear")
#ax1.set_yscale("linear")
ax1.plot(wl.temp_prof_true, wl.red_depth,'o', c = pale_blue_col) # plot true data
ax1.plot(wl.temp_prof_rs, wl.red_depth_rs,'-', c = pale_orange_col, linewidth = 2.0)
ax1.plot([], [],'o', c = pale_blue_col, label = 'temperature data') # plot true data
ax1.plot([], [],'-', c = pale_orange_col, label = 'interpolated data')
if False:
ax1.plot( [0,300.], [300.,300.],'g--', alpha = 0.5)
ax1.plot( [0,300.], [50.,50.],'g--', alpha = 0.5)
ax1.plot( [], [],'g--', label = 'suggested boundary', alpha = 0.5) # plot true data
#
ax1.set_xlim([-5,300])
#ax1.set_ylim([-300,500])
ax1.set_xlabel('temperature [°C]', fontsize=textsize)
ax1.set_ylabel('m.a.s.l. [m]', fontsize=textsize)
ax1.set_title('Temperature profile for well '+wl_ref, fontsize=textsize)
ax1.grid(True, which='both', linewidth=0.1)
ax1.legend(loc = 'lower left', prop={'size': textsize})
#(1) well 2
wl_ref = 'TH07'
for wl in wells_objects:
if wl.name == wl_ref:
#ax1.set_xscale("linear")
#ax1.set_yscale("linear")
ax2.plot(wl.temp_prof_true, wl.red_depth,'o', c = pale_blue_col) # plot true data
ax2.plot(wl.temp_prof_rs, wl.red_depth_rs,'-', c = pale_orange_col, linewidth = 2.0)
ax2.plot([], [],'o', c = pale_blue_col, label = 'temperature data') # plot true data
ax2.plot([], [],'-', c = pale_orange_col, label = 'interpolated data')
if False:
ax2.plot( [0,300.], [250.,250.],'g--', alpha = 0.5)
ax2.plot( [0,300.], [100.,100.],'g--', alpha = 0.5)
ax2.plot( [], [],'g--', label = 'suggested boundary', alpha = 0.5) # plot true data
#
ax2.set_xlim([-5,300])
ax2.set_ylim([-300,500])
ax2.set_xlabel('temperature [°C]', fontsize=textsize)
ax2.set_ylabel('m.a.s.l. [m]', fontsize=textsize)
ax2.set_title('Temperature profile for well '+wl_ref, fontsize=textsize)
ax2.grid(True, which='both', linewidth=0.1)
#ax2.legend(loc = 'lower left', prop={'size': textsize})
plt.tight_layout()
fig.savefig('.'+os.sep+'wells_info'+os.sep+'temp_prof_ex'+os.sep+'temp_profs_ex.png', dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', format='png',transparent=True, bbox_inches=None, pad_inches=.1)
plt.close("all")
# plot MeB profile
if True:
fig, (ax1,ax2) = plt.subplots(1, 2)
fig.set_size_inches(10,7)
fig.suptitle(' ')
#(1) loop over wells
wl_ref = ['WK261','WK270']#['WK307', 'WK686'] #number of wells needs to match number of axes
for i, ax in enumerate(fig.axes):
wl_aux = [wl for wl in wells_objects if wl.name == wl_ref[i]][0]
ax.plot(wl_aux.meb_prof, [wl_aux.elev - d for d in wl_aux.meb_depth],'*', c = pale_red_col, markersize = 12., label = 'MeB data') # plot true data
ax.plot(wl_aux.meb_prof, [wl_aux.elev - d for d in wl_aux.meb_depth],'--', c = pale_blue_col, linewidth = 1.5, label = 'interpolated data')
ax.plot([2,2],[wl_aux.elev-2500.,wl_aux.elev],'y--', linewidth = 1.0, label = 'resolution limit')
ax.plot([5,5],[wl_aux.elev-2500.,wl_aux.elev],'g--', linewidth = 1.0, label = 'high confidence limit')
ax.set_xlim([0,12])
ax.set_ylim([wl_aux.elev-1200.,wl_aux.elev])
if 'WK307' == wl_aux.name:
ax.set_ylim([-2000,500])#[wl_aux.elev-1200.,wl_aux.elev])
if 'WK686' == wl_aux.name:
ax.set_ylim([-400,500])#[wl_aux.elev-1200.,wl_aux.elev])
ax.set_xlabel('Clay content [MeB %]', fontsize=textsize)
ax.set_ylabel('m.a.s.l. [m]', fontsize=textsize)
ax.grid(True, which='both', linewidth=0.1)
ax.set_title('MeB profile well '+wl_ref[i], fontsize=textsize)
ax1.legend(loc = 'upper right', prop={'size': textsize})
plt.tight_layout()
fig.savefig('.'+os.sep+'wells_info'+os.sep+'meb_prof_ex'+os.sep+'meb_examples.png', dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', format='png',transparent=True, bbox_inches=None, pad_inches=.1)
plt.close("all")
if False: # .txt with wells locations and misfit temp from tough2 models
# load misfit results
file_path = '/Users/macadmin/Desktop/res_sim/wt_tough2_sim/ForAlberto/wells_info/hist_misfit.txt'
# create new file
g = open('.'+os.sep+'wells_info'+os.sep+'wl_lat_lon_mfcal_mfrecal.txt', "w")
g.write('# well name, lat, lon, misfir cal, misfit recal \n')
# loop over mf file, find location for each well, write line for new file
with open(file_path, "r") as f:
next(f)
for x in f:
wl_name, mf_cal, mf_recal = x.split(sep = ',')
if wl_name == 'GGL01': wl_name = 'GGL1'
mf_cal, mf_recal = float(mf_cal[1:]), float(mf_recal[1:])
# find wl in wells objects
wl1 = [wl for wl in wells_objects if wl.name == wl_name][0] # list of all elements with .n==30 # write new file
g.write(wl1.name+','+str(wl1.lat_dec)+','+str(wl1.lon_dec)+','+str(mf_cal)+','+str(mf_recal)+'\n')
g.close()
f.close()
| aardid/mt_meb_inv_code | 00_main_inversion.py | 00_main_inversion.py | py | 77,169 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.rcParams.update",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "os.s... |
5784439690 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import six
from .bt import BluetoothConnection
from .dummy import DummyConnection
from .file import FileConnection
from .network import NetworkConnection
from .serial import SerialConnection
from .usb import USBConnection
__all__ = [
    'BluetoothConnection',
    'DummyConnection',
    'FileConnection',
    'NetworkConnection',
    'SerialConnection',
    'USBConnection',
]

if six.PY2:
    # Python 2 requires byte-string entries in __all__.
    __all__ = [public_name.encode('latin-1') for public_name in __all__]


# Short identifiers for each supported connection mechanism.
BLUETOOTH = 'bluetooth'
DUMMY = 'dummy'
FILE = 'file'
NETWORK = 'network'
SERIAL = 'serial'
USB = 'usb'


# Descriptor for a connection implementation:
# friendly name, fully qualified class name, and the class itself.
ConnectionTypeInfo = namedtuple('ConnectionTypeInfo', 'name fqname type')


# (identifier, friendly name, fully qualified name, implementation class)
_CONNECTION_SPECS = (
    (BLUETOOTH, 'Bluetooth', 'escpos.conn.bt.BluetoothConnection', BluetoothConnection),
    (DUMMY, 'Dummy', 'escpos.conn.dummy.DummyConnection', DummyConnection),
    (FILE, 'File', 'escpos.conn.file.FileConnection', FileConnection),
    (NETWORK, 'Network', 'escpos.conn.network.NetworkConnection', NetworkConnection),
    (SERIAL, 'Serial (RS-232)', 'escpos.conn.serial.SerialConnection', SerialConnection),
    (USB, 'USB', 'escpos.conn.usb.USBConnection', USBConnection),
)

CONNECTION_TYPES = tuple(
    (identifier, ConnectionTypeInfo(name=name, fqname=fqname, type=impl))
    for identifier, name, fqname, impl in _CONNECTION_SPECS
)
"""Known implementations for connection with printers."""
| base4sistemas/pyescpos | escpos/conn/__init__.py | __init__.py | py | 2,006 | python | en | code | 67 | github-code | 36 | [
{
"api_name": "six.PY2",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "collections.namedtuple",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "bt.BluetoothConnection",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "dummy.Dumm... |
7816667534 | import boto3
import re
def move_object(sourceBucket, sourceKey, destinationBucket,
                destinationKey, isVTT):
    """Copy an S3 object to a new bucket/key, then delete the original.

    :param sourceBucket: bucket holding the object to move
    :param sourceKey: key of the object; callers pass a leading "/",
        which concatenates with the bucket name to form CopySource
    :param destinationBucket: bucket to copy the object into
    :param destinationKey: key for the copied object
    :param isVTT: when truthy, rewrite the content type to "text/vtt"
        on the copy (WebVTT subtitle files)
    """
    s3 = boto3.client("s3")
    print(
        "Move " + sourceBucket + "/" + sourceKey + " to " +
        destinationBucket + "/" + destinationKey
    )
    copy_kwargs = {
        "Bucket": destinationBucket,
        "Key": destinationKey,
        "CopySource": sourceBucket + sourceKey,
    }
    if isVTT:
        # Force the correct MIME type for subtitle files; MetadataDirective
        # REPLACE is required for copy_object to apply a new ContentType.
        copy_kwargs["ContentType"] = "text/vtt"
        copy_kwargs["MetadataDirective"] = "REPLACE"
    s3.copy_object(**copy_kwargs)
    s3.delete_object(
        Bucket=sourceBucket,
        Key=sourceKey
    )
def handler(event, context):
    """Lambda entry point: publish a processed media file and its subtitles.

    Moves the source video and every generated WebVTT track from the
    working media bucket into the public static bucket, removes the
    intermediate mp3 transcode, and marks the DynamoDB item as DONE.

    :param event: dict with "fileUUID" and "bucket" (media bucket name)
    :param context: Lambda context (unused)
    :return: True on completion
    """
    s3 = boto3.client("s3")
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
    table = dynamodb.Table('subtitles')

    sourceLanguage = 'en'
    targetLanguages = [
        "ar",
        "es",
        "fr",
        "de",
        # "pt",
        "zh"
    ]

    fileUUID = event.get("fileUUID")
    mediaBucket = event.get("bucket")
    print("Working with file UUID " + fileUUID + " from bucket " + mediaBucket)

    # Identify the static (public) bucket by its naming convention.
    staticBucket = ""
    buckets = s3.list_buckets()
    for b in buckets.get("Buckets"):
        isStaticBucket = re.search(
            "subtitle\\.static\\.(.*)\\.aws\\.com",
            b.get("Name")
        )
        if isStaticBucket:
            staticBucket = "subtitle.static." + isStaticBucket.group(1) + \
                ".aws.com"
    print("Target bucket is : " + staticBucket)

    directory = "files/" + fileUUID + "/"
    # Publish the transcoded video.
    move_object(
        mediaBucket,
        "/1-source/" + fileUUID + ".mp4",
        staticBucket,
        directory + fileUUID + ".mp4",
        False
    )
    # Publish the source-language track plus every translated track
    # (same move for all languages, sourceLanguage included).
    for language in [sourceLanguage] + targetLanguages:
        move_object(
            mediaBucket,
            "/4-translated/" + fileUUID + "." + language + ".vtt",
            staticBucket,
            directory + fileUUID + "." + language + ".vtt",
            True
        )

    print("Clean mp3 files from 2-transcoded")
    s3.delete_object(
        Bucket=mediaBucket,
        Key="2-transcoded/"+fileUUID+".mp3"
    )

    print("Update Dynamodb")
    table.update_item(
        ExpressionAttributeNames={'#S': 'State'},
        ExpressionAttributeValues={':s': 'DONE'},
        Key={'Id': fileUUID},
        TableName='subtitles',
        UpdateExpression='SET #S = :s'
    )
    return True
| awslabs/serverless-subtitles | lambda/SUBLambdaFunctionOutput/index.py | index.py | py | 2,775 | python | en | code | 126 | github-code | 36 | [
{
"api_name": "boto3.client",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number"... |
39479073026 | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from cards.models import Card
from cards.models import Battle
from cards.models import Format
from cards.models import FormatBasecard
from django.utils import timezone
import re
from optparse import make_option
from datetime import datetime, timedelta
import sys
out = sys.stdout
class Command(BaseCommand):
    """Generate HTML battle links for card names read from stdin.

    Each input line is an optional count followed by a card name
    ("4 Lightning Bolt"). For every card found, an HTML anchor to its battle
    page and its current win/loss record is written to stdout.
    """
    help = 'Generate HTML links to do battles on the cards that are entered on stdin.'

    def add_arguments(self, parser):
        # argparse requires "type" to be a callable; the original
        # optparse-style type='string' raises TypeError during parsing.
        parser.add_argument('--format',
                            dest='format',
                            type=str,
                            default='modern',
                            help='Select the format to battle in. Default is modern.')

    def handle(self, *args, **options):
        # Optional leading count is skipped; the card name lands in group "cn".
        regex = re.compile(r'^\d*\s*(?P<cn>\S.+)')
        format_obj = None
        if options['format']:
            # Pick the currently active format with that name.
            format_obj = Format.objects.filter(
                formatname__iexact=options['format'],
                start_date__lte=timezone.now(),
                end_date__gt=timezone.now()).order_by('-end_date').first()
        if format_obj is None:
            # Fail fast with a clear message instead of AttributeError below.
            raise CommandError('No active format named "{}"'.format(options['format']))
        # NOTE(review): other code here uses format_obj.formatname; confirm the
        # Format model also defines a "format" attribute.
        out.write('<div>Format: ' + format_obj.format + "</div>\n")
        added_cards = []
        for line in sys.stdin:
            cardline = line.rstrip("\n")
            if not cardline:
                continue
            mm = regex.match(cardline)
            if mm is None:
                # Whitespace-only / malformed line: skip instead of crashing.
                continue
            cardname = mm.group('cn')
            card = Card.objects.filter(basecard__name__iexact=cardname).first()
            if card is None:
                out.write('Could not find card "' + str(cardname) + '"')
                out.write('<br/>\n')
            elif card in added_cards:
                # Duplicate input line; emit the link only once per card.
                pass
            else:
                added_cards.append(card)
                fbc = FormatBasecard.objects.filter(basecard=card.basecard, format=format_obj).first()
                if fbc is None:
                    out.write('The card "' + card.basecard.name + '" is not in ' + format_obj.formatname)
                    out.write('<br/>\n')
                else:
                    out.write('<a target="battlepane" href="http://card.ninja/cards/battle/')
                    out.write(format_obj.formatname)
                    out.write('/?bcid=')
                    out.write(str(card.basecard.id))
                    out.write('&c=yes">')
                    out.write(card.basecard.name)
                    out.write('</a> ')
                    wincount = Battle.objects.filter(winner_pcard=card.basecard.physicalcard, format=format_obj).count()
                    losecount = Battle.objects.filter(loser_pcard=card.basecard.physicalcard, format=format_obj).count()
                    battlecount = wincount + losecount
                    out.write('[battles: ' + str(battlecount) + ' (' + str(wincount) + '-' + str(losecount) + ')]')
                    out.write('<br/>\n')
| jcrickmer/mtgdbpy | cards/management/commands/battlelinks.py | battlelinks.py | py | 3,085 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdout",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "ca... |
21619030471 | from __future__ import absolute_import
import errno
import io
import logging
import multiprocessing
import re
import sys
import threading
import time
import traceback
from builtins import object
from apache_beam.internal.http_client import get_new_http
from apache_beam.io.filesystemio import Downloader
from apache_beam.io.filesystemio import DownloaderStream
from apache_beam.io.filesystemio import PipeStream
from apache_beam.io.filesystemio import Uploader
from apache_beam.io.filesystemio import UploaderStream
from apache_beam.utils import retry
__all__ = ['GcsIO']
_LOGGER = logging.getLogger(__name__)
# Issue a friendlier error message if the storage library is not available.
# TODO(silviuc): Remove this guard when storage is available everywhere.
try:
# pylint: disable=wrong-import-order, wrong-import-position
# pylint: disable=ungrouped-imports
import apitools.base.py.transfer as transfer
from apitools.base.py.batch import BatchApiRequest
from apitools.base.py.exceptions import HttpError
from apache_beam.internal.gcp import auth
from apache_beam.io.gcp.internal.clients import storage
except ImportError:
raise ImportError(
'Google Cloud Storage I/O not supported for this execution environment '
'(could not import storage API client).')
# This is the size of each partial-file read operation from GCS. This
# parameter was chosen to give good throughput while keeping memory usage at
# a reasonable level; the following table shows throughput reached when
# reading files of a given size with a chosen buffer size and informed the
# choice of the value, as of 11/2016:
#
# +---------------+------------+-------------+-------------+-------------+
# | | 50 MB file | 100 MB file | 200 MB file | 400 MB file |
# +---------------+------------+-------------+-------------+-------------+
# | 8 MB buffer | 17.12 MB/s | 22.67 MB/s | 23.81 MB/s | 26.05 MB/s |
# | 16 MB buffer | 24.21 MB/s | 42.70 MB/s | 42.89 MB/s | 46.92 MB/s |
# | 32 MB buffer | 28.53 MB/s | 48.08 MB/s | 54.30 MB/s | 54.65 MB/s |
# | 400 MB buffer | 34.72 MB/s | 71.13 MB/s | 79.13 MB/s | 85.39 MB/s |
# +---------------+------------+-------------+-------------+-------------+
DEFAULT_READ_BUFFER_SIZE = 16 * 1024 * 1024
# This is the number of seconds the library will wait for a partial-file read
# operation from GCS to complete before retrying.
DEFAULT_READ_SEGMENT_TIMEOUT_SECONDS = 60
# This is the size of chunks used when writing to GCS.
WRITE_CHUNK_SIZE = 8 * 1024 * 1024
# Maximum number of operations permitted in GcsIO.copy_batch() and
# GcsIO.delete_batch().
MAX_BATCH_OPERATION_SIZE = 100
# Batch endpoint URL for GCS.
# We have to specify an API specific endpoint here since Google APIs global
# batch endpoints will be deprecated on 03/25/2019.
# See https://developers.googleblog.com/2018/03/discontinuing-support-for-json-rpc-and.html. # pylint: disable=line-too-long
# Currently apitools library uses a global batch endpoint by default:
# https://github.com/google/apitools/blob/master/apitools/base/py/batch.py#L152
# TODO: remove this constant and it's usage after apitools move to using an API
# specific batch endpoint or after Beam gcsio module start using a GCS client
# library that does not use global batch endpoints.
GCS_BATCH_ENDPOINT = 'https://www.googleapis.com/batch/storage/v1'
def parse_gcs_path(gcs_path, object_optional=False):
  """Return the bucket and object names of the given gs:// path."""
  parsed = re.match('^gs://([^/]+)/(.*)$', gcs_path)
  if parsed is None:
    raise ValueError('GCS path must be in the form gs://<bucket>/<object>.')
  bucket, obj = parsed.groups()
  # An empty object name is only acceptable when the caller says so.
  if not obj and not object_optional:
    raise ValueError('GCS path must be in the form gs://<bucket>/<object>.')
  return bucket, obj
# Inheriting from retry.PermanentException tells the retry decorators in this
# module not to re-attempt the failed operation (e.g. a missing source object).
class GcsIOError(IOError, retry.PermanentException):
  """GCS IO error that should not be retried."""
  pass
class GcsIO(object):
  """Google Cloud Storage I/O client.

  Wraps an apitools ``storage`` client with open/delete/copy/rename/stat
  style operations on ``gs://<bucket>/<object>`` paths.
  """
  def __init__(self, storage_client=None):
    # Build a default client (service credentials, fresh http object) unless
    # the caller injects one, e.g. a mock client in tests.
    if storage_client is None:
      storage_client = storage.StorageV1(
          credentials=auth.get_service_credentials(),
          get_credentials=False,
          http=get_new_http(),
          response_encoding=None if sys.version_info[0] < 3 else 'utf8')
    self.client = storage_client
    self._rewrite_cb = None
  def _set_rewrite_response_callback(self, callback):
    """For testing purposes only. No backward compatibility guarantees.
    Args:
      callback: A function that receives ``storage.RewriteResponse``.
    """
    self._rewrite_cb = callback
  def open(
      self,
      filename,
      mode='r',
      read_buffer_size=DEFAULT_READ_BUFFER_SIZE,
      mime_type='application/octet-stream'):
    """Open a GCS file path for reading or writing.
    Args:
      filename (str): GCS file path in the form ``gs://<bucket>/<object>``.
      mode (str): ``'r'`` for reading or ``'w'`` for writing.
      read_buffer_size (int): Buffer size to use during read operations.
      mime_type (str): Mime type to set for write operations.
    Returns:
      GCS file object.
    Raises:
      ValueError: Invalid open file mode.
    """
    if mode == 'r' or mode == 'rb':
      downloader = GcsDownloader(
          self.client, filename, buffer_size=read_buffer_size)
      return io.BufferedReader(
          DownloaderStream(
              downloader, read_buffer_size=read_buffer_size, mode=mode),
          buffer_size=read_buffer_size)
    elif mode == 'w' or mode == 'wb':
      uploader = GcsUploader(self.client, filename, mime_type)
      return io.BufferedWriter(
          UploaderStream(uploader, mode=mode), buffer_size=128 * 1024)
    else:
      raise ValueError('Invalid file open mode: %s.' % mode)
  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def delete(self, path):
    """Deletes the object at the given GCS path.
    Args:
      path: GCS file path pattern in the form gs://<bucket>/<name>.
    """
    bucket, object_path = parse_gcs_path(path)
    request = storage.StorageObjectsDeleteRequest(
        bucket=bucket, object=object_path)
    try:
      self.client.objects.Delete(request)
    except HttpError as http_error:
      if http_error.status_code == 404:
        # Return success when the file doesn't exist anymore for idempotency.
        return
      raise
  # We intentionally do not decorate this method with a retry, as retrying is
  # handled in BatchApiRequest.Execute().
  def delete_batch(self, paths):
    """Deletes the objects at the given GCS paths.
    Args:
      paths: List of GCS file path patterns in the form gs://<bucket>/<name>,
        not to exceed MAX_BATCH_OPERATION_SIZE in length.
    Returns: List of tuples of (path, exception) in the same order as the paths
             argument, where exception is None if the operation succeeded or
             the relevant exception if the operation failed.
    """
    if not paths:
      return []
    batch_request = BatchApiRequest(
        batch_url=GCS_BATCH_ENDPOINT,
        retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES,
        response_encoding='utf-8')
    for path in paths:
      bucket, object_path = parse_gcs_path(path)
      request = storage.StorageObjectsDeleteRequest(
          bucket=bucket, object=object_path)
      batch_request.Add(self.client.objects, 'Delete', request)
    api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
    # Results are reported positionally, matching the order of `paths`.
    result_statuses = []
    for i, api_call in enumerate(api_calls):
      path = paths[i]
      exception = None
      if api_call.is_error:
        exception = api_call.exception
        # Return success when the file doesn't exist anymore for idempotency.
        if isinstance(exception, HttpError) and exception.status_code == 404:
          exception = None
      result_statuses.append((path, exception))
    return result_statuses
  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def copy(
      self,
      src,
      dest,
      dest_kms_key_name=None,
      max_bytes_rewritten_per_call=None):
    """Copies the given GCS object from src to dest.
    Args:
      src: GCS file path pattern in the form gs://<bucket>/<name>.
      dest: GCS file path pattern in the form gs://<bucket>/<name>.
      dest_kms_key_name: Experimental. No backwards compatibility guarantees.
        Encrypt dest with this Cloud KMS key. If None, will use dest bucket
        encryption defaults.
      max_bytes_rewritten_per_call: Experimental. No backwards compatibility
        guarantees. Each rewrite API call will return after these many bytes.
        Used for testing.
    Raises:
      TimeoutError: on timeout.
    """
    src_bucket, src_path = parse_gcs_path(src)
    dest_bucket, dest_path = parse_gcs_path(dest)
    request = storage.StorageObjectsRewriteRequest(
        sourceBucket=src_bucket,
        sourceObject=src_path,
        destinationBucket=dest_bucket,
        destinationObject=dest_path,
        destinationKmsKeyName=dest_kms_key_name,
        maxBytesRewrittenPerCall=max_bytes_rewritten_per_call)
    response = self.client.objects.Rewrite(request)
    # The rewrite API is resumable: loop, passing the token back, until the
    # service reports the copy as done.
    while not response.done:
      _LOGGER.debug(
          'Rewrite progress: %d of %d bytes, %s to %s',
          response.totalBytesRewritten,
          response.objectSize,
          src,
          dest)
      request.rewriteToken = response.rewriteToken
      response = self.client.objects.Rewrite(request)
      if self._rewrite_cb is not None:
        self._rewrite_cb(response)
    _LOGGER.debug('Rewrite done: %s to %s', src, dest)
  # We intentionally do not decorate this method with a retry, as retrying is
  # handled in BatchApiRequest.Execute().
  def copy_batch(
      self,
      src_dest_pairs,
      dest_kms_key_name=None,
      max_bytes_rewritten_per_call=None):
    """Copies the given GCS object from src to dest.
    Args:
      src_dest_pairs: list of (src, dest) tuples of gs://<bucket>/<name> files
        paths to copy from src to dest, not to exceed
        MAX_BATCH_OPERATION_SIZE in length.
      dest_kms_key_name: Experimental. No backwards compatibility guarantees.
        Encrypt dest with this Cloud KMS key. If None, will use dest bucket
        encryption defaults.
      max_bytes_rewritten_per_call: Experimental. No backwards compatibility
        guarantees. Each rewrite call will return after these many bytes. Used
        primarily for testing.
    Returns: List of tuples of (src, dest, exception) in the same order as the
             src_dest_pairs argument, where exception is None if the operation
             succeeded or the relevant exception if the operation failed.
    """
    if not src_dest_pairs:
      return []
    pair_to_request = {}
    for pair in src_dest_pairs:
      src_bucket, src_path = parse_gcs_path(pair[0])
      dest_bucket, dest_path = parse_gcs_path(pair[1])
      request = storage.StorageObjectsRewriteRequest(
          sourceBucket=src_bucket,
          sourceObject=src_path,
          destinationBucket=dest_bucket,
          destinationObject=dest_path,
          destinationKmsKeyName=dest_kms_key_name,
          maxBytesRewrittenPerCall=max_bytes_rewritten_per_call)
      pair_to_request[pair] = request
    # Re-batch and re-issue unfinished rewrites until every pair has either
    # completed or failed; pair_to_status accumulates terminal outcomes.
    pair_to_status = {}
    while True:
      pairs_in_batch = list(set(src_dest_pairs) - set(pair_to_status))
      if not pairs_in_batch:
        break
      batch_request = BatchApiRequest(
          batch_url=GCS_BATCH_ENDPOINT,
          retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES,
          response_encoding='utf-8')
      for pair in pairs_in_batch:
        batch_request.Add(self.client.objects, 'Rewrite', pair_to_request[pair])
      api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
      for pair, api_call in zip(pairs_in_batch, api_calls):
        src, dest = pair
        response = api_call.response
        if self._rewrite_cb is not None:
          self._rewrite_cb(response)
        if api_call.is_error:
          exception = api_call.exception
          # Translate 404 to the appropriate not found exception.
          if isinstance(exception, HttpError) and exception.status_code == 404:
            exception = (
                GcsIOError(errno.ENOENT, 'Source file not found: %s' % src))
          pair_to_status[pair] = exception
        elif not response.done:
          _LOGGER.debug(
              'Rewrite progress: %d of %d bytes, %s to %s',
              response.totalBytesRewritten,
              response.objectSize,
              src,
              dest)
          pair_to_request[pair].rewriteToken = response.rewriteToken
        else:
          _LOGGER.debug('Rewrite done: %s to %s', src, dest)
          pair_to_status[pair] = None
    return [(pair[0], pair[1], pair_to_status[pair]) for pair in src_dest_pairs]
  # We intentionally do not decorate this method with a retry, since the
  # underlying copy and delete operations are already idempotent operations
  # protected by retry decorators.
  def copytree(self, src, dest):
    """Renames the given GCS "directory" recursively from src to dest.
    Args:
      src: GCS file path pattern in the form gs://<bucket>/<name>/.
      dest: GCS file path pattern in the form gs://<bucket>/<name>/.
    """
    assert src.endswith('/')
    assert dest.endswith('/')
    for entry in self.list_prefix(src):
      rel_path = entry[len(src):]
      self.copy(entry, dest + rel_path)
  # We intentionally do not decorate this method with a retry, since the
  # underlying copy and delete operations are already idempotent operations
  # protected by retry decorators.
  def rename(self, src, dest):
    """Renames the given GCS object from src to dest.
    Args:
      src: GCS file path pattern in the form gs://<bucket>/<name>.
      dest: GCS file path pattern in the form gs://<bucket>/<name>.
    """
    self.copy(src, dest)
    self.delete(src)
  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def exists(self, path):
    """Returns whether the given GCS object exists.
    Args:
      path: GCS file path pattern in the form gs://<bucket>/<name>.
    """
    bucket, object_path = parse_gcs_path(path)
    try:
      request = storage.StorageObjectsGetRequest(
          bucket=bucket, object=object_path)
      self.client.objects.Get(request) # metadata
      return True
    except HttpError as http_error:
      if http_error.status_code == 404:
        # HTTP 404 indicates that the file did not exist
        return False
      else:
        # We re-raise all other exceptions
        raise
  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def checksum(self, path):
    """Looks up the checksum of a GCS object.
    Args:
      path: GCS file path pattern in the form gs://<bucket>/<name>.
    """
    bucket, object_path = parse_gcs_path(path)
    request = storage.StorageObjectsGetRequest(
        bucket=bucket, object=object_path)
    return self.client.objects.Get(request).crc32c
  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def size(self, path):
    """Returns the size of a single GCS object.
    This method does not perform glob expansion. Hence the given path must be
    for a single GCS object.
    Returns: size of the GCS object in bytes.
    """
    bucket, object_path = parse_gcs_path(path)
    request = storage.StorageObjectsGetRequest(
        bucket=bucket, object=object_path)
    return self.client.objects.Get(request).size
  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def kms_key(self, path):
    """Returns the KMS key of a single GCS object.
    This method does not perform glob expansion. Hence the given path must be
    for a single GCS object.
    Returns: KMS key name of the GCS object as a string, or None if it doesn't
      have one.
    """
    bucket, object_path = parse_gcs_path(path)
    request = storage.StorageObjectsGetRequest(
        bucket=bucket, object=object_path)
    return self.client.objects.Get(request).kmsKeyName
  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def last_updated(self, path):
    """Returns the last updated epoch time of a single GCS object.
    This method does not perform glob expansion. Hence the given path must be
    for a single GCS object.
    Returns: last updated time of the GCS object in second.
    """
    bucket, object_path = parse_gcs_path(path)
    request = storage.StorageObjectsGetRequest(
        bucket=bucket, object=object_path)
    datetime = self.client.objects.Get(request).updated
    # Convert the returned datetime to POSIX seconds; time.timezone corrects
    # mktime's local-time interpretation back to UTC.
    return (
        time.mktime(datetime.timetuple()) - time.timezone +
        datetime.microsecond / 1000000.0)
  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def list_prefix(self, path):
    """Lists files matching the prefix.
    Args:
      path: GCS file path pattern in the form gs://<bucket>/[name].
    Returns:
      Dictionary of file name -> size.
    """
    bucket, prefix = parse_gcs_path(path, object_optional=True)
    request = storage.StorageObjectsListRequest(bucket=bucket, prefix=prefix)
    file_sizes = {}
    counter = 0
    start_time = time.time()
    _LOGGER.info("Starting the size estimation of the input")
    # Page through the listing until the service stops returning a token.
    while True:
      response = self.client.objects.List(request)
      for item in response.items:
        file_name = 'gs://%s/%s' % (item.bucket, item.name)
        file_sizes[file_name] = item.size
        counter += 1
        if counter % 10000 == 0:
          _LOGGER.info("Finished computing size of: %s files", len(file_sizes))
      if response.nextPageToken:
        request.pageToken = response.nextPageToken
      else:
        break
    _LOGGER.info(
        "Finished listing %s files in %s seconds.",
        counter,
        time.time() - start_time)
    return file_sizes
class GcsDownloader(Downloader):
  """Downloader for a single GCS object, used by ``GcsIO.open`` for reads."""
  def __init__(self, client, path, buffer_size):
    self._client = client
    self._path = path
    self._bucket, self._name = parse_gcs_path(path)
    self._buffer_size = buffer_size
    # Get object state.
    self._get_request = (
        storage.StorageObjectsGetRequest(
            bucket=self._bucket, object=self._name))
    try:
      metadata = self._get_object_metadata(self._get_request)
    except HttpError as http_error:
      if http_error.status_code == 404:
        raise IOError(errno.ENOENT, 'Not found: %s' % self._path)
      else:
        _LOGGER.error(
            'HTTP error while requesting file %s: %s', self._path, http_error)
        raise
    self._size = metadata.size
    # Ensure read is from file of the correct generation.
    self._get_request.generation = metadata.generation
    # Initialize read buffer state.
    self._download_stream = io.BytesIO()
    self._downloader = transfer.Download(
        self._download_stream,
        auto_transfer=False,
        chunksize=self._buffer_size,
        num_retries=20)
    self._client.objects.Get(self._get_request, download=self._downloader)
  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _get_object_metadata(self, get_request):
    # Retried metadata fetch; used once at construction time.
    return self._client.objects.Get(get_request)
  @property
  def size(self):
    # Total object size in bytes, captured from the metadata at open time.
    return self._size
  def get_range(self, start, end):
    """Return bytes [start, end) of the object (``end`` is exclusive here)."""
    # Reuse the same BytesIO buffer for every range read.
    self._download_stream.seek(0)
    self._download_stream.truncate(0)
    # transfer.Download.GetRange takes an inclusive end offset, hence end - 1.
    self._downloader.GetRange(start, end - 1)
    return self._download_stream.getvalue()
class GcsUploader(Uploader):
  """Uploader for a single GCS object, used by ``GcsIO.open`` for writes.
  Bytes handed to :meth:`put` travel through a multiprocessing pipe to a
  daemon thread that streams them to GCS as a resumable upload.
  """
  def __init__(self, client, path, mime_type):
    self._client = client
    self._path = path
    self._bucket, self._name = parse_gcs_path(path)
    self._mime_type = mime_type
    # Set up communication with child thread.
    parent_conn, child_conn = multiprocessing.Pipe()
    self._child_conn = child_conn
    self._conn = parent_conn
    # Set up uploader.
    self._insert_request = (
        storage.StorageObjectsInsertRequest(
            bucket=self._bucket, name=self._name))
    self._upload = transfer.Upload(
        PipeStream(self._child_conn),
        self._mime_type,
        chunksize=WRITE_CHUNK_SIZE)
    self._upload.strategy = transfer.RESUMABLE_UPLOAD
    # Start uploading thread.
    self._upload_thread = threading.Thread(target=self._start_upload)
    self._upload_thread.daemon = True
    # last_error carries any uploader-thread exception back to the caller.
    self._upload_thread.last_error = None
    self._upload_thread.start()
  # TODO(silviuc): Refactor so that retry logic can be applied.
  # There is retry logic in the underlying transfer library but we should make
  # it more explicit so we can control the retry parameters.
  @retry.no_retries # Using no_retries marks this as an integration point.
  def _start_upload(self):
    # This starts the uploader thread. We are forced to run the uploader in
    # another thread because the apitools uploader insists on taking a stream
    # as input. Happily, this also means we get asynchronous I/O to GCS.
    #
    # The uploader by default transfers data in chunks of 1024 * 1024 bytes at
    # a time, buffering writes until that size is reached.
    try:
      self._client.objects.Insert(self._insert_request, upload=self._upload)
    except Exception as e: # pylint: disable=broad-except
      _LOGGER.error(
          'Error in _start_upload while inserting file %s: %s',
          self._path,
          traceback.format_exc())
      self._upload_thread.last_error = e
    finally:
      # Closing the read end unblocks the parent's pipe writes on failure.
      self._child_conn.close()
  def put(self, data):
    """Send one memoryview chunk of data to the upload thread."""
    try:
      self._conn.send_bytes(data.tobytes())
    except EOFError:
      # Pipe closed: surface the uploader thread's failure if there was one.
      if self._upload_thread.last_error is not None:
        raise self._upload_thread.last_error # pylint: disable=raising-bad-type
      raise
  def finish(self):
    """Close the pipe, wait for the upload thread and re-raise its error."""
    self._conn.close()
    # TODO(udim): Add timeout=DEFAULT_HTTP_TIMEOUT_SECONDS * 2 and raise if
    # isAlive is True.
    self._upload_thread.join()
    # Check for exception since the last put() call.
    if self._upload_thread.last_error is not None:
      raise self._upload_thread.last_error # pylint: disable=raising-bad-type
| a0x8o/kafka | sdks/python/apache_beam/io/gcp/gcsio.py | gcsio.py | py | 22,338 | python | en | code | 59 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "apache_beam.utils.retry.PermanentException",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_na... |
33650052777 | import os, sys
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..'))
import subprocess
import shlex
import re
import pexpect
from nltk.tokenize.stanford_segmenter import StanfordSegmenter
from nltk import tokenize
from TMPosTagger.TMJapanesePosTagger import TMMyKyteaTagger
from TMPreprocessor.Xml.XmlUtils import XmlUtils
from TMPosTagger.ExternalDetokenizer.TMDetokenizer import Detokenizer
pragmatic_segmenter_home = os.path.join(os.path.abspath(os.path.join(__file__, "../..")), 'tools/pragmatic_segmenter-master/')
moses_tokenizer_home = os.path.join(os.path.abspath(os.path.join(__file__, "../..")), 'tools/mosesdecoder-master/scripts/tokenizer/')
stanford_tokenizer_home = os.path.join(os.path.abspath(os.path.join(__file__ ,"../..")),'tools/stanford-segmenter-2015-12-09/')
TAG_PREFIX = 'T' # Paterns to join tags that tokenized split
TOK_PATTERN = re.compile('< /?{}[0-9]*/? >'.format(TAG_PREFIX))
JOIN_PATTERN = '(<)( /?T[0-9]+/? )(>)'
class TMStanfordTokenizer():
    """Word/sentence tokenizer backed by the Stanford Segmenter (Chinese, Arabic)."""
    # Segmentation model file per language code.
    models = {'ZH': 'ctb.gz',
              'AR': 'arabic-segmenter-atb+bn+arztrain.ser.gz'}
    # Companion dictionary per language code (Arabic has none).
    dics = {'ZH': 'dict-chris6.ser.gz',
            'AR': ''}

    def __init__(self, language):
        self.language = language
        selected_model = self.models.get(language)
        selected_dic = self.dics.get(language)
        if not selected_model:
            raise Exception("Unsupported language for tokenizer: {}".format(language))
        # Wire up the Stanford segmenter jar, model and dictionary files.
        data_dir = os.path.join(stanford_tokenizer_home, 'data')
        self.tm_tokenize = StanfordSegmenter(
            path_to_jar=os.path.join(stanford_tokenizer_home, 'stanford-segmenter-3.6.0.jar'),
            path_to_model=os.path.join(data_dir, selected_model),
            path_to_dict=os.path.join(data_dir, selected_dic),
            path_to_sihan_corpora_dict=data_dir,
            path_to_slf4j=os.path.join(stanford_tokenizer_home, 'slf4j-api.jar'))

    def process(self, sentences):
        """Segment the input string into space-separated tokens, re-joining split tags.
        Example output: 这 是 斯坦福 中文 分词 器 测试
        """
        segmented = self.tm_tokenize.segment(sentences).strip('\n')
        if re.search(TOK_PATTERN, segmented):  # The text contains placeholder tags
            segmented = XmlUtils.join_tags(segmented, JOIN_PATTERN)
        return segmented

    def tokenize_sent(self, text):
        """Split text into sentences (by the Chinese full stop for ZH)."""
        if self.language != 'ZH':
            return [text]
        return [part + '。' for part in text.split('。') if part]
class TMNLTKTokenizer():
    """Word and sentence tokenizer based on NLTK's punkt models."""
    # Available NLTK tokenizer models.
    models = {'EN': 'tokenizers/punkt/english.pickle',
              'ES': 'tokenizers/punkt/spanish.pickle',
              'FR': 'tokenizers/punkt/french.pickle',
              'DE': 'tokenizers/punkt/german.pickle',
              'PT': 'tokenizers/punkt/portuguese.pickle',  # portuguese
              'IT': 'tokenizers/punkt/italian.pickle',
              'PL': 'tokenizers/punkt/polish.pickle',  # polish
              'NL': 'tokenizers/punkt/dutch.pickle',  # dutch
              'ET': 'tokenizers/punkt/estonian.pickle',  # estonian
              'FI': 'tokenizers/punkt/finnish.pickle',  # finnish
              'CS': 'tokenizers/punkt/czech.pickle',  # czech
              'CZ': 'tokenizers/punkt/czech.pickle',  # czech
              'DA': 'tokenizers/punkt/danish.pickle',  # danish
              'EL': 'tokenizers/punkt/greek.pickle',  # greek
              'NO': 'tokenizers/punkt/norwegian.pickle',  # norwegian
              'SL': 'tokenizers/punkt/slovene.pickle',  # slovene
              'SV': 'tokenizers/punkt/swedish.pickle',  # swedish
              'TU': 'tokenizers/punkt/turkish.pickle',  # turkish
              }

    def __init__(self, language):
        self.language = language
        model = self.models.get(self.language)
        if not model:
            raise Exception("Unsupported language for Tokenizer: {}".format(language))
        self.tokenizer = tokenize
        # Punkt model name, e.g. 'tokenizers/punkt/english.pickle' -> 'english'.
        # Computed once here instead of on every process()/tokenize_sent() call.
        self.nltk_model = model.split('/')[2].split('.')[0]

    def tokenize_sent(self, text):
        """Split text into a list of sentences using the punkt model.
        Example: 'Ciao mondo! Ciao.' -> ['Ciao mondo!', 'Ciao.']
        """
        return self.tokenizer.sent_tokenize(text, self.nltk_model)

    def process(self, text):
        """Split text into space-separated word tokens, re-joining split tags.
        Example: 'Ciao meraviglioso e perfetto mondo!' -> 'Ciao meraviglioso e perfetto mondo !'
        """
        text = ' '.join(self.tokenizer.word_tokenize(text, self.nltk_model))
        if re.search(TOK_PATTERN, text):  # Check if the text has tags
            text = XmlUtils.join_tags(text, JOIN_PATTERN)
        return text
# Class to split in sentences
'''
@api {INFO} /TMPragmatic TMPragmatic -- Split sentences with punctation (".").
@apiName TMPragmatic
@apiVersion 0.1.0
@apiGroup TMTokenizer
@apiParam {String} language sentences language.
@apiSuccess {List} split_sentence List of sentences split by punctuation.
@apiExample {curl} Example & Notes
# Input: Hello world. My name is Mr. Smith. I work for the U.S. Government and I live in the U.S. I live in New York.
# Output: ['Hello world.', 'My name is Mr. Smith.', 'I work for the U.S. Government and I live in the U.S.', 'I live in New York.']
# The class execute a ruby command line to split the sentences
# TMPragmatic diretory is in tools/pragmatic_segmenter-master/
'''
class TMPragmatic():
    # Sentence splitter backed by the external Ruby "pragmatic_segmenter" tool.
    def __init__(self, language):
        # Command line used to invoke the Ruby segmenter for this language.
        self.args = 'ruby ' + pragmatic_segmenter_home + 'segmenter.rb ' + language.lower()
    # Input: Hello world. My name is Mr. Smith. I work for the U.S. Government and I live in the U.S. I live in New York.
    # Output: ['Hello world.', 'My name is Mr. Smith.', 'I work for the U.S. Government and I live in the U.S.', 'I live in New York.']
    def tokenize_sent(self, text):
        # NOTE(review): text is interpolated between double quotes into the
        # command string -- a '"' inside text breaks the call; confirm inputs
        # are sanitized upstream.
        sentences = pexpect.run(self.args + ' ' '"' + text + '"', withexitstatus=False)
        # The tool prints one sentence per line; drop empty lines.
        text = [sent for sent in sentences.decode("utf-8").split('\r\n') if sent]
        return text
class TMNLTKTokenizerGeneric():
    """Fallback tokenizer: regex-based word splitting, English NLTK sentence splitting."""
    def __init__(self, language):
        self.tokenizer = tokenize
        # Sentence splitting delegated to the English punkt model
        # (TMPragmatic(language) was the previous delegate).
        self.sentence = TMNLTKTokenizer('EN')

    def process(self, text):
        """Split text into space-separated word tokens, re-joining split tags."""
        tokens = self.tokenizer.wordpunct_tokenize(text)
        joined = ' '.join(tokens)
        if re.search(TOK_PATTERN, joined) is not None:
            joined = XmlUtils.join_tags(joined, JOIN_PATTERN)
        return joined

    def tokenize_sent(self, text):
        """Split text into sentences via the delegate sentence splitter."""
        return self.sentence.tokenize_sent(text)
class TMMosesTokenizer():
    # Word tokenizer backed by the Moses tokenizer.perl script.
    def __init__(self, language):
        # -protected --> specify file with patterns to be protected in tokenisation (URLs, etc)
        # -no-escape --> don't perform HTML escaping on apostrophe, quotes
        # NOTE(review): the command passes "-protect" with no file argument,
        # while the Moses option is "-protected FILE" -- confirm the intent.
        self.args = shlex.split(moses_tokenizer_home + 'tokenizer.perl -protect -no-escape -l ' + language.lower())
        #self.tokenizer = Popen(self.args, stdin=PIPE, stdout=PIPE)
    # Input: String
    # Output: Esto es un problema muy grande y complicado .
    def process(self, text):
        # A new perl subprocess is spawned per call; the text is piped through
        # stdin/stdout, so one call tokenizes one string.
        tokenizer = subprocess.Popen(self.args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        tok_sents, tok_exc = tokenizer.communicate(input = text.encode('utf8'))
        tokenizer.wait()
        text = (tok_sents.decode("utf-8")).strip('\n')
        if re.search(TOK_PATTERN, text): # Check if the text have tags
            text = XmlUtils.join_tags(text, JOIN_PATTERN)
        return text
    def tokenize_sent(self, text):
        # No sentence splitting for this backend: return the text as-is.
        return [text]
'''
class TMTokenizerTemplate():
def __init__(self, language):
self.tokenizer
self.sentence =
def process(self, text): #
return text_word
def tokenize_sent(self, text):
return text_sent
'''
"""
@api {INFO} /TMTokenizer TMTokenizer -- Available Tokenizer models. This class initializes the available models
@apiName TMTokenizer
@apiVersion 0.1.0
@apiGroup TMTokenizer
@apiExample {curl} Input & Output:
# Input: (String) language
# Output: (Object) tokenizer model
# Error: Unsupported language for Tokenize. This messages appear on uwsgi.log
* The error messages appear on uwsgi.log
# The tokenizer models:
- split sentences with punctation (".") => function "tokenize_sent(self, text)"
Example
#Input: 'Labas pasauli. Mano vardas yra p Smithas. Dirbu su JAV vyriausybe ir aš gyventi į JAV, gyvenu Niujorke.'
#Output: ['Labas pasauli.', 'Mano vardas yra p Smithas.', 'Dirbu su JAV vyriausybe ir aš gyventi į JAV, gyvenu Niujorke.']
- split into individual words => function "process(self, text)"
Example
# Input: '3. 土著知识扎根于土著社区的文化传统和长期习俗。'
# Output: ' 3 . 土著 知识 扎 根于 土著 社区 的 文化 传统 和 长期 习俗 。'
@apiExample {curl} Example & Notes
# List of available class and tokenizer tool
# - TMMosesTokenizer => Moses (Not currently used)
# - TMNLTKTokenizerGeneric => Based on regular expression to split into individual words and the TMPragmatic class to split into sentences.
    # - TMStanfordTokenizer => Stanford Tokenizer. It is currently used for Chinese and Arabic. It's very slow.
# - TMNLTKTokenizer => NLTK
# All the class must implement a function "process(self, text)" to split the sententes into individual words
# and "tokenize_sent(self, text)" to split sentences with punctation (".").
* Search "split by Sentences" on uwsgi.log to see the segmentation with punctation (".").
To include new tokenizer for other languages
1- Define the model for new language
models = {'EN': 'nltk',
'RU': 'generic',
'ZH': 'stanford',
'HH': 'tokenizerTemplate',
2- Create the class to execute the new model
if self.models == 'tokenizerTemplate':
self.tokenizer = TMTokenizerTemplate(language)
* See the class TMTokenizerTemplate in TMTokenizer file to add other tokenizer
"""
# 'generic' tokenizer use the Pragmatic class to split by sentences. I do that, because the some language that chinese for example, that use Stanford to word and sentence tokenizer
# then if I create a differente class to tokenizer and split sentence I will to execute the Stanford tokenizer in two time and is very expensive.
class TMTokenizer():
# There are others languages avaliable in moses and nltk.
# Moses --> look out --> moses/scripts/share/nonbreaking-prefixes
# Nltk --> look out --> nltk_data/tokenizers/punkt
models = {'EN': 'nltk', # --> moses
'ES': 'nltk', #--> moses
'FR': 'nltk', #--> moses
'DE': 'nltk', # german #--> moses
'IT': 'nltk',#'moses', # italian
'PT': 'nltk', # portuguese #--> moses
'PL': 'nltk', # polish #--> moses
'RU': 'generic', # russian --> #'moses',
'BG': 'generic', # bulgarian
'NL': 'nltk', # dutch #--> moses
'ET': 'nltk', # estonian
'FI': 'nltk', # finnish
'CR': 'generic', #'KoNLPy', # korean
'JA': 'kytea', # Japanese
'ZH': 'stanford', # chinese
'AR': 'generic', # arabic
'CZ': 'nltk', # czech
'CS': 'nltk', # czech
'DA': 'nltk', # danish
'EL': 'nltk', # greek --> moses
'NO': 'nltk', # norwegian
'SL': 'nltk', # slovene --> moses
'SV': 'nltk', # swedish --> moses
'TU': 'nltk', # turkish
'HE': 'generic', #hb
'GA': 'generic', # Irish --> moses
'HU': 'generic', # hungarian --> moses
'LT': 'generic', # Lithuanian
'LV': 'generic', # Latvian --> moses
'MT': 'generic', # Maltese
'RO': 'generic', # Romanian --> moses
'SK': 'generic', # Slovak
'IS': 'generic', # Icelandic --> moses
'HR': 'generic' # Croatian
}
def __init__(self, language):
language = language.upper()
# Available Tokenizer models. TODO: fill entries for other languages
self.tool = self.models.get(language.upper())
if not self.tool: raise (Exception("Unsupported language for Tokenize: {}".format(language)))
if self.tool == 'stanford':
# Initialize Stanford
self.tokenizer = TMStanfordTokenizer(language)
# Initialize NLTK
if self.tool == 'nltk':
self.tokenizer = TMNLTKTokenizer(language)
# Initialize kytea
if self.tool == 'kytea':
self.tokenizer = TMMyKyteaTagger()
# Initialize Moses
if self.tool == 'moses':
self.tokenizer = TMMosesTokenizer(language)
#Initialize Generic Tokenizer
if self.tool == 'generic':
self.tokenizer = TMNLTKTokenizerGeneric(language)
def un_tokenizer(self, in_text):
my_puntation_list = ['!', '"', '#', '$', '%', '&', "'", ')', '*', '+', ',', '-', '.', ':', ';', '<', '=', '>', '?', '@',
'\\', ']', '^', '_', '`', '|', '}', '~', '。', ',', ';', '、']
if isinstance(in_text, bytes):
in_text = in_text.decode('utf8')
text = ("".join([" " + i if not i.startswith("'") and i not in my_puntation_list else i for i in in_text]).strip()).encode('utf8')
else:
text = ("".join([" " + i if not i.startswith("'") and i not in my_puntation_list else i for i in in_text]).strip())
text = re.sub('\( ', '(', text)
text = re.sub('\[ ', '[', text)
text = re.sub('\{ ', '{', text)
text = re.sub('\/ ', '/', text)
text = re.sub(' \/', '/', text)
return text
# Class to split in sentences
class TMPragmatic():
def __init__(self, language):
args = shlex.split('ruby ' + pragmatic_segmenter_home + 'segmenter.rb ' + language.lower())
self.segmenter = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Input:
# text = 'Labas pasauli. Mano vardas yra p Smithas. Dirbu su JAV vyriausybe ir aš gyventi į JAV, gyvenu Niujorke.'
# lang = 'lt'
#Output: ['Labas pasauli.', 'Mano vardas yra p Smithas.', 'Dirbu su JAV vyriausybe ir aš gyventi į JAV, gyvenu Niujorke.']
def tokenize_sent(self, text):
sentences, tok_exc = self.segmenter.communicate(text.encode('utf8'))
return [sent for sent in sentences.decode("utf-8").split('\n') if sent]
"""
@api {INFO} /TMUNTokenizer TMUNTokenizer -- Initialize available Detokenizer models.
@apiName TMUNTokenizer
@apiVersion 0.1.0
@apiGroup TMTokenizer
@apiPermission admin
@apiExample {curl} Input & Output:
# Input:
(String) language
(List) List of words.
# Output: (String) Detokenizer sentence
# To include new detokenizer for other languages change the function un_tokenizer(self, in_text)
# Currently, we used "en" rules to detokenizer several language. We use specific rules to detokenizer english, japanese, italian and czench
"""
class TMUNTokenizer():
# Use moses untokenizer TODO: fill entries for other languages
models = {'EN': 'en', # --> moses
'ES': 'en', # --> moses
'FR': 'fr', # --> moses
'DE': 'en', # german #--> moses
'IT': 'it', # 'moses', # italian
'PT': 'en', # portuguese #--> moses
'PL': 'en', # polish #--> moses
'RU': 'en', # russian --> #'moses',
'BG': 'en', # bulgarian
'NL': 'en', # dutch #--> moses
'ET': 'en', # estonian
'FI': 'fi', # finnish
'CR': 'en', # 'KoNLPy', # korean
'JA': 'kytea', # Japanese
'ZH': 'en', # chinese
'AR': 'en', # arabic
'CZ': 'en', # czech
'CS': 'cs', # czech
'DA': 'en', # danish
'EL': 'en', # greek --> moses
'NO': 'en', # norwegian
'SL': 'en', # slovene --> moses
'SV': 'en', # swedish --> moses
'TU': 'en', # turkish
'HE': 'en', # hb
'GA': 'en', # Irish --> moses
'HU': 'en', # hungarian --> moses
'LT': 'en', # Lithuanian
'LV': 'en', # Latvian --> moses
'MT': 'en', # Maltese
'RO': 'en', # Romanian --> moses
'SK': 'en', # Slovak
'IS': 'en', # Icelandic --> moses
'HR': 'en' # Croatian
}
def __init__(self, language):
self.language = language.upper()
if self.language == 'ZH' or self.language == 'AR' or self.language == 'CR':
self.tool = Detokenizer(options={'language':language})
def un_tokenizer(self, in_text):
if self.language == 'ZH' or self.language == 'AR' or self.language == 'CR':
return self.tool.detokenize(' '.join(in_text))
else:
my_puntation_list = ['!', '"', '#', '$', '%', '&', "'", ')', '*', '+', ',', '-', '.', ':', ';', '<', '=', '>',
'?', '@',
'\\', ']', '^', '_', '`', '|', '}', '~', '。', ',', ';', '、']
if isinstance(in_text, bytes):
in_text = in_text.decode('utf8')
text = ("".join(
[" " + i if not i.startswith("'") and i not in my_puntation_list else i for i in in_text]).strip()).encode(
'utf8')
else:
text = (
"".join([" " + i if not i.startswith("'") and i not in my_puntation_list else i for i in in_text]).strip())
text = re.sub('\( ', '(', text)
text = re.sub('\[ ', '[', text)
text = re.sub('\{ ', '{', text)
text = re.sub('\/ ', '/', text)
text = re.sub(' \/', '/', text)
return text
'''
# Receive a list of words, return a string
def un_tokenizer(self, text):
return self.tool.detokenize(' '.join(text))
'''
| shasha79/nectm | src/TMPosTagger/TMTokenizer.py | TMTokenizer.py | py | 17,900 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
31045306948 | from step_functions.state_machine_resource import execute_state_machine
import click
# as per available non english ids https://docs.aws.amazon.com/polly/latest/dg/ntts-voices-main.html
NEURAL_VOICE_LIST = [
"Vicki",
"Bianca",
"Takumi",
"Seoyeon",
"Camila",
"Vitoria",
"Ines",
"Lucia",
"Mia",
"Lupe",
"Lea",
"Gabrielle",
"Hannah",
"Arlet",
]
# note some of comprehend services like sentiment are less restrictive but detect syntax only allows these
COMPREHEND_LANG_CODES = [
"de",
"pt",
"en",
"it",
"fr",
"es",
]
@click.command()
@click.option("--sf_name", help="Name of step function to execute")
@click.option("--target_lang_code", help="Lang to translate video into")
@click.option(
"--voice_id",
help="Polly voice id for synthesising text to speech. Must be available with neural engine option in AWS Polly",
)
@click.option(
"--deploy/--no-deploy",
default=False,
help="Flag to determine whether to deploy new step function or not before execution",
)
@click.option(
"--bucket",
default="awstestnlp",
help="s3 bucket containing the source and output files",
)
@click.option(
"--source_filename", default="transcribe-sample.mp3", help="filename of source mp3",
)
@click.option(
"--source_lang_code", default="en-US", help="language code for source video"
)
@click.option(
"--job_name",
default="Test",
help="Name of transcription job to execute in step function",
)
@click.option(
"--sf_role",
default="StepFunctionAWSNLPServices",
help="Name of IAM role assumed by step function",
)
def execute_nlp_state_machine(
sf_name,
target_lang_code,
bucket,
source_filename,
source_lang_code,
job_name,
voice_id,
deploy,
sf_role,
):
if voice_id in NEURAL_VOICE_LIST:
engine = "neural"
else:
engine = "standard"
if target_lang_code in COMPREHEND_LANG_CODES:
skip_comprehend = False
else:
skip_comprehend = True
sf_input = {
"BucketName": bucket,
"Source": f"s3://{bucket}/source/{source_lang_code}/{source_filename}",
"TranscribeOutputKey": f"transcribe/{target_lang_code}/transcribed.json",
"PollyVideoOutputKey": f"polly/{target_lang_code}/{voice_id}/",
"PollyResponseOutputKey": f"polly/{target_lang_code}/response.json",
"ComprehendOutputKey": f"comprehend/{target_lang_code}/response.json",
"TranslateOutputKey": f"translate/{target_lang_code}/response.json",
"SourceLanguageCode": source_lang_code,
"TargetLanguageCode": target_lang_code,
"JobName": job_name,
"VoiceId": voice_id,
"EngineType": engine,
"SkipComprehend": skip_comprehend,
}
if deploy:
return execute_state_machine(sf_input, sf_name, True, sf_role)
else:
return execute_state_machine(sf_input, sf_name)
if __name__ == "__main__":
execute_nlp_state_machine()
| ryankarlos/AWS-ML-services | projects/nlp/execute_pipeline.py | execute_pipeline.py | py | 3,003 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "step_functions.state_machine_resource.execute_state_machine",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "step_functions.state_machine_resource.execute_state_machine",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "click.command",
"li... |
41023646811 | import numpy as np
import argparse
import cv2
cap = cv2.VideoCapture(0)
while(1):
ret, frame = cap.read()
gray_vid = cv2.cvtColor(frame, cv2.IMREAD_GRAYSCALE)
cv2.imshow('Original',frame)
edged_frame = cv2.Canny(frame,100,200)
cv2.imshow('Edges',edged_frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| emredogan7/METU-EE-2017-2018-Capstone-Design-Project-Repository | Code/edge_detection_canny.py | edge_detection_canny.py | py | 365 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
... |
38078314636 | import torch
from torch import nn
from torch import Tensor
class RanEncoder(nn.Module):
def __init__(self, per_dim, in_channel, out_channel):
super().__init__()
self.per_dim = per_dim
self.in_channel = in_channel
self.out_channel = out_channel
self.conv1 = nn.Conv2d(self.in_channel, self.out_channel, kernel_size=(1, 1))
self.bn = nn.BatchNorm2d(self.out_channel)
self.drop_out = nn.Dropout(p=0.2)
self.relu = nn.ReLU(inplace=True)
def forward(self, x: Tensor):
"""
:param x: shape(bs,features_dim)
:return:
"""
x_nums = x.shape[0]
x = torch.reshape(x, (-1, self.in_channel, self.per_dim, self.per_dim))
out = self.conv1(x)
out = self.bn(out)
out = self.drop_out(out)
out = self.relu(out)
out = torch.reshape(out, (x_nums, self.out_channel, -1))
return out
| DTI-dream/EDC-DTI | src/core/RAN_encoder.py | RAN_encoder.py | py | 984 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
856846577 | #!/usr/bin/env python
from pyhesity import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com')
parser.add_argument('-u', '--username', type=str, default='helios')
parser.add_argument('-d', '--domain', type=str, default='local')
parser.add_argument('-c', '--clustername', type=str, default=None)
parser.add_argument('-mcm', '--mcm', action='store_true')
parser.add_argument('-i', '--useApiKey', action='store_true')
parser.add_argument('-pwd', '--password', type=str, default=None)
parser.add_argument('-np', '--noprompt', action='store_true')
parser.add_argument('-m', '--mfacode', type=str, default=None)
parser.add_argument('-em', '--emailmfacode', action='store_true')
parser.add_argument('-n', '--viewname', action='append', type=str)
parser.add_argument('-l', '--viewlist', type=str, default=None)
args = parser.parse_args()
vip = args.vip
username = args.username
domain = args.domain
clustername = args.clustername
mcm = args.mcm
useApiKey = args.useApiKey
password = args.password
noprompt = args.noprompt
mfacode = args.mfacode
emailmfacode = args.emailmfacode
viewnames = args.viewname
viewlist = args.viewlist
# gather list
def gatherList(param=None, filename=None, name='items', required=True):
items = []
if param is not None:
for item in param:
items.append(item)
if filename is not None:
f = open(filename, 'r')
items += [s.strip() for s in f.readlines() if s.strip() != '']
f.close()
if required is True and len(items) == 0:
print('no %s specified' % name)
exit()
return items
viewnames = gatherList(viewnames, viewlist, name='views', required=True)
# authenticate
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey, helios=mcm, prompt=(not noprompt), emailMfaCode=emailmfacode, mfaCode=mfacode)
# if connected to helios or mcm, select access cluster
if mcm or vip.lower() == 'helios.cohesity.com':
if clustername is not None:
heliosCluster(clustername)
else:
print('-clustername is required when connecting to Helios or MCM')
exit()
# exit if not authenticated
if apiconnected() is False:
print('authentication failed')
exit(1)
jobs = api('get', 'data-protect/protection-groups?isActive=true&isPaused=true&environments=kView', v=2)
views = api('get', 'file-services/views', v=2)
# delete old remote views from target cluster
for viewname in viewnames:
view = [v for v in views['views'] if v['name'].lower() == viewname.lower()]
if view is None or len(view) == 0:
print('view %s not found' % viewname)
else:
view = view[0]
if view['isReadOnly'] is not True:
print('View %s is live. Skipping...' % view['name'])
else:
print('Deleting old remote view %s' % view['name'])
result = api('delete', 'views/%s' % view['name'])
if 'protectionGroups' in jobs and jobs['protectionGroups'] is not None:
for job in jobs['protectionGroups']:
if job['viewParams']['objects'] is None or len(job['viewParams']['objects']) == 0:
print('Deleting old job %s' % job['name'])
result = api('delete', 'data-protect/protection-groups/%s' % job['id'], v=2)
| bseltz-cohesity/scripts | python/viewDR66/deleteOldViews.py | deleteOldViews.py | py | 3,313 | python | en | code | 85 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
}
] |
72007183145 | from flask import Flask, request
from covid import CovidAssist
import pycountry
import requests_cache
import datetime
import json
import requests
import os
from twilio.twiml.messaging_response import MessagingResponse
app = Flask(__name__)
agent = CovidAssist()
requests_cache.install_cache(cache_name='covid_cache', backend='sqlite', expire_after=86400)
@app.route('/bot', methods=['POST'])
def bot():
# add webhook logic here and return a response
# keys = authenticate()
incoming_msg = request.values.get('Body')
response = message_parser(incoming_msg)
return response
def authenticate():
ACCOUNT_SID = ""
AUTH_TOKEN = ""
with open("auth.txt","r") as f:
return [str(el).strip("\n") for el in f.readlines()]
def message_parser(incoming_msg):
resp = MessagingResponse()
msg = resp.message()
if "information" in incoming_msg or "Information" in incoming_msg:
agent.country=country_parser(incoming_msg)
date = agent.get_latest_date()
date = date.text
data = agent.get_latest_country_data()
data = json.loads(data)
msg.body(" COVID-19 Cases in : "+ str(agent.country)+
" \n Date Updated : " + str(date)+
" \n\n \U0001F449 confirmed = " + str(data["result"][date]["confirmed"]) +
" \n \U0001F449 deaths = " + str(data["result"][date]["deaths"]) +
" \n \U0001F449 recoverd = " + str(data["result"][date]["recovered"]))
elif "charts" in incoming_msg or "chart" in incoming_msg:
pass
else:
msg.body("I didnot understand, could you mention 'information' or 'chart' please!")
return str(resp)
def country_parser(incoming_msg):
x = incoming_msg.split()
country = pycountry.countries.search_fuzzy(x[-1])[0].alpha_3
return country
if __name__ == '__main__':
app.run()
| amansr02/CovidAssist | app.py | app.py | py | 1,877 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "covid.CovidAssist",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests_cache.install_cache",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.requ... |
72198305384 | import io
from collections import Counter
import pandas as pd
import plotly.express as px
from django.db.models import Count, Sum
from django.conf import settings
from django.http import HttpResponse
from pywaffle import Waffle
import matplotlib.pyplot as plt
from .countries import REGIONS
from .models import DEMOGRAPH_CHOICES, AUDIENCE_CHOICES, ACTIVITY_CHOICES, Imprint, Report, Cohort
def get_partner_sum(year):
choices = {'demographic': DEMOGRAPH_CHOICES,
'audience': AUDIENCE_CHOICES,
'activity': ACTIVITY_CHOICES}
aggregates = {}
totals = []
# Imprints not including citizen science
imprints = Imprint.objects.filter(report__period__year=year).exclude(activity=7)
for plotname in ['demographic','audience','activity']:
count = Counter()
for i in imprints.values_list(plotname,'size'):
count.update({i[0]:i[1]})
opts = {d[0]:d[1] for d in choices[plotname]}
total = sum(count.values())
totals.append(total)
aggregates[plotname] = [{'name':opts[k], 'number':v,'percent':f"{v/total*100:.0f}"} for k, v in dict(count).items()]
return aggregates, max(totals)
def get_partner_counts(reports):
total = reports.count()
demos = reports.annotate(count=Count('imprint__demographic')).values_list('imprint__demographic','count')
audience = reports.annotate(count=Count('imprint__audience')).values_list('imprint__audience','count')
activity = reports.annotate(count=Count('imprint__activity')).values_list('imprint__activity','count')
data = [
{'source': demos, 'choices': DEMOGRAPH_CHOICES,'id':'demographics'},
{'source': audience, 'choices': AUDIENCE_CHOICES, 'id':'audience'},
{'source': activity, 'choices': ACTIVITY_CHOICES, 'id':'activity'},
]
return breakdown_per_partner(data, total)
def breakdown_per_partner(data, total):
aggregates = {}
for datum in data:
opts = {d[0]:d[1] for d in datum['choices']}
counts = Counter([opts[d[0]] for d in datum['source'] if d[0] != None])
aggregates[datum['id']] = [{'name':k, 'number':v,'percent':f"{v/total*100:.0f}"} for k, v in dict(counts).items()]
return aggregates
def cohort_countries(year):
cohort = Cohort.objects.get(year=year)
count = Counter()
regions_count = Counter()
imprints = Imprint.objects.filter(report__period=cohort).exclude(countries=None)
icountries = [c.alpha3 for i in imprints for c in i.countries]
reports = Report.objects.filter(period=cohort).exclude(countries=None)
rcountries = [c.alpha3 for i in reports for c in i.countries]
for c in [icountries, rcountries]:
count.update(c)
for c in reports.values_list('countries', flat=True):
ccodes = c.split(',')
regions = set([REGIONS[code]['region'] for code in ccodes])
regions_count.update(regions)
regions_list = [{'name':k,'number':v} for k,v in dict(regions_count).items()]
return dict(count), regions_list
def countries_summary(request, year):
"""
For `cohort` find all the countries from
"""
count, regions = cohort_countries(year)
# Change index from 2 letter code to Name of country
# data = [{'code':countries.alpha3(code=k), 'pop':v} for k,v in count.items()]
# return JsonResponse(data, safe=False)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="countries.csv"'
writer = csv.writer(response)
writer.writerow(['code', 'number'])
for k,v in count.items():
writer.writerow([k,v])
return response
def choropleth_map(year):
count, regions = cohort_countries(year)
if not regions:
return None
countries = pd.DataFrame([[k,v]for k,v in count.items()],columns=['code','number'])
fig = px.choropleth(countries, locations="code",
color="number",
color_continuous_scale=px.colors.sequential.Mint)
fig.update_layout(coloraxis_colorbar_x=-0.15)
config = {
'toImageButtonOptions': {
'format': 'png', # one of png, svg, jpeg, webp
'filename': f"partners-{year}",
'height': 1500,
'width': 2100,
'scale':12 # Multiply title/legend/axis/canvas sizes by this factor
}
}
return fig.to_html(full_html=False, default_height=500, config=config)
def meta_plot(request, year,plotname):
reports = Report.objects.filter(period__year=year)
data = get_partner_counts(reports)
data_dict = {a['name']:a['number']for a in data[plotname]}
fig = plt.figure(
FigureClass=Waffle,
rows=5,
values=data_dict,
labels=["{0}".format(k) for k, v in data_dict.items()],
legend={ 'loc': 'upper left', 'bbox_to_anchor': (1, 1)},
block_arranging_style='snake',
figsize=(12, 5)
)
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
plt.close()
return HttpResponse(buf.read(),content_type="image/png")
| LCOGT/globalskypartners | reports/plots.py | plots.py | py | 5,063 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.DEMOGRAPH_CHOICES",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "models.AUDIENCE_CHOICES",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "models.ACTIVITY_CHOICES",
"line_number": 20,
"usage_type": "name"
},
{
"api_name... |
41516616217 | import os
import shrimpy
from datetime import datetime
from arbitrage import *
def main():
shrimpy_public_key = os.environ.get("PUBLIC_KEY")
shrimpy_secret_key = os.environ.get("PRIVATE_KEY")
shrimpy_client = shrimpy.ShrimpyApiClient(shrimpy_public_key, shrimpy_secret_key)
# Works well on binance but have bugs on some other exchanges
exchange = "binance"
sentence = "\n----------------------- \n-----------------------\n {} on {}\n----------------------- \n-----------------------"
date = str(datetime.now())
sentence = sentence.format(date, exchange)
with open("infos.csv", "a") as file:
file.write(sentence)
triangle_creation = Triangle()
triangles = triangle_creation.form_triangles(exchange,
shrimpy_public_key,
shrimpy_secret_key,
shrimpy_client)
array_of_symbols1 = triangles[0]["symbol"]
array_of_symbols1_base = triangles[0]["base"]
array_of_symbols1_quote = triangles[0]["quote"]
array_of_symbols2 = triangles[1]["symbol"]
array_of_symbols2_base = triangles[1]["base"]
array_of_symbols2_quote = triangles[1]["quote"]
array_of_symbols3 = triangles[2]["symbol"]
array_of_symbols3_base = triangles[2]["base"]
array_of_symbols3_quote = triangles[2]["quote"]
# Release memory
del sentence
del date
del triangle_creation
del triangles
def Arbitrage():
if (len(array_of_symbols1) == len(array_of_symbols1_base) == len(array_of_symbols1_quote) ==
len(array_of_symbols2) == len(array_of_symbols2_base) == len(array_of_symbols2_quote) ==
len(array_of_symbols3) == len(array_of_symbols3_base) == len(array_of_symbols3_quote)):
while True:
for i in range(len(array_of_symbols1) - 1):
triangle = Triangle()
try:
triangle.set_fees(shrimpy_public_key, shrimpy_secret_key, shrimpy_client, exchange)
except:
print("Can't access the fees")
continue
triangle.symbol1 = Symbol(array_of_symbols1[i], array_of_symbols1_base[i],
array_of_symbols1_quote[i])
triangle.symbol2 = Symbol(array_of_symbols2[i], array_of_symbols2_base[i],
array_of_symbols2_quote[i])
triangle.symbol3 = Symbol(array_of_symbols3[i], array_of_symbols3_base[i],
array_of_symbols3_quote[i])
try:
triangle.order_triangle()
triangle.symbol1.calc_bid_and_ask(shrimpy_public_key, shrimpy_secret_key, shrimpy_client,
exchange)
triangle.symbol2.calc_bid_and_ask(shrimpy_public_key, shrimpy_secret_key, shrimpy_client,
exchange)
triangle.symbol3.calc_bid_and_ask(shrimpy_public_key, shrimpy_secret_key, shrimpy_client,
exchange)
except:
continue
triangle.calculate_delta_long()
triangle.calculate_delta_short()
if triangle.PLBuy > triangle.fees:
try:
gain_net = str(round(triangle.PLBuy - triangle.fees, 2))
triangle.save_triangle("infos.csv", gain_net, "long")
except:
print("Error")
print(
f"Long arbitrage opportunity in triangle: {triangle.symbol1.name} = {triangle.symbol2.name} * {triangle.symbol3.name}")
print(f"Gain net : {gain_net} percent")
elif triangle.PLSell > triangle.fees:
try:
gain_net = str(round(triangle.PLSell - triangle.fees, 2))
triangle.save_triangle("infos.csv", gain_net, "short")
except:
print("Error")
print(
f"Short arbitrage opportunity in triangle: {triangle.symbol1.name} = {triangle.symbol2.name} * {triangle.symbol3.name}")
print(f"Gain net : {gain_net} percent")
else:
print(
f"No arbitrage opportunity in triangle: {triangle.symbol1.name} {triangle.symbol2.name} {triangle.symbol3.name}")
Arbitrage()
if __name__ == '__main__':
main()
| terencebeauj/arbitrage_scanner | main.py | main.py | py | 4,921 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_num... |
11632604595 | from abc import abstractmethod
from io import BytesIO
from json import JSONEncoder
import numpy as np
import imageio
from PIL import Image
from pydicom import Sequence
from pydicom import read_file
from pydicom.dataelem import PersonName
from pydicom.multival import MultiValue
from pydicom.valuerep import DA, DT, TM, DSfloat, DSdecimal, IS
from apps.core.models import *
class DicomSaver:
@classmethod
def save(cls, img):
print("save")
if isinstance(img, Dataset):
ds: Dataset = img
img = BytesIO()
img.seek(0)
ds.save_as(img)
else:
ds: Dataset = read_file(img)
if isinstance(img, str):
img = open(img, 'rb')
if Instance.objects.filter(sop_instance_uid=ds.SOPInstanceUID).exists():
instance = Instance.objects.get(sop_instance_uid=ds.SOPInstanceUID)
instance.image.delete()
instance.image.save('', img)
return instance
elif Series.objects.filter(series_instance_uid=ds.SeriesInstanceUID).exists():
series = Series.objects.get(series_instance_uid=ds.SeriesInstanceUID)
instance = Instance.from_dataset(ds=ds)
instance.series = series
instance.image.save('', img)
instance.save()
img.close()
return instance
elif Study.objects.filter(study_instance_uid=ds.StudyInstanceUID).exists():
study = Study.objects.get(study_instance_uid=ds.StudyInstanceUID)
series = Series.from_dataset(ds=ds)
series.study = study
series.save()
instance = Instance.from_dataset(ds=ds)
instance.series = series
instance.image.save('', img)
instance.save()
img.close()
return instance
if ds.PatientID is None or ds.PatientID == '':
patient = Patient.from_dataset(ds=ds)
patient.save()
study = Study.from_dataset(ds=ds)
study.patient = patient
study.save()
series = Series.from_dataset(ds=ds)
series.study = study
series.save()
instance = Instance.from_dataset(ds=ds)
instance.series = series
instance.image.save('', img)
instance.save()
img.close()
return instance
elif Patient.objects.filter(patient_id=ds.PatientID):
patient = Patient.objects.get(patient_id=ds.PatientID)
study = Study.from_dataset(ds=ds)
study.patient = patient
study.save()
series = Series.from_dataset(ds=ds)
series.study = study
series.save()
instance = Instance.from_dataset(ds=ds)
instance.series = series
instance.image.save('', img)
instance.save()
img.close()
return instance
else:
patient = Patient.from_dataset(ds=ds)
patient.save()
study = Study.from_dataset(ds=ds)
study.patient = patient
study.save()
series = Series.from_dataset(ds=ds)
series.study = study
series.save()
instance = Instance.from_dataset(ds=ds)
instance.series = series
instance.image.save('', img)
instance.save()
img.close()
return instance
class BaseProcessor:
@abstractmethod
def process(self, img, **params):
pass
class ImageProcessor(BaseProcessor):
def __init__(self, plugin: Plugin):
self.processor = __import__(plugin.name).Plugin()
def __enter__(self):
if hasattr(self.processor, "__enter__"):
self.processor.__enter__()
return self
def process(self, instance: Instance, **params):
ds = read_file(instance.image).pixel_array
result = self.processor.process(ds, **params)
return result
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self.processor, "__exit__"):
self.processor.__exit__(exc_type, exc_val, exc_tb)
return self
class DicomJsonEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, PersonName):
return obj.original_string
if isinstance(obj, MultiValue) or isinstance(obj, Sequence):
return_list = []
for value in obj:
return_list.append(self.default(value))
return return_list
if isinstance(obj, DA):
return '%d-%02d-%02d' % (obj.year, obj.month, obj.day)
if isinstance(obj, DT):
return '%d-%02d-%02d %02d:%02d:%02d' % (obj.year, obj.month, obj.day, obj.hour, obj.minute, obj.second)
if isinstance(obj, TM):
return '%02d:%02d:%02d' % (obj.hour, obj.minute, obj.second)
if isinstance(obj, DSfloat):
return str(obj)
if isinstance(obj, DSdecimal):
return str(obj)
if isinstance(obj, IS):
return obj.original_string or str(obj)
if isinstance(obj, Dataset):
child_tags = obj.dir()
return_dict = {}
for tag in child_tags:
return_dict[tag] = self.default(obj.data_element(tag).value)
return return_dict
return str(obj)
def convert_dicom_to_img(ds: Dataset, img_format='jpeg'):
pixel_array = ds.pixel_array
file = BytesIO()
imageio.imwrite(file, pixel_array, format='jpeg')
file.seek(0)
return file.read()
def convert_array_to_img(pixel_array: np.ndarray, fname, instance):
file = BytesIO()
imageio.imwrite(file, pixel_array, format='jpeg')
file.seek(0)
resave = ProcessingResult.objects.create(instance=instance, filename=fname)
resave.result.save('', file)
file.seek(0)
return file.read()
def convert_img(ds: Dataset, img_max, img_min):
pixel_array = ds.pixel_array
pixel_array[pixel_array >= img_max] = img_max
pixel_array[pixel_array <= img_min] = img_min
file = BytesIO()
imageio.imwrite(file, pixel_array, format='jpeg')
file.seek(0)
return file.read()
| FDU-VTS/PACS-VTS | PACS/ndicom_server/apps/core/utils.py | utils.py | py | 6,224 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "io.BytesIO",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pydicom.read_file",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "abc.abstractmethod",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "pydicom.read_file",
... |
74784270182 | import numpy
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import matplotlib.cm as cm
def ternary_plot(data_fn):
    """Render a ternary (barycentric) scatter plot from a 3-column CSV.

    Each CSV row (a, b, c) is normalized so a+b+c == 1 and projected onto an
    equilateral triangle; the three column headers label the triangle corners.

    :param data_fn: path (or URL) to a CSV with exactly three numeric columns.
    """
    reader = pd.read_csv(data_fn)
    SQRT3 = numpy.sqrt(3)
    SQRT3OVER2 = SQRT3 / 2.
    def unzip(l):
        # Transpose a list of pairs into two parallel sequences.
        return zip(*l)
    def permute_point(p, permutation=None):
        # Optionally reorder the barycentric components before projection.
        if not permutation:
            return p
        return [p[int(permutation[i])] for i in range(len(p))]
    def project_point(p, permutation=None):
        # Standard barycentric -> Cartesian projection onto a unit triangle.
        permuted = permute_point(p, permutation=permutation)
        a = permuted[0]
        b = permuted[1]
        x = a + b/2.
        y = SQRT3OVER2 * b
        return numpy.array([x, y])
    def project_sequence(s, permutation=None):
        xs, ys = unzip([project_point(p, permutation=permutation) for p in s])
        return xs, ys
    # Normalize each row so the three components sum to 1 (barycentric coords).
    data = []
    for i, (a, b, c) in reader.iterrows():
        a_ = a / (a + b + c)
        b_ = b / (a + b + c)
        c_ = c / (a + b + c)
        data.append((a_, b_, c_))
    xs, ys = project_sequence(data)
    # NOTE(review): vs hard-codes colour values for exactly 3 samples; CSVs
    # with a different row count will break plt.scatter below -- confirm.
    vs = (1, 2, 3)
    fig = plt.figure(num=None, figsize=(10, 6), dpi=80, facecolor='w', edgecolor='k')
    corners = numpy.array([[0, 0], [1, 0], [0.5, numpy.sqrt(3) * 0.5 * 1]])
    triangle = tri.Triangulation(corners[:, 0], corners[:, 1])
    # creating the grid
    refiner = tri.UniformTriRefiner(triangle)
    trimesh = refiner.refine_triangulation(subdiv=4)
    #plotting the colorbar
    colormap = plt.cm.get_cmap('Reds')
    #plotting the mesh
    plt.triplot(trimesh, '', color='0.9', zorder = 1)
    #plotting the points
    plt.scatter(xs, ys, c=vs, s=100, zorder = 10, cmap=colormap)
    # Label every sample next to its marker.
    for i in range(len(xs)):
        plt.text(xs[i] + 0.001, ys[i] + 0.001, 'Samp-' + str(i))
    plt.tricontourf(xs,ys,triangle.triangles,vs)
    #plotting the axes
    plt.plot([corners[0][0], corners[1][0]], [corners[0][1], corners[1][1]], color='0.7', linestyle='-', linewidth=2)
    plt.plot([corners[0][0], corners[2][0]], [corners[0][1], corners[2][1]], color='0.7', linestyle='-', linewidth=2)
    plt.plot([corners[1][0], corners[2][0]], [corners[1][1], corners[2][1]], color='0.7', linestyle='-', linewidth=2)
    def plot_ticks(start, stop, tick, n):
        # Draw n tick marks (offset by `tick`) along the edge start->stop.
        r = numpy.linspace(0, 1, num = 10)
        xs = start[0] * (1 - r) + stop[0] * r
        xs = numpy.vstack((xs, xs + tick[0]))
        ys = start[1] * (1 - r) + stop[1] * r
        ys = numpy.vstack((ys, ys + tick[1]))
        for i in range(0, len(xs.tolist()[1])):
            x = xs.tolist()[1][i]
            y = ys.tolist()[1][i]
            plt.text(x, y, i, ha='center')
        plt.plot(xs, ys, 'k', lw=1, color='0.7')
    n = 10
    tick_size = 0.2
    margin = 0.1
    left = corners[0]
    right = corners[1]
    top = corners[2]
    # define vectors for ticks
    bottom_tick = tick_size * (right - top) / n
    right_tick = tick_size * (top - left) / n
    left_tick = tick_size * (left - right) / n
    # plot_ticks(left, right, bottom_tick, n)
    # plot_ticks(right, top, right_tick, n)
    # plot_ticks(left, top, left_tick, n)
    # Use the CSV column headers as corner labels.
    names = [reader[column].name for column in reader]
    plt.text(left[0] - 0.01, left[1], names[2], horizontalalignment = 'right', fontsize = 15, color = 'b')
    plt.text(right[0], right[1], names[0], horizontalalignment = 'left', fontsize = 15, color = 'b')
    plt.text(top[0], top[1], names[1], fontsize = 15, color = 'b')
    plt.colorbar(label="Sample density")
    #
    # plt.savefig('chart.png')
plt.show() | yashvardhan747/Statistical-and-Aqual-chemical-plots | StatisticalTools/ternary_plot.py | ternary_plot.py | py | 3,213 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
... |
3900882792 | import os
import random
import cv2
from torchvision import transforms
import matplotlib.pyplot as plt
torch_image_transform = transforms.ToTensor()
def get_data(dir, labels):
    """Load every image under dir/<label> for each label.

    Returns a pair ``(samples, count)`` where each sample is
    ``[image_hwc, class_index]`` and ``class_index`` is the label's position
    in *labels*. Unreadable images are reported and skipped.
    """
    samples = []
    for label in labels:
        label_dir = os.path.join(dir, label)
        class_idx = labels.index(label)
        for file_name in os.listdir(label_dir):
            try:
                image = cv2.imread(os.path.join(label_dir, file_name))
                tensor = torch_image_transform(image)
                # Back to HWC numpy layout after the CHW tensor conversion.
                image_hwc = tensor.cpu().numpy().transpose([1, 2, 0])
                samples.append([image_hwc, class_idx])
            except Exception as e:
                print(e)
    return samples, len(samples)
def get_random_data(dir, labels, num_samples_per_label):
    """Load a random subset of images for each label under *dir*.

    :param dir: root data directory containing one sub-folder per label.
    :param labels: list of label folder names; index in this list is the class id.
    :param num_samples_per_label: how many random images to load per label.
    :return: ``(data, count)`` where each entry is ``[image_hwc, class_index]``.
    """
    data = []
    for label in labels:
        path = os.path.join(dir, label)
        class_num = labels.index(label)
        images = os.listdir(path)
        # Shuffle the images to get random samples
        random.shuffle(images)
        # BUG FIX: random.sample raises ValueError when the folder holds fewer
        # images than requested; cap the sample size at what is available.
        sample_size = min(num_samples_per_label, len(images))
        selected_images = random.sample(images, sample_size)
        for img_filename in selected_images:
            try:
                img = cv2.imread(os.path.join(path, img_filename))
                torch_img = torch_image_transform(img)
                # Convert the CHW tensor back to HWC numpy layout.
                torch_img_numpy = torch_img.cpu().numpy().transpose([1, 2, 0])
                data.append([torch_img_numpy, class_num])
            except Exception as e:
                print(e)
    return data, len(data)
def get_random_file(directory_path):
    """Return the full path of a uniformly random regular file in *directory_path*.

    Sub-directories are ignored. Returns ``None`` (after printing a notice)
    when the directory contains no files.
    """
    entries = os.listdir(directory_path)
    # Keep only regular files; directories are excluded from the draw.
    entries = [entry for entry in entries
               if os.path.isfile(os.path.join(directory_path, entry))]
    if not entries:
        print("No files found in the directory.")
        return None
    chosen = random.choice(entries)
    return os.path.join(directory_path, chosen)
def get_labels(data_path):
    """Return the entry names directly under *data_path* (one per label folder)."""
    #get children folders of root data directory
    return os.listdir(data_path)
def count_files_in_folder(folder_path):
    """Count the regular files (non-recursive) in *folder_path*."""
    return sum(
        1
        for entry in os.listdir(folder_path)
        if os.path.isfile(os.path.join(folder_path, entry))
    )
| nguyen-tho/VGG19_Insect_Classification | get_data.py | get_data.py | py | 2,689 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.... |
25142936013 | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from pandas import DataFrame
from util import spider_util
# Fetch school catchment-area (school district) information.
xx_url='http://map.28dat.net/s_ft/school.aspx?no=101'
cz_url='http://map.28dat.net/s_ft/school.aspx?no=225'# junior high schools
url_arr=[]
url_arr.append(xx_url)
url_arr.append(cz_url)
schoolarea = []
for url in url_arr:
    html = spider_util.open_url(url, self_rotation=5, timeout=20) # 20-second timeout
    bsObj = BeautifulSoup(html, "html.parser", from_encoding="utf-8")
    # Every <a> inside span#s_school links to one school's detail page.
    a_tags=bsObj.find('span',{'id':'s_school'}).find_all('a')
    for a_tag in a_tags:
        schoolname=a_tag.get_text()
        # NOTE(review): rebinding `url` here shadows the outer loop variable;
        # harmless as written but fragile if code below the loop is added.
        url='http://map.28dat.net/s_ft/school.aspx'+a_tag.get('href')
        html = spider_util.open_url(url, self_rotation=5, timeout=20) # 20-second timeout
        bsObj = BeautifulSoup(html, "html.parser", from_encoding="utf-8")
        # span#s_list holds the comma-separated catchment-area ranges.
        span=bsObj.find('span',{'id':'s_list'})
        area_text=span.get_text()
        area_text_arr=area_text.split(',')
        for text in area_text_arr:
            # Keys are Chinese column headers: school name / area range.
            area = {}
            area['学校名称'] = schoolname
            area['区域范围']=text
            schoolarea.append(area)
DataFrame(schoolarea).to_csv("D:\\011111111111111111111111\\软件\\school_area.csv", index=False, sep=',')
| w341000/PythonTheWord | SchoolArea.py | SchoolArea.py | py | 1,184 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "util.spider_util.open_url",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "util.spider_util",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "util.spi... |
33639047987 | import io
import os
import sys
import tempfile
from Config.Config import G_CONFIG
from JobApi.ESJobApi import ESJobApi
spark_config = G_CONFIG.config['spark']
task_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'tasks')
src_root_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
# Create zip file of all (python) sources to be passed to Spark task
def create_source_zip():
    """Zip the whole source tree into a temp file and return the archive path.

    Shells out to the `zip` binary from inside the source root so archive
    entries are relative to it.
    """
    archive_path = os.path.join(tempfile.gettempdir(), 'elastictm.zip')
    command = f'cd {src_root_path}; zip -r {archive_path} *'
    logger.info(f"Zipping source with the command: {command}")
    os.system(command)
    return archive_path
class SparkTaskDispatcher:
    """Dispatches task scripts to Spark via ``spark-submit`` and records the
    resulting job status through the ES job API."""
    # Built once at class-definition time; every dispatch reuses the same zip.
    src_zip_file = create_source_zip()
    def __init__(self):
        # Backend used to persist the job's final status.
        self.job_api = ESJobApi()
    def run_old(self, job_id, pyscript):
        """Legacy dispatch: single shell one-liner via os.system, output discarded.

        :param job_id: identifier passed to the task script and the job API.
        :param pyscript: task script name (without .py) under the tasks folder.
        """
        master_path = spark_config['master_path']
        # Export the interpreter, cd to the sources, then spark-submit the task.
        cmd = "export PYSPARK_PYTHON={}; cd {}; {}/bin/spark-submit --master {} {}/{}.py {} --py-files {}"\
            .format(sys.executable, src_root_path, spark_config['path'], master_path, task_path, pyscript, job_id, self.src_zip_file)
        logger.info("Dispatching Spark task: {}".format(cmd))
        exit_code = os.system(cmd + "> /dev/null 2>&1")
        if exit_code:
            status = 'failed'
        else:
            # NOTE(review): 'succeded' is misspelled, but it is a runtime value
            # consumers may match on -- keep it byte-identical.
            status = 'succeded'
        logger.info("Dispatching Spark status: {}, exit code: {}".format(status, exit_code))
        self.job_api.set_status(job_id, status)
    def run(self, job_id, pyscript):
        """Dispatch via subprocess.Popen so the child's stdout is streamed to our log.

        :param job_id: identifier passed to the task script and the job API.
        :param pyscript: task script name (without .py) under the tasks folder.
        """
        from subprocess import Popen, PIPE
        master_path = spark_config['master_path']
        cmd = "{}/bin/spark-submit".format(spark_config['path'])
        # Inherit our environment but force workers onto this interpreter.
        env = dict(os.environ, PYSPARK_PYTHON=sys.executable)
        params = "--master {} {}/{}.py {} --py-files {}".format(master_path, task_path, pyscript, job_id, self.src_zip_file)
        logger.info("Dispatching Spark task: export PYSPARK_PYTHON={}; cd {}; {} {}".format(sys.executable, src_root_path, cmd, params))
        logger.info("ENV: {}".format(env))
        p = Popen("{} {}".format(cmd, params).split(), stdout=PIPE, stderr=PIPE, env=env, cwd=src_root_path)
        # Forward the child's stdout line by line as it is produced.
        for line in io.TextIOWrapper(p.stdout, encoding="utf-8"):
            logger.info(line)
        p.wait()
        exit_code = p.returncode
        if exit_code:
            status = 'failed'
        else:
            # NOTE(review): misspelling kept intentionally (see run_old).
            status = 'succeded'
        logger.info("Spark status: {}, exit code: {}".format(status, exit_code))
        self.job_api.set_status(job_id, status)
if __name__ == "__main__":
import sys
G_CONFIG.config_logging()
std = SparkTaskDispatcher()
std.run(sys.argv[1], "Delete")
| shasha79/nectm | src/JobApi/SparkTaskDispatcher.py | SparkTaskDispatcher.py | py | 2,602 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "Config.Config.G_CONFIG.config",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "Config.Config.G_CONFIG",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "... |
3985485979 | import os
from skimage import io
import copy
import numpy as np
import random
from glob import glob
import h5py
import torch
import torch.utils.data as data
from torchvision import transforms, datasets
from src.datasets.root_paths import DATA_ROOTS
class BaseSo2Sat(data.Dataset):
    """So2Sat LCZ42 dataset loaded eagerly from an HDF5 file.

    ``sen`` selects which sensor group ('sen1' or 'sen2') to read; labels are
    converted from one-hot to class indices.
    """
    CLASSES = ['Compact High-Rise',
               'Compact Midrise',
               'Compact Low-Rise',
               'Open High-Rise',
               'Open Midrise',
               'Open Low-Rise',
               'Lightweight Low-Rise',
               'Large Low-Rise',
               'Sparsely Built',
               'Heavy Industry',
               'Dense Trees',
               'Scattered Trees',
               'Brush, Scrub',
               'Low Plants',
               'Bare Rocks or Paved',
               'Bare Soil or Sand',
               'Water']

    def __init__(
        self,
        root=DATA_ROOTS["so2sat"],
        train=True,
        image_transforms=None,
        seed=42,
        sen='sen1',
    ):
        super().__init__()
        self.root = root
        self.train = train
        self.image_transforms = image_transforms
        # Train and validation splits live in separate HDF5 files.
        file_name = 'training.h5' if self.train else 'validation.h5'
        h5_file = h5py.File(self.root + "/" + file_name, 'r')
        # Load the whole split into memory, NHWC -> NCHW, float32.
        self.data = np.array(h5_file[sen], dtype=np.float32).transpose(0, 3, 1, 2)
        # One-hot label matrix -> integer class indices.
        self.labels = np.argmax(np.array(h5_file['label']), axis=1)
        self.targets = copy.deepcopy(self.labels)

    def __getitem__(self, index):
        image = torch.tensor(self.data[index])
        if self.image_transforms:
            image = self.image_transforms(image)
        return image, self.labels[index]

    def __len__(self):
        return self.data.shape[0]
class So2Sat_Sen1(BaseSo2Sat):
    """Paired-view wrapper over the Sentinel-1 channels of So2Sat.

    ``__getitem__`` returns two independently transformed views of the same
    sample: ``(index, view1, view2, label, label)``.
    """
    NUM_CLASSES = 17
    MULTI_LABEL = False
    NUM_CHANNELS = 8
    FILTER_SIZE = 32
    def __init__(
        self,
        root=DATA_ROOTS["so2sat"],
        train=True,
        image_transforms=None,
    ):
        # BUG FIX: the original called super().__init__() with no arguments,
        # which ran BaseSo2Sat.__init__ with its defaults and loaded the whole
        # HDF5 file a second time in addition to self.dataset below. Initialize
        # the torch Dataset base directly and keep a single wrapped loader.
        data.Dataset.__init__(self)
        self.dataset = BaseSo2Sat(
            root=root,
            train=train,
            image_transforms=image_transforms,
            sen='sen1',
        )
    def __getitem__(self, index):
        # Two stochastic augmentations of the same underlying sample.
        img_data, label = self.dataset.__getitem__(index)
        img2_data, _ = self.dataset.__getitem__(index)
        data = [index, img_data.float(), img2_data.float(), label, label]
        return tuple(data)
    def __len__(self):
        return len(self.dataset)
class So2Sat_Sen2(BaseSo2Sat):
    """Paired-view wrapper over the Sentinel-2 channels of So2Sat.

    ``__getitem__`` returns two independently transformed views of the same
    sample: ``(index, view1, view2, label, label)``.
    """
    NUM_CLASSES = 17
    MULTI_LABEL = False
    NUM_CHANNELS = 10
    FILTER_SIZE = 32
    def __init__(
        self,
        root=DATA_ROOTS["so2sat"],
        train=True,
        image_transforms=None,
    ):
        # NOTE(review): super().__init__() runs BaseSo2Sat.__init__ with its
        # defaults, loading the full HDF5 file a second time in addition to
        # self.dataset below -- confirm and consider removing.
        super().__init__()
        self.dataset = BaseSo2Sat(
            root=root,
            train=train,
            image_transforms=image_transforms,
            sen='sen2',
        )
    def __getitem__(self, index):
        # Two stochastic augmentations of the same underlying sample.
        img_data, label = self.dataset.__getitem__(index)
        img2_data, _ = self.dataset.__getitem__(index)
        data = [index, img_data.float(), img2_data.float(), label, label]
        return tuple(data)
    def __len__(self):
return len(self.dataset) | jbayrooti/divmaker | src/datasets/so2sat.py | so2sat.py | py | 3,310 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "src.datasets.root_paths.DATA_ROOTS",
"line_number": 36,
"usage_type": "name"
},
{
... |
6694542098 | import time
import random
from typing import List, Tuple, Generator
class Life:
    """Conway's Game of Life on a bounded (non-wrapping) m x n grid."""
    state: List[List[bool]]
    m: int
    n: int

    def __init__(self, m: int, n: int):
        self.m = m
        self.n = n
        self.state = [[False for _ in range(n)] for _ in range(m)]

    def __repr__(self) -> str:
        return str(self.state)

    def random_select(self, rate):
        """Turn each cell on independently with probability 1/(rate+1)."""
        for x_grid in range(self.m):
            for y_grid in range(self.n):
                if random.randint(0, rate) == 0:
                    self.state[x_grid][y_grid] = True

    def neighbours(self, i: int, j: int) -> Generator[Tuple[int, int], None, None]:
        """Yield the in-bounds neighbour coordinates of cell (i, j)."""
        for x_grid in range(-1, 2):
            for y_grid in range(-1, 2):
                if self.m > i + x_grid >= 0 and self.n > j + y_grid >= 0 \
                        and not (x_grid == y_grid == 0):
                    yield i + x_grid, j + y_grid

    def nextstate(self) -> None:
        """Advance the grid by one generation (standard B3/S23 rules)."""
        # BUG FIX: the original used self.state.copy(), a *shallow* copy whose
        # inner row lists are shared with self.state; writing next_[x][y]
        # mutated the live grid mid-generation and corrupted neighbour counts.
        next_ = [row[:] for row in self.state]
        for x_grid in range(self.m):
            for y_grid in range(self.n):
                # neighbours() only yields in-bounds cells, so no bounds guard
                # (or the original debugging try/except) is needed here.
                num_of_on = sum(
                    1 for cx, cy in self.neighbours(x_grid, y_grid)
                    if self.state[cx][cy]
                )
                if self.state[x_grid][y_grid]:
                    # Live cell survives only with 2 or 3 live neighbours.
                    if num_of_on not in (2, 3):
                        next_[x_grid][y_grid] = False
                elif num_of_on == 3:
                    # Dead cell with exactly 3 live neighbours is born.
                    next_[x_grid][y_grid] = True
        self.state = next_

    def addfigure(self, i: int, j: int, figure: List[str]) -> None:
        """Stamp *figure* onto the grid with its top-left corner at (i, j).

        Any character other than '.' or ' ' marks a live cell.
        :raises ValueError: if the figure does not fit inside the grid.
        """
        if i + len(figure) > self.m:
            raise ValueError
        for x_grid in range(len(figure)):
            if len(figure[x_grid]) > self.n - j:
                raise ValueError
            for y_grid in range(len(figure[x_grid])):
                self.state[x_grid + i][y_grid + j] = figure[x_grid][y_grid] not in ". "

    def __str__(self) -> str:
        grid = ""
        for x_grid in range(self.m):
            for y_grid in range(self.n):
                if self.state[x_grid][y_grid]:
                    grid += '# '
                else:
                    grid += '. '
            grid += '\n'
        return grid
| nurlbk/gameOfLife-video | gameOfLife.py | gameOfLife.py | py | 2,472 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "typing.Generator",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_n... |
73997935782 | #%% packages
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import seaborn as sns
#%% data import
cars_file = 'https://gist.githubusercontent.com/noamross/e5d3e859aa0c794be10b/raw/b999fb4425b54c63cab088c0ce2c0d6ce961a563/cars.csv'
# Download the classic mtcars dataset from the gist above.
cars = pd.read_csv(cars_file)
cars.head()
#%% visualise the model
# Quick look: weight (wt) vs. fuel efficiency (mpg) with a regression overlay.
sns.scatterplot(x='wt', y='mpg', data=cars)
sns.regplot(x='wt', y='mpg', data=cars)
#%% convert data to tensor
# Reshape both series to (N, 1) column vectors and cast to float32 for torch.
X_list = cars.wt.values
X_np = np.array(X_list, dtype=np.float32).reshape(-1,1)
y_list = cars.mpg.values
y_np = np.array(y_list, dtype=np.float32).reshape(-1,1)
X = torch.from_numpy(X_np)
y_true = torch.from_numpy(y_np)
#%% model class
class LinearRegressionTorch(nn.Module):
    """Single linear layer: y = W x + b (plain linear regression)."""

    def __init__(self, input_size, output_size):
        super().__init__()
        self.linear = nn.Linear(input_size, output_size)

    def forward(self, x):
        return self.linear(x)
input_dim = 1
output_dim = 1
model = LinearRegressionTorch(input_dim, output_dim)
#%% loss Function
# Mean squared error -- the standard regression loss.
loss_fun = nn.MSELoss()
#
# %% Optimizer
LR = 0.02
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
# %% training
# Track the loss and both parameters at every epoch for the plots below.
losses, slope, bias = [], [], []
NUM_EPOCHS = 1000
for epoch in range(NUM_EPOCHS):
    # set gradients to zero
    optimizer.zero_grad()
    # forward pass
    y_pred = model(X)
    # compute loss
    loss = loss_fun(y_pred, y_true)
    # compute gradients
    loss.backward()
    # update weights
    optimizer.step()
    # snapshot the current weight (slope) and bias values
    for name, param in model.named_parameters():
        if param.requires_grad:
            if name == 'linear.weight':
                slope.append(param.data.numpy()[0][0])
            if name == 'linear.bias':
                bias.append(param.data.numpy()[0])
    losses.append(float(loss.data))
    # print loss every 100 epochs
    if epoch % 100 == 0:
        print('Epoch: {}, Loss: {:.4f}'.format(epoch, loss.data))
# %% visualize model training
sns.scatterplot(x=range(NUM_EPOCHS), y=losses)
# %% visualize the bias development
sns.scatterplot(x=range(NUM_EPOCHS), y=bias)
# %% visualize the slope development
sns.scatterplot(x=range(NUM_EPOCHS), y=slope)
# %% check the result
# Compare the fitted line against the raw data.
y_pred = model(X).data.numpy().reshape(-1)
sns.scatterplot(x=X_list, y=y_list)
sns.lineplot(x=X_list, y=y_pred, color='red')
# %%
| ono5/learn-pytorch | 03_ModelingIntroduction/10_LinReg_ModelClass_start.py | 10_LinReg_ModelClass_start.py | py | 2,294 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "seaborn.scatterplot",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "seaborn.regplot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
... |
3520624550 | """Defines helpers related to the system database."""
import logging
import time
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import text
from .project_settings_service import ProjectSettingsService
# Keep a Project → Engine mapping to serve
# the same engine for the same Project
_engines = dict()
def project_engine(project, default=False) -> ("Engine", sessionmaker):
    """Create (or return the cached) SQLAlchemy engine + sessionmaker for a project.

    :param project: Meltano project instance used as the cache key.
    :param default: when True, register the result as the project's default
        engine so later calls reuse it.
    """
    cached = _engines.get(project)
    if cached:
        return cached
    settings = ProjectSettingsService(project)
    engine_uri = settings.get("database_uri")
    logging.debug(f"Creating engine {project}@{engine_uri}")
    engine = create_engine(engine_uri, pool_pre_ping=True)
    # Fail fast (with retries) if the database is unreachable.
    check_db_connection(
        engine,
        max_retries=settings.get("database_max_retries"),
        retry_timeout=settings.get("database_retry_timeout"),
    )
    init_hook(engine)
    bundle = (engine, sessionmaker(bind=engine))
    if default:
        # register the default engine
        _engines[project] = bundle
    return bundle
def check_db_connection(engine, max_retries, retry_timeout):
    """Verify the database accepts connections, retrying up to *max_retries* times.

    Sleeps *retry_timeout* seconds between attempts; re-raises the final
    OperationalError once the retry budget is exhausted.
    """
    # One initial attempt plus max_retries retries.
    for failed_attempts in range(max_retries + 1):
        try:
            engine.connect()
        except OperationalError:
            if failed_attempts == max_retries:
                logging.error(
                    "Could not connect to the Database. Max retries exceeded."
                )
                raise
            logging.info(
                f"DB connection failed. Will retry after {retry_timeout}s. Attempt {failed_attempts + 1}/{max_retries}"
            )
            time.sleep(retry_timeout)
        else:
            return
def init_hook(engine):
    """Run the dialect-specific initialization hook for *engine*, if any.

    Unknown dialects are a no-op. A hook failure is re-raised wrapped in a
    generic Exception carrying the original error message.
    """
    function_map = {"sqlite": init_sqlite_hook}
    # BUG FIX: the original wrapped the lookup AND the hook call in one
    # try/except KeyError, so a KeyError raised *inside* a hook was silently
    # swallowed. Resolve the hook first, then run it.
    hook = function_map.get(engine.dialect.name)
    if hook is None:
        return
    try:
        hook(engine)
    except Exception as e:
        raise Exception(f"Can't initialize database: {str(e)}") from e


def init_sqlite_hook(engine):
    # enable the WAL
    engine.execute("PRAGMA journal_mode=WAL")
class DB:
    @classmethod
    def ensure_schema_exists(cls, engine, schema_name, grant_roles=()):
        """Create *schema_name* if it does not exist, optionally granting roles.

        For each role in *grant_roles*, default SELECT privileges on tables and
        USAGE on the schema are granted.

        :param engine: SQLAlchemy engine to execute against.
        :param schema_name: database schema to ensure.
        :param grant_roles: iterable of role names to grant access to.
        """
        roles_csv = ",".join(grant_roles)
        create_schema = text(f"CREATE SCHEMA IF NOT EXISTS {schema_name}")
        grant_select_schema = text(
            f"ALTER DEFAULT PRIVILEGES IN SCHEMA {schema_name} GRANT SELECT ON TABLES TO {roles_csv}"
        )
        grant_usage_schema = text(
            f"GRANT USAGE ON SCHEMA {schema_name} TO {roles_csv}"
        )
        # Run everything in one transaction.
        with engine.connect() as conn, conn.begin():
            conn.execute(create_schema)
            if grant_roles:
                conn.execute(grant_select_schema)
                conn.execute(grant_usage_schema)
        logging.info(f"Schema {schema_name} has been created successfully.")
        for role in grant_roles:
            logging.info(f"Usage has been granted for role: {role}.")
| learningequality/meltano | src/meltano/core/db.py | db.py | py | 3,535 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "project_settings_service.ProjectSettingsService",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 30,
"usage_type": "call"
},
{... |
36121458003 | import random
import json
from typing import Tuple, Union, Dict, Any
from forte.data.ontology import Annotation
from forte.processors.data_augment.algorithms.single_annotation_op import (
SingleAnnotationAugmentOp,
)
from forte.common.configuration import Config
from forte.utils import create_import_error_msg
__all__ = [
"UniformTypoGenerator",
"TypoReplacementOp",
]
class UniformTypoGenerator:
    r"""
    A uniform generator that generates a typo from a typo dictionary.

    Args:
        dict_path: the url or the path to the pre-defined typo json file.
            The key is a word we want to replace. The value is a list
            containing various typos of the corresponding key.

    .. code-block:: python

        {
            "apparent": ["aparent", "apparant"],
            "bankruptcy": ["bankrupcy", "banruptcy"],
            "barbecue": ["barbeque"]
        }
    """

    def __init__(self, dict_path: str):
        try:
            import requests  # pylint: disable=import-outside-toplevel
        except ImportError as e:
            raise ImportError(
                create_import_error_msg(
                    "requests", "data_aug", "data augment support"
                )
            ) from e
        # Try the path as a URL first; fall back to a local JSON file.
        try:
            r = requests.get(dict_path, timeout=30)
            self.data = r.json()
        except requests.exceptions.RequestException:
            with open(dict_path, encoding="utf8") as json_file:
                self.data = json.load(json_file)

    def generate(self, word: str) -> str:
        """Return a uniformly chosen typo for *word*, or *word* unchanged
        when it has no dictionary entry."""
        # IDIOM FIX: single dict lookup via .get instead of
        # `word in self.data.keys()` followed by a second indexing lookup.
        typos = self.data.get(word)
        if typos is None:
            return word
        return random.choice(typos)
class TypoReplacementOp(SingleAnnotationAugmentOp):
    r"""
    Replacement op that simulates spelling mistakes using a pre-defined
    typo dictionary.
    """

    def __init__(self, configs: Union[Config, Dict[str, Any]]):
        super().__init__(configs)
        if "dict_path" in configs.keys():
            self.dict_path = configs["dict_path"]
        else:
            # Fall back to the default typo dictionary hosted on GitHub.
            self.dict_path = (
                "https://raw.githubusercontent.com/wanglec/"
                + "temporaryJson/main/misspelling.json"
            )
        if configs["typo_generator"] != "uniform":
            raise ValueError(
                "The valid options for typo_generator are [uniform]"
            )
        self.typo_generator = UniformTypoGenerator(self.dict_path)

    def single_annotation_augment(
        self, input_anno: Annotation
    ) -> Tuple[bool, str]:
        r"""
        Replace the annotation's text with a dictionary typo.

        Args:
            input_anno: The input annotation.

        Returns:
            A tuple whose first element indicates whether a replacement
            happened and whose second element is the (possibly replaced)
            text.
        """
        # Skip the replacement with probability (1 - prob).
        if random.random() > self.configs.prob:
            return False, input_anno.text
        return True, self.typo_generator.generate(input_anno.text)

    @classmethod
    def default_configs(cls):
        r"""
        Returns:
            A dictionary with the default config for this processor.

            Keys:
                - prob (float): probability of replacement in [0, 1];
                  defaults to 0.1.
                - dict_path (str): url or path of the typo JSON file
                  (word -> list of typos).
                - typo_generator (str): generator strategy name.
        """
        return {
            "prob": 0.1,
            "dict_path": "https://raw.githubusercontent.com/wanglec/"
            + "temporaryJson/main/misspelling.json",
            "typo_generator": "uniform",
        }
| asyml/forte | forte/processors/data_augment/algorithms/typo_replacement_op.py | typo_replacement_op.py | py | 4,240 | python | en | code | 230 | github-code | 36 | [
{
"api_name": "forte.utils.create_import_error_msg",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_nam... |
9044214062 | import requests
import json
import pandas as pd
from dotenv import load_dotenv
import os
# Load environment variables from a local .env file (expects API_KEY).
load_dotenv()
API_KEY = os.getenv("API_KEY")  # Google Geocoding API key; None when unset
def extract_lat_long_via_address(address_or_zipcode):
    """Geocode an address or ZIP code via the Google Geocoding API.

    :param address_or_zipcode: free-form address or ZIP string.
    :return: ``(lat, lng)`` floats, or ``(None, None)`` when the HTTP request
        fails or the response holds no usable result.
    """
    lat, lng = None, None
    base_url = "https://maps.googleapis.com/maps/api/geocode/json"
    endpoint = f"{base_url}?address={address_or_zipcode}&key={API_KEY}"
    r = requests.get(endpoint)
    if r.status_code not in range(200, 299):
        return None, None
    try:
        results = r.json()['results'][0]
        print(results)
        lat = results['geometry']['location']['lat']
        lng = results['geometry']['location']['lng']
    except (KeyError, IndexError, ValueError):
        # BUG FIX: was a bare `except: pass`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrow to the expected parse failures
        # (missing keys, empty results, invalid JSON). lat/lng stay None.
        pass
    print(f'Found {address_or_zipcode}: ({lat}, {lng})')
    return lat, lng
def enrich_with_geocoding_api(row):
    """Populate the 'lat'/'lng' fields of *row* by geocoding its 'address'.

    Intended for use with ``DataFrame.apply(..., axis=1)``; returns the
    mutated row.
    """
    lat, lng = extract_lat_long_via_address(row['address'])
    row['lat'] = lat
    row['lng'] = lng
    return row
with open("data.json", "r") as file:
file_data = file.read()
raw_data = json.loads(file_data)
facilities = raw_data['data']
df = pd.DataFrame.from_records(facilities)
df = df[
[
"name",
"type",
"address",
"maxSeated",
"website",
]
]
df = df.apply(enrich_with_geocoding_api, axis=1)
df.to_csv("out.csv") | shrey150/playeasy-gis | convert_to_csv.py | convert_to_csv.py | py | 1,396 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number... |
26249935476 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#@ brief introduction
"""
本段程序描述了移动机器人底盘的一些控制方法,包括使能、前进、设置底盘速度等
底盘电机的控制可以通过多种方式,例如:can modbus等
"""
from ZL_motor_control import ZL_motor_control
# class ZL_motor_control
import can
from math import *
import time
# 外部设置bus,然后传递给car
class car(object):
    """Differential-drive mobile base driven over a ZL motor-control bus.

    Wraps motor enable/disable, wheel-speed kinematics, and dead-reckoning
    odometry updated from bus feedback.
    """
    def __init__(self,wheel_diameter,wheel_distance,bus,id_list):
        # bus: opened ZL_motor_control instance; id_list: [left_id, right_id].
        self.bus = bus
        self.diameter = wheel_diameter
        self.distance = wheel_distance
        self.id_list = id_list
        # Dead-reckoned pose and twist, integrated in set_odom().
        self.odom = {'x':0,'y':0,'theta':0,'v':0,'w':0}
        self.isRunMode = False
        self.isSending = False
    def enable(self):
        # Enable both wheel motors.
        return self.bus.enable(self.id_list)
    def disable(self):
        # Disable both wheel motors.
        return self.bus.disable(self.id_list)
    # Compute wheel speeds from the desired body velocity and send them.
    def set_car_vel(self,v,w):
        data_dict = self.cal_wheel_vel(v,w)
        return self.bus.only_set_vel(self.id_list,data_dict)
    # Compute per-wheel angular speeds for body velocity (v, w).
    # The second wheel is negated, presumably due to mirrored mounting --
    # TODO confirm against the hardware.
    def cal_wheel_vel(self,v,w):
        w1 = 2*v/self.diameter - w*self.distance/self.diameter
        w2 = -(2*v/self.diameter + w*self.distance/self.diameter)
        return {self.id_list[0]:w1,self.id_list[1]:w2}
    # Read wheel feedback and return the body velocity [v, w].
    def get_car_status(self):
        #print(self.bus.status)
        w1 = self.bus.status[self.id_list[0]]["Vel"]
        w2 = self.bus.status[self.id_list[1]]["Vel"]
        # NOTE(review): `*self.diameter/2/self.diameter` cancels to /2; the
        # angular-velocity formula likely intended the wheel *distance* in the
        # denominator -- verify against cal_wheel_vel.
        w = (w1+w2)*self.diameter/2/self.diameter
        v = (w1-w2)*self.diameter/2
        return [v,w]
    # Integrate odometry using a fixed 0.05 s step.
    def set_odom(self):
        dt = 0.05
        #print("set odom")
        v,w = self.get_car_status()
        self.odom['x']= self.odom['x'] + v*dt*cos(self.odom['theta'])
        self.odom['y']= self.odom['y'] + v*dt*sin(self.odom['theta'])
        self.odom['theta'] = self.odom['theta'] + w*dt
        self.odom['v'] = v
        self.odom['w'] = w
    # Enter config mode: stop the wheels, disable motors, and close the bus.
    def config_mode(self):
        self.bus.bus.ser.setDTR(False)
        self.bus.set_vel_stop(self.id_list)
        self.bus.disable(self.id_list)
        self.isRunMode = False
        self.bus.close_bus()
    # Enter run mode so velocity commands can be issued.
    def run_mode(self):
        self.bus.open_bus()
        self.bus.bus.ser.setDTR(False)
        # Set the motion mode to velocity mode (0x2F) for every motor.
        mode = {}
        for member in self.id_list:
            mode[member] = 0x2F
        self.bus.set_mode(self.id_list,mode)
        self.bus.enable(self.id_list)
        self.isRunMode = True
        print("diff_car go into the run mode")
    # Refresh wheel feedback and the car's odometry from the bus.
    def update_status(self):
        recv_msg = None
        try:
            recv_msg = self.bus.recv(timeout=self.bus.timeout)
        except can.CanError as e:
            print(e)
            return False
        finally:
            # Only process feedback when a frame was actually received.
            if recv_msg != None:
                #print("update car status")
                self.bus.read_status(recv_msg)
                self.set_odom()
def test_set_car_vel(v, w):
    """Hardware smoke test: drive the base at body velocity (v, w) for ~2 s.

    :param v: linear velocity command.
    :param w: angular velocity command.
    """
    bus_channel = "/dev/wheels_ZL"
    bus_bitrate = bus_baudrate = 115200
    bus_id_list = [1, 2]
    bus_type = 'serial'
    wheel_diameter = 100
    wheel_distance = 100
    bus = ZL_motor_control(bus_channel, bus_bitrate, bus_baudrate, bus_id_list, bus_type)
    diff_car = car(wheel_diameter, wheel_distance, bus, bus_id_list)
    diff_car.run_mode()
    start = time.time()
    while True:
        diff_car.set_car_vel(v, w)
        # BUG FIX: the original condition was `start - time.time() > 2`, which
        # is always negative, so the loop never terminated.
        if time.time() - start > 2:
            break
def test_car_run_mode():
    """Hardware smoke test: open the bus and switch the base into run mode."""
    channel = "/dev/wheels_ZL"
    baud = 115200
    motor_ids = [1, 2]
    bus = ZL_motor_control(channel, baud, baud, motor_ids, 'serial')
    diff_car = car(100, 100, bus, motor_ids)
    diff_car.run_mode()
def test_car_config_mode():
    """Hardware smoke test: open the bus, then drop the base into config mode."""
    channel = "/dev/wheels_ZL"
    baud = 115200
    motor_ids = [1, 2]
    bus = ZL_motor_control(channel, baud, baud, motor_ids, 'serial')
    bus.open_bus()
    diff_car = car(100, 100, bus, motor_ids)
    diff_car.config_mode()
if __name__ == '__main__':
    # Manual hardware smoke tests -- enable one at a time.
    #test_car_config_mode()
    #test_car_run_mode()
    test_set_car_vel(5,5)
| TiderFang/motor_control_bus | diff_car_controller/scripts/car_control.py | car_control.py | py | 4,546 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "can.CanError",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "ZL_motor_control.ZL_motor_control",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "time.... |
33638590751 | import numpy as np
import hydra
from tools.profiler import timeit
from waymo_dataset.registry import PIPELINES
from utils.sampler import preprocess as prep
from utils.bbox import box_np_ops
def _dict_select(dict_, inds):
for k, v in dict_.items():
if isinstance(v, dict):
_dict_select(v, inds)
else:
dict_[k] = v[inds]
def drop_arrays_by_name(gt_names, used_classes):
    """Return (as an int64 array) the indices of names NOT in *used_classes*."""
    keep = [idx for idx, name in enumerate(gt_names) if name not in used_classes]
    return np.array(keep, dtype=np.int64)
@PIPELINES.register_module
class Preprocess(object):
def __init__(self, **kwargs):
self.shuffle_points = kwargs.get("shuffle_points", False) # cfg.shuffle_points
self.min_points_in_gt = kwargs.get("min_points_in_gt", -1)
self.mode = kwargs.get('mode')
if self.mode == "train":
self.global_rotation_noise = list(kwargs.get("global_rot_noise", None)) if kwargs.get("global_rot_noise", None) is not None else None
self.global_scaling_noise = list(kwargs.get("global_scale_noise", None)) if kwargs.get("global_rot_noise", None) is not None else None
self.class_names = kwargs.get("class_names")
assert len(self.class_names) != 0
self.db_sampler = kwargs.get('db_sampler', None)
if self.db_sampler != None:
# print(cfg.db_sampler)
# raise NotImplementedError # TODO: implement the builder !
# self.db_sampler = build_dbsampler(cfg.db_sampler)
from utils.sampler.sample_ops import DataBaseSamplerV2
import pickle, logging
logger = logging.getLogger("build_dbsampler")
info_path = hydra.utils.to_absolute_path(self.db_sampler['db_info_path']) # skip hydra current output folder
with open(info_path, "rb") as f:
db_infos = pickle.load(f)
# build preprocessors
from utils.sampler.preprocess import DBFilterByDifficulty, DBFilterByMinNumPoint, DataBasePreprocessor
preprocessors = []
if "filter_by_difficulty" in self.db_sampler['db_prep_steps']:
v = list(self.db_sampler['db_prep_steps']["filter_by_difficulty"])
preprocessors.append(DBFilterByDifficulty(v, logger=logger))
elif "filter_by_min_num_points" in self.db_sampler['db_prep_steps']:
v = self.db_sampler['db_prep_steps']["filter_by_min_num_points"]
preprocessors.append(DBFilterByMinNumPoint(v, logger=logger))
db_prepor = DataBasePreprocessor(preprocessors)
self.db_sampler = DataBaseSamplerV2(
db_infos,
groups = self.db_sampler['sample_groups'],
db_prepor = db_prepor,
rate = self.db_sampler['rate'],
global_rot_range = list(self.db_sampler['global_random_rotation_range_per_object']),
logger=logger
)
else:
self.db_sampler = None
self.npoints = kwargs.get("npoints", -1)
self.no_augmentation = kwargs.get('no_augmentation', False)
# @timeit
    def __call__(self, res, info):
        """Preprocess one sample: extract the point cloud and, in train mode,
        build ``gt_dict`` (boxes/names/classes) and apply GT-database sampling
        plus global flip/rotation/scaling augmentations.

        :param res: sample dict with "type", "lidar" and (when db sampling is
            on) "metadata" entries; mutated in place.
        :param info: dataset info, passed through unchanged.
        :return: ``(res, info)``
        """
        res["mode"] = self.mode
        # Only the Waymo layout is supported; prefer the fused "combined"
        # cloud when the loader provided one.
        if res["type"] in ["WaymoDataset"]:
            if "combined" in res["lidar"]:
                points = res["lidar"]["combined"]
            else:
                points = res["lidar"]["points"]
        else:
            raise NotImplementedError
        if self.mode == "train":
            # Collect ground-truth boxes and names from the annotations.
            anno_dict = res["lidar"]["annotations"]
            gt_dict = {
                "gt_boxes": anno_dict["boxes"],
                "gt_names": np.array(anno_dict["names"]).reshape(-1),
            }
        if self.mode == "train" and not self.no_augmentation:
            # Drop placeholder/ignore classes before augmentation.
            selected = drop_arrays_by_name(
                gt_dict["gt_names"], ["DontCare", "ignore", "UNKNOWN"]
            )
            _dict_select(gt_dict, selected)
            if self.min_points_in_gt > 0:
                # Discard boxes containing too few lidar points.
                point_counts = box_np_ops.points_count_rbbox(
                    points, gt_dict["gt_boxes"]
                )
                mask = point_counts >= self.min_points_in_gt
                _dict_select(gt_dict, mask)
            # Mask of boxes whose class is one of the trained classes.
            gt_boxes_mask = np.array(
                [n in self.class_names for n in gt_dict["gt_names"]], dtype=np.bool_
            )
            if self.db_sampler:
                # Paste extra ground-truth objects sampled from the database.
                sampled_dict = self.db_sampler.sample_all(
                    res["metadata"]["image_prefix"],
                    gt_dict["gt_boxes"],
                    gt_dict["gt_names"],
                    res["metadata"]["num_point_features"],
                    False,
                    gt_group_ids=None,
                    calib=None,
                    road_planes=None
                )
                if sampled_dict is not None:
                    sampled_gt_names = sampled_dict["gt_names"]
                    sampled_gt_boxes = sampled_dict["gt_boxes"]
                    sampled_points = sampled_dict["points"]
                    sampled_gt_masks = sampled_dict["gt_masks"]
                    gt_dict["gt_names"] = np.concatenate(
                        [gt_dict["gt_names"], sampled_gt_names], axis=0
                    )
                    gt_dict["gt_boxes"] = np.concatenate(
                        [gt_dict["gt_boxes"], sampled_gt_boxes]
                    )
                    gt_boxes_mask = np.concatenate(
                        [gt_boxes_mask, sampled_gt_masks], axis=0
                    )
                    # Prepend the sampled object points to the scene cloud.
                    points = np.concatenate([sampled_points, points], axis=0)
            _dict_select(gt_dict, gt_boxes_mask)
            # Class labels are 1-based indices into self.class_names.
            gt_classes = np.array(
                [self.class_names.index(n) + 1 for n in gt_dict["gt_names"]],
                dtype=np.int32,
            )
            gt_dict["gt_classes"] = gt_classes
            # Global augmentations applied jointly to boxes and points.
            gt_dict["gt_boxes"], points = prep.random_flip_both(gt_dict["gt_boxes"], points)
            gt_dict["gt_boxes"], points = prep.global_rotation(
                gt_dict["gt_boxes"], points, rotation=self.global_rotation_noise
            )
            gt_dict["gt_boxes"], points = prep.global_scaling_v2(
                gt_dict["gt_boxes"], points, *self.global_scaling_noise
            )
        elif self.no_augmentation:
            # No-augmentation path: still filter to known classes and attach
            # 1-based class ids.
            # NOTE(review): this branch also runs in eval mode when
            # no_augmentation is set, where gt_dict is undefined — confirm
            # callers only enable it for training.
            gt_boxes_mask = np.array(
                [n in self.class_names for n in gt_dict["gt_names"]], dtype=np.bool_
            )
            _dict_select(gt_dict, gt_boxes_mask)
            gt_classes = np.array(
                [self.class_names.index(n) + 1 for n in gt_dict["gt_names"]],
                dtype=np.int32,
            )
            gt_dict["gt_classes"] = gt_classes
        if self.shuffle_points:
            # In-place shuffle of point order.
            np.random.shuffle(points)
        res["lidar"]["points"] = points
        if self.mode == "train":
            res["lidar"]["annotations"] = gt_dict
        return res, info
{
"api_name": "numpy.array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "hydra.utils.to_absolut... |
26769234529 | from event_manager.models import Event
from datetime import datetime, timedelta
from celery import shared_task
from django.core.mail import send_mail
import pytz
from scheduler_APP.settings import EMAIL_HOST_USER
@shared_task()
def to_remind():
    """Celery task: e-mail a reminder for every event whose reminder time
    has passed, then clear its ``remind_option`` so it is not sent again.
    """
    pending_events = Event.objects.filter(remind_option__isnull=False)
    for pending in pending_events:
        # Current time shifted by +3h and tagged as UTC, matching how
        # time_to_remind is stored.
        now = pytz.UTC.localize(datetime.now() + timedelta(hours=3))
        if pending.time_to_remind > now:
            continue
        send_mail(
            subject='Upcoming event notification',
            message=(
                f'Please be informed of upcoming event, details are below: \n'
                f'Event: {pending.event} \n Event starts: {pending.event_start} and ends '
                f'{pending.event_finish}'
            ),
            from_email=EMAIL_HOST_USER,
            recipient_list=[pending.user.email]
        )
        # Null out the reminder flag so this event is skipped next run.
        Event.objects.filter(id=pending.id).update(remind_option=None)
| EugeneVojtik/eugene_vojtik_scheduler | event_manager/tasks.py | tasks.py | py | 1,020 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "event_manager.models.Event.objects.filter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "event_manager.models.Event.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "event_manager.models.Event",
"line_number": 11,
"usage_... |
#import libs
import sys
import os
import socket
import pygame
pygame.init()
#define variables
PORT = int(sys.argv[1])          # port number passed on the command line
host = "localhost"
HOST = socket.gethostbyname(host)
#Connect to host at port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Try connecting to host.  Socket failures are OSError subclasses, so catch
# only those instead of a bare `except` that would also swallow Ctrl-C and
# real programming errors.
try:
    s.connect((HOST, PORT))
    print("Connected to server " + str(HOST))
except OSError:
    print("ERROR: Could not connect to server")
    print("Quitting Program")
    quit()
# The server's first message is our player number.
decoded_data = s.recv(1024).decode()
player = int(decoded_data)
print("You are player " + str(decoded_data) )
running = True
while running:
    keys = pygame.key.get_pressed()
    print(keys)
    input()
    if (keys[pygame.K_a]):
        s.send(str.encode("a"))
        print("sendt")
        quit()
    if (keys[pygame.K_1]):
        quit()
    # Bug fix: `player` is an int, so `"..." + player` raised TypeError;
    # convert it explicitly before concatenating.
    print("Ended koop " + str(player))
    data = input()
    s.send(str.encode(data))
    print(decoded_data)
| ivanebos/pingPongNetworks | client.py | client.py | py | 1,036 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "socket.gethostbyname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"li... |
6533803679 | import cv2
import numpy as np
from matplotlib import pyplot as plt
# Load the three blurred inputs and the sharp original as grayscale (flag 0).
blur1 = cv2.imread('part1/blur1.png', 0)
blur2 = cv2.imread('part1/blur2.png', 0)
blur3 = cv2.imread('part1/blur3.png', 0)
oriimg = cv2.imread('part1/original.jpg', 0)
# fft to convert the image to freq domain
fblur1 = np.fft.fft2(blur1)
fblur2 = np.fft.fft2(blur2)
fblur3 = np.fft.fft2(blur3)
fori = np.fft.fft2(oriimg)
def magspec(fim):
    """Center the zero-frequency component of a 2-D spectrum and compute
    its log-magnitude.

    Returns ``(shifted_spectrum, 20 * log(|shifted_spectrum|))``.
    """
    centered = np.fft.fftshift(fim)
    log_magnitude = 20 * np.log(np.abs(centered))
    return centered, log_magnitude
# Centered spectra and their log-magnitudes for each image.
fshiftblur1, magnitude_spectrum1 = magspec(fblur1)
fshiftblur2, magnitude_spectrum2 = magspec(fblur2)
fshiftblur3, magnitude_spectrum3 = magspec(fblur3)
fshiftori, magnitude_spectrumori = magspec(fori)
# ---------------------------------------------------------------
def decon(fshiftblur, fshiftor):
    """Naive frequency-domain deconvolution.

    Dividing the blurred spectrum by the original spectrum estimates the
    blur kernel; dividing the blurred spectrum by that kernel recovers the
    original spectrum.  Returns the kernel, the recovered spectrum, and the
    log-magnitude (20*log|.|) of each.
    """
    estimated_kernel = fshiftblur / fshiftor
    recovered = fshiftblur / estimated_kernel
    kernel_mag = 20 * np.log(np.abs(estimated_kernel))
    recovered_mag = 20 * np.log(np.abs(recovered))
    return estimated_kernel, recovered, kernel_mag, recovered_mag
# Estimate each blur kernel and recover each original spectrum.
kernel1, result1, magnitude_spectrumkernel1, magnitude_spectrumresult1 = decon(fshiftblur1, fshiftori)
kernel2, result2, magnitude_spectrumkernel2, magnitude_spectrumresult2 = decon(fshiftblur2, fshiftori)
kernel3, result3, magnitude_spectrumkernel3, magnitude_spectrumresult3 = decon(fshiftblur3, fshiftori)
def ifft(image):
    """Transform a centered frequency-domain image back to the spatial domain.

    Undoes the earlier fftshift, then applies OpenCV's inverse DFT; the real
    and imaginary planes are stacked because cv2.idft expects a 2-channel
    array.  Returns the per-pixel magnitude of the inverse transform.
    """
    uncentered = np.fft.ifftshift(image)
    planes = np.array(np.dstack([uncentered.real, uncentered.imag]))
    spatial = cv2.idft(planes)
    return cv2.magnitude(spatial[:, :, 0], spatial[:, :, 1])
# Recover the spatial-domain images from the deconvolved spectra, then render
# three 2x2 comparison figures: inputs, estimated kernels, recovered images.
im_backresult1 = ifft(result1)
im_backresult2 = ifft(result2)
im_backresult3 = ifft(result3)
# images
plt.figure('images', figsize=(16,16))
plt.subplot(221), plt.imshow(blur1, cmap='gray')
plt.title('blur1'), plt.xticks([]), plt.yticks([])
plt.subplot(222), plt.imshow(blur2, cmap='gray')
plt.title('blur2'), plt.xticks([]), plt.yticks([])
plt.subplot(223), plt.imshow(blur3, cmap='gray')
plt.title('blur3'), plt.xticks([]), plt.yticks([])
plt.subplot(224), plt.imshow(oriimg, cmap='gray')
plt.title('original image'), plt.xticks([]), plt.yticks([])
plt.savefig('results/part1/images.png')
plt.show()
# kernels
plt.figure('kernels', figsize=(16,16))
plt.subplot(221), plt.imshow(magnitude_spectrumkernel1, cmap='gray')
plt.title('kernel1 magnitude spectrum'), plt.xticks([]), plt.yticks([])
plt.subplot(222), plt.imshow(magnitude_spectrumkernel2, cmap='gray')
plt.title('kernel2 magnitude spectrum'), plt.xticks([]), plt.yticks([])
plt.subplot(223), plt.imshow(magnitude_spectrumkernel3, cmap='gray')
plt.title('kernel3 magnitude spectrum'), plt.xticks([]), plt.yticks([])
plt.subplot(224), plt.imshow(magnitude_spectrumori, cmap='gray')
plt.title('original img. mag. spectrum'), plt.xticks([]), plt.yticks([])
plt.savefig('results/part1/kernels.png')
plt.show()
# deconvolution results
plt.figure('results', figsize=(16,16))
plt.subplot(221), plt.imshow(im_backresult1, cmap='gray')
plt.title('deconvolution result1'), plt.xticks([]), plt.yticks([])
plt.subplot(222), plt.imshow(im_backresult2, cmap = 'gray')
plt.title('deconvolution result2'), plt.xticks([]), plt.yticks([])
plt.subplot(223), plt.imshow(im_backresult3, cmap = 'gray')
plt.title('deconvolution result3'), plt.xticks([]), plt.yticks([])
plt.subplot(224), plt.imshow(oriimg, cmap = 'gray')
plt.title('original image'), plt.xticks([]), plt.yticks([])
plt.savefig('results/part1/deconresults.png')
plt.show()
| ebbalseven/Motion-Deblurring-in-Frequency-Domain | part1.py | part1.py | py | 3,884 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 8,
... |
#Python libraries that we need to import for our bot
import random
from flask import Flask, request
from pymessenger.bot import Bot
# using get
import os
app = Flask(__name__)
# SECURITY: a live Facebook page access token is hard-coded below and is now
# committed to source control — rotate it and load both tokens from
# environment variables (e.g. os.environ) instead.
ACCESS_TOKEN = "EAAHGgxLDfLYBAGg0ZCgxEgo297oOfe0SuqVIvT2xWmXeJfNKZC7bpm35LZCluAHwULwKiAPmny2SVeLDCBlGackR9F5LYBPnoRHZBhWqGVEEEwZBPA9WbWn1DdApxPoF2pJbMNuGtaCOXvMMkWOsomQ61PivuTOUrTIpAaUCzggZDZD"
VERIFY_TOKEN = "NEWPROJECT"
bot = Bot(ACCESS_TOKEN)
#We will receive messages that Facebook sends our bot at this endpoint
@app.route("/", methods=['GET', 'POST'])
def receive_message():
if request.method == 'GET':
# verify before message
token_sent = request.args.get("hub.verify_token")
return verify_fb_token(token_sent)
else:
# get message from user
output = request.get_json()
print(output)
for event in output['entry']:
messaging = event['messaging']
for message in messaging:
# get("key") in dictionary => value
if message.get('message'):
#Facebook Messenger ID for user so we know where to send response back to
recipient_id = message['sender']['id']
if message['message'].get('text'):
response_sent_text = get_message()
# Response to user depend on id and message
send_message(recipient_id, response_sent_text)
#if user sends us a GIF, photo,video, or any other non-text item
if message['message'].get('attachments'):
response_sent_nontext = get_message()
send_message(recipient_id, response_sent_nontext)
return "Message Processed"
def verify_fb_token(token_sent):
    """Return Facebook's challenge when *token_sent* matches our verify
    token; otherwise return an error string."""
    if token_sent != VERIFY_TOKEN:
        return 'Invalid verification token'
    return request.args.get("hub.challenge")
def get_message():
    """Choose one canned encouragement at random to send back to the user."""
    canned_replies = [
        "You are stunning!",
        "We're proud of you.",
        "Keep on being you!",
        "We're greatful to know you :)",
    ]
    return random.choice(canned_replies)
#uses PyMessenger to send response to user
def send_message(recipient_id, response):
    """Send *response* as a text message to Messenger user *recipient_id*."""
    #sends user the text message provided via input response parameter
    bot.send_text_message(recipient_id, response)
    return "success"
if __name__ == "__main__":
app.run() | longNT0dev/mess_bot_chat | botMessenger.py | botMessenger.py | py | 2,553 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pymessenger.bot.Bot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.reques... |
#!/usr/bin/env python3
# Show the RGB preview stream of every connected DepthAI device at once,
# one OpenCV window per device; press 'q' to quit.
import cv2
import depthai as dai
import contextlib
# Start defining a pipeline
pipeline = dai.Pipeline()
# Define a source - color camera
cam_rgb = pipeline.createColorCamera()
cam_rgb.setPreviewSize(600, 600)
cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
cam_rgb.setInterleaved(False)
# Create output
xout_rgb = pipeline.createXLinkOut()
xout_rgb.setStreamName("rgb")
cam_rgb.preview.link(xout_rgb.input)
q_rgb_list = []
# ExitStack keeps every dai.Device open until the whole block exits.
# https://docs.python.org/3/library/contextlib.html#contextlib.ExitStack
with contextlib.ExitStack() as stack:
    for device_info in dai.Device.getAllAvailableDevices():
        device = stack.enter_context(dai.Device(pipeline, device_info))
        print("Conected to " + device_info.getMxId())
        # NOTE(review): startPipeline() with no args is deprecated in newer
        # depthai releases — confirm against the pinned library version.
        device.startPipeline()
        # Output queue will be used to get the rgb frames from the output defined above
        q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
        q_rgb_list.append(q_rgb)
    while True:
        # Poll every device; tryGet() returns None when no frame is ready.
        for i, q_rgb in enumerate(q_rgb_list):
            in_rgb = q_rgb.tryGet()
            if in_rgb is not None:
                cv2.imshow("rgb-" + str(i + 1), in_rgb.getCvFrame())
        if cv2.waitKey(1) == ord('q'):
            break
| hello-word-yang/depthai-experiments | gen2-multiple-devices/main.py | main.py | py | 1,320 | python | en | code | null | github-code | 36 | [
{
"api_name": "depthai.Pipeline",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "depthai.CameraBoardSocket",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "depthai.ColorCameraProperties",
"line_number": 14,
"usage_type": "attribute"
},
{
... |
15675080040 | from django.views.decorators.http import require_http_methods
from common.json import ModelEncoder
from .models import AutomobileVO, SalesPerson, Customer, SaleRecord
from django.http import JsonResponse
import json
class AutomobileVOEncoder(ModelEncoder):
    # JSON encoder for AutomobileVO value objects polled from the
    # inventory service.
    model = AutomobileVO
    properties = [
        "vin",
        "import_href",
        "id"
    ]
class SalesPersonEncoder(ModelEncoder):
    # JSON encoder for SalesPerson rows.
    model = SalesPerson
    properties = [
        "first_name",
        "last_name",
        "employee_id",
        "id"
    ]
class CustomerEncoder(ModelEncoder):
    # JSON encoder for Customer rows.
    model = Customer
    properties = [
        "first_name",
        "last_name",
        "address",
        "phone_number",
        "id"
    ]
class SaleRecordEncoder(ModelEncoder):
    # JSON encoder for SaleRecord rows, including their related objects.
    model = SaleRecord
    properties = [
        "automobile",
        "sales_person",
        "customer",
        "price",
        "id"
    ]
    # Nested encoders for the three related objects.
    encoders = {
        "automobile": AutomobileVOEncoder(),
        "sales_person": SalesPersonEncoder(),
        "customer": CustomerEncoder(),
    }
    def get_extra_data(self, o):
        # Overrides the three related-object keys with flat display values
        # (first name / last name / VIN) in the serialized output.
        return {"sales_person": o.sales_person.first_name,
                "customer": o.customer.last_name,
                "automobile": o.automobile.vin,
                }
@require_http_methods(["GET", "POST"])
def api_salesperson_list(request):
if request.method == "GET":
salespeople = SalesPerson.objects.all()
return JsonResponse(
{'salespeople': salespeople},
encoder=SalesPersonEncoder
)
else:
content = json.loads(request.body)
salesperson = SalesPerson.objects.create(**content)
return JsonResponse(
salesperson,
encoder=SalesPersonEncoder,
safe=False,
)
@require_http_methods(["GET", "PUT"])
def api_detail_salesperson(request, id):
if request.method == 'GET':
try:
salesperson = SalesPerson.objects.get(id=id)
return JsonResponse(
salesperson,
encoder=SalesPersonEncoder,
safe=False
)
except SalesPerson.DoesNotExist:
return JsonResponse({"message": "Invaild ID"}, status=404)
elif request.method == 'PUT':
try:
content = json.loads(request.body)
SalesPerson.objects.filter(id=id).update(**content)
salesperson = SalesPerson.objects.get(id=id)
return JsonResponse(
salesperson,
encoder=SalesPersonEncoder,
safe=False,
)
except SalesPerson.DoesNotExist:
return JsonResponse({"message": "Invaild ID"}, status=404)
else:
try:
count, _ = SalesPerson.objects.filter(id=id).delete()
return JsonResponse({"deleted": count > 0})
except SalesPerson.DoesNotExist:
return JsonResponse({"message": "Invaild ID"}, status=404)
@require_http_methods(["GET", "POST"])
def customer_list(request):
if request.method == "GET":
customer = Customer.objects.all()
return JsonResponse(
{'customer': customer},
encoder=CustomerEncoder
)
else:
content = json.loads(request.body)
customer = Customer.objects.create(**content)
return JsonResponse(
customer,
encoder=CustomerEncoder,
safe=False,
)
@require_http_methods(["DELETE", "GET", "PUT"])
def customer_details(request, id):
if request.method == 'GET':
try:
customer = Customer.objects.get(id=id)
return JsonResponse(
customer,
encoder=CustomerEncoder,
safe=False
)
except Customer.DoesNotExist:
return JsonResponse({"message": "Invaild ID"}, status=404)
elif request.method == 'PUT':
try:
content = json.loads(request.body)
Customer.objects.filter(id=id).update(**content)
customer = Customer.objects.get(id=id)
return JsonResponse(
customer,
encoder=CustomerEncoder,
safe=False,
)
except Customer.DoesNotExist:
return JsonResponse({"message": "Invaild ID"}, status=404)
else:
try:
count, _ = Customer.objects.filter(id=id).delete()
return JsonResponse({"deleted": count > 0})
except Customer.DoesNotExist:
return JsonResponse({"message": "Invaild ID"}, status=404)
@require_http_methods({"GET", "POST"})
def sales_list(request):
if request.method == "GET":
sales = SaleRecord.objects.all()
return JsonResponse(
{"sales": sales},
encoder=SaleRecordEncoder,
safe=False
)
else:
content = json.loads(request.body)
try:
salesperson_id = content["sales_person"]
salesperson = SalesPerson.objects.get(id=salesperson_id)
content["sales_person"] = salesperson
except SalesPerson.DoesNotExist:
return JsonResponse(
{"message": "Invaild Sales_Person ID"},
status=400
)
try:
customer_id = content["customer"]
customer = Customer.objects.get(id=customer_id)
content["customer"] = customer
except SalesPerson.DoesNotExist:
return JsonResponse(
{"message": "invaild customer id"},
status=400
)
try:
auto_vin = content["automobile"]
automobile = AutomobileVO.objects.get(vin=auto_vin)
content["automobile"] = automobile
except AutomobileVO.DoesNotExist:
return JsonResponse(
{"message": "Invalid automobile vin"},
status=400
)
sale = SaleRecord.objects.create(**content)
return JsonResponse(
sale,
encoder=SaleRecordEncoder,
safe=False
)
@require_http_methods(["GET", "DELETE"])
def sale_details(request, id):
if request.method == "GET":
sale = SaleRecord.objects.get(id=id)
return JsonResponse(
sale,
encoder=SaleRecordEncoder,
safe=False,
)
else:
count, _ = SaleRecord.objects.filter(id=id).delete()
return JsonResponse({"deleted sale": count > 0})
| colinprize/DealershipPro | sales/api/sales_rest/views.py | views.py | py | 6,502 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "common.json.ModelEncoder",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "models.AutomobileVO",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "common.json.ModelEncoder",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "m... |
42778532873 | import tempfile
from datetime import datetime, timedelta
from typing import Any, Generator
import openpyxl
import pandas as pd
import pytest
from dateutil.tz import tzutc
from pytest_mock import MockFixture
from toucan_connectors.s3.s3_connector import S3Connector, S3DataSource
from toucan_connectors.toucan_connector import ConnectorStatus
@pytest.fixture
def raw_connector() -> S3Connector:
    # Connector configured with static STS credentials; construction makes
    # no AWS calls, so this is safe to build in every test.
    return S3Connector(
        name='my_sts_s3_connector',
        bucket_name='my-s3-bucket',
        role_arn='my-role-arn',
        prefix='some/path',
        workspace_id='workspace-id',
        sts_access_key_id='id',
        sts_secret_access_key='secret',
    )
@pytest.fixture
def connector(mocker: MockFixture, raw_connector: S3Connector) -> Generator[Any, Any, Any]:
    # Same connector, but with the STS AssumeRole call stubbed out so tests
    # never touch AWS; the fake credentials below are what downstream code
    # reads from the AssumeRole response.
    mocker.patch.object(
        raw_connector,
        '_get_assumed_sts_role',
        return_value={
            'Credentials': {
                'AccessKeyId': 'test',
                'SecretAccessKey': 'toto',
                'SessionToken': 'tata',
            }
        },
    )
    yield raw_connector
@pytest.fixture
def sts_data_source_csv() -> Generator[Any, Any, Any]:
    # Data source pointing at a single CSV key, with a preview limit.
    yield S3DataSource(
        domain='test',
        name='test',
        file='my-file.csv',
        reader_kwargs={'preview_nrows': 2},
        fetcher_kwargs={},
    )
@pytest.fixture
def sts_data_source_xlsx() -> Generator[Any, Any, Any]:
    # Data source pointing at a single Excel key, read with openpyxl.
    yield S3DataSource(
        domain='test',
        name='test',
        file='my-file.xlsx',
        reader_kwargs={'engine': 'openpyxl'},
        fetcher_kwargs={},
    )
@pytest.fixture
def sts_data_source_regex() -> Generator[Any, Any, Any]:
    # Data source whose `file` is a regex matching several keys
    # (e.g. data1.csv, data123.csv).
    yield S3DataSource(
        domain='test',
        name='test',
        file='data[0-9]+\\.csv$',
        reader_kwargs={},
        fetcher_kwargs={},
    )
def test_get_status(mocker: MockFixture, connector: S3Connector) -> None:
    """get_status is up when AssumeRole succeeds, down (with the wrapped
    error message) when it raises."""
    # Test case where get_sts_role returns without raising an exception
    expected_status = ConnectorStatus(status=True)
    actual_status = connector.get_status()
    assert actual_status == expected_status
    # Now make the stubbed AssumeRole raise and expect a failed status.
    mocker.patch.object(connector, '_get_assumed_sts_role', side_effect=Exception('Error'))
    expected_status = ConnectorStatus(
        status=False,
        error='Cannot verify connection to S3 and/or AssumeRole failed : Error',
    )
    actual_status = connector.get_status()
    assert actual_status == expected_status
def test_forge_url(connector: S3Connector) -> None:
    """_forge_url embeds url-encoded credentials and normalizes the prefix
    (leading/trailing/duplicate slashes) into the s3:// URL."""
    assert (
        connector._forge_url('key', 'secret', 'token', 'file')
        == 's3://key:secret@my-s3-bucket/some/path/file'
    )
    # with special characters, those needed to be urlencoded
    assert (
        connector._forge_url('k/e@y', 'sec/re@special/t', 'token1', 'file')
        == 's3://k%2Fe%40y:sec%2Fre%40special%2Ft@my-s3-bucket/some/path/file'
    )
    # on prefix empty
    connector.prefix = ''
    assert (
        connector._forge_url('key', 'secret', 'token3', 'file')
        == 's3://key:secret@my-s3-bucket/file'
    )
    connector.prefix = 'tea/'
    assert (
        connector._forge_url('key', 'secret', 'token', 'fileC')
        == 's3://key:secret@my-s3-bucket/tea/fileC'
    )
    connector.prefix = '/tea/secondo'
    assert (
        connector._forge_url('key', 'secret', 'token', 'fileB')
        == 's3://key:secret@my-s3-bucket/tea/secondo/fileB'
    )
    connector.prefix = '///tea/secondo/tertio////'
    assert (
        connector._forge_url('key', 'secret', 'token', 'fileA')
        == 's3://key:secret@my-s3-bucket/tea/secondo/tertio/fileA'
    )
    connector.prefix = 'tea'
    assert (
        connector._forge_url('key', 'secret', 'token', '/fileZ')
        == 's3://key:secret@my-s3-bucket/tea/fileZ'
    )
def test_validate_external_id(mocker: MockFixture) -> None:
    # workspace_id should override external_id
    assert (
        S3Connector(
            name='my_sts_s3_connector',
            bucket_name='my-s3-bucket',
            role_arn='my-role-arn',
            prefix='some/path',
            workspace_id='a',
            external_id='b',
        ).external_id
        == 'a'
    )
def test_retrieve_data_with_limit_offset(
    mocker: MockFixture,
    connector: S3Connector,
    sts_data_source_csv: S3DataSource,
    sts_data_source_xlsx: S3DataSource,
) -> None:
    """offset/limit slice rows of both xlsx and csv sources, and the
    __filename__ column records the matched S3 key.

    All S3 access is faked: peakina's s3_open/_s3_open_file_with_retries are
    patched, the boto3 paginator advertises two keys, and local temp files
    stand in for the downloaded objects.
    """
    # We mock s3_open()
    mock_s3_open = mocker.patch('peakina.io.s3.s3_utils.s3_open')
    mock_s3_open_retries = mocker.patch('peakina.io.s3.s3_utils._s3_open_file_with_retries')
    boto3_session = mocker.patch('toucan_connectors.s3.s3_connector.boto3.Session')
    boto3_session.return_value.client.return_value.get_paginator.return_value.paginate.return_value = [
        {'Contents': [{'Key': 'my-file.csv'}, {'Key': 'my-file.xlsx'}]}
    ]
    with tempfile.NamedTemporaryFile(suffix='.xlsx') as temp_excel_file:
        with tempfile.NamedTemporaryFile(suffix='.csv') as temp_csv_file:
            ### --- for excel --- ###
            excel_df = pd.DataFrame({'X': [1, 2, 3, 4], 'Y': [5, 6, 7, 8], 'Z': [9, 10, 11, 12]})
            excel_df.to_excel(temp_excel_file.name, engine='openpyxl', index=False)
            mocker.patch('tempfile.NamedTemporaryFile', return_value=temp_excel_file)
            expected_return = excel_df.to_string()
            mock_s3_open_retries.return_value.read.return_value = expected_return.encode('utf-8')
            # s3_open side_effect
            mock_s3_open.side_effect = [
                temp_excel_file.read(),
                openpyxl.load_workbook(temp_excel_file.name),
            ]
            # offset=2, limit=1 -> exactly the third row.
            result = connector._retrieve_data(sts_data_source_xlsx, offset=2, limit=1)
            # assert that result is a DataFrame and has the expected values
            assert isinstance(result, pd.DataFrame)
            expected_result = pd.DataFrame(
                {'X': [3], 'Y': [7], 'Z': [11], '__filename__': 'my-file.xlsx'}
            )
            assert result.equals(expected_result)
            ### --- for csv --- ###
            csv_df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [5, 6, 7, 8], 'C': [9, 10, 11, 12]})
            csv_df.to_csv(temp_csv_file.name, index=False)
            mocker.patch('tempfile.NamedTemporaryFile', return_value=temp_csv_file)
            expected_return = csv_df.to_csv(index=False, sep=',') or ''
            mock_s3_open_retries.return_value.read.return_value = expected_return.encode('utf-8')
            # s3_open side_effect
            mock_s3_open.side_effect = [
                temp_csv_file.read().decode('utf-8'),
                pd.read_csv(temp_csv_file.name),
            ]
            # offset=1, limit=2 -> rows two and three.
            result = connector._retrieve_data(sts_data_source_csv, offset=1, limit=2)
            # assert that result is a DataFrame and has the expected values
            assert isinstance(result, pd.DataFrame)
            expected_result = pd.DataFrame(
                {'A': [2, 3], 'B': [6, 7], 'C': [10, 11], '__filename__': 'my-file.csv'}
            )
            assert result.equals(expected_result)
def test_retrieve_data_match_patterns(
    mocker: MockFixture, connector: S3Connector, sts_data_source_regex: S3DataSource
) -> None:
    """Only bucket keys matching the data source's regex are fetched."""
    connector._forge_url = mocker.Mock(return_value='s3://example.com/data.csv')
    boto3_session = mocker.patch('toucan_connectors.s3.s3_connector.boto3.Session')
    boto3_session.return_value.client.return_value.get_paginator.return_value.paginate.return_value = [
        {
            'Contents': [
                {'Key': 'data/file1.txt'},
                {'Key': 'data1.csv'},
                {'Key': 'data123.csv'},
                {'Key': 'data/subfolder/file3.txt'},
                {'Key': 'data/subfolder/data2.csv'},
            ]
        }
    ]
    peakina_datasource = mocker.patch('toucan_connectors.s3.s3_connector.DataSource')
    peakina_datasource.return_value.get_df.return_value = pd.DataFrame()
    # Invoke the _retrieve_data method
    _ = connector._retrieve_data(sts_data_source_regex)
    # Assertions
    connector._forge_url.assert_called()
    # the url forger was called 2 times
    assert connector._forge_url.call_count == 2
    # for data1.csv and data123.csv because they match the regex
    # 'data[0-9]+\.csv$'
    assert connector._forge_url.call_args_list[0][1]['file'] == 'data1.csv'
    assert connector._forge_url.call_args_list[1][1]['file'] == 'data123.csv'
def test_get_assumed_sts_role_cached(mocker: MockFixture, raw_connector: S3Connector) -> None:
    """should cache assume role"""
    boto3_client = mocker.patch('toucan_connectors.s3.s3_connector.boto3.client')
    sts_client = boto3_client()
    # Credentials valid for another hour, so the second call must hit the cache.
    sts_client.assume_role.return_value = {
        'Credentials': {
            'Expiration': datetime.utcnow().replace(tzinfo=tzutc()) + timedelta(hours=1)
        }
    }
    raw_connector._get_assumed_sts_role()
    raw_connector._get_assumed_sts_role()
    assert sts_client.assume_role.call_count == 1
def test_get_assumed_sts_role_expired(mocker: MockFixture, raw_connector: S3Connector) -> None:
    """should invalidate cache and re-assume role when expired"""
    boto3_client = mocker.patch('toucan_connectors.s3.s3_connector.boto3.client')
    sts_client = boto3_client()
    # Already-expired credentials (Expiration one hour in the past).
    sts_client.assume_role.return_value = {
        'Test': 'OK',
        'Credentials': {
            'Expiration': datetime.utcnow().replace(tzinfo=tzutc()) + timedelta(hours=-1)
        },
    }
    raw_connector._get_assumed_sts_role()
    # NOTE(review): a single call yields call_count == 2 — presumably the
    # connector re-assumes once it sees the returned credentials are already
    # expired; confirm against S3Connector._get_assumed_sts_role.
    assert sts_client.assume_role.call_count == 2
| ToucanToco/toucan-connectors | tests/s3/test_s3.py | test_s3.py | py | 9,501 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "toucan_connectors.s3.s3_connector.S3Connector",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "toucan_connectors.s3.s3_connector.S3Connector",
"line_number": 16,
"usa... |
import os
import sys
import numpy as np
import torch
from torchdrug import data
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from diffpack import rotamer
if __name__ == "__main__":
    # Manual sanity check: rotating side chains by k * pi/4 should shift each
    # chi angle by the same amount (mod 2*pi), and a zero rotation should
    # leave atom positions untouched.
    protein = data.Protein.from_pdb('10mh_A.pdb', atom_feature=None, bond_feature=None,
                                    residue_feature=None, mol_feature=None)
    # Drop atoms with atom_name == 37 before computing chis.
    protein = protein.subgraph(protein.atom_name != 37)
    chis = rotamer.get_chis(protein)
    rotate_angles = torch.zeros_like(chis)
    new_protein = protein.clone()
    new_protein = rotamer.rotate_side_chain(protein, rotate_angles)
    # Zero rotation must be the identity on coordinates.
    assert (new_protein.node_position == protein.node_position).all()
    for i in range(8):
        new_protein = protein.clone()
        rotamer.rotate_side_chain(new_protein, rotate_angles)
        new_chis = rotamer.get_chis(new_protein)
        diff = (new_chis - chis).fmod(np.pi * 2)
        # Accept NaN chis, or a shift of i*pi/4 from either wrap-around side.
        test_mask = diff.isnan() | ((diff - np.pi * i / 4).abs() < 1e-4) | ((diff + np.pi * (8-i) / 4).abs() < 1e-4)
        if not test_mask.all():
            import pdb; pdb.set_trace()
        rotate_angles = rotate_angles + np.pi / 4
| DeepGraphLearning/DiffPack | test/test_rotamer.py | test_rotamer.py | py | 1,137 | python | en | code | 42 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
14102775036 | """
Functions that are helpful for evaluating models.
"""
from typing import Dict, Any, List
import pandas as pd
import numpy as np
from . import error_metrics
def evaluate_predictions(
    df: pd.DataFrame,
    y_true: str,
    y_pred: str,
    metrics_dict: Dict[str, Any] = None,
) -> pd.DataFrame:
    """
    Evaluate predictions and return evaluation results data frame.

    Parameters
    ----------
    df : pd.DataFrame
        Must contain the 'group' column plus the columns named by `y_true`
        and `y_pred`. Each group is evaluated separately and becomes a row
        in the output data frame.
    y_true : str
        Name of the column in `df` holding ground-truth labels.
        (The previous docstring described this as a Series, but the code
        uses it as a column label: `df.loc[..., y_true]`.)
    y_pred : str
        Name of the column in `df` holding predictions. Okay if it
        contains null.
    metrics_dict : Dict[str, Any], optional
        Dictionary of metric names & functions to use for evaluation;
        each becomes a column in the output data frame. Defaults to mean
        absolute error only — resolved inside the function to avoid a
        mutable default argument. A 'percent_within_n' entry is skipped
        (handled by `calc_percent_within_n_df`).

    Returns
    -------
    pd.DataFrame with one row per group and one column per metric.
    """
    if metrics_dict is None:
        metrics_dict = {
            'mean_absolute_error':
                error_metrics.mean_absolute_error
        }
    evaluation_df = pd.DataFrame(
        index=df.group.unique(),
    )
    for metric_name, metric_func in metrics_dict.items():
        if metric_name == 'percent_within_n':
            continue  # Handled by separate function
        evaluation_df[metric_name] = None
        for group in df.group.unique():
            evaluation_df.loc[group, metric_name] =\
                metric_func(
                    df.loc[df.group == group, y_true],
                    df.loc[df.group == group, y_pred],
                )
    return evaluation_df
def calc_percent_within_n_df(
    df: pd.DataFrame,
    y_true: str,
    y_pred: str,
    ns: List[int] = None,
) -> pd.DataFrame:
    """
    Calculate percent-within-N metrics for a regression model.

    Parameters
    ----------
    df : pd.DataFrame
        Must contain the 'group' column plus the columns named by `y_true`
        and `y_pred`. Each group is evaluated separately and becomes a row
        in the output data frame.
    y_true : str
        Name of the column in `df` holding ground-truth labels.
        (Previously documented as a Series, but the code uses it as a
        column label: `df.loc[..., y_true]`.)
    y_pred : str
        Name of the column in `df` holding predictions. Okay if it
        contains null.
    ns : List[int], optional
        List of thresholds n to use; defaults to [10, 30, 60].
        Resolved inside the function to avoid a mutable default argument.

    Returns
    -------
    pd.DataFrame
        Evaluation results: row per group, column 'percent_within_<n>'
        per threshold.
    """
    if ns is None:
        ns = [10, 30, 60]
    evaluation_df = pd.DataFrame(
        index=df.group.unique(),
    )
    for t in ns:
        col = 'percent_within_{}'.format(t)
        evaluation_df[col] = 0.0
        for group in df.group.unique():
            evaluation_df.loc[group, col] =\
                error_metrics.percent_within_n(
                    df.loc[df.group == group, y_true],
                    df.loc[df.group == group, y_pred],
                    t
                )
    return evaluation_df
def residual_distribution_summary_lookahead(
    data: pd.DataFrame,
    target: str,
    prediction: str,
    lookahead_column: str,
    lookahead_bins_seconds: np.ndarray = 60*np.array(
        [-1, 0, 5, 10, 15, 30, 60, 90, 120, 240, 360]
    ),
    quantiles: np.ndarray = np.array([0.05, 0.25, 0.5, 0.75, 0.95]),
):
    """Summarize the residual (target - prediction) distribution per lookahead bin.

    Consecutive entries of ``lookahead_bins_seconds`` define half-open
    intervals (lower, upper] over ``lookahead_column``. For each bin, the
    summary row holds the bin edges and midpoint, the requested residual
    quantiles, the sample count, and the median absolute error. Returns a
    float-typed frame with one row per bin and a fresh integer index.
    """
    quantile_columns = ['q{}'.format(q) for q in quantiles]
    per_bin_frames = []
    for lower, upper in zip(
        lookahead_bins_seconds[:-1],
        lookahead_bins_seconds[1:]
    ):
        # Rows whose lookahead falls in the half-open interval (lower, upper].
        in_bin = data.loc[
            (
                (data[lookahead_column] > lower) &
                (data[lookahead_column] <= upper)
            ),
            :
        ]
        residuals = in_bin[target] - in_bin[prediction]
        row = pd.DataFrame(
            index=[0],
            columns=(
                ['bin_lower', 'bin_middle', 'bin_upper'] +
                quantile_columns +
                ['num_samples', 'median_absolute_error']
            ),
        )
        row.loc[0, 'bin_lower'] = lower
        row.loc[0, 'bin_middle'] = (upper + lower) / 2
        row.loc[0, 'bin_upper'] = upper
        row.loc[0, 'num_samples'] = in_bin.shape[0]
        # NaN-aware statistics so null predictions do not poison the summary.
        row.loc[0, 'median_absolute_error'] = np.nanmedian(np.abs(residuals))
        row.loc[0, quantile_columns] = np.nanquantile(residuals, quantiles)
        per_bin_frames.append(row)
    return pd.concat(per_bin_frames)\
        .reset_index(drop=True)\
        .astype(float)
| nasa/ML-airport-data-services | data_services/evaluation_utils.py | evaluation_utils.py | py | 4,896 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pandas.Series",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pandas.Series",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "typing.Di... |
6071086081 | '''
Created on 29.03.2017
@author: abaktheer
Microplane Fatigue model 3D
(compressive plasticity (CP) + tensile damage (TD)
+ cumulative damage sliding (CSD))
Using Jirasek homogenization approach [1999]
'''
import numpy as np
from bmcs_utils.api import Float, View, Item
from ibvpy.tmodel.mats3D.mats3D_eval import MATS3DEval
from scipy.integrate import cumtrapz
# @tr.provides(INTIM)
class VUNTIM(MATS3DEval):
    """
    Vectorized uncoupled normal-tangential interface model.

    Microplane fatigue model combining compressive plasticity (CP),
    tensile damage (TD) and cumulative sliding damage (CSD). The normal
    and tangential responses are evaluated independently ("uncoupled");
    state variables are updated in place on the arrays passed in ``Eps``.
    """
    # -------------------------------------------------------------------------
    # Elasticity
    # -------------------------------------------------------------------------
    E_N = Float(46666.6666666667, MAT=True)  # normal stiffness [MPa]
    E_T = Float(7777.7777777777765, MAT=True)  # tangential stiffness [MPa]
    # Tangential hardening / cumulative sliding damage parameters.
    gamma_T = Float(1000., MAT=True)  # kinematic hardening modulus
    K_T = Float(1000., MAT=True)  # isotropic hardening modulus
    S_T = Float(0.005, MAT=True)  # damage strength
    r_T = Float(9., MAT=True)
    e_T = Float(12., MAT=True)
    c_T = Float(4.6, MAT=True)
    sigma_T_0 = Float(1.7, MAT=True)  # sliding threshold stress
    a = Float(0.003, MAT=True)  # pressure-sensitivity coefficient
    # ------------------------------------------------------------------------------
    # Normal_Tension constitutive law parameters (without cumulative normal strain)
    # ------------------------------------------------------------------------------
    Ad = Float(500.0, MAT=True)  # damage evolution parameter
    eps_0 = Float(0.00008, MAT=True)  # tensile damage onset strain
    # -----------------------------------------------
    # Normal_Compression constitutive law parameters
    # -----------------------------------------------
    K_N = Float(10000., MAT=True)  # isotropic hardening modulus
    gamma_N = Float(5000., MAT=True)  # kinematic hardening modulus
    sig_0 = Float(30., MAT=True)  # compressive yield stress [MPa]
    # Interactive parameter view for the bmcs_utils UI.
    ipw_view = View(
        Item('E_N'),
        Item('E_T'),
        Item('Ad'),
        Item('eps_0'),
        Item('K_N'),
        Item('gamma_N'),
        Item('sig_0'),
        Item('gamma_T', latex=r'\gamma_\mathrm{T}', minmax=(10, 100000)),
        Item('K_T', latex=r'K_\mathrm{T}', minmax=(10, 10000)),
        Item('S_T', latex=r'S_\mathrm{T}', minmax=(0.001, 0.01)),
        Item('r_T', latex=r'r_\mathrm{T}', minmax=(1, 3)),
        Item('e_T', latex=r'e_\mathrm{T}', minmax=(1, 40)),
        Item('c_T', latex=r'c_\mathrm{T}', minmax=(1, 10)),
        Item('sigma_T_0', latex=r'\bar{\sigma}^\pi_{T}', minmax=(1, 10)),
        Item('a', latex=r'a_\mathrm{T}', minmax=(0.001, 3)),
    )
    n_D = 3  # number of tangential vector components
    # Per-material-point shapes of the state variables; scalar () entries are
    # broadcast over the evaluation grid by the caller.
    state_var_shapes = dict(
        omega_N=(), # damage N
        z_N=(),  # isotropic hardening variable (normal)
        alpha_N =(),  # kinematic hardening variable (normal)
        r_N=(),  # damage-driving internal variable (normal tension)
        eps_N_p=(),  # plastic normal strain
        sig_N=(),  # normal stress
        omega_T=(),  # damage T
        z_T=(),  # isotropic hardening variable (tangential)
        alpha_T_a=(n_D,),  # kinematic hardening vector (tangential)
        eps_T_p_a=(n_D,),  # plastic tangential strain vector
        sig_T_a = (n_D,),  # tangential stress vector
    )
    # --------------------------------------------------------------
    # microplane constitutive law (normal behavior CP + TD)
    # --------------------------------------------------------------
    def get_normal_law(self, eps_N, **Eps):
        """Update the normal state variables in place and return ``sig_N``.

        Tension drives isotropic damage (omega_N); compression drives
        plasticity with isotropic and kinematic hardening.
        """
        omega_N, z_N, alpha_N, r_N, eps_N_p, sig_N = [
            Eps[key] for key in ['omega_N', 'z_N', 'alpha_N', 'r_N', 'eps_N_p','sig_N']
        ]
        E_N = self.E_N
        # When deciding if a microplane is in tensile or compression, we define a strain boundary such that that
        # sigN <= 0 if eps_N < 0, avoiding entering in the quadrant of compressive strains and traction
        sigma_N_Emn_tilde = E_N * (eps_N - eps_N_p)
        pos = sigma_N_Emn_tilde > 1e-6  # microplanes under tension
        pos2 = sigma_N_Emn_tilde < -1e-6  # microplanes under compression
        # 0/1 masks used to switch between the tensile and compressive laws.
        tension = 1.0 * pos
        compression = 1.0 * pos2
        # thermo forces
        Z = self.K_N * z_N * compression
        X = self.gamma_N * alpha_N * compression
        h = (self.sig_0 + Z) * compression
        f_trial = (abs(sigma_N_Emn_tilde - X) - h) * compression
        # threshold plasticity
        thres_1 = f_trial > 1e-10
        # Return-mapping increment; thres_1 zeroes it on elastic planes.
        delta_lamda = f_trial / \
            (E_N / (1 - omega_N) + abs(self.K_N) + self.gamma_N) * thres_1
        eps_N_p += delta_lamda * \
            np.sign(sigma_N_Emn_tilde - X)
        z_N += delta_lamda
        alpha_N += delta_lamda * \
            np.sign(sigma_N_Emn_tilde - X)
        # Damage resistance function of the tensile damage law.
        def R_N(r_N_Emn): return (1.0 / self.Ad) * (-r_N_Emn / (1.0 + r_N_Emn))
        Y_N = 0.5 * tension * E_N * (eps_N - eps_N_p) ** 2.0
        Y_0 = 0.5 * E_N * self.eps_0 ** 2.0
        f = (Y_N - (Y_0 + R_N(r_N))) * tension
        # threshold damage
        thres_2 = f > 1e-6  # NOTE(review): computed but never used below
        def f_w(Y): return 1.0 - 1.0 / (1.0 + self.Ad * (Y - Y_0))
        # Update damage only where the damage criterion is violated.
        omega_N[f > 1e-6] = f_w(Y_N)[f > 1e-6]
        omega_N[...] = np.clip(omega_N, 0, 1.0)
        r_N[f > 1e-6] = -omega_N[f > 1e-6]
        # Damage degrades stiffness only on tensile planes.
        sig_N[...] = (1.0 - tension * omega_N) * E_N * (eps_N - eps_N_p)
        Z = self.K_N * z_N * compression
        X = self.gamma_N * alpha_N * compression
        return sig_N
    # -------------------------------------------------------------------------
    # microplane constitutive law (Tangential CSD)-(Pressure sensitive cumulative damage)
    # -------------------------------------------------------------------------
    def get_tangential_law(self, eps_T_a, eps_N, **Eps):
        """Update the tangential state variables in place and return ``sig_T_a``.

        Pressure-sensitive cumulative sliding damage: the normal stress
        ``sig_N`` shifts the sliding threshold and scales damage growth.
        """
        omega_T, omega_N, z_T, alpha_T_a, eps_T_p_a, eps_N_p, sig_T_a= [
            Eps[key] for key in ['omega_T', 'omega_N', 'z_T', 'alpha_T_a', 'eps_T_p_a', 'eps_N_p','sig_T_a']
        ]
        E_T = self.E_T
        E_N = self.E_N
        # thermodynamic forces
        sig_pi_trial = E_T * (eps_T_a - eps_T_p_a)
        Z = self.K_T * z_T
        X = self.gamma_T * alpha_T_a
        # Euclidean norm of the relative (trial minus back) stress vector.
        norm_1 = np.sqrt(
            np.einsum(
                '...na,...na->...n',
                (sig_pi_trial - X), (sig_pi_trial - X))
        )
        Y = 0.5 * E_T * \
            np.einsum(
                '...na,...na->...n',
                (eps_T_a - eps_T_p_a),
                (eps_T_a - eps_T_p_a))
        sig_N = (1.0 - omega_N) * E_N * (eps_N - eps_N_p)
        # Pressure-sensitive yield function (sig_N < 0 raises the threshold).
        f = norm_1 - self.sigma_T_0 - Z - self.a * sig_N
        plas_1 = f > 1e-15
        elas_1 = f < 1e-15
        delta_lamda = (f + self.a * sig_N) / \
            (E_T / (1.0 - omega_T) + self.gamma_T + self.K_T) * plas_1
        # elas_1 term keeps the norm non-zero to avoid division by zero
        # on elastic planes.
        norm_2 = 1.0 * elas_1 + np.sqrt(
            np.einsum(
                '...na,...na->...n',
                (sig_pi_trial - X), (sig_pi_trial - X))) * plas_1
        # Plastic strain return mapping, component by component.
        eps_T_p_a[..., 0] += plas_1 * delta_lamda * \
            ((sig_pi_trial[..., 0] - X[..., 0]) /
             (1.0 - omega_T)) / norm_2
        eps_T_p_a[..., 1] += plas_1 * delta_lamda * \
            ((sig_pi_trial[..., 1] - X[..., 1]) /
             (1.0 - omega_T)) / norm_2
        eps_T_p_a[..., 2] += plas_1 * delta_lamda * \
            ((sig_pi_trial[..., 2] - X[..., 2]) /
             (1.0 - omega_T)) / norm_2
        # Cumulative damage evolution, amplified by compressive sig_N.
        omega_T += plas_1 * ((1 - omega_T) ** self.c_T) * \
            (delta_lamda * (Y / self.S_T) ** self.r_T) * \
            (self.sigma_T_0 / (self.sigma_T_0 - self.a * sig_N)) ** self.e_T
        # omega_T_Emn[...] = np.clip(omega_T_Emn,0,1.0)
        # Kinematic hardening update.
        alpha_T_a[..., 0] += plas_1 * delta_lamda * \
            (sig_pi_trial[..., 0] - X[..., 0]) / norm_2
        alpha_T_a[..., 1] += plas_1 * delta_lamda * \
            (sig_pi_trial[..., 1] - X[..., 1]) / norm_2
        alpha_T_a[..., 2] += plas_1 * delta_lamda * \
            (sig_pi_trial[..., 2] - X[..., 2]) / norm_2
        z_T += plas_1 * delta_lamda
        # Damaged tangential stress.
        sig_T_a[...] = np.einsum(
            '...n,...na->...na', (1 - omega_T), E_T * (eps_T_a - eps_T_p_a))
        Z = self.K_T * z_T
        X = self.gamma_T * alpha_T_a
        Y = 0.5 * E_T * \
            np.einsum(
                '...na,...na->...n',
                (eps_T_a - eps_T_p_a),
                (eps_T_a - eps_T_p_a))
        return sig_T_a
    def get_corr_pred(self, eps_a, t_n1, **Eps):
        """Corrector-predictor: return (stress, stiffness) for strain ``eps_a``.

        ``eps_a[..., 0]`` is the normal component, ``eps_a[..., 1:]`` the
        tangential components. The returned stiffness is the (undamaged)
        diagonal elastic matrix.
        """
        eps_a_ = np.einsum('...a->a...',eps_a)
        eps_N_n1 = eps_a_[0,...]
        eps_T_a_n1 = np.einsum('a...->...a', eps_a_[1:,...])
        sig_N = self.get_normal_law(eps_N_n1, **Eps)
        sig_T_a = self.get_tangential_law(eps_T_a_n1,eps_N_n1, **Eps)
        D_ = np.zeros(eps_a.shape + (eps_a.shape[-1],))
        D_[..., 0, 0] = self.E_N # * (1 - omega_N)
        D_[..., 1, 1] = self.E_T # * (1 - omega_T)
        D_[..., 2, 2] = self.E_T # * (1 - omega_T)
        D_[..., 3, 3] = self.E_T # * (1 - omega_T)
        sig_a = np.concatenate([sig_N[...,np.newaxis], sig_T_a], axis=-1)
        return sig_a, D_
    def get_eps_NT_p(self, **Eps):
        """Plastic strain tensor
        """
        return Eps['eps_N_p'], Eps['eps_T_p_a']
    def plot_idx(self, ax_sig, ax_d_sig, ax_energy, idx=0):
        """Plot a monotonic loading response for strain component ``idx``.

        idx=0 loads the normal direction, idx>0 a tangential one.
        """
        # NOTE(review): eps_max and n_eps are not defined in this class —
        # presumably inherited from MATS3DEval; confirm.
        eps_max = self.eps_max
        n_eps = self.n_eps
        eps1_range = np.linspace(1e-9,eps_max,n_eps)
        # Fresh zero-initialized state for a single material point.
        Eps = { var : np.zeros( (1,) + shape )
            for var, shape in self.state_var_shapes.items()
        }
        eps_range = np.zeros((n_eps, 4))
        eps_range[:,idx] = eps1_range
        # monotonic load in the normal direction
        sig1_range, d_sig11_range, eps1_in_range = [], [], []
        for eps_a in eps_range:
            sig_a, D_range = self.get_corr_pred(eps_a[np.newaxis, ...], 1, **Eps)
            eps_in = [Eps['eps_N_p'], Eps['eps_T_p_a'][0][0]]
            eps1_in_range.append(eps_in[idx])
            sig1_range.append(sig_a[0, idx])
            d_sig11_range.append(D_range[0, idx, idx])
        sig1_range = np.array(sig1_range, dtype=np.float_)
        eps1_in_range = np.array(eps1_in_range, dtype=np.float_).squeeze()
        eps1_range = eps1_range[:len(sig1_range)]
        # print(eps1_in_range)
        W_arr, U_arr, G_arr = self.energy_calc(eps1_range,sig1_range,eps1_in_range)
        ax_sig.plot(eps1_range, sig1_range,color='blue')
        d_sig11_range = np.array(d_sig11_range, dtype=np.float_)
        ax_d_sig.plot(eps1_range, d_sig11_range, linestyle='dashed', color='gray')
        ax_sig.set_xlabel(r'$\varepsilon_{11}$ [-]')
        ax_sig.set_ylabel(r'$\sigma_{11}$ [MPa]')
        ax_d_sig.set_ylabel(r'$\mathrm{d} \sigma_{11} / \mathrm{d} \varepsilon_{11}$ [MPa]')
        # Finite-difference tangent stiffness for comparison with D_.
        ax_d_sig.plot(eps1_range[:-1],
                (sig1_range[:-1]-sig1_range[1:])/(eps1_range[:-1]-eps1_range[1:]),
                color='orange', linestyle='dashed')
        ax_energy.plot(eps1_range, W_arr, lw=0.5, color='black', label=r'$W$ - Input work')
        ax_energy.plot(eps1_range, G_arr, '--', color='black', lw=0.5, label=r'$W^\mathrm{inel}$ - Inelastic work')
        ax_energy.fill_between(eps1_range, W_arr, G_arr,
                    color='green', alpha=0.2)
        ax_energy.fill_between(eps1_range, G_arr, np.zeros_like(G_arr),
                    color='black', alpha=0.2)
        # NOTE(review): non-raw string — '\v' is a vertical-tab escape;
        # the sibling labels above use raw strings.
        ax_energy.set_xlabel('$\varepsilon_{11}$ [-]');
        ax_energy.set_ylabel(r'$E$ [Nmm]')
        ax_energy.legend()
    def subplots(self, fig):
        """Create the 4-panel figure layout (stress/energy, normal + tangential)."""
        ax_sig_N, ax_ener_N, ax_sig_T, ax_ener_T = fig.subplots(1,4)
        ax_d_sig_N = ax_sig_N.twinx()
        ax_d_sig_T = ax_sig_T.twinx()
        return ax_sig_N, ax_d_sig_N, ax_ener_N, ax_sig_T, ax_d_sig_T, ax_ener_T
    def update_plot(self, axes):
        """Redraw normal (idx=0) and tangential (idx=1) responses on ``axes``."""
        ax_sig_N, ax_d_sig_N, ax_ener_N, ax_sig_T, ax_d_sig_T, ax_ener_T = axes
        self.plot_idx(ax_sig_N, ax_d_sig_N, ax_ener_N, 0)
        self.plot_idx(ax_sig_T, ax_d_sig_T,ax_ener_T, 1)
    def energy_calc(self,eps_a,sig_a,eps_in):
        """Return (input work, elastic energy, inelastic work) along the path.

        W is the cumulative integral of sig d(eps); U the recoverable
        elastic part; G = W - U the dissipated/inelastic part.
        """
        W_arr = (
            cumtrapz(sig_a, eps_a, initial=0)
        )
        eps_el = (eps_a - eps_in)
        U_arr = (
            sig_a * eps_el / 2.0
        )
        G_arr = W_arr - U_arr
        return W_arr, U_arr, G_arr
# if __name__ == "__main__":
# plane = VUNTIM()
# fig = plt.figure()
# ax_sig_N, ax_ener_N, ax_sig_T, ax_ener_T = fig.subplots(1, 4)
# ax_d_sig_N = ax_sig_N.twinx()
# ax_d_sig_T = ax_sig_T.twinx()
# axes = ax_sig_N, ax_d_sig_N, ax_ener_N, ax_sig_T, ax_d_sig_T, ax_ener_T
# plane.plot_idx(ax_sig_N, ax_d_sig_N, ax_ener_N, 0)
# plane.plot_idx(ax_sig_T, ax_d_sig_T,ax_ener_T, 1) | bmcs-group/bmcs_matmod | bmcs_matmod/ntim/vuntim.py | vuntim.py | py | 12,151 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ibvpy.tmodel.mats3D.mats3D_eval.MATS3DEval",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "bmcs_utils.api.Float",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "bmcs_utils.api.Float",
"line_number": 30,
"usage_type": "call"
},
{
... |
2004723473 | """
Project Settings file
"""
import os
from starlette.datastructures import CommaSeparatedStrings, Secret

# Common prefix for all API routes.
default_route_str = "/api"
# Hosts allowed by CORS/trusted-host middleware; "*" allows everything.
ALLOWED_HOSTS = CommaSeparatedStrings(os.getenv("ALLOWED_HOSTS", "*"))
# NOTE(review): the fallback secret is checked into source control — set the
# SECRET_KEY environment variable in any real deployment.
SECRET_KEY = Secret(os.getenv(
    "SECRET_KEY",
    "4bf4f696a653b292bc674daacd25195b93fce08a8dac7373b36c38f63cd442938b12ef911bd5d7d0")
)
# Mongo configuration
mongo_max_connections = int(os.getenv("MAX_CONNECTIONS_COUNT", 10))
mongo_min_connections = int(os.getenv("MIN_CONNECTIONS_COUNT", 10))
mongo_db = "fastapi"
# NOTE(review): host/port are hard-coded to localhost:27017; only the
# database name is configurable.
mongo_url = f"mongodb://localhost:27017/{mongo_db}"
# Sendgrid configuration
SG_API = os.getenv(
    "SENDGRID_API",
    "")
FROM_EMAIL = "noreply@email.com"
| marirs/fastapi-boilerplate | server/core/settings.py | settings.py | py | 687 | python | en | code | 141 | github-code | 36 | [
{
"api_name": "starlette.datastructures.CommaSeparatedStrings",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "starlette.datastructures.Secret",
"line_number": 11,
"usage_type": "call"
},
{... |
25873892301 | from Physics import Physics, MassPoint, Position
import matplotlib.pyplot as plt
import numpy as np
import re
# Import 3D Axes
from mpl_toolkits.mplot3d import axes3d
def plotMassPoints(phyInstance):
    """Scatter-plot all mass points of *phyInstance* in a 3D view.

    Parameters
    ----------
    phyInstance : Physics
        Simulation instance; ``getMassPoints()`` must return objects with a
        ``position.arr`` coordinate array.
    """
    massPts = phyInstance.getMassPoints()
    pointArray = list(mp.position.arr for mp in massPts)
    # Stack the per-point coordinate arrays into an (n, 3) matrix and split
    # into the three coordinate columns.
    ptMatrix = np.stack(pointArray, axis=0)
    x, y, z = np.hsplit(ptMatrix, 3)
    plottingWindow = plt.figure()
    # plotting normal 3D view
    ax = plottingWindow.add_subplot(111, projection='3d')
    ax.scatter(x, y, z, label='Mass points')
    ax.axis('equal')
    ax.legend()
    # Labeling — fixed: the x/y labels were swapped in the original
    # (sibling plotWithVolumes labels the axes correctly).
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
    ax.set_zlabel('Z axis')
    plt.show()
def plotWithVolumes(phyInstance, dictVol):
    """Plot force-volume sample points, mass points and force vectors.

    Left subplot: 3D scatter of the force-volume points (``dictVol`` entries,
    each a tuple whose second element has an ``.arr`` coordinate array) plus
    the simulation's mass points, with quiver arrows for the force at each
    volume point. Right subplot: histogram of the force magnitudes.
    Does nothing (beyond a console note) when ``dictVol`` is empty.
    """
    if len(dictVol) == 0:
        print("<Nothing to plot>")
        return
    # plotting setup
    plottingWindow = plt.figure('ForceVolume',figsize=plt.figaspect(0.5))
    ax = plottingWindow.add_subplot(1,2,1, projection='3d')
    # prep plotting volumes
    pointArray = list(tup[1].arr for tup in dictVol)
    ptMatrix = np.stack(pointArray,axis=0)
    x,y,z = np.hsplit(ptMatrix, 3)
    # prep mass points
    massPts = phyInstance.getMassPoints()
    massPtArray = list(mp.position.arr for mp in massPts)
    if len(massPts) > 0:
        # Reuse ptMatrix as a scratch variable for the mass-point matrix.
        ptMatrix = np.stack(massPtArray,axis=0)
        mx,my,mz = np.hsplit(ptMatrix, 3)
        ax.scatter(mx,my,mz, label='Mass Points')
    if len(pointArray) != 0:
        # add vectors
        endPositions = list()
        normList = []
        for pt in pointArray:
            # NOTE(review): project API name contains a typo
            # ("Rirection") — kept as-is, defined on the Physics class.
            norm, direction = phyInstance.getNormAndRirectionVecFromPosition(Position.from_array(pt))
            endPositions.append(norm * direction)
            normList.append(norm)
        ptMatrix_end = np.stack(endPositions, axis=0)
        endX,endY,endZ = np.hsplit(ptMatrix_end,3)
        ax.quiver(x,y,z,endX,endY,endZ,length=100, normalize=True)
        # plotting histogram over applied forces
        ax2 = plottingWindow.add_subplot(1,2,2)
        ax2.hist(normList)
        ax2.set_xlabel("| applied force |")
        ax2.set_ylabel("Elements in bin")
    ax.scatter(x,y,z, label='ForceVolume')
    ax.legend()
    # Labeling
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
    ax.set_zlabel('Z axis')
    plottingWindow.tight_layout(w_pad=6)
    plt.show()
def deleteRotationInPathElements(inputStr):
    """Strip the ``Rotation`` line from every PathNode actor in a UDK map dump.

    The text is first split into segments so that each segment contains at
    most one ``Begin Actor``/``End Actor`` pair; the Rotation line is then
    removed only from segments describing ``Class=PathNode`` actors.
    Non-PathNode actors and surrounding text are returned unchanged.
    """
    # Split on "Begin Actor" and re-attach the delimiter so every segment
    # starts with its own marker (the first segment is the file prefix).
    beginParts = inputStr.split("Begin Actor")
    segments = [beginParts.pop(0)]
    for part in beginParts:
        segments.append("Begin Actor" + part)
    # Raw strings for both pattern and replacement: the original non-raw
    # "\g<1>" relied on an invalid escape sequence (SyntaxWarning on
    # modern Python). Compiled once instead of per segment.
    rotationPattern = re.compile(
        r"(Begin Actor Class=PathNode (?:.|\n)*?) Rotation.*\n",
        flags=re.MULTILINE,
    )
    resultParts = []
    for segment in segments:
        endParts = segment.split("End Actor")
        subSegments = [endParts.pop(0)]
        # Distinct loop variable — the original shadowed the outer `el`.
        for part in endParts:
            subSegments.append("End Actor" + part)
        for subSegment in subSegments:
            resultParts.append(rotationPattern.sub(r"\g<1>", subSegment))
    return ''.join(resultParts)
def changeForceMode(programState):
    """Interactively select the UDK constant-force application mode.

    Prompts repeatedly until the user types one of the known shortcuts,
    then stores the chosen mode string on ``programState.forceMode``.
    """
    # Shortcut (lower-cased user input) -> UDK force mode name.
    modeByShortcut = {
        'f': "ForceMode_Force",
        'a': "ForceMode_Acceleration",
        'i': "ForceMode_Impulse",
        'v': "ForceMode_Velocity",
        'si': "ForceMode_SmoothImpulse",
        'sv': "ForceMode_SmoothVelocity",
    }
    userChoice = ""
    listedOptions = """ - 'ForceMode_Force' <F> : Applies the specified constant force on all contained objects (default).
 - 'ForceMode_Acceleration' <A> : Accelerates all objects inside with a constant rate to the target vector. (good alternative)
 - 'ForceMode_Impulse' <I> : Applies an impuls continuously (? makes no sense...).
 - 'ForceMode_Velocity' <V> : Propells all entered dynamic objects with a constant speed.
 - 'ForceMode_SmoothImpulse' <sI> : Like impulse mode, but applied along a longer time interval (?)
 - 'ForceMode_SmoothVelocity' <sV> : Like velocity mode, but no direct acceleration to target velocity.
Type the desired mode then enter.
"""
    while True:
        print("\nThis setting will remain unchanged for the duration of this program or until changed again.\nUDK allows the following force application in the constant mode: \n" + listedOptions)
        userInput = input().lower()
        if userInput in modeByShortcut:
            userChoice = modeByShortcut[userInput]
            break
    print(f"Scelected option \'{userChoice}\'\n")
    programState.forceMode = userChoice
{
"api_name": "numpy.stack",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.hsplit",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",... |
6778601752 | from utils import session
from models import User, Organization
from werkzeug.wrappers import Request
from werkzeug.utils import cached_property
from werkzeug.contrib.securecookie import SecureCookie
# you could just use
# os.urandom(20) to get something random
# NOTE(review): hard-coded, checked-in signing key — rotate and load from the
# environment for any real deployment; os.urandom(20) would invalidate
# existing cookies on each restart.
SECRET_KEY = '\xc9\xd5+\xe7U\x8f\xef\r\xa60\xed\xf4\x1cp\xf7tA\xed\x9f\xd1'
#SECRET_KEY = os.urandom(20)
class CustomRequest(Request):
    """Werkzeug Request subclass exposing the session, user and organization.

    Each accessor is a ``cached_property``, so the cookie is decoded and the
    database queried at most once per request.
    """
    @cached_property
    def client_session(self):
        """Return the SecureCookie session decoded from the request cookie."""
        return SecureCookie.load_cookie(self, secret_key=SECRET_KEY)
    @cached_property
    def client_user_object(self):
        """Return the logged-in User, or None; drops stale user_id keys."""
        if 'user_id' not in self.client_session:
            return None
        else:
            user_id = self.client_session['user_id']
            user_result = session.query(User).filter(User.user_id==user_id).all()
            # Take the last matching row (user_id is expected to be unique,
            # so at most one iteration runs).
            user_object = None
            for user in user_result:
                user_object = user
            if not user_object:
                # Session refers to a user that no longer exists.
                del self.client_session["user_id"]
                # NOTE(review): bare attribute access — this statement has no
                # effect; presumably `self.client_session.modified = True` was
                # intended (deletion already marks the cookie modified).
                self.client_session.modified
                return None
            else:
                return user_object
    @cached_property
    def client_organization_object(self):
        """Return the Organization of the logged-in user, or None."""
        if not self.client_user_object:
            return None
        else:
            org_id = self.client_user_object.user_organization_id
            org_result = session.query(Organization).filter(Organization.organization_id==org_id).all()
            # Take the last matching row, mirroring client_user_object.
            org_object = None
            for org in org_result:
                org_object = org
            return org_object
| anzarafaq/iqp | src/py/iqpapp/custom_request.py | custom_request.py | py | 1,551 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "werkzeug.wrappers.Request",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "werkzeug.contrib.securecookie.SecureCookie.load_cookie",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "werkzeug.contrib.securecookie.SecureCookie",
"line_number": ... |
5709821710 | """Example of script that run Disim and plots data from logs."""
import numpy as np
import os
import matplotlib.pylab as plt
from scipy import optimize
def run_disim(idm_args):
    """Launch a headless 6-hour Disim run on the I-210W map.

    ``idm_args`` maps Lua parameter names to values; the mapping is
    serialized into the ``--lua-args`` option appended to the command line.
    """
    serialized = "".join("{}={},".format(name, value)
                         for name, value in idm_args.items())
    lua_args = "--lua-args=\"" + serialized + "\""
    duration = 3600 * 6  # simulated seconds: six hours from 06:00
    os.system('../../disim --start-time=06:00 --duration={} --lua="../car/IDM_MOBIL.lua" '
              '--luacontrol="../control/I-210W.lua" --map="../../maps/I-210W.map" --ncpu=8 '
              '--record --nogui --time-step=0.5 '.format(duration) + lua_args)
def plot():
    """Plot the fundamental diagram (flow vs. density) from Disim logs.

    Averages the four Huntington detector logs, fits a piecewise-linear
    (triangular) fundamental diagram with a free breakpoint, and shows the
    scatter plus the fitted curve.
    """
    flow = [np.genfromtxt('logs/huntington_flow_{}.txt'.format(i), delimiter=' ') for i in range(1, 5)]
    density = [np.genfromtxt('logs/huntington_density_{}.txt'.format(i), delimiter=' ') for i in range(1, 5)]
    # np.stack requires a sequence of arrays — the original passed a
    # generator, which newer NumPy rejects with a TypeError.
    flow = np.mean(np.stack([f[:, 1] for f in flow]), axis=0)
    density = np.mean(np.stack([d[:, 1] for d in density]), axis=0)

    def piecewise_linear(x, x0, y0, k):
        # Two segments meeting at (x0, y0): free-flow branch through the
        # origin, then a congested branch with slope k.
        return np.piecewise(x, [x < x0], [lambda x: y0 / x0 * x, lambda x: k * x + y0 - k * x0])

    p, e = optimize.curve_fit(piecewise_linear, density, flow, p0=[40., 2000., -1.])
    x = np.linspace(np.min(density), np.max(density), 100)
    plt.figure()
    plt.scatter(density, flow)
    plt.plot(x, piecewise_linear(x, *p), lw=2, color='red')
    plt.xlabel('Average Density [veh/km/lane]')
    plt.ylabel('Average Flow [veh/h/lane]')
    plt.show()
if __name__ == '__main__':
    # IDM/MOBIL parameters forwarded to the Lua car model
    # (speeds converted from km/h to m/s via /3.6).
    idm_args = {
        'v0': 105/3.6, # 65 mph
        'v0_truck': 85/3.6, # 55 mph
        'a': 1.4,
        'a_truck': 0.7,
        'b': 2.0,
        'gamma': 4.0,
        't': 1.0,
        't_truck': 1.5,
        's0': 2.0,
        's0_truck': 4.0,
        'b_safe': 4.0,
        'p': 0.25,
    }
    # Uncomment to (re)run the simulation before plotting.
    # run_disim(idm_args)
    plot()
| sgowal/disim | scripts/python/run_and_plot.py | run_and_plot.py | py | 1,741 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.system",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_nu... |
70089050984 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['text.usetex'] = True
def main() -> None:
    """Render combined excitation/emission PSFs for TEM modes (0..3, 0..3).

    Multiplies each TEM_{m,n} excitation PSF (read from ``tem_m_{m}_n_{n}.csv``)
    with the TEM_{0,0} emission PSF, normalizes to the peak, and saves one
    colormesh image per mode pair.
    """
    w = 1.  # beam-waist normalization for the axes
    emission_psf = np.genfromtxt('tem_m_0_n_0.csv', delimiter=',')
    y_linspace = np.linspace(-2.0, 2.0, np.shape(emission_psf)[0])
    x_linspace = np.linspace(-2.0, 2.0, np.shape(emission_psf)[1])
    for m in range(0, 4):
        for n in range(0, 4):
            excitation_psf = np.genfromtxt(f'tem_m_{m}_n_{n}.csv', delimiter=',')
            total_psf = np.multiply(excitation_psf, emission_psf)
            total_psf *= 1. / np.max(total_psf)
            # Direct call instead of the original exec() string-building —
            # same title text, no dynamic code execution.
            plt.title(r"$\mathbf{TEM}_{0,0}\circ\mathbf{TEM}_{" + f"{m},{n}" + r"}$", fontsize=16)
            plt.pcolormesh(x_linspace / w, y_linspace / w, total_psf, cmap='Blues')
            plt.xlabel(r"$x/w$", fontsize=16)
            plt.ylabel(r"$y/w$", fontsize=16)
            plt.xticks(fontsize=14)
            plt.yticks(fontsize=14)
            cbar = plt.colorbar()
            cbar.set_label(r"$P_{x,y}/P_{max}$", fontsize=16, rotation=-90, labelpad=20)
            # cbar.set_ticks
            plt.gca().set_aspect(1)
            plt.tight_layout()
            plt.savefig(f'total_psf_m_{m}_n_{n}.png', dpi=1200, bbox_inches='tight')
            plt.close()


if __name__ == '__main__':
    main()
{
"api_name": "matplotlib.rcParams",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.shape",... |
5209818029 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 16:04:51 2020
@authors:
Verifiers: Alessandro Busatto, Paolo Graziani, Aurora Rossi, Davide Roznowicz;
Applet: Giacomo Di Maggio, Marco Emporio, Marco Fattorelli, Sebastiano Gaiardelli, Francesco Trotti;
Map: Rosario Di Matteo, Marco Emporio, Adriano Tumminelli;
OneDrive: Marco Fattorelli, Davide Roznowicz;
Integration: Alice Raffaele, Romeo Rizzi.
"""
import argparse
import csv
import generate_exam as g
import os
from pathlib import Path
import sys
import time
REL_PATH_SHUTTLE = 'shuttle' # main folder where to put all exams generated
ALL_EXER_PER_STUD = 'all_exercises_list_' # csv file where to save all exercises assigned to students (to facilitate correction)
if __name__ == "__main__":
    parser=argparse.ArgumentParser(
        description='''Script to generate all exams for a given date and a given list of students''',
        epilog="""-------------------""")
    parser.add_argument('exam_date', type=str, default='2020-06-30', help='exam date in the format YYYY-MM-DD')
    # parser.add_argument('students_list_csv', type=str, default=os.getcwd()+'/students_lists/2020-06-30/lista_studenti_iscritti_con_chiavi.csv/', help='csv file with students'' data')
    parser.add_argument("--with_uncompressed_folder", help="the generated anchored folder will contain also the uncompressed folder",
                        action="store_true")
    args = parser.parse_args()
    # Sanity-check argv length: exam_date plus the optional flag.
    if args.with_uncompressed_folder:
        assert len(sys.argv) == 3
        print("The generated anchored folders will contain also the respective uncompressed folder.")
    else:
        assert len(sys.argv) == 2
    exam_date = str(sys.argv[1])
    # CSV of enrolled students for the given exam date.
    FILE_STUDENTS_LIST = "students_lists/"+exam_date+"/lista_studenti_iscritti_con_chiavi.csv"
    # Creation of shuttle
    start_time = time.time()
    PATH_SHUTTLE = os.getcwd() + '/' + REL_PATH_SHUTTLE
    if os.path.exists(PATH_SHUTTLE):
        answer = None # if the exam has been already created for given student and date, ask if re-write it or not
        while answer not in ("y", "n"):
            answer = input("Do you want to generate again the shuttle folder? Enter 'y' or 'n': ")
            if answer == "y": # it empties the folder PATH_SHUTTLE
                # Bottom-up walk so files are removed before their directories.
                for root, dirs, files in os.walk(PATH_SHUTTLE, topdown=False):
                    for name in files:
                        os.remove(os.path.join(root, name))
                    for name in dirs:
                        os.rmdir(os.path.join(root, name))
                os.rmdir(PATH_SHUTTLE)
                os.mkdir(PATH_SHUTTLE)
                keep_going = 1
            elif answer == "n":
                keep_going = 0
            else:
                print("Please enter 'yes' or 'no'")
    else:
        os.mkdir(PATH_SHUTTLE)
        keep_going = 1
    # Generation of the exams
    if keep_going:
        if os.path.isfile(ALL_EXER_PER_STUD + exam_date + '.csv'): # it deletes the csv file with all exercises per student if already existing
            Path(ALL_EXER_PER_STUD + exam_date + '.csv').unlink()
        all_exer_list = [] # to save all exercises assigned to each student
        with open(FILE_STUDENTS_LIST) as csv_file: # it reads the students list csv and generates an exam for each one of them
            csv_reader = csv.reader(csv_file, delimiter=',')
            line_count = 0
            for row in csv_reader:
                # Fixed CSV layout: 0=matricola, 2=anchor, 4=student key,
                # 5=first name, 6=surname.
                matricola = row[0]
                anchor = row[2]
                student_ID = row[4]
                name = row[5]
                surname = row[6]
                print('\nGenerating the exam for ' + name + ' ' + surname + ' (' + matricola + ')...')
                e_list = matricola + ',' + name + ',' + surname + ','
                chosen_exer = g.gen_exam(exam_date, anchor, student_ID, matricola, name, surname, args.with_uncompressed_folder)
                for e in chosen_exer:
                    e_list += str(e) + ','
                e_list += '\n'
                line_count += 1
                all_exer_list += [e_list]
            print(f'\nGenerated {line_count} exams.')
        exer_file = open(ALL_EXER_PER_STUD + exam_date + '.csv','w+') # writing the csv file with all exercises per student
        for line in all_exer_list:
            exer_file.write(str(line))
        exer_file.close()
        print("--- %s seconds ---" % (time.time() - start_time))
| romeorizzi/esami-RO-public | old/generate_all_exams_given_list.py | generate_all_exams_given_list.py | py | 4,434 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"l... |
27048685248 | import pygame
class Berry(pygame.sprite.Sprite):
    """A 16x16 red berry sprite at a fixed position on the board."""

    def __init__(self, x, y):
        """Create the berry surface and place its rect at (x, y)."""
        # Initialize the Sprite base class — required before the sprite can
        # be added to pygame sprite groups (the original skipped this call).
        super().__init__()
        width = height = 16
        red = (255, 0, 0)
        self.image = pygame.Surface((width, height))
        self.image.fill(red)
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y

    def IsEaten(self, snake):
        """Return True if any piece of *snake* overlaps this berry."""
        for piece in snake.pieces:
            if pygame.sprite.collide_rect(self, piece):
                return True
        return False
| JoeZlonicky/Snake | Source/Berry.py | Berry.py | py | 478 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.sprite",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.sprite.collide_rect",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.spr... |
26820836865 | #Pardhu Gorripati
#sqlalchemy components
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Float
from sqlalchemy.orm import sessionmaker
from sqlalchemy import and_, or_, not_
Base = declarative_base()
class baseRoom(Base):
    """ORM model for a hotel room with type, price and availability status."""
    #Database Table name
    __tablename__ = 'room'
    #Table Columns
    Id = Column(Integer, primary_key=True)
    Type = Column(String)
    # NOTE(review): name mangling stores this as _baseRoom__Price, and a
    # price as part of the composite primary key is unusual — confirm intent.
    __Price = Column(Float, primary_key=True)
    Status = Column(String)
    def __init__(self, Id, Type, Price):
        """Create a room; status starts as available ('A')."""
        self.Id = Id
        self.Type = Type
        self.__Price = Price
        self.Status = 'A' #Default Room Status 'A' - Available
    def getPrice(self):
        """Return the (name-mangled, private) room price."""
        return self.__Price
class personalProfile(Base):
    """ORM model for a person (guest or staff) with contact details."""
    #Database Table name
    __tablename__ = 'person'
    #Table Columns
    Id = Column(Integer, primary_key=True)
    FName = Column(String)
    LName = Column(String)
    Type = Column(String)
    phone = Column(Integer)
    overdue = Column(String)
    def __init__(self, Id, FName, LName, Type, Phone):
        """Create a person; the overdue flag starts empty."""
        self.Id = Id
        self.FName = FName
        self.LName = LName
        self.Type = Type
        self.phone = Phone
        self.overdue = ''
    def getName(self):
        #Method to return User Name
        return str(self.FName) + ' ' + str(self.LName)
class baseReservation(Base):
    """ORM model linking a room and a person for a date range."""
    #Database Table name
    __tablename__ = 'reservations'
    #Table Columns
    RoomId = Column(Integer, primary_key=True)
    PersonId = Column(Integer, primary_key=True)
    FromDate = Column(Integer, primary_key=True)
    ToDate = Column(Integer)
    Status = Column(String)
    Charge = Column(Float)
    paymentStatus = Column(String)
    def __init__(self, RoomId, PersonId, FromDate, ToDate, Charge):
        """Create a reservation; status starts as 'Booked', payment unset."""
        self.RoomId = RoomId
        self.PersonId = PersonId
        self.FromDate = FromDate
        self.ToDate = ToDate
        self.Status = 'Booked' #Default Initial Status Booked
        self.Charge = Charge
        self.paymentStatus = '' #Default Payment Status Blank
    def getPersonInReservation(self):
        """Return the Id of the person holding this reservation."""
        return self.PersonId
class manageDB():
    """Base class for the manager helpers; holds the shared DB session."""
    def __init__(self, databaseSession):
        # SQLAlchemy session used by all query/commit operations.
        self.dbSession = databaseSession
class managePerson(manageDB):
    """CRUD helpers for personalProfile rows."""

    def addPerson(self, Id, FName, LName, Type, Phone):
        """Insert a new person, commit, and echo the stored details."""
        person = personalProfile(Id, FName, LName, Type, Phone)
        self.dbSession.add(person)
        self.dbSession.commit()
        self.printPersonDetails(person)

    def printPersonDetails(self, person):
        """Print a one-line summary of *person*."""
        print(person.getName(), "with ID", person.Id)

    def searchById(self, Id):
        """Return the person with the given Id, or None when absent."""
        person = self.dbSession.query(personalProfile).filter_by(Id=Id).first()
        # `is not None` is the idiomatic identity check (PEP 8); behavior is
        # unchanged since ORM instances use default identity equality.
        if person is not None:
            return person
class manageRoom(manageDB):
    """CRUD and availability helpers for baseRoom rows."""

    def addRoom(self, Id, Type, Price):
        """Insert a new room, commit, and echo the stored details."""
        room = baseRoom(Id, Type, Price)
        self.dbSession.add(room)
        self.dbSession.commit()
        self.printRoomDetails(room, True)

    def printRoomDetails(self, room, DbConfirmation=None):
        """Print a room summary; DbConfirmation=True adds 'added to Database'."""
        if DbConfirmation == True:
            print("Room", room.Id, "Type", room.Type, "with price $", room.getPrice(), "added to Database")
        else:
            print("Room", room.Id, "Type", room.Type, "with price $", room.getPrice())

    def checkDuplicateId(self, Id):
        """Return True when a room with this Id already exists."""
        # Direct boolean expression instead of the if/else returning
        # True/False; `is not None` replaces the `!= None` comparison.
        return self.searchRoomById(Id) is not None

    def searchRoomById(self, Id):
        """Return the room with the given Id, or None when absent."""
        room = self.dbSession.query(baseRoom).filter_by(Id=Id).first()
        if room is not None:
            return room

    def checkAvailablity(self, Type=None):
        """Print all available ('A') rooms, optionally filtered by Type."""
        if Type is not None:
            rooms = self.dbSession.query(baseRoom).filter_by(Type=Type, Status='A').all()
        else:
            rooms = self.dbSession.query(baseRoom).filter_by(Status='A').all()
        for room in rooms:
            self.printRoomDetails(room)
class manageReservation(manageDB):
    """Create, look up, print and cancel reservations."""

    def addReservation(self, RoomId, PersonId, FromDate, ToDate, RoomCharge):
        """Insert a new reservation, commit, and echo the stored details."""
        reservationToAdd = baseReservation(RoomId, PersonId, FromDate, ToDate, RoomCharge)
        self.dbSession.add(reservationToAdd)
        self.dbSession.commit()
        self.printReservationDetails(reservationToAdd, True)

    def searchReservation(self, RoomId, PersonId, FromDate):
        """Return the reservation matching the composite key, or None."""
        return self.dbSession.query(baseReservation).\
            filter_by(RoomId=RoomId, PersonId=PersonId, FromDate=FromDate).first()

    def printReservationDetails(self, reservation, DbConfirmation=None):
        """Print a reservation summary; DbConfirmation=True uses the DB-add wording."""
        rooms = manageRoom(self.dbSession)
        room = rooms.searchRoomById(reservation.RoomId)
        if room is None:
            print("No Room found for Room ID", reservation.RoomId)
            # Bail out: the original fell through and crashed with an
            # AttributeError on room.Type below.
            return
        persons = managePerson(self.dbSession)
        personId = reservation.getPersonInReservation()
        person = persons.searchById(personId)
        if person is None:
            print("No Person found with ID", personId)
            # Bail out: person.getName() below would raise AttributeError.
            return
        if DbConfirmation == True:
            print("Room", reservation.RoomId, "Type", room.Type, "with status", reservation.Status, "& price $", reservation.Charge, "for", person.getName(), "added to Database")
        else:
            print("Room", reservation.RoomId, "Type", room.Type, "with status", reservation.Status, "& price $", room.getPrice())

    def cancelReservation(self, roomId, personId, FromDate):
        """Mark the matching reservation as cancelled and commit."""
        reservation = self.searchReservation(roomId, personId, FromDate)
        if reservation is None:
            # Guard: the original raised AttributeError for unknown keys.
            print("No reservation found to cancel")
            return
        reservation.Status = "Cancelled"
        self.dbSession.commit()
class databaseSession(object):
    """Process-wide singleton owning the (lazily created) SQLAlchemy session."""
    # Private singleton instance and the shared session it hands out.
    __instance = None
    session = None
    def __new__(cls):
        """Always return the single shared instance (classic singleton).

        The parameter was previously misnamed ``self``; ``cls`` is the
        conventional name for __new__'s class argument.
        """
        if databaseSession.__instance is None:
            databaseSession.__instance = object.__new__(cls)
        return databaseSession.__instance
    def createSession(self):
        """Create the in-memory SQLite session on first call; reuse it afterwards."""
        if self.session is None:
            # Initiate DB connection to memory
            engine = create_engine('sqlite:///:memory:', echo=False)
            Base.metadata.create_all(engine)
            Session = sessionmaker(bind=engine)
            # Capture connection Session
            self.session = Session()
        return self.session
class GetInputAndValidate():
    """Console prompting helpers that retry until the input parses."""
    def inputAny(self, inputText):
        """Return the raw response to *inputText* with no validation."""
        return input(inputText)
    def inputText(self, inputText, errorText=None):
        """Prompt until the answer converts to str (str() never fails, so one pass)."""
        while True:
            try:
                answer = str(input(inputText))
            except ValueError:
                self.errorMessage(errorText, "for text")
            else:
                return answer
    def validateNumberInput(self, inputText, errorText=None ):
        """Prompt repeatedly until the answer parses as an int."""
        while True:
            try:
                answer = int(input(inputText))
            except ValueError:
                self.errorMessage(errorText, "for number")
            else:
                return answer
    def validateFloatInput(self, inputText, errorText=None ):
        """Prompt repeatedly until the answer parses as a float."""
        while True:
            try:
                answer = float(input(inputText))
            except ValueError:
                self.errorMessage(errorText, "for number with decimals")
            else:
                return answer
    def invalidEntry(self, text ):
        """Tell the user their entry was invalid, followed by a blank line."""
        print("Invalid " + text + ", please try again")
        print()
    def errorMessage(self, errorText, defaultText ):
        """Report an invalid entry, preferring the caller-supplied message."""
        self.invalidEntry(defaultText if errorText is None else errorText)
class inputRoomDetails(GetInputAndValidate):
    """Interactive prompts for gathering a room's number, type and price."""
    def __init__(self):
        # Canonical spellings of the accepted room types.
        self.roomType = ["Queen","King","Twin"]
    def checkRoomType(self, roomType):
        """Return the canonical spelling of *roomType*, or None if unknown."""
        wanted = roomType.lower()
        for candidate in self.roomType:
            if candidate.lower() == wanted:
                return candidate
    def getRoomNumber(self, Add=None):
        """Prompt for a room number; when adding, insist it is not already used."""
        if Add is None:
            return self.validateNumberInput("Please Enter Room Number: ")
        dbConnection = databaseSession()
        roomCollection = manageRoom( dbConnection.session )
        while True:
            Id = self.validateNumberInput("Please Enter Room Number: ")
            if not roomCollection.checkDuplicateId( Id ):
                return Id
            print("Room number already Used, please enter a unique Room number")
    def getRoomPrice(self):
        """Prompt for the nightly price as a float."""
        return self.validateFloatInput("Please Enter Price for the Room: $", "for price")
    def getRoomType(self):
        """Prompt until a recognised room type is entered; return its canonical form."""
        while True:
            self.printRoomTypes()
            candidate = self.checkRoomType(self.inputText("Please Enter valid Room Type: ", "Room type"))
            if candidate is not None:
                return candidate
            self.errorMessage(None, "Room Type" )
    def printRoomTypes(self):
        """List the accepted room types."""
        print("\nSelect Room Type from below:")
        for option in self.roomType:
            print(option)
    def getInputRoomDetails(self, add=None):
        """Collect (number, type, price) in that order."""
        return self.getRoomNumber(add), self.getRoomType(), self.getRoomPrice()
class inputReservationDetails(GetInputAndValidate):
    """Interactive prompts for reservation details.

    NOTE(review): currently only duplicates inputRoomDetails' room-type
    list; the reservation-specific prompting appears unfinished.
    """
    def __init__(self):
        # Same canonical room-type list as inputRoomDetails — consider sharing.
        self.roomType = ["Queen","King","Twin"]
def main():
    """Demo driver: seeds rooms/people, then exercises add/cancel reservation flows.

    Order matters: the session must be created before any manage* helper is
    constructed, and room 1 / person 1 must exist before the reservation calls.
    """
    dbConnection = databaseSession()
    dbConnection.createSession()
    roomDetails = inputRoomDetails( )
    roomCollection = manageRoom( dbConnection.session )
    # Seed one known room, then add a second interactively (True => uniqueness check).
    roomCollection.addRoom(1, 'Queen', 12)
    roomId, roomType, roomPrice = roomDetails.getInputRoomDetails(True)
    roomCollection.addRoom(roomId, roomType, roomPrice)
    print()
    print("Show all availability")
    roomCollection.checkAvailablity()
    print()
    print("Show only Queen availability")
    roomCollection.checkAvailablity("Queen")
    profiles = managePerson( dbConnection.session )
    profiles.addPerson(1, 'David', 'Thonny', 'C', 9521231234)
    print("Person added to DB")
    print()
    print("Searching person with ID 1")
    dbPerson = profiles.searchById(1)
    profiles.printPersonDetails(dbPerson)
    # Reserve room 1 for person 1, cancel it, then show the cancelled record.
    mngReservations = manageReservation( dbConnection.session )
    mngReservations.addReservation( 1, 1, 20190806, 20190808, 20.1)
    mngReservations.cancelReservation(1, 1, 20190806)
    mngReservations.printReservationDetails(mngReservations.searchReservation(1, 1, 20190806))
main()
| bobbilisantosh321/python | Final Prj.py | Final Prj.py | py | 11,507 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 15,
"usage_type": "argument"
},
{
... |
14175838199 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the ``offDay`` field from ``workday``."""
    dependencies = [
        ('workArounds', '0002_remove_monthlyreport_cycle'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='workday',
            name='offDay',
        ),
    ]
| ravitbar/workers | workArounds/migrations/0003_remove_workday_offday.py | 0003_remove_workday_offday.py | py | 365 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.RemoveField",
"line_number": 14,
"usage_type": "call"
},
... |
28923475301 | #18405. 경쟁적 전염
"""
heapq를 쓰는법도 있을것만 같지만 일단 이렇게 구현했다.
"""
#STEP 1. INPUT SETTING
import sys
from collections import deque
input=sys.stdin.readline
N,K=map(int,input().rstrip().split())
board=[]
dist=[[-1]* N for _ in range(N)]
start_deq=deque([])
for row in range(N):
arr=list(map(int,input().rstrip().split()))
board.append(arr)
for col in range(N):
if arr[col]:
dist[row][col]=0 #바이러스가 들어있는건 거리 0으로 설정.
start_deq.append((row,col))
#STEP 2. OPERATION START(BFS, ON)
def bfs(deq, board, dist):
    """Multi-source BFS: spread viruses one second per layer.

    Ties (same arrival time) are won by the smaller virus id, mirroring the
    problem's priority rule. All sources are already queued at distance 0.
    """
    moves = ((1, 0), (-1, 0), (0, 1), (0, -1))
    while deq:
        x, y = deq.popleft()
        for dx, dy in moves:
            nx, ny = x + dx, y + dy
            if not (0 <= nx < N and 0 <= ny < N):
                continue
            if dist[nx][ny] >= 0:
                # Already reached; a same-time arrival keeps the smaller id.
                if dist[nx][ny] == dist[x][y] + 1:
                    board[nx][ny] = min(board[nx][ny], board[x][y])
                continue
            board[nx][ny] = board[x][y]
            dist[nx][ny] = dist[x][y] + 1
            deq.append((nx, ny))
bfs(start_deq,board,dist)
# # for inspecting the virus spread
# print("-------board--------")
# for row in board:
#     print(row)
# print("-------dist---------")
# for row in dist:
#     print(row)
# result output: virus id at (X,Y) if it was reached within S seconds, else 0
S,X,Y=map(int,input().rstrip().split())
result= int(dist[X-1][Y-1]<=S) * board[X-1][Y-1]
print(result)
| GuSangmo/BOJ_practice | BOJ/18405.py | 18405.py | py | 1,764 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 12,
"usage_type": "call"
}
] |
1702730464 | # %%
from itertools import combinations
import spacy
# Load spaCy's pre-trained word embedding model
# NOTE(review): en_core_web_sm ships no true word vectors; .similarity()
# falls back to context tensors and warns — confirm a *_md/_lg model
# wasn't intended.
nlp = spacy.load("en_core_web_sm")
# Input text containing words for similarity comparison
text = (
    "funny comedy music laugh humor song songs jokes musical hilarious"
)
# Process the input text with spaCy
doc = nlp(text)
# Calculate and display similarity between word pairs using spaCy's word vectors
for token1, token2 in combinations(doc, 2):
    print(
        f"similarity between {token1} and {token2} is {token1.similarity(token2)}"
    )
# %%
import pandas as pd
from gensim.models import Word2Vec
from tqdm import tqdm
# Read the movie reviews data from a CSV file named "train.csv"
data = pd.read_csv("train.csv")
# Preprocess the movie reviews into a list of sentences using spaCy's sentence detection
sentences = []
for review in tqdm(data["review"]):
    review_doc = nlp(review)
    review_sentences = [sent.text for sent in review_doc.sents]
    sentences.extend(review_sentences)
# Train a Word2Vec model using the list of sentences
# NOTE(review): gensim expects an iterable of token *lists*; `sentences`
# holds whole strings, so each character is treated as a token — verify.
model = Word2Vec(sentences, vector_size=100, window=5, min_count=1, workers=4)
# Ensure words from the input text are present in the model's vocabulary
model.build_vocab([text.split()], update=True)
# %%
# Calculate and display similarity between word pairs using the trained Word2Vec model
for token1, token2 in combinations(text.split(), 2):
    similarity = model.wv.similarity(token1, token2)
    print(
        f"similarity between {token1} and {token2} is {similarity}"
    )
| NewDonkCity/Portfolio | word_embedding.py | word_embedding.py | py | 1,598 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "spacy.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"lin... |
26478265364 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: makes Element.element_type optional with a fixed choice set."""
    dependencies = [
        ('api', '0004_auto_20150623_1841'),
    ]
    operations = [
        migrations.AlterField(
            model_name='element',
            name='element_type',
            field=models.CharField(blank=True, max_length=12, null=True, choices=[(b'SELECT', b'SELECT'), (b'MULTI_SELECT', b'MULTI_SELECT'), (b'RADIO', b'RADIO'), (b'GPS', b'GPS'), (b'SOUND', b'SOUND'), (b'PICTURE', b'PICTURE'), (b'ENTRY', b'ENTRY')]),
        ),
    ]
| protocolbuilder/sana.protocol_builder | src-django/api/migrations/0005_auto_20150703_2228.py | 0005_auto_20150703_2228.py | py | 608 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 14,
"usage_type": "call"
},
{... |
18284253607 | # 用xpath爬取糗事百科里面段子的详情,存储为json格式
# https://www.qiushibaike.com/8hr/page/1/
# 每个段子 //div[contains(@id,"qiushi_tag_")]
# 用户名 ./div/a/h2
# 图片链接 ./div/a/img[@class="illustration"]/@src
# 段子内容 ./a/div[@class="content"]/span
# 点赞数 ./div/span/i[@class="number"]
# 评论数 ./div/span/a/i[@class="number"]
import requests
from lxml import etree
import json
def write_item(data):
    """Append one story record to stories.json as a UTF-8 JSON line."""
    line = json.dumps(data, ensure_ascii=False)
    with open('stories.json','a', encoding='utf-8') as out:
        out.write(line + '\n')
def deal_item(selector):
    """Extract every joke post on the page and persist it via write_item.

    Per post: author name, illustration links, joke text (with <br> tags
    flattened via string(.)), like count and comment count.
    """
    for post in selector.xpath('//div[contains(@id,"qiushi_tag_")]'):
        # Anonymous users are possible, hence the broad //h2 lookup.
        author = post.xpath('./div[@class="author clearfix"]//h2')[0].text
        pictures = post.xpath('./div/a/img[@class="illustration"]/@src')
        # string(.) gathers all descendant text so <br> tags are ignored.
        text = post.xpath('./a/div[@class="content"]/span')[0].xpath('string(.)')
        likes = post.xpath('./div/span/i[@class="number"]')[0].text
        comments = post.xpath('./div/span/a/i[@class="number"]')[0].text
        write_item({
            'user': author,
            'img': pictures,
            'content': text,
            'good': likes,
            'comment': comments,
        })
def main():
    """Crawl the first 10 pages of qiushibaike and process each one."""
    headers = {"User-Agent":"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)"}
    for page in range(1, 11):
        url = f"https://www.qiushibaike.com/8hr/page/{page}/"
        response = requests.get(url, headers=headers)
        deal_item(etree.HTML(response.content))
if __name__ == '__main__':
    # Time the whole crawl+parse run; message below reports seconds (in Chinese).
    import time
    start_time = time.time()
    main()
    end_time = time.time()
    print("爬取解析工作共耗时%s秒" % str(end_time - start_time))
| longyincug/crawler | 12_xpath_json_demo.py | 12_xpath_json_demo.py | py | 2,016 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "lxml.etree.HTML",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number... |
4511367599 | import openpyxl
import pandas
from collections import Counter
from difflib import SequenceMatcher
from collections import OrderedDict
import time
import numpy
import igraph
import sys
pathTest = "E:\\Pham Thanh Quyet - 23.12.2022\\DSKH 22.12.23\\VRS VRH\\Book1.XLSX"
path = "E:\\Pham Thanh Quyet - 23.12.2022\\DSKH 22.12.23\\VRS VRH\\23.04.13 Riverside+ Harmony Full - Tổng hợp khách hàng và căn V23 - for processing.XLSX"
wb_obj = openpyxl.load_workbook(path)
sheet_base = wb_obj.active
wb_test = openpyxl.load_workbook(pathTest)
sh_test = wb_test.active
start = time.time()
print("Start time = " + time.strftime("%H:%M:%S", time.gmtime(start)))
time.sleep(5)
graph = igraph.Graph(n=8,edges=[(0,4),(1,4),(1,6),(2,5),(3,6),(3,7)])
graph.vs['name'] = ['H1','H2','H3','H4','P1','P2','P3','P4']
print(graph)
for s in graph.components().subgraphs():
print(s.vs['name'])
print(' | '.join(s.vs['name']))
hStr = []
pStr = []
for ele in s.vs['name']:
if "H" in ele:
hStr.append(ele)
if "P" in ele:
pStr.append(ele)
print(';'.join(hStr) + " and " + ';'.join(pStr))
# n = 12
# graphTest = igraph.Graph(n)
# for x in range(0,5):
# graphTest.add_edges([(x,n-1-x)])
# print(graphTest)
# phList = ['H1','H2','H3','H4','P1','P2','P3','P4']
# graphTest = igraph.Graph(n=len(phList))
# graphTest.vs['name'] = phList
# for x in range(2,sh_test.max_row+1):
# # for idx,val in enumerate(graphTest.vs['name']):
# # if sh_test.cell(row=x,column=2).value == val:
# # sh_test.cell(row=x,column=4).value = idx
# # elif sh_test.cell(row=x,column=3).value == val:
# # sh_test.cell(row=x,column=5).value = idx
# sh_test.cell(row=x,column=4).value = phList.index(sh_test.cell(row=x,column=2).value)
# sh_test.cell(row=x,column=5).value = phList.index(sh_test.cell(row=x,column=3).value)
# for i in range(2,sh_test.max_row+1):
# graphTest.add_edges([(sh_test.cell(row=i,column=4).value,sh_test.cell(row=i,column=5).value)])
# print(graphTest)
# aSet = []
# aSet.append("this")
# aSet.append("is")
# aSet.append("a")
# aSet.append("set")
# print(aSet)
# print(aSet.index("this"))
# print(sorted(aSet))
wb_test.save(pathTest)
end = time.time()
print("Run time = " + time.strftime("%H:%M:%S", time.gmtime(end-start)))
print("End time = " + time.strftime("%H:%M:%S", time.gmtime(end))) | ChinhTheHugger/vscode_python | excel_graph_test.py | excel_graph_test.py | py | 2,397 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "openpyxl.load_workbook",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.strftime... |
43102936048 | from collections import namedtuple
from playment.utilities import Decodable
class JobResult(Decodable):
    """Holds the queryable fields of a single job as returned by the API."""
    def __init__(self, job_id: str = None, batch_id: str = None, project_id: str = None, reference_id: str = None,
                 status: str = None, tag: str = None, priority_weight: int = None, result: str = None):
        for field, value in (
            ("job_id", job_id),
            ("batch_id", batch_id),
            ("project_id", project_id),
            ("reference_id", reference_id),
            ("status", status),
            ("tag", tag),
            ("priority_weight", priority_weight),
            ("result", result),
        ):
            setattr(self, field, value)

    def json_object_hook(self, d):
        """Decode a JSON object into a namedtuple named after this class."""
        cls_name = self.__class__.__name__
        return namedtuple(cls_name, d.keys())(*d.values())
| crowdflux/playment-sdk-python | playment/jobs/job_result.py | job_result.py | py | 703 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "playment.utilities.Decodable",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "collections.namedtuple",
"line_number": 18,
"usage_type": "call"
}
] |
3269636790 | #
#
#
import time
from .external import getid
from .types import MissingParamException
from .utils import StopException, TaskRunner
def genarid() -> str:
    """Generate a fresh archive id (delegates to external.getid)."""
    return getid()
def makearfsfromdisk(disk: str) -> int:
    """Provision an encrypted archive filesystem on *disk*, step by step.

    Runs the partition/LUKS/filesystem steps in a fixed order via TaskRunner;
    on a missing parameter or a deliberate stop the runner state is dumped and
    1 is returned. On success returns the runner's own return code.
    """
    runner = TaskRunner()
    try:
        runner.setvalue("disk", disk)
        for step in (
            "add-partition-table",
            "verify-partition-table",
            "generate-luks-key",
            "add-luks-partition",
        ):
            runner.runstep(step)
        # cprt will be the part for the rest of the steps
        runner.addalias("cprt", "part")
        runner.runstep("verify-luks-partition")
        runner.setvalue("arid", genarid())
        runner.runstep("verify-arid-unused")
        runner.runstep("add-archive-filesystem")
        # brief pause before verifying — presumably lets the kernel settle; confirm
        time.sleep(0.2)
        runner.runstep("verify-archive-filesystem")
        runner.runstep("mount-archive-filesystem")
        time.sleep(0.2)
        runner.runstep("check-archive-filesystem")
        runner.dump()
    except MissingParamException as err:
        print("Missing parameter: [%s]" % err)
        runner.dump()
        return 1
    except StopException as err:
        print("Will not continue [%s]" % err)
        runner.dump()
        return 1
    print("Finished adding /arfs/%s" % runner.getvalue("arid"))
    return runner.getretcode()
| toppk/saveme | lib/saveme/block.py | block.py | py | 1,345 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "external.getid",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "utils.TaskRunner",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_num... |
23540887587 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Language Detector is a RESTful web service for detecting the language of
arbitrary text.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import sys
from typing import Any, Dict
import fastText
from flask import Flask, Response, abort, jsonify, request
from prometheus_flask_exporter import PrometheusMetrics
class PredictionEndpoint():
    """Wraps a fastText model behind a Flask request handler."""

    def __init__(self, model) -> None:
        self.model = model

    def predict(self, text: str) -> Dict[str, Any]:
        """Run the model on *text* and unpack its top label and score."""
        result = self.model.predict(text)
        detected = result[0][0].replace("__label__", "")
        score = result[1][0]
        return {
            "language": detected,
            "confidence": score,
        }

    def endpoint(self) -> Response :
        """Flask view: read ``text`` from the JSON body; 400 when missing."""
        payload = request.get_json()
        try:
            text = payload["text"]
        except KeyError:
            abort(400)
        return jsonify({
            "prediction": self.predict(text),
        })
def main() -> None:
    """The command entrypoint: parse flags, load the model, serve the API."""
    parser = argparse.ArgumentParser(prog="language_predictor")
    parser.add_argument(
        "--port",
        type=int,
        help="The port the API server should bind to ($PORT)",
        default=os.environ.get("PORT", 8080),
    )
    parser.add_argument(
        "--host",
        type=str,
        help="The local address the API server should bind to ($HOST)",
        default=os.environ.get("HOST", "127.0.0.1"),
    )
    # NOTE(review): required=True forces --model on the CLI even when $MODEL
    # is set in the environment — confirm that is intended.
    parser.add_argument(
        "--model",
        type=str,
        help="[REQUIRED] The path to the fasttext model file ($MODEL)",
        default=os.environ.get("MODEL", None),
        required=True,
    )
    args = parser.parse_args(sys.argv[1:])
    model = fastText.load_model(args.model)
    app = Flask(__name__)
    predict = PredictionEndpoint(model)
    # Single POST route delegating to the prediction endpoint.
    app.add_url_rule(
        "/api/v1/language/predict",
        "predict",
        view_func=predict.endpoint,
        methods=["POST"],
    )
    # Expose Prometheus metrics on the same app.
    PrometheusMetrics(app)
    app.run(host=args.host, port=args.port)
if __name__ == "__main__":
main()
| NLPKit/LanguagePredictor | language_predictor.py | language_predictor.py | py | 2,386 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "typing.Dict",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "flask.request.get_json",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "flask.request",
"li... |
26010207985 | import argparse
import os
import re
import subprocess
import sys
import test_common
# Regression driver for the ayane prover: run each problem file and compare
# the outcome against the expected result from test_common.
parser = argparse.ArgumentParser()
parser.add_argument("files", nargs="*")
args = parser.parse_args()
here = os.path.dirname(os.path.realpath(__file__))
# Map numeric exit codes to their symbolic names parsed out of src/etc.h.
codes = {}
for s in open(os.path.join(here, "..", "src", "etc.h")).readlines():
    m = re.match(r"const int (\w+Error) = (\d+);", s)
    if m:
        codes[int(m[2])] = m[1]
# Problem list: explicit files from the CLI, else every .p/.cnf/.smt2 under here.
problems = []
if args.files:
    for file in args.files:
        problems.append(os.path.join(here, file))
else:
    for root, dirs, files in os.walk(here):
        for file in files:
            ext = os.path.splitext(file)[1]
            if ext in (".p", ".cnf", ".smt2"):
                problems.append(os.path.join(root, file))
def err():
    # Reports the failing run's stdout and exit code (reads the loop's globals).
    print(s, end="")
    raise Exception(str(code))
for file in problems:
    print(file)
    e = test_common.get_expected(file)
    # 3-second timeout per problem.
    cmd = "./ayane", "-t3", file
    p = subprocess.run(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8"
    )
    code = p.returncode
    code = codes.get(code, code)
    s = p.stdout
    # inappropriate/input errors are acceptable expected outcomes; any other
    # nonzero code is a failure. Otherwise classify by unsat/sat in the output.
    if code in ("inappropriateError", "inputError"):
        r = code
    elif code:
        err()
    elif "unsat" in s:
        r = 0
    elif "sat" in s:
        r = 1
    else:
        err()
    if r != e:
        err()
| russellw/ayane | test/test.py | test.py | py | 1,339 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath"... |
43281589774 | #!/usr/bin/env python
from sys import argv
import boto3
import logging
logging.basicConfig(level=logging.INFO)
session = boto3.Session()
sqs = session.client('sqs')
sts = session.client('sts')
def queue_transfer(from_queue, to_queue):
    """Move every message from one SQS queue to another.

    Receives in batches of up to 10; each message is sent to the target queue
    and then deleted from the source, until the source is drained.

    Fixes: lazy %-style logging args (the f-strings formatted eagerly even
    when logging was disabled) and the duplicated receive_message call.
    """
    logging.info("Transfer from %s to %s", from_queue, to_queue)
    from_queue_url = get_queue_url(from_queue)
    to_queue_url = get_queue_url(to_queue)
    logging.info("From URL: %s", from_queue_url)
    logging.info("To URL: %s", to_queue_url)
    while True:
        response = sqs.receive_message(
            QueueUrl=from_queue_url,
            MaxNumberOfMessages=10
        )
        # SQS omits the 'Messages' key entirely when the queue is empty.
        if 'Messages' not in response:
            break
        for message in response['Messages']:
            logging.info("Transferring message...")
            sqs.send_message(
                QueueUrl=to_queue_url,
                MessageBody=message['Body']
            )
            sqs.delete_message(
                QueueUrl=from_queue_url,
                ReceiptHandle=message['ReceiptHandle']
            )
def get_queue_url(queue_name):
    """Resolve a queue name to its full SQS queue URL."""
    response = sqs.get_queue_url(QueueName=queue_name)
    return response["QueueUrl"]
def list_queues():
    """Log every SQS queue URL visible to the current account.

    Fixes: lazy logging args, and a KeyError when the account has no queues
    (SQS omits "QueueUrls" from the response in that case).
    """
    account_id = sts.get_caller_identity()["Account"]
    logging.info("Listing queues for account %s", account_id)
    result = sqs.list_queues()
    for queue_url in result.get("QueueUrls", []):
        logging.info("Queue URL: %s", queue_url)
def create_queue(queue_name):
    """Create an SQS queue with the given name (idempotent on SQS's side)."""
    # Lazy %-args so the message is only formatted when INFO is enabled.
    logging.info("Creating queue %s", queue_name)
    sqs.create_queue(QueueName=queue_name)
def delete_queue(queue_name):
    """Delete the SQS queue with the given name."""
    logging.info("Deleting queue %s", queue_name)
    queue_url = get_queue_url(queue_name)
    logging.info("Queue URL: %s", queue_url)
    sqs.delete_queue(QueueUrl=queue_url)
def purge_queue(queue_name):
    """Delete all messages in the named queue without deleting the queue."""
    logging.info("Purging queue %s", queue_name)
    queue_url = get_queue_url(queue_name)
    logging.info("Queue URL: %s", queue_url)
    sqs.purge_queue(QueueUrl=queue_url)
def main():
    """Dispatch the CLI sub-command given in argv.

    Usage: transfer FROM TO | list | create NAME | delete NAME | purge NAME.
    Fixes: running with no arguments previously raised IndexError on
    ``argv[1]``; it now prints the not-recognized message instead.
    """
    if len(argv) < 2:
        print('Command not recognized')
        return
    if argv[1] == 'transfer':
        queue_transfer(argv[2], argv[3])
    elif argv[1] == 'list':
        list_queues()
    elif argv[1] == 'create':
        create_queue(argv[2])
    elif argv[1] == 'delete':
        delete_queue(argv[2])
    elif argv[1] == 'purge':
        purge_queue(argv[2])
    else:
        print('Command not recognized')
if __name__ == "__main__":
main() | bhoven/sqs-util | sqs_util.py | sqs_util.py | py | 2,405 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "boto3.Session",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.info",
"... |
10665105745 | import datetime
from flask.ext.jwt import current_identity, jwt_required
from flask_restplus import Namespace, Resource, fields, reqparse
from packr.models import Order, Role, User
# REST namespace and the request model (consignment number) for order lookup.
api = Namespace('lookup',
                description='Operations related to looking up an order')
lookup = api.model('Lookup', {
    'con_number': fields.Integer(readOnly=True,
                                 description='The consignment number'),
})
@api.route('/')
class LookupItem(Resource):
    @api.expect(lookup)
    @api.response(204, 'Successfully looked up order.')
    @jwt_required()
    def post(self):
        """Look up one order by consignment number and return its full details.

        Access: admins see any order; the owning customer sees their own;
        a driver sees orders assigned to them. Returns 400 on a missing
        number, 404 on an unknown one, 401 on access denial.
        """
        req_parse = reqparse.RequestParser(bundle_errors=True)
        req_parse.add_argument('con_number', type=int, required=True,
                               location='json')
        args = req_parse.parse_args()
        con_number = args.get('con_number', -1)
        if con_number == -1:
            return {'message': {'con_number':
                                'No consignment number provided'}}, 400
        # Find the consignment note information.
        order = Order.query.filter_by(id=con_number).first()
        if not order:
            return {'description': 'Unknown consignment number.'}, 404
        # Authorization: admin -> anything; owner -> own orders;
        # driver -> only orders assigned to them.
        if current_identity.role.role_name != 'admin':
            if order.user_id != current_identity.id:
                if current_identity.role.role_name == 'driver':
                    if order.driver_id != current_identity.id:
                        return {'description': 'Access denied.'}, 401
                else:
                    return {'description': 'Access denied.'}, 401
        # Serialize the order's status history.
        statuses = list()
        for status in order.status:
            statuses.append({
                'status': status.status.status,
                'date': status.time.strftime('%Y-%m-%dT%H:%M:%S'),
                'address': status.address
            })
        # Serialize the package dimensions.
        packages = list()
        for package in order.package:
            packages.append({
                'weight': package.weight,
                'length': package.length,
                'width': package.width,
                'height': package.height
            })
        # Split the stored pickup datetime into date and time parts for output.
        pickup_date = datetime.date(year=order.pickup_time.year,
                                    month=order.pickup_time.month,
                                    day=order.pickup_time.day)
        pickup_time = datetime.time(hour=order.pickup_time.hour,
                                    minute=order.pickup_time.minute,
                                    second=order.pickup_time.second)
        pickup = {
            'businessName': order.pickup_contact.business_name,
            'contactName': order.pickup_contact.contact_name,
            'phone': order.pickup_contact.phone,
            'email': order.pickup_contact.email,
            'street': order.pickup_address.street,
            'suburb': order.pickup_address.suburb,
            'state': order.pickup_address.state,
            'postCode': order.pickup_address.post_code,
            'date': pickup_date.strftime('%Y-%m-%d'),
            'time': pickup_time.strftime('%H:%M:%S')
        }
        delivery = {
            'businessName': order.delivery_contact.business_name,
            'contactName': order.delivery_contact.contact_name,
            'phone': order.delivery_contact.phone,
            'email': order.delivery_contact.email,
            'street': order.delivery_address.street,
            'suburb': order.delivery_address.suburb,
            'state': order.delivery_address.state,
            'postCode': order.delivery_address.post_code
        }
        driver = ''
        if order.driver:
            driver = order.driver.id
        # Every user with the driver role, for the assignment dropdown.
        driver_role = Role.query.filter_by(role_name='driver').first()
        drivers = list()
        for driv in User.query.filter_by(role=driver_role):
            drivers.append({
                'id': driv.id,
                'full_name': driv.full_name
            })
        return {'eta': order.eta.strftime('%Y-%m-%d'),
                'type': order.service_type.name,
                'driver': driver,
                'drivers': drivers,
                'cost': order.cost,
                'paymentType': order.payment_type,
                'fragile': ('yes' if order.fragile else 'no'),
                'delivery': delivery,
                'pickup': pickup,
                'customerComments': order.notes,
                'adminComments': order.driver_notes,
                'statuses': statuses,
                'packages': packages}, 201
| ZeroEcks/packr | packr/api/lookup.py | lookup.py | py | 4,547 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_restplus.Namespace",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask_restplus.fields.Integer",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_restplus.fields",
"line_number": 12,
"usage_type": "name"
},
{
"api_n... |
20583496296 | import cv2
import numpy as np
#COLOR PICKER RANGE
# NOTE(review): despite the name, this BGR value (40, 40, 255) is
# red-dominant, so the derived hue range below is a red range — confirm.
green = np.uint8([[[40, 40, 255]]]) #here insert the bgr values which you want to convert to hsv
hsvGreen = cv2.cvtColor(green, cv2.COLOR_BGR2HSV)
print(hsvGreen)
# Hue window of +/-10 around the converted hue, with floor on sat/value.
lowerLimit = hsvGreen[0][0][0] - 10, 150, 150
upperLimit = hsvGreen[0][0][0] + 10, 255, 255
print(upperLimit)
print(lowerLimit)
img = cv2.imread('D:/HW/OpenCV Workshop - distro2/OpenCV Workshop/tek2.png')
# Convert BGR to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# define range of blue color in HSV
lower_blue = np.array([110,50,50])
upper_blue = np.array([130,255,255])
# Red wraps around hue 0/180, hence two ranges (0-10 and 170-180).
lower_red = np.array([0,50,50])
upper_red = np.array([10,255,255])
lower_piece_red = np.array([170,50,50])
upper_piece_red = np.array([180,255,255])
lower_green = np.array([50,50,50])
upper_green = np.array([70,255,255])
# Threshold the HSV image to get only blue colors
blue_mask = cv2.inRange(hsv, lower_blue, upper_blue)
red_mask = cv2.inRange(hsv,lower_red,upper_red)
green_mask = cv2.inRange(hsv,lower_green,upper_green)
piece_red_mask = cv2.inRange(hsv,lower_piece_red,upper_piece_red)
# Bitwise-AND mask and original image
blue_res = cv2.bitwise_and(img,img, mask= blue_mask)
red_res = cv2.bitwise_and(img,img, mask=(red_mask | piece_red_mask))
green_res = cv2.bitwise_and(img,img, mask=green_mask)
# Show the original, one mask, and each color-isolated result.
cv2.imshow('img',img)
cv2.imshow('mask',blue_mask)
cv2.imshow('res_blue',blue_res)
cv2.imshow('res_red',red_res)
cv2.imshow('res_green',green_res)
cv2.waitKey(0)
cv2.destroyAllWindows()
| Altair115/OpenCV2-Workshop | Opdrachten/Op3.py | Op3.py | py | 1,498 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.uint8",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_n... |
73490872105 | from selenium.webdriver.common.action_chains import ActionChains
from urllib.parse import urljoin
import time
import pytest
@pytest.fixture
def catalog_url(baseurl_option):
    """Fixture: full URL of the Desktops category page for the configured base URL."""
    return urljoin(baseurl_option, '/index.php?route=product/category&path=20')
def test_login_catalog(catalog_url, browser):
    """Opening the catalog page lands on exactly that URL (no redirect)."""
    browser.get(catalog_url)
    assert browser.current_url == catalog_url
@pytest.mark.parametrize(
    'xpath, expected_text',
    [
        ('//div/h2', 'Desktops'),
        ('//div/h3', 'Refine Search')
    ]
)
def test_text_main_page(xpath, expected_text, catalog_url, browser):
    """The category heading and refine-search label show the expected text."""
    browser.get(catalog_url)
    label = browser.find_element_by_xpath(xpath)
    assert label.text == expected_text
@pytest.mark.parametrize(
    'xpath, expected_text',
    [
        ('//a[text()="MacBook"]', 'MacBook'),
        ('//a[text()="iPhone"]', 'iPhone'),
        ('//a[contains(text(), "Apple Cinema 30")]', 'Apple Cinema 30"'),
        ('//a[text()="Canon EOS 5D"]', "Canon EOS 5D")
    ]
)
def test_label_text_main_page(xpath, expected_text, catalog_url, browser):
    """Each product link on the category page renders its expected label."""
    browser.get(catalog_url)
    label = browser.find_element_by_xpath(xpath)
    assert label.text == expected_text
def test_add_to_wish_list(catalog_url, browser):
    """Click every 'add to wish list' button on the category page."""
    browser.get(catalog_url)
    for wish_button in browser.find_elements_by_xpath('//div[2]/div[2]/button[2]'):
        # Hover onto the button first so it is actually clickable.
        ActionChains(browser).move_to_element(wish_button).perform()
        wish_button.click()
    time.sleep(5)
| astretcova/Otus-lessons | hw_8/catalog_test.py | catalog_test.py | py | 1,503 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.parse.urljoin",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pytest... |
44101908061 | from flask import Flask, request, send_from_directory
from PIL import Image
import pathlib
import os
import urllib
S3_BUCKET_URL = "https://s3-us-west-2.amazonaws.com/makersdistillery/"
IMAGES_PATH = pathlib.Path("images")
app = Flask(__name__)
@app.route("/images/<path:image_path>")
def images(image_path):
file_path = IMAGES_PATH / os.path.dirname(image_path)
file_name = os.path.basename(image_path)
local_image_path = file_path / file_name
_, extension = os.path.splitext(image_path)
if not os.path.isfile(local_image_path):
file_path.mkdir(parents=True, exist_ok=True)
s3_url = S3_BUCKET_URL + image_path
urllib.request.urlretrieve(s3_url, local_image_path)
h_arg = request.args.get('h', None)
w_arg = request.args.get('w', None)
format_arg = request.args.get('format', None)
if not (h_arg or w_arg or format_arg):
return send_from_directory("images", image_path)
image = Image.open(local_image_path)
width, height = image.size
new_height = height
new_width = width
if h_arg and not w_arg:
new_height = int(h_arg)
scale = new_height / height
new_width = round(width * scale)
elif w_arg and not h_arg:
new_width = int(w_arg)
scale = new_width / width
new_height = round(height * scale)
elif w_arg and h_arg:
new_width = int(w_arg)
new_height = int(h_arg)
resized_image_path = pathlib.Path("resized", local_image_path)
resized_image_file = "{}-{}{}".format(new_width, new_height, extension)
if not os.path.isfile(IMAGES_PATH / resized_image_path / resized_image_file):
(IMAGES_PATH / resized_image_path).mkdir(parents=True, exist_ok=True)
resized_image = image.resize((new_width, new_height))
resized_image.save(IMAGES_PATH / resized_image_path / resized_image_file)
return send_from_directory("images", resized_image_path / resized_image_file)
if __name__ == "__main__":
app.run() | akb/image-resize-service | image-resize.py | image-resize.py | py | 2,004 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
14147663072 | import inspect
from pathlib import Path
import torch
import yaml
from comfy import model_detection, model_management
from comfy.sd import CLIP, VAE, load_model_weights
from comfy.model_patcher import ModelPatcher
from comfy.utils import calculate_parameters
from folder_paths import models_dir as comfy_models_path
from sd_meh import merge_methods
from sd_meh.merge import merge_models
from sd_meh.presets import BLOCK_WEIGHTS_PRESETS
from sd_meh.utils import weights_and_bases
from tensordict import TensorDict
print(Path.cwd())

# ComfyUI checkpoint directory and the merge methods exposed by sd-meh.
MODELS_DIR = Path(comfy_models_path, "checkpoints")
MERGE_METHODS = dict(inspect.getmembers(merge_methods, inspect.isfunction))

# Optional ComfyUI "extra_model_paths.yaml" listing extra checkpoint roots.
EXTRA_CONFIG_YAML_PATH = Path(".", "extra_model_paths.yaml")
if EXTRA_CONFIG_YAML_PATH.exists():
    with EXTRA_CONFIG_YAML_PATH.open() as f:
        EXTRA_CONFIG_YAML = yaml.safe_load(f)
else:
    EXTRA_CONFIG_YAML = {}
def get_checkpoints():
    """Discover checkpoint files and map stem name -> full path.

    Scans MODELS_DIR plus every extra root from extra_model_paths.yaml for
    .ckpt and .safetensors files.
    """
    checkpoints = []
    for ext in ("ckpt", "safetensors"):
        checkpoints.extend(list(MODELS_DIR.glob(f"**/*.{ext}")))
        # NOTE(review): indentation reconstructed — the extra-paths scan is
        # nested inside the extension loop so that `ext` is in scope here.
        for c in EXTRA_CONFIG_YAML.values():
            extra_checkpoints_dir = Path(
                c["base_path"] if "base_path" in c else "", c["checkpoints"]
            )
            checkpoints.extend(list(extra_checkpoints_dir.glob(f"**/*.{ext}")))
    return {c.stem: c for c in checkpoints}
def split_model(
    sd,
    output_clip=True,
    output_vae=True,
):
    """Split a merged state dict into ComfyUI (ModelPatcher, CLIP, VAE).

    :param sd: full checkpoint state dict (keys prefixed
        "model.diffusion_model." for the UNet)
    :param output_clip: also build and load the CLIP object
    :param output_vae: also build and load the VAE object
    :raises RuntimeError: if the UNet architecture cannot be detected
    """
    clip = None
    vae = None
    model = None
    clip_target = None
    # Decide fp16 usage from the UNet parameter count.
    parameters = calculate_parameters(sd, "model.diffusion_model.")
    fp16 = model_management.should_use_fp16(model_params=parameters)

    # Bare module used as a container so load_model_weights can attach
    # sub-models by attribute name.
    class WeightsLoader(torch.nn.Module):
        pass

    model_config = model_detection.model_config_from_unet(
        sd, "model.diffusion_model.", fp16
    )
    if model_config is None:
        raise RuntimeError("ERROR: Could not detect model type")
    offload_device = model_management.unet_offload_device()
    model = model_config.get_model(sd, "model.diffusion_model.")
    model = model.to(offload_device)
    model.load_model_weights(sd, "model.diffusion_model.")
    if output_vae:
        vae = VAE()
        w = WeightsLoader()
        w.first_stage_model = vae.first_stage_model
        load_model_weights(w, sd)
    if output_clip:
        w = WeightsLoader()
        clip_target = model_config.clip_target()
        clip = CLIP(clip_target, embedding_directory=None)
        w.cond_stage_model = clip.cond_stage_model
        sd = model_config.process_clip_state_dict(sd)
        load_model_weights(w, sd)
    # Any keys not consumed by UNet/CLIP/VAE loading are reported.
    left_over = sd.keys()
    if len(left_over) > 0:
        print("left over keys:", left_over)
    return (
        ModelPatcher(
            model,
            load_device=model_management.get_torch_device(),
            offload_device=offload_device,
        ),
        clip,
        vae,
    )
class MergingExecutionHelper:
    """ComfyUI node that merges checkpoints with sd-meh."""

    # Checkpoint name -> path, discovered once at class-definition time.
    ckpts = get_checkpoints()

    @classmethod
    def INPUT_TYPES(self):
        """Describe the node inputs in ComfyUI's INPUT_TYPES format.

        NOTE(review): the classmethod parameter is named `self` but is
        bound to the class, so `self.optional_keys` below stores the
        optional-input names as a class attribute used later by merge().
        """
        required = {
            "model_a": (list(self.ckpts.keys()), {"default": None}),
            "model_b": (list(self.ckpts.keys()), {"default": None}),
            "merge_mode": (list(MERGE_METHODS.keys()), {"default": "weighted_sum"}),
            "base_alpha": (
                "FLOAT",
                {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.1},
            ),
        }
        optional = {
            "precision": ([16, 32], {"default": 16}),
            "model_c": (["None"] + list(self.ckpts.keys()), {"default": None}),
            "base_beta": (
                "FLOAT",
                {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.1},
            ),
            "weights_alpha": ("STRING", ""),
            "weights_beta": ("STRING", ""),
            "re_basin": (["disabled", "enabled"], {"default": "disabled"}),
            "re_basin_iterations": (
                "INT",
                {"default": 10, "min": 0, "max": 1000, "step": 1},
            ),
            "weights_clip": (["disabled", "enabled"], {"default": "disabled"}),
            "device": (["cpu", "cuda"], {"default": "cpu"}),
            "work_device": (["cpu", "cuda"], {"default": "cpu"}),
            "threads": ("INT", {"default": 1, "min": 1, "step": 1}),
            "prune": (["disabled", "enabled"], {"default": "disabled"}),
            "block_weights_preset_alpha": (
                ["None"] + list(BLOCK_WEIGHTS_PRESETS.keys()),
                {"default": None},
            ),
            "presets_alpha_lambda": (
                "FLOAT",
                {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.1},
            ),
            "block_weights_preset_alpha_b": (
                ["None"] + list(BLOCK_WEIGHTS_PRESETS.keys()),
                {"default": None},
            ),
            "block_weights_preset_beta": (
                ["None"] + list(BLOCK_WEIGHTS_PRESETS.keys()),
                {"default": None},
            ),
            "presets_beta_lambda": (
                "FLOAT",
                {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.1},
            ),
            "block_weights_preset_beta_b": (
                ["None"] + list(BLOCK_WEIGHTS_PRESETS.keys()),
                {"default": None},
            ),
        }
        # Remember the optional-input names so merge() can default them.
        self.optional_keys = list(optional.keys())
        return {
            "required": required,
            "optional": optional,
        }

    # ComfyUI node metadata.
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "merge"
    CATEGORY = "meh"
    def merge(
        self,
        model_a,
        model_b,
        base_alpha,
        merge_mode,
        **kwargs,
    ):
        """Merge model_a/model_b (optionally model_c) with sd-meh.

        Returns (MODEL, CLIP, VAE) when merge_models yields a TensorDict
        state dict; otherwise returns merge_models' result unchanged.
        """
        # Default any optional input ComfyUI did not send.
        for opt_k in self.optional_keys:
            if opt_k not in kwargs:
                kwargs[opt_k] = None
        # Map UI strings back to Python values.
        for k in ("re_basin", "weights_clip", "prune"):
            kwargs[k] = kwargs[k] == "enabled"
        for k in ("weights_alpha", "weights_beta"):
            kwargs[k] = None if kwargs[k] == "" else kwargs[k]
        for k in (
            "model_c",
            "block_weights_preset_alpha",
            "block_weights_preset_alpha_b",
            "block_weights_preset_beta",
            "block_weights_preset_beta_b",
        ):
            kwargs[k] = None if kwargs[k] == "None" else kwargs[k]
        models = {
            "model_a": self.ckpts[model_a],
            "model_b": self.ckpts[model_b],
        }
        if kwargs["model_c"]:
            models["model_c"] = self.ckpts[kwargs["model_c"]]
        # Resolve per-block weights and base alphas/betas from the
        # preset names / weight strings.
        weights, bases = weights_and_bases(
            merge_mode,
            kwargs["weights_alpha"],
            base_alpha,
            kwargs["block_weights_preset_alpha"],
            kwargs["weights_beta"],
            kwargs["base_beta"],
            kwargs["block_weights_preset_beta"],
            kwargs["block_weights_preset_alpha_b"],
            kwargs["block_weights_preset_beta_b"],
            kwargs["presets_alpha_lambda"],
            kwargs["presets_beta_lambda"],
        )
        merged = merge_models(
            models,
            weights,
            bases,
            merge_mode,
            kwargs["precision"],
            kwargs["weights_clip"],
            kwargs["re_basin"],
            kwargs["re_basin_iterations"],
            kwargs["device"],
            kwargs["work_device"],
            kwargs["prune"],
            kwargs["threads"],
        )
        # merge_models may return a raw state dict or ready objects.
        if isinstance(merged, TensorDict):
            return split_model(merged.to_dict())
        return merged
# Register the node with ComfyUI.
NODE_CLASS_MAPPINGS = {
    "MergingExecutionHelper": MergingExecutionHelper,
}
| s1dlx/comfy_meh | meh.py | meh.py | py | 7,524 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "pathlib.Path.cwd",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "folder_paths.models_dir",
... |
8097764457 | import os
# use CPU only
os.environ["CUDA_VISIBLE_DEVICES"] = ""

import pickle
import argparse
import numpy as np
from math import ceil
from tqdm import tqdm
import tensorflow as tf
from shutil import rmtree

# Fixed RNG seed for reproducible train/validation shuffling.
np.random.seed(1234)
def split(sequence, n):
  """Divide *sequence* into n contiguous sub-sequences of near-equal size.

  The first ``len(sequence) % n`` chunks get one extra element, so the
  concatenation of the chunks reproduces the input order exactly.
  """
  base, extra = divmod(len(sequence), n)
  chunks = []
  start = 0
  for idx in range(n):
    stop = start + base + (1 if idx < extra else 0)
    chunks.append(sequence[start:stop])
    start = stop
  return chunks
def normalize(x, x_min, x_max):
  """Linearly rescale *x* from [x_min, x_max] onto [0, 1]."""
  span = x_max - x_min
  return (x - x_min) / span
def fft(signals):
  """Apply an FFT to every neuron trace.

  Returns the real and imaginary parts concatenated on the last axis,
  so the channel dimension doubles.
  """
  real = np.zeros(signals.shape, dtype=np.float32)
  imag = np.zeros(signals.shape, dtype=np.float32)
  num_batches = signals.shape[0]
  num_neurons = signals.shape[-1]
  for batch in tqdm(range(num_batches)):
    for neuron in range(num_neurons):
      trace = signals[batch, :, neuron].astype(np.complex64)
      spectrum = tf.signal.fft(trace).numpy()
      real[batch, :, neuron] = np.real(spectrum)
      imag[batch, :, neuron] = np.imag(spectrum)
  return np.concatenate([real, imag], axis=-1)
def calculate_num_per_shard(hparams):
  """Segments per shard so each TFRecord is ~hparams.target_shard_size GB.

  The base count is calibrated so that 1100 segments of length 120 make a
  1 GB shard; FFT output doubles the channel count, so the count is scaled
  by 2/3 in that case.
  """
  per_gigabyte = ceil((120 / hparams.sequence_length) * 1100) * 10  # 1GB shard
  if hparams.fft:
    per_gigabyte = per_gigabyte * 2 / 3
  return int(per_gigabyte * hparams.target_shard_size)
def get_segments(hparams):
  """Load the pickled recordings and cut them into overlapping segments.

  Returns (signals, spikes) arrays of shape
  [num_segments, sequence_length, num_channels].  Optionally applies FFT,
  conv2d reshaping and [0, 1] normalization; records dataset statistics
  (num_neurons, num_channels, signals_min/max) on `hparams` as a side
  effect.
  """
  print('processing file {}...'.format(hparams.input))
  assert hparams.stride >= 1
  with open(hparams.input, 'rb') as file:
    data = pickle.load(file)
  raw_signals = np.array(data['signals'], dtype=np.float32)
  raw_spikes = np.array(data['oasis'], dtype=np.float32)
  # remove first two rows in signals
  if not hparams.is_dg_data:
    raw_signals = raw_signals[2:]
    raw_spikes = raw_spikes[2:]
  assert raw_signals.shape == raw_spikes.shape
  # set signals and spikes to WC [sequence, num. neurons, ...]
  raw_signals = np.swapaxes(raw_signals, 0, 1)
  raw_spikes = np.swapaxes(raw_spikes, 0, 1)
  hparams.num_neurons = raw_signals.shape[1]
  hparams.num_channels = hparams.num_neurons
  # Slide a window of sequence_length with the configured stride.
  print('\nsegmentation with stride {}'.format(hparams.stride))
  signals, spikes, i = [], [], 0
  while i + hparams.sequence_length < raw_signals.shape[0]:
    signals.append(raw_signals[i:i + hparams.sequence_length, ...])
    spikes.append(raw_spikes[i:i + hparams.sequence_length, ...])
    i += hparams.stride
  signals = np.array(signals, dtype=np.float32)
  spikes = np.array(spikes, dtype=np.float32)
  if hparams.fft:
    print('\napply fft')
    signals = fft(signals)
    hparams.num_channels = signals.shape[-1]
  if hparams.conv2d:
    print('\nconvert to 3D matrix')
    if hparams.fft:
      # convert matrix to [sequence, num. neurons, 2]
      mid = signals.shape[-1] // 2
      real = np.expand_dims(signals[..., :mid], axis=-1)
      imag = np.expand_dims(signals[..., mid:], axis=-1)
      signals = np.concatenate((real, imag), axis=-1)
    else:
      # convert matrix to [sequence, num. neurons, 1]
      signals = np.expand_dims(signals, axis=-1)
    hparams.num_channels = signals.shape[-1]
    print('signals shape {}'.format(signals.shape))
  print('\nsignals min {:.04f}, max {:.04f}, mean {:.04f}'.format(
      np.min(signals), np.max(signals), np.mean(signals)))
  # normalize signals to [0, 1]; min/max are kept so inference can invert it
  hparams.signals_min = np.min(signals)
  hparams.signals_max = np.max(signals)
  if hparams.normalize:
    print('\napply normalization')
    signals = normalize(signals, hparams.signals_min, hparams.signals_max)
    print('signals min {:.04f}, max {:.04f}, mean {:.04f}'.format(
        np.min(signals), np.max(signals), np.mean(signals)))
  print('\nsignals shape {}, spikes shape {}'.format(signals.shape,
                                                     spikes.shape))
  return signals, spikes
def _bytes_feature(value):
  """Wrap a raw byte string in a tf.train.Feature."""
  bytes_list = tf.train.BytesList(value=[value])
  return tf.train.Feature(bytes_list=bytes_list)
def serialize_example(signal, spike):
  """Serialize one (signal, spike) segment pair to a tf.train.Example
  byte string with features 'signal' and 'spike'."""
  feature_map = {
      'signal': _bytes_feature(signal.tostring()),
      'spike': _bytes_feature(spike.tostring())
  }
  proto = tf.train.Example(features=tf.train.Features(feature=feature_map))
  return proto.SerializeToString()
def get_record_filename(hparams, mode, shard_id, num_shards):
  """Build '<output_dir>/<mode>-XXX-of-YYY.record' (shard index is 1-based)."""
  basename = '{}-{:03d}-of-{:03d}.record'.format(mode, shard_id + 1, num_shards)
  return os.path.join(hparams.output_dir, basename)
def write_to_record(hparams, mode, shard, num_shards, signals, spikes, indexes):
  """Serialize the segments selected by `indexes` into one TFRecord shard."""
  record_filename = get_record_filename(hparams, mode, shard, num_shards)
  print('writing {} segments to {}...'.format(len(indexes), record_filename))
  with tf.io.TFRecordWriter(record_filename) as writer:
    for i in indexes:
      example = serialize_example(signals[i], spikes[i])
      writer.write(example)
def write_to_records(hparams, mode, signals, spikes, indexes):
  """Split `indexes` across shards and write one TFRecord file per shard.

  Side effect: stores the shard count on hparams as num_train_shards or
  num_validation_shards depending on `mode`.
  """
  if not os.path.exists(hparams.output_dir):
    os.makedirs(hparams.output_dir)
  # calculate the number of records to create
  num_shards = 1 if hparams.num_per_shard == 0 else ceil(
      len(indexes) / hparams.num_per_shard)
  print('writing {} segments to {} {} records...'.format(
      len(indexes), num_shards, mode))
  if mode == 'train':
    hparams.num_train_shards = num_shards
  else:
    hparams.num_validation_shards = num_shards
  sharded_indexes = split(indexes, num_shards)
  for shard in range(num_shards):
    write_to_record(
        hparams,
        mode=mode,
        shard=shard,
        num_shards=num_shards,
        signals=signals,
        spikes=spikes,
        indexes=sharded_indexes[shard],
    )
def main(hparams):
  """Convert the pickled recordings into shuffled train/validation
  TFRecords plus an info.pkl describing the dataset."""
  if not os.path.exists(hparams.input):
    print('input file {} does not exists'.format(hparams.input))
    exit()
  if os.path.exists(hparams.output_dir):
    if hparams.replace:
      rmtree(hparams.output_dir)
    else:
      print('output directory {} already exists\n'.format(hparams.output_dir))
      exit()
  signals, spikes = get_segments(hparams)
  # shuffle data (seeded at module level for reproducibility)
  indexes = np.arange(len(signals))
  np.random.shuffle(indexes)
  hparams.train_size = len(signals) - hparams.validation_size
  hparams.signal_shape = signals.shape[1:]
  hparams.spike_shape = spikes.shape[1:]
  hparams.num_per_shard = calculate_num_per_shard(hparams)
  print('\n{} segments in each shard with target shard size {}'.format(
      hparams.num_per_shard, hparams.target_shard_size))
  # First train_size shuffled segments go to train, the rest to validation.
  write_to_records(
      hparams,
      mode='train',
      signals=signals,
      spikes=spikes,
      indexes=indexes[:hparams.train_size])
  write_to_records(
      hparams,
      mode='validation',
      signals=signals,
      spikes=spikes,
      indexes=indexes[hparams.train_size:])
  # save information of the dataset for the training pipeline
  with open(os.path.join(hparams.output_dir, 'info.pkl'), 'wb') as file:
    info = {
        'train_size': hparams.train_size,
        'validation_size': hparams.validation_size,
        'signal_shape': hparams.signal_shape,
        'spike_shape': hparams.spike_shape,
        'sequence_length': hparams.sequence_length,
        'num_neurons': hparams.num_neurons,
        'num_channels': hparams.num_channels,
        'num_train_shards': hparams.num_train_shards,
        'num_validation_shards': hparams.num_validation_shards,
        'buffer_size': min(hparams.num_per_shard, hparams.train_size),
        'normalize': hparams.normalize,
        'stride': hparams.stride,
        'fft': hparams.fft,
        'conv2d': hparams.conv2d,
    }
    if hparams.normalize:
      # needed to invert the [0, 1] scaling at inference time
      info['signals_min'] = hparams.signals_min
      info['signals_max'] = hparams.signals_max
    pickle.dump(info, file)
  print('saved {} TFRecords to {}'.format(
      hparams.num_train_shards + hparams.num_validation_shards,
      hparams.output_dir))
if __name__ == '__main__':
  # Command-line entry point: parse options and run the conversion.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--input', default='raw_data/ST260_Day4_signals4Bryan.pkl', type=str)
  parser.add_argument('--output_dir', default='tfrecords', type=str)
  parser.add_argument('--sequence_length', default=2048, type=int)
  parser.add_argument('--stride', default=2, type=int)
  parser.add_argument('--normalize', action='store_true')
  parser.add_argument('--fft', action='store_true')
  parser.add_argument('--conv2d', action='store_true')
  parser.add_argument('--replace', action='store_true')
  # Fix: validation_size is used as an array slice bound in main()
  # (train_size = len(signals) - validation_size; indexes[:train_size]),
  # so it must be an int -- type=float made train_size a float and broke
  # the slicing.
  parser.add_argument('--validation_size', default=1000, type=int)
  parser.add_argument('--is_dg_data', action='store_true')
  parser.add_argument(
      '--target_shard_size',
      default=0.5,
      type=float,
      help='target size in GB for each TFRecord file.')
  hparams = parser.parse_args()
  main(hparams)
| bryanlimy/CalciumGAN | dataset/generate_tfrecords.py | generate_tfrecords.py | py | 8,641 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
... |
70943334823 | import time
import smbus
from datetime import datetime, timedelta
import arrow # local/utc conversions
# set I2c bus addresses of clock module and non-volatile ram
DS3231ADDR = 0x68 #known versions of DS3231 use 0x68
AT24C32ADDR = 0x57 #older boards use 0x56
I2C_PORT = 1 #valid ports are 0 and 1
def _bcd_to_int(bcd):
"""
Decode a 2x4bit BCD to a integer.
"""
out = 0
for digit in (bcd >> 4, bcd):
for value in (1, 2, 4, 8):
if digit & 1:
out += value
digit >>= 1
out *= 10
return out / 10
def _int_to_bcd(number):
"""
Encode a one or two digits number to the BCD format.
"""
bcd = 0
for idx in (number // 10, number % 10):
for value in (8, 4, 2, 1):
if idx >= value:
bcd += 1
idx -= value
bcd <<= 1
return bcd >> 1
def _set_bit(value,index,state):
"""
Set bit given by index (zero-based) in value to state and return the result
"""
mask = 1 << index
value &= ~mask
if state:
value |= mask
return value
def _local2utc(dtime):
    """
    Convert a naive local-time datetime to a naive UTC datetime.
    """
    return arrow.get(dtime, 'local').to('utc').naive
def _utc2local(dtime):
    """
    Convert a naive UTC datetime to a naive local-time datetime.
    """
    return arrow.get(dtime, 'utc').to('local').naive
class ds3231(object):
    """
    Driver for the DS3231 real-time-clock module (with onboard AT24C32
    EEPROM) on the I2C bus: time keeping, alarms, temperature and NVRAM.
    """
    # DS3231 register addresses (datasheet register map).
    _SECONDS_REGISTER = 0x00
    _MINUTES_REGISTER = 0x01
    _HOURS_REGISTER = 0x02
    _DAY_OF_WEEK_REGISTER = 0x03
    _DAY_OF_MONTH_REGISTER = 0x04
    _MONTH_REGISTER = 0x05
    _YEAR_REGISTER = 0x06
    _ALARM1_OFFSET = 0x07
    _ALARM1_SEC_REGISTER = 0x07
    _ALARM1_MIN_REGISTER = 0x08
    _ALARM1_HOUR_REGISTER = 0x09
    _ALARM1_DATE_REGISTER = 0x0A
    _ALARM2_OFFSET = 0x0B
    _ALARM2_MIN_REGISTER = 0x0B
    _ALARM2_HOUR_REGISTER = 0x0C
    _ALARM2_DATE_REGISTER = 0x0D
    _CONTROL_REGISTER = 0x0E
    _STATUS_REGISTER = 0x0F
    _AGING_REGISTER = 0x10  # unused
    _TEMP_MSB_REGISTER = 0x11
    _TEMP_LSB_REGISTER = 0x12

    def __init__(self, port=I2C_PORT, utc=True, addr=DS3231ADDR, at24c32_addr=AT24C32ADDR):
        """
        Open the I2C bus and remember the device addresses.

        :param port: I2C bus number (0 or 1)
        :param utc: if True the RTC stores UTC and reads/writes convert
                    to/from local time
        :param addr: I2C address of the DS3231
        :param at24c32_addr: I2C address of the AT24C32 EEPROM
        """
        self._bus = smbus.SMBus(port)  # valid ports are 0 and 1
        self._utc = utc
        self._addr = addr
        self._at24c32_addr = at24c32_addr
    ###########################
    # DS3231 real time clock functions
    ###########################
    def _write(self, register, data):
        """Write one byte *data* to DS3231 register *register*."""
        self._bus.write_byte_data(self._addr, register, data)

    def _read(self, data):
        """Read and return one byte from DS3231 register *data*."""
        return self._bus.read_byte_data(self._addr, data)
    def _read_seconds(self):
        """Return the seconds field [0..59] from the RTC."""
        return _bcd_to_int(self._read(self._SECONDS_REGISTER) & 0x7F)  # wipe out the oscillator on bit

    def _read_minutes(self):
        """Return the minutes field [0..59] from the RTC."""
        return _bcd_to_int(self._read(self._MINUTES_REGISTER))

    def _read_hours(self):
        """Return the hours field [0..23] from the RTC."""
        tmp = self._read(self._HOURS_REGISTER)
        if tmp == 0x64:
            # NOTE(review): special-cases raw value 0x64 before masking;
            # presumably a quirk of the hour register -- confirm on hardware.
            tmp = 0x40
        return _bcd_to_int(tmp & 0x3F)

    def _read_day(self):
        """Return the day-of-week field [1..7] from the RTC."""
        return _bcd_to_int(self._read(self._DAY_OF_WEEK_REGISTER))

    def _read_date(self):
        """Return the day-of-month field [1..31] from the RTC."""
        return _bcd_to_int(self._read(self._DAY_OF_MONTH_REGISTER))

    def _read_month(self):
        """Return the month field [1..12] (century bit masked off)."""
        return _bcd_to_int(self._read(self._MONTH_REGISTER) & 0x1F)

    def _read_year(self):
        """Return the two-digit year field [0..99] from the RTC."""
        return _bcd_to_int(self._read(self._YEAR_REGISTER))
    def read_all(self):
        """
        Return a tuple such as (year, month, daynum, dayname, hours, minutes, seconds).
        """
        return (self._read_year(), self._read_month(), self._read_date(),
                self._read_day(), self._read_hours(), self._read_minutes(),
                self._read_seconds())

    def read_str(self):
        """
        Return a string such as 'YY-DD-MMTHH-MM-SS'.
        """
        return '%02d-%02d-%02dT%02d:%02d:%02d' % (self._read_year(),
                self._read_month(), self._read_date(), self._read_hours(),
                self._read_minutes(), self._read_seconds())

    def read_datetime(self):
        """
        Return the RTC time as a naive datetime.datetime (converted to
        local time when the clock stores UTC).
        """
        dtime = datetime(2000 + self._read_year(),
                self._read_month(), self._read_date(), self._read_hours(),
                self._read_minutes(), self._read_seconds(), 0)
        if (self._utc):
            return _utc2local(dtime)
        else:
            return dtime
    def write_all(self, seconds=None, minutes=None, hours=None, day_of_week=None,
                  day_of_month=None, month=None, year=None):
        """
        Direct write each user specified value; None leaves a field untouched.
        Range: seconds [0,59], minutes [0,59], hours [0,23],
        day_of_week [1,7], day_of_month [1-31], month [1-12], year [0-99].

        :raises ValueError: if any given value is outside its range
        """
        if seconds is not None:
            if seconds < 0 or seconds > 59:
                raise ValueError('Seconds is out of range [0,59].')
            seconds_reg = _int_to_bcd(seconds)
            self._write(self._SECONDS_REGISTER, seconds_reg)
        if minutes is not None:
            if minutes < 0 or minutes > 59:
                raise ValueError('Minutes is out of range [0,59].')
            self._write(self._MINUTES_REGISTER, _int_to_bcd(minutes))
        if hours is not None:
            if hours < 0 or hours > 23:
                raise ValueError('Hours is out of range [0,23].')
            self._write(self._HOURS_REGISTER, _int_to_bcd(hours))  # not | 0x40 according to datasheet
        if year is not None:
            if year < 0 or year > 99:
                raise ValueError('Years is out of range [0, 99].')
            self._write(self._YEAR_REGISTER, _int_to_bcd(year))
        if month is not None:
            if month < 1 or month > 12:
                raise ValueError('Month is out of range [1, 12].')
            self._write(self._MONTH_REGISTER, _int_to_bcd(month))
        if day_of_month is not None:
            if day_of_month < 1 or day_of_month > 31:
                raise ValueError('Day_of_month is out of range [1, 31].')
            self._write(self._DAY_OF_MONTH_REGISTER, _int_to_bcd(day_of_month))
        if day_of_week is not None:
            if day_of_week < 1 or day_of_week > 7:
                raise ValueError('Day_of_week is out of range [1, 7].')
            self._write(self._DAY_OF_WEEK_REGISTER, _int_to_bcd(day_of_week))
    def write_datetime(self, dtime):
        """
        Write from a datetime.datetime object (converted to UTC first
        when the clock stores UTC).  Only the two-digit year is stored.
        """
        if (self._utc):
            dtime = _local2utc(dtime)
        self.write_all(dtime.second, dtime.minute, dtime.hour,
                       dtime.isoweekday(), dtime.day, dtime.month, dtime.year % 100)

    def write_system_datetime_now(self):
        """
        shortcut version of "DS3231.write_datetime(datetime.datetime.now())".
        """
        self.write_datetime(datetime.now())
    #######################################################################
    # SDL_DS3231 alarm handling. Recurring alarms are currently unsupported.
    ########################################################################
    def set_alarm_time(self, alarm, dtime):
        """
        Set alarm to given time-point. Note: although this method has a
        full datetime-value as input, only the values of day-of-month,
        hours, minutes and seconds (only alarm1) are used.

        :param alarm: alarm number, 1 or 2 (alarm 2 has no seconds register)
        """
        if (self._utc):
            dtime = _local2utc(dtime)
        if alarm == 1:
            self._write(self._ALARM1_SEC_REGISTER, _int_to_bcd(dtime.second))
            self._write(self._ALARM1_MIN_REGISTER, _int_to_bcd(dtime.minute))
            self._write(self._ALARM1_HOUR_REGISTER, _int_to_bcd(dtime.hour))
            self._write(self._ALARM1_DATE_REGISTER, _int_to_bcd(dtime.day))
        else:
            self._write(self._ALARM2_MIN_REGISTER, _int_to_bcd(dtime.minute))
            self._write(self._ALARM2_HOUR_REGISTER, _int_to_bcd(dtime.hour))
            self._write(self._ALARM2_DATE_REGISTER, _int_to_bcd(dtime.day))
    def get_alarm_time(self, alarm, convert=True):
        """
        Query the given alarm and construct a valid datetime-object or
        a tuple (day-of-month/day-of-week,hour,min,sec) depending on the
        convert flag.  A set mask bit (0x80) in a register means that
        field does not participate in the alarm match.
        """
        # seconds
        is_interval = False  # NOTE(review): assigned but never used
        if alarm == 1:
            buffer = self._read(self._ALARM1_SEC_REGISTER)
            if buffer & 0x80:
                # we fire every second
                if not convert:
                    return (None, None, None, None, None)
                else:
                    return datetime.now()  # not very sensible
            sec = _bcd_to_int(buffer & 0x7F)
            offset = self._ALARM1_OFFSET + 1
        else:
            # alarm 2 has no seconds register
            sec = 0
            offset = self._ALARM2_OFFSET
        # minutes
        buffer = self._read(offset)
        if buffer & 0x80:
            # alarm when seconds match
            if not convert:
                return (None, None, None, None, sec)
            else:
                return self._next_dt_match(alarm, None, None, None, None, sec)
        min = _bcd_to_int(buffer & 0x7F)  # (shadows builtin min locally)
        # hour
        offset = offset + 1
        buffer = self._read(offset)
        if buffer & 0x80:
            # alarm when minutes match
            if not convert:
                return (None, None, None, min, sec)
            else:
                return self._next_dt_match(alarm, None, None, None, min, sec)
        hour = _bcd_to_int(buffer & 0x7F)
        # day-in-month/day-of-week
        offset = offset + 1
        buffer = self._read(offset)
        if buffer & 0x80:
            # alarm when hour match
            if not convert:
                return (None, None, hour, min, sec)
            else:
                return self._next_dt_match(alarm, None, None, hour, min, sec)
        elif buffer & 0x40:
            # DY/DT (bit 6) is 1: register holds a day-of-week
            weekday = _bcd_to_int(buffer & 0x3F)
            day = None
        else:
            weekday = None
            day = _bcd_to_int(buffer & 0x3F)
        if not convert:
            return (day, weekday, hour, min, sec)
        else:
            return self._next_dt_match(alarm, day, weekday, hour, min, sec)
    def _next_dt_match(self, alarm, day, weekday, hour, min, sec):
        """
        Calculate the next alarm datetime (in case alarm did not fire yet)
        or the last datetime if the alarm fired.
        The former is exact while the latter is just a best guess - the
        alarm could already have fired way in the past.
        """
        if (self._utc):
            now = datetime.utcnow()
        else:
            now = datetime.now()
        # convert weekday to day of month
        if not weekday == None:
            now = now + timedelta((weekday - now.weekday() + 7) % 7)
            day = now.day
        year = now.year
        month = now.month
        enabled, fired = self.get_alarm_state(alarm)
        # first try: assume alarm is in the curren month
        # we try to create a valid datetime object
        try:
            alarm_dtime = datetime(year, month, day, hour, min, sec)
        except ValueError:
            # day-of-month might not be valid for current month!
            # no year roll-over necessary, since this won't happen
            # for December or January
            if fired:
                month = month - 1  # alarm must have been in the past
            else:
                month = month + 1  # alarm date is in the future
            alarm_dtime = datetime(year, month, day, hour, min, sec)
        # check if first alarm-datetime is correct
        # this depends on the state of the alarm
        if now > alarm_dtime and not fired:
            # alarm did not fire yet, but must be in the future
            month = month + 1
            if month > 12:
                month = 1
                year = year + 1
        elif now < alarm_dtime and fired:
            # alarm fired, but must be in the past
            month = month - 1
            if month == 0:
                month = 12
                year = year - 1
        else:
            # proper alignment of now and alarm_dtime, so return it
            if (self._utc):
                return _utc2local(alarm_dtime)
            else:
                return alarm_dtime
        # second try: we moved the alarm one month, but the day-of-month
        # might not be valid for the new month
        try:
            alarm_dtime = datetime(year, month, day, hour, min, sec)
        except ValueError:
            # again, no year roll-over necessary
            if fired:
                month = month - 1  # alarm is in the past, go back in time
            else:
                month = month + 1  # alarm waiting, i.e. in the future
            alarm_dtime = datetime(year, month, day, hour, min, sec)
        if (self._utc):
            return _utc2local(alarm_dtime)
        else:
            return alarm_dtime
    def get_alarm_state(self, alarm):
        """
        Query the state of the alarm. Returns a tuple (enabled,fired)
        of two booleans, read from the control (AxIE) and status (AxF)
        registers respectively.
        """
        control = self._read(self._CONTROL_REGISTER)
        status = self._read(self._STATUS_REGISTER)
        return (bool(control & alarm), bool(status & alarm))

    def clear_alarm(self, alarm):
        """
        Clear the given alarm (set A1F or A2F in the status-register to zero)
        """
        status = self._read(self._STATUS_REGISTER)
        status &= ~alarm
        self._write(self._STATUS_REGISTER, status)

    def set_alarm(self, alarm, state):
        """
        Set the given alarm-flag A1IE or A2IE in the control-register to the
        desired state (0 or 1)
        """
        control = self._read(self._CONTROL_REGISTER)
        control = _set_bit(control, alarm - 1, state)
        self._write(self._CONTROL_REGISTER, control)
def dump_value(self,value):
"""
Dump a value as hex and binary string
"""
return "0x{0:02X} 0b{0:08b}".format(value,value)
def dump_register(self,reg):
"""
Read and return a raw register as binary string
"""
return self.dump_value(self._read(reg))
    ###########################
    # SDL_DS3231 module onboard temperature sensor
    ###########################
    def get_temp(self):
        """
        Return the chip's temperature reading: the MSB register plus the
        top two bits of the LSB register weighted 0.5 and 0.25.
        NOTE(review): negative readings (two's-complement MSB) are not
        handled here.
        """
        byte_tmsb = self._bus.read_byte_data(self._addr, self._TEMP_MSB_REGISTER)
        byte_tlsb = bin(self._bus.read_byte_data(self._addr,
                self._TEMP_LSB_REGISTER))[2:].zfill(8)
        return byte_tmsb + int(byte_tlsb[0]) * 2 ** (-1) + int(byte_tlsb[1]) * 2 ** (-2)
###########################
# AT24C32 non-volatile ram Code
###########################
def set_current_at24c32_address(self, address):
"""
???
"""
addr1 = address / 256
addr0 = address % 256
self._bus.write_i2c_block_data(self._at24c32_addr, addr1, [addr0])
def read_at24c32_byte(self, address):
"""
???
"""
self.set_current_at24c32_address(address)
return self._bus.read_byte(self._at24c32_addr)
def write_at24c32_byte(self, address, value):
"""
???
"""
addr1 = address / 256
addr0 = address % 256
self._bus.write_i2c_block_data(self._at24c32_addr, addr1, [addr0, value])
time.sleep(0.20)
| bablokb/pi-wake-on-rtc | files/usr/local/sbin/ds3231.py | ds3231.py | py | 15,996 | python | en | code | 37 | github-code | 36 | [
{
"api_name": "arrow.get",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "arrow.get",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "smbus.SMBus",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number"... |
21663810901 | #!/usr/bin/env python3
import random, argparse

# Randomly assign two students per table in a rows x cols seating grid.
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--rows", help="how many rows defaults to 2")
parser.add_argument("-c", "--cols", help="how many cols defaults to 2")
args = parser.parse_args()

# Parse row/col counts, falling back to 2 on missing or non-integer input.
# Fix: the original used bare `except:` clauses, which would also swallow
# KeyboardInterrupt/SystemExit; only int() conversion errors are expected.
if args.rows:
    try:
        rows = int(args.rows)
    except ValueError:
        print("Arguments must be integer... using 2 as default rows\n")
        rows = 2
else:
    rows = 2

if args.cols:
    try:
        cols = int(args.cols)
    except ValueError:
        print("Arguments must be integers... using 2 as default cols\n")
        cols = 2
else:
    cols = 2

name_ls = ["Nate", "Ben", "Laurent", "Gabe", "Jessy", "Imran", "Dionte", "Sakib"]
random.shuffle(name_ls)

string = "FRONT OF CLASS"
#print(f"{string:^50}")
#longest_name_len = max(len(x) for x in name_ls)
for i in range(cols):
    for j in range(rows):
        # Fix: two students are seated per table, so require at least two
        # remaining names; the original only checked for a non-empty list
        # and raised IndexError on the second pop with an odd roster.
        if len(name_ls) >= 2:
            # 2 per table so pop both names from list then put in front left.
            name1 = name_ls.pop(0)
            name2 = name_ls.pop(0)
            #table = f"{name1:^{longest_name_len}} {name2:^{longest_name_len}} "
            #can't use thing above because it doesn't like text formatting that way
            #11 and 7 just got from experimenting and playing around with names
            table = f"{name1:>11} {name2:<7}"
            #print(table, end=" ")
    #print("\n")

print("RIP SEATING CHART \nYou guys make me sad")
| nathankurt/random-seat-teams-bot | random_seats.py | random_seats.py | py | 1,528 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 30,
"usage_type": "call"
}
] |
23677049937 | import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
#import matplotlib as plt
import matplotlib.pyplot as plt
from pyFTS.common import Util
from pyFTS.benchmarks import Measures
from pyFTS.partitioners import Grid, Entropy
from pyFTS.models import hofts
from pyFTS.common import Membership
# Load the solar irradiance dataset (semicolon-separated) and parse timestamps.
dataset = pd.read_csv('https://query.data.world/s/2bgegjggydd3venttp3zlosh3wpjqj', sep=';')
dataset['data'] = pd.to_datetime(dataset["data"], format='%Y-%m-%d %H:%M:%S')

# Univariate train/test split on the global average irradiance column.
train_uv = dataset['glo_avg'].values[:24505]
test_uv = dataset['glo_avg'].values[24505:]

# Multivariate split over the full frame (same cut point).
train_mv = dataset.iloc[:24505]
test_mv = dataset.iloc[24505:]

dataset.head()

models = []

# Plot a short window (240 samples) above the full training series.
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=[20,5])
ax[0].plot(train_uv[:240])
ax[1].plot(train_uv)

from statsmodels.tsa.stattools import acf

# Autocorrelation up to 48 lags, to pick model orders.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[15,5])
ax.plot(acf(train_uv, nlags=48))
ax.set_title("Autocorrelation")
ax.set_ylabel("ACF")
ax.set_xlabel("LAG")

from itertools import product

# Build 35 fuzzy-set names: 5 levels x 7 sub-levels (e.g. 'VL0'..'VH6').
levels = ['VL', 'L', 'M', 'H', 'VH']
sublevels = [str(k) for k in np.arange(0, 7)]
names = []
for combination in product(*[levels, sublevels]):
    names.append(combination[0] + combination[1])
print(names)

# Grid partitioner over the training data using the generated names.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[15,3])
part = Grid.GridPartitioner(data=train_uv, npart=35, names=names)
part.plot(ax)
| minhazul-alam/Fuzzy_Systems | FuzzyPracticeCodes/solarfts.py | solarfts.py | py | 1,382 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.... |
1411699424 | import os
from vk import VK
from ya_disk import YaDisk
import json
from datetime import datetime
from tqdm import tqdm
from dotenv import load_dotenv
def main():
    """Back up a VK user's profile photos to Yandex.Disk.

    Prompts for the VK user id and photo count, uploads the largest size
    of each photo into a per-user folder, and writes an index of uploaded
    files to result.json.
    """
    vk_user_id = input('Enter VK user ID (only digits): ')
    num_photos = int(input('Enter the number of photos to save (default is 5): ') or 5)
    # API tokens come from a .env file (keys: ya_token / vk_token).
    load_dotenv()
    token_ya = os.getenv('ya_token')
    token_vk = os.getenv('vk_token')
    try:
        vk = VK(token_vk)
        photos = vk.get_photos(vk_user_id, count=num_photos)
        result = []
        ya_disk = YaDisk(token_ya)
        folder_name = f'VK_Photos_{vk_user_id}'
        ya_disk.create_folder(folder_name)
        for photo in tqdm(photos, desc='Uploading photos', unit='photo', ncols=80):
            # File name encodes likes count and upload date: <likes>_<date>.jpg
            likes = photo['likes']['count']
            date = datetime.fromtimestamp(photo['date']).strftime('%Y-%m-%d')
            file_name = f'{likes}_{date}.jpg'
            # sizes[-1] is the largest available resolution.
            photo_url = photo['sizes'][-1]['url']
            if ya_disk.upload_photo(folder_name, file_name, photo_url):
                result.append({
                    'file_name': file_name,
                    'size': 'z',
                })
        # Persist the manifest of successfully uploaded photos.
        with open('result.json', 'w') as json_file:
            json.dump(result, json_file, ensure_ascii=False, indent=4)
        print(f'The photos has been successfully uploaded to Yandex.Disk.')
    except Exception as e:
        # NOTE(review): broad catch reports any failure as one message.
        print(f'An error occurred. {str(e)}')


if __name__ == '__main__':
    main()
| kanadass/photo_backup_cw | main.py | main.py | py | 1,466 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "vk.VK",
"line_number": 19,... |
21753084092 | # -*- coding: utf-8 -*-
import os
import tensorflow as tf
from PIL import Image

# Write every image under ./snow/ (resized to 224x224) into a TFRecord,
# then read the records back and print their contents.
writer = tf.python_io.TFRecordWriter("train.tfrecords")
images_path = "./snow/"
classes = {'snow'}
for index, name in enumerate(classes):
    for img_name in os.listdir(images_path):
        img_path = images_path + img_name
        img = Image.open(img_path)
        img = img.resize((224, 224))
        img_raw = img.tobytes()
        example = tf.train.Example(features=tf.train.Features(feature={
            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
        }))
        writer.write(example.SerializeToString())
writer.close()

for serialized_example in tf.python_io.tf_record_iterator("train.tfrecords"):
    example = tf.train.Example()
    example.ParseFromString(serialized_example)
    # Fix: records are written under the feature key 'img_raw', but the
    # original read 'image', which always yielded an empty value.
    image = example.features.feature['img_raw'].bytes_list.value
    label = example.features.feature['label'].int64_list.value
    print("image is : %s" % image)
    print("label is : %s" % label)
{
"api_name": "tensorflow.python_io.TFRecordWriter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tensorflow.python_io",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name":... |
72052973863 | from starlette.applications import Starlette
from starlette.responses import JSONResponse
from starlette.routing import Route
import uvicorn
async def homepage(request):
    """Root endpoint: return a static hello-world JSON body."""
    payload = {'hello': 'world'}
    return JSONResponse(payload)
async def empty_route(request):
    """Placeholder endpoint that just reports it does nothing."""
    body = {'new route': 'this route does nothing'}
    return JSONResponse(body)
async def contact(request):
    """Serve a static HTML contact page.

    Fix: ``HTMLResponse`` was never imported (only ``JSONResponse`` is at
    module level), so this handler raised ``NameError`` on every request.
    The import is done locally to keep this edit self-contained.
    """
    from starlette.responses import HTMLResponse

    contact_page = """
    <html>
        <head>Contact Us</head>
        <body>
            <p>send an email to: example@hotmail.com</p>
        </body>
    </html>
    """
    return HTMLResponse(content=contact_page)
# Route table for the application; debug=True enables tracebacks in responses.
app = Starlette(debug=True, routes=[
    Route('/', homepage),
    Route('/empty', empty_route),
    Route('/contact', contact)
])

if __name__ == "__main__":
    # Serve on all interfaces, port 8000, when run directly.
    uvicorn.run(app, host='0.0.0.0', port=8000)
| MuelGoh/starlette_project | main.py | main.py | py | 793 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "starlette.responses.JSONResponse",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "starlette.responses.JSONResponse",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "starlette.applications.Starlette",
"line_number": 27,
"usage_type": "cal... |
8674468084 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/10/25 15:03
# @Author : Hanchiao
# @File : antColony.py
import numpy as np
from enum import Enum
from itertools import count
import statistics
import matplotlib.pyplot as plt
import sys
import time
def _custom_create(custom_generate, pool):
    """Build a fresh ant (partial Solution) from the user-supplied generator.

    ``custom_generate`` returns a start location and the set of locations
    still accessible from it.
    """
    start, reachable = custom_generate()
    return Solution([start], pool, _access=reachable)
def _custom_route(ant, selectMap, custom_routing, get_fitness):
    """Walk *ant* forward until no accessible node remains; return the tour.

    Each step asks ``selectMap`` for the selection probabilities from the
    current endpoint and lets ``custom_routing`` pick the next move.
    """
    tour = ant.Route[:]
    remaining = ant.Access[:]
    while remaining:
        step_probs = selectMap.prob_query(tour[-1], remaining)
        tour, remaining = custom_routing(tour, remaining, step_probs)
    return Solution(tour, ant.Pool, fitness=get_fitness(tour))
def get_best(items_num, opt_fitness,
             get_fitness, display,
             custom_generate=None, custom_move=None,
             forget_rate=0.5, alpha=1, beta=1, poolSize=10,
             generation=None):
    """Run the ant-colony search and return the best solution found.

    :param items_num: number of selectable items (size of the pheromone map)
    :param opt_fitness: target fitness; the loop stops early once reached
    :param get_fitness: callable mapping a route to a Fitness object
    :param display: callback invoked on every improvement
    :param custom_generate: user hook producing (start, accessible) pairs
    :param custom_move: user hook choosing the next move from probabilities
    :param forget_rate: pheromone evaporation factor per generation
    :param alpha, beta: pheromone / heuristic exponents of the probability map
    :param poolSize: number of ants per generation
    :param generation: maximum number of generations (None = until optimum)
    :return: (best Solution, per-generation mean fitness, best-so-far fitness)
    """
    historical_best_fitness = []
    generation_mean_fitness = []
    select_map = SelectProbMap(items_num, get_fitness, _forget=forget_rate, _alpha=alpha, _beta=beta)
    # When no hooks are given the strategies are no-ops (placeholder mode).
    if custom_generate is None:
        def fnCreate(pool):
            pass
    else:
        def fnCreate(pool):
            return _custom_create(custom_generate, pool)
    if custom_move is None:
        def fnMove(candidate):
            pass
    else:
        def fnMove(candidate):
            return _custom_route(candidate, select_map, custom_move, get_fitness)
    strategyLookup = {
        Strategies.Create: lambda pool=1: fnCreate(pool),
        Strategies.Explore: lambda candidate: fnMove(candidate)
    }
    def _get_improvement(strategyLookup, poolSize, iteration=None):
        # Generator yielding each new best-so-far solution.
        pool = [strategyLookup[Strategies.Create](i) for i in range(poolSize)]  # initialize the ant colony
        _generation = count(0)
        routes = [strategyLookup[Strategies.Explore](pool[pool_index]) for pool_index in range(poolSize)]  # explore one full solution per ant
        best = max(routes)
        generation_mean_fitness.append(best.Fitness.eval())
        historical_best_fitness.append(best.Fitness.eval())
        yield best
        while True:
            generation_num = next(_generation)
            if iteration is not None:
                if generation_num > iteration - 1:
                    break
            routes = [strategyLookup[Strategies.Explore](pool[pool_index]) for pool_index in range(poolSize)]
            local_best = max(routes)
            # NOTE: Solution only defines __gt__; `best < local_best` works
            # via Python's reflected comparison (local_best.__gt__(best)).
            if best < local_best:
                best = local_best
                yield best
            _w = [routes[i].Fitness.eval() for i in range(poolSize)]
            generation_mean_fitness.append(np.sum(_w) / poolSize)
            historical_best_fitness.append(best.Fitness.eval())
            select_map.update(routes, best)
            # print("{}".format(select_map))
    improvement = None
    for improvement in _get_improvement(strategyLookup, poolSize, generation):
        display(improvement)
        # Stop as soon as the target fitness is met or exceeded.
        if not opt_fitness > improvement.Fitness:
            historical_best_fitness.append(improvement.Fitness.eval())
            generation_mean_fitness.append(generation_mean_fitness[-1])
            break
    return improvement, generation_mean_fitness, historical_best_fitness
class Solution:
    """A candidate route plus its fitness; instances order by Fitness."""

    def __init__(self, route, _pool, fitness=None, _access=None):
        # Defensive copy so the caller's list cannot mutate this solution.
        self.Route = list(route)
        self.Fitness = fitness
        self.Pool = _pool
        self.Access = _access

    def __gt__(self, other):
        # Delegate ordering entirely to the Fitness objects.
        return self.Fitness > other.Fitness
class SelectProbMap:
    """Pheromone matrix plus the derived selection-probability matrix.

    Probability[i, j] combines pheromone strength (exponent Alpha) with the
    heuristic desirability of item j (exponent Beta).
    """

    def __init__(self, _size, get_fitness, _forget, _alpha, _beta):
        # Pheromone starts uniform at 1; probabilities likewise.
        self.Pheromone = np.ones([_size, _size])
        self.Size = _size
        self.Forget = _forget
        self.Alpha = _alpha
        self.Beta = _beta
        self.fnGetFitness = get_fitness
        self.Probability = np.zeros([_size, _size])
        for _i in range(_size):
            for _j in range(_size):
                self.Probability[_i, _j] = 1

    def prob_query(self, _i, _access):
        """Return normalized selection probabilities from node _i over _access."""
        _sum = sum(self.Probability[_i, _access])
        probability = self.Probability[_i, _access] / _sum
        return probability

    def update(self, _routes, _best):
        """Evaporate pheromone, reinforce the best route, rebuild probabilities."""
        for _i in range(self.Size):
            for _j in range(self.Size):
                if self.Pheromone[_i, _j] > 1e-20:
                    self.Pheromone[_i, _j] *= self.Forget  # pheromone evaporation
        # for _r in _routes:
        #     _fit = self.fnGetFitness(_r.Route)
        #     for _i in range(len(_r.Route)-1):
        #         self.Pheromone[_r.Route[_i], _r.Route[_i+1]] += _fit.info()*1e-8
        #         self.Pheromone[_r.Route[_i+1], _r.Route[_i]] = self.Pheromone[_r.Route[_i], _r.Route[_i+1]]
        best_fit = self.fnGetFitness(_best.Route)  # reinforce pheromone along the best route (symmetrically)
        for _i in range(len(_best.Route)-1):
            self.Pheromone[_best.Route[_i], _best.Route[_i+1]] += best_fit.info()
            self.Pheromone[_best.Route[_i+1], _best.Route[_i]] = self.Pheromone[_best.Route[_i], _best.Route[_i+1]]
        for _i in range(self.Size):  # rebuild the probability map from pheromone and per-item heuristic
            for _j in range(_i):
                _fit = self.fnGetFitness([_j])
                _tmp = np.power(self.Pheromone[_i, _j], self.Alpha) * np.power(_fit.eval(), self.Beta)
                self.Probability[_i, _j] = _tmp
                self.Probability[_j, _i] = _tmp

    def __str__(self):
        return "Pheromone: {}".format(self.Pheromone)
class Strategies(Enum):
    """Ant-colony operation kinds used as dispatch keys (see get_best).

    Fix: the original wrote ``Create = 0,`` — the trailing comma made the
    member's value the tuple ``(0,)`` by accident.  Values are plain ints
    now; the members are only ever used as dictionary keys in this file, so
    callers are unaffected.
    """
    Create = 0
    Explore = 1
class Benchmark:
    """Timing/plotting harness for a solver function."""

    @staticmethod
    def run(function, visualization=False):
        """Run *function* once, report timing/cost stats, optionally plot.

        :param function: zero-arg callable returning
            (best, generation_mean_fitness, historical_best_fitness)
        :param visualization: when True, plot the curves and save a PDF
        """
        timings = []
        optimal_cost = []
        stdout = sys.stdout  # saved so stdout can be restored after a silenced run
        # print("\t{:3}\t{}\t{}".format("No.", "Mean", "Stdev"))
        # pool_size = [0, 5, 15, 50]
        # color = ['salmon', 'sandybrown', 'greenyellow', 'darkturquoise']
        # for i, value in enumerate(neighbor_range):
        for i in range(1):
            startTime = time.time()
            # sys.stdout = None  # avoid the output to be chatty
            best, generation_mean_fitness, historical_best_fitness = function()
            seconds = time.time() - startTime
            optimal_cost.append(best.Fitness.eval())
            sys.stdout = stdout
            if visualization:
                fig = plt.figure()
                ax_1 = fig.add_subplot(111)
                # ax_1.set_aspect(1)
                ax_1.set(ylabel='Collecting value')
                plt.grid(linestyle='--', linewidth=1, alpha=0.3)
                # ax_2 = fig.add_subplot(212)
                # # ax_2.set_aspect(1.2)
                # ax_2.set(xlabel='No. of generation', ylabel='Best so far')
                plt.grid(linestyle='--', linewidth=1, alpha=0.3)
                # fig.tight_layout()
                fig.suptitle('0-1 Bag Problem: Ant Colony Optimization', fontweight="bold")
                x_axis = len(generation_mean_fitness)
                x_axis = list(range(x_axis))
                ax_1.plot(x_axis, generation_mean_fitness, color='b', label="mean value", ls='-')
                ax_1.plot(x_axis, historical_best_fitness, color='g', label="best so far", ls='-')
                # ax_1.plot(x_axis, generation_mean_fitness, color=color[i], label='[{}, {}]'.format(value[0], value[1]), ls='-')
                # ax_2.plot(x_axis, historical_best_fitness, color=color[i], label='[{}, {}]'.format(value[0], value[1]), ls='-')
            timings.append(seconds)
            mean_time = statistics.mean(timings)
            mean_cost = statistics.mean(optimal_cost)
            print("Time Consuming:\t{}\tOptimal Costs:\t{}".format(timings[i], optimal_cost[i]))
            # only display statistics for the first ten runs and then every 10th run after that.
            if i < 10 or i % 10 == 9:
                print("\t{:3}\tMean Time Consuming: {:<3.2f}\tStandard Deviation {:<3.2f}".format(
                    1 + i, mean_time,
                    statistics.stdev(timings, mean_time)
                    if i > 1 else 0))
                print("\t{:3}\tMean Traveling Cost: {:<3.2f}\tStandard Deviation {:<3.2f}".format(
                    1 + i, mean_cost,
                    statistics.stdev(optimal_cost, mean_cost)
                    if i > 1 else 0))
        if visualization:
            ax_1.legend(loc='lower right')
            # ax_2.legend(loc='upper right')
            fig.savefig('../../fig/[tmp-ACO]0-1 Bag Problem.pdf')
| Timber-Ye/intl-opt | AntColony/knapsack-problem/antColony.py | antColony.py | py | 8,503 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.count",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number"... |
9626703877 | import json
import re
import os
import time
import logging
from collections import namedtuple
from paddle.fluid import core
import paddle.fluid as fluid
import numpy as np
# Lightweight record types: a raw document and a segmented document.
Doc = namedtuple("Document", ("id", "title", "content"))
SegDoc = namedtuple("Seg_Document",("id", "title", "content", "seg_content"))
def read_origin_data(filename, *, limit=None):
    """Read a multi-line JSON (JSONL) file into a list of dicts.

    :param filename: path to the JSONL file
    :param limit: stop after roughly this many lines; ``None`` reads everything
    :return: list of parsed JSON objects
    """
    papers = []
    with open(filename, 'r', encoding='utf-8') as handle:
        for line in handle:
            papers.append(json.loads(line))
            if limit is not None and len(papers) >= limit:
                break
    return papers
def dict_doc(documents) -> dict:
    """Index raw documents by id.

    Each value is ``Doc(id, title, content)`` where the title is the first
    sentence and the content is the remaining sentences joined together.

    :param documents: list of dicts with 'id' and 'sentences' keys
    :return: mapping id -> Doc
    """
    indexed = {}
    for raw in documents:
        key = raw.get('id')
        sents = raw.get('sentences')
        indexed[key] = Doc(key, sents[0], "".join(sents[1:]))
    return indexed
def pre_text(text: str) -> str:
"""
do some dirty work for the string
1. delete the punctuation
2. sub the number into <NUM>
:param text:
:return:
"""
punc = re.compile(
r',|/|:|;|:|\'|`|\[|\]|<|>|\?|:|"|\{|\}|\~|!|@|#|\$|%|\^|&|\(|\)|-|=|\_|\+|,|、|‘|’|【|】|·|!|”|“| |…|(|)|」|「|《|》|。|,|\.|。|;|;|\+',
re.S)
res = punc.sub(" ", text)
# num = re.compile("\d+")
# res = num.sub("<NUM>", res)
return res
def time_clock(func):
    """Decorator: print the wall-clock runtime of *func* on every call."""
    def compute_time_clock(*args, **kwargs):
        began = time.time()
        outcome = func(*args, **kwargs)
        elapsed = time.time() - began
        print("Implement function %s, using %.2f s" % (func.__name__, elapsed))
        return outcome
    return compute_time_clock
@time_clock
def hello_word():
    """Demo for the time_clock decorator: print, sleep one second, return 0."""
    print("Hello world")
    time.sleep(1)
    return 0
def get_programname(file):
    """Return *file*'s base name without directory or extension."""
    base = os.path.basename(file)
    return os.path.splitext(base)[0]
def read_json(filename):
    """Load and return the JSON document stored in *filename* (UTF-8)."""
    with open(filename, 'r', encoding='utf-8') as handle:
        return json.load(handle)
def normal_leven(str1, str2):
    """Compute the Levenshtein (edit) distance between two strings.

    Classic dynamic program kept to two rolling rows instead of a flat
    full matrix; the result is identical.
    """
    previous = list(range(len(str1) + 1))
    for j, ch2 in enumerate(str2, 1):
        current = [j]
        for i, ch1 in enumerate(str1, 1):
            substitution = previous[i - 1] + (0 if ch1 == ch2 else 1)
            current.append(min(previous[i] + 1, current[i - 1] + 1, substitution))
        previous = current
    return previous[-1]
def time_clock(func):
    """Decorator: print the wall-clock runtime of *func* on every call.

    NOTE(review): this is a byte-for-byte duplicate of the ``time_clock``
    defined earlier in this module and silently rebinds that name.
    """
    def compute_time_clock(*args, **kwargs):
        start = time.time()
        res = func(*args, **kwargs)
        end = time.time()
        print("Implement function %s, using %.2f s"%(func.__name__ ,end-start))
        return res
    return compute_time_clock
@time_clock
def hello_word():
    """Demo for the time_clock decorator.

    NOTE(review): duplicate of the ``hello_word`` defined earlier in this
    module; this definition shadows the first one.
    """
    print("Hello world")
    time.sleep(1)
    return 0
def set_base(file, keyword='result'):
    """Create (if needed) and return ``./<keyword>/<program name>`` for *file*.

    :param file: path of the calling script (typically ``__file__``)
    :param keyword: name of the parent output directory
    :return: the created (or pre-existing) directory path

    Fix: the original used ``os.path.exists`` + ``os.mkdir``, which raises
    ``FileNotFoundError`` when ``./<keyword>`` itself does not exist yet and
    is racy between the existence check and the creation.  ``os.makedirs``
    with ``exist_ok=True`` handles both cases atomically enough.
    """
    base_flle = get_programname(file)
    base_dir = os.path.join(".", keyword, base_flle)
    os.makedirs(base_dir, exist_ok=True)
    return base_dir
def json_saver(obj, filename):
    """Serialize *obj* as JSON into *filename* (UTF-8)."""
    with open(filename, 'w', encoding='utf-8') as handle:
        json.dump(obj, handle)
@time_clock
def read_train_data(filename) :
    """
    Read train data: blank-line-separated dialogues of tab-separated fields.

    Each non-blank line is split on tabs; every field is passed through
    ``eval`` to recover Python literals, falling back to the raw string.
    The third field (idx == 2) is forced back to str when eval yields int.

    SECURITY NOTE(review): ``eval`` on file content executes arbitrary
    expressions — only use this on trusted data files.

    :param filename: path to the train file
    :return: list of dialogues, each a list of processed field-lists
    """
    corpus = []  # list holding every dialogue
    with open(filename, 'r', encoding='utf-8') as f:
        data = list()  # list holding the current dialogue
        for line in f:
            if line == '\n':
                # Blank line terminates the current dialogue.
                corpus.append(data)
                data = list()
                continue
            raw_data = line.strip().split('\t')
            processed_data = []
            for idx, item in enumerate(raw_data):
                try:
                    temp = eval(item)
                except:
                    temp = item
                finally:
                    if idx == 2 and type(temp)==int:
                        temp = str(temp)
                    processed_data.append(temp)
            data.append(processed_data)
    return corpus
def log(filename="test.log", level=logging.DEBUG):
    """Return the module's singleton file logger, creating its handler once.

    :param filename: log file path used the first time the handler is built
    :param level: logging level applied to the logger
    :return: the configured :class:`logging.Logger`

    Fix: the original also built a ``logging.StreamHandler`` (``ch``) that
    was never added to the logger — dead code removed.  The logger name
    "encocder pretrain" (sic) is kept as-is because it identifies the
    shared logger instance across the codebase.
    """
    logger = logging.getLogger("encocder pretrain")
    logger.setLevel(level)
    # Only attach a handler on first use; later calls reuse the logger.
    if not logger.handlers:
        fh = logging.FileHandler(filename, encoding="utf-8")
        LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
        DATE_FORMAT = "%m/%d/%Y %H:%M:%S %p"
        # Don't bubble records up to the root logger.
        logger.propagate = False
        formatter = logging.Formatter(
            fmt=LOG_FORMAT,
            datefmt=DATE_FORMAT
        )
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    return logger
class Clock():
    """Stub timer: the constructor samples time.time() and discards it."""

    def __init__(self):
        _ = time.time()
def save_model(filename, param_name_list, opt_var_name_list, name=''):
    """Snapshot fluid global-scope variables to two .npz files under *filename*.

    :param filename: output directory
    :param param_name_list: names of model parameters to dump
    :param opt_var_name_list: names of optimizer-state variables to dump
    :param name: optional prefix for both output file names
    :return: a short confirmation message

    NOTE(review): the loop variable ``name`` shadows the ``name`` parameter;
    harmless here because the parameter is consumed before the loops start.
    """
    save_model_file = os.path.join(filename, name + "model_stage_0")
    save_opt_state_file = os.path.join(filename, name + "opt_state_stage_0")
    model_stage_0 = {}
    for name in param_name_list:
        t = np.asarray(fluid.global_scope().find_var(name).get_tensor())
        model_stage_0[name] = t
    np.savez(save_model_file, **model_stage_0)
    opt_state_stage_0 = {}
    for name in opt_var_name_list:
        t_data = np.asarray(fluid.global_scope().find_var(name).get_tensor())
        opt_state_stage_0[name] = t_data
    np.savez(save_opt_state_file, **opt_state_stage_0)
    info_msg = "Finish saving the parameter. "
    return info_msg
def load_model(model_init_file, param_name_list, place, opt_state_init_file='', datatype='float32'):
    """Initialise model parameters (and optionally optimizer state) from .npz files.

    :param model_init_file: .npz file holding the parameter arrays
    :param param_name_list: fluid variable names to populate
    :param place: fluid device place passed to tensor.set
    :param opt_state_init_file: optional .npz file with optimizer state
    :param datatype: dtype the loaded arrays are cast to
    :return: list of messages for parameters that could not be loaded

    Fix: the original ``return loading_msg`` sat *inside* the parameter
    loop, so only the first parameter was ever loaded and the optimizer
    state block below was unreachable.  The return now happens once at the
    end, after all parameters and (optionally) the optimizer state.
    """
    try:
        model_init = np.load(model_init_file)
    except:
        print("load init model failed", model_init_file)
        raise Exception("load init model failed")
    print("load init model")
    loading_msg = []
    for name in param_name_list:
        try:
            t = fluid.global_scope().find_var(name).get_tensor()
            load_param = model_init[str(name)]
            # Only copy when shapes match; silently skip mismatches.
            if load_param.shape == np.asarray(t).shape:
                t.set(load_param.astype(datatype), place)
        except AttributeError as e:
            loading_msg.append(str(e) + "%s exist not in this model and cannot be load!"%name)
        except KeyError as e:
            loading_msg.append(str(e) + "%s exist not in this model and cannot be load!"%name)
    # load opt state
    if opt_state_init_file != "":
        print("begin to load opt state")
        opt_state_data = np.load(opt_state_init_file)
        for k, v in opt_state_data.items():
            t = fluid.global_scope().find_var(str(k)).get_tensor()
            t.set(v, place)
        print("set opt state finished")
    print("init model parameters finshed")
    return loading_msg
def write_iterable(filename, iterable_obj):
    """Write each element of *iterable_obj* to *filename*, one str() per line."""
    with open(filename, 'w', encoding='utf8') as handle:
        handle.writelines(str(elem) + '\n' for elem in iterable_obj)
if __name__ == '__main__':
    # Smoke test: run the decorated demo function when executed directly.
    res = hello_word()
    print(res)
{
"api_name": "collections.namedtuple",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.compile",... |
469424801 | #!/usr/bin/env python
from setuptools import setup

# Package metadata shared between several setup() fields below.
VERSION = "0.2"
REPO = "https://github.com/duedil-ltd/python-sloq"
README = "README.rst"

# Long description is pulled verbatim from the README at build time.
with open(README) as f:
    long_description = f.read()

setup(
    name="sloq",
    version=VERSION,
    description="Rate-limited Queue",
    author="Paul Scott, Duedil Limited",
    author_email="paul@duedil.com",
    url=REPO,
    download_url="%s/tarball/%s" % (REPO, VERSION),
    py_modules=["sloq"],
    test_suite="test_sloq",
    license="MIT",
    long_description=long_description,
    keywords="queue rate limit slow token bucket".split(),
    classifiers=[],
)
| duedil-ltd/python-sloq | setup.py | setup.py | py | 617 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 11,
"usage_type": "call"
}
] |
4352028229 | import sys
import csv
import json
# Converts the JSON output of a PowerBI query to a CSV file
def extract(input_file, output_file):
    """Convert one PowerBI query-result JSON file into a CSV file.

    Pipeline: parse the nested result structure, rebuild sparse rows
    (bitset compression), expand dictionary-encoded values, strip embedded
    newlines, then write the CSV.
    """
    input_json = read_json(input_file)
    # Navigate PowerBI's nested envelope down to the row data (DM0).
    data = input_json["results"][0]["result"]["data"]
    dm0 = data["dsr"]["DS"][0]["PH"][0]["DM0"]
    # Column type descriptors ride on the first DM0 entry.
    columns_types = dm0[0]["S"]
    # Kind == 1 entries are grouping columns; others carry a Value label.
    columns = map(lambda item: item["GroupKeys"][0]["Source"]["Property"] if item["Kind"] == 1 else item["Value"], data["descriptor"]["Select"])
    value_dicts = data["dsr"]["DS"][0].get("ValueDicts", {})
    reconstruct_arrays(columns_types, dm0)
    expand_values(columns_types, dm0, value_dicts)
    replace_newlines_with(dm0, "")
    write_csv(output_file, columns, dm0)
def read_json(file_name):
    """Parse *file_name* as JSON and return the resulting object."""
    with open(file_name) as handle:
        return json.load(handle)
def write_csv(output_file, columns, dm0):
    """Write the header *columns* then one CSV row per DM0 item's "C" list."""
    with open(output_file, "w") as out:
        writer = csv.writer(out)
        writer.writerow(columns)
        writer.writerows(row["C"] for row in dm0)
def reconstruct_arrays(columns_types, dm0):
    # fixes array index by applying
    # "R" bitset to copy previous values
    # "Ø" bitset to set null values
    #
    # PowerBI compresses rows: a set bit in "R" means "repeat the value at
    # this column index from the previous row"; a set bit in "Ø" means the
    # value is null.  Missing cells are re-inserted in place.
    #
    # NOTE(review): `prevItem` is only assigned at the end of each
    # iteration — this assumes the *first* DM0 item never carries "R"/"Ø"
    # (otherwise `prevItem` would be unbound).  TODO confirm against the
    # PowerBI payload format.
    lenght = len(columns_types)
    for item in dm0:
        currentItem = item["C"]
        if "R" in item or "Ø" in item:
            copyBitset = item.get("R", 0)
            deleteBitSet = item.get("Ø", 0)
            for i in range(lenght):
                if is_bit_set_for_index(i, copyBitset):
                    currentItem.insert(i, prevItem[i])
                elif is_bit_set_for_index(i, deleteBitSet):
                    currentItem.insert(i, None)
        prevItem = currentItem
def is_bit_set_for_index(index, bitset):
    """Return True when bit *index* (LSB = 0) of *bitset* is set."""
    return bool(bitset & (1 << index))
# substitute indexes with actual values
def expand_values(columns_types, dm0, value_dicts):
    """Replace integer dictionary indexes in each row with looked-up values.

    Columns whose descriptor carries a "DN" key store their cell values as
    indexes into ``value_dicts[col["DN"]]``; those cells are rewritten in
    place.
    """
    for position, column in enumerate(columns_types):
        if "DN" not in column:
            continue
        for row in dm0:
            cells = row["C"]
            if isinstance(cells[position], int):
                cells[position] = value_dicts[column["DN"]][cells[position]]
def replace_newlines_with(dm0, replacement):
    """Replace embedded newlines with *replacement* in every string cell, in place."""
    for row in dm0:
        cells = row["C"]
        for pos, value in enumerate(cells):
            if isinstance(value, str):
                cells[pos] = value.replace("\n", replacement)
def main():
    """CLI entry point: expects exactly two arguments (input, output).

    Fix: ``sys.exit`` takes a single argument; the original passed a
    ``file=sys.stderr`` keyword (a ``print``-ism), which raised
    ``TypeError`` instead of printing usage.  ``sys.exit(message)`` already
    writes the message to stderr and exits with status 1.
    """
    if len(sys.argv) == 3:
        extract(sys.argv[1], sys.argv[2])
    else:
        sys.exit("Usage: python3 " + sys.argv[0] + " input_file output_file")
if __name__ == "__main__":
main()
| ondata/covid19italia | webservices/vaccini/puntiSomministrazione.py | puntiSomministrazione.py | py | 2,598 | python | en | code | 207 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 71,
... |
20028159089 | from boto3 import client
from flask import Flask, jsonify, request, make_response
from .utils import get_timestamp
from .constants import FAVOURITE_COMPANIES_TABLE, FAVOURITE_ORG_ID, ORG_ID
app = Flask(__name__)
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = True

# NOTE(review): this rebinds the imported boto3 ``client`` factory name to
# the DynamoDB client instance; every route below uses it as the instance.
client = client("dynamodb", region_name="eu-west-1")

# Fix: removed leftover debug code (`hey = lambda x: x` followed by
# `print(hey)`) that served no purpose and printed at import time.
@app.route("/favourite_companies", methods=["GET"])
def get_all_favourite_companies():
table = client.scan(TableName=FAVOURITE_COMPANIES_TABLE)
return jsonify(table["Items"]), 200, {"Content-Type": "application/json"}
@app.route("/favourite_company/<string:org_id>", methods=["GET"])
def get_company(org_id):
resp = client.query(
TableName=FAVOURITE_COMPANIES_TABLE,
KeyConditions={
ORG_ID: {
"ComparisonOperator": "EQ",
"AttributeValueList": [{"S": org_id}],
}
},
)
if not resp["Items"]:
return jsonify({"error": f"There is no company with org_id: {org_id}"}), 400
return jsonify(resp["Items"])
@app.route(
    "/favourite_company/delete/<string:org_id>/<string:favourite_org_id>",
    methods=["DELETE"],
)
def delete_company(org_id, favourite_org_id):
    """Delete one (org_id, favourite_org_id) record; 400 if it doesn't exist.

    NOTE(review): get_item followed by delete_item is not atomic — the
    record can vanish between the two calls.  Acceptable for this API, but
    a conditional delete would close the race.
    """
    # Existence check so a missing record yields a 400 instead of a no-op.
    resp = client.get_item(
        TableName=FAVOURITE_COMPANIES_TABLE,
        Key={
            ORG_ID: {"S": org_id},
            FAVOURITE_ORG_ID: {"S": favourite_org_id},
        },
    )
    if not resp.get("Item"):
        return (
            jsonify(
                {
                    "error": f"There is no company with org_id: {org_id} and favourite_org_id: {favourite_org_id}."
                }
            ),
            400,
        )
    _ = client.delete_item(
        TableName=FAVOURITE_COMPANIES_TABLE,
        Key={
            ORG_ID: {"S": org_id},
            FAVOURITE_ORG_ID: {"S": favourite_org_id},
        },
    )
    return jsonify(
        {
            "success": f"Deleted company with org_id: {org_id} and favourite_org_id: {favourite_org_id}."
        }
    )
@app.route("/favourite_company/create", methods=["POST"])
def create_user():
org_id = request.json.get(ORG_ID)
favourite_org_id = request.json.get(FAVOURITE_ORG_ID)
if not org_id or not favourite_org_id:
return (
jsonify(
{
"error": "Please provide org_id and favourite_org_id in the request body."
}
),
400,
)
item = {
ORG_ID: {"S": org_id},
FAVOURITE_ORG_ID: {"S": favourite_org_id},
"date": {"S": get_timestamp()},
}
_ = client.put_item(TableName=FAVOURITE_COMPANIES_TABLE, Item=item)
return jsonify(
{
"success": f"Created company with org_id: {org_id} and favourite_org_id: {favourite_org_id}"
}
)
@app.errorhandler(404)
def resource_not_found(_):
    """Return a JSON body (instead of HTML) for any unmatched route."""
    body = jsonify(error_handled="Not found!")
    return make_response(body, 404)
| AndreuJove/serverless_training | app/app.py | app.py | py | 2,942 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "boto3.client.scan",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_nu... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.